diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4bd765afd2..7a5428c3b9 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -16,7 +16,7 @@ updates: - package-ecosystem: gomod directory: "/" schedule: - interval: daily + interval: weekly target-branch: "v0.35.x" open-pull-requests-limit: 10 reviewers: diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2511b6521a..1a040bc1d0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -26,8 +26,8 @@ jobs: - uses: actions/setup-go@v3.2.1 with: go-version: "1.18" - - uses: actions/checkout@v2.4.0 - - uses: technote-space/get-diff-action@v5 + - uses: actions/checkout@v3 + - uses: technote-space/get-diff-action@v6 with: PATTERNS: | **/**.go diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml new file mode 100644 index 0000000000..d8c1a79f76 --- /dev/null +++ b/.github/workflows/check-generated.yml @@ -0,0 +1,75 @@ +# Verify that generated code is up-to-date. +# +# Note that we run these checks regardless whether the input files have +# changed, because generated code can change in response to toolchain updates +# even if no files in the repository are modified. +name: Check generated code +on: + pull_request: + branches: + - master + +permissions: + contents: read + +jobs: + check-mocks: + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v3 + with: + go-version: '1.18' + + - uses: actions/checkout@v3 + + - name: "Check generated mocks" + run: | + set -euo pipefail + + readonly MOCKERY=2.12.3 # N.B. no leading "v" + curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" | tar -C /usr/local/bin -xzf - + make mockery 2>/dev/null + + if ! git diff --stat --exit-code ; then + echo ">> ERROR:" + echo ">>" + echo ">> Generated mocks require update (either Mockery or source files may have changed)." 
+ echo ">> Ensure your tools are up-to-date, re-run 'make mockery' and update this PR." + echo ">>" + exit 1 + fi + + check-proto: + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v3 + with: + go-version: '1.18' + + - uses: actions/checkout@v3 + with: + fetch-depth: 1 # we need a .git directory to run git diff + + - name: "Check protobuf generated code" + run: | + set -euo pipefail + + # Install buf and gogo tools, so that differences that arise from + # toolchain differences are also caught. + readonly tools="$(mktemp -d)" + export PATH="${PATH}:${tools}/bin" + export GOBIN="${tools}/bin" + + go install github.com/bufbuild/buf/cmd/buf + go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest + + make proto-gen + + if ! git diff --stat --exit-code ; then + echo ">> ERROR:" + echo ">>" + echo ">> Protobuf generated code requires update (either tools or .proto files may have changed)." + echo ">> Ensure your tools are up-to-date, re-run 'make proto-gen' and update this PR." + echo ">>" + exit 1 + fi diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1ce4f97501..65a8c177cc 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -50,7 +50,7 @@ jobs: suffix=${{ steps.suffix.outputs.result }} - name: Publish to Docker Hub - uses: docker/build-push-action@v3.0.0 + uses: docker/build-push-action@v3.1.0 with: context: . file: ./DOCKER/Dockerfile diff --git a/.github/workflows/docs-deployment.yml b/.github/workflows/docs-deployment.yml new file mode 100644 index 0000000000..082484dd58 --- /dev/null +++ b/.github/workflows/docs-deployment.yml @@ -0,0 +1,62 @@ +# Build and deploy the docs.tendermint.com website content. +# The static content is published to GitHub Pages. +# +# For documentation build info, see docs/DOCS_README.md. 
+name: Build static documentation site +on: + workflow_dispatch: # allow manual updates + push: + branches: + - master + paths: + - docs/** + - spec/** + +jobs: + # This is split into two jobs so that the build, which runs npm, does not + # have write access to anything. The deploy requires write access to publish + # to the branch used by GitHub Pages, however, so we can't just make the + # whole workflow read-only. + build: + name: VuePress build + runs-on: ubuntu-latest + container: + image: alpine:latest + permissions: + contents: read + steps: + - name: Install generator dependencies + run: | + apk add --no-cache make bash git npm + - uses: actions/checkout@v3 + with: + # We need to fetch full history so the backport branches for previous + # versions will be available for the build. + fetch-depth: 0 + - name: Build documentation + run: | + git config --global --add safe.directory "$PWD" + make build-docs + - uses: actions/upload-artifact@v3 + with: + name: build-output + path: ~/output/ + + deploy: + name: Deploy to GitHub Pages + runs-on: ubuntu-latest + needs: build + permissions: + contents: write + steps: + - uses: actions/checkout@v3 + - uses: actions/download-artifact@v3 + with: + name: build-output + path: ~/output + - name: Deploy to GitHub Pages + uses: JamesIves/github-pages-deploy-action@v4 + with: + branch: 'docs-tendermint-com' + folder: ~/output + single-commit: true diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index 5cb9c95815..476c37577f 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -11,7 +11,7 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03'] + group: ['00', '01', '02', '03', '04'] runs-on: ubuntu-latest timeout-minutes: 60 steps: @@ -29,7 +29,7 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 4 -d networks/nightly/ + run: ./build/generator 
-g 5 -d networks/nightly/ - name: Run ${{ matrix.p2p }} p2p testnets working-directory: test/e2e diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 86bf3a8992..b3216a0b32 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -1,8 +1,7 @@ -# Runs randomly generated E2E testnets nightly -# on the 0.34.x release branch +# Runs randomly generated E2E testnets nightly on the 0.34.x branch. -# !! If you change something in this file, you probably want -# to update the e2e-nightly-master workflow as well! +# !! This file should be kept in sync with the e2e-nightly-master.yml file, +# modulo changes to the version labels. name: e2e-nightly-34x on: diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml index df7bbbd959..32bd1faf3d 100644 --- a/.github/workflows/e2e-nightly-35x.yml +++ b/.github/workflows/e2e-nightly-35x.yml @@ -1,7 +1,7 @@ -# Runs randomly generated E2E testnets nightly on v0.35.x. +# Runs randomly generated E2E testnets nightly on the v0.35.x branch. -# !! If you change something in this file, you probably want -# to update the e2e-nightly-master workflow as well! +# !! This file should be kept in sync with the e2e-nightly-master.yml file, +# modulo changes to the version labels. name: e2e-nightly-35x on: @@ -22,7 +22,7 @@ jobs: steps: - uses: actions/setup-go@v3.2.1 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 with: diff --git a/.github/workflows/e2e-nightly-36x.yml b/.github/workflows/e2e-nightly-36x.yml new file mode 100644 index 0000000000..9ac7971467 --- /dev/null +++ b/.github/workflows/e2e-nightly-36x.yml @@ -0,0 +1,74 @@ +# Runs randomly generated E2E testnets nightly on the v0.36.x branch. + +# !! This file should be kept in sync with the e2e-nightly-master.yml file, +# modulo changes to the version labels. 
+ +name: e2e-nightly-36x +on: + schedule: + - cron: '0 2 * * *' + +jobs: + e2e-nightly-test: + # Run parallel jobs for the listed testnet groups (must match the + # ./build/generator -g flag) + strategy: + fail-fast: false + matrix: + group: ['00', '01', '02', '03', '04'] + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/setup-go@v3 + with: + go-version: '1.18' + + - uses: actions/checkout@v3 + with: + ref: 'v0.36.x' + + - name: Build + working-directory: test/e2e + # Run make jobs in parallel, since we can't run steps in parallel. + run: make -j2 docker generator runner tests + + - name: Generate testnets + working-directory: test/e2e + # When changing -g, also change the matrix groups above + run: ./build/generator -g 5 -d networks/nightly + + - name: Run testnets in group ${{ matrix.group }} + working-directory: test/e2e + run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + + e2e-nightly-fail-2: + needs: e2e-nightly-test + if: ${{ failure() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on failure + uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly E2E Tests + SLACK_ICON_EMOJI: ':skull:' + SLACK_COLOR: danger + SLACK_MESSAGE: Nightly E2E tests failed on v0.36.x + SLACK_FOOTER: '' + + e2e-nightly-success: # may turn this off once they seem to pass consistently + needs: e2e-nightly-test + if: ${{ success() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on success + uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly E2E Tests + SLACK_ICON_EMOJI: ':white_check_mark:' + SLACK_COLOR: good + SLACK_MESSAGE: Nightly E2E tests passed on v0.36.x + SLACK_FOOTER: '' diff --git a/.github/workflows/e2e-nightly-master.yml 
b/.github/workflows/e2e-nightly-master.yml index cca0c6e00e..c25fb54dd1 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -1,7 +1,8 @@ # Runs randomly generated E2E testnets nightly on master -# !! If you change something in this file, you probably want -# to update the e2e-nightly-34x workflow as well! +# !! Relevant changes to this file should be propagated to the e2e-nightly-x +# files for the supported backport branches, when appropriate, modulo version +# markers. name: e2e-nightly-master on: @@ -16,7 +17,7 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03'] + group: ['00', '01', '02', '03', '04'] runs-on: ubuntu-latest timeout-minutes: 60 steps: @@ -34,7 +35,7 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 4 -d networks/nightly/ + run: ./build/generator -g 5 -d networks/nightly/ - name: Run ${{ matrix.p2p }} p2p testnets working-directory: test/e2e diff --git a/.github/workflows/janitor.yml b/.github/workflows/janitor.yml index e6bc45ec19..ceb21941d1 100644 @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 3 steps: - - uses: styfle/cancel-workflow-action@0.9.1 + - uses: styfle/cancel-workflow-action@0.10.0 with: workflow_id: 1041851,1401230,2837803 access_token: ${{ github.token }} diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml deleted file mode 100644 index e2ba808617..0000000000 --- a/.github/workflows/linkchecker.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: Check Markdown links -on: - schedule: - - cron: '* */24 * * *' -jobs: - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: creachadair/github-action-markdown-link-check@master - with: - folder-path: "docs" diff --git a/.github/workflows/proto-lint.yml
b/.github/workflows/proto-lint.yml index b1fbeab9df..377e43ca63 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 5 steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.4.0 + - uses: bufbuild/buf-setup-action@v1.6.0 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a602cd1710..5e4e4defc1 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -17,7 +17,7 @@ jobs: go-version: '1.18' - name: Build - uses: goreleaser/goreleaser-action@v2 + uses: goreleaser/goreleaser-action@v3 if: ${{ github.event_name == 'pull_request' }} with: version: latest @@ -26,7 +26,7 @@ jobs: - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v2 + uses: goreleaser/goreleaser-action@v3 if: startsWith(github.ref, 'refs/tags/') with: version: latest diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index da95654e28..da67b0b668 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -37,44 +37,3 @@ jobs: run: | make test-group-${{ matrix.part }} NUM_SPLIT=6 if: env.GIT_DIFF - - uses: actions/upload-artifact@v3 - with: - name: "${{ github.sha }}-${{ matrix.part }}-coverage" - path: ./build/${{ matrix.part }}.profile.out - - upload-coverage-report: - runs-on: ubuntu-latest - needs: tests - steps: - - uses: actions/checkout@v3 - - uses: technote-space/get-diff-action@v6 - with: - PATTERNS: | - **/**.go - "!test/" - go.mod - go.sum - Makefile - - uses: actions/download-artifact@v3 - with: - name: "${{ github.sha }}-00-coverage" - if: env.GIT_DIFF - - uses: actions/download-artifact@v3 - with: - name: "${{ github.sha }}-01-coverage" - if: env.GIT_DIFF - - uses: actions/download-artifact@v3 - with: - name: "${{ 
github.sha }}-02-coverage" - if: env.GIT_DIFF - - uses: actions/download-artifact@v3 - with: - name: "${{ github.sha }}-03-coverage" - if: env.GIT_DIFF - - run: | - cat ./*profile.out | grep -v "mode: set" >> coverage.txt - if: env.GIT_DIFF - - uses: codecov/codecov-action@v3.1.0 - with: - file: ./coverage.txt - if: env.GIT_DIFF diff --git a/CHANGELOG.md b/CHANGELOG.md index 038723933f..376752f6e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1917,10 +1917,101 @@ ### Rfc -- E2e improvements (#6941) -- Add performance taxonomy rfc (#6921) -- Fix a few typos and formatting glitches p2p roadmap (#6960) -- Event system (#6957) +## v0.35.9 + +July 20, 2022 + +This release fixes a deadlock that could occur in some cases when using the +priority mempool with the ABCI socket client. + +### BUG FIXES + +- [mempool] [\#9030](https://github.com/tendermint/tendermint/pull/9030) rework lock discipline to mitigate callback deadlocks (@creachadair) + + +## v0.35.8 + +July 12, 2022 + +Special thanks to external contributors on this release: @joeabbey + +This release fixes an unbounded heap growth issue in the implementation of the +priority mempool, as well as some configuration, logging, and peer dialing +improvements in the non-legacy p2p stack. It also adds a new opt-in +"simple-priority" value for the `p2p.queue-type` setting, that should improve +gossip performance for non-legacy peer networks. + +### BREAKING CHANGES + +- CLI/RPC/Config + + - [node] [\#8902](https://github.com/tendermint/tendermint/pull/8902) Always start blocksync and avoid misconfiguration (@tychoish) + +### FEATURES + +- [cli] [\#8675](https://github.com/tendermint/tendermint/pull/8675) Add command to force compact goleveldb databases (@cmwaters) + +### IMPROVEMENTS + +- [p2p] [\#8914](https://github.com/tendermint/tendermint/pull/8914) [\#8875](https://github.com/tendermint/tendermint/pull/8875) Improvements to peer dialing (backported). 
(@tychoish) +- [p2p] [\#8820](https://github.com/tendermint/tendermint/pull/8820) add eviction metrics and cleanup dialing error handling (backport #8819) (@tychoish) +- [logging] [\#8896](https://github.com/tendermint/tendermint/pull/8896) Do not pre-process log results (backport #8895). (@tychoish) +- [p2p] [\#8956](https://github.com/tendermint/tendermint/pull/8956) Simpler priority queue (backport #8929). (@tychoish) + +### BUG FIXES + +- [mempool] [\#8944](https://github.com/tendermint/tendermint/pull/8944) Fix unbounded heap growth in the priority mempool. (@creachadair) +- [p2p] [\#8869](https://github.com/tendermint/tendermint/pull/8869) Set empty timeouts to configed values. (backport #8847). (@williambanfield) + + +## v0.35.7 + +June 16, 2022 + +### BUG FIXES + +- [p2p] [\#8692](https://github.com/tendermint/tendermint/pull/8692) scale the number of stored peers by the configured maximum connections (#8684) +- [rpc] [\#8715](https://github.com/tendermint/tendermint/pull/8715) always close http bodies (backport #8712) +- [p2p] [\#8760](https://github.com/tendermint/tendermint/pull/8760) accept should not abort on first error (backport #8759) + +### BREAKING CHANGES + +- P2P Protocol + + - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Introduce "inactive" peer label to avoid re-dialing incompatible peers. (@tychoish) + - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish) + - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. (@tychoish) + - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. 
(@tychoish) + +## v0.35.6 + +June 3, 2022 + +### FEATURES + +- [migrate] [\#8672](https://github.com/tendermint/tendermint/pull/8672) provide function for database production (backport #8614) (@tychoish) + +### BUG FIXES + +- [consensus] [\#8651](https://github.com/tendermint/tendermint/pull/8651) restructure peer catchup sleep (@tychoish) +- [pex] [\#8657](https://github.com/tendermint/tendermint/pull/8657) align max address thresholds (@cmwaters) +- [cmd] [\#8668](https://github.com/tendermint/tendermint/pull/8668) don't used global config for reset commands (@cmwaters) +- [p2p] [\#8681](https://github.com/tendermint/tendermint/pull/8681) shed peers from store from other networks (backport #8678) (@tychoish) + + +## v0.35.5 + +May 26, 2022 + +### BUG FIXES + +- [p2p] [\#8371](https://github.com/tendermint/tendermint/pull/8371) fix setting in con-tracker (backport #8370) (@tychoish) +- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters) +- [statesync] [\#8494](https://github.com/tendermint/tendermint/pull/8494) avoid potential race (@tychoish) +- [keymigrate] [\#8467](https://github.com/tendermint/tendermint/pull/8467) improve filtering for legacy transaction hashes (backport #8466) (@creachadair) +- [rpc] [\#8594](https://github.com/tendermint/tendermint/pull/8594) fix encoding of block_results responses (@creachadair) + +## v0.35.4 ### Rpc @@ -2374,10 +2465,51 @@ ### Security -- Bump github.com/spf13/viper from 1.8.0 to 1.8.1 (#6622) -- Bump github.com/rs/cors from 1.7.0 to 1.8.0 (#6635) -- Bump github.com/go-kit/kit from 0.10.0 to 0.11.0 (#6651) -- Bump github.com/spf13/cobra from 1.2.0 to 1.2.1 (#6650) + - [consensus] [\#4582](https://github.com/tendermint/tendermint/pull/4582) RoundState: `Round`, `LockedRound` & `CommitRound` are now `int32` (@marbar3778) + - [consensus] [\#4582](https://github.com/tendermint/tendermint/pull/4582) HeightVoteSet: `round` is now `int32` 
(@marbar3778) + - [crypto] [\#4721](https://github.com/tendermint/tendermint/pull/4721) Remove `SimpleHashFromMap()` and `SimpleProofsFromMap()` (@erikgrinaker) + - [crypto] [\#4940](https://github.com/tendermint/tendermint/pull/4940) All keys have become `[]byte` instead of `[]byte`. The byte method no longer returns the marshaled value but just the `[]byte` form of the data. (@marbar3778) + - [crypto] [\#4988](https://github.com/tendermint/tendermint/pull/4988) Removal of key type multisig (@marbar3778) + - The key has been moved to the [Cosmos-SDK](https://github.com/cosmos/cosmos-sdk/blob/master/crypto/types/multisig/multisignature.go) + - [crypto] [\#4989](https://github.com/tendermint/tendermint/pull/4989) Remove `Simple` prefixes from `SimpleProof`, `SimpleValueOp` & `SimpleProofNode`. (@marbar3778) + - `merkle.Proof` has been renamed to `ProofOps`. + - Protobuf messages `Proof` & `ProofOp` has been moved to `proto/crypto/merkle` + - `SimpleHashFromByteSlices` has been renamed to `HashFromByteSlices` + - `SimpleHashFromByteSlicesIterative` has been renamed to `HashFromByteSlicesIterative` + - `SimpleProofsFromByteSlices` has been renamed to `ProofsFromByteSlices` + - [crypto] [\#4941](https://github.com/tendermint/tendermint/pull/4941) Remove suffixes from all keys. 
(@marbar3778) + - ed25519: type `PrivKeyEd25519` is now `PrivKey` + - ed25519: type `PubKeyEd25519` is now `PubKey` + - secp256k1: type`PrivKeySecp256k1` is now `PrivKey` + - secp256k1: type`PubKeySecp256k1` is now `PubKey` + - sr25519: type `PrivKeySr25519` is now `PrivKey` + - sr25519: type `PubKeySr25519` is now `PubKey` + - [crypto] [\#5214](https://github.com/tendermint/tendermint/pull/5214) Change `GenPrivKeySecp256k1` to `GenPrivKeyFromSecret` to be consistent with other keys (@marbar3778) + - [crypto] [\#5236](https://github.com/tendermint/tendermint/pull/5236) `VerifyBytes` is now `VerifySignature` on the `crypto.PubKey` interface (@marbar3778) + - [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and change evidence interface (@cmwaters) + - [libs] [\#4831](https://github.com/tendermint/tendermint/pull/4831) Remove `Bech32` pkg from Tendermint. This pkg now lives in the [cosmos-sdk](https://github.com/cosmos/cosmos-sdk/tree/4173ea5ebad906dd9b45325bed69b9c655504867/types/bech32) (@marbar3778) + - [light] [\#4946](https://github.com/tendermint/tendermint/pull/4946) Rename `lite2` pkg to `light`. Remove `lite` implementation. (@marbar3778) + - [light] [\#5347](https://github.com/tendermint/tendermint/pull/5347) `NewClient`, `NewHTTPClient`, `VerifyHeader` and `VerifyLightBlockAtHeight` now accept `context.Context` as 1st param (@melekes) + - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) `HashFromByteSlices` and `ProofsFromByteSlices` now return a hash for empty inputs, following RFC6962 (@erikgrinaker) + - [proto] [\#5025](https://github.com/tendermint/tendermint/pull/5025) All proto files have been moved to `/proto` directory. 
(@marbar3778) + - Using the recommended the file layout from buf, [see here for more info](https://docs.buf.build/lint/rules) + - [rpc/client] [\#4947](https://github.com/tendermint/tendermint/pull/4947) `Validators`, `TxSearch` `page`/`per_page` params become pointers (@melekes) + - `UnconfirmedTxs` `limit` param is a pointer + - [rpc/jsonrpc/server] [\#5141](https://github.com/tendermint/tendermint/pull/5141) Remove `WriteRPCResponseArrayHTTP` (use `WriteRPCResponseHTTP` instead) (@melekes) + - [state] [\#4679](https://github.com/tendermint/tendermint/pull/4679) `TxResult` is a Protobuf type defined in `abci` types directory (@marbar3778) + - [state] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `State.InitialHeight` field to record initial block height, must be `1` (not `0`) to start from 1 (@erikgrinaker) + - [state] [\#5231](https://github.com/tendermint/tendermint/pull/5231) `LoadStateFromDBOrGenesisFile()` and `LoadStateFromDBOrGenesisDoc()` no longer saves the state in the database if not found, the genesis state is simply returned (@erikgrinaker) + - [state] [\#5348](https://github.com/tendermint/tendermint/pull/5348) Define an Interface for the state store. 
(@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `SignedMsgType` has moved to a Protobuf enum types (@marbar3778) + - [types] [\#4962](https://github.com/tendermint/tendermint/pull/4962) `ConsensusParams`, `BlockParams`, `EvidenceParams`, `ValidatorParams` & `HashedParams` are now Protobuf types (@marbar3778) + - [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778) + - [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes) + - [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes) + - [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Block: `Round` is now `int32` (@marbar3778) ### Testing @@ -3501,7 +3633,7 @@ - Only run when applicable (#4752) - Check git diff on each job (#4770) - Checkout code before git diff check (#4779) -- Add paths +- Add paths - Bump the timeout for test_coverage (#4864) - Migrate localnet to github actions (#4878) - Add timeouts (#4912) @@ -4395,12 +4527,12 @@ ### P2p/conn -- Add Bufferpool (#3664) -- Simplify secret connection handshake malleability fix with merlin (#4185) -- Add a test for MakeSecretConnection (#4829) -- Migrate to Protobuf (#4990) -- 
Check for channel id overflow before processing receive msg (#6522) -- Check for channel id overflow before processing receive msg (backport #6522) (#6528) +This release fixes yet another issue with the proposer selection algorithm. +We hope it's the last one, but we won't be surprised if it's not. +We plan to one day expose the selection algorithm more directly to +the application ([\#3285](https://github.com/tendermint/tendermint/issues/3285)), and even to support randomness ([\#763](https://github.com/tendermint/tendermint/issues/763)). +For more, see issues marked +[proposer-selection](https://github.com/tendermint/tendermint/labels/proposer-selection). ### P2p/pex @@ -4750,7 +4882,7 @@ ### Swagger - Update swagger port (#4498) -- Remove duplicate blockID +- Remove duplicate blockID - Define version (#4952) - Update (#5257) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 442cca2165..6443f16305 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -21,17 +21,29 @@ Special thanks to external contributors on this release: - [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair) - [cli] \#8081 make the reset command safe to use by intoducing `reset-state` command. Fixed by \#8259. (@marbar3778, @cmwaters) - [config] \#8222 default indexer configuration to null. (@creachadair) + - [rpc] \#8570 rework timeouts to be per-method instead of global. (@creachadair) + - [rpc] \#8624 deprecate `broadcast_tx_commit` and `broadcast_tx_sync` and `broadcast_tx_async` in favor of `broadcast_tx`. (@tychoish) + - [config] \#8654 remove deprecated `seeds` field from config. Users should switch to `bootstrap-peers` instead. (@cmwaters) - Apps - [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec). - [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint.
(@tychoish) + - [abci] \#8605 Remove info, log, events, gasUsed and mempoolError fields from ResponseCheckTx as they are not used by Tendermint. (@jmalicevic) + - [abci] \#8664 Move `app_hash` parameter from `Commit` to `FinalizeBlock`. (@sergio-mena) + - [abci] \#8656 Added cli command for `PrepareProposal`. (@jmalicevic) + - [sink/psql] \#8637 tx_results emitted from psql sink are now json encoded, previously they were protobuf encoded + - [abci] \#8901 Added cli command for `ProcessProposal`. (@hvanz) - P2P Protocol - [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish) - [p2p] \#7265 Peer manager reduces peer score for each failed dial attempts for peers that have not successfully dialed. (@tychoish) - [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) always advertise self, to enable mutual address discovery. (@altergui) + - [p2p] \#8737 Introduce "inactive" peer label to avoid re-dialing incompatible peers. (@tychoish) + - [p2p] \#8737 Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish) + - [p2p] \#8737 Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. (@tychoish) + - [p2p] \#8737 Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. (@tychoish) - Go API @@ -57,13 +69,16 @@ Special thanks to external contributors on this release: - [rpc] [\#7701] Add `ApplicationInfo` to `status` rpc call which contains the application version. (@jonasbostoen) - [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of non-determinstic app hash or reverting an upgrade. - [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. 
(@tychoish) -- [consensus] \#7354 add a new `synchrony` field to the `ConsensusParameter` struct for controlling the parameters of the proposer-based timestamp algorithm. (@williambanfield) +- [consensus] \#7354 add a new `synchrony` field to the `ConsensusParams` struct for controlling the parameters of the proposer-based timestamp algorithm. (@williambanfield) - [consensus] \#7376 Update the proposal logic per the Propose-based timestamps specification so that the proposer will wait for the previous block time to occur before proposing the next block. (@williambanfield) - [consensus] \#7391 Use the proposed block timestamp as the proposal timestamp. Update the block validation logic to ensure that the proposed block's timestamp matches the timestamp in the proposal message. (@williambanfield) - [consensus] \#7415 Update proposal validation logic to Prevote nil if a proposal does not meet the conditions for Timelyness per the proposer-based timestamp specification. (@anca) - [consensus] \#7382 Update block validation to no longer require the block timestamp to be the median of the timestamps of the previous commit. (@anca) - [consensus] \#7711 Use the proposer timestamp for the first height instead of the genesis time. Chains will still start consensus at the genesis time. (@anca) - [cli] \#8281 Add a tool to update old config files to the latest version. (@creachadair) +- [consensus] \#8514 move `RecheckTx` from the local node mempool config to a global `ConsensusParams` field in `BlockParams` (@cmwaters) +- [abci] ABCI++ [specified](https://github.com/tendermint/tendermint/tree/master/spec/abci%2B%2B). (@sergio-mena, @cmwaters, @josef-widder) +- [abci] ABCI++ [implemented](https://github.com/orgs/tendermint/projects/9). (@williambanfield, @thanethomson, @sergio-mena) ### IMPROVEMENTS @@ -86,3 +101,4 @@ Special thanks to external contributors on this release: - [cli] \#8276 scmigrate: ensure target key is correctly renamed.
(@creachadair) - [cli] \#8294 keymigrate: ensure block hash keys are correctly translated. (@creachadair) - [cli] \#8352 keymigrate: ensure transaction hash keys are correctly translated. (@creachadair) +- [indexer] \#8625 Fix overriding tx index of duplicated txs. diff --git a/Makefile b/Makefile index 775d7d2235..8ac63d59e0 100644 --- a/Makefile +++ b/Makefile @@ -110,9 +110,6 @@ $(BUILDDIR)/: ############################################################################### check-proto-deps: -ifeq (,$(shell which buf)) - $(error "buf is required for Protobuf building, linting and breakage checking. See https://docs.buf.build/installation for installation instructions.") -endif ifeq (,$(shell which protoc-gen-gogofaster)) $(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install") endif @@ -126,7 +123,7 @@ endif proto-gen: check-proto-deps @echo "Generating Protobuf files" - @buf generate + @go run github.com/bufbuild/buf/cmd/buf generate @mv ./proto/tendermint/abci/types.pb.go ./abci/types/ .PHONY: proto-gen @@ -134,7 +131,7 @@ proto-gen: check-proto-deps # execution only. proto-lint: check-proto-deps @echo "Linting Protobuf files" - @buf lint + @go run github.com/bufbuild/buf/cmd/buf lint .PHONY: proto-lint proto-format: check-proto-format-deps @@ -147,7 +144,7 @@ proto-check-breaking: check-proto-deps @echo "Note: This is only useful if your changes have not yet been committed."
@echo " Otherwise read up on buf's \"breaking\" command usage:" @echo " https://docs.buf.build/breaking/usage" - @buf breaking --against ".git" + @go run github.com/bufbuild/buf/cmd/buf breaking --against ".git" .PHONY: proto-check-breaking ############################################################################### @@ -273,7 +270,8 @@ DESTINATION = ./index.html.md build-docs: @cd docs && \ while read -r branch path_prefix; do \ - (git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \ + ( git checkout $${branch} && npm ci --quiet && \ + VUEPRESS_BASE="/$${path_prefix}/" npm run build --quiet ) ; \ mkdir -p ~/output/$${path_prefix} ; \ cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \ cp ~/output/$${path_prefix}/index.html ~/output ; \ @@ -301,6 +299,21 @@ mockery: go generate -run="./scripts/mockery_generate.sh" ./... .PHONY: mockery +############################################################################### +### Metrics ### +############################################################################### + +metrics: testdata-metrics + go generate -run="scripts/metricsgen" ./... +.PHONY: metrics + + # By convention, the go tool ignores subdirectories of directories named + # 'testdata'. This command invokes the generate command on the folder directly + # to avoid this. +testdata-metrics: + ls ./scripts/metricsgen/testdata | xargs -I{} go generate -run="scripts/metricsgen" ./scripts/metricsgen/testdata/{} +.PHONY: testdata-metrics + ############################################################################### ### Local testnet using docker ### ############################################################################### diff --git a/RELEASES.md b/RELEASES.md index f3bfd20d5c..803fc0d95a 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,8 +1,9 @@ # Releases -Tendermint uses [semantic versioning](https://semver.org/) with each release following -a `vX.Y.Z` format. 
The `master` branch is used for active development and thus it's -advisable not to build against it. +Tendermint uses modified [semantic versioning](https://semver.org/) with each +release following a `vX.Y.Z` format. Tendermint is currently on major version +0 and uses the minor version to signal breaking changes. The `master` branch is +used for active development and thus it is not advisable to build against it. The latest changes are always initially merged into `master`. Releases are specified using tags and are built from long-lived "backport" branches @@ -29,8 +30,8 @@ merging the pull request. ### Creating a backport branch -If this is the first release candidate for a major release, you get to have the -honor of creating the backport branch! +If this is the first release candidate for a minor version release, e.g. +v0.25.0, you get to have the honor of creating the backport branch! Note that, after creating the backport branch, you'll also need to update the tags on `master` so that `go mod` is able to order the branches correctly. You @@ -77,7 +78,8 @@ the 0.35.x line. After doing these steps, go back to `master` and do the following: -1. Tag `master` as the dev branch for the _next_ major release and push it up to GitHub. +1. Tag `master` as the dev branch for the _next_ minor version release and push + it up to GitHub. For example: ```sh git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36." @@ -99,7 +101,7 @@ After doing these steps, go back to `master` and do the following: ## Release candidates -Before creating an official release, especially a major release, we may want to create a +Before creating an official release, especially a minor release, we may want to create a release candidate (RC) for our friends and partners to test out. We use git tags to create RCs, and we build them off of backport branches. 
@@ -109,7 +111,7 @@ Tags for RCs should follow the "standard" release naming conventions, with `-rcX (Note that branches and tags _cannot_ have the same names, so it's important that these branches have distinct names from the tags/release names.) -If this is the first RC for a major release, you'll have to make a new backport branch (see above). +If this is the first RC for a minor release, you'll have to make a new backport branch (see above). Otherwise: 1. Start from the backport branch (e.g. `v0.35.x`). @@ -140,11 +142,13 @@ Note that this process should only be used for "true" RCs-- release candidates that, if successful, will be the next release. For more experimental "RCs," create a new, short-lived branch and tag that instead. -## Major release +## Minor release -This major release process assumes that this release was preceded by release candidates. +This minor release process assumes that this release was preceded by release candidates. If there were no release candidates, begin by creating a backport branch, as described above. +Before performing these steps, be sure the [Minor Release Checklist](#minor-release-checklist) has been completed. + 1. Start on the backport branch (e.g. `v0.35.x`) 2. Run integration tests (`make test_integrations`) and the e2e nightlies. 3. Prepare the release: @@ -176,16 +180,16 @@ If there were no release candidates, begin by creating a backport branch, as des - Commit these changes to `master` and backport them into the backport branch for this release. -## Minor release (point releases) +## Patch release -Minor releases are done differently from major releases: They are built off of +Patch releases are done differently from minor releases: They are built off of long-lived backport branches, rather than from master. As non-breaking changes land on `master`, they should also be backported into these backport branches. 
-Minor releases don't have release candidates by default, although any tricky +Patch releases don't have release candidates by default, although any tricky changes may merit a release candidate. -To create a minor release: +To create a patch release: 1. Checkout the long-lived backport branch: `git checkout v0.35.x` 2. Run integration tests (`make test_integrations`) and the nightlies. @@ -197,11 +201,143 @@ To create a minor release: - Bump the TMDefaultVersion in `version.go` - Bump the ABCI version number, if necessary. (Note that ABCI follows semver, and that ABCI versions are the only versions - which can change during minor releases, and only field additions are valid minor changes.) + which can change during patch releases, and only field additions are valid patch changes.) 4. Open a PR with these changes that will land them back on `v0.35.x` 5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag. - `git tag -a v0.35.1 -m 'Release v0.35.1'` - `git push origin v0.35.1` 6. Create a pull request back to master with the CHANGELOG & version changes from the latest release. - - Remove all `R:minor` labels from the pull requests that were included in the release. + - Remove all `R:patch` labels from the pull requests that were included in the release. - Do not merge the backport branch into master. + +## Minor Release Checklist + +The following set of steps are performed on all releases that increment the +_minor_ version, e.g. v0.25 to v0.26. These steps ensure that Tendermint is +well tested, stable, and suitable for adoption by the various diverse projects +that rely on Tendermint. + +### Feature Freeze + +Ahead of any minor version release of Tendermint, the software enters 'Feature +Freeze' for at least two weeks. A feature freeze means that _no_ new features +are added to the code being prepared for release. 
No code changes should be made +to the code being released that do not directly improve pressing issues of code +quality. The following must not be merged during a feature freeze: + +* Refactors that are not related to specific bug fixes. +* Dependency upgrades. +* New test code that does not test a discovered regression. +* New features of any kind. +* Documentation or spec improvements that are not related to the newly developed +code. + +This period directly follows the creation of the [backport +branch](#creating-a-backport-branch). The Tendermint team instead directs all +attention to ensuring that the existing code is stable and reliable. Broken +tests are fixed, flaky tests are remedied, end-to-end test failures are +thoroughly diagnosed and all efforts of the team are aimed at improving the +quality of the code. During this period, the upgrade harness tests are run +repeatedly and a variety of in-house testnets are run to ensure Tendermint +functions at the scale it will be used by application developers and node +operators. + +### Nightly End-To-End Tests + +The Tendermint team maintains [a set of end-to-end +tests](https://github.com/tendermint/tendermint/blob/master/test/e2e/README.md#L1) +that run each night on the latest commit of the project and on the code in the +tip of each supported backport branch. These tests start a network of containerized +Tendermint processes and run automated checks that the network functions as +expected in both stable and unstable conditions. During the feature freeze, +these tests are run nightly and must pass consistently for a release of +Tendermint to be considered stable. + +### Upgrade Harness + +> TODO(williambanfield): Change to past tense and clarify this section once +> upgrade harness is complete.
+ +The Tendermint team is creating an upgrade test harness to exercise the +workflow of stopping an instance of Tendermint running one version of the +software and starting up the same application running the next version. To +support upgrade testing, we will add the ability to terminate the Tendermint +process at specific pre-defined points in its execution so that we can verify +upgrades work in a representative sample of stop conditions. + +### Large Scale Testnets + +The Tendermint end-to-end tests run a small network (~10s of nodes) to exercise +basic consensus interactions. Real world deployments of Tendermint often have over +a hundred nodes just in the validator set, with many others acting as full +nodes and sentry nodes. To gain more assurance before a release, we will also run +larger-scale test networks to shake out emergent behaviors at scale. + +Large-scale test networks are run on a set of virtual machines (VMs). Each VM +is equipped with 4 Gigabytes of RAM and 2 CPU cores. The network runs a very +simple key-value store application. The application adds artificial delays to +different ABCI calls to simulate a slow application. Each testnet is briefly +run with no load being generated to collect a baseline performance. Once +baseline is captured, a consistent load is applied across the network. This +load takes the form of 10% of the running nodes all receiving a consistent +stream of two hundred transactions per minute each. 
+ +During each test net, the following metrics are monitored and collected on each +node: + +* Consensus rounds per height +* Maximum connected peers, Minimum connected peers, Rate of change of peer connections +* Memory resident set size +* CPU utilization +* Blocks produced per minute +* Seconds for each step of consensus (Propose, Prevote, Precommit, Commit) +* Latency to receive block proposals + +For these tests we intentionally target low-powered host machines (with low core +counts and limited memory) to ensure we observe similar kinds of resource contention +and limitation that real-world deployments of Tendermint experience in production. + +#### 200 Node Testnet + +To test the stability and performance of Tendermint in a real world scenario, +a 200 node test network is run. The network comprises 5 seed nodes, 100 +validators and 95 non-validating full nodes. All nodes begin by dialing +a subset of the seed nodes to discover peers. The network is run for several +days, with metrics being collected continuously. In cases of changes to performance +critical systems, testnets of larger sizes should be considered. + +#### Rotating Node Testnet + +Real-world deployments of Tendermint frequently see new nodes arrive and old +nodes exit the network. The rotating node testnet ensures that Tendermint is +able to handle this reliably. In this test, a network with 10 validators and +3 seed nodes is started. A rolling set of 25 full nodes are started and each +connects to the network by dialing one of the seed nodes. Once the node is able +to blocksync to the head of the chain and begins producing blocks using +Tendermint consensus it is stopped. Once stopped, a new node is started and +takes its place. This network is run for several days. + +#### Network Partition Testnet + +Tendermint is expected to recover from network partitions. A partition where no +subset of the nodes is left with the super-majority of the stake is expected to +stop making blocks. 
Upon alleviation of the partition, the network is expected +to once again become fully connected and capable of producing blocks. The +network partition testnet ensures that Tendermint is able to handle this +reliably at scale. In this test, a network with 100 validators and 95 full +nodes is started. All validators have equal stake. Once the network is +producing blocks, a set of firewall rules is deployed to create a partitioned +network with 50% of the stake on one side and 50% on the other. Once the +network stops producing blocks, the firewall rules are removed and the nodes +are monitored to ensure they reconnect and that the network again begins +producing blocks. + +#### Absent Stake Testnet + +Tendermint networks often run with _some_ portion of the voting power offline. +The absent stake testnet ensures that large networks are able to handle this +reliably. A set of 150 validator nodes and three seed nodes is started. The set +of 150 validators is configured to only possess a cumulative stake of 67% of +the total stake. The remaining 33% of the stake is configured to belong to +a validator that is never actually run in the test network. The network is run +for multiple days, ensuring that it is able to produce blocks without issue. diff --git a/UPGRADING.md b/UPGRADING.md index 28e44e58c0..f9122a5ded 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -6,9 +6,28 @@ This guide provides instructions for upgrading to specific versions of Tendermin ### ABCI Changes +### ResponseCheckTx Parameter Change + +`ResponseCheckTx` had fields that are not used by Tendermint, they are now removed. +In 0.36, we removed the following fields, from `ResponseCheckTx`: `Log`, `Info`, `Events`, + `GasUsed` and `MempoolError`. +`MempoolError` was used to signal to operators that a transaction was rejected from the mempool +by Tendermint itself. Right now, we return a regular error when this happens. + #### ABCI++ -Coming soon... 
+For information on how ABCI++ works, see the +[Specification](spec/abci%2B%2B/README.md). +In particular, the simplest way to upgrade your application is described +[here](spec/abci%2B%2B/abci++_tmint_expected_behavior.md#adapting-existing-applications-that-use-abci). + +#### Moving the `app_hash` parameter + +The Application's hash (or any data representing the Application's current +state) is known by the time `FinalizeBlock` finishes its execution. +Accordingly, the `app_hash` parameter has been moved from `ResponseCommit` to +`ResponseFinalizeBlock`, since it makes sense for the Application to return +this value as soon as it is known. #### ABCI Mutex @@ -51,6 +70,11 @@ applications remains correct. turned on are not affected. Operators who wish to enable indexing for a new node, however, must now edit the `config.toml` explicitly. +- The function of seed nodes was modified in the past release. Now, seed nodes + are treated identically to any other peer, however they only run the PEX + reactor. Because of this `seeds` has been removed from the config. Users + should add any seed nodes in the list of `bootstrap-peers`. + ### RPC Changes Tendermint v0.36 adds a new RPC event subscription API. The existing event @@ -88,6 +112,18 @@ callback. For more detailed information, see [ADR 075](https://tinyurl.com/adr075) which defines and describes the new API in detail. +#### BroadcastTx Methods + +All callers should use the new `broadcast_tx` method, which has the +same semantics as the legacy `broadcast_tx_sync` method. The +`broadcast_tx_sync` and `broadcast_tx_async` methods are now +deprecated and will be removed in 0.37. + +Additionally the `broadcast_tx_commit` method is *also* deprecated, +and will be removed in 0.37. Client code that submits a transaction +and needs to wait for it to be committed to the chain, should poll +tendermint to observe the transaction in the committed state.
+ ### Timeout Parameter Changes Tendermint v0.36 updates how the Tendermint consensus timing parameters are @@ -126,6 +162,19 @@ lays out the reasoning for the changes as well as [RFC 009](https://tinyurl.com/rfc009) for a discussion of the complexities of upgrading consensus parameters. +### RecheckTx Parameter Change + +`RecheckTx` was previously enabled by the `recheck` parameter in the mempool +section of the `config.toml`. +Setting it to true made Tendermint invoke another `CheckTx` ABCI call on +each transaction remaining in the mempool following the execution of a block. +Similar to the timeout parameter changes, this parameter makes more sense as a +network-wide coordinated variable so that applications can be written knowing +that all nodes agree on whether to run `RecheckTx`. + +Applications can turn on `RecheckTx` by altering the `ConsensusParams` in the +`FinalizeBlock` ABCI response. + ### CLI Changes The functionality around resetting a node has been extended to make it safer. The @@ -212,22 +261,25 @@ and one function have moved to the Tendermint `crypto` package: The format of all tendermint on-disk database keys changes in 0.35. Upgrading nodes must either re-sync all data or run a migration -script provided in this release. The script located in -`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` -provides the function `Migrate(context.Context, db.DB)` which you can -operationalize as makes sense for your deployment. +script provided in this release. + +The script located in +`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` provides the +function `Migrate(context.Context, db.DB)` which you can operationalize as +makes sense for your deployment.
For ease of use the `tendermint` command includes a CLI version of the migration script, which you can invoke, as in: tendermint key-migrate -This reads the configuration file as normal and allows the -`--db-backend` and `--db-dir` flags to change database operations as -needed. +This reads the configuration file as normal and allows the `--db-backend` and +`--db-dir` flags to override the database location as needed. -The migration operation is idempotent and can be run more than once, -if needed. +The migration operation is intended to be idempotent, and should be safe to +rerun on the same database multiple times. As a safety measure, however, we +recommend that operators test out the migration on a copy of the database +first, if it is practical to do so, before applying it to the production data. ### CLI Changes diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 1e163056d3..bd255bebe4 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -3,7 +3,6 @@ package abciclient import ( "context" "errors" - "fmt" "net" "sync" "time" @@ -65,7 +64,7 @@ RETRY_LOOP: if cli.mustConnect { return err } - cli.logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr), "err", err) + cli.logger.Error("abci.grpcClient failed to connect, Retrying...", "addr", cli.addr, "err", err) timer.Reset(time.Second * dialRetryIntervalSeconds) select { case <-ctx.Done(): diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index e5f2898f33..31325d7fec 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -4,10 +4,8 @@ package mocks import ( context "context" - testing "testing" mock "github.com/stretchr/testify/mock" - types "github.com/tendermint/tendermint/abci/types" ) @@ -422,8 +420,13 @@ func (_m *Client) Wait() { _m.Called() } -// NewClient creates a new instance of Client. 
It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewClient(t testing.TB) *Client { +type mockConstructorTestingTNewClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewClient(t mockConstructorTestingTNewClient) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index aa4fdcbe93..d023e70741 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -17,12 +17,6 @@ import ( "github.com/tendermint/tendermint/libs/service" ) -const ( - // reqQueueSize is the max number of queued async requests. - // (memory: 256MB max assuming 1MB transactions) - reqQueueSize = 256 -) - // This is goroutine-safe, but users should beware that the application in // general is not meant to be interfaced with concurrent callers. type socketClient struct { @@ -48,7 +42,7 @@ var _ Client = (*socketClient)(nil) func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client { cli := &socketClient{ logger: logger, - reqQueue: make(chan *requestAndResponse, reqQueueSize), + reqQueue: make(chan *requestAndResponse), mustConnect: mustConnect, addr: addr, reqSent: list.New(), @@ -73,8 +67,11 @@ func (cli *socketClient) OnStart(ctx context.Context) error { if cli.mustConnect { return err } - cli.logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. 
Retrying after %vs...", - cli.addr, dialRetryIntervalSeconds), "err", err) + + cli.logger.Error("abci.socketClient failed to connect, retrying after", + "retry_after", dialRetryIntervalSeconds, + "target", cli.addr, + "err", err) timer.Reset(time.Second * dialRetryIntervalSeconds) select { @@ -83,7 +80,6 @@ func (cli *socketClient) OnStart(ctx context.Context) error { case <-timer.C: continue } - } cli.conn = conn @@ -118,6 +114,11 @@ func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer case <-ctx.Done(): return case reqres := <-cli.reqQueue: + // N.B. We must enqueue before sending out the request, otherwise the + // server may reply before we do it, and the receiver will fail for an + // unsolicited reply. + cli.trackRequest(reqres) + if err := types.WriteMessage(reqres.Request, bw); err != nil { cli.stopForError(fmt.Errorf("write to buffer: %w", err)) return @@ -158,14 +159,15 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader } } -func (cli *socketClient) willSendReq(reqres *requestAndResponse) { - cli.mtx.Lock() - defer cli.mtx.Unlock() - +func (cli *socketClient) trackRequest(reqres *requestAndResponse) { + // N.B. We must NOT hold the client state lock while checking this, or we + // may deadlock with shutdown. if !cli.IsRunning() { return } + cli.mtx.Lock() + defer cli.mtx.Unlock() cli.reqSent.PushBack(reqres) } @@ -199,7 +201,6 @@ func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*ty } reqres := makeReqRes(req) - cli.willSendReq(reqres) select { case cli.reqQueue <- reqres: diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 22c62873a9..a9c9994ca7 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -2,6 +2,7 @@ package main import ( "bufio" + "bytes" "encoding/hex" "errors" "fmt" @@ -78,10 +79,11 @@ func RootCmmand(logger log.Logger) *cobra.Command { // Structure for data passed to print response. 
type response struct { // generic abci response - Data []byte - Code uint32 - Info string - Log string + Data []byte + Code uint32 + Info string + Log string + Status int32 Query *queryResponse } @@ -130,6 +132,8 @@ func addCommands(cmd *cobra.Command, logger log.Logger) { cmd.AddCommand(commitCmd) cmd.AddCommand(versionCmd) cmd.AddCommand(testCmd) + cmd.AddCommand(prepareProposalCmd) + cmd.AddCommand(processProposalCmd) cmd.AddCommand(getQueryCmd()) // examples @@ -151,8 +155,10 @@ where example.file looks something like: check_tx 0x00 check_tx 0xff finalize_block 0x00 + commit check_tx 0x00 finalize_block 0x01 0x04 0xff + commit info `, Args: cobra.ExactArgs(0), @@ -168,7 +174,7 @@ This command opens an interactive console for running any of the other commands without opening a new connection each time `, Args: cobra.ExactArgs(0), - ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "commit", "query"}, + ValidArgs: []string{"echo", "info", "query", "check_tx", "prepare_proposal", "process_proposal", "finalize_block", "commit"}, RunE: cmdConsole, } @@ -191,7 +197,7 @@ var finalizeBlockCmd = &cobra.Command{ Use: "finalize_block", Short: "deliver a block of transactions to the application", Long: "deliver a block of transactions to the application", - Args: cobra.MinimumNArgs(1), + Args: cobra.MinimumNArgs(0), RunE: cmdFinalizeBlock, } @@ -222,6 +228,22 @@ var versionCmd = &cobra.Command{ }, } +var prepareProposalCmd = &cobra.Command{ + Use: "prepare_proposal", + Short: "prepare proposal", + Long: "prepare proposal", + Args: cobra.MinimumNArgs(0), + RunE: cmdPrepareProposal, +} + +var processProposalCmd = &cobra.Command{ + Use: "process_proposal", + Short: "process proposal", + Long: "process proposal", + Args: cobra.MinimumNArgs(0), + RunE: cmdProcessProposal, +} + func getQueryCmd() *cobra.Command { cmd := &cobra.Command{ Use: "query", @@ -298,28 +320,23 @@ func cmdTest(cmd *cobra.Command, args []string) error { return compose( []func() error{ 
func() error { return servertest.InitChain(ctx, client) }, - func() error { return servertest.Commit(ctx, client, nil) }, + func() error { return servertest.Commit(ctx, client) }, func() error { return servertest.FinalizeBlock(ctx, client, [][]byte{ []byte("abc"), }, []uint32{ code.CodeTypeBadNonce, - }, nil) + }, nil, nil) }, - func() error { return servertest.Commit(ctx, client, nil) }, + func() error { return servertest.Commit(ctx, client) }, func() error { return servertest.FinalizeBlock(ctx, client, [][]byte{ {0x00}, }, []uint32{ code.CodeTypeOK, - }, nil) - }, - func() error { - return servertest.Commit(ctx, client, []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, - }) + }, nil, []byte{0, 0, 0, 0, 0, 0, 0, 1}) }, + func() error { return servertest.Commit(ctx, client) }, func() error { return servertest.FinalizeBlock(ctx, client, [][]byte{ {0x00}, @@ -338,9 +355,21 @@ func cmdTest(cmd *cobra.Command, args []string) error { code.CodeTypeOK, code.CodeTypeOK, code.CodeTypeBadNonce, + }, nil, []byte{0, 0, 0, 0, 0, 0, 0, 5}) + }, + func() error { return servertest.Commit(ctx, client) }, + func() error { + return servertest.PrepareProposal(ctx, client, [][]byte{ + {0x01}, + }, []types.TxRecord_TxAction{ + types.TxRecord_UNMODIFIED, }, nil) }, - func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) }, + func() error { + return servertest.ProcessProposal(ctx, client, [][]byte{ + {0x01}, + }, types.ResponseProcessProposal_ACCEPT) + }, }) } @@ -441,6 +470,10 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error { return cmdInfo(cmd, actualArgs) case "query": return cmdQuery(cmd, actualArgs) + case "prepare_proposal": + return cmdPrepareProposal(cmd, actualArgs) + case "process_proposal": + return cmdProcessProposal(cmd, actualArgs) default: return cmdUnimplemented(cmd, pArgs) } @@ -502,15 +535,8 @@ func cmdInfo(cmd *cobra.Command, args []string) error { const codeBad 
uint32 = 10 -// Append a new tx to application +// Append new txs to application func cmdFinalizeBlock(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - printResponse(cmd, args, response{ - Code: codeBad, - Log: "Must provide at least one transaction", - }) - return nil - } txs := make([][]byte, len(args)) for i, arg := range args { txBytes, err := stringOrHexToBytes(arg) @@ -523,14 +549,19 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error { if err != nil { return err } + resps := make([]response, 0, len(res.TxResults)+1) for _, tx := range res.TxResults { - printResponse(cmd, args, response{ + resps = append(resps, response{ Code: tx.Code, Data: tx.Data, Info: tx.Info, Log: tx.Log, }) } + resps = append(resps, response{ + Data: res.AppHash, + }) + printResponse(cmd, args, resps...) return nil } @@ -554,21 +585,17 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { printResponse(cmd, args, response{ Code: res.Code, Data: res.Data, - Info: res.Info, - Log: res.Log, }) return nil } // Get application Merkle root hash func cmdCommit(cmd *cobra.Command, args []string) error { - res, err := client.Commit(cmd.Context()) + _, err := client.Commit(cmd.Context()) if err != nil { return err } - printResponse(cmd, args, response{ - Data: res.Data, - }) + printResponse(cmd, args, response{}) return nil } @@ -610,6 +637,81 @@ func cmdQuery(cmd *cobra.Command, args []string) error { return nil } +func inTxArray(txByteArray [][]byte, tx []byte) bool { + for _, txTmp := range txByteArray { + if bytes.Equal(txTmp, tx) { + return true + } + + } + return false +} + +func cmdPrepareProposal(cmd *cobra.Command, args []string) error { + txsBytesArray := make([][]byte, len(args)) + + for i, arg := range args { + txBytes, err := stringOrHexToBytes(arg) + if err != nil { + return err + } + txsBytesArray[i] = txBytes + } + + res, err := client.PrepareProposal(cmd.Context(), &types.RequestPrepareProposal{ + Txs: txsBytesArray, + // kvstore has to 
have this parameter in order not to reject a tx as the default value is 0 + MaxTxBytes: 65536, + }) + if err != nil { + return err + } + resps := make([]response, 0, len(res.TxResults)+1) + for _, tx := range res.TxRecords { + existingTx := inTxArray(txsBytesArray, tx.Tx) + if tx.Action == types.TxRecord_UNKNOWN || + (existingTx && tx.Action == types.TxRecord_ADDED) || + (!existingTx && (tx.Action == types.TxRecord_UNMODIFIED || tx.Action == types.TxRecord_REMOVED)) { + resps = append(resps, response{ + Code: codeBad, + Log: "Failed. Tx: " + string(tx.GetTx()) + " action: " + tx.Action.String(), + }) + } else { + resps = append(resps, response{ + Code: code.CodeTypeOK, + Log: "Succeeded. Tx: " + string(tx.Tx) + " action: " + tx.Action.String(), + }) + } + } + + printResponse(cmd, args, resps...) + return nil +} + +func cmdProcessProposal(cmd *cobra.Command, args []string) error { + txsBytesArray := make([][]byte, len(args)) + + for i, arg := range args { + txBytes, err := stringOrHexToBytes(arg) + if err != nil { + return err + } + txsBytesArray[i] = txBytes + } + + res, err := client.ProcessProposal(cmd.Context(), &types.RequestProcessProposal{ + Txs: txsBytesArray, + }) + if err != nil { + return err + } + + printResponse(cmd, args, response{ + Status: int32(res.Status), + }) + return nil +} + func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error { return func(cmd *cobra.Command, args []string) error { // Create the application - in memory or persisted to disk @@ -642,44 +744,48 @@ func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error { //-------------------------------------------------------------------------------- -func printResponse(cmd *cobra.Command, args []string, rsp response) { +func printResponse(cmd *cobra.Command, args []string, rsps ...response) { if flagVerbose { fmt.Println(">", strings.Join(append([]string{cmd.Use}, args...), " ")) } - // Always print the status code. 
- if rsp.Code == types.CodeTypeOK { - fmt.Printf("-> code: OK\n") - } else { - fmt.Printf("-> code: %d\n", rsp.Code) - - } - - if len(rsp.Data) != 0 { - // Do no print this line when using the commit command - // because the string comes out as gibberish - if cmd.Use != "commit" { - fmt.Printf("-> data: %s\n", rsp.Data) + for _, rsp := range rsps { + // Always print the status code. + if rsp.Code == types.CodeTypeOK { + fmt.Printf("-> code: OK\n") + } else { + fmt.Printf("-> code: %d\n", rsp.Code) } - fmt.Printf("-> data.hex: 0x%X\n", rsp.Data) - } - if rsp.Log != "" { - fmt.Printf("-> log: %s\n", rsp.Log) - } - if rsp.Query != nil { - fmt.Printf("-> height: %d\n", rsp.Query.Height) - if rsp.Query.Key != nil { - fmt.Printf("-> key: %s\n", rsp.Query.Key) - fmt.Printf("-> key.hex: %X\n", rsp.Query.Key) + if len(rsp.Data) != 0 { + // Do no print this line when using the finalize_block command + // because the string comes out as gibberish + if cmd.Use != "finalize_block" { + fmt.Printf("-> data: %s\n", rsp.Data) + } + fmt.Printf("-> data.hex: 0x%X\n", rsp.Data) + } + if rsp.Log != "" { + fmt.Printf("-> log: %s\n", rsp.Log) } - if rsp.Query.Value != nil { - fmt.Printf("-> value: %s\n", rsp.Query.Value) - fmt.Printf("-> value.hex: %X\n", rsp.Query.Value) + if cmd.Use == "process_proposal" { + fmt.Printf("-> status: %s\n", types.ResponseProcessProposal_ProposalStatus_name[rsp.Status]) } - if rsp.Query.ProofOps != nil { - fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps) + + if rsp.Query != nil { + fmt.Printf("-> height: %d\n", rsp.Query.Height) + if rsp.Query.Key != nil { + fmt.Printf("-> key: %s\n", rsp.Query.Key) + fmt.Printf("-> key.hex: %X\n", rsp.Query.Key) + } + if rsp.Query.Value != nil { + fmt.Printf("-> value: %s\n", rsp.Query.Value) + fmt.Printf("-> value.hex: %X\n", rsp.Query.Value) + } + if rsp.Query.ProofOps != nil { + fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps) + } } } } diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go 
index 4bb6f5b404..8edf5543c6 100644 --- a/abci/example/counter/counter.go +++ b/abci/example/counter/counter.go @@ -35,7 +35,6 @@ func (app *Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (* if len(req.Tx) > 8 { return &types.ResponseCheckTx{ Code: code.CodeTypeEncodingError, - Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx)), }, nil } tx8 := make([]byte, 8) @@ -44,7 +43,6 @@ func (app *Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (* if txValue < uint64(app.txCount) { return &types.ResponseCheckTx{ Code: code.CodeTypeBadNonce, - Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue), }, nil } } @@ -56,11 +54,9 @@ func (app *Application) Commit(_ context.Context) (*types.ResponseCommit, error) if app.txCount == 0 { return &types.ResponseCommit{}, nil } - hash := make([]byte, 24) - endHash := make([]byte, 8) - binary.BigEndian.PutUint64(endHash, uint64(app.txCount)) - hash = append(hash, endHash...) - return &types.ResponseCommit{Data: hash}, nil + hash := make([]byte, 32) + binary.BigEndian.PutUint64(hash[24:], uint64(app.txCount)) + return &types.ResponseCommit{}, nil } func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (*types.ResponseQuery, error) { diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index c1ea46108c..f7ec4b5430 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -180,7 +180,7 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal app.valSetUpdate.ValidatorUpdates = make([]types.ValidatorUpdate, 0) // Punish validators who committed equivocation. 
- for _, ev := range req.ByzantineValidators { + for _, ev := range req.Misbehavior { // TODO it seems this code is not needed to keep here if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE { proTxHash := crypto.ProTxHash(ev.Validator.ProTxHash) @@ -197,9 +197,16 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal respTxs[i] = app.handleTx(tx) } + // Using a memdb - just return the big endian size of the db + appHash := make([]byte, 32) + binary.PutVarint(appHash, app.state.Size) + app.state.AppHash = appHash + app.state.Height++ + return &types.ResponseFinalizeBlock{ TxResults: respTxs, ValidatorSetUpdate: proto.Clone(&app.valSetUpdate).(*types.ValidatorSetUpdate), + AppHash: appHash, }, nil } @@ -211,14 +218,9 @@ func (app *Application) Commit(_ context.Context) (*types.ResponseCommit, error) app.mu.Lock() defer app.mu.Unlock() - // Using a memdb - just return the big endian size of the db - appHash := make([]byte, 32) - binary.PutVarint(appHash, app.state.Size) - app.state.AppHash = appHash - app.state.Height++ saveState(app.state) - resp := &types.ResponseCommit{Data: appHash} + resp := &types.ResponseCommit{} if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks { resp.RetainHeight = app.state.Height - app.RetainBlocks + 1 } @@ -328,7 +330,7 @@ func (app *Application) PrepareProposal(_ context.Context, req *types.RequestPre func (*Application) ProcessProposal(_ context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { for _, tx := range req.Txs { - if len(tx) == 0 { + if len(tx) == 0 || isPrepareTx(tx) { return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}, nil } } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index ed20d3cb07..8f38eeb07e 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -33,25 +33,20 @@ func InitChain(ctx context.Context, client abciclient.Client) error { return nil } -func 
Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error { - res, err := client.Commit(ctx) - data := res.Data +func Commit(ctx context.Context, client abciclient.Client) error { + _, err := client.Commit(ctx) if err != nil { fmt.Println("Failed test: Commit") fmt.Printf("error while committing: %v\n", err) return err } - if !bytes.Equal(data, hashExp) { - fmt.Println("Failed test: Commit") - fmt.Printf("Commit hash was unexpected. Got %X expected %X\n", data, hashExp) - return errors.New("commitTx failed") - } fmt.Println("Passed test: Commit") return nil } -func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error { +func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte, hashExp []byte) error { res, _ := client.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: txBytes}) + appHash := res.AppHash for i, tx := range res.TxResults { code, data, log := tx.Code, tx.Data, tx.Log if code != codeExp[i] { @@ -67,17 +62,48 @@ func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]by return errors.New("FinalizeBlock error") } } + if !bytes.Equal(appHash, hashExp) { + fmt.Println("Failed test: FinalizeBlock") + fmt.Printf("Application hash was unexpected. Got %X expected %X\n", appHash, hashExp) + return errors.New("FinalizeBlock error") + } fmt.Println("Passed test: FinalizeBlock") return nil } +func PrepareProposal(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []types.TxRecord_TxAction, dataExp []byte) error { + res, _ := client.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: txBytes}) + for i, tx := range res.TxRecords { + if tx.Action != codeExp[i] { + fmt.Println("Failed test: PrepareProposal") + fmt.Printf("PrepareProposal response code was unexpected. 
Got %v expected %v.", + tx.Action, codeExp) + return errors.New("PrepareProposal error") + } + } + fmt.Println("Passed test: PrepareProposal") + return nil +} + +func ProcessProposal(ctx context.Context, client abciclient.Client, txBytes [][]byte, statusExp types.ResponseProcessProposal_ProposalStatus) error { + res, _ := client.ProcessProposal(ctx, &types.RequestProcessProposal{Txs: txBytes}) + if res.Status != statusExp { + fmt.Println("Failed test: ProcessProposal") + fmt.Printf("ProcessProposal response status was unexpected. Got %v expected %v.", + res.Status, statusExp) + return errors.New("ProcessProposal error") + } + fmt.Println("Passed test: ProcessProposal") + return nil +} + func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes}) - code, data, log := res.Code, res.Data, res.Log + code, data := res.Code, res.Data if code != codeExp { fmt.Println("Failed test: CheckTx") - fmt.Printf("CheckTx response code was unexpected. Got %v expected %v. Log: %v\n", - code, codeExp, log) + fmt.Printf("CheckTx response code was unexpected. 
Got %v expected %v.,", + code, codeExp) return errors.New("checkTx") } if !bytes.Equal(data, dataExp) { diff --git a/abci/tests/test_cli/ex1.abci b/abci/tests/test_cli/ex1.abci index 09457189ed..aa6acc3272 100644 --- a/abci/tests/test_cli/ex1.abci +++ b/abci/tests/test_cli/ex1.abci @@ -1,10 +1,18 @@ echo hello info -commit +prepare_proposal "abc" +process_proposal "abc" finalize_block "abc" -info commit +info query "abc" finalize_block "def=xyz" "ghi=123" commit query "def" +prepare_proposal "preparedef" +process_proposal "def" +process_proposal "preparedef" +prepare_proposal +process_proposal +finalize_block +commit diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index 01d0150f0f..b95343a70d 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -8,26 +8,31 @@ -> data: {"size":0} -> data.hex: 0x7B2273697A65223A307D -> commit +> prepare_proposal "abc" +-> code: OK +-> log: Succeeded. Tx: abc action: UNMODIFIED + +> process_proposal "abc" -> code: OK --> data.hex: 0x0000000000000000000000000000000000000000000000000000000000000000 +-> status: ACCEPT > finalize_block "abc" -> code: OK +-> code: OK +-> data.hex: 0x0200000000000000000000000000000000000000000000000000000000000000 + +> commit +-> code: OK > info -> code: OK -> data: {"size":1} -> data.hex: 0x7B2273697A65223A317D -> commit --> code: OK --> data.hex: 0x0200000000000000000000000000000000000000000000000000000000000000 - > query "abc" -> code: OK -> log: exists --> height: 2 +-> height: 1 -> key: abc -> key.hex: 616263 -> value: abc @@ -35,19 +40,46 @@ > finalize_block "def=xyz" "ghi=123" -> code: OK -> finalize_block "def=xyz" "ghi=123" -> code: OK +-> code: OK +-> data.hex: 0x0600000000000000000000000000000000000000000000000000000000000000 > commit -> code: OK --> data.hex: 0x0600000000000000000000000000000000000000000000000000000000000000 > query "def" -> code: OK -> log: exists --> height: 3 +-> height: 2 -> key: def -> key.hex: 646566 -> 
value: xyz -> value.hex: 78797A +> prepare_proposal "preparedef" +-> code: OK +-> log: Succeeded. Tx: def action: ADDED +-> code: OK +-> log: Succeeded. Tx: preparedef action: REMOVED + +> process_proposal "def" +-> code: OK +-> status: ACCEPT + +> process_proposal "preparedef" +-> code: OK +-> status: REJECT + +> prepare_proposal + +> process_proposal +-> code: OK +-> status: ACCEPT + +> finalize_block +-> code: OK +-> data.hex: 0x0600000000000000000000000000000000000000000000000000000000000000 + +> commit +-> code: OK + diff --git a/abci/tests/test_cli/ex2.abci b/abci/tests/test_cli/ex2.abci index 90e99c2f90..1cabba1512 100644 --- a/abci/tests/test_cli/ex2.abci +++ b/abci/tests/test_cli/ex2.abci @@ -1,7 +1,10 @@ check_tx 0x00 check_tx 0xff finalize_block 0x00 +commit check_tx 0x00 finalize_block 0x01 +commit finalize_block 0x04 +commit info diff --git a/abci/tests/test_cli/ex2.abci.out b/abci/tests/test_cli/ex2.abci.out index aab0b1966f..39340dde5e 100644 --- a/abci/tests/test_cli/ex2.abci.out +++ b/abci/tests/test_cli/ex2.abci.out @@ -6,17 +6,32 @@ > finalize_block 0x00 -> code: OK +-> code: OK +-> data.hex: 0x0200000000000000000000000000000000000000000000000000000000000000 + +> commit +-> code: OK > check_tx 0x00 -> code: OK > finalize_block 0x01 -> code: OK +-> code: OK +-> data.hex: 0x0400000000000000000000000000000000000000000000000000000000000000 + +> commit +-> code: OK > finalize_block 0x04 -> code: OK +-> code: OK +-> data.hex: 0x0600000000000000000000000000000000000000000000000000000000000000 + +> commit +-> code: OK -> info +> info -> code: OK -> data: {"size":3} -> data.hex: 0x7B2273697A65223A337D diff --git a/abci/tests/test_cli/test.sh b/abci/tests/test_cli/test.sh index 9c02ce6f54..d160d59c9e 100755 --- a/abci/tests/test_cli/test.sh +++ b/abci/tests/test_cli/test.sh @@ -30,6 +30,8 @@ function testExample() { cat "${INPUT}.out.new" echo "Expected:" cat "${INPUT}.out" + echo "Diff:" + diff "${INPUT}.out" "${INPUT}.out.new" exit 1 fi diff --git 
a/abci/types/messages_test.go b/abci/types/messages_test.go index 4f17f9f83c..404d552225 100644 --- a/abci/types/messages_test.go +++ b/abci/types/messages_test.go @@ -21,14 +21,6 @@ func TestMarshalJSON(t *testing.T) { Code: 1, Data: []byte("hello"), GasWanted: 43, - Events: []Event{ - { - Type: "testEvent", - Attributes: []EventAttribute{ - {Key: "pho", Value: "bo"}, - }, - }, - }, } b, err = json.Marshal(&r1) assert.NoError(t, err) @@ -86,16 +78,7 @@ func TestWriteReadMessage2(t *testing.T) { cases := []proto.Message{ &ResponseCheckTx{ Data: []byte(phrase), - Log: phrase, GasWanted: 10, - Events: []Event{ - { - Type: "testEvent", - Attributes: []EventAttribute{ - {Key: "abc", Value: "def"}, - }, - }, - }, }, // TODO: add the rest } diff --git a/abci/types/mocks/application.go b/abci/types/mocks/application.go index 2d35c481f0..16cf59d663 100644 --- a/abci/types/mocks/application.go +++ b/abci/types/mocks/application.go @@ -4,10 +4,8 @@ package mocks import ( context "context" - testing "testing" mock "github.com/stretchr/testify/mock" - types "github.com/tendermint/tendermint/abci/types" ) @@ -338,8 +336,13 @@ func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.Reque return r0, r1 } -// NewApplication creates a new instance of Application. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewApplication(t testing.TB) *Application { +type mockConstructorTestingTNewApplication interface { + mock.TestingT + Cleanup(func()) +} + +// NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewApplication(t mockConstructorTestingTNewApplication) *Application { mock := &Application{} mock.Mock.Test(t) diff --git a/abci/types/types.go b/abci/types/types.go index d13947d1a9..d02d9551d6 100644 --- a/abci/types/types.go +++ b/abci/types/types.go @@ -5,6 +5,10 @@ import ( "encoding/json" "github.com/gogo/protobuf/jsonpb" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/jsontypes" ) const ( @@ -135,6 +139,96 @@ func (r *EventAttribute) UnmarshalJSON(b []byte) error { return jsonpbUnmarshaller.Unmarshal(reader, r) } +// validatorUpdateJSON is the JSON encoding of a validator update. +// +// It handles translation of public keys from the protobuf representation to +// the legacy Amino-compatible format expected by RPC clients. +type validatorUpdateJSON struct { + PubKey json.RawMessage `json:"pub_key,omitempty"` + Power int64 `json:"power,string"` +} + +func (v *ValidatorUpdate) MarshalJSON() ([]byte, error) { + if v.PubKey == nil { + return nil, nil + } + key, err := encoding.PubKeyFromProto(*v.PubKey) + if err != nil { + return nil, err + } + jkey, err := jsontypes.Marshal(key) + if err != nil { + return nil, err + } + return json.Marshal(validatorUpdateJSON{ + PubKey: jkey, + Power: v.GetPower(), + }) +} + +func (v *ValidatorUpdate) UnmarshalJSON(data []byte) error { + var vu validatorUpdateJSON + if err := json.Unmarshal(data, &vu); err != nil { + return err + } + var key crypto.PubKey + if err := jsontypes.Unmarshal(vu.PubKey, &key); err != nil { + return err + } + pkey, err := encoding.PubKeyToProto(key) + if err != nil { + return err + } + v.PubKey = &pkey + v.Power = vu.Power + return nil +} + +type validatorSetUpdateJSON struct { + ValidatorUpdates []ValidatorUpdate `json:"validator_updates"` + ThresholdPubKey json.RawMessage `json:"threshold_public_key"` + QuorumHash []byte `json:"quorum_hash,omitempty"` +} + +func (m *ValidatorSetUpdate) 
MarshalJSON() ([]byte, error) { + ret := validatorSetUpdateJSON{ + ValidatorUpdates: m.ValidatorUpdates, + QuorumHash: m.QuorumHash, + } + if m.ThresholdPublicKey.Sum != nil { + key, err := encoding.PubKeyFromProto(m.ThresholdPublicKey) + if err != nil { + return nil, err + } + ret.ThresholdPubKey, err = jsontypes.Marshal(key) + if err != nil { + return nil, err + } + } + return json.Marshal(ret) +} + +func (m *ValidatorSetUpdate) UnmarshalJSON(data []byte) error { + var vsu validatorSetUpdateJSON + err := json.Unmarshal(data, &vsu) + if err != nil { + return err + } + var key crypto.PubKey + if err := jsontypes.Unmarshal(vsu.ThresholdPubKey, &key); err != nil { + return err + } + if key != nil { + m.ThresholdPublicKey, err = encoding.PubKeyToProto(key) + if err != nil { + return err + } + } + m.ValidatorUpdates = vsu.ValidatorUpdates + m.QuorumHash = vsu.QuorumHash + return nil +} + // Some compile time assertions to ensure we don't // have accidental runtime surprises later on. diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 949309b0f0..fec4ec050b 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -121,7 +121,7 @@ func (x ResponseOfferSnapshot_Result) String() string { } func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33, 0} + return fileDescriptor_252557cfdd89a31a, []int{28, 0} } type ResponseApplySnapshotChunk_Result int32 @@ -158,7 +158,7 @@ func (x ResponseApplySnapshotChunk_Result) String() string { } func (ResponseApplySnapshotChunk_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{35, 0} + return fileDescriptor_252557cfdd89a31a, []int{30, 0} } type ResponseProcessProposal_ProposalStatus int32 @@ -186,7 +186,7 @@ func (x ResponseProcessProposal_ProposalStatus) String() string { } func (ResponseProcessProposal_ProposalStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37, 
0} + return fileDescriptor_252557cfdd89a31a, []int{32, 0} } type ResponseVerifyVoteExtension_VerifyStatus int32 @@ -214,7 +214,7 @@ func (x ResponseVerifyVoteExtension_VerifyStatus) String() string { } func (ResponseVerifyVoteExtension_VerifyStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{40, 0} + return fileDescriptor_252557cfdd89a31a, []int{35, 0} } // TxAction contains App-provided information on what to do with a transaction that is part of a raw proposal @@ -246,7 +246,7 @@ func (x TxRecord_TxAction) String() string { } func (TxRecord_TxAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{48, 0} + return fileDescriptor_252557cfdd89a31a, []int{43, 0} } type Request struct { @@ -256,10 +256,7 @@ type Request struct { // *Request_Info // *Request_InitChain // *Request_Query - // *Request_BeginBlock // *Request_CheckTx - // *Request_DeliverTx - // *Request_EndBlock // *Request_Commit // *Request_ListSnapshots // *Request_OfferSnapshot @@ -327,18 +324,9 @@ type Request_InitChain struct { type Request_Query struct { Query *RequestQuery `protobuf:"bytes,5,opt,name=query,proto3,oneof" json:"query,omitempty"` } -type Request_BeginBlock struct { - BeginBlock *RequestBeginBlock `protobuf:"bytes,6,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` -} type Request_CheckTx struct { CheckTx *RequestCheckTx `protobuf:"bytes,7,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } -type Request_DeliverTx struct { - DeliverTx *RequestDeliverTx `protobuf:"bytes,8,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` -} -type Request_EndBlock struct { - EndBlock *RequestEndBlock `protobuf:"bytes,9,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` -} type Request_Commit struct { Commit *RequestCommit `protobuf:"bytes,10,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } @@ -375,10 +363,7 @@ func 
(*Request_Flush) isRequest_Value() {} func (*Request_Info) isRequest_Value() {} func (*Request_InitChain) isRequest_Value() {} func (*Request_Query) isRequest_Value() {} -func (*Request_BeginBlock) isRequest_Value() {} func (*Request_CheckTx) isRequest_Value() {} -func (*Request_DeliverTx) isRequest_Value() {} -func (*Request_EndBlock) isRequest_Value() {} func (*Request_Commit) isRequest_Value() {} func (*Request_ListSnapshots) isRequest_Value() {} func (*Request_OfferSnapshot) isRequest_Value() {} @@ -432,14 +417,6 @@ func (m *Request) GetQuery() *RequestQuery { return nil } -// Deprecated: Do not use. -func (m *Request) GetBeginBlock() *RequestBeginBlock { - if x, ok := m.GetValue().(*Request_BeginBlock); ok { - return x.BeginBlock - } - return nil -} - func (m *Request) GetCheckTx() *RequestCheckTx { if x, ok := m.GetValue().(*Request_CheckTx); ok { return x.CheckTx @@ -447,22 +424,6 @@ func (m *Request) GetCheckTx() *RequestCheckTx { return nil } -// Deprecated: Do not use. -func (m *Request) GetDeliverTx() *RequestDeliverTx { - if x, ok := m.GetValue().(*Request_DeliverTx); ok { - return x.DeliverTx - } - return nil -} - -// Deprecated: Do not use. 
-func (m *Request) GetEndBlock() *RequestEndBlock { - if x, ok := m.GetValue().(*Request_EndBlock); ok { - return x.EndBlock - } - return nil -} - func (m *Request) GetCommit() *RequestCommit { if x, ok := m.GetValue().(*Request_Commit); ok { return x.Commit @@ -541,10 +502,7 @@ func (*Request) XXX_OneofWrappers() []interface{} { (*Request_Info)(nil), (*Request_InitChain)(nil), (*Request_Query)(nil), - (*Request_BeginBlock)(nil), (*Request_CheckTx)(nil), - (*Request_DeliverTx)(nil), - (*Request_EndBlock)(nil), (*Request_Commit)(nil), (*Request_ListSnapshots)(nil), (*Request_OfferSnapshot)(nil), @@ -866,74 +824,6 @@ func (m *RequestQuery) GetProve() bool { return false } -type RequestBeginBlock struct { - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - Header types1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` - LastCommitInfo CommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` - ByzantineValidators []Misbehavior `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` -} - -func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } -func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } -func (*RequestBeginBlock) ProtoMessage() {} -func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{6} -} -func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestBeginBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestBeginBlock.Merge(m, src) -} -func (m *RequestBeginBlock) 
XXX_Size() int { - return m.Size() -} -func (m *RequestBeginBlock) XXX_DiscardUnknown() { - xxx_messageInfo_RequestBeginBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestBeginBlock proto.InternalMessageInfo - -func (m *RequestBeginBlock) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *RequestBeginBlock) GetHeader() types1.Header { - if m != nil { - return m.Header - } - return types1.Header{} -} - -func (m *RequestBeginBlock) GetLastCommitInfo() CommitInfo { - if m != nil { - return m.LastCommitInfo - } - return CommitInfo{} -} - -func (m *RequestBeginBlock) GetByzantineValidators() []Misbehavior { - if m != nil { - return m.ByzantineValidators - } - return nil -} - type RequestCheckTx struct { Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` Type CheckTxType `protobuf:"varint,2,opt,name=type,proto3,enum=tendermint.abci.CheckTxType" json:"type,omitempty"` @@ -943,7 +833,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{7} + return fileDescriptor_252557cfdd89a31a, []int{6} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -986,94 +876,6 @@ func (m *RequestCheckTx) GetType() CheckTxType { return CheckTxType_New } -type RequestDeliverTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` -} - -func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } -func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } -func (*RequestDeliverTx) ProtoMessage() {} -func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{8} -} -func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestDeliverTx) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestDeliverTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestDeliverTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestDeliverTx.Merge(m, src) -} -func (m *RequestDeliverTx) XXX_Size() int { - return m.Size() -} -func (m *RequestDeliverTx) XXX_DiscardUnknown() { - xxx_messageInfo_RequestDeliverTx.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestDeliverTx proto.InternalMessageInfo - -func (m *RequestDeliverTx) GetTx() []byte { - if m != nil { - return m.Tx - } - return nil -} - -type RequestEndBlock struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` -} - -func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } -func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } -func (*RequestEndBlock) ProtoMessage() {} -func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{9} -} -func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestEndBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestEndBlock.Merge(m, src) -} -func (m *RequestEndBlock) XXX_Size() int { - return m.Size() -} -func (m *RequestEndBlock) XXX_DiscardUnknown() { - xxx_messageInfo_RequestEndBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestEndBlock proto.InternalMessageInfo - -func (m *RequestEndBlock) GetHeight() int64 { - if m != nil { - return m.Height - } - return 0 -} - type 
RequestCommit struct { } @@ -1081,7 +883,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{10} + return fileDescriptor_252557cfdd89a31a, []int{7} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1118,7 +920,7 @@ func (m *RequestListSnapshots) Reset() { *m = RequestListSnapshots{} } func (m *RequestListSnapshots) String() string { return proto.CompactTextString(m) } func (*RequestListSnapshots) ProtoMessage() {} func (*RequestListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{11} + return fileDescriptor_252557cfdd89a31a, []int{8} } func (m *RequestListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1157,7 +959,7 @@ func (m *RequestOfferSnapshot) Reset() { *m = RequestOfferSnapshot{} } func (m *RequestOfferSnapshot) String() string { return proto.CompactTextString(m) } func (*RequestOfferSnapshot) ProtoMessage() {} func (*RequestOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{12} + return fileDescriptor_252557cfdd89a31a, []int{9} } func (m *RequestOfferSnapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1211,7 +1013,7 @@ func (m *RequestLoadSnapshotChunk) Reset() { *m = RequestLoadSnapshotChu func (m *RequestLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } func (*RequestLoadSnapshotChunk) ProtoMessage() {} func (*RequestLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{13} + return fileDescriptor_252557cfdd89a31a, []int{10} } func (m *RequestLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1272,7 +1074,7 @@ func (m *RequestApplySnapshotChunk) Reset() { *m = RequestApplySnapshotC func 
(m *RequestApplySnapshotChunk) String() string { return proto.CompactTextString(m) } func (*RequestApplySnapshotChunk) ProtoMessage() {} func (*RequestApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{14} + return fileDescriptor_252557cfdd89a31a, []int{11} } func (m *RequestApplySnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1327,12 +1129,12 @@ type RequestPrepareProposal struct { MaxTxBytes int64 `protobuf:"varint,1,opt,name=max_tx_bytes,json=maxTxBytes,proto3" json:"max_tx_bytes,omitempty"` // txs is an array of transactions that will be included in a block, // sent to the app for possible modifications. - Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` - LocalLastCommit ExtendedCommitInfo `protobuf:"bytes,3,opt,name=local_last_commit,json=localLastCommit,proto3" json:"local_last_commit"` - ByzantineValidators []Misbehavior `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` - Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` - NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` + LocalLastCommit ExtendedCommitInfo `protobuf:"bytes,3,opt,name=local_last_commit,json=localLastCommit,proto3" json:"local_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,4,rep,name=misbehavior,proto3" json:"misbehavior"` + Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` // Dash's 
fields CoreChainLockedHeight uint32 `protobuf:"varint,100,opt,name=core_chain_locked_height,json=coreChainLockedHeight,proto3" json:"core_chain_locked_height,omitempty"` ProposerProTxHash []byte `protobuf:"bytes,101,opt,name=proposer_pro_tx_hash,json=proposerProTxHash,proto3" json:"proposer_pro_tx_hash,omitempty"` @@ -1344,7 +1146,7 @@ func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } func (*RequestPrepareProposal) ProtoMessage() {} func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{15} + return fileDescriptor_252557cfdd89a31a, []int{12} } func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1394,9 +1196,9 @@ func (m *RequestPrepareProposal) GetLocalLastCommit() ExtendedCommitInfo { return ExtendedCommitInfo{} } -func (m *RequestPrepareProposal) GetByzantineValidators() []Misbehavior { +func (m *RequestPrepareProposal) GetMisbehavior() []Misbehavior { if m != nil { - return m.ByzantineValidators + return m.Misbehavior } return nil } @@ -1451,9 +1253,9 @@ func (m *RequestPrepareProposal) GetVersion() *version.Consensus { } type RequestProcessProposal struct { - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` - ProposedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` - ByzantineValidators []Misbehavior `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + ProposedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,3,rep,name=misbehavior,proto3" json:"misbehavior"` // hash is the merkle root hash of 
the fields of the proposed block. Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` @@ -1466,7 +1268,7 @@ func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} func (m *RequestProcessProposal) String() string { return proto.CompactTextString(m) } func (*RequestProcessProposal) ProtoMessage() {} func (*RequestProcessProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{16} + return fileDescriptor_252557cfdd89a31a, []int{13} } func (m *RequestProcessProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1509,9 +1311,9 @@ func (m *RequestProcessProposal) GetProposedLastCommit() CommitInfo { return CommitInfo{} } -func (m *RequestProcessProposal) GetByzantineValidators() []Misbehavior { +func (m *RequestProcessProposal) GetMisbehavior() []Misbehavior { if m != nil { - return m.ByzantineValidators + return m.Misbehavior } return nil } @@ -1561,7 +1363,7 @@ func (m *RequestExtendVote) Reset() { *m = RequestExtendVote{} } func (m *RequestExtendVote) String() string { return proto.CompactTextString(m) } func (*RequestExtendVote) ProtoMessage() {} func (*RequestExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{17} + return fileDescriptor_252557cfdd89a31a, []int{14} } func (m *RequestExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1616,7 +1418,7 @@ func (m *RequestVerifyVoteExtension) Reset() { *m = RequestVerifyVoteExt func (m *RequestVerifyVoteExtension) String() string { return proto.CompactTextString(m) } func (*RequestVerifyVoteExtension) ProtoMessage() {} func (*RequestVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{18} + return fileDescriptor_252557cfdd89a31a, []int{15} } func (m *RequestVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1674,10 +1476,10 
@@ func (m *RequestVerifyVoteExtension) GetVoteExtensions() []*ExtendVoteExtension } type RequestFinalizeBlock struct { - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` - DecidedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` - ByzantineValidators []Misbehavior `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` - // hash is the merkle root hash of the fields of the proposed block. + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + DecidedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,3,rep,name=misbehavior,proto3" json:"misbehavior"` + // hash is the merkle root hash of the fields of the decided block. Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` @@ -1693,7 +1495,7 @@ func (m *RequestFinalizeBlock) Reset() { *m = RequestFinalizeBlock{} } func (m *RequestFinalizeBlock) String() string { return proto.CompactTextString(m) } func (*RequestFinalizeBlock) ProtoMessage() {} func (*RequestFinalizeBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{19} + return fileDescriptor_252557cfdd89a31a, []int{16} } func (m *RequestFinalizeBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1736,9 +1538,9 @@ func (m *RequestFinalizeBlock) GetDecidedLastCommit() CommitInfo { return CommitInfo{} } -func (m *RequestFinalizeBlock) GetByzantineValidators() []Misbehavior { +func (m *RequestFinalizeBlock) GetMisbehavior() []Misbehavior { if m != nil { - return m.ByzantineValidators + return m.Misbehavior } return nil } @@ -1807,10 +1609,7 @@ 
type Response struct { // *Response_Info // *Response_InitChain // *Response_Query - // *Response_BeginBlock // *Response_CheckTx - // *Response_DeliverTx - // *Response_EndBlock // *Response_Commit // *Response_ListSnapshots // *Response_OfferSnapshot @@ -1828,7 +1627,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{20} + return fileDescriptor_252557cfdd89a31a, []int{17} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1881,18 +1680,9 @@ type Response_InitChain struct { type Response_Query struct { Query *ResponseQuery `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` } -type Response_BeginBlock struct { - BeginBlock *ResponseBeginBlock `protobuf:"bytes,7,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` -} type Response_CheckTx struct { CheckTx *ResponseCheckTx `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } -type Response_DeliverTx struct { - DeliverTx *ResponseDeliverTx `protobuf:"bytes,9,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` -} -type Response_EndBlock struct { - EndBlock *ResponseEndBlock `protobuf:"bytes,10,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` -} type Response_Commit struct { Commit *ResponseCommit `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } @@ -1930,10 +1720,7 @@ func (*Response_Flush) isResponse_Value() {} func (*Response_Info) isResponse_Value() {} func (*Response_InitChain) isResponse_Value() {} func (*Response_Query) isResponse_Value() {} -func (*Response_BeginBlock) isResponse_Value() {} func (*Response_CheckTx) isResponse_Value() {} -func (*Response_DeliverTx) isResponse_Value() {} -func (*Response_EndBlock) 
isResponse_Value() {} func (*Response_Commit) isResponse_Value() {} func (*Response_ListSnapshots) isResponse_Value() {} func (*Response_OfferSnapshot) isResponse_Value() {} @@ -1994,14 +1781,6 @@ func (m *Response) GetQuery() *ResponseQuery { return nil } -// Deprecated: Do not use. -func (m *Response) GetBeginBlock() *ResponseBeginBlock { - if x, ok := m.GetValue().(*Response_BeginBlock); ok { - return x.BeginBlock - } - return nil -} - func (m *Response) GetCheckTx() *ResponseCheckTx { if x, ok := m.GetValue().(*Response_CheckTx); ok { return x.CheckTx @@ -2009,22 +1788,6 @@ func (m *Response) GetCheckTx() *ResponseCheckTx { return nil } -// Deprecated: Do not use. -func (m *Response) GetDeliverTx() *ResponseDeliverTx { - if x, ok := m.GetValue().(*Response_DeliverTx); ok { - return x.DeliverTx - } - return nil -} - -// Deprecated: Do not use. -func (m *Response) GetEndBlock() *ResponseEndBlock { - if x, ok := m.GetValue().(*Response_EndBlock); ok { - return x.EndBlock - } - return nil -} - func (m *Response) GetCommit() *ResponseCommit { if x, ok := m.GetValue().(*Response_Commit); ok { return x.Commit @@ -2104,10 +1867,7 @@ func (*Response) XXX_OneofWrappers() []interface{} { (*Response_Info)(nil), (*Response_InitChain)(nil), (*Response_Query)(nil), - (*Response_BeginBlock)(nil), (*Response_CheckTx)(nil), - (*Response_DeliverTx)(nil), - (*Response_EndBlock)(nil), (*Response_Commit)(nil), (*Response_ListSnapshots)(nil), (*Response_OfferSnapshot)(nil), @@ -2130,7 +1890,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{21} + return fileDescriptor_252557cfdd89a31a, []int{18} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2174,7 +1934,7 @@ func (m *ResponseEcho) Reset() { *m = 
ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{22} + return fileDescriptor_252557cfdd89a31a, []int{19} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2217,7 +1977,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{23} + return fileDescriptor_252557cfdd89a31a, []int{20} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2259,7 +2019,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{24} + return fileDescriptor_252557cfdd89a31a, []int{21} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2335,7 +2095,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{25} + return fileDescriptor_252557cfdd89a31a, []int{22} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2416,7 +2176,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{26} + return 
fileDescriptor_252557cfdd89a31a, []int{23} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2508,70 +2268,20 @@ func (m *ResponseQuery) GetCodespace() string { return "" } -type ResponseBeginBlock struct { - Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` -} - -func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } -func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseBeginBlock) ProtoMessage() {} -func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{27} -} -func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseBeginBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseBeginBlock.Merge(m, src) -} -func (m *ResponseBeginBlock) XXX_Size() int { - return m.Size() -} -func (m *ResponseBeginBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseBeginBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo - -func (m *ResponseBeginBlock) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - type ResponseCheckTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 
`protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` - Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` - Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` - // ABCI applications creating a ResponseCheckTX should not set mempool_error. - MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` + Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` } func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{28} + return fileDescriptor_252557cfdd89a31a, []int{24} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2614,20 +2324,6 @@ func (m *ResponseCheckTx) GetData() []byte { return nil } -func (m *ResponseCheckTx) GetLog() string { - if m != nil { - return m.Log - } - return "" -} - -func (m *ResponseCheckTx) GetInfo() string { - if m != nil { - return m.Info - } - return "" -} - func (m *ResponseCheckTx) GetGasWanted() int64 { if m != nil { return m.GasWanted @@ -2635,20 +2331,6 @@ func (m 
*ResponseCheckTx) GetGasWanted() int64 { return 0 } -func (m *ResponseCheckTx) GetGasUsed() int64 { - if m != nil { - return m.GasUsed - } - return 0 -} - -func (m *ResponseCheckTx) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - func (m *ResponseCheckTx) GetCodespace() string { if m != nil { return m.Codespace @@ -2670,13 +2352,6 @@ func (m *ResponseCheckTx) GetPriority() int64 { return 0 } -func (m *ResponseCheckTx) GetMempoolError() string { - if m != nil { - return m.MempoolError - } - return "" -} - type ResponseDeliverTx struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -2692,7 +2367,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{29} + return fileDescriptor_252557cfdd89a31a, []int{25} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2777,25 +2452,22 @@ func (m *ResponseDeliverTx) GetCodespace() string { return "" } -type ResponseEndBlock struct { - ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` - NextCoreChainLockUpdate *types1.CoreChainLock `protobuf:"bytes,100,opt,name=next_core_chain_lock_update,json=nextCoreChainLockUpdate,proto3" json:"next_core_chain_lock_update,omitempty"` - ValidatorSetUpdate *ValidatorSetUpdate `protobuf:"bytes,101,opt,name=validator_set_update,json=validatorSetUpdate,proto3" json:"validator_set_update,omitempty"` +type ResponseCommit struct { + RetainHeight int64 
`protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` } -func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } -func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseEndBlock) ProtoMessage() {} -func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{30} +func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } +func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } +func (*ResponseCommit) ProtoMessage() {} +func (*ResponseCommit) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{26} } -func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { +func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseEndBlock.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2805,93 +2477,19 @@ func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return b[:n], nil } } -func (m *ResponseEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseEndBlock.Merge(m, src) +func (m *ResponseCommit) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseCommit.Merge(m, src) } -func (m *ResponseEndBlock) XXX_Size() int { +func (m *ResponseCommit) XXX_Size() int { return m.Size() } -func (m *ResponseEndBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseEndBlock.DiscardUnknown(m) +func (m *ResponseCommit) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseCommit.DiscardUnknown(m) } -var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo +var xxx_messageInfo_ResponseCommit 
proto.InternalMessageInfo -func (m *ResponseEndBlock) GetConsensusParamUpdates() *types1.ConsensusParams { - if m != nil { - return m.ConsensusParamUpdates - } - return nil -} - -func (m *ResponseEndBlock) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - -func (m *ResponseEndBlock) GetNextCoreChainLockUpdate() *types1.CoreChainLock { - if m != nil { - return m.NextCoreChainLockUpdate - } - return nil -} - -func (m *ResponseEndBlock) GetValidatorSetUpdate() *ValidatorSetUpdate { - if m != nil { - return m.ValidatorSetUpdate - } - return nil -} - -type ResponseCommit struct { - // reserve 1 - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` -} - -func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } -func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } -func (*ResponseCommit) ProtoMessage() {} -func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{31} -} -func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseCommit) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseCommit.Merge(m, src) -} -func (m *ResponseCommit) XXX_Size() int { - return m.Size() -} -func (m *ResponseCommit) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseCommit.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseCommit proto.InternalMessageInfo - -func (m *ResponseCommit) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *ResponseCommit) GetRetainHeight() 
int64 { +func (m *ResponseCommit) GetRetainHeight() int64 { if m != nil { return m.RetainHeight } @@ -2906,7 +2504,7 @@ func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } func (m *ResponseListSnapshots) String() string { return proto.CompactTextString(m) } func (*ResponseListSnapshots) ProtoMessage() {} func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{32} + return fileDescriptor_252557cfdd89a31a, []int{27} } func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2950,7 +2548,7 @@ func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } func (*ResponseOfferSnapshot) ProtoMessage() {} func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33} + return fileDescriptor_252557cfdd89a31a, []int{28} } func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2994,7 +2592,7 @@ func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotC func (m *ResponseLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } func (*ResponseLoadSnapshotChunk) ProtoMessage() {} func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{34} + return fileDescriptor_252557cfdd89a31a, []int{29} } func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3040,7 +2638,7 @@ func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapsho func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } func (*ResponseApplySnapshotChunk) ProtoMessage() {} func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{35} + return fileDescriptor_252557cfdd89a31a, []int{30} } func (m 
*ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3102,7 +2700,7 @@ func (m *ResponsePrepareProposal) Reset() { *m = ResponsePrepareProposal func (m *ResponsePrepareProposal) String() string { return proto.CompactTextString(m) } func (*ResponsePrepareProposal) ProtoMessage() {} func (*ResponsePrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{36} + return fileDescriptor_252557cfdd89a31a, []int{31} } func (m *ResponsePrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3178,7 +2776,7 @@ func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal func (m *ResponseProcessProposal) String() string { return proto.CompactTextString(m) } func (*ResponseProcessProposal) ProtoMessage() {} func (*ResponseProcessProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37} + return fileDescriptor_252557cfdd89a31a, []int{32} } func (m *ResponseProcessProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3251,7 +2849,7 @@ func (m *ExtendVoteExtension) Reset() { *m = ExtendVoteExtension{} } func (m *ExtendVoteExtension) String() string { return proto.CompactTextString(m) } func (*ExtendVoteExtension) ProtoMessage() {} func (*ExtendVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{38} + return fileDescriptor_252557cfdd89a31a, []int{33} } func (m *ExtendVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3302,7 +2900,7 @@ func (m *ResponseExtendVote) Reset() { *m = ResponseExtendVote{} } func (m *ResponseExtendVote) String() string { return proto.CompactTextString(m) } func (*ResponseExtendVote) ProtoMessage() {} func (*ResponseExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{39} + return fileDescriptor_252557cfdd89a31a, []int{34} } func (m *ResponseExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-3346,7 +2944,7 @@ func (m *ResponseVerifyVoteExtension) Reset() { *m = ResponseVerifyVoteE func (m *ResponseVerifyVoteExtension) String() string { return proto.CompactTextString(m) } func (*ResponseVerifyVoteExtension) ProtoMessage() {} func (*ResponseVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{40} + return fileDescriptor_252557cfdd89a31a, []int{35} } func (m *ResponseVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3387,7 +2985,6 @@ type ResponseFinalizeBlock struct { TxResults []*ExecTxResult `protobuf:"bytes,2,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,4,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` AppHash []byte `protobuf:"bytes,5,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - RetainHeight int64 `protobuf:"varint,6,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` NextCoreChainLockUpdate *types1.CoreChainLock `protobuf:"bytes,100,opt,name=next_core_chain_lock_update,json=nextCoreChainLockUpdate,proto3" json:"next_core_chain_lock_update,omitempty"` ValidatorSetUpdate *ValidatorSetUpdate `protobuf:"bytes,101,opt,name=validator_set_update,json=validatorSetUpdate,proto3" json:"validator_set_update,omitempty"` } @@ -3396,7 +2993,7 @@ func (m *ResponseFinalizeBlock) Reset() { *m = ResponseFinalizeBlock{} } func (m *ResponseFinalizeBlock) String() string { return proto.CompactTextString(m) } func (*ResponseFinalizeBlock) ProtoMessage() {} func (*ResponseFinalizeBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{41} + return fileDescriptor_252557cfdd89a31a, []int{36} } func (m *ResponseFinalizeBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3453,13 +3050,6 @@ func (m *ResponseFinalizeBlock) GetAppHash() []byte { return nil } 
-func (m *ResponseFinalizeBlock) GetRetainHeight() int64 { - if m != nil { - return m.RetainHeight - } - return 0 -} - func (m *ResponseFinalizeBlock) GetNextCoreChainLockUpdate() *types1.CoreChainLock { if m != nil { return m.NextCoreChainLockUpdate @@ -3486,7 +3076,7 @@ func (m *CommitInfo) Reset() { *m = CommitInfo{} } func (m *CommitInfo) String() string { return proto.CompactTextString(m) } func (*CommitInfo) ProtoMessage() {} func (*CommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{42} + return fileDescriptor_252557cfdd89a31a, []int{37} } func (m *CommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3568,7 +3158,7 @@ func (m *ExtendedCommitInfo) Reset() { *m = ExtendedCommitInfo{} } func (m *ExtendedCommitInfo) String() string { return proto.CompactTextString(m) } func (*ExtendedCommitInfo) ProtoMessage() {} func (*ExtendedCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{43} + return fileDescriptor_252557cfdd89a31a, []int{38} } func (m *ExtendedCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3633,7 +3223,7 @@ func (m *ExtendedCommitInfo) GetThresholdVoteExtensions() []*types1.VoteExtensio } // Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// ResponseFinalizeBlock, ResponseDeliverTx, ExecTxResult // Later, transactions may be queried using these events. 
type Event struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` @@ -3644,7 +3234,7 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{44} + return fileDescriptor_252557cfdd89a31a, []int{39} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3698,7 +3288,7 @@ func (m *EventAttribute) Reset() { *m = EventAttribute{} } func (m *EventAttribute) String() string { return proto.CompactTextString(m) } func (*EventAttribute) ProtoMessage() {} func (*EventAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{45} + return fileDescriptor_252557cfdd89a31a, []int{40} } func (m *EventAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3766,7 +3356,7 @@ func (m *ExecTxResult) Reset() { *m = ExecTxResult{} } func (m *ExecTxResult) String() string { return proto.CompactTextString(m) } func (*ExecTxResult) ProtoMessage() {} func (*ExecTxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{46} + return fileDescriptor_252557cfdd89a31a, []int{41} } func (m *ExecTxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3865,7 +3455,7 @@ func (m *TxResult) Reset() { *m = TxResult{} } func (m *TxResult) String() string { return proto.CompactTextString(m) } func (*TxResult) ProtoMessage() {} func (*TxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{47} + return fileDescriptor_252557cfdd89a31a, []int{42} } func (m *TxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3931,7 +3521,7 @@ func (m *TxRecord) Reset() { *m = TxRecord{} } func (m *TxRecord) String() string { return proto.CompactTextString(m) } func (*TxRecord) ProtoMessage() {} func (*TxRecord) Descriptor() ([]byte, []int) { - return 
fileDescriptor_252557cfdd89a31a, []int{48} + return fileDescriptor_252557cfdd89a31a, []int{43} } func (m *TxRecord) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3986,7 +3576,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{49} + return fileDescriptor_252557cfdd89a31a, []int{44} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4041,7 +3631,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{50} + return fileDescriptor_252557cfdd89a31a, []int{45} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4108,7 +3698,7 @@ func (m *ValidatorSetUpdate) Reset() { *m = ValidatorSetUpdate{} } func (m *ValidatorSetUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorSetUpdate) ProtoMessage() {} func (*ValidatorSetUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{51} + return fileDescriptor_252557cfdd89a31a, []int{46} } func (m *ValidatorSetUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4166,7 +3756,7 @@ func (m *ThresholdPublicKeyUpdate) Reset() { *m = ThresholdPublicKeyUpda func (m *ThresholdPublicKeyUpdate) String() string { return proto.CompactTextString(m) } func (*ThresholdPublicKeyUpdate) ProtoMessage() {} func (*ThresholdPublicKeyUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{52} + return fileDescriptor_252557cfdd89a31a, []int{47} } func (m *ThresholdPublicKeyUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4210,7 
+3800,7 @@ func (m *QuorumHashUpdate) Reset() { *m = QuorumHashUpdate{} } func (m *QuorumHashUpdate) String() string { return proto.CompactTextString(m) } func (*QuorumHashUpdate) ProtoMessage() {} func (*QuorumHashUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{53} + return fileDescriptor_252557cfdd89a31a, []int{48} } func (m *QuorumHashUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4256,7 +3846,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{54} + return fileDescriptor_252557cfdd89a31a, []int{49} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4313,7 +3903,7 @@ func (m *ExtendedVoteInfo) Reset() { *m = ExtendedVoteInfo{} } func (m *ExtendedVoteInfo) String() string { return proto.CompactTextString(m) } func (*ExtendedVoteInfo) ProtoMessage() {} func (*ExtendedVoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{55} + return fileDescriptor_252557cfdd89a31a, []int{50} } func (m *ExtendedVoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4381,7 +3971,7 @@ func (m *Misbehavior) Reset() { *m = Misbehavior{} } func (m *Misbehavior) String() string { return proto.CompactTextString(m) } func (*Misbehavior) ProtoMessage() {} func (*Misbehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{56} + return fileDescriptor_252557cfdd89a31a, []int{51} } func (m *Misbehavior) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4458,7 +4048,7 @@ func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{57} + 
return fileDescriptor_252557cfdd89a31a, []int{52} } func (m *Snapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4543,10 +4133,7 @@ func init() { proto.RegisterType((*RequestInfo)(nil), "tendermint.abci.RequestInfo") proto.RegisterType((*RequestInitChain)(nil), "tendermint.abci.RequestInitChain") proto.RegisterType((*RequestQuery)(nil), "tendermint.abci.RequestQuery") - proto.RegisterType((*RequestBeginBlock)(nil), "tendermint.abci.RequestBeginBlock") proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.RequestCheckTx") - proto.RegisterType((*RequestDeliverTx)(nil), "tendermint.abci.RequestDeliverTx") - proto.RegisterType((*RequestEndBlock)(nil), "tendermint.abci.RequestEndBlock") proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.RequestCommit") proto.RegisterType((*RequestListSnapshots)(nil), "tendermint.abci.RequestListSnapshots") proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.RequestOfferSnapshot") @@ -4564,10 +4151,8 @@ func init() { proto.RegisterType((*ResponseInfo)(nil), "tendermint.abci.ResponseInfo") proto.RegisterType((*ResponseInitChain)(nil), "tendermint.abci.ResponseInitChain") proto.RegisterType((*ResponseQuery)(nil), "tendermint.abci.ResponseQuery") - proto.RegisterType((*ResponseBeginBlock)(nil), "tendermint.abci.ResponseBeginBlock") proto.RegisterType((*ResponseCheckTx)(nil), "tendermint.abci.ResponseCheckTx") proto.RegisterType((*ResponseDeliverTx)(nil), "tendermint.abci.ResponseDeliverTx") - proto.RegisterType((*ResponseEndBlock)(nil), "tendermint.abci.ResponseEndBlock") proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.ResponseCommit") proto.RegisterType((*ResponseListSnapshots)(nil), "tendermint.abci.ResponseListSnapshots") proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.ResponseOfferSnapshot") @@ -4600,248 +4185,236 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a 
= []byte{ - // 3850 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x23, 0x47, - 0x76, 0x67, 0xf3, 0x4b, 0xe4, 0xe3, 0xa7, 0x4a, 0x9a, 0x19, 0x0e, 0x67, 0x46, 0x92, 0x7b, 0x62, - 0xcf, 0x78, 0x6c, 0x4b, 0xb6, 0x26, 0xf6, 0x8c, 0x63, 0x27, 0x06, 0x45, 0x71, 0x4c, 0xcd, 0x68, - 0x24, 0x4d, 0x8b, 0x92, 0xe1, 0x38, 0x9e, 0x76, 0x8b, 0x5d, 0x12, 0xdb, 0x43, 0xb2, 0xdb, 0xdd, - 0x4d, 0x99, 0xf2, 0xd5, 0xf1, 0xc5, 0x27, 0xdf, 0x92, 0x8b, 0x91, 0x4b, 0x02, 0xe4, 0x92, 0xbf, - 0x20, 0x40, 0x72, 0x35, 0x72, 0x32, 0x10, 0x20, 0x09, 0x02, 0xc4, 0x31, 0xec, 0xcb, 0x62, 0x8f, - 0xbb, 0xc0, 0x2e, 0xb0, 0x87, 0xdd, 0x45, 0x7d, 0xf4, 0x17, 0xc9, 0xe6, 0x87, 0xe5, 0xfd, 0xf4, - 0xde, 0xba, 0x5e, 0xbd, 0xf7, 0xba, 0xaa, 0xfa, 0xd5, 0x7b, 0xaf, 0x7e, 0xf5, 0x1a, 0xae, 0xd8, - 0xb8, 0xab, 0x62, 0xb3, 0xa3, 0x75, 0xed, 0x35, 0xe5, 0xa8, 0xa9, 0xad, 0xd9, 0x67, 0x06, 0xb6, - 0x56, 0x0d, 0x53, 0xb7, 0x75, 0x54, 0xf0, 0x3a, 0x57, 0x49, 0x67, 0xf9, 0x9a, 0x8f, 0xbb, 0x69, - 0x9e, 0x19, 0xb6, 0xbe, 0x66, 0x98, 0xba, 0x7e, 0xcc, 0xf8, 0xcb, 0x7e, 0x65, 0x54, 0xcf, 0x9a, - 0xaa, 0x58, 0x2d, 0xde, 0x79, 0x75, 0xa8, 0xd3, 0xf7, 0xaa, 0x40, 0x2f, 0xd7, 0xfc, 0x04, 0x9f, - 0x39, 0xbd, 0xd7, 0x86, 0x64, 0x0d, 0xc5, 0x54, 0x3a, 0x4e, 0xf7, 0x92, 0xaf, 0xfb, 0x14, 0x9b, - 0x96, 0xa6, 0x77, 0x03, 0xca, 0x97, 0x4f, 0x74, 0xfd, 0xa4, 0x8d, 0xd7, 0x68, 0xeb, 0xa8, 0x77, - 0xbc, 0x66, 0x6b, 0x1d, 0x6c, 0xd9, 0x4a, 0xc7, 0xe0, 0x0c, 0x8b, 0x27, 0xfa, 0x89, 0x4e, 0x1f, - 0xd7, 0xc8, 0x13, 0xa3, 0x8a, 0xbf, 0x02, 0x98, 0x93, 0xf0, 0x07, 0x3d, 0x6c, 0xd9, 0x68, 0x1d, - 0xe2, 0xb8, 0xd9, 0xd2, 0x4b, 0xc2, 0x8a, 0x70, 0x33, 0xb3, 0x7e, 0x75, 0x75, 0x60, 0x65, 0x56, - 0x39, 0x5f, 0xad, 0xd9, 0xd2, 0xeb, 0x11, 0x89, 0xf2, 0xa2, 0x97, 0x21, 0x71, 0xdc, 0xee, 0x59, - 0xad, 0x52, 0x94, 0x0a, 0x5d, 0x0b, 0x13, 0xba, 0x47, 0x98, 0xea, 0x11, 0x89, 0x71, 0x93, 0x57, - 0x69, 0xdd, 0x63, 0xbd, 0x14, 0x1b, 0xff, 0xaa, 0xad, 0xee, 0x31, 0x7d, 
0x15, 0xe1, 0x45, 0x1b, - 0x00, 0x5a, 0x57, 0xb3, 0xe5, 0x66, 0x4b, 0xd1, 0xba, 0xa5, 0x38, 0x95, 0x7c, 0x2a, 0x5c, 0x52, - 0xb3, 0xab, 0x84, 0xb1, 0x1e, 0x91, 0xd2, 0x9a, 0xd3, 0x20, 0xc3, 0xfd, 0xa0, 0x87, 0xcd, 0xb3, - 0x52, 0x62, 0xfc, 0x70, 0x1f, 0x11, 0x26, 0x32, 0x5c, 0xca, 0x8d, 0xb6, 0x20, 0x73, 0x84, 0x4f, - 0xb4, 0xae, 0x7c, 0xd4, 0xd6, 0x9b, 0x4f, 0x4a, 0x49, 0x2a, 0x2c, 0x86, 0x09, 0x6f, 0x10, 0xd6, - 0x0d, 0xc2, 0xb9, 0x11, 0x2d, 0x09, 0xf5, 0x88, 0x04, 0x47, 0x2e, 0x05, 0xbd, 0x0e, 0xa9, 0x66, - 0x0b, 0x37, 0x9f, 0xc8, 0x76, 0xbf, 0x34, 0x47, 0xf5, 0x2c, 0x87, 0xe9, 0xa9, 0x12, 0xbe, 0x46, - 0xbf, 0x1e, 0x91, 0xe6, 0x9a, 0xec, 0x11, 0xdd, 0x03, 0x50, 0x71, 0x5b, 0x3b, 0xc5, 0x26, 0x91, - 0x4f, 0x8d, 0x5f, 0x83, 0x4d, 0xc6, 0xd9, 0xe8, 0xf3, 0x61, 0xa4, 0x55, 0x87, 0x80, 0xaa, 0x90, - 0xc6, 0x5d, 0x95, 0x4f, 0x27, 0x4d, 0xd5, 0xac, 0x84, 0x7e, 0xef, 0xae, 0xea, 0x9f, 0x4c, 0x0a, - 0xf3, 0x36, 0xba, 0x0b, 0xc9, 0xa6, 0xde, 0xe9, 0x68, 0x76, 0x09, 0xa8, 0x86, 0xa5, 0xd0, 0x89, - 0x50, 0xae, 0x7a, 0x44, 0xe2, 0xfc, 0x68, 0x07, 0xf2, 0x6d, 0xcd, 0xb2, 0x65, 0xab, 0xab, 0x18, - 0x56, 0x4b, 0xb7, 0xad, 0x52, 0x86, 0x6a, 0x78, 0x3a, 0x4c, 0xc3, 0xb6, 0x66, 0xd9, 0xfb, 0x0e, - 0x73, 0x3d, 0x22, 0xe5, 0xda, 0x7e, 0x02, 0xd1, 0xa7, 0x1f, 0x1f, 0x63, 0xd3, 0x55, 0x58, 0xca, - 0x8e, 0xd7, 0xb7, 0x4b, 0xb8, 0x1d, 0x79, 0xa2, 0x4f, 0xf7, 0x13, 0xd0, 0x3b, 0xb0, 0xd0, 0xd6, - 0x15, 0xd5, 0x55, 0x27, 0x37, 0x5b, 0xbd, 0xee, 0x93, 0x52, 0x8e, 0x2a, 0x7d, 0x36, 0x74, 0x90, - 0xba, 0xa2, 0x3a, 0x2a, 0xaa, 0x44, 0xa0, 0x1e, 0x91, 0xe6, 0xdb, 0x83, 0x44, 0xf4, 0x18, 0x16, - 0x15, 0xc3, 0x68, 0x9f, 0x0d, 0x6a, 0xcf, 0x53, 0xed, 0xb7, 0xc2, 0xb4, 0x57, 0x88, 0xcc, 0xa0, - 0x7a, 0xa4, 0x0c, 0x51, 0x51, 0x03, 0x8a, 0x86, 0x89, 0x0d, 0xc5, 0xc4, 0xb2, 0x61, 0xea, 0x86, - 0x6e, 0x29, 0xed, 0x52, 0x81, 0xea, 0xbe, 0x11, 0xa6, 0x7b, 0x8f, 0xf1, 0xef, 0x71, 0xf6, 0x7a, - 0x44, 0x2a, 0x18, 0x41, 0x12, 0xd3, 0xaa, 0x37, 0xb1, 0x65, 0x79, 0x5a, 0x8b, 0x93, 0xb4, 0x52, - 0xfe, 0xa0, 
0xd6, 0x00, 0x09, 0xd5, 0x20, 0x83, 0xfb, 0x44, 0x5c, 0x3e, 0xd5, 0x6d, 0x5c, 0x9a, - 0x1f, 0xbf, 0xb1, 0x6a, 0x94, 0xf5, 0x50, 0xb7, 0x31, 0xd9, 0x54, 0xd8, 0x6d, 0x21, 0x05, 0x2e, - 0x9c, 0x62, 0x53, 0x3b, 0x3e, 0xa3, 0x6a, 0x64, 0xda, 0x43, 0x3c, 0x64, 0x09, 0x51, 0x85, 0xcf, - 0x85, 0x29, 0x3c, 0xa4, 0x42, 0x44, 0x45, 0xcd, 0x11, 0xa9, 0x47, 0xa4, 0x85, 0xd3, 0x61, 0x32, - 0x31, 0xb1, 0x63, 0xad, 0xab, 0xb4, 0xb5, 0x8f, 0x30, 0xdf, 0x36, 0x0b, 0xe3, 0x4d, 0xec, 0x1e, - 0xe7, 0xa6, 0x7b, 0x85, 0x98, 0xd8, 0xb1, 0x9f, 0xb0, 0x31, 0x07, 0x89, 0x53, 0xa5, 0xdd, 0xc3, - 0xe2, 0x0d, 0xc8, 0xf8, 0x1c, 0x2b, 0x2a, 0xc1, 0x5c, 0x07, 0x5b, 0x96, 0x72, 0x82, 0xa9, 0x1f, - 0x4e, 0x4b, 0x4e, 0x53, 0xcc, 0x43, 0xd6, 0xef, 0x4c, 0xc5, 0xcf, 0x04, 0x57, 0x92, 0xf8, 0x49, - 0x22, 0xc9, 0x03, 0x83, 0x23, 0xc9, 0x9b, 0xe8, 0x3a, 0xe4, 0xe8, 0x90, 0x65, 0xa7, 0x9f, 0x38, - 0xeb, 0xb8, 0x94, 0xa5, 0xc4, 0x43, 0xce, 0xb4, 0x0c, 0x19, 0x63, 0xdd, 0x70, 0x59, 0x62, 0x94, - 0x05, 0x8c, 0x75, 0xc3, 0x61, 0x78, 0x0a, 0xb2, 0x64, 0x7e, 0x2e, 0x47, 0x9c, 0xbe, 0x24, 0x43, - 0x68, 0x9c, 0x45, 0xfc, 0xdb, 0x18, 0x14, 0x07, 0x1d, 0x30, 0xba, 0x0b, 0x71, 0x12, 0x8b, 0x78, - 0x58, 0x29, 0xaf, 0xb2, 0x40, 0xb5, 0xea, 0x04, 0xaa, 0xd5, 0x86, 0x13, 0xa8, 0x36, 0x52, 0x5f, - 0x7c, 0xb5, 0x1c, 0xf9, 0xec, 0xff, 0x97, 0x05, 0x89, 0x4a, 0xa0, 0xcb, 0xc4, 0x57, 0x2a, 0x5a, - 0x57, 0xd6, 0x54, 0x3a, 0xe4, 0x34, 0x71, 0x84, 0x8a, 0xd6, 0xdd, 0x52, 0xd1, 0x36, 0x14, 0x9b, - 0x7a, 0xd7, 0xc2, 0x5d, 0xab, 0x67, 0xc9, 0x2c, 0x50, 0xf2, 0x60, 0x12, 0x70, 0x87, 0x2c, 0x42, - 0x56, 0x1d, 0xce, 0x3d, 0xca, 0x28, 0x15, 0x9a, 0x41, 0x02, 0xda, 0x81, 0xdc, 0xa9, 0xd2, 0xd6, - 0x54, 0xc5, 0xd6, 0x4d, 0xd9, 0xc2, 0x36, 0x8f, 0x2e, 0xd7, 0x87, 0xbe, 0xed, 0xa1, 0xc3, 0xb5, - 0x8f, 0xed, 0x03, 0x43, 0x55, 0x6c, 0xbc, 0x11, 0xff, 0xe2, 0xab, 0x65, 0x41, 0xca, 0x9e, 0xfa, - 0x7a, 0xd0, 0x33, 0x50, 0x50, 0x0c, 0x43, 0xb6, 0x6c, 0xc5, 0xc6, 0xf2, 0xd1, 0x99, 0x8d, 0x2d, - 0x1a, 0x70, 0xb2, 0x52, 0x4e, 0x31, 0x8c, 0x7d, 
0x42, 0xdd, 0x20, 0x44, 0xf4, 0x34, 0xe4, 0x49, - 0x6c, 0xd2, 0x94, 0xb6, 0xdc, 0xc2, 0xda, 0x49, 0xcb, 0xa6, 0xa1, 0x25, 0x26, 0xe5, 0x38, 0xb5, - 0x4e, 0x89, 0x68, 0x15, 0x16, 0x1c, 0xb6, 0xa6, 0x6e, 0x62, 0x87, 0x97, 0x84, 0x8f, 0x9c, 0x34, - 0xcf, 0xbb, 0xaa, 0xba, 0x89, 0x19, 0xbf, 0xa8, 0xba, 0x96, 0x42, 0xe3, 0x18, 0x42, 0x10, 0x57, - 0x15, 0x5b, 0xa1, 0x5f, 0x20, 0x2b, 0xd1, 0x67, 0x42, 0x33, 0x14, 0xbb, 0xc5, 0xd7, 0x95, 0x3e, - 0xa3, 0x8b, 0x90, 0xe4, 0xaa, 0x63, 0x74, 0x18, 0xbc, 0x85, 0x16, 0x21, 0x61, 0x98, 0xfa, 0x29, - 0xa6, 0xcb, 0x92, 0x92, 0x58, 0x43, 0xfc, 0x38, 0x0a, 0xf3, 0x43, 0x11, 0x8f, 0xe8, 0x6d, 0x29, - 0x56, 0xcb, 0x79, 0x17, 0x79, 0x46, 0xaf, 0x10, 0xbd, 0x8a, 0x8a, 0x4d, 0x9e, 0x25, 0x94, 0x86, - 0x3f, 0x51, 0x9d, 0xf6, 0xd3, 0xc5, 0x8c, 0x48, 0x9c, 0x1b, 0x3d, 0x80, 0x62, 0x5b, 0xb1, 0x6c, - 0x99, 0x45, 0x0d, 0xd9, 0x97, 0x31, 0x5c, 0x19, 0xfa, 0x32, 0x2c, 0xc6, 0x90, 0x8d, 0xc0, 0x95, - 0xe4, 0x89, 0xa8, 0x47, 0x45, 0x07, 0xb0, 0x78, 0x74, 0xf6, 0x91, 0xd2, 0xb5, 0xb5, 0x2e, 0x96, - 0xdd, 0xaf, 0x65, 0x95, 0xe2, 0x2b, 0xb1, 0x91, 0x29, 0xc8, 0x43, 0xcd, 0x3a, 0xc2, 0x2d, 0xe5, - 0x54, 0xd3, 0x9d, 0x61, 0x2d, 0xb8, 0xf2, 0xae, 0x19, 0x58, 0xa2, 0x04, 0xf9, 0x60, 0xb8, 0x46, - 0x79, 0x88, 0xda, 0x7d, 0x3e, 0xff, 0xa8, 0xdd, 0x47, 0x2f, 0x42, 0x9c, 0xcc, 0x91, 0xce, 0x3d, - 0x3f, 0xe2, 0x45, 0x5c, 0xae, 0x71, 0x66, 0x60, 0x89, 0x72, 0x8a, 0xa2, 0xbb, 0x8b, 0xdc, 0x10, - 0x3e, 0xa8, 0x55, 0x7c, 0x16, 0x0a, 0x03, 0xf1, 0xd9, 0xf7, 0xf9, 0x04, 0xff, 0xe7, 0x13, 0x0b, - 0x90, 0x0b, 0x04, 0x62, 0xf1, 0x22, 0x2c, 0x8e, 0x8a, 0xab, 0x62, 0xcb, 0xa5, 0x07, 0xe2, 0x23, - 0x7a, 0x19, 0x52, 0x6e, 0x60, 0x65, 0xbb, 0xf8, 0xf2, 0xd0, 0x2c, 0x1c, 0x66, 0xc9, 0x65, 0x25, - 0xdb, 0x97, 0xec, 0x02, 0x6a, 0x0e, 0x51, 0x3a, 0xf0, 0x39, 0xc5, 0x30, 0xea, 0x8a, 0xd5, 0x12, - 0xdf, 0x83, 0x52, 0x58, 0xd0, 0x1c, 0x98, 0x46, 0xdc, 0xb5, 0xc2, 0x8b, 0x90, 0x3c, 0xd6, 0xcd, - 0x8e, 0x62, 0x53, 0x65, 0x39, 0x89, 0xb7, 0x88, 0x75, 0xb2, 0x00, 0x1a, 0xa3, 0x64, 
0xd6, 0x10, - 0x65, 0xb8, 0x1c, 0x1a, 0x38, 0x89, 0x88, 0xd6, 0x55, 0x31, 0x5b, 0xcf, 0x9c, 0xc4, 0x1a, 0x9e, - 0x22, 0x36, 0x58, 0xd6, 0x20, 0xaf, 0xb5, 0xe8, 0x5c, 0xa9, 0xfe, 0xb4, 0xc4, 0x5b, 0xe2, 0x7f, - 0xc6, 0xe1, 0xe2, 0xe8, 0xf0, 0x89, 0x56, 0x20, 0xdb, 0x51, 0xfa, 0xb2, 0xdd, 0xe7, 0x7b, 0x9f, - 0x7d, 0x0e, 0xe8, 0x28, 0xfd, 0x46, 0x9f, 0x6d, 0xfc, 0x22, 0xc4, 0xec, 0xbe, 0x55, 0x8a, 0xae, - 0xc4, 0x6e, 0x66, 0x25, 0xf2, 0x88, 0x0e, 0x60, 0xbe, 0xad, 0x37, 0x95, 0xb6, 0xec, 0xb3, 0x78, - 0x6e, 0xec, 0xc3, 0x6e, 0x88, 0x05, 0x42, 0xac, 0x0e, 0x19, 0x7d, 0x81, 0xea, 0xd8, 0x76, 0x2d, - 0xff, 0x37, 0x64, 0xf5, 0xbe, 0x6f, 0x94, 0x08, 0x78, 0x0a, 0xc7, 0xd7, 0x27, 0x67, 0xf6, 0xf5, - 0x2f, 0xc2, 0x62, 0x17, 0xf7, 0x6d, 0xdf, 0x18, 0x99, 0xe1, 0xcc, 0xd1, 0x6f, 0x81, 0x48, 0x9f, - 0xf7, 0x7e, 0x62, 0x43, 0xe8, 0x0e, 0x94, 0xa8, 0x37, 0x64, 0x21, 0x82, 0xec, 0x00, 0xac, 0x3a, - 0xae, 0x51, 0xa5, 0xdf, 0xf5, 0x02, 0xe9, 0xa7, 0x41, 0x68, 0x9b, 0xf6, 0x72, 0x77, 0xba, 0x06, - 0x8b, 0x2c, 0x85, 0xc1, 0x26, 0xc9, 0x65, 0xc8, 0x77, 0xa2, 0xaf, 0xc2, 0xf4, 0x55, 0xf3, 0x4e, - 0xdf, 0x9e, 0xa9, 0x37, 0xfa, 0xf4, 0x4d, 0x2f, 0xba, 0x02, 0xaa, 0x4c, 0x2c, 0xda, 0x89, 0x80, - 0xc7, 0xd4, 0x3e, 0x91, 0xd3, 0x57, 0x31, 0xdc, 0x58, 0x79, 0xc7, 0x8b, 0xc5, 0x27, 0xc3, 0x27, - 0x0d, 0xde, 0xe5, 0xc5, 0x25, 0x37, 0x54, 0x8b, 0xff, 0x10, 0xf3, 0x59, 0x55, 0x30, 0x57, 0xe2, - 0x36, 0x23, 0x78, 0x36, 0xb3, 0xef, 0x1b, 0x97, 0xdf, 0x6c, 0xa2, 0xd3, 0xfa, 0x48, 0x77, 0xe8, - 0x53, 0x58, 0x4c, 0xec, 0x7c, 0x16, 0xe3, 0xc4, 0x85, 0xb8, 0x2f, 0x2e, 0xfc, 0x3e, 0x58, 0x51, - 0x98, 0x31, 0xa4, 0x42, 0x8c, 0x41, 0x7c, 0xc3, 0x8d, 0x7a, 0x5e, 0x3a, 0x3a, 0x32, 0xea, 0x79, - 0xb3, 0x8b, 0x06, 0xdc, 0xf1, 0x7f, 0x08, 0x50, 0x0e, 0xcf, 0x3f, 0x47, 0xaa, 0x7a, 0x09, 0x2e, - 0x78, 0xf9, 0x89, 0x7f, 0x94, 0xcc, 0x53, 0x21, 0xb7, 0xd3, 0xb3, 0xd9, 0xb0, 0x58, 0xfe, 0x10, - 0x0a, 0xc1, 0x1c, 0xd9, 0xf1, 0x05, 0x7f, 0x16, 0xe2, 0x65, 0x02, 0xa3, 0x93, 0xf2, 0xa7, 0xfe, - 0xa6, 0x25, 0xfe, 0x7b, 
0xdc, 0x8d, 0x19, 0x81, 0x84, 0x77, 0x84, 0xb5, 0x3e, 0x82, 0x05, 0x15, - 0x37, 0x35, 0xf5, 0xbb, 0x1a, 0xeb, 0x3c, 0x97, 0xfe, 0xe1, 0xda, 0xea, 0x1f, 0xa5, 0xc7, 0xfb, - 0xef, 0x0c, 0xa4, 0x24, 0x6c, 0x19, 0xa4, 0x07, 0x6d, 0x40, 0x1a, 0xf7, 0x9b, 0xd8, 0xb0, 0x9d, - 0x53, 0xcc, 0xe8, 0xd3, 0x20, 0xe3, 0xae, 0x39, 0x9c, 0xf5, 0x88, 0xe4, 0x89, 0xa1, 0xdb, 0x1c, - 0xc6, 0x0a, 0x47, 0xa4, 0xb8, 0xb8, 0x1f, 0xc7, 0x7a, 0xc5, 0xc1, 0xb1, 0x62, 0xa1, 0x50, 0x06, - 0x93, 0x1a, 0x00, 0xb2, 0x6e, 0x73, 0x20, 0x2b, 0x3e, 0xe1, 0x65, 0x01, 0x24, 0xab, 0x1a, 0x40, - 0xb2, 0x12, 0x13, 0xa6, 0x19, 0x02, 0x65, 0xbd, 0xe2, 0x40, 0x59, 0xc9, 0x09, 0x23, 0x1e, 0xc0, - 0xb2, 0xee, 0x07, 0xb1, 0xac, 0xb9, 0x90, 0x14, 0xc3, 0x91, 0x1e, 0x0b, 0x66, 0xfd, 0xa5, 0x0f, - 0xcc, 0x4a, 0x85, 0xa2, 0x48, 0x4c, 0xd1, 0x08, 0x34, 0xeb, 0xcd, 0x00, 0x9a, 0x95, 0x9e, 0xb0, - 0x0e, 0x63, 0xe0, 0xac, 0x4d, 0x3f, 0x9c, 0x05, 0xa1, 0xa8, 0x18, 0xff, 0xee, 0x61, 0x78, 0xd6, - 0xab, 0x2e, 0x9e, 0x95, 0x09, 0x05, 0xe6, 0xf8, 0x5c, 0x06, 0x01, 0xad, 0xdd, 0x21, 0x40, 0x8b, - 0x01, 0x50, 0xcf, 0x84, 0xaa, 0x98, 0x80, 0x68, 0xed, 0x0e, 0x21, 0x5a, 0xb9, 0x09, 0x0a, 0x27, - 0x40, 0x5a, 0x7f, 0x33, 0x1a, 0xd2, 0x0a, 0x07, 0x9d, 0xf8, 0x30, 0xa7, 0xc3, 0xb4, 0xe4, 0x10, - 0x4c, 0xab, 0x10, 0x8a, 0xbf, 0x30, 0xf5, 0x53, 0x83, 0x5a, 0x07, 0x23, 0x40, 0x2d, 0x06, 0x3f, - 0xdd, 0x0c, 0x55, 0x3e, 0x05, 0xaa, 0x75, 0x30, 0x02, 0xd5, 0x9a, 0x9f, 0xa8, 0x76, 0x22, 0xac, - 0x75, 0x2f, 0x08, 0x6b, 0xa1, 0x09, 0x7b, 0x2c, 0x14, 0xd7, 0x3a, 0x0a, 0xc3, 0xb5, 0x18, 0xf6, - 0xf4, 0x7c, 0xa8, 0xc6, 0x19, 0x80, 0xad, 0xdd, 0x21, 0x60, 0x6b, 0x71, 0x82, 0xa5, 0x4d, 0x8b, - 0x6c, 0x3d, 0x4b, 0x32, 0xa5, 0x01, 0x57, 0x4d, 0x0e, 0x59, 0xd8, 0x34, 0x75, 0x93, 0x63, 0x54, - 0xac, 0x21, 0xde, 0x84, 0xac, 0xdf, 0x2d, 0x8f, 0x41, 0xc1, 0xe8, 0x61, 0xd6, 0xe7, 0x8a, 0xc5, - 0xff, 0x13, 0x3c, 0x59, 0x7a, 0xd0, 0xf7, 0xa3, 0x1d, 0x69, 0x8e, 0x76, 0xf8, 0xb0, 0xb1, 0x68, - 0x10, 0x1b, 0x5b, 0x86, 0x8c, 0x3f, 0xc0, 0x71, 0xd8, 0x4b, 
0xf1, 0x02, 0xdb, 0x2d, 0x98, 0xa7, - 0xe9, 0x0a, 0x43, 0xd0, 0x78, 0xb4, 0x8d, 0xd3, 0x1c, 0xa0, 0x40, 0x3a, 0xd8, 0x2a, 0xb0, 0x38, - 0xfb, 0x02, 0x2c, 0xf8, 0x78, 0xdd, 0xc3, 0x2f, 0xc3, 0x7e, 0x8a, 0x2e, 0x77, 0x85, 0x9d, 0x82, - 0xef, 0xc7, 0x53, 0x6a, 0x11, 0x4b, 0xd7, 0x78, 0x36, 0x34, 0x3a, 0xb0, 0x8b, 0x3f, 0x8a, 0x7a, - 0xcb, 0xe8, 0x81, 0x6a, 0xa3, 0xf0, 0x2f, 0xe1, 0x3b, 0xe3, 0x5f, 0xfe, 0x93, 0x7a, 0x2c, 0x70, - 0x52, 0x47, 0xef, 0xc0, 0x62, 0x00, 0x1a, 0x93, 0x7b, 0x14, 0xf6, 0xa2, 0xf9, 0xc6, 0x0c, 0x08, - 0x59, 0xc4, 0x97, 0xa4, 0xba, 0x3d, 0xe8, 0x5d, 0xb8, 0x42, 0x53, 0xa0, 0x81, 0xc9, 0x3b, 0xef, - 0xc0, 0xc3, 0x6e, 0xd8, 0x99, 0x90, 0x2f, 0xcb, 0x91, 0x2e, 0x11, 0x1d, 0x01, 0x12, 0x57, 0x1f, - 0x82, 0x9b, 0x1d, 0x87, 0xe1, 0x66, 0x3f, 0x17, 0x3c, 0xe3, 0x72, 0x91, 0xb3, 0xa6, 0xae, 0x62, - 0x8e, 0x13, 0xd0, 0x67, 0x92, 0xd9, 0xb6, 0xf5, 0x13, 0x8e, 0x06, 0x90, 0x47, 0xc2, 0xe5, 0x26, - 0x01, 0x69, 0x1e, 0xe3, 0x5d, 0x88, 0x81, 0xa5, 0x8b, 0x1c, 0x62, 0x28, 0x42, 0xec, 0x09, 0x66, - 0x21, 0x3b, 0x2b, 0x91, 0x47, 0xc2, 0x47, 0x77, 0x0b, 0x4f, 0xfb, 0x58, 0x03, 0xdd, 0x85, 0x34, - 0xbd, 0x74, 0x94, 0x75, 0xc3, 0xe2, 0x91, 0x35, 0x90, 0x21, 0xb3, 0xeb, 0xc3, 0xd5, 0x3d, 0xc2, - 0xb3, 0x6b, 0x58, 0x52, 0xca, 0xe0, 0x4f, 0xbe, 0x3c, 0x35, 0x1d, 0xc8, 0x53, 0xaf, 0x42, 0x9a, - 0x8c, 0xde, 0x32, 0x94, 0x26, 0xa6, 0x21, 0x32, 0x2d, 0x79, 0x04, 0xf1, 0x31, 0xa0, 0xe1, 0x80, - 0x8f, 0xea, 0x90, 0xc4, 0xa7, 0xb8, 0x6b, 0xb3, 0x34, 0x3e, 0xb3, 0x7e, 0x71, 0xf8, 0x88, 0x40, - 0xba, 0x37, 0x4a, 0xe4, 0x03, 0xff, 0xf8, 0xab, 0xe5, 0x22, 0xe3, 0x7e, 0x5e, 0xef, 0x68, 0x36, - 0xee, 0x18, 0xf6, 0x99, 0xc4, 0xe5, 0xc5, 0xff, 0x8d, 0x42, 0x61, 0x20, 0x11, 0x18, 0xb9, 0xb6, - 0xce, 0xde, 0x8d, 0xfa, 0x90, 0xca, 0xe9, 0xd6, 0xfb, 0x1a, 0xc0, 0x89, 0x62, 0xc9, 0x1f, 0x2a, - 0x5d, 0x1b, 0xab, 0x7c, 0xd1, 0xd3, 0x27, 0x8a, 0xf5, 0x16, 0x25, 0x10, 0x0b, 0x27, 0xdd, 0x3d, - 0x0b, 0xab, 0x1c, 0x63, 0x9d, 0x3b, 0x51, 0xac, 0x03, 0x0b, 0xab, 0xbe, 0x59, 0xce, 0x9d, 0x6f, - 
0x96, 0xc1, 0x35, 0x4e, 0x0d, 0xac, 0xb1, 0x0f, 0x48, 0x4a, 0xfb, 0x81, 0x24, 0x54, 0x86, 0x94, - 0x61, 0x6a, 0xba, 0xa9, 0xd9, 0x67, 0xf4, 0xc3, 0xc4, 0x24, 0xb7, 0x8d, 0xae, 0x43, 0xae, 0x83, - 0x3b, 0x86, 0xae, 0xb7, 0x65, 0xe6, 0x35, 0x33, 0x54, 0x34, 0xcb, 0x89, 0x35, 0xea, 0x3c, 0x3f, - 0xf1, 0x79, 0x08, 0x0f, 0x30, 0xfc, 0x7e, 0x97, 0x77, 0x69, 0xc4, 0xf2, 0xfa, 0x28, 0x64, 0x12, - 0x03, 0xeb, 0xeb, 0xb6, 0x7f, 0x5b, 0x0b, 0x2c, 0xfe, 0x34, 0x0a, 0xc5, 0xc1, 0x24, 0x0f, 0xbd, - 0x0d, 0x97, 0x06, 0x1c, 0x25, 0xf7, 0x2e, 0x16, 0x3f, 0x20, 0x4c, 0xe1, 0x2f, 0x2f, 0x04, 0xfd, - 0x25, 0xf3, 0x2e, 0x96, 0x6f, 0x5e, 0xb1, 0x73, 0xce, 0x6b, 0x82, 0x1f, 0x54, 0xcf, 0xe9, 0x07, - 0xc3, 0x7c, 0x38, 0x9e, 0xf5, 0x96, 0x63, 0x84, 0x0f, 0x17, 0xb7, 0x20, 0x1f, 0x4c, 0x8b, 0x47, - 0x5a, 0xd9, 0x75, 0xc8, 0x99, 0xd8, 0x26, 0x13, 0x0b, 0xa0, 0x12, 0x59, 0x46, 0xe4, 0xfe, 0x77, - 0x0f, 0x2e, 0x8c, 0x4c, 0x8f, 0xd1, 0x1d, 0x48, 0x7b, 0x99, 0x35, 0xf3, 0x45, 0x63, 0x10, 0x68, - 0x8f, 0x57, 0xfc, 0x37, 0xc1, 0x53, 0x19, 0xc4, 0xb4, 0x6b, 0x90, 0x34, 0xb1, 0xd5, 0x6b, 0x33, - 0x94, 0x39, 0xbf, 0xfe, 0xc2, 0x74, 0x89, 0x35, 0xa1, 0xf6, 0xda, 0xb6, 0xc4, 0x85, 0xc5, 0xc7, - 0x90, 0x64, 0x14, 0x94, 0x81, 0xb9, 0x83, 0x9d, 0x07, 0x3b, 0xbb, 0x6f, 0xed, 0x14, 0x23, 0x08, - 0x20, 0x59, 0xa9, 0x56, 0x6b, 0x7b, 0x8d, 0xa2, 0x80, 0xd2, 0x90, 0xa8, 0x6c, 0xec, 0x4a, 0x8d, - 0x62, 0x94, 0x90, 0xa5, 0xda, 0xfd, 0x5a, 0xb5, 0x51, 0x8c, 0xa1, 0x79, 0xc8, 0xb1, 0x67, 0xf9, - 0xde, 0xae, 0xf4, 0xb0, 0xd2, 0x28, 0xc6, 0x7d, 0xa4, 0xfd, 0xda, 0xce, 0x66, 0x4d, 0x2a, 0x26, - 0xc4, 0x97, 0xe0, 0x72, 0x68, 0x2a, 0xee, 0x01, 0xd6, 0x82, 0x0f, 0xb0, 0x16, 0xff, 0x3e, 0x0a, - 0xe5, 0xf0, 0xfc, 0x1a, 0xdd, 0x1f, 0x98, 0xf8, 0xfa, 0x0c, 0xc9, 0xf9, 0xc0, 0xec, 0xd1, 0xd3, - 0x90, 0x37, 0xf1, 0x31, 0xb6, 0x9b, 0x2d, 0x96, 0xef, 0x33, 0x44, 0x3b, 0x27, 0xe5, 0x38, 0x95, - 0x0a, 0x59, 0x8c, 0xed, 0x7d, 0xdc, 0xb4, 0x65, 0xe6, 0xf2, 0xd8, 0x86, 0x49, 0x13, 0x36, 0x42, - 0xdd, 0x67, 0x44, 0xf1, 0xbd, 0x99, 
0xd6, 0x32, 0x0d, 0x09, 0xa9, 0xd6, 0x90, 0xde, 0x2e, 0xc6, - 0x10, 0x82, 0x3c, 0x7d, 0x94, 0xf7, 0x77, 0x2a, 0x7b, 0xfb, 0xf5, 0x5d, 0xb2, 0x96, 0x0b, 0x50, - 0x70, 0xd6, 0xd2, 0x21, 0x26, 0xc4, 0xff, 0x8a, 0xc2, 0xa5, 0x90, 0xd3, 0x01, 0xba, 0x0b, 0x60, - 0xf7, 0x65, 0x13, 0x37, 0x75, 0x53, 0x0d, 0x37, 0xb2, 0x46, 0x5f, 0xa2, 0x1c, 0x52, 0xda, 0xe6, - 0x4f, 0xd6, 0x98, 0x7b, 0x0e, 0xf4, 0x3a, 0x57, 0x4a, 0x66, 0xe5, 0xb8, 0x89, 0x6b, 0x23, 0x80, - 0x36, 0xdc, 0x24, 0x8a, 0xe9, 0xda, 0x52, 0xc5, 0x94, 0x1f, 0x3d, 0x84, 0x79, 0x6f, 0xdf, 0x3a, - 0x5e, 0x8b, 0xa1, 0x75, 0x2b, 0xe1, 0x9b, 0x96, 0xed, 0x4b, 0xa9, 0x78, 0x1a, 0x24, 0x58, 0xe3, - 0x5c, 0x61, 0xe2, 0x7c, 0xae, 0x50, 0xfc, 0xc7, 0x98, 0x7f, 0x61, 0x83, 0x87, 0xa1, 0x5d, 0x48, - 0x5a, 0xb6, 0x62, 0xf7, 0x2c, 0x6e, 0x70, 0x77, 0xa6, 0x3d, 0x59, 0xad, 0x3a, 0x0f, 0xfb, 0x54, - 0x5c, 0xe2, 0x6a, 0xfe, 0xb4, 0xde, 0x96, 0xf8, 0x32, 0xe4, 0x83, 0x8b, 0x13, 0xbe, 0x65, 0x3c, - 0x9f, 0x13, 0x15, 0xdb, 0xb0, 0x30, 0x02, 0xd4, 0x45, 0x77, 0xf8, 0x0d, 0x25, 0xfb, 0x3e, 0xd7, - 0x87, 0x47, 0x15, 0x60, 0xf7, 0x2e, 0x2a, 0x49, 0x3c, 0xf6, 0xce, 0xa4, 0xec, 0x53, 0x78, 0x04, - 0xb1, 0xe9, 0x25, 0x95, 0x3e, 0xa8, 0x7c, 0x04, 0x00, 0x2d, 0x9c, 0x03, 0x80, 0xfe, 0x27, 0x01, - 0xae, 0x8c, 0x39, 0xf5, 0xa2, 0x47, 0x03, 0xd6, 0xf7, 0xea, 0x2c, 0x67, 0xe6, 0x55, 0x46, 0x0b, - 0xda, 0x9f, 0x78, 0x1b, 0xb2, 0x7e, 0xfa, 0x74, 0x4b, 0xff, 0xb3, 0x98, 0x17, 0x89, 0x82, 0x48, - 0xf9, 0xf7, 0x96, 0x65, 0x0f, 0x58, 0x7f, 0x74, 0x46, 0xeb, 0x1f, 0x63, 0xae, 0xf1, 0x73, 0x66, - 0x4a, 0xfe, 0x1d, 0x9b, 0x08, 0xee, 0xd8, 0xa1, 0xc4, 0x20, 0x39, 0x9c, 0x18, 0xfc, 0x41, 0xe7, - 0x47, 0x3f, 0x11, 0x00, 0x7c, 0x65, 0x08, 0x8b, 0x90, 0x30, 0xf5, 0x5e, 0x57, 0xa5, 0xe6, 0x98, - 0x90, 0x58, 0x03, 0x2d, 0x43, 0xe6, 0x83, 0x9e, 0x6e, 0xf6, 0x3a, 0xfe, 0x33, 0x38, 0x30, 0x12, - 0x5d, 0xa6, 0x1b, 0x50, 0x60, 0xa0, 0x82, 0xa5, 0x9d, 0x74, 0x15, 0xbb, 0x67, 0x62, 0x7e, 0x3b, - 0x91, 0xa7, 0xe4, 0x7d, 0x87, 0x4a, 0x18, 0x59, 0xd9, 0x89, 0xc7, 0xc8, 
0x56, 0x3c, 0x4f, 0xc9, - 0x1e, 0xe3, 0x3b, 0x70, 0xd9, 0x6e, 0x99, 0xd8, 0x6a, 0xe9, 0x6d, 0x55, 0x1e, 0xdc, 0x91, 0x49, - 0x6a, 0x3b, 0xcb, 0x13, 0x3c, 0x81, 0x74, 0xc9, 0xd5, 0x70, 0x18, 0xdc, 0x95, 0xbf, 0x10, 0x00, - 0x0d, 0x5f, 0x52, 0xff, 0x40, 0x26, 0xff, 0x11, 0x24, 0xe8, 0x8e, 0x25, 0x89, 0xb0, 0xeb, 0x57, - 0xd3, 0xdc, 0x65, 0xbe, 0x0b, 0xa0, 0xd8, 0xb6, 0xa9, 0x1d, 0xf5, 0xd8, 0x11, 0x24, 0x36, 0x12, - 0x68, 0xa6, 0xf2, 0x15, 0x87, 0x6f, 0xe3, 0x2a, 0xdf, 0xfa, 0x8b, 0x9e, 0xa8, 0x6f, 0xfb, 0xfb, - 0x14, 0x8a, 0x3b, 0x90, 0x0f, 0xca, 0x3a, 0x90, 0x03, 0x1b, 0x43, 0x10, 0x72, 0x60, 0x50, 0x18, - 0x87, 0x1c, 0x5c, 0xc0, 0x22, 0xc6, 0x8a, 0x7c, 0x68, 0x43, 0xfc, 0xa5, 0x00, 0x59, 0xbf, 0xc3, - 0xf8, 0xa1, 0x9d, 0xda, 0xc5, 0x4f, 0x04, 0x48, 0xb9, 0x93, 0x0f, 0xa9, 0xb0, 0xf1, 0xd6, 0x2e, - 0xea, 0xaf, 0x27, 0x61, 0x25, 0x3b, 0x31, 0xb7, 0x10, 0xe8, 0x35, 0x37, 0xf3, 0x0e, 0xbb, 0x2d, - 0xf2, 0xaf, 0xb4, 0x53, 0x0b, 0xc5, 0x0f, 0x1a, 0x7f, 0xc7, 0xc7, 0x41, 0x52, 0x4e, 0xf4, 0x17, - 0x90, 0x54, 0x9a, 0xee, 0x1d, 0x59, 0x7e, 0xc4, 0xa5, 0x89, 0xc3, 0xba, 0xda, 0xe8, 0x57, 0x28, - 0xa7, 0xc4, 0x25, 0xf8, 0xa8, 0xa2, 0x6e, 0x21, 0xd1, 0x1b, 0x44, 0x2f, 0xe3, 0x09, 0x46, 0xb2, - 0x3c, 0xc0, 0xc1, 0xce, 0xc3, 0xdd, 0xcd, 0xad, 0x7b, 0x5b, 0xb5, 0x4d, 0x9e, 0x7b, 0x6f, 0x6e, - 0xd6, 0x36, 0x8b, 0x51, 0xc2, 0x27, 0xd5, 0x1e, 0xee, 0x1e, 0xd6, 0x36, 0x8b, 0x31, 0xb1, 0x02, - 0x69, 0xd7, 0x21, 0xd2, 0x52, 0x31, 0xfd, 0x43, 0x5e, 0x2c, 0x13, 0x93, 0x58, 0x03, 0x2d, 0x41, - 0xc6, 0x7f, 0xed, 0xc8, 0x36, 0x6f, 0xda, 0x70, 0xef, 0xd4, 0xff, 0x45, 0x80, 0xc2, 0x40, 0x3e, - 0x85, 0x5e, 0x83, 0x39, 0xa3, 0x77, 0x24, 0x3b, 0xb6, 0x3b, 0x70, 0x9d, 0xeb, 0x00, 0x60, 0xbd, - 0xa3, 0xb6, 0xd6, 0x7c, 0x80, 0xcf, 0xb8, 0x03, 0x4e, 0x1a, 0xbd, 0xa3, 0x07, 0xcc, 0xc4, 0xd9, - 0x30, 0xa2, 0x63, 0x86, 0x11, 0x1b, 0x18, 0x06, 0xba, 0x01, 0xd9, 0xae, 0xae, 0x62, 0x59, 0x51, - 0x55, 0x13, 0x5b, 0x2c, 0xec, 0xa5, 0xb9, 0xe6, 0x0c, 0xe9, 0xa9, 0xb0, 0x0e, 0xf1, 0x6b, 0x01, - 0xd0, 0x70, 
0x10, 0x40, 0xfb, 0xa3, 0xf2, 0x47, 0x61, 0xba, 0xfc, 0x91, 0x7f, 0xee, 0xe1, 0x2c, - 0xb2, 0x01, 0x8b, 0x9e, 0xab, 0x32, 0xe8, 0x7c, 0xe9, 0xa2, 0x44, 0xa7, 0x5c, 0x94, 0x88, 0x84, - 0x5c, 0x79, 0xb7, 0x67, 0xa2, 0xcf, 0x15, 0x0d, 0x28, 0x35, 0x86, 0xc4, 0xf8, 0x3c, 0xc3, 0x86, - 0x24, 0x9c, 0x67, 0x48, 0xe2, 0x6d, 0x28, 0x3e, 0x72, 0xdf, 0xcf, 0xdf, 0x34, 0x30, 0x4c, 0x61, - 0x68, 0x98, 0xa7, 0x90, 0x22, 0xde, 0x97, 0x46, 0x97, 0xbf, 0x82, 0xb4, 0xbb, 0x7a, 0x6e, 0xb5, - 0x69, 0xe8, 0xb2, 0xf3, 0x91, 0x78, 0x22, 0xe8, 0x16, 0xcc, 0x93, 0xb8, 0xe1, 0xd4, 0x27, 0xb0, - 0xcb, 0x90, 0x28, 0xf5, 0x86, 0x05, 0xd6, 0xb1, 0xed, 0x20, 0xf8, 0x24, 0xed, 0x2c, 0x3a, 0x01, - 0xee, 0x77, 0x31, 0x00, 0x72, 0xa6, 0x1e, 0xb8, 0x13, 0x62, 0xdf, 0x30, 0x17, 0xc8, 0x8f, 0xc5, - 0x8f, 0xa3, 0x90, 0xf1, 0x55, 0x3d, 0xa0, 0x3f, 0x0f, 0xa4, 0xfa, 0x2b, 0xe3, 0x2a, 0x24, 0x7c, - 0x79, 0x7e, 0x60, 0x62, 0xd1, 0xd9, 0x27, 0x16, 0x56, 0x8c, 0xe2, 0x14, 0x4f, 0xc4, 0x67, 0x2e, - 0x9e, 0x78, 0x1e, 0x90, 0xad, 0xdb, 0x4a, 0x9b, 0x04, 0x6f, 0xad, 0x7b, 0x22, 0xb3, 0xdd, 0xce, - 0x02, 0x48, 0x91, 0xf6, 0x1c, 0xd2, 0x8e, 0x3d, 0x42, 0x17, 0xff, 0x55, 0x80, 0x94, 0x8b, 0xfc, - 0xcc, 0x5a, 0x5f, 0x78, 0x11, 0x92, 0x1c, 0xdc, 0x60, 0x05, 0x86, 0xbc, 0x35, 0xb2, 0x4a, 0xa4, - 0x0c, 0xa9, 0x0e, 0xb6, 0x15, 0x1a, 0x0d, 0x59, 0xe6, 0xe1, 0xb6, 0xbf, 0x73, 0xf5, 0xc6, 0xad, - 0x57, 0x21, 0xe3, 0xab, 0x11, 0x25, 0x91, 0x75, 0xa7, 0xf6, 0x56, 0x31, 0x52, 0x9e, 0xfb, 0xf4, - 0xf3, 0x95, 0xd8, 0x0e, 0xfe, 0x10, 0x95, 0x88, 0x3b, 0xae, 0xd6, 0x6b, 0xd5, 0x07, 0x45, 0xa1, - 0x9c, 0xf9, 0xf4, 0xf3, 0x95, 0x39, 0x09, 0xd3, 0x3b, 0xf6, 0x5b, 0x0f, 0xa0, 0x30, 0xf0, 0x45, - 0x83, 0x3e, 0x1e, 0x41, 0x7e, 0xf3, 0x60, 0x6f, 0x7b, 0xab, 0x5a, 0x69, 0xd4, 0xe4, 0xc3, 0xdd, - 0x46, 0xad, 0x28, 0xa0, 0x4b, 0xb0, 0xb0, 0xbd, 0xf5, 0x66, 0xbd, 0x21, 0x57, 0xb7, 0xb7, 0x6a, - 0x3b, 0x0d, 0xb9, 0xd2, 0x68, 0x54, 0xaa, 0x0f, 0x8a, 0xd1, 0xf5, 0x7f, 0xce, 0x40, 0xa1, 0xb2, - 0x51, 0xdd, 0xaa, 0x18, 0x46, 0x5b, 0x6b, 0x2a, 
0x34, 0x62, 0x54, 0x21, 0x4e, 0x2f, 0xec, 0xc6, - 0xfe, 0x2d, 0x54, 0x1e, 0x5f, 0x84, 0x81, 0xee, 0x41, 0x82, 0xde, 0xe5, 0xa1, 0xf1, 0xbf, 0x0f, - 0x95, 0x27, 0x54, 0x65, 0x90, 0xc1, 0xd0, 0x7d, 0x38, 0xf6, 0x7f, 0xa2, 0xf2, 0xf8, 0x22, 0x0d, - 0xb4, 0x0d, 0x73, 0xce, 0x0d, 0xc5, 0xa4, 0x3f, 0x73, 0xca, 0x13, 0xab, 0x1d, 0xc8, 0xd4, 0xd8, - 0x4d, 0xd2, 0xf8, 0x5f, 0x8d, 0xca, 0x13, 0xca, 0x37, 0xd0, 0x16, 0x24, 0x39, 0xba, 0x3a, 0xe1, - 0x2f, 0x9b, 0xf2, 0xa4, 0xaa, 0x05, 0x24, 0x41, 0xda, 0xbb, 0x47, 0x9c, 0xfc, 0x03, 0x55, 0x79, - 0x8a, 0xca, 0x14, 0xf4, 0x18, 0x72, 0x41, 0xc4, 0x76, 0xba, 0x3f, 0x79, 0xca, 0x53, 0xd6, 0x47, - 0x10, 0xfd, 0x41, 0xf8, 0x76, 0xba, 0x3f, 0x7b, 0xca, 0x53, 0x96, 0x4b, 0xa0, 0xf7, 0x61, 0x7e, - 0x18, 0x5e, 0x9d, 0xfe, 0x47, 0x9f, 0xf2, 0x0c, 0x05, 0x14, 0xa8, 0x03, 0x68, 0x04, 0x2c, 0x3b, - 0xc3, 0x7f, 0x3f, 0xe5, 0x59, 0xea, 0x29, 0x90, 0x0a, 0x85, 0x41, 0xa8, 0x73, 0xda, 0xff, 0x80, - 0xca, 0x53, 0xd7, 0x56, 0xb0, 0xb7, 0x04, 0x71, 0xbf, 0x69, 0xff, 0x0b, 0x2a, 0x4f, 0x5d, 0x6a, - 0x81, 0x0e, 0x00, 0x7c, 0x48, 0xd2, 0x14, 0xff, 0x09, 0x95, 0xa7, 0x29, 0xba, 0x40, 0x06, 0x2c, - 0x8c, 0x82, 0x8e, 0x66, 0xf9, 0x6d, 0xa8, 0x3c, 0x53, 0x2d, 0x06, 0xb1, 0xe7, 0x20, 0x08, 0x34, - 0xdd, 0x6f, 0x44, 0xe5, 0x29, 0x8b, 0x32, 0x36, 0x6a, 0x5f, 0x7c, 0xb3, 0x24, 0x7c, 0xf9, 0xcd, - 0x92, 0xf0, 0xf5, 0x37, 0x4b, 0xc2, 0x67, 0xdf, 0x2e, 0x45, 0xbe, 0xfc, 0x76, 0x29, 0xf2, 0x3f, - 0xdf, 0x2e, 0x45, 0xfe, 0xfa, 0xb9, 0x13, 0xcd, 0x6e, 0xf5, 0x8e, 0x56, 0x9b, 0x7a, 0x67, 0xcd, - 0xff, 0xc7, 0xe9, 0xa8, 0x5f, 0x64, 0x8f, 0x92, 0x34, 0x12, 0xdf, 0xfe, 0x75, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xea, 0x82, 0x7b, 0x89, 0x42, 0x3b, 0x00, 0x00, + // 3664 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4b, 0x70, 0x1b, 0x47, + 0x7a, 0xc6, 0xe0, 0x8d, 0x1f, 0xaf, 0x61, 0x93, 0x92, 0x20, 0x48, 0x22, 0xe9, 0x51, 0x6c, 0xc9, + 0xb2, 0x4d, 0xda, 0x52, 0x6c, 0xc9, 0xb1, 0x13, 0x17, 0x08, 0x42, 0x01, 
0x29, 0x8a, 0xa4, 0x86, + 0x20, 0x5d, 0x8e, 0x63, 0x8f, 0x87, 0x40, 0x93, 0x18, 0x0b, 0xc0, 0x8c, 0x67, 0x06, 0x34, 0xe8, + 0x6b, 0xe2, 0x8b, 0x0f, 0x29, 0xdf, 0x92, 0x8b, 0x6f, 0x49, 0x55, 0x2e, 0xc9, 0x3d, 0x95, 0xaa, + 0x1c, 0x72, 0x52, 0xed, 0xc9, 0x7b, 0xd9, 0xda, 0xcb, 0x7a, 0x5d, 0xf6, 0x65, 0x6b, 0x8f, 0x7b, + 0xd9, 0xaa, 0x3d, 0x6c, 0x6d, 0xf5, 0x63, 0x5e, 0x00, 0x06, 0x0f, 0x69, 0x6b, 0x77, 0x6b, 0xbd, + 0x37, 0xf4, 0xdf, 0xff, 0xff, 0xf7, 0x63, 0xba, 0xff, 0xc7, 0xf7, 0x37, 0xe0, 0x8a, 0x8d, 0x7b, + 0x2d, 0x6c, 0x76, 0xb5, 0x9e, 0xbd, 0xae, 0x1e, 0x37, 0xb5, 0x75, 0xfb, 0xdc, 0xc0, 0xd6, 0x9a, + 0x61, 0xea, 0xb6, 0x8e, 0x8a, 0x5e, 0xe7, 0x1a, 0xe9, 0x2c, 0x5f, 0xf3, 0x71, 0x37, 0xcd, 0x73, + 0xc3, 0xd6, 0xd7, 0x0d, 0x53, 0xd7, 0x4f, 0x18, 0x7f, 0xd9, 0xaf, 0x8c, 0xea, 0x59, 0x6f, 0xa9, + 0x56, 0x9b, 0x77, 0x5e, 0x1d, 0xe9, 0xf4, 0x0d, 0x15, 0xe8, 0xe5, 0x9a, 0x1f, 0xe3, 0x73, 0xa7, + 0xf7, 0xda, 0x88, 0xac, 0xa1, 0x9a, 0x6a, 0xd7, 0xe9, 0x5e, 0xf6, 0x75, 0x9f, 0x61, 0xd3, 0xd2, + 0xf4, 0x5e, 0x40, 0xf9, 0xca, 0xa9, 0xae, 0x9f, 0x76, 0xf0, 0x3a, 0x6d, 0x1d, 0xf7, 0x4f, 0xd6, + 0x6d, 0xad, 0x8b, 0x2d, 0x5b, 0xed, 0x1a, 0x9c, 0x61, 0xe9, 0x54, 0x3f, 0xd5, 0xe9, 0xcf, 0x75, + 0xf2, 0x8b, 0x51, 0xa5, 0xff, 0xc9, 0x40, 0x4a, 0xc6, 0x9f, 0xf4, 0xb1, 0x65, 0xa3, 0xdb, 0x10, + 0xc7, 0xcd, 0xb6, 0x5e, 0x12, 0x56, 0x85, 0x9b, 0xd9, 0xdb, 0x57, 0xd7, 0x86, 0x76, 0x66, 0x8d, + 0xf3, 0xd5, 0x9a, 0x6d, 0xbd, 0x1e, 0x91, 0x29, 0x2f, 0x7a, 0x1d, 0x12, 0x27, 0x9d, 0xbe, 0xd5, + 0x2e, 0x45, 0xa9, 0xd0, 0xb5, 0x30, 0xa1, 0xfb, 0x84, 0xa9, 0x1e, 0x91, 0x19, 0x37, 0x19, 0x4a, + 0xeb, 0x9d, 0xe8, 0xa5, 0xd8, 0xe4, 0xa1, 0xb6, 0x7a, 0x27, 0x74, 0x28, 0xc2, 0x8b, 0x36, 0x00, + 0xb4, 0x9e, 0x66, 0x2b, 0xcd, 0xb6, 0xaa, 0xf5, 0x4a, 0x71, 0x2a, 0xf9, 0x5c, 0xb8, 0xa4, 0x66, + 0x57, 0x09, 0x63, 0x3d, 0x22, 0x67, 0x34, 0xa7, 0x41, 0xa6, 0xfb, 0x49, 0x1f, 0x9b, 0xe7, 0xa5, + 0xc4, 0xe4, 0xe9, 0x3e, 0x22, 0x4c, 0x64, 0xba, 0x94, 0x1b, 0xbd, 0x0d, 0xe9, 0x66, 0x1b, 0x37, + 0x1f, 0x2b, 
0xf6, 0xa0, 0x94, 0xa2, 0x92, 0x2b, 0x61, 0x92, 0x55, 0xc2, 0xd7, 0x18, 0xd4, 0x23, + 0x72, 0xaa, 0xc9, 0x7e, 0xa2, 0x7b, 0x90, 0x6c, 0xea, 0xdd, 0xae, 0x66, 0x97, 0x80, 0xca, 0x2e, + 0x87, 0xca, 0x52, 0xae, 0x7a, 0x44, 0xe6, 0xfc, 0x68, 0x17, 0x0a, 0x1d, 0xcd, 0xb2, 0x15, 0xab, + 0xa7, 0x1a, 0x56, 0x5b, 0xb7, 0xad, 0x52, 0x96, 0x6a, 0x78, 0x3e, 0x4c, 0xc3, 0x8e, 0x66, 0xd9, + 0x07, 0x0e, 0x73, 0x3d, 0x22, 0xe7, 0x3b, 0x7e, 0x02, 0xd1, 0xa7, 0x9f, 0x9c, 0x60, 0xd3, 0x55, + 0x58, 0xca, 0x4d, 0xd6, 0xb7, 0x47, 0xb8, 0x1d, 0x79, 0xa2, 0x4f, 0xf7, 0x13, 0xd0, 0xfb, 0xb0, + 0xd8, 0xd1, 0xd5, 0x96, 0xab, 0x4e, 0x69, 0xb6, 0xfb, 0xbd, 0xc7, 0xa5, 0x3c, 0x55, 0xfa, 0x62, + 0xe8, 0x24, 0x75, 0xb5, 0xe5, 0xa8, 0xa8, 0x12, 0x81, 0x7a, 0x44, 0x5e, 0xe8, 0x0c, 0x13, 0xd1, + 0x87, 0xb0, 0xa4, 0x1a, 0x46, 0xe7, 0x7c, 0x58, 0x7b, 0x81, 0x6a, 0xbf, 0x15, 0xa6, 0xbd, 0x42, + 0x64, 0x86, 0xd5, 0x23, 0x75, 0x84, 0x8a, 0x1a, 0x20, 0x1a, 0x26, 0x36, 0x54, 0x13, 0x2b, 0x86, + 0xa9, 0x1b, 0xba, 0xa5, 0x76, 0x4a, 0x45, 0xaa, 0xfb, 0x46, 0x98, 0xee, 0x7d, 0xc6, 0xbf, 0xcf, + 0xd9, 0xeb, 0x11, 0xb9, 0x68, 0x04, 0x49, 0x4c, 0xab, 0xde, 0xc4, 0x96, 0xe5, 0x69, 0x15, 0xa7, + 0x69, 0xa5, 0xfc, 0x41, 0xad, 0x01, 0x12, 0xaa, 0x41, 0x16, 0x0f, 0x88, 0xb8, 0x72, 0xa6, 0xdb, + 0xb8, 0xb4, 0x40, 0x15, 0x4a, 0xa1, 0x37, 0x94, 0xb2, 0x1e, 0xe9, 0x36, 0xae, 0x47, 0x64, 0xc0, + 0x6e, 0x0b, 0xa9, 0x70, 0xe1, 0x0c, 0x9b, 0xda, 0xc9, 0x39, 0x55, 0xa3, 0xd0, 0x1e, 0x62, 0x49, + 0x4a, 0x88, 0x2a, 0x7c, 0x29, 0x4c, 0xe1, 0x11, 0x15, 0x22, 0x2a, 0x6a, 0x8e, 0x48, 0x3d, 0x22, + 0x2f, 0x9e, 0x8d, 0x92, 0xc9, 0x11, 0x3b, 0xd1, 0x7a, 0x6a, 0x47, 0xfb, 0x0c, 0x2b, 0xc7, 0x1d, + 0xbd, 0xf9, 0xb8, 0xb4, 0x38, 0xf9, 0x88, 0xdd, 0xe7, 0xdc, 0x1b, 0x84, 0x99, 0x1c, 0xb1, 0x13, + 0x3f, 0x61, 0x23, 0x05, 0x89, 0x33, 0xb5, 0xd3, 0xc7, 0xdb, 0xf1, 0x74, 0x52, 0x4c, 0x6d, 0xc7, + 0xd3, 0x69, 0x31, 0xb3, 0x1d, 0x4f, 0x67, 0x44, 0x90, 0x6e, 0x40, 0xd6, 0x67, 0x92, 0x50, 0x09, + 0x52, 0x5d, 0x6c, 0x59, 0xea, 0x29, 0xa6, 0x16, 
0x2c, 0x23, 0x3b, 0x4d, 0xa9, 0x00, 0x39, 0xbf, + 0x19, 0x92, 0xbe, 0x14, 0x5c, 0x49, 0x62, 0x61, 0x88, 0x24, 0x37, 0xa9, 0x8e, 0x24, 0x6f, 0xa2, + 0xeb, 0x90, 0xa7, 0x8b, 0x50, 0x9c, 0x7e, 0x62, 0xe6, 0xe2, 0x72, 0x8e, 0x12, 0x8f, 0x38, 0xd3, + 0x0a, 0x64, 0x8d, 0xdb, 0x86, 0xcb, 0x12, 0xa3, 0x2c, 0x60, 0xdc, 0x36, 0x1c, 0x86, 0xe7, 0x20, + 0x47, 0x56, 0xec, 0x72, 0xc4, 0xe9, 0x20, 0x59, 0x42, 0xe3, 0x2c, 0xd2, 0x3f, 0xc7, 0x40, 0x1c, + 0x36, 0x5d, 0xe8, 0x1e, 0xc4, 0x89, 0x15, 0xe7, 0x06, 0xb9, 0xbc, 0xc6, 0x4c, 0xfc, 0x9a, 0x63, + 0xe2, 0xd7, 0x1a, 0x8e, 0x89, 0xdf, 0x48, 0x3f, 0xf9, 0x66, 0x25, 0xf2, 0xe5, 0xcf, 0x57, 0x04, + 0x99, 0x4a, 0xa0, 0xcb, 0xc4, 0x60, 0xa9, 0x5a, 0x4f, 0xd1, 0x5a, 0x74, 0xca, 0x19, 0x62, 0x8d, + 0x54, 0xad, 0xb7, 0xd5, 0x42, 0x3b, 0x20, 0x36, 0xf5, 0x9e, 0x85, 0x7b, 0x56, 0xdf, 0x52, 0x98, + 0x8b, 0xe1, 0x66, 0x38, 0x60, 0x4c, 0x99, 0x6f, 0xa9, 0x3a, 0x9c, 0xfb, 0x94, 0x51, 0x2e, 0x36, + 0x83, 0x04, 0xb4, 0x0b, 0xf9, 0x33, 0xb5, 0xa3, 0xb5, 0x54, 0x5b, 0x37, 0x15, 0x0b, 0xdb, 0xdc, + 0x2e, 0x5f, 0x1f, 0xf9, 0xda, 0x47, 0x0e, 0xd7, 0x01, 0xb6, 0x0f, 0x8d, 0x96, 0x6a, 0xe3, 0x8d, + 0xf8, 0x93, 0x6f, 0x56, 0x04, 0x39, 0x77, 0xe6, 0xeb, 0x41, 0x2f, 0x40, 0x51, 0x35, 0x0c, 0xc5, + 0xb2, 0x55, 0x1b, 0x2b, 0xc7, 0xe7, 0x36, 0xb6, 0xa8, 0xa9, 0xce, 0xc9, 0x79, 0xd5, 0x30, 0x0e, + 0x08, 0x75, 0x83, 0x10, 0xd1, 0xf3, 0x50, 0x20, 0x56, 0x5d, 0x53, 0x3b, 0x4a, 0x1b, 0x6b, 0xa7, + 0x6d, 0xbb, 0x94, 0x5c, 0x15, 0x6e, 0xc6, 0xe4, 0x3c, 0xa7, 0xd6, 0x29, 0x11, 0xad, 0xc1, 0xa2, + 0xc3, 0xd6, 0xd4, 0x4d, 0xec, 0xf0, 0x12, 0x1b, 0x9e, 0x97, 0x17, 0x78, 0x57, 0x55, 0x37, 0x31, + 0xe3, 0x97, 0x5a, 0xee, 0x49, 0xa1, 0x1e, 0x00, 0x21, 0x88, 0xb7, 0x54, 0x5b, 0xa5, 0x5f, 0x20, + 0x27, 0xd3, 0xdf, 0x84, 0x66, 0xa8, 0x76, 0x9b, 0xef, 0x2b, 0xfd, 0x8d, 0x2e, 0x42, 0x92, 0xab, + 0x8e, 0xd1, 0x69, 0xf0, 0x16, 0x5a, 0x82, 0x84, 0x61, 0xea, 0x67, 0x98, 0x6e, 0x4b, 0x5a, 0x66, + 0x0d, 0x49, 0x86, 0x42, 0xd0, 0x5b, 0xa0, 0x02, 0x44, 0xed, 0x01, 0x1f, 0x25, 0x6a, 
0x0f, 0xd0, + 0xab, 0x10, 0x27, 0x1f, 0x80, 0x8e, 0x51, 0x18, 0xe3, 0x1f, 0xb9, 0x5c, 0xe3, 0xdc, 0xc0, 0x32, + 0xe5, 0x94, 0x8a, 0x90, 0x0f, 0x78, 0x11, 0xe9, 0x22, 0x2c, 0x8d, 0x73, 0x0a, 0x52, 0xdb, 0xa5, + 0x07, 0x8c, 0x3b, 0x7a, 0x1d, 0xd2, 0xae, 0x57, 0x60, 0x07, 0xee, 0xf2, 0xc8, 0xb0, 0x0e, 0xb3, + 0xec, 0xb2, 0x92, 0x93, 0x46, 0x3e, 0x58, 0x5b, 0xe5, 0x31, 0x40, 0x4e, 0x4e, 0xa9, 0x86, 0x51, + 0x57, 0xad, 0xb6, 0xf4, 0x11, 0x94, 0xc2, 0x2c, 0xbe, 0x6f, 0xc3, 0x04, 0x7a, 0x5d, 0x9c, 0x0d, + 0xbb, 0x08, 0xc9, 0x13, 0xdd, 0xec, 0xaa, 0x36, 0x55, 0x96, 0x97, 0x79, 0x8b, 0x6c, 0x24, 0xb3, + 0xfe, 0x31, 0x4a, 0x66, 0x0d, 0x49, 0x81, 0xcb, 0xa1, 0x56, 0x9f, 0x88, 0x68, 0xbd, 0x16, 0x66, + 0xdb, 0x9a, 0x97, 0x59, 0xc3, 0x53, 0xc4, 0x26, 0xcb, 0x1a, 0x64, 0x58, 0x8b, 0xae, 0x95, 0xea, + 0xcf, 0xc8, 0xbc, 0x25, 0xfd, 0x7f, 0x1c, 0x2e, 0x8e, 0xb7, 0xfd, 0x68, 0x15, 0x72, 0x5d, 0x75, + 0xa0, 0xd8, 0x03, 0x7e, 0x4c, 0x05, 0xfa, 0xe1, 0xa1, 0xab, 0x0e, 0x1a, 0x03, 0x76, 0x46, 0x45, + 0x88, 0xd9, 0x03, 0xab, 0x14, 0x5d, 0x8d, 0xdd, 0xcc, 0xc9, 0xe4, 0x27, 0x3a, 0x84, 0x85, 0x8e, + 0xde, 0x54, 0x3b, 0x4a, 0x47, 0xb5, 0x6c, 0x85, 0x07, 0x05, 0xb1, 0x90, 0x1b, 0xc3, 0xac, 0x38, + 0x6e, 0xb1, 0xef, 0x49, 0x0c, 0x15, 0xbd, 0x31, 0x11, 0xb9, 0x48, 0x75, 0xec, 0xa8, 0xce, 0xa7, + 0x46, 0x9b, 0x90, 0xed, 0x6a, 0xd6, 0x31, 0x6e, 0xab, 0x67, 0x9a, 0x6e, 0x96, 0xe2, 0xab, 0xb1, + 0xb1, 0x41, 0xd5, 0x43, 0x8f, 0x87, 0x6b, 0xf2, 0x8b, 0xf9, 0x3e, 0x49, 0x22, 0x70, 0x86, 0x1d, + 0x2b, 0x94, 0x9c, 0xdb, 0x0a, 0xbd, 0x0a, 0x4b, 0x3d, 0x3c, 0xb0, 0x15, 0xf7, 0x86, 0x5b, 0xec, + 0x9c, 0xa4, 0xe8, 0xd6, 0x23, 0xd2, 0xe7, 0x9a, 0x05, 0x8b, 0x1c, 0x19, 0x74, 0x17, 0x4a, 0xf4, + 0x9e, 0x32, 0xe3, 0x45, 0x8c, 0x2c, 0x6e, 0x39, 0x97, 0xb6, 0x45, 0x3f, 0xe3, 0x05, 0xd2, 0x4f, + 0xcd, 0xe3, 0x0e, 0xed, 0xe5, 0x17, 0x7d, 0x1d, 0x96, 0x98, 0xbb, 0xc5, 0x26, 0xf1, 0xbb, 0xe4, + 0xb3, 0xd0, 0xa1, 0x30, 0x1d, 0x6a, 0xc1, 0xe9, 0xdb, 0x37, 0xf5, 0xc6, 0x80, 0x8e, 0xf4, 0xaa, + 0x2b, 0xd0, 0x52, 0xc8, 
0x01, 0x76, 0x6c, 0xf3, 0x09, 0x3d, 0x8e, 0xc8, 0xe9, 0xab, 0x18, 0xae, + 0x15, 0xbf, 0xeb, 0x79, 0x89, 0xd3, 0xd1, 0xe8, 0x91, 0x77, 0x79, 0x16, 0xd3, 0x75, 0x22, 0xd2, + 0xbf, 0xc4, 0x7c, 0x87, 0x28, 0xe8, 0xd7, 0xf9, 0x11, 0x11, 0xbc, 0x23, 0x72, 0xe0, 0x9b, 0x97, + 0xff, 0x94, 0xb0, 0xf8, 0xfa, 0xca, 0xa8, 0x25, 0x18, 0x3e, 0x1d, 0xee, 0xd4, 0xc3, 0x0f, 0x48, + 0xec, 0xe9, 0x0e, 0x08, 0x82, 0x38, 0xdd, 0xd3, 0x38, 0x33, 0x86, 0xe4, 0xf7, 0x9f, 0xc4, 0xa1, + 0x09, 0xfb, 0xf6, 0xe9, 0x90, 0x6f, 0x2f, 0xbd, 0x03, 0x0b, 0x23, 0x91, 0x92, 0xbb, 0x3a, 0x61, + 0xec, 0xea, 0xa2, 0xfe, 0xd5, 0x49, 0x3f, 0x12, 0xa0, 0x1c, 0x1e, 0x1a, 0x8d, 0x55, 0xf5, 0x1a, + 0x5c, 0xf0, 0x1c, 0xa5, 0x7f, 0x96, 0xcc, 0x0e, 0x21, 0xb7, 0xd3, 0x3b, 0xa2, 0x61, 0x4e, 0xe5, + 0x21, 0x14, 0x83, 0xe1, 0x9b, 0xc5, 0xaf, 0xfc, 0x5f, 0x85, 0xd8, 0x90, 0xc0, 0xec, 0xe4, 0xc2, + 0x99, 0xbf, 0x69, 0x49, 0xff, 0x1d, 0x77, 0x3d, 0x42, 0x20, 0x16, 0x1b, 0x73, 0x38, 0x1f, 0xc1, + 0x62, 0x0b, 0x37, 0xb5, 0xd6, 0xd3, 0x9e, 0xcd, 0x05, 0x2e, 0xfd, 0x83, 0x39, 0x9a, 0x7f, 0x9e, + 0xf6, 0x0c, 0x20, 0x2d, 0x63, 0xcb, 0x20, 0x3d, 0x68, 0x03, 0x32, 0x78, 0xd0, 0xc4, 0x86, 0xed, + 0x44, 0xcf, 0xe3, 0xf3, 0x12, 0xc6, 0x5d, 0x73, 0x38, 0x49, 0x56, 0xee, 0x8a, 0xa1, 0x3b, 0x1c, + 0x78, 0x08, 0xc7, 0x10, 0xb8, 0xb8, 0x1f, 0x79, 0x78, 0xc3, 0x41, 0x1e, 0x62, 0xa1, 0x49, 0x35, + 0x93, 0x1a, 0x82, 0x1e, 0xee, 0x70, 0xe8, 0x21, 0x3e, 0x65, 0xb0, 0x00, 0xf6, 0x50, 0x0d, 0x60, + 0x0f, 0x89, 0x29, 0xcb, 0x0c, 0x01, 0x1f, 0xde, 0x70, 0xc0, 0x87, 0xe4, 0x94, 0x19, 0x0f, 0xa1, + 0x0f, 0x7f, 0xeb, 0x43, 0x1f, 0xd2, 0x54, 0x74, 0x35, 0x54, 0x74, 0x0c, 0xfc, 0xf0, 0xa6, 0x0b, + 0x3f, 0x64, 0x43, 0xa1, 0x0b, 0x2e, 0x3c, 0x8c, 0x3f, 0xec, 0x8d, 0xe0, 0x0f, 0x0c, 0x2f, 0x78, + 0x21, 0x54, 0xc5, 0x14, 0x00, 0x62, 0x6f, 0x04, 0x80, 0xc8, 0x4f, 0x51, 0x38, 0x05, 0x81, 0xf8, + 0xc7, 0xf1, 0x08, 0x44, 0x38, 0x46, 0xc0, 0xa7, 0x39, 0x1b, 0x04, 0xa1, 0x84, 0x40, 0x10, 0xc5, + 0xd0, 0x74, 0x99, 0xa9, 0x9f, 0x19, 0x83, 0x38, 0x1c, 0x83, 
0x41, 0x30, 0xb4, 0xe0, 0x66, 0xa8, + 0xf2, 0x19, 0x40, 0x88, 0xc3, 0x31, 0x20, 0xc4, 0xc2, 0x54, 0xb5, 0x53, 0x51, 0x88, 0xfb, 0x41, + 0x14, 0x02, 0x85, 0x04, 0xae, 0xde, 0x6d, 0x0f, 0x81, 0x21, 0x8e, 0xc3, 0x60, 0x08, 0x06, 0x15, + 0xbc, 0x1c, 0xaa, 0x71, 0x0e, 0x1c, 0x62, 0x6f, 0x04, 0x87, 0x58, 0x9a, 0x72, 0xd2, 0x66, 0x07, + 0x22, 0x52, 0x62, 0x9a, 0x41, 0x10, 0xdb, 0xf1, 0x34, 0x88, 0x59, 0xe9, 0x45, 0x12, 0x4f, 0x0c, + 0x59, 0x38, 0x92, 0x68, 0x60, 0xd3, 0xd4, 0x4d, 0x0e, 0x29, 0xb0, 0x86, 0x74, 0x93, 0x24, 0x98, + 0x9e, 0x35, 0x9b, 0x00, 0x5a, 0xd0, 0x84, 0xce, 0x67, 0xc1, 0xa4, 0x9f, 0x09, 0x9e, 0x2c, 0x85, + 0x2d, 0xfc, 0xc9, 0x69, 0x86, 0x27, 0xa7, 0x3e, 0x28, 0x23, 0x1a, 0x84, 0x32, 0x56, 0x20, 0xeb, + 0xf7, 0x0b, 0x1c, 0xa5, 0x50, 0x3d, 0x7f, 0x70, 0x0b, 0x16, 0xa8, 0x53, 0x67, 0x80, 0x07, 0x77, + 0x52, 0x71, 0xea, 0x3a, 0x8b, 0xa4, 0x83, 0xed, 0x0b, 0x73, 0x4f, 0xaf, 0xc0, 0xa2, 0x8f, 0xd7, + 0x4d, 0x00, 0x59, 0xaa, 0x2e, 0xba, 0xdc, 0x15, 0x96, 0x09, 0x6e, 0xc7, 0xd3, 0x2d, 0x11, 0xcb, + 0xd7, 0x78, 0xcc, 0x30, 0xde, 0x1f, 0x4a, 0xbf, 0x88, 0x7a, 0xdb, 0xe8, 0x61, 0x20, 0xe3, 0xe0, + 0x0a, 0xe1, 0xa9, 0xe1, 0x0a, 0x7f, 0xb6, 0x1a, 0x0b, 0x64, 0xab, 0xe8, 0x7d, 0x58, 0x0a, 0x20, + 0x19, 0x4a, 0x9f, 0xa2, 0x14, 0xd4, 0x4d, 0xcf, 0x01, 0x68, 0x44, 0x7c, 0xa1, 0x9c, 0xdb, 0x83, + 0x3e, 0x80, 0x2b, 0x34, 0x72, 0x18, 0x5a, 0xbc, 0x33, 0x06, 0x1e, 0x35, 0xcc, 0xce, 0x82, 0x7c, + 0xc1, 0x81, 0x7c, 0x89, 0xe8, 0x08, 0x90, 0xb8, 0xfa, 0x10, 0x98, 0xe3, 0x24, 0x0c, 0xe6, 0xf8, + 0xb5, 0xe0, 0x1d, 0x2e, 0x17, 0xe8, 0x68, 0xea, 0x2d, 0xcc, 0x73, 0x65, 0xfa, 0x9b, 0xc4, 0x7f, + 0x1d, 0xfd, 0x94, 0x67, 0xc4, 0xe4, 0x27, 0xe1, 0x72, 0x7d, 0x67, 0x86, 0xbb, 0x46, 0x37, 0xcd, + 0x66, 0x51, 0x16, 0x4f, 0xb3, 0x45, 0x88, 0x3d, 0xc6, 0xcc, 0xd3, 0xe5, 0x64, 0xf2, 0x93, 0xf0, + 0xd1, 0xfb, 0xc3, 0xa3, 0x25, 0xd6, 0x40, 0xf7, 0x20, 0x43, 0xab, 0x2b, 0x8a, 0x6e, 0x58, 0xdc, + 0xb9, 0x05, 0xe2, 0x48, 0x56, 0x27, 0x59, 0xdb, 0x27, 0x3c, 0x7b, 0x86, 0x25, 0xa7, 0x0d, 0xfe, + 
0xcb, 0x17, 0xde, 0x65, 0x02, 0xe1, 0xdd, 0x55, 0xc8, 0x90, 0xd9, 0x5b, 0x86, 0xda, 0xc4, 0x14, + 0x70, 0xcf, 0xc8, 0x1e, 0x41, 0x7a, 0x22, 0x40, 0x71, 0xc8, 0x57, 0x8e, 0x5d, 0xbb, 0x73, 0xb7, + 0xa2, 0x3e, 0xe0, 0xe7, 0x1a, 0xc0, 0xa9, 0x6a, 0x29, 0x9f, 0xaa, 0x3d, 0x1b, 0xb7, 0xf8, 0x72, + 0x33, 0xa7, 0xaa, 0xf5, 0x2e, 0x25, 0x04, 0x07, 0x4e, 0x0f, 0x0d, 0xec, 0x43, 0x18, 0x32, 0x7e, + 0x84, 0x01, 0x95, 0x21, 0x6d, 0x98, 0x9a, 0x6e, 0x6a, 0xf6, 0x39, 0x9d, 0x6d, 0x4c, 0x76, 0xdb, + 0xdb, 0xf1, 0x74, 0x4c, 0x8c, 0x6f, 0xc7, 0xd3, 0x71, 0x31, 0xe1, 0xc2, 0x9f, 0xcc, 0xf6, 0x64, + 0xc5, 0x9c, 0xf4, 0xb9, 0xef, 0xbe, 0x6c, 0xe2, 0x8e, 0x76, 0x86, 0xcd, 0x39, 0x16, 0x33, 0xdb, + 0xc7, 0x5d, 0x1e, 0xb3, 0x64, 0x1f, 0x85, 0xcc, 0x9e, 0xb4, 0xfa, 0x16, 0x6e, 0x71, 0x00, 0xce, + 0x6d, 0xa3, 0x3a, 0x24, 0xf1, 0x19, 0xee, 0xd9, 0x56, 0x29, 0x45, 0x83, 0xfa, 0x8b, 0xa3, 0xd9, + 0x09, 0xe9, 0xde, 0x28, 0x91, 0x5b, 0xf3, 0xcb, 0x6f, 0x56, 0x44, 0xc6, 0xfd, 0xb2, 0xde, 0xd5, + 0x6c, 0xdc, 0x35, 0xec, 0x73, 0x99, 0xcb, 0x4f, 0xde, 0x59, 0xa9, 0x02, 0x85, 0x60, 0x00, 0x83, + 0xae, 0x43, 0xde, 0xc4, 0x36, 0xb9, 0x64, 0x81, 0xfc, 0x29, 0xc7, 0x88, 0xec, 0x0e, 0x6c, 0xc7, + 0xd3, 0x82, 0x18, 0xdd, 0x8e, 0xa7, 0xa3, 0x62, 0x4c, 0xda, 0x87, 0x0b, 0x63, 0x03, 0x18, 0x74, + 0x17, 0x32, 0x5e, 0xec, 0x23, 0xd0, 0x65, 0x4c, 0x40, 0xc5, 0x3c, 0x5e, 0xe9, 0xff, 0x04, 0x4f, + 0x65, 0x10, 0x67, 0xab, 0x41, 0xd2, 0xc4, 0x56, 0xbf, 0xc3, 0x90, 0xaf, 0xc2, 0xed, 0x57, 0x66, + 0x0b, 0x7d, 0x08, 0xb5, 0xdf, 0xb1, 0x65, 0x2e, 0x2c, 0x7d, 0x08, 0x49, 0x46, 0x41, 0x59, 0x48, + 0x1d, 0xee, 0x3e, 0xd8, 0xdd, 0x7b, 0x77, 0x57, 0x8c, 0x20, 0x80, 0x64, 0xa5, 0x5a, 0xad, 0xed, + 0x37, 0x44, 0x01, 0x65, 0x20, 0x51, 0xd9, 0xd8, 0x93, 0x1b, 0x62, 0x94, 0x90, 0xe5, 0xda, 0x76, + 0xad, 0xda, 0x10, 0x63, 0x68, 0x01, 0xf2, 0xec, 0xb7, 0x72, 0x7f, 0x4f, 0x7e, 0x58, 0x69, 0x88, + 0x71, 0x1f, 0xe9, 0xa0, 0xb6, 0xbb, 0x59, 0x93, 0xc5, 0x84, 0xf4, 0x1a, 0x5c, 0x0e, 0x0d, 0x96, + 0x3c, 0x10, 0x4d, 0xf0, 0x81, 0x68, 
0xd2, 0xbf, 0x45, 0x49, 0x56, 0x1c, 0x16, 0x01, 0xa1, 0xed, + 0xa1, 0x85, 0xdf, 0x9e, 0x23, 0x7c, 0x1a, 0x5a, 0x3d, 0x7a, 0x1e, 0x0a, 0x26, 0x3e, 0xc1, 0x76, + 0xb3, 0xcd, 0x22, 0x32, 0x86, 0xb2, 0xe5, 0xe5, 0x3c, 0xa7, 0x52, 0x21, 0x8b, 0xb1, 0x7d, 0x8c, + 0x9b, 0xb6, 0xc2, 0x6e, 0x9b, 0x45, 0xf3, 0xcb, 0x0c, 0x61, 0x23, 0xd4, 0x03, 0x46, 0x94, 0x3e, + 0x9a, 0x6b, 0x2f, 0x33, 0x90, 0x90, 0x6b, 0x0d, 0xf9, 0x3d, 0x31, 0x86, 0x10, 0x14, 0xe8, 0x4f, + 0xe5, 0x60, 0xb7, 0xb2, 0x7f, 0x50, 0xdf, 0x23, 0x7b, 0xb9, 0x08, 0x45, 0x67, 0x2f, 0x1d, 0x62, + 0x42, 0xfa, 0x49, 0x14, 0x2e, 0x85, 0xc4, 0x6f, 0xe8, 0x1e, 0x80, 0x3d, 0x50, 0x4c, 0xdc, 0xd4, + 0xcd, 0x56, 0xf8, 0x21, 0x6b, 0x0c, 0x64, 0xca, 0x21, 0x67, 0x6c, 0xfe, 0xcb, 0x9a, 0x80, 0xbd, + 0xa2, 0xb7, 0xb9, 0x52, 0xb2, 0x2a, 0x8b, 0x67, 0xd5, 0xd7, 0xc6, 0xc0, 0x03, 0xb8, 0x49, 0x14, + 0xd3, 0xbd, 0xa5, 0x8a, 0x29, 0x3f, 0x7a, 0x08, 0x0b, 0x9e, 0x2f, 0x64, 0x3e, 0xca, 0xc1, 0x18, + 0x56, 0xc3, 0x1d, 0x21, 0x73, 0x46, 0xb2, 0x78, 0x16, 0x24, 0x58, 0xe8, 0x3d, 0xb8, 0x34, 0xe4, + 0xc3, 0x5d, 0xa5, 0x89, 0x59, 0x5d, 0xf9, 0x85, 0xa0, 0x2b, 0xe7, 0xaa, 0xa5, 0x7f, 0x8f, 0xf9, + 0x37, 0x36, 0x18, 0xae, 0xee, 0x41, 0xd2, 0xb2, 0x55, 0xbb, 0x6f, 0xf1, 0x03, 0x77, 0x77, 0xd6, + 0xd8, 0x77, 0xcd, 0xf9, 0x71, 0x40, 0xc5, 0x65, 0xae, 0xe6, 0x2f, 0xfb, 0x6d, 0x49, 0xaf, 0x43, + 0x21, 0xb8, 0x39, 0xe1, 0x57, 0xc6, 0xb3, 0x39, 0x51, 0xa9, 0x03, 0x8b, 0x63, 0xa0, 0x28, 0x74, + 0x97, 0x97, 0x39, 0xd8, 0xf7, 0xb9, 0x3e, 0x3a, 0xab, 0x00, 0xbb, 0x57, 0xed, 0x20, 0x1e, 0xc1, + 0xcb, 0x1a, 0xd8, 0xa7, 0xf0, 0x08, 0x52, 0x13, 0xd0, 0x68, 0x0e, 0x32, 0x0e, 0x36, 0x13, 0x9e, + 0x01, 0x36, 0xfb, 0x0f, 0x01, 0xae, 0x4c, 0xc8, 0x4b, 0xd0, 0xa3, 0xa1, 0xd3, 0xf7, 0xe6, 0x3c, + 0x59, 0xcd, 0x1a, 0xa3, 0x05, 0xcf, 0x9f, 0x74, 0x07, 0x72, 0x7e, 0xfa, 0x6c, 0x5b, 0xff, 0xe3, + 0x98, 0xe7, 0x89, 0x82, 0xf8, 0x9e, 0xe7, 0xa0, 0x85, 0x67, 0x74, 0xd0, 0xc1, 0xd3, 0x1f, 0x9d, + 0xf3, 0xf4, 0x4f, 0x38, 0xae, 0xf1, 0x67, 0x3b, 0xae, 0x81, 0x1b, 0x9b, 
0x08, 0xde, 0xd8, 0x29, + 0x21, 0x79, 0xeb, 0x19, 0x43, 0xf2, 0xb0, 0x74, 0x02, 0xcf, 0x5b, 0x1f, 0x1d, 0x93, 0x4e, 0x48, + 0xbf, 0x12, 0x00, 0x3c, 0x70, 0x95, 0xb8, 0x63, 0x53, 0xef, 0xf7, 0x5a, 0xf4, 0xa4, 0x25, 0x64, + 0xd6, 0x20, 0x09, 0xdf, 0x27, 0x7d, 0xdd, 0xec, 0x77, 0xfd, 0xe9, 0x0e, 0x30, 0x12, 0xdd, 0x81, + 0x1b, 0x50, 0x64, 0xf9, 0x9b, 0xa5, 0x9d, 0xf6, 0x54, 0xbb, 0x6f, 0x62, 0x8e, 0x9f, 0x16, 0x28, + 0xf9, 0xc0, 0xa1, 0x12, 0x46, 0x56, 0x90, 0xf5, 0x18, 0xd9, 0x66, 0x16, 0x28, 0xd9, 0x63, 0x7c, + 0x1f, 0x2e, 0xdb, 0x6d, 0x13, 0x5b, 0x6d, 0xbd, 0xd3, 0x52, 0x86, 0x2f, 0x5b, 0x92, 0x1e, 0x8b, + 0x95, 0x29, 0x97, 0x5c, 0xbe, 0xe4, 0x6a, 0x38, 0x0a, 0x5e, 0xb8, 0xdf, 0x08, 0x80, 0x46, 0x6b, + 0x62, 0x3f, 0x90, 0xc5, 0x7f, 0x06, 0x09, 0x7a, 0x19, 0x49, 0x94, 0xee, 0x9a, 0xcc, 0x0c, 0xb7, + 0x86, 0x1f, 0x00, 0xa8, 0xb6, 0x6d, 0x6a, 0xc7, 0x7d, 0x72, 0x67, 0xa2, 0xa3, 0x43, 0x79, 0x97, + 0xb9, 0xe2, 0xf0, 0x6d, 0x5c, 0xe5, 0xb7, 0x7a, 0xc9, 0x13, 0xf5, 0xdd, 0x6c, 0x9f, 0x42, 0x69, + 0x17, 0x0a, 0x41, 0x59, 0x27, 0xbb, 0x63, 0x73, 0x08, 0x66, 0x77, 0x0c, 0x75, 0xe0, 0xd9, 0x9d, + 0x9b, 0x1b, 0xc6, 0x58, 0xf9, 0x9b, 0x36, 0xa4, 0xdf, 0x0a, 0x90, 0xf3, 0xdb, 0x82, 0xdf, 0x73, + 0xce, 0x32, 0x25, 0x4d, 0xbb, 0x3c, 0x92, 0xb2, 0xa4, 0x4e, 0x55, 0xeb, 0xf0, 0x0f, 0x99, 0xb1, + 0x7c, 0x2e, 0x40, 0xda, 0x5d, 0x7c, 0xb0, 0x12, 0x1e, 0x78, 0x3a, 0xc0, 0xf6, 0x2e, 0xea, 0x2f, + 0x5f, 0xb3, 0x87, 0x02, 0x31, 0xf7, 0xa1, 0xc0, 0x5b, 0x6e, 0x50, 0x1d, 0x86, 0x67, 0xfb, 0x77, + 0x9a, 0x23, 0x14, 0x4e, 0x0e, 0xf1, 0xaf, 0x7c, 0x1e, 0x24, 0x9a, 0x44, 0x7f, 0x03, 0x49, 0xb5, + 0xe9, 0xa2, 0xf8, 0x85, 0x31, 0xf0, 0xb6, 0xc3, 0xba, 0xd6, 0x18, 0x54, 0x28, 0xa7, 0xcc, 0x25, + 0xf8, 0xac, 0xa2, 0xce, 0xac, 0xa4, 0x77, 0x88, 0x5e, 0xc6, 0x13, 0x74, 0x52, 0x05, 0x80, 0xc3, + 0xdd, 0x87, 0x7b, 0x9b, 0x5b, 0xf7, 0xb7, 0x6a, 0x9b, 0x3c, 0xac, 0xde, 0xdc, 0xac, 0x6d, 0x8a, + 0x51, 0xc2, 0x27, 0xd7, 0x1e, 0xee, 0x1d, 0xd5, 0x36, 0xc5, 0x98, 0x54, 0x81, 0x8c, 0x6b, 0x10, + 0xe9, 0x23, 
0x0a, 0xfd, 0x53, 0x5e, 0x9b, 0x8f, 0xc9, 0xac, 0x81, 0x96, 0x21, 0xeb, 0x2f, 0x8c, + 0xb0, 0xcb, 0x9b, 0x31, 0xdc, 0x22, 0xdf, 0x7f, 0x09, 0x50, 0x1c, 0x0a, 0x95, 0xd0, 0x5b, 0x90, + 0x32, 0xfa, 0xc7, 0x8a, 0x73, 0x76, 0x87, 0x0a, 0x4d, 0x0e, 0xd6, 0xd0, 0x3f, 0xee, 0x68, 0xcd, + 0x07, 0xf8, 0x9c, 0x1b, 0xe0, 0xa4, 0xd1, 0x3f, 0x7e, 0xc0, 0x8e, 0x38, 0x9b, 0x46, 0x74, 0xc2, + 0x34, 0x62, 0x43, 0xd3, 0x40, 0x37, 0x20, 0xd7, 0xd3, 0x5b, 0x58, 0x51, 0x5b, 0x2d, 0x13, 0x5b, + 0xcc, 0xa3, 0x65, 0xb8, 0xe6, 0x2c, 0xe9, 0xa9, 0xb0, 0x0e, 0xe9, 0x5b, 0x01, 0xd0, 0xa8, 0x13, + 0x40, 0x07, 0xe3, 0x42, 0x43, 0x61, 0xb6, 0xd0, 0x90, 0x7f, 0xee, 0xd1, 0x00, 0xb1, 0x01, 0x4b, + 0x9e, 0xa9, 0x32, 0xe8, 0x7a, 0xe9, 0xa6, 0x44, 0x67, 0xdc, 0x94, 0x88, 0x8c, 0x5c, 0x79, 0xb7, + 0x67, 0xaa, 0xcd, 0x95, 0x0c, 0x28, 0x35, 0x46, 0xc4, 0xf8, 0x3a, 0xc3, 0xa6, 0x24, 0x3c, 0xcb, + 0x94, 0xa4, 0x3b, 0x20, 0x3e, 0x72, 0xc7, 0xe7, 0x23, 0x0d, 0x4d, 0x53, 0x18, 0x99, 0xe6, 0x19, + 0xa4, 0x89, 0xf5, 0xa5, 0xde, 0xe5, 0xef, 0x20, 0xe3, 0xee, 0x9e, 0xfb, 0x0e, 0x2b, 0x74, 0xdb, + 0xf9, 0x4c, 0x3c, 0x11, 0x74, 0x0b, 0x16, 0x88, 0xdf, 0x70, 0x0a, 0xa6, 0x0c, 0x89, 0x8e, 0x52, + 0x6b, 0x58, 0x64, 0x1d, 0x3b, 0x0e, 0x58, 0x4a, 0x22, 0x4a, 0xd1, 0x71, 0x70, 0x7f, 0x8c, 0x09, + 0x90, 0x74, 0x79, 0x08, 0x90, 0x67, 0xdf, 0x30, 0x1f, 0x08, 0x7d, 0xa5, 0x7f, 0x8a, 0x42, 0xd6, + 0x57, 0x8f, 0x45, 0x7f, 0x1d, 0x88, 0xe2, 0x57, 0x27, 0xd5, 0x6e, 0x7d, 0x21, 0x7c, 0x60, 0x61, + 0xd1, 0xf9, 0x17, 0x16, 0x56, 0x1d, 0x77, 0xca, 0xbb, 0xf1, 0xb9, 0xcb, 0xbb, 0x2f, 0x03, 0xb2, + 0x75, 0x5b, 0xed, 0x10, 0xe7, 0xad, 0xf5, 0x4e, 0x15, 0x76, 0xdb, 0x99, 0x03, 0x11, 0x69, 0xcf, + 0x11, 0xed, 0xd8, 0x27, 0x74, 0xe9, 0x7f, 0x05, 0x48, 0xbb, 0xa0, 0xce, 0xbc, 0xcf, 0x99, 0x2e, + 0x42, 0x92, 0xe3, 0x16, 0xec, 0x3d, 0x13, 0x6f, 0x8d, 0xad, 0x63, 0x97, 0x21, 0xdd, 0xc5, 0xb6, + 0x4a, 0xbd, 0x21, 0x8b, 0x3c, 0xdc, 0xf6, 0x53, 0xd7, 0x97, 0x6f, 0xbd, 0x09, 0x59, 0xdf, 0x1b, + 0x32, 0xe2, 0x59, 0x77, 0x6b, 0xef, 0x8a, 0x91, 
0x72, 0xea, 0x8b, 0xaf, 0x56, 0x63, 0xbb, 0xf8, + 0x53, 0x54, 0x22, 0xe6, 0xb8, 0x5a, 0xaf, 0x55, 0x1f, 0x88, 0x42, 0x39, 0xfb, 0xc5, 0x57, 0xab, + 0x29, 0x19, 0xd3, 0x8a, 0xe2, 0xad, 0x07, 0x50, 0x1c, 0xfa, 0xa2, 0x41, 0x1b, 0x8f, 0xa0, 0xb0, + 0x79, 0xb8, 0xbf, 0xb3, 0x55, 0xad, 0x34, 0x6a, 0xca, 0xd1, 0x5e, 0xa3, 0x26, 0x0a, 0xe8, 0x12, + 0x2c, 0xee, 0x6c, 0xfd, 0x7d, 0xbd, 0xa1, 0x54, 0x77, 0xb6, 0x6a, 0xbb, 0x0d, 0xa5, 0xd2, 0x68, + 0x54, 0xaa, 0x0f, 0xc4, 0xe8, 0xed, 0xff, 0xcc, 0x42, 0xb1, 0xb2, 0x51, 0xdd, 0xaa, 0x18, 0x46, + 0x47, 0x6b, 0xaa, 0xd4, 0x63, 0x54, 0x21, 0x4e, 0x6b, 0x23, 0x13, 0x5f, 0xa0, 0x97, 0x27, 0x97, + 0x89, 0xd1, 0x7d, 0x48, 0xd0, 0xb2, 0x09, 0x9a, 0xfc, 0x24, 0xbd, 0x3c, 0xa5, 0x6e, 0x4c, 0x26, + 0x43, 0xef, 0xe1, 0xc4, 0x37, 0xea, 0xe5, 0xc9, 0x65, 0x64, 0xb4, 0x03, 0x29, 0x07, 0x6c, 0x9e, + 0xf6, 0x70, 0xbc, 0x3c, 0xb5, 0xb6, 0x4b, 0x96, 0xc6, 0x40, 0xfb, 0xc9, 0xcf, 0xd7, 0xcb, 0x53, + 0x0a, 0xcc, 0x68, 0x0b, 0x92, 0x1c, 0x30, 0x9d, 0xf2, 0x22, 0xbd, 0x3c, 0xad, 0x64, 0x8c, 0x64, + 0xc8, 0x78, 0x25, 0x9b, 0xe9, 0x8f, 0xf2, 0xcb, 0x33, 0xd4, 0xce, 0xd1, 0x87, 0x90, 0x0f, 0x82, + 0xb1, 0xb3, 0xbd, 0x7a, 0x2f, 0xcf, 0x58, 0x9c, 0x26, 0xfa, 0x83, 0xc8, 0xec, 0x6c, 0xaf, 0xe0, + 0xcb, 0x33, 0xd6, 0xaa, 0xd1, 0xc7, 0xb0, 0x30, 0x8a, 0x9c, 0xce, 0xfe, 0x28, 0xbe, 0x3c, 0x47, + 0xf5, 0x1a, 0x75, 0x01, 0x8d, 0x41, 0x5c, 0xe7, 0x78, 0x23, 0x5f, 0x9e, 0xa7, 0x98, 0x8d, 0x5a, + 0x50, 0x1c, 0x46, 0x31, 0x67, 0x7d, 0x33, 0x5f, 0x9e, 0xb9, 0xb0, 0xcd, 0x46, 0x09, 0x42, 0x7a, + 0xb3, 0xbe, 0xa1, 0x2f, 0xcf, 0x5c, 0xe7, 0x46, 0x87, 0x00, 0x3e, 0x90, 0x68, 0x86, 0x37, 0xf5, + 0xe5, 0x59, 0x2a, 0xde, 0xc8, 0x80, 0xc5, 0x71, 0xa8, 0xd0, 0x3c, 0x4f, 0xec, 0xcb, 0x73, 0x15, + 0xc2, 0xc9, 0x79, 0x0e, 0xe2, 0x3b, 0xb3, 0x3d, 0xb9, 0x2f, 0xcf, 0x58, 0x11, 0xdf, 0xa8, 0x3d, + 0xf9, 0x6e, 0x59, 0xf8, 0xfa, 0xbb, 0x65, 0xe1, 0xdb, 0xef, 0x96, 0x85, 0x2f, 0xbf, 0x5f, 0x8e, + 0x7c, 0xfd, 0xfd, 0x72, 0xe4, 0xa7, 0xdf, 0x2f, 0x47, 0xfe, 0xe1, 0xa5, 0x53, 0xcd, 
0x6e, 0xf7, + 0x8f, 0xd7, 0x9a, 0x7a, 0x77, 0xdd, 0xff, 0x2f, 0xa6, 0x71, 0x7f, 0xbb, 0x3a, 0x4e, 0x52, 0x4f, + 0x7c, 0xe7, 0x77, 0x01, 0x00, 0x00, 0xff, 0xff, 0x51, 0xad, 0x4c, 0xbc, 0x96, 0x35, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -5601,27 +5174,6 @@ func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.BeginBlock != nil { - { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -5643,48 +5195,6 @@ func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - return len(dAtA) - i, nil -} -func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - return len(dAtA) - i, nil -} func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -6071,12 +5581,12 @@ func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err22 != nil { - return 0, err22 + n19, err19 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err19 != nil { + return 0, err19 } - i -= n22 - i = encodeVarintTypes(dAtA, i, uint64(n22)) + i -= n19 + i = encodeVarintTypes(dAtA, i, uint64(n19)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -6134,70 +5644,6 @@ func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - { - size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := 
m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6233,7 +5679,7 @@ func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { +func (m *RequestCommit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6243,27 +5689,20 @@ func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestDeliverTx) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestCommit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { +func (m *RequestListSnapshots) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6273,25 +5712,20 @@ func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestEndBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestListSnapshots) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m 
*RequestEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x8 - } return len(dAtA) - i, nil } -func (m *RequestCommit) Marshal() (dAtA []byte, err error) { +func (m *RequestOfferSnapshot) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6301,58 +5735,12 @@ func (m *RequestCommit) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestCommit) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *RequestListSnapshots) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestListSnapshots) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *RequestOfferSnapshot) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestOfferSnapshot) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6523,12 +5911,12 @@ func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x3a } - n27, err27 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err27 != nil { - return 0, err27 + n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err22 != nil { + return 0, err22 } - i -= n27 - i = encodeVarintTypes(dAtA, i, uint64(n27)) + i -= n22 + i = encodeVarintTypes(dAtA, i, uint64(n22)) i-- dAtA[i] = 0x32 if m.Height != 0 { @@ -6536,10 +5924,10 @@ func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x28 } - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6611,12 +5999,12 @@ func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x3a } - n29, err29 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err29 != nil { - return 0, err29 + n24, err24 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err24 != nil { + return 0, err24 } - i -= n29 - i = encodeVarintTypes(dAtA, i, uint64(n29)) + i -= n24 + i = encodeVarintTypes(dAtA, i, uint64(n24)) i-- dAtA[i] = 0x32 if m.Height != 0 { @@ -6631,10 +6019,10 @@ func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x22 } - if len(m.ByzantineValidators) > 0 { - for 
iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6822,12 +6210,12 @@ func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x3a } - n32, err32 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err32 != nil { - return 0, err32 + n27, err27 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err27 != nil { + return 0, err27 } - i -= n32 - i = encodeVarintTypes(dAtA, i, uint64(n32)) + i -= n27 + i = encodeVarintTypes(dAtA, i, uint64(n27)) i-- dAtA[i] = 0x32 if m.Height != 0 { @@ -6842,10 +6230,10 @@ func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7036,27 +6424,6 @@ func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.BeginBlock != nil { - { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - return len(dAtA) - i, nil -} func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -7078,48 +6445,6 @@ func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - return len(dAtA) - i, nil -} -func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - return len(dAtA) - i, nil -} func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -7637,43 +6962,6 @@ func (m *ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - 
_ = i - var l int - _ = l - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7694,13 +6982,6 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.MempoolError) > 0 { - i -= len(m.MempoolError) - copy(dAtA[i:], m.MempoolError) - i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError))) - i-- - dAtA[i] = 0x5a - } if m.Priority != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Priority)) i-- @@ -7720,44 +7001,11 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x42 } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.GasUsed != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) - i-- - dAtA[i] = 0x30 - } if m.GasWanted != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) i-- dAtA[i] = 0x28 } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 - } - if len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) - i-- - dAtA[i] = 0x1a - } if len(m.Data) > 0 { i -= len(m.Data) copy(dAtA[i:], m.Data) @@ -7853,7 +7101,7 @@ func (m *ResponseDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { +func (m *ResponseCommit) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7863,48 +7111,48 @@ func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ValidatorSetUpdate != nil { - { - size, err := m.ValidatorSetUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6 + if m.RetainHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) i-- - dAtA[i] = 0xaa + dAtA[i] = 0x18 } - if m.NextCoreChainLockUpdate != nil { - { - size, err := m.NextCoreChainLockUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xa2 + return len(dAtA) - i, nil +} + +func (m *ResponseListSnapshots) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + return dAtA[:n], nil +} + +func (m *ResponseListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Snapshots) > 0 { + for iNdEx := len(m.Snapshots) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err 
:= m.Snapshots[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7912,97 +7160,13 @@ func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - } - } - if m.ConsensusParamUpdates != nil { - { - size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x12 } return len(dAtA) - i, nil } -func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.RetainHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) - i-- - dAtA[i] = 0x18 - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} - -func (m *ResponseListSnapshots) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseListSnapshots) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Snapshots) > 0 { - for iNdEx := len(m.Snapshots) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Snapshots[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, 
err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResponseOfferSnapshot) Marshal() (dAtA []byte, err error) { +func (m *ResponseOfferSnapshot) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -8090,20 +7254,20 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } } if len(m.RefetchChunks) > 0 { - dAtA62 := make([]byte, len(m.RefetchChunks)*10) - var j61 int + dAtA51 := make([]byte, len(m.RefetchChunks)*10) + var j50 int for _, num := range m.RefetchChunks { for num >= 1<<7 { - dAtA62[j61] = uint8(uint64(num)&0x7f | 0x80) + dAtA51[j50] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j61++ + j50++ } - dAtA62[j61] = uint8(num) - j61++ + dAtA51[j50] = uint8(num) + j50++ } - i -= j61 - copy(dAtA[i:], dAtA62[:j61]) - i = encodeVarintTypes(dAtA, i, uint64(j61)) + i -= j50 + copy(dAtA[i:], dAtA51[:j50]) + i = encodeVarintTypes(dAtA, i, uint64(j50)) i-- dAtA[i] = 0x12 } @@ -8422,11 +7586,6 @@ func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0xa2 } - if m.RetainHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) - i-- - dAtA[i] = 0x30 - } if len(m.AppHash) > 0 { i -= len(m.AppHash) copy(dAtA[i:], m.AppHash) @@ -9183,12 +8342,12 @@ func (m *Misbehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n74, err74 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err74 != nil { - return 0, err74 + n63, err63 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err63 != nil { + return 0, err63 } - i -= n74 - i = encodeVarintTypes(dAtA, i, uint64(n74)) + i -= n63 + i = encodeVarintTypes(dAtA, i, uint64(n63)) i-- dAtA[i] = 0x22 if m.Height != 0 { 
@@ -9356,18 +8515,6 @@ func (m *Request_Query) Size() (n int) { } return n } -func (m *Request_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BeginBlock != nil { - l = m.BeginBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Request_CheckTx) Size() (n int) { if m == nil { return 0 @@ -9380,30 +8527,6 @@ func (m *Request_CheckTx) Size() (n int) { } return n } -func (m *Request_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} -func (m *Request_EndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Request_Commit) Size() (n int) { if m == nil { return 0 @@ -9625,29 +8748,6 @@ func (m *RequestQuery) Size() (n int) { return n } -func (m *RequestBeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = m.Header.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.LastCommitInfo.Size() - n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - func (m *RequestCheckTx) Size() (n int) { if m == nil { return 0 @@ -9664,31 +8764,6 @@ func (m *RequestCheckTx) Size() (n int) { return n } -func (m *RequestDeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func (m *RequestEndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - return n -} - func (m *RequestCommit) Size() (n int) { if m == nil { return 0 @@ -9779,8 +8854,8 @@ func (m 
*RequestPrepareProposal) Size() (n int) { } l = m.LocalLastCommit.Size() n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -9825,8 +8900,8 @@ func (m *RequestProcessProposal) Size() (n int) { } l = m.ProposedLastCommit.Size() n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -9907,8 +8982,8 @@ func (m *RequestFinalizeBlock) Size() (n int) { } l = m.DecidedLastCommit.Size() n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -10027,18 +9102,6 @@ func (m *Response_Query) Size() (n int) { } return n } -func (m *Response_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BeginBlock != nil { - l = m.BeginBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Response_CheckTx) Size() (n int) { if m == nil { return 0 @@ -10051,30 +9114,6 @@ func (m *Response_CheckTx) Size() (n int) { } return n } -func (m *Response_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} -func (m *Response_EndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Response_Commit) Size() (n int) { if m == nil { return 0 @@ -10325,21 +9364,6 @@ func (m *ResponseQuery) Size() (n int) { return n } -func (m *ResponseBeginBlock) Size() (n int) { - if m 
== nil { - return 0 - } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - func (m *ResponseCheckTx) Size() (n int) { if m == nil { return 0 @@ -10353,26 +9377,9 @@ func (m *ResponseCheckTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.Log) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Info) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } if m.GasWanted != 0 { n += 1 + sovTypes(uint64(m.GasWanted)) } - if m.GasUsed != 0 { - n += 1 + sovTypes(uint64(m.GasUsed)) - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } l = len(m.Codespace) if l > 0 { n += 1 + l + sovTypes(uint64(l)) @@ -10384,10 +9391,6 @@ func (m *ResponseCheckTx) Size() (n int) { if m.Priority != 0 { n += 1 + sovTypes(uint64(m.Priority)) } - l = len(m.MempoolError) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } return n } @@ -10431,43 +9434,12 @@ func (m *ResponseDeliverTx) Size() (n int) { return n } -func (m *ResponseEndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ConsensusParamUpdates != nil { - l = m.ConsensusParamUpdates.Size() - n += 1 + l + sovTypes(uint64(l)) - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.NextCoreChainLockUpdate != nil { - l = m.NextCoreChainLockUpdate.Size() - n += 2 + l + sovTypes(uint64(l)) - } - if m.ValidatorSetUpdate != nil { - l = m.ValidatorSetUpdate.Size() - n += 2 + l + sovTypes(uint64(l)) - } - return n -} - func (m *ResponseCommit) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } if m.RetainHeight != 0 { n += 1 + sovTypes(uint64(m.RetainHeight)) } @@ -10675,9 +9647,6 @@ func (m *ResponseFinalizeBlock) Size() (n int) { if l > 0 { n += 1 + l + 
sovTypes(uint64(l)) } - if m.RetainHeight != 0 { - n += 1 + sovTypes(uint64(m.RetainHeight)) - } if m.NextCoreChainLockUpdate != nil { l = m.NextCoreChainLockUpdate.Size() n += 2 + l + sovTypes(uint64(l)) @@ -11243,9 +10212,9 @@ func (m *Request) Unmarshal(dAtA []byte) error { } m.Value = &Request_Query{v} iNdEx = postIndex - case 6: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11272,15 +10241,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestBeginBlock{} + v := &RequestCheckTx{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_BeginBlock{v} + m.Value = &Request_CheckTx{v} iNdEx = postIndex - case 7: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11307,15 +10276,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestCheckTx{} + v := &RequestCommit{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_CheckTx{v} + m.Value = &Request_Commit{v} iNdEx = postIndex - case 8: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11342,112 +10311,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestDeliverTx{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = 
&Request_DeliverTx{v} - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestEndBlock{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_EndBlock{v} - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestCommit{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_Commit{v} - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > 
l { - return io.ErrUnexpectedEOF - } - v := &RequestListSnapshots{} + v := &RequestListSnapshots{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12257,335 +11121,11 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { break } } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialCoreHeight", wireType) - } - m.InitialCoreHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InitialCoreHeight |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestQuery) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Prove = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { 
- return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
- if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialCoreHeight", wireType) } - var msglen int + m.InitialCoreHeight = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12595,26 +11135,11 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.InitialCoreHeight |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if 
msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12636,7 +11161,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { +func (m *RequestQuery) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12659,15 +11184,15 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -12694,16 +11219,48 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} } iNdEx = postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - m.Type = 0 + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12713,11 +11270,31 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= CheckTxType(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.Prove = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12739,7 +11316,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { +func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12762,10 +11339,10 @@ func (m *RequestDeliverTx) 
Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestDeliverTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12802,61 +11379,11 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { m.Tx = []byte{} } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestEndBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Height = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12866,7 +11393,7 @@ func (m *RequestEndBlock) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Type |= CheckTxType(b&0x7F) << shift if b < 0x80 { break } @@ -13469,7 +11996,7 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13496,8 +12023,8 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Misbehavior = append(m.Misbehavior, Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13812,7 +12339,7 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13839,8 +12366,8 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Misbehavior = append(m.Misbehavior, Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14389,7 +12916,7 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14416,8 +12943,8 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Misbehavior = append(m.Misbehavior, Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14732,116 +13259,11 @@ func (m *Response) Unmarshal(dAtA []byte) error { if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Exception{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseEcho{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_Echo{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - 
} - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseFlush{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_Flush{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseInfo{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_Info{v} + m.Value = &Response_Exception{v} iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14868,15 +13290,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseInitChain{} + v := &ResponseEcho{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_InitChain{v} + m.Value = &Response_Echo{v} iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14903,15 +13325,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseQuery{} + v := &ResponseFlush{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Query{v} + m.Value = &Response_Flush{v} iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14938,15 +13360,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseBeginBlock{} + v := &ResponseInfo{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_BeginBlock{v} + m.Value = &Response_Info{v} iNdEx = postIndex - case 8: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14973,15 +13395,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCheckTx{} + v := &ResponseInitChain{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_CheckTx{v} + m.Value = &Response_InitChain{v} iNdEx = postIndex - case 9: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15008,15 +13430,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseDeliverTx{} + v := &ResponseQuery{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_DeliverTx{v} + m.Value = &Response_Query{v} iNdEx = postIndex - 
case 10: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15043,11 +13465,11 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseEndBlock{} + v := &ResponseCheckTx{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_EndBlock{v} + m.Value = &Response_CheckTx{v} iNdEx = postIndex case 11: if wireType != 2 { @@ -16335,90 +14757,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseBeginBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - 
if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -16501,70 +14839,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { m.Data = []byte{} } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Log = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = string(dAtA[iNdEx:postIndex]) - iNdEx = 
postIndex case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) @@ -16584,59 +14858,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { break } } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) - } - m.GasUsed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GasUsed |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) @@ -16715,43 +14936,11 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Priority |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Priority |= int64(b&0x7F) << shift if b < 0x80 { break } } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MempoolError = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -17044,198 +15233,6 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &types1.ConsensusParams{} - } - if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 100: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NextCoreChainLockUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NextCoreChainLockUpdate == nil { - m.NextCoreChainLockUpdate = &types1.CoreChainLock{} - } - if err := m.NextCoreChainLockUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 101: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSetUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ValidatorSetUpdate == nil { - m.ValidatorSetUpdate = &ValidatorSetUpdate{} - } - if err := m.ValidatorSetUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ResponseCommit) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -17265,40 +15262,6 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: ResponseCommit: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) @@ -18605,25 +16568,6 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { m.AppHash = []byte{} } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) - } - m.RetainHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RetainHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } case 100: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NextCoreChainLockUpdate", wireType) diff --git a/cmd/tenderdash/commands/compact.go b/cmd/tenderdash/commands/compact.go new file mode 100644 index 0000000000..eadd828ae3 --- /dev/null +++ b/cmd/tenderdash/commands/compact.go @@ -0,0 +1,71 @@ +package commands + +import ( + "errors" + "path/filepath" + "sync" + + "github.com/spf13/cobra" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" +) + +func MakeCompactDBCommand(cfg *config.Config, logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "experimental-compact-goleveldb", + Short: "force compacts the tendermint storage engine (only GoLevelDB supported)", + Long: ` +This is a temporary utility command that performs a force compaction on the state +and blockstores to reduce disk space for a pruning node. This should only be run +once the node has stopped. This command will likely be omitted in the future after +the planned refactor to the storage engine. + +Currently, only GoLevelDB is supported. 
+ `, + RunE: func(cmd *cobra.Command, args []string) error { + if cfg.DBBackend != "goleveldb" { + return errors.New("compaction is currently only supported with goleveldb") + } + + compactGoLevelDBs(cfg.RootDir, logger) + return nil + }, + } + + return cmd +} + +func compactGoLevelDBs(rootDir string, logger log.Logger) { + dbNames := []string{"state", "blockstore"} + o := &opt.Options{ + DisableSeeksCompaction: true, + } + wg := sync.WaitGroup{} + + for _, dbName := range dbNames { + dbName := dbName + wg.Add(1) + go func() { + defer wg.Done() + dbPath := filepath.Join(rootDir, "data", dbName+".db") + store, err := leveldb.OpenFile(dbPath, o) + if err != nil { + logger.Error("failed to initialize tendermint db", "path", dbPath, "err", err) + return + } + defer store.Close() + + logger.Info("starting compaction...", "db", dbPath) + + err = store.CompactRange(util.Range{Start: nil, Limit: nil}) + if err != nil { + logger.Error("failed to compact tendermint db", "path", dbPath, "err", err) + } + }() + } + wg.Wait() +} diff --git a/cmd/tenderdash/commands/debug/kill.go b/cmd/tenderdash/commands/debug/kill.go index a6c1ac7d86..7755817a63 100644 --- a/cmd/tenderdash/commands/debug/kill.go +++ b/cmd/tenderdash/commands/debug/kill.go @@ -33,10 +33,14 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`, Args: cobra.ExactArgs(2), RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - pid, err := strconv.ParseInt(args[0], 10, 64) + // Using Atoi so that the size of an integer can be automatically inferred. 
+ pid, err := strconv.Atoi(args[0]) if err != nil { return err } + if pid <= 0 { + return fmt.Errorf("PID value must be > 0; given value %q, got %d", args[0], pid) + } outFile := args[1] if outFile == "" { @@ -95,7 +99,7 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`, } logger.Info("killing Tendermint process") - if err := killProc(int(pid), tmpDir); err != nil { + if err := killProc(pid, tmpDir); err != nil { return err } @@ -113,6 +117,9 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`, // if the output file cannot be created or the tail command cannot be started. // An error is not returned if any subsequent syscall fails. func killProc(pid int, dir string) error { + if pid <= 0 { + return fmt.Errorf("PID must be > 0, got %d", pid) + } // pipe STDERR output from tailing the Tendermint process to a file // // NOTE: This will only work on UNIX systems. diff --git a/cmd/tenderdash/commands/key_migrate.go b/cmd/tenderdash/commands/key_migrate.go index 5866be341b..88b9dfe715 100644 --- a/cmd/tenderdash/commands/key_migrate.go +++ b/cmd/tenderdash/commands/key_migrate.go @@ -6,69 +6,70 @@ import ( "github.com/spf13/cobra" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/scripts/keymigrate" "github.com/tendermint/tendermint/scripts/scmigrate" ) -func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command { +func MakeKeyMigrateCommand(conf *config.Config, logger log.Logger) *cobra.Command { cmd := &cobra.Command{ Use: "key-migrate", Short: "Run Database key migration", RunE: func(cmd *cobra.Command, args []string) error { - ctx, cancel := context.WithCancel(cmd.Context()) - defer cancel() + return RunDatabaseMigration(cmd.Context(), logger, conf) + }, + } - contexts := []string{ - // this is ordered to put the - // (presumably) biggest/most important - // subsets first. 
- "blockstore", - "state", - "peerstore", - "tx_index", - "evidence", - "light", - } + // allow database info to be overridden via cli + addDBFlags(cmd, conf) - for idx, dbctx := range contexts { - logger.Info("beginning a key migration", - "dbctx", dbctx, - "num", idx+1, - "total", len(contexts), - ) + return cmd +} - db, err := cfg.DefaultDBProvider(&cfg.DBContext{ - ID: dbctx, - Config: conf, - }) +func RunDatabaseMigration(ctx context.Context, logger log.Logger, conf *config.Config) error { + contexts := []string{ + // this is ordered to put + // the more ephemeral tables first to + // reduce the possibility of the + // ephemeral data overwriting later data + "tx_index", + "light", + "blockstore", + "state", + "evidence", + } - if err != nil { - return fmt.Errorf("constructing database handle: %w", err) - } + for idx, dbctx := range contexts { + logger.Info("beginning a key migration", + "dbctx", dbctx, + "num", idx+1, + "total", len(contexts), + ) - if err = keymigrate.Migrate(ctx, db); err != nil { - return fmt.Errorf("running migration for context %q: %w", - dbctx, err) - } + db, err := config.DefaultDBProvider(&config.DBContext{ + ID: dbctx, + Config: conf, + }) - if dbctx == "blockstore" { - if err := scmigrate.Migrate(ctx, db); err != nil { - return fmt.Errorf("running seen commit migration: %w", err) + if err != nil { + return fmt.Errorf("constructing database handle: %w", err) + } - } - } - } + if err = keymigrate.Migrate(ctx, dbctx, db); err != nil { + return fmt.Errorf("running migration for context %q: %w", + dbctx, err) + } - logger.Info("completed database migration successfully") + if dbctx == "blockstore" { + if err := scmigrate.Migrate(ctx, db); err != nil { + return fmt.Errorf("running seen commit migration: %w", err) - return nil - }, + } + } } - // allow database info to be overridden via cli - addDBFlags(cmd, conf) + logger.Info("completed database migration successfully") - return cmd + return nil } diff --git 
a/cmd/tenderdash/commands/light.go b/cmd/tenderdash/commands/light.go index 5b37c6bd32..9ebff8e275 100644 --- a/cmd/tenderdash/commands/light.go +++ b/cmd/tenderdash/commands/light.go @@ -171,7 +171,8 @@ for applications built w/ Cosmos SDK). // If necessary adjust global WriteTimeout to ensure it's greater than // TimeoutBroadcastTxCommit. // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { + // Note we don't need to adjust anything if the timeout is already unlimited. + if cfg.WriteTimeout > 0 && cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { cfg.WriteTimeout = conf.RPC.TimeoutBroadcastTxCommit + 1*time.Second } diff --git a/cmd/tenderdash/commands/reindex_event.go b/cmd/tenderdash/commands/reindex_event.go index 6cec32738a..34d07fdd5b 100644 --- a/cmd/tenderdash/commands/reindex_event.go +++ b/cmd/tenderdash/commands/reindex_event.go @@ -193,7 +193,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { return fmt.Errorf("not able to load block at height %d from the blockstore", i) } - r, err := args.stateStore.LoadABCIResponses(i) + r, err := args.stateStore.LoadFinalizeBlockResponses(i) if err != nil { return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i) } @@ -201,7 +201,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { e := types.EventDataNewBlockHeader{ Header: b.Header, NumTxs: int64(len(b.Txs)), - ResultFinalizeBlock: *r.FinalizeBlock, + ResultFinalizeBlock: *r, } var batch *indexer.Batch @@ -213,7 +213,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { Height: b.Height, Index: uint32(i), Tx: b.Data.Txs[i], - Result: *(r.FinalizeBlock.TxResults[i]), + Result: *(r.TxResults[i]), } _ = batch.Add(&tr) diff --git a/cmd/tenderdash/commands/reindex_event_test.go b/cmd/tenderdash/commands/reindex_event_test.go index f60fe1b04e..97fb98c682 100644 --- 
a/cmd/tenderdash/commands/reindex_event_test.go +++ b/cmd/tenderdash/commands/reindex_event_test.go @@ -15,7 +15,6 @@ import ( "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/libs/log" - prototmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" _ "github.com/lib/pq" // for the psql sink @@ -153,16 +152,14 @@ func TestReIndexEvent(t *testing.T) { On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil) dtx := abcitypes.ExecTxResult{} - abciResp := &prototmstate.ABCIResponses{ - FinalizeBlock: &abcitypes.ResponseFinalizeBlock{ - TxResults: []*abcitypes.ExecTxResult{&dtx}, - }, + abciResp := &abcitypes.ResponseFinalizeBlock{ + TxResults: []*abcitypes.ExecTxResult{&dtx}, } mockStateStore. - On("LoadABCIResponses", base).Return(nil, errors.New("")).Once(). - On("LoadABCIResponses", base).Return(abciResp, nil). - On("LoadABCIResponses", height).Return(abciResp, nil) + On("LoadFinalizeBlockResponses", base).Return(nil, errors.New("")).Once(). + On("LoadFinalizeBlockResponses", base).Return(abciResp, nil). 
+ On("LoadFinalizeBlockResponses", height).Return(abciResp, nil) testCases := []struct { startHeight int64 @@ -170,7 +167,7 @@ func TestReIndexEvent(t *testing.T) { reIndexErr bool }{ {base, height, true}, // LoadBlock error - {base, height, true}, // LoadABCIResponses error + {base, height, true}, // LoadFinalizeBlockResponses error {base, height, true}, // index block event error {base, height, true}, // index tx event error {base, base, false}, diff --git a/cmd/tenderdash/commands/rollback_test.go b/cmd/tenderdash/commands/rollback_test.go new file mode 100644 index 0000000000..8e2ba87bfc --- /dev/null +++ b/cmd/tenderdash/commands/rollback_test.go @@ -0,0 +1,83 @@ +package commands_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/cmd/tenderdash/commands" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/rpc/client/local" + rpctest "github.com/tendermint/tendermint/rpc/test" + e2e "github.com/tendermint/tendermint/test/e2e/app" +) + +func TestRollbackIntegration(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + var height int64 + dir := t.TempDir() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := rpctest.CreateConfig(t, t.Name()) + require.NoError(t, err) + cfg.BaseConfig.DBBackend = "goleveldb" + + app, err := e2e.NewApplication(e2e.DefaultConfig(dir)) + require.NoError(t, err) + + t.Run("First run", func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + require.NoError(t, err) + node, _, err := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout) + require.NoError(t, err) + require.True(t, node.IsRunning()) + + time.Sleep(3 * time.Second) + cancel() + node.Wait() + + require.False(t, node.IsRunning()) + }) + t.Run("Rollback", func(t *testing.T) { + time.Sleep(time.Second) + require.NoError(t, app.Rollback()) + height, _, err = 
commands.RollbackState(cfg) + require.NoError(t, err, "%d", height) + }) + t.Run("Restart", func(t *testing.T) { + require.True(t, height > 0, "%d", height) + + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + node2, _, err2 := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout) + require.NoError(t, err2) + t.Cleanup(node2.Wait) + + logger := log.NewNopLogger() + + client, err := local.New(logger, node2.(local.NodeService)) + require.NoError(t, err) + + ticker := time.NewTicker(200 * time.Millisecond) + for { + select { + case <-ctx.Done(): + t.Fatalf("failed to make progress after 20 seconds. Min height: %d", height) + case <-ticker.C: + status, err := client.Status(ctx) + require.NoError(t, err) + + if status.SyncInfo.LatestBlockHeight > height { + return + } + } + } + }) + +} diff --git a/cmd/tenderdash/commands/run_node.go b/cmd/tenderdash/commands/run_node.go index 347a04034e..f4d49b91e6 100644 --- a/cmd/tenderdash/commands/run_node.go +++ b/cmd/tenderdash/commands/run_node.go @@ -63,7 +63,6 @@ func AddNodeFlags(cmd *cobra.Command, conf *cfg.Config) { "p2p.laddr", conf.P2P.ListenAddress, "node listen address. 
(0.0.0.0:0 means any interface, any port)") - cmd.Flags().String("p2p.seeds", conf.P2P.Seeds, "comma-delimited ID@host:port seed nodes") //nolint: staticcheck cmd.Flags().String("p2p.persistent-peers", conf.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") cmd.Flags().Bool("p2p.upnp", conf.P2P.UPNP, "enable/disable UPNP port forwarding") cmd.Flags().Bool("p2p.pex", conf.P2P.PexReactor, "enable/disable Peer-Exchange") diff --git a/cmd/tenderdash/commands/testnet.go b/cmd/tenderdash/commands/testnet.go index 50d82b21b0..8b1dbaebab 100644 --- a/cmd/tenderdash/commands/testnet.go +++ b/cmd/tenderdash/commands/testnet.go @@ -240,7 +240,6 @@ Example: for i := 0; i < nValidators+nNonValidators; i++ { nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) config.SetRoot(nodeDir) - config.P2P.AllowDuplicateIP = true if populatePersistentPeers { persistentPeersWithoutSelf := make([]string, 0) for j := 0; j < len(persistentPeers); j++ { diff --git a/cmd/tenderdash/main.go b/cmd/tenderdash/main.go index 7320267fbd..e6536f4d88 100644 --- a/cmd/tenderdash/main.go +++ b/cmd/tenderdash/main.go @@ -43,6 +43,7 @@ func main() { commands.MakeKeyMigrateCommand(conf, logger), debug.GetDebugCommand(logger), commands.NewCompletionCmd(rcmd, true), + commands.MakeCompactDBCommand(conf, logger), ) // NOTE: diff --git a/config/config.go b/config/config.go index fba0d44371..ba45d68b1b 100644 --- a/config/config.go +++ b/config/config.go @@ -557,7 +557,7 @@ func DefaultRPCConfig() *RPCConfig { MaxSubscriptionClients: 100, MaxSubscriptionsPerClient: 5, ExperimentalDisableWebsocket: false, // compatible with TM v0.35 and earlier - EventLogWindowSize: 0, // disables /events RPC by default + EventLogWindowSize: 30 * time.Second, EventLogMaxItems: 0, TimeoutBroadcastTxCommit: 10 * time.Second, @@ -646,15 +646,6 @@ type P2PConfig struct { //nolint: maligned // Address to advertise to peers for them to dial ExternalAddress string 
`mapstructure:"external-address"` - // Comma separated list of seed nodes to connect to - // We only use these if we can’t connect to peers in the addrbook - // - // Deprecated: This value is not used by the new PEX reactor. Use - // BootstrapPeers instead. - // - // TODO(#5670): Remove once the p2p refactor is complete. - Seeds string `mapstructure:"seeds"` - // Comma separated list of peers to be added to the peer store // on startup. Either BootstrapPeers or PersistentPeers are // needed for peer discovery @@ -670,6 +661,10 @@ type P2PConfig struct { //nolint: maligned // outbound). MaxConnections uint16 `mapstructure:"max-connections"` + // MaxOutgoingConnections defines the maximum number of connected peers (inbound and + // outbound). + MaxOutgoingConnections uint16 `mapstructure:"max-outgoing-connections"` + // MaxIncomingConnectionAttempts rate limits the number of incoming connection // attempts per IP address. MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"` @@ -681,9 +676,6 @@ type P2PConfig struct { //nolint: maligned // other peers) PrivatePeerIDs string `mapstructure:"private-peer-ids"` - // Toggle to disable guard against peers connecting from the same ip. - AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"` - // Time to wait before flushing messages out on the connection FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"` @@ -700,13 +692,9 @@ type P2PConfig struct { //nolint: maligned HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"` DialTimeout time.Duration `mapstructure:"dial-timeout"` - // Testing params. - // Force dial to fail - TestDialFail bool `mapstructure:"test-dial-fail"` - // Makes it possible to configure which queue backend the p2p - // layer uses. Options are: "fifo" and "priority", - // with the default being "priority". + // layer uses. Options are: "fifo" and "simple-priority", and "priority", + // with the default being "simple-priority". 
QueueType string `mapstructure:"queue-type"` } @@ -717,6 +705,7 @@ func DefaultP2PConfig() *P2PConfig { ExternalAddress: "", UPNP: false, MaxConnections: 64, + MaxOutgoingConnections: 12, MaxIncomingConnectionAttempts: 100, FlushThrottleTimeout: 100 * time.Millisecond, // The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes. @@ -728,11 +717,9 @@ func DefaultP2PConfig() *P2PConfig { SendRate: 5120000, // 5 mB/s RecvRate: 5120000, // 5 mB/s PexReactor: true, - AllowDuplicateIP: false, HandshakeTimeout: 20 * time.Second, DialTimeout: 3 * time.Second, - TestDialFail: false, - QueueType: "priority", + QueueType: "simple-priority", } } @@ -751,6 +738,9 @@ func (cfg *P2PConfig) ValidateBasic() error { if cfg.RecvRate < 0 { return errors.New("recv-rate can't be negative") } + if cfg.MaxOutgoingConnections > cfg.MaxConnections { + return errors.New("max-outgoing-connections cannot be larger than max-connections") + } return nil } @@ -758,7 +748,6 @@ func (cfg *P2PConfig) ValidateBasic() error { func TestP2PConfig() *P2PConfig { cfg := DefaultP2PConfig() cfg.ListenAddress = "tcp://127.0.0.1:36656" - cfg.AllowDuplicateIP = true cfg.FlushThrottleTimeout = 10 * time.Millisecond return cfg } @@ -768,9 +757,10 @@ func TestP2PConfig() *P2PConfig { // MempoolConfig defines the configuration options for the Tendermint mempool. type MempoolConfig struct { - RootDir string `mapstructure:"home"` - Recheck bool `mapstructure:"recheck"` - Broadcast bool `mapstructure:"broadcast"` + RootDir string `mapstructure:"home"` + + // Whether to broadcast transactions to other nodes + Broadcast bool `mapstructure:"broadcast"` // Maximum number of transactions in the mempool Size int `mapstructure:"size"` @@ -817,7 +807,6 @@ type MempoolConfig struct { // DefaultMempoolConfig returns a default configuration for the Tendermint mempool. 
func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ - Recheck: true, Broadcast: true, // Each signature verification takes .5ms, Size reduced until we implement // ABCI Recheck diff --git a/config/db.go b/config/db.go index f508354e07..bbc2869446 100644 --- a/config/db.go +++ b/config/db.go @@ -25,5 +25,6 @@ type DBProvider func(*DBContext) (dbm.DB, error) // specified in the Config. func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { dbType := dbm.BackendType(ctx.Config.DBBackend) + return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) } diff --git a/config/toml.go b/config/toml.go index ee5df22f6a..e2e570ed9e 100644 --- a/config/toml.go +++ b/config/toml.go @@ -292,7 +292,9 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}" ####################################################### [p2p] -# Select the p2p internal queue +# Select the p2p internal queue. +# Options are: "fifo" and "simple-priority", and "priority", +# with the default being "simple-priority". queue-type = "{{ .P2P.QueueType }}" # Address to listen for incoming connections @@ -305,13 +307,6 @@ laddr = "{{ .P2P.ListenAddress }}" # example: 159.89.10.97:26656 external-address = "{{ .P2P.ExternalAddress }}" -# Comma separated list of seed nodes to connect to -# We only use these if we can’t connect to peers in the addrbook -# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead. -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 -seeds = "{{ .P2P.Seeds }}" - # Comma separated list of peers to be added to the peer store # on startup. Either BootstrapPeers or PersistentPeers are # needed for peer discovery @@ -326,6 +321,10 @@ upnp = {{ .P2P.UPNP }} # Maximum number of connections (inbound and outbound). max-connections = {{ .P2P.MaxConnections }} +# Maximum number of connections reserved for outgoing +# connections. 
Must be less than max-connections +max-outgoing-connections = {{ .P2P.MaxOutgoingConnections }} + # Rate limits the number of incoming connection attempts per IP address. max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }} @@ -336,9 +335,6 @@ pex = {{ .P2P.PexReactor }} # Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055 private-peer-ids = "{{ .P2P.PrivatePeerIDs }}" -# Toggle to disable guard against peers connecting from the same ip. -allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }} - # Peer connection configuration. handshake-timeout = "{{ .P2P.HandshakeTimeout }}" dial-timeout = "{{ .P2P.DialTimeout }}" @@ -365,7 +361,11 @@ recv-rate = {{ .P2P.RecvRate }} ####################################################### [mempool] -recheck = {{ .Mempool.Recheck }} +# recheck has been moved from a config option to a global +# consensus param in v0.36 +# See https://github.com/tendermint/tendermint/issues/8244 for more information. 
+ +# Set true to broadcast transactions in the mempool to other nodes broadcast = {{ .Mempool.Broadcast }} # Maximum number of transactions in the mempool diff --git a/dash/quorum/validator_conn_executor_test.go b/dash/quorum/validator_conn_executor_test.go index 376376fa93..6bf50f26e8 100644 --- a/dash/quorum/validator_conn_executor_test.go +++ b/dash/quorum/validator_conn_executor_test.go @@ -374,6 +374,7 @@ func TestFinalizeBlock(t *testing.T) { testifymock.Anything, testifymock.Anything, testifymock.Anything, + testifymock.Anything, testifymock.Anything).Return(nil) blockExec := sm.NewBlockExecutor( @@ -669,7 +670,7 @@ func (app *testApp) Info(context.Context, *abci.RequestInfo) (*abci.ResponseInfo } func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - app.ByzantineValidators = req.ByzantineValidators + app.ByzantineValidators = req.Misbehavior txs := make([]*abci.ExecTxResult, 0, len(req.Txs)) for _, tx := range req.Txs { txs = append(txs, &abci.ExecTxResult{Data: tx}) diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 7649b7cde7..07acd70b13 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -169,11 +169,12 @@ Try running these commands: -> data: {"size":0} -> data.hex: 0x7B2273697A65223A307D -> commit +> finalize_block "abc" -> code: OK --> data.hex: 0x0000000000000000 +-> code: OK +-> data.hex: 0x0200000000000000 -> finalize_block "abc" +> commit -> code: OK > info @@ -181,34 +182,63 @@ Try running these commands: -> data: {"size":1} -> data.hex: 0x7B2273697A65223A317D -> commit --> code: OK --> data.hex: 0x0200000000000000 - > query "abc" -> code: OK -> log: exists --> height: 2 +-> height: 1 +-> key: abc +-> key.hex: 616263 -> value: abc -> value.hex: 616263 -> finalize_block "def=xyz" +> finalize_block "def=xyz" "ghi=123" -> code: OK +-> code: OK +-> code: OK +-> data.hex: 0x0600000000000000 > commit -> code: OK --> data.hex: 
0x0400000000000000 > query "def" -> code: OK -> log: exists --> height: 3 +-> height: 2 +-> key: def +-> key.hex: 646566 -> value: xyz -> value.hex: 78797A + +> prepare_proposal "preparedef" +-> code: OK +-> log: Succeeded. Tx: def action: ADDED +-> code: OK +-> log: Succeeded. Tx: preparedef action: REMOVED + +> process_proposal "def" +-> code: OK +-> status: ACCEPT + +> process_proposal "preparedef" +-> code: OK +-> status: REJECT + +> prepare_proposal + +> process_proposal +-> code: OK +-> status: ACCEPT + +> finalize_block +-> code: OK +-> data.hex: 0x0600000000000000 + +> commit +-> code: OK ``` -Note that if we do `finalize_block "abc"` it will store `(abc, abc)`, but if -we do `finalize_block "abc=efg"` it will store `(abc, efg)`. +Note that if we do `finalize_block "abc" ...` it will store `(abc, abc)`, but if +we do `finalize_block "abc=efg" ...` it will store `(abc, efg)`. Similarly, you could put the commands in a file and run `abci-cli --verbose batch < myfile`. diff --git a/docs/introduction/quick-start.md b/docs/introduction/quick-start.md index 040da8eb2f..74baf7201c 100644 --- a/docs/introduction/quick-start.md +++ b/docs/introduction/quick-start.md @@ -106,10 +106,10 @@ Next, use the `tendermint testnet` command to create four directories of config Before you can start the network, you'll need peers identifiers (IPs are not enough and can change). We'll refer to them as ID1, ID2, ID3, ID4. 
```sh -tendermint show_node_id --home ./mytestnet/node0 -tendermint show_node_id --home ./mytestnet/node1 -tendermint show_node_id --home ./mytestnet/node2 -tendermint show_node_id --home ./mytestnet/node3 +tendermint show-node-id --home ./mytestnet/node0 +tendermint show-node-id --home ./mytestnet/node1 +tendermint show-node-id --home ./mytestnet/node2 +tendermint show-node-id --home ./mytestnet/node3 ``` Finally, from each machine, run: diff --git a/docs/introduction/what-is-tendermint.md b/docs/introduction/what-is-tendermint.md index 417152d748..18fe5ce776 100644 --- a/docs/introduction/what-is-tendermint.md +++ b/docs/introduction/what-is-tendermint.md @@ -103,9 +103,9 @@ Another example of a cryptocurrency application built on Tendermint is to Tendermint, but is more opinionated about how the state is managed, and requires that all application behaviour runs in potentially many docker containers, modules it calls "chaincode". It uses an -implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf). +implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf) from a team at IBM that is augmented to handle potentially non-deterministic -chaincode It is possible to implement this docker-based behaviour as a ABCI app +chaincode. It is possible to implement this docker-based behaviour as an ABCI app in Tendermint, though extending Tendermint to handle non-determinism remains for future work. diff --git a/docs/nodes/README.md b/docs/nodes/README.md index fd9056e0dd..a0f14e6c88 100644 --- a/docs/nodes/README.md +++ b/docs/nodes/README.md @@ -45,4 +45,4 @@ We will cover the various types of node types within Tendermint. Validators are nodes that participate in the security of a network. 
Validators have an associated power in Tendermint, this power can represent stake in a [proof of stake](https://en.wikipedia.org/wiki/Proof_of_stake) system, reputation in [proof of authority](https://en.wikipedia.org/wiki/Proof_of_authority) or any sort of measurable unit. Running a secure and consistently online validator is crucial to a networks health. A validator must be secure and fault tolerant, it is recommended to run your validator with 2 or more sentry nodes. -As a validator there is the potential to have your weight reduced, this is defined by the application. Tendermint is notified by the application if a validator should have there weight increased or reduced. Application have different types of malicious behavior which lead to slashing of the validators power. Please check the documentation of the application you will be running in order to find more information. +As a validator there is the potential to have your weight reduced, this is defined by the application. Tendermint is notified by the application if a validator should have their weight increased or reduced. Applications have different types of malicious behavior which lead to slashing of the validators power. Please check the documentation of the application you will be running in order to find more information. 
diff --git a/docs/nodes/metrics.md b/docs/nodes/metrics.md index 1b2e9f0070..46ab9d5fae 100644 --- a/docs/nodes/metrics.md +++ b/docs/nodes/metrics.md @@ -18,40 +18,56 @@ Listen address can be changed in the config file (see The following metrics are available: -| **Name** | **Type** | **Tags** | **Description** | -| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- | -| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods | -| consensus_height | Gauge | | Height of the chain | -| consensus_validators | Gauge | | Number of validators | -| consensus_validators_power | Gauge | | Total voting power of all validators | -| consensus_validator_power | Gauge | | Voting power of the node if in the validator set | -| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator | -| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | -| consensus_missing_validators | Gauge | | Number of validators who did not sign | -| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators | -| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign | -| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators | -| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | -| consensus_rounds | Gauge | | Number of rounds | -| consensus_num_txs | Gauge | | Number of transactions | -| consensus_total_txs | Gauge | | Total number of transactions committed | -| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer | -| consensus_latest_block_height | gauge | | /status sync_info number | -| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 
1 (syncing) | -| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) | -| consensus_block_size_bytes | Gauge | | Block size in bytes | -| evidence_pool_num_evidence | Gauge | | Number of evidence in the evidence pool -| p2p_peers | Gauge | | Number of peers node's connected to | -| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer | -| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer | -| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer | -| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id | -| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer | -| mempool_size | Gauge | | Number of uncommitted transactions | -| mempool_tx_size_bytes | histogram | | transaction sizes in bytes | -| mempool_failed_txs | counter | | number of failed transactions | -| mempool_recheck_times | counter | | number of transactions rechecked in the mempool | -| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms | +| **Name** | **Type** | **Tags** | **Description** | +|-----------------------------------------|-----------|-----------------|--------------------------------------------------------------------------------------------------------------------------------------------| +| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods | +| consensus_height | Gauge | | Height of the chain | +| consensus_validators | Gauge | | Number of validators | +| consensus_validators_power | Gauge | | Total voting power of all validators | +| consensus_validator_power | Gauge | | Voting power of the node if in the validator set | +| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator | +| 
consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | +| consensus_missing_validators | Gauge | | Number of validators who did not sign | +| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators | +| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign | +| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators | +| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | +| consensus_rounds | Gauge | | Number of rounds | +| consensus_num_txs | Gauge | | Number of transactions | +| consensus_total_txs | Gauge | | Total number of transactions committed | +| consensus_block_parts | Counter | peer_id | number of blockparts transmitted by peer | +| consensus_latest_block_height | gauge | | /status sync_info number | +| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) | +| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) | +| consensus_block_size_bytes | Gauge | | Block size in bytes | +| consensus_step_duration | Histogram | step | Histogram of durations for each step in the consensus protocol | +| consensus_block_gossip_receive_latency | Histogram | | Histogram of time taken to receive a block in seconds, measure between when a new block is first discovered to when the block is completed | +| consensus_block_gossip_parts_received | Counter | matches_current | Number of block parts received by the node | +| consensus_quorum_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum | +| consensus_full_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted | +| 
consensus_proposal_timestamp_difference | Histogram | | Difference between the timestamp in the proposal message and the local time of the validator at the time it received the message | +| consensus_vote_extension_receive_count | Counter | status | Number of vote extensions received | +| consensus_proposal_receive_count | Counter | status | Total number of proposals received by the node since process start | +| consensus_proposal_create_count | Counter | | Total number of proposals created by the node since process start | +| consensus_round_voting_power_percent | Gauge | vote_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round | +| consensus_late_votes | Counter | vote_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. | +| evidence_pool_num_evidence | Gauge | | Number of evidence in the evidence pool | +| p2p_peers | Gauge | | Number of peers node's connected to | +| p2p_peer_receive_bytes_total | Counter | peer_id, chID | number of bytes per channel received from a given peer | +| p2p_peer_send_bytes_total | Counter | peer_id, chID | number of bytes per channel sent to a given peer | +| p2p_peer_pending_send_bytes | Gauge | peer_id | number of pending bytes to be sent to a given peer | +| p2p_router_peer_queue_recv | Histogram | | The time taken to read off of a peer's queue before sending on the connection | +| p2p_router_peer_queue_send | Histogram | | The time taken to send on a peer's queue which will later be sent on the connection | +| p2p_router_channel_queue_send | Histogram | | The time taken to send on a p2p channel's queue which will later be consumed by the corresponding service | +| p2p_router_channel_queue_dropped_msgs | Counter | ch_id | The number of messages dropped from a peer's queue for a specific p2p channel | +| p2p_peer_queue_msg_size | Gauge | ch_id | The size of messages sent 
over a peer's queue for a specific p2p channel | +| mempool_size | Gauge | | Number of uncommitted transactions | +| mempool_tx_size_bytes | Histogram | | transaction sizes in bytes | +| mempool_failed_txs | Counter | | number of failed transactions | +| mempool_recheck_times | Counter | | number of transactions rechecked in the mempool | +| state_block_processing_time | Histogram | | time between BeginBlock and EndBlock in ms | +| state_consensus_param_updates | Counter | | number of consensus parameter updates returned by the application since process start | +| state_validator_set_updates | Counter | | number of validator set updates returned by the application since process start | ## Useful queries diff --git a/docs/nodes/running-in-production.md b/docs/nodes/running-in-production.md index d8d73689a5..40ad26b5ed 100644 --- a/docs/nodes/running-in-production.md +++ b/docs/nodes/running-in-production.md @@ -295,14 +295,6 @@ flush-throttle-timeout=10 max-packet-msg-payload-size=10240 # 10KB ``` -- `mempool.recheck` - -After every block, Tendermint rechecks every transaction left in the -mempool to see if transactions committed in that block affected the -application state, so some of the transactions left may become invalid. -If that does not apply to your application, you can disable it by -setting `mempool.recheck=false`. 
- - `mempool.broadcast` Setting this to false will stop the mempool from relaying transactions diff --git a/docs/package-lock.json b/docs/package-lock.json index 447c8c27d0..da9a0c1c27 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -13,7 +13,7 @@ }, "devDependencies": { "@vuepress/plugin-html-redirect": "^0.1.4", - "watchpack": "^2.3.1" + "watchpack": "^2.4.0" } }, "node_modules/@algolia/cache-browser-local-storage": { @@ -6210,9 +6210,9 @@ } }, "node_modules/eventsource": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.0.tgz", - "integrity": "sha512-VSJjT5oCNrFvCS6igjzPAt5hBzQ2qPBFIbJ03zLI9SE0mxwZpMw6BfJrbFHm1a141AavMEB8JHmBhWAd66PfCg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.1.tgz", + "integrity": "sha512-qV5ZC0h7jYIAOhArFJgSfdyz6rALJyb270714o7ZtNnw2WSJ+eexhKtE0O8LYPRsHZHf2osHKZBxGPvm3kPkCA==", "dependencies": { "original": "^1.0.0" }, @@ -12288,9 +12288,9 @@ } }, "node_modules/terser": { - "version": "4.8.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz", - "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==", + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.1.tgz", + "integrity": "sha512-4GnLC0x667eJG0ewJTa6z/yXrbLGv80D9Ru6HIpCQmO+Q4PfEtBFi0ObSckqwL6VyQv/7ENJieXHo2ANmdQwgw==", "dependencies": { "commander": "^2.20.0", "source-map": "~0.6.1", @@ -13471,9 +13471,9 @@ } }, "node_modules/watchpack": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.3.1.tgz", - "integrity": "sha512-x0t0JuydIo8qCNctdDrn1OzH/qDzk2+rdCOC3YzumZ42fiMqmQ7T3xQurykYMhYfHaPHTp4ZxAx2NfUo1K6QaA==", + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", + "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", "dev": 
true, "dependencies": { "glob-to-regexp": "^0.4.1", @@ -19046,9 +19046,9 @@ "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==" }, "eventsource": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.0.tgz", - "integrity": "sha512-VSJjT5oCNrFvCS6igjzPAt5hBzQ2qPBFIbJ03zLI9SE0mxwZpMw6BfJrbFHm1a141AavMEB8JHmBhWAd66PfCg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.1.tgz", + "integrity": "sha512-qV5ZC0h7jYIAOhArFJgSfdyz6rALJyb270714o7ZtNnw2WSJ+eexhKtE0O8LYPRsHZHf2osHKZBxGPvm3kPkCA==", "requires": { "original": "^1.0.0" } @@ -23925,9 +23925,9 @@ "integrity": "sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==" }, "terser": { - "version": "4.8.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz", - "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==", + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.1.tgz", + "integrity": "sha512-4GnLC0x667eJG0ewJTa6z/yXrbLGv80D9Ru6HIpCQmO+Q4PfEtBFi0ObSckqwL6VyQv/7ENJieXHo2ANmdQwgw==", "requires": { "commander": "^2.20.0", "source-map": "~0.6.1", @@ -24873,9 +24873,9 @@ } }, "watchpack": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.3.1.tgz", - "integrity": "sha512-x0t0JuydIo8qCNctdDrn1OzH/qDzk2+rdCOC3YzumZ42fiMqmQ7T3xQurykYMhYfHaPHTp4ZxAx2NfUo1K6QaA==", + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", + "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", "dev": true, "requires": { "glob-to-regexp": "^0.4.1", diff --git a/docs/package.json b/docs/package.json index 77ea71b3ec..eb3d96839c 100644 --- a/docs/package.json +++ b/docs/package.json @@ -8,14 +8,14 @@ }, "devDependencies": { 
"@vuepress/plugin-html-redirect": "^0.1.4", - "watchpack": "^2.3.1" + "watchpack": "^2.4.0" }, "scripts": { "preserve": "./pre.sh", "serve": "trap 'exit 0' SIGINT; vuepress dev --no-cache", "postserve": "./post.sh", "prebuild": "./pre.sh", - "build": "trap 'exit 0' SIGINT; vuepress build --no-cache", + "build": "trap 'exit 0' SIGINT; vuepress build --no-cache --silent", "postbuild": "./post.sh" }, "author": "", diff --git a/docs/rfc/images/node-dependency-tree.svg b/docs/rfc/images/node-dependency-tree.svg new file mode 100644 index 0000000000..6d95e0e155 --- /dev/null +++ b/docs/rfc/images/node-dependency-tree.svg @@ -0,0 +1,3 @@ + + +
Node
Node
Statesync
Statesync
Blocksync
Blocksync
Consensus
Consensus
Mempool
Mempool
Evidence
Evidence
Block Executor
Block Executor
Blockchain
Blockchain
Evidence
Evidence
PEX
PEX
Peer Store
Peer Store
Peer Networking
Peer Networking
RPC External
RPC External
ABCI Layer
ABCI Layer
Events System
Events System
RPC Internal
RPC Internal
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/rfc/rfc-009-consensus-parameter-upgrades.md b/docs/rfc/rfc-009-consensus-parameter-upgrades.md index 60be878df1..d5077840db 100644 --- a/docs/rfc/rfc-009-consensus-parameter-upgrades.md +++ b/docs/rfc/rfc-009-consensus-parameter-upgrades.md @@ -31,12 +31,12 @@ not reference the new parameters. Any nodes joining the network with the newer version of Tendermint will have the new consensus parameters. Tendermint will need to handle this case so that new versions of Tendermint with new consensus parameters can still validate old blocks correctly without having to do anything overly complex -or hacky. +or hacky. ### Allowing Developer-Defined Values and the `EndBlock` Problem When new consensus parameters are added, application developers may wish to set -values for them so that the developer-defined values may be used as soon as the +values for them so that the developer-defined values may be used as soon as the software upgrades. We do not currently have a clean mechanism for handling this. Consensus parameter updates are communicated from the application to Tendermint @@ -51,7 +51,7 @@ can take effect is height `H+1`. As of now, height `H` must run with the default ### Hash Compatibility -This section discusses possible solutions to the problem of maintaining backwards-compatibility +This section discusses possible solutions to the problem of maintaining backwards-compatibility of hashed parameters while adding new parameters. 
#### Never Hash Defaults diff --git a/docs/rfc/rfc-013-abci++.md b/docs/rfc/rfc-013-abci++.md index 0289c187ec..6e83c9aa22 100644 --- a/docs/rfc/rfc-013-abci++.md +++ b/docs/rfc/rfc-013-abci++.md @@ -3,7 +3,7 @@ ## Changelog - 2020-01-11: initialized -- 2021-02-11: Migrate RFC to tendermint repo (Originally [RFC 004](https://github.com/tendermint/spec/pull/254)) +- 2022-02-11: Migrate RFC to tendermint repo (Originally [RFC 004](https://github.com/tendermint/spec/pull/254)) ## Author(s) diff --git a/docs/rfc/rfc-016-node-architecture.md b/docs/rfc/rfc-016-node-architecture.md new file mode 100644 index 0000000000..29098d2973 --- /dev/null +++ b/docs/rfc/rfc-016-node-architecture.md @@ -0,0 +1,83 @@ +# RFC 016: Node Architecture + +## Changelog + +- April 8, 2022: Initial draft (@cmwaters) +- April 15, 2022: Incorporation of feedback + +## Abstract + +The `node` package is the entry point into the Tendermint codebase, used both by the command line and programmatically to create the nodes that make up a network. The package has suffered the most from the evolution of the codebase, becoming bloated as developers clipped on their bits of code here and there to get whatever feature they wanted working. + +The decisions made at the node level have the biggest impact to simplifying the protocols within them, unlocking better internal designs and making Tendermint more intuitive to use and easier to understand from the outside. Work, in minor increments, has already begun on this section of the codebase. This document exists to spark forth the necessary discourse in a few related areas that will help the team to converge on the long term makeup of the node. + +## Discussion + +The following is a list of points of discussion around the architecture of the node: + +### Dependency Tree + +The node object is currently stuffed with every component that possibly exists within Tendermint. 
In the constructor, all objects are built and interlaid with one another in some awkward dance. My guiding principle is that the node should only be made up of the components that it wants to have direct control of throughout its life. The node is a service which currently has the purpose of starting other services up in a particular order and stopping them all when commanded to do so. However, there are many services which are not direct dependents i.e. the mempool and evidence services should only be working when the consensus service is running. I propose to form more of a hierarchical structure of dependents which forces us to be clear about the relations that one component has to the other. More concretely, I propose the following dependency tree: + +![node dependency tree](./images/node-dependency-tree.svg) + +Many of the further discussion topics circle back to this representation of the node. + +It's also important to distinguish two dimensions which may require different characteristics of the architecture. There is the starting and stopping of services and their general lifecycle management. What is the correct order of operations to starting a node for example. Then there is the question of the needs of the service during actual operation. Then there is the question of what resources each service needs access to during its operation. Some need to publish events, others need access to data stores, and so forth. + +An alternative model and one that perhaps better suits the latter of these dimensions is the notion of an internal message passing system. Either the events bus or p2p layer could serve as a viable transport. This would essentially allow all services to communicate with any other service and could perhaps provide a solution to the coordination problem (presented below) without a centralized coordinator. 
The other main advantage is that such a system would be more robust to disruptions and changes to the code which may make a hierarchical structure quickly outdated and suboptimal. The addition of message routing is an added complexity to implement, will increase the degree of asynchronicity in the system and may make it harder to debug problems that are across multiple services. + +### Coordination of State Advancing Mechanisms + +Advancement of state in Tendermint is simply defined in heights: If the node is at height n, how does it get to height n + 1 and so on. Based on this definition we have three components that help a node to advance in height: consensus, statesync and blocksync. The way these components behave currently is very tightly coupled to one another with references passed back and forth. My guiding principle is that each of these should be able to operate completely independently of each other, e.g. a node should be able to run solely blocksync indefinitely. There have been several ideas suggested towards improving this flow. I've been leaning strongly towards a centralized system, whereby an orchestrator (in this case the node) decides what services to start and stop. +In a decentralized message passing system, individual services make their decision based upon a "global" shared state i.e. if my height is less than 10 below the average peer height, I as consensus, should stop (knowing that blocksync has the same condition for starting). As the example illustrates, each mechanism will still need to be aware of the presence of other mechanisms. + +Both centralized and decentralized systems rely on the communication of the node's current height and a judgement on the height of the head of the chain. The latter, working out the head of the chain, is quite a difficult challenge as there is nothing preventing the node from acting maliciously and providing a different height. 
Currently both blocksync, consensus (and to a certain degree statesync), have parallel systems where peers communicate their height. This could be streamlined with the consensus (or even the p2p layer), broadcasting peer heights and either the node or the other state advancing mechanisms acting accordingly. + +Currently, when a node starts, it turns on every service that it is attached to. This means that while a node is syncing up by requesting blocks, it is also receiving transactions and votes, as well as snapshot and block requests. This is a needless use of bandwidth. An implementation of an orchestrator, regardless of whether the system is hierarchical or not, should look to be able to open and close channels dynamically and effectively broadcast which services it is running. Integrating this with service discovery may also lead to a better service to peers. + +The orchestrator allows for some deal of variability in how a node is constructed. Does it just run blocksync, shadowing the head of the chain and be highly available for querying. Does it rely on state sync at all? An important question that arises from this dynamism is we ideally want to encourage nodes to provide as much of their resources as possible so that there is a healthy amount of providers to consumers. Do we make all services compulsory or allow for them to be disabled? Arguably it's possible that a user forks the codebase and rips out the blocksync code because they want to reduce bandwidth so this is more a question of how easy do we want to make this for users. + +### Block Executor + +The block executor is an important component that is currently used by both consensus and blocksync to execute transactions and update application state. Principally, I think it should be the only component that can write (and possibly even read) the block and state stores, and we should clean up other direct dependencies on the storage engine if we can. 
This would mean: + +- The reactors Consensus, BlockSync and StateSync should all import the executor for advancing state i.e. `ApplyBlock` and `BootstrapState`. +- Pruning should also be a concern of the block executor as well as `FinalizeBlock` and `Commit`. This can simplify consensus to focus just on the consensus part. + +### The Interprocess communication systems: RPC, P2P, ABCI, and Events + +The schematic supplied above shows the relations between the different services, the node, the block executor, and the storage layer. Represented as colored dots are the components responsible for different roles of interprocess communication (IPC). These components permeate throughout the code base, seeping into most services. What can provide powerful functionality on one hand can also become a twisted vine, creating messy corner cases and convoluting the protocols themselves. A lot of the thinking around +how we want our IPC systems to function has been summarised in this [RFC](./rfc-002-ipc-ecosystem.md). In this section, I'd like to focus the reader on the relation between the IPC and the node structure. An issue that has frequently arisen is that the RPC has control of the components where it strikes me as being more logical for the component to dictate the information that is emitted/available and the knobs it wishes to expose. The RPC is also inextricably tied to the node instance and has situations where it is passed pointers directly to the storage engine and other components. + +I am currently convinced of the approach that the p2p layer takes and would like to see other IPC components follow suit. This would mean that the RPC and events system would be constructed in the node yet would pass the adequate methods to register endpoints and topics to the sub components. 
For example, + +```go +// Methods from the RPC and event bus that would be passed into the constructor of components like "consensus" +// NOTE: This is a hypothetical construction to convey the idea. An actual implementation may differ. +func RegisterRoute(path string, handler func(http.ResponseWriter, *http.Request)) + +func RegisterTopic(name string) EventPublisher + +type EventPublisher func (context.Context, types.EventData, []abci.Event) +``` + +This would give the components control over the information they want to expose and keep all relevant logic within that package. It accommodates more to a dynamic system where services can switch on and off. Each component would also receive access to the logger and metrics system for introspection and debuggability. + +#### IPC Rubric + +I'd like to aim to reach a state where we as a team have either an implicit or explicit rubric which can determine, in the event of some new need to communicate information, what tool it should use for doing this. In the case of inter node communication, this is obviously the p2p stack (with perhaps the exception of the light client). Metrics and logging also have clear usage patterns. RPC and the events system are less clear. The RPC is used for debugging data and fine tuned operator control as it is for general public querying and transaction submission. The RPC is also known to have been plumbed back into the application for historical queries. The events system, similarly, is used for consuming transaction events as it is for the testing of consensus state transitions. + +Principally, I think we should look to change our language away from what the actual transport is and more towards what it's being used for and to whom. We call it a peer to peer layer and not the underlying tcp connection. In the same way, we should look to split RPC into an operator interface (RPC Internal), a public interface (RPC External) and a bidirectional ABCI. 
+ +### Separation of consumers and suppliers + +When a service such as blocksync is turned on, it automatically begins requesting blocks to verify and apply them as it also tries to serve them to other peers catching up. We should look to distinguish these two aspects: supplying of information and consuming of information in many of these components. More concretely, I'd suggest: + +- The blocksync and statesync service, i.e. supplying information for those trying to catch up should only start running once a node has caught up i.e. after running the blocksync and/or state sync *processes* +- The blocksync and state sync processes have defined termination clauses that inform the orchestrator when they are done and where they finished. + - One way of achieving this would be that every process both passes and returns the `State` object + - In some cases, a node may specify that it wants to run blocksync indefinitely. +- The mempool should also indicate whether it wants to receive transactions or to send them only (one-directional mempool) +- Similarly, the light client itself only requests information whereas the light client service (currently part of state sync) can do both. +- This distinction needs to be communicated in the p2p layer handshake itself but should also be changeable over the lifespan of the connection. diff --git a/docs/rfc/rfc-017-abci++-vote-extension-propag.md b/docs/rfc/rfc-017-abci++-vote-extension-propag.md new file mode 100644 index 0000000000..15d08f7bad --- /dev/null +++ b/docs/rfc/rfc-017-abci++-vote-extension-propag.md @@ -0,0 +1,571 @@ +# RFC 017: ABCI++ Vote Extension Propagation + +## Changelog + +- 11-Apr-2022: Initial draft (@sergio-mena). +- 15-Apr-2022: Addressed initial comments. First complete version (@sergio-mena). +- 09-May-2022: Addressed all outstanding comments. 
+ +## Abstract + +According to the +[ABCI++ specification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md) +(as of 11-Apr-2022), a validator MUST provide a signed vote extension for each non-`nil` precommit vote +of height *h* that it uses to propose a block in height *h+1*. When a validator is up to +date, this is easy to do, but when a validator needs to catch up this is far from trivial as this data +cannot be retrieved from the blockchain. + +This RFC presents and compares the different options to address this problem, which have been proposed +in several discussions by the Tendermint Core team. + +## Document Structure + +The RFC is structured as follows. In the [Background](#background) section, +subsections [Problem Description](#problem-description) and [Cases to Address](#cases-to-address) +explain the problem at hand from a high level perspective, i.e., abstracting away from the current +Tendermint implementation. In contrast, subsection +[Current Catch-up Mechanisms](#current-catch-up-mechanisms) delves into the details of the current +Tendermint code. + +In the [Discussion](#discussion) section, subsection [Solutions Proposed](#solutions-proposed) is also +worded abstracting away from implementation details, whilst subsections +[Feasibility of the Proposed Solutions](#feasibility-of-the-proposed-solutions) and +[Current Limitations and Possible Implementations](#current-limitations-and-possible-implementations) +analyze the viability of one of the proposed solutions in the context of Tendermint's architecture +based on reactors. Finally, [Formalization Work](#formalization-work) briefly discusses the work +still needed to demonstrate the correctness of the chosen solution. + +The high level subsections are aimed at readers who are familiar with consensus algorithms, in +particular with the one described in the Tendermint white paper, but who are not necessarily +acquainted with the details of the Tendermint codebase. 
The other subsections, which go into +implementation details, are best understood by engineers with deep knowledge of the implementation of +Tendermint's blocksync and consensus reactors. + +## Background + +### Basic Definitions + +This document assumes that all validators have equal voting power for the sake of simplicity. This is done +without loss of generality. + +There are two types of votes in Tendermint: *prevotes* and *precommits*. Votes can be `nil` or refer to +a proposed block. This RFC focuses on precommits, +also known as *precommit votes*. In this document we sometimes call them simply *votes*. + +Validators send precommit votes to their peer nodes in *precommit messages*. According to the +[ABCI++ specification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md), +a precommit message MUST also contain a *vote extension*. +This mandatory vote extension can be empty, but MUST be signed with the same key as the precommit +vote (i.e., the sending validator's). +Nevertheless, the vote extension is signed independently from the vote, so a vote can be separated from +its extension. +The reason for vote extensions to be mandatory in precommit messages is that, otherwise, a (malicious) +node can omit a vote extension while still providing/forwarding/sending the corresponding precommit vote. + +The validator set at height *h* is denoted *valseth*. A *commit* for height *h* consists of more +than *2nh/3* precommit votes voting for a block *b*, where *nh* denotes the size of +*valseth*. A commit does not contain `nil` precommit votes, and all votes in it refer to the +same block. An *extended commit* is a *commit* where every precommit vote has its respective vote extension +attached. 
+ +### Problem Description + +In the version of [ABCI](https://github.com/tendermint/spec/blob/4fb99af/spec/abci/README.md) present up to +Tendermint v0.35, for any height *h*, a validator *v* MUST have the decided block *b* and a commit for +height *h* in order to decide at height *h*. Then, *v* just needs a commit for height *h* to propose at +height *h+1*, in the rounds of *h+1* where *v* is a proposer. + +In [ABCI++](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md), +the information that a validator *v* MUST have to be able to decide in *h* does not change with +respect to pre-existing ABCI: the decided block *b* and a commit for *h*. +In contrast, for proposing in *h+1*, a commit for *h* is not enough: *v* MUST now have an extended +commit. + +When a validator takes an active part in consensus at height *h*, it has all the data it needs in memory, +in its consensus state, to decide on *h* and propose in *h+1*. Things are not so easy in the cases when +*v* cannot take part in consensus because it is late (e.g., it falls behind, it crashes +and recovers, or it just starts after the others). If *v* does not take part, it cannot actively +gather precommit messages (which include vote extensions) in order to decide. +Before ABCI++, this was not a problem: full nodes are supposed to persist past blocks in the block store, +so other nodes would realise that *v* is late and send it the missing decided block at height *h* and +the corresponding commit (kept in block *h+1*) so that *v* can catch up. +However, we cannot apply this catch-up technique for ABCI++, as the vote extensions, which are part +of the needed *extended commit* are not part of the blockchain. + +### Cases to Address + +Before we tackle the description of the possible cases we need to address, let us describe the following +incremental improvement to the ABCI++ logic. 
Upon decision, a full node persists (e.g., in the block +store) the extended commit that allowed the node to decide. For the moment, let us assume the node only +needs to keep its *most recent* extended commit, and MAY remove any older extended commits from persistent +storage. +This improvement is so obvious that all solutions described in the [Discussion](#discussion) section use +it as a building block. Moreover, it completely addresses by itself some of the cases described in this +subsection. + +We now describe the cases (i.e. possible *runs* of the system) that have been raised in different +discussions and need to be addressed. They are (roughly) ordered from easiest to hardest to deal with. + +- **(a)** *Happy path: all validators advance together, no crash*. + + This case is included for completeness. All validators have taken part in height *h*. + Even if some of them did not manage to send a precommit message for the decided block, they all + receive enough precommit messages to be able to decide. As vote extensions are mandatory in + precommit messages, every validator *v* trivially has all the information, namely the decided block + and the extended commit, needed to propose in height *h+1* for the rounds in which *v* is the + proposer. + + No problem to solve here. + +- **(b)** *All validators advance together, then all crash at the same height*. + + This case has been raised in some discussions, the main concern being whether the vote extensions + for the previous height would be lost across the network. With the improvement described above, + namely persisting the latest extended commit at decision time, this case is solved. + When a crashed validator recovers, it recovers the last extended commit from persistent storage + and handshakes with the Application. + If need be, it also reconstructs messages for the unfinished height + (including all precommits received) from the WAL. + Then, the validator can resume where it was at the time of the crash. 
Thus, as extensions are + persisted, either in the WAL (in the form of received precommit messages), or in the latest + extended commit, the only way that vote extensions needed to start the next height could be lost + forever would be if all validators crashed and never recovered (e.g. disk corruption). + Since a *correct* node MUST eventually recover, this violates Tendermint's assumption of more than + *2nh/3* correct validators for every height *h*. + + No problem to solve here. + +- **(c)** *Lagging majority*. + + Let us assume the validator set does not change between *h* and *h+1*. + Such a lagging majority is not possible by the nature of the Tendermint algorithm, which requires more + than *2nh/3* precommit votes for some round of height *h* in order to make progress. + So, only up to *nh/3* validators can lag behind. + + On the other hand, for the case where there are changes to the validator set between *h* and + *h+1* please see case (d) below, where the extreme case is discussed. + +- **(d)** *Validator set changes completely between* h *and* h+1. + + If sets *valseth* and *valseth+1* are disjoint, + more than *2nh/3* of validators in height *h* should + have actively participated in consensus in *h*. So, as of height *h*, only a minority of validators + in *h* can be lagging behind, although they could all lag behind from *h+1* on, as they are no + longer validators, only full nodes. This situation falls under the assumptions of case (h) below. + + As for validators in *valseth+1*, as they were not validators as of height *h*, they + could all be lagging behind by that time. However, by the time *h* finishes and *h+1* begins, the + chain will halt until more than *2nh+1/3* of them have caught up and started consensus + at height *h+1*. If set *valseth+1* does not change in *h+2* and subsequent + heights, only up to *nh+1/3* validators will be able to lag behind. Thus, we have + converted this case into case (h) below. 
+ +- **(e)** *Enough validators crash to block the rest*. + + In this case, blockchain progress halts, i.e. surviving full nodes keep increasing rounds + indefinitely, until some of the crashed validators are able to recover. + Those validators that recover first will handshake with the Application and recover at the height + they crashed, which is still the same the nodes that did not crash are stuck in, so they don't need + to catch up. + Further, they had persisted the extended commit for the previous height. Nothing to solve. + + For those validators recovering later, we are in case (h) below. + +- **(f)** *Some validators crash, but not enough to block progress*. + + When the correct processes that crashed recover, they handshake with the Application and resume at + the height they were at when they crashed. As the blockchain did not stop making progress, the + recovered processes are likely to have fallen behind with respect to the progressing majority. + + At this point, the recovered processes are in case (h) below. + +- **(g)** *A new full node starts*. + + The reasoning here also applies to the case when more than one full node are starting. + When the full node starts from scratch, it has no state (its current height is 0). Ignoring + statesync for the time being, the node just needs to catch up by applying past blocks one by one + (after verifying them). + + Thus, the node is in case (h) below. + +- **(h)** *Advancing majority, lagging minority* + + In this case, some nodes are late. More precisely, at the present time, a set of full nodes, + denoted *Lhp*, are falling behind + (e.g., temporary disconnection or network partition, memory thrashing, crashes, new nodes) + an arbitrary + number of heights: + between *hs* and *hp*, where *hs < hp*, and + *hp* is the highest height + any correct full node has reached so far. + + The correct full nodes that reached *hp* were able to decide for *hp-1*. 
+ Therefore, less than *nhp-1/3* validators of *hp-1* can be part + of *Lhp*, since enough up-to-date validators needed to actively participate + in consensus for *hp-1*. + + Since, at the present time, + no node in *Lhp* took part in any consensus between + *hs* and *hp-1*, + the reasoning above can be extended to validator set changes between *hs* and + *hp-1*. This results in the following restriction on the full nodes that can be part of *Lhp*. + + - ∀ *h*, where *hs ≤ h < hp*, + | *valseth* ∩ *Lhp* | *< nh/3* + + If this property does not hold for a particular height *h*, where + *hs ≤ h < hp*, Tendermint could not have progressed beyond *h* and + therefore no full node could have reached *hp* (a contradiction). + + These lagging nodes in *Lhp* need to catch up. They have to obtain the + information needed to make + progress from other nodes. For each height *h* between *hs* and *hp-2*, + this includes the decided block for *h*, and the + precommit votes also for *deciding h* (which can be extracted from the block at height *h+1*). + + At a given height *hc* (where possibly *hc << hp*), + a full node in *Lhp* will consider itself *caught up*, based on the + (maybe out of date) information it is getting from its peers. Then, the node needs to be ready to + propose at height *hc+1*, which requires having received the vote extensions for + *hc*. + As the vote extensions are *not* stored in the blocks, and it is difficult to have strong + guarantees on *when* a late node considers itself caught up, providing the late node with the right + vote extensions for the right height poses a problem. + +At this point, we have described and compared all cases raised in discussions leading up to this +RFC. The list above aims at being exhaustive. The analysis of each case included above makes all of +them converge into case (h). + +### Current Catch-up Mechanisms + +We now briefly describe the current catch-up mechanisms in the reactors concerned in Tendermint. 
+ +#### Statesync + +Full nodes optionally run statesync just after starting, when they start from scratch. +If statesync succeeds, an Application snapshot is installed, and Tendermint jumps from height 0 directly +to the height the Application snapshot represents, without applying the block of any previous height. +Some light blocks are received and stored in the block store for running light-client verification of +all the skipped blocks. Light blocks are incomplete blocks, typically containing the header and the +canonical commit but, e.g., no transactions. They are stored in the block store as "signed headers". + +The statesync reactor is not really relevant for solving the problem discussed in this RFC. We will +nevertheless mention it when needed; in particular, to understand some corner cases. + +#### Blocksync + +The blocksync reactor kicks in after start up or recovery (and, optionally, after statesync is done) +and sends the following messages to its peers: + +- `StatusRequest` to query the height its peers are currently at, and +- `BlockRequest`, asking for blocks of heights the local node is missing. + +Using `BlockResponse` messages received from peers, the blocksync reactor validates each received +block using the block of the following height, saves the block in the block store, and sends the +block to the Application for execution. + +If blocksync has validated and applied the block for the height *previous* to the highest seen in +a `StatusResponse` message, or if no progress has been made after a timeout, the node considers +itself as caught up and switches to the consensus reactor. + +#### Consensus Reactor + +The consensus reactor runs the full Tendermint algorithm. For a validator this means it has to +propose blocks, and send/receive prevote/precommit messages, as mandated by Tendermint, before it can +decide and move on to the next height. 
+ +If a full node that is running the consensus reactor falls behind at height *h*, when a peer node +realises this it will retrieve the canonical commit of *h+1* from the block store, and *convert* +it into a set of precommit votes and will send those to the late node. + +## Discussion + +### Solutions Proposed + +These are the solutions proposed in discussions leading up to this RFC. + +- **Solution 0.** *Vote extensions are made **best effort** in the specification*. + + This is the simplest solution, considered as a way to provide vote extensions in a simple enough + way so that it can be part of v0.36. + It consists in changing the specification so as to not *require* that precommit votes used upon + `PrepareProposal` contain their corresponding vote extensions. In other words, we render vote + extensions optional. + There are strong implications stemming from such a relaxation of the original specification. + + - As a vote extension is signed *separately* from the vote it is extending, an intermediate node + can now remove (i.e., censor) vote extensions from precommit messages at will. + - Further, there is no point anymore in the spec requiring the Application to accept a vote extension + passed via `VerifyVoteExtension` to consider a precommit message valid in its entirety. Remember + this behavior of `VerifyVoteExtension` is adding a constraint to Tendermint's conditions for + liveness. + In this situation, it is better and simpler to just drop the vote extension rejected by the + Application via `VerifyVoteExtension`, but still consider the precommit vote itself valid as long + as its signature verifies. + +- **Solution 1.** *Include vote extensions in the blockchain*. + + Another obvious solution, which has somehow been considered in the past, is to include the vote + extensions and their signatures in the blockchain. 
+ + The blockchain would thus include the extended commit, rather than a regular commit, as the structure + to be canonicalized in the next block. + With this solution, the current mechanisms implemented both in the blocksync and consensus reactors + would still be correct, as all the information a node needs to catch up, and to start proposing when + it considers itself as caught-up, can now be recovered from past blocks saved in the block store. + + This solution has two main drawbacks. + + - As the block format must change, upgrading a chain requires a hard fork. Furthermore, + all existing light client implementations will stop working until they are upgraded to deal with + the new format (e.g., how certain hashes are calculated and/or how certain signatures are checked). + For instance, let us consider IBC, which relies on light clients. An IBC connection between + two chains will be broken if only one chain upgrades. + - The extra information (i.e., the vote extensions) that is now kept in the blockchain is not really + needed *at every height* for a late node to catch up. + - This information is only needed to be able to *propose* at the height the validator considers + itself as caught-up. If a validator is indeed late for height *h*, it is useless (although + correct) for it to call `PrepareProposal`, or `ExtendVote`, since the block is already decided. + - Moreover, some use cases require pretty sizeable vote extensions, which would result in an + important waste of space in the blockchain. + +- **Solution 2.** *Skip* propose *step in Tendermint algorithm*. + + This solution consists in modifying the Tendermint algorithm to skip the *send proposal* step in + heights where the node does not have the required vote extensions to populate the call to + `PrepareProposal`. The main idea behind this is that it should only happen when the validator is late + and, therefore, up-to-date validators have already proposed (and decided) for that height. 
+ A small variation of this solution is, rather than skipping the *send proposal* step, the validator + sends a special *empty* or *bottom* (⊥) proposal to signal other nodes that it is not ready to propose + at (any round of) the current height. + + The appeal of this solution is its simplicity. A possible implementation does not need to extend + the data structures, or change the current catch-up mechanisms implemented in the blocksync or + in the consensus reactor. When we lack the needed information (vote extensions), we simply rely + on another correct validator to propose a valid block in other rounds of the current height. + + However, this solution can be attacked by a byzantine node in the network in the following way. + Let us consider the following scenario: + + - all validators in *valseth* send out precommit messages, with vote extensions, + for height *h*, round 0, roughly at the same time, + - all those precommit messages contain non-`nil` precommit votes, which vote for block *b* + - all those precommit messages sent in height *h*, round 0, and all messages sent in + height *h*, round *r > 0* get delayed indefinitely, so, + - all validators in *valseth* keep waiting for enough precommit + messages for height *h*, round 0, needed for deciding in height *h* + - an intermediate (malicious) full node *m* manages to receive block *b*, and gather more than + *2nh/3* precommit messages for height *h*, round 0, + - one way or another, the solution should have either (a) a mechanism for a full node to *tell* + another full node it is late, or (b) a mechanism for a full node to conclude it is late based + on other full nodes' messages; any of these mechanisms should, at the very least, + require the late node receiving the decided block and a commit (not necessarily an extended + commit) for *h*, + - node *m* uses the gathered precommit messages to build a commit for height *h*, round 0, + - in order to convince full nodes that they are late, node *m* either 
(a) *tells* them they + are late, or (b) shows them it (i.e. *m*) is ahead, by sending them block *b*, along with the + commit for height *h*, round 0, + - all full nodes conclude they are late from *m*'s behavior, and use block *b* and the commit for + height *h*, round 0, to decide on height *h*, and proceed to height *h+1*. + + At this point, *all* full nodes, including all validators in *valseth+1*, have advanced + to height *h+1* believing they are late, and so, expecting the *hypothetical* leading majority of + validators in *valseth+1* to propose for *h+1*. As a result, the blockchain + grinds to a halt. + A (rather complex) ad-hoc mechanism would need to be carried out by node operators to roll + back all validators to the precommit step of height *h*, round *r*, so that they can regenerate + vote extensions (remember vote extensions are non-deterministic) and continue execution. + +- **Solution 3.** *Require extended commits to be available at switching time*. + + This one is more involved than all previous solutions, and builds on an idea present in Solution 2: + vote extensions are actually not needed for Tendermint to make progress as long as the + validator is *certain* it is late. + + We define two modes. The first is denoted *catch-up mode*, and Tendermint only calls + `FinalizeBlock` for each height when in this mode. The second is denoted *consensus mode*, in + which the validator considers itself up to date and fully participates in consensus and calls + `PrepareProposal`/`ProcessProposal`, `ExtendVote`, and `VerifyVoteExtension`, before calling + `FinalizeBlock`. + + The catch-up mode does not need vote extension information to make progress, as all it needs is the + decided block at each height to call `FinalizeBlock` and keep the state-machine replication making + progress. The consensus mode, on the other hand, does need vote extension information when + starting every height. + + Validators are in consensus mode by default. 
When a validator in consensus mode falls behind + for whatever reason, e.g. cases (b), (d), (e), (f), (g), or (h) above, we introduce the following + key safety property: + + - for every height *hp*, a full node *f* in *hp* refuses to switch to catch-up + mode **until** there exists a height *h'* such that: + - *f* has received and (light-client) verified the blocks of + all heights *h*, where *hp ≤ h ≤ h'* + - it has received an extended commit for *h'* and has verified: + - the precommit vote signatures in the extended commit + - the vote extension signatures in the extended commit: each is signed with the same + key as the precommit vote it extends + + If the condition above holds for *hp*, namely receiving a valid sequence of blocks in + the *f*'s future, and an extended commit corresponding to the last block in the sequence, then + node *f*: + + - switches to catch-up mode, + - applies all blocks between *hp* and *h'* (calling `FinalizeBlock` only), and + - switches back to consensus mode using the extended commit for *h'* to propose in the rounds of + *h' + 1* where it is the proposer. + + This mechanism, together with the invariant it uses, ensures that the node cannot be attacked by + being fed a block without extensions to make it believe it is late, in a similar way as explained + for Solution 2. + +### Feasibility of the Proposed Solutions + +Solution 0, besides the drawbacks described in the previous section, provides guarantees that are +weaker than the rest. The Application does not have the assurance that more than *2nh/3* vote +extensions will *always* be available when calling `PrepareProposal` at height *h+1*. +This level of guarantees is probably not strong enough for vote extensions to be useful for some +important use cases that motivated them in the first place, e.g., encrypted mempool transactions. 
+ +Solution 1, while being simple in that the changes needed in the current Tendermint codebase would +be rather small, is changing the block format, and would therefore require all blockchains using +Tendermint v0.35 or earlier to hard-fork when upgrading to v0.36. + +Since Solution 2 can be attacked, one might prefer Solution 3, even if it is more involved +to implement. Further, we must elaborate on how we can turn Solution 3, described in abstract +terms in the previous section, into a concrete implementation compatible with the current +Tendermint codebase. + +### Current Limitations and Possible Implementations + +The main limitations affecting the current version of Tendermint are the following. + +- The current version of the blocksync reactor does not use the full + [light client verification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/light-client/README.md) + algorithm to validate blocks coming from other peers. +- The code being structured into the blocksync and consensus reactors, only switching from the + blocksync reactor to the consensus reactor is supported; switching in the opposite direction is + not supported. Alternatively, the consensus reactor could have a mechanism allowing a late node + to catch up by skipping calls to `PrepareProposal`/`ProcessProposal`, and + `ExtendVote`/`VerifyVoteExtension` and only calling `FinalizeBlock` for each height. + Such a mechanism does not exist at the time of writing this RFC. + +The blocksync reactor featuring light client verification is being actively worked on (tentatively +for v0.37). So it is best if this RFC does not try to delve into that problem, but just makes sure +its outcomes are compatible with that effort. + +In subsection [Cases to Address](#cases-to-address), we concluded that we can focus on +solving case (h) in theoretical terms. 
+However, as the current Tendermint version does not yet support switching back to blocksync once a +node has switched to consensus, we need to split case (h) into two cases. When a full node needs to +catch up... + +- **(h.1)** ... it has not switched yet from the blocksync reactor to the consensus reactor, or + +- **(h.2)** ... it has already switched to the consensus reactor. + +This is important in order to discuss the different possible implementations. + +#### Base Implementation: Persist and Propagate Extended Commit History + +In order to circumvent the fact that we cannot switch from the consensus reactor back to blocksync, +rather than just keeping the few most recent extended commits, nodes will need to keep +and gossip a backlog of extended commits so that the consensus reactor can still propose and decide +in out-of-date heights (even if those proposals will be useless). + +The base implementation - for which an experimental patch exists - consists in the conservative +approach of persisting in the block store *all* extended commits for which we have also stored +the full block. Currently, when statesync is run at startup, it saves light blocks. +This base implementation does not seek +to receive or persist extended commits for those light blocks as they would not be of any use. + +Then, we modify the blocksync reactor so that peers *always* send requested full blocks together +with the corresponding extended commit in the `BlockResponse` messages. This guarantees that the +block store being reconstructed by blocksync has the same information as that of peers that are +up to date (at least starting from the latest snapshot applied by statesync before starting blocksync). 
+Thus, blocksync has all the data it requires to switch to the consensus reactor, as long as one of +the following exit conditions are met: + +- The node is still at height 0 (where no commit or extended commit is needed) +- The node has processed at least 1 block in blocksync + +The second condition is needed in case the node has installed an Application snapshot during statesync. +If that is the case, at the time blocksync starts, the block store only has the data statesync has saved: +light blocks, and no extended commits. +Hence we need to blocksync at least one block from another node, which will be sent with its corresponding extended commit, before we can switch to consensus. + +As a side note, a chain might be started at a height *hi > 0*, all other heights +*h < hi* being non-existent. In this case, the chain is still considered to be at height 0 before +block *hi* is applied, so the first condition above allows the node to switch to consensus even +if blocksync has not processed any block (which is always the case if all nodes are starting from scratch). + +When a validator falls behind while having already switched to the consensus reactor, a peer node can +simply retrieve the extended commit for the required height from the block store and reconstruct a set of +precommit votes together with their extensions and send them in the form of precommit messages to the +validator falling behind, regardless of whether the peer node holds the extended commit because it +actually participated in that consensus and thus received the precommit messages, or it received the extended commit via a `BlockResponse` message while running blocksync. 
+
+This solution requires a few changes to the consensus reactor:
+
+- upon saving the block for a given height in the block store at decision time, save the
+  corresponding extended commit as well
+- in the catch-up mechanism, when a node realizes that another peer is more than 2 heights
+  behind, it uses the extended commit (rather than the canonical commit as done previously) to
+  reconstruct the precommit votes with their corresponding extensions
+
+The changes to the blocksync reactor are more substantial:
+
+- the `BlockResponse` message is extended to include the extended commit of the same height as
+  the block included in the response (just as they are stored in the block store)
+- structure `bpRequester` is likewise extended to hold the received extended commits coming in
+  `BlockResponse` messages
+- method `PeekTwoBlocks` is modified to also return the extended commit corresponding to the first block
+- when successfully verifying a received block, the reactor saves its corresponding extended commit in
+  the block store
+
+The two main drawbacks of this base implementation are:
+
+- the increased size taken by the block store, in particular with big extensions
+- the increased bandwidth taken by the new format of `BlockResponse`
+
+#### Possible Optimization: Pruning the Extended Commit History
+
+If we cannot switch from the consensus reactor back to the blocksync reactor we cannot prune the extended commit backlog in the block store without sacrificing the implementation's correctness. The asynchronous
+nature of our distributed system model allows a process to fall behind an arbitrary number of
+heights, and thus all extended commits need to be kept *just in case* a node that far behind had
+previously switched to the consensus reactor.
+
+However, there is a possibility to optimize the base implementation. Every time we enter a new height,
+we could prune from the block store all extended commits that are more than *d* heights in the past.
+Then, we need to handle two new situations, roughly equivalent to cases (h.1) and (h.2) described above.
+
+- (h.1) A node starts from scratch or recovers after a crash. In this case, we need to modify the
+  blocksync reactor's base implementation.
+  - when receiving a `BlockResponse` message, it MUST accept that the extended commit is set to `nil`,
+  - when sending a `BlockResponse` message, if the block store contains the extended commit for that
+    height, it MUST set it in the message, otherwise it sets it to `nil`,
+  - the exit conditions used for the base implementation are no longer valid; the only reliable exit
+    condition now consists in making sure that the last block processed by blocksync was received with
+    the corresponding extended commit, and not `nil`; this extended commit will allow the node to switch from
+    the blocksync reactor to the consensus reactor and immediately act as a proposer if required.
+- (h.2) A node already running the consensus reactor falls behind beyond *d* heights. In principle,
+  the node will be stuck forever as no other node can provide the vote extensions it needs to make
+  progress (they all have pruned the corresponding extended commit).
+  However, we can manually have the node crash and recover as a workaround. This effectively converts
+  this case into (h.1).
+
+### Formalization Work
+
+A formalization work to show or prove the correctness of the different use cases and solutions
+presented here (and any other that may be found) needs to be carried out.
+A question that needs a precise answer is how many extended commits (one?, two?) a node needs
+to keep in persistent memory when implementing Solution 3 described above without Tendermint's
+current limitations.
+Another important invariant we need to prove formally is that the set of vote extensions
+required to make progress will always be held somewhere in the network.
+ +## References + +- [ABCI++ specification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md) +- [ABCI as of v0.35](https://github.com/tendermint/spec/blob/4fb99af/spec/abci/README.md) +- [Vote extensions issue](https://github.com/tendermint/tendermint/issues/8174) +- [Light client verification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/light-client/README.md) diff --git a/docs/rfc/rfc-018-bls-agg-exploration.md b/docs/rfc/rfc-018-bls-agg-exploration.md new file mode 100644 index 0000000000..70ca171a09 --- /dev/null +++ b/docs/rfc/rfc-018-bls-agg-exploration.md @@ -0,0 +1,555 @@ +# RFC 018: BLS Signature Aggregation Exploration + +## Changelog + +- 01-April-2022: Initial draft (@williambanfield). +- 15-April-2022: Draft complete (@williambanfield). + +## Abstract + +## Background + +### Glossary + +The terms that are attached to these types of cryptographic signing systems +become confusing quickly. Different sources appear to use slightly different +meanings of each term and this can certainly add to the confusion. Below is +a brief glossary that may be helpful in understanding the discussion that follows. + +* **Short Signature**: A signature that does not vary in length with the +number of signers. +* **Multi-Signature**: A signature generated over a single message +where, given the message and signature, a verifier is able to determine that +all parties signed the message. May be short or may vary with the number of signers. +* **Aggregated Signature**: A _short_ signature generated over messages with +possibly different content where, given the messages and signature, a verifier +should be able to determine that all the parties signed the designated messages. +* **Threshold Signature**: A _short_ signature generated from multiple signers +where, given a message and the signature, a verifier is able to determine that +a large enough share of the parties signed the message. 
The identities of the
+parties that contributed to the signature are not revealed.
+* **BLS Signature**: An elliptic-curve pairing-based signature system that
+has some nice properties for short multi-signatures. May stand for
+*Boneh-Lynn-Schacham* or *Barreto-Lynn-Scott* depending on the context. A
+BLS signature is a type of signature scheme that is distinct from other forms
+of elliptic-curve signatures such as ECDSA and EdDSA.
+* **Interactive**: Cryptographic scheme where parties need to perform one or
+more request-response cycles to produce the cryptographic material. For
+example, an interactive signature scheme may require the signer and the
+verifier to cooperate to create and/or verify the signature, rather than a
+signature being created ahead of time.
+* **Non-interactive**: Cryptographic scheme where parties do not need to
+perform any request-response cycles to produce the cryptographic material.
+
+### Brief notes on pairing-based elliptic-curve cryptography
+
+Pairing-based elliptic-curve cryptography is quite complex and relies on several
+types of high-level math. Cryptography, in general, relies on being able to find
+problems with an asymmetry between the difficulty of calculating the solution
+and verifying that a given solution is correct.
+
+Pairing-based cryptography works by operating on mathematical functions that
+satisfy the property of **bilinear mapping**. This property is satisfied for
+functions `e` with values `P`, `Q`, `R` and `S` where `e(P, Q + R) = e(P, Q) * e(P, R)`
+and `e(P + S, Q) = e(P, Q) * e(S, Q)`. The most familiar example of this is
+exponentiation. Written in common notation, `g^P*(Q+R) = g^(P*Q) * g^(P*R)` for
+some value `g`.
+
+Pairing-based elliptic-curve cryptography creates a bilinear mapping using
+elliptic curves over a finite field. With some original curve, you can define two groups,
+`G1` and `G2` which are points of the original curve _modulo_ different values.
+
+Finally, you define a third group `Gt`, where points from `G1` and `G2` satisfy
+the property of bilinearity with `Gt`. In this scheme, the function `e` takes
+as inputs points in `G1` and `G2` and outputs values in `Gt`. Succinctly, given
+some point `P` in `G1` and some point `Q` in `G2`, `e(P, Q) = C` where `C` is in `Gt`.
+You can efficiently compute the mapping of points in `G1` and `G2` into `Gt`,
+but you cannot efficiently determine what points were summed and paired to
+produce the value in `Gt`.
+
+Functions are then defined to map digital signatures, messages, and keys into
+and out of points of `G1` or `G2` and signature verification is the process
+of calculating if a set of values representing a message, public key, and digital
+signature produce the same value in `Gt` through `e`.
+
+Signatures can be created as either points in `G1` with public keys being
+created as points in `G2` or vice versa. For the case of BLS12-381, the popular
+curve used, points in `G1` are represented with 48 bytes and points in `G2` are
+represented with 96 bytes. It is up to the implementer of the cryptosystem to
+decide which should be larger, the public keys or the signatures.
+
+BLS signatures rely on pairing-based elliptic-curve cryptography to produce
+various types of signatures. For a more in-depth but still high level discussion
+of pairing-based elliptic-curve cryptography, see Vitalik Buterin's post on
+[Exploring Elliptic Curve Pairings][vitalik-pairing-post]. For much more in
+depth discussion, see the specific paper on BLS12-381, [Short signatures from
+ the Weil Pairing][bls-weil-pairing] and
+[Compact Multi-Signatures for Smaller Blockchains][multi-signatures-smaller-blockchains].
+
+### Adoption
+
+BLS signatures have already gained traction within several popular projects.
+
+* Algorand is working on an implementation.
+* [Zcash][zcash-adoption] has adopted BLS12-381 into the protocol.
+* [Ethereum 2.0][eth-2-adoption] has adopted BLS12-381 into the protocol. +* [Chia Network][chia-adoption] has adopted BLS for signing blocks. +* [Ostracon][line-ostracon-pr], a fork of Tendermint has adopted BLS for signing blocks. + +### What systems may be affected by adding aggregated signatures? + +#### Gossip + +Gossip could be updated to aggregate vote signatures during a consensus round. +This appears to be of frankly little utility. Creating an aggregated signature +incurs overhead, so frequently re-aggregating may incur a significant +overhead. How costly this is is still subject to further investigation and +performance testing. + +Even if vote signatures were aggregated before gossip, each validator would still +need to receive and verify vote extension data from each (individual) peer validator in +order for consensus to proceed. That displaces any advantage gained by aggregating signatures across the vote message in the presence of vote extensions. + +#### Block Creation + +When creating a block, the proposer may create a small set of short +multi-signatures and attach these to the block instead of including one +signature per validator. + +#### Block Verification + +Currently, we verify each validator signature using the public key associated +with that validator. With signature aggregation, verification of blocks would +not verify many signatures individually, but would instead check the (single) +multi-signature using the public keys stored by the validator. This would also +require a mechanism for indicating which validators are included in the +aggregated signature. + +#### IBC Relaying + +IBC would no longer need to transmit a large set of signatures when +updating state. These state updates do not happen for every IBC packet, only +when changing an IBC light client's view of the counterparty chain's state. +General [IBC packets][ibc-packet] only contain enough information to correctly +route the data to the counterparty chain. 
+ +IBC does persist commit signatures to the chain in these `MsgUpdateClient` +message when updating state. This message would no longer need the full set +of unique signatures and would instead only need one signature for all of the +data in the header. + +Adding BLS signatures would create a new signature type that must be +understood by the IBC module and by the relayers. For some operations, such +as state updates, the set of data written into the chain and received by the +IBC module could be slightly smaller. + +## Discussion + +### What are the proposed benefits to aggregated signatures? + +#### Reduce Block Size + +At the moment, a commit contains a 64-byte (512-bit) signature for each validator +that voted for the block. For the Cosmos Hub, which has 175 validators in the +active set, this amounts to about 11 KiB per block. That gives an upper bound of +around 113 GiB over the lifetime of the chain's 10.12M blocks. (Note, the Hub has +increased the number of validators in the active set over time so the total +signature size over the history of the chain is likely somewhat less than that). + +Signature aggregation would only produce two signatures for the entire block. +One for the yeas and one for the nays. Each BLS aggregated signature is 48 +bytes, per the [IETF standard of BLS signatures][bls-ietf-ecdsa-compare]. +Over the lifetime of the same Cosmos Hub chain, that would amount to about 1 +GB, a savings of 112 GB. While that is a large factor of reduction it's worth +bearing in mind that, at [GCP's cost][gcp-storage-pricing] of $.026 USD per GB, +that is a total savings of around $2.50 per month. + +#### Reduce Signature Creation and Verification Time + +From the [IETF draft standard on BLS Signatures][bls-ietf], BLS signatures can be +created in 370 microseconds and verified in 2700 microseconds. 
Our current
+[Ed25519 implementation][voi-ed25519-perf] was benchmarked locally to take
+13.9 microseconds to produce a signature and 2.03 milliseconds to batch verify
+128 signatures, which is slightly fewer than the 175 in the Hub. blst, a popular
+implementation of BLS signature aggregation, was benchmarked to perform verification
+on 100 signatures in 1.5 milliseconds [when run locally][blst-verify-bench]
+on an 8 thread machine with pre-aggregated public keys. It is worth noting that
+the `ed25519` library verification time grew steadily with the number of signatures,
+whereas the bls library verification time remains constant. This is because the
+number of operations used to verify a signature does not grow at all with the
+number of signatures included in the aggregate signature (as long as the signers
+signed over the same message data as is the case in Tendermint).
+
+It is worth noting that this would also represent a _degradation_ in signature
+verification time for chains with small validator sets. When batch verifying
+only 32 signatures, our ed25519 library takes .57 milliseconds, whereas BLS
+would still require the same 1.5 milliseconds.
+
+For massive validator sets, blst dominates, taking the same 1.5 milliseconds to
+check an aggregated signature from 1024 validators versus our ed25519 library's
+13.066 milliseconds to batch verify a set of that size.
+
+#### Reduce Light-Client Verification Time
+
+The light client aims to be a faster and lighter-weight way to verify that a
+block was voted on by a Tendermint network. The light client fetches
+Tendermint block headers and commit signatures, performing public key
+verification to ensure that the associated validator set signed the block.
+Reducing the size of the commit signature would allow the light client to fetch
+block data more quickly.
+
+Additionally, the faster signature verification times of BLS signatures mean
+that light client verification would proceed more quickly.
+
+However, verification of an aggregated signature is all-or-nothing. The verifier
+cannot check that some singular signer had a signature included in the block.
+Instead, the verifier must use all public keys to check if some signature
+was included. This does mean that any light client implementation must always
+be able to fetch all public keys for any height instead of potentially being
+able to check if some singular validator's key signed the block.
+
+#### Reduce Gossip Bandwidth
+
+##### Vote Gossip
+
+It is possible to aggregate subsets of signatures during voting, so that the
+network need not gossip all *n* validator signatures to all *n* validators.
+Theoretically, subsets of the signatures could be aggregated during consensus
+and vote messages could carry those aggregated signatures. Implementing this
+would certainly increase the complexity of the gossip layer but could possibly
+reduce the total number of signatures required to be verified by each validator.
+
+##### Block Gossip
+
+A reduction in the block size as a result of signature aggregation would
+naturally lead to a reduction in the bandwidth required to gossip a block.
+Each validator would only send and receive the smaller aggregated signatures
+instead of the full list of multi-signatures as we have them now.
+
+### What are the drawbacks to aggregated signatures?
+
+#### Heterogeneous key types cannot be aggregated
+
+Aggregation requires a specific signature algorithm, and our legacy signing schemes
+cannot be aggregated. In practice, this means that aggregated signatures could
+be created for a subset of validators using BLS signatures, and validators
+with other key types (such as Ed25519) would still have to be separately
+propagated in blocks and votes.
+
+#### Many HSMs do not support aggregated signatures
+
+**Hardware Security Modules** (HSM) are a popular way to manage private keys.
+They provide additional security for key management and should be used when +possible for storing highly sensitive private key material. + +Below is a list of popular HSMs along with their support for BLS signatures. + +* YubiKey + * [No support][yubi-key-bls-support] +* Amazon Cloud HSM + * [No support][cloud-hsm-support] +* Ledger + * [Lists support for the BLS12-381 curve][ledger-bls-announce] + +I cannot find support listed for Google Cloud, although perhaps it exists. + +## Feasibility of implementation + +This section outlines the various hurdles that would exist to implementing BLS +signature aggregation into Tendermint. It aims to demonstrate that we _could_ +implement BLS signatures but that it would incur risk and require breaking changes for a +reasonably unclear benefit. + +### Can aggregated signatures be added as soft-upgrades? + +In my estimation, yes. With the implementation of proposer-based timestamps, +all validators now produce signatures on only one of two messages: + +1. A [CanonicalVote][canonical-vote-proto] where the BlockID is the hash of the block or +2. A `CanonicalVote` where the `BlockID` is nil. + +The block structure can be updated to perform hashing and validation in a new +way as a soft upgrade. This would look like adding a new section to the [Block.Commit][commit-proto] structure +alongside the current `Commit.Signatures` field. This new field, tentatively named +`AggregatedSignature` would contain the following structure: + +```proto +message AggregatedSignature { + // yeas is a BitArray representing which validators in the active validator + // set issued a 'yea' vote for the block. + tendermint.libs.bits.BitArray yeas = 1; + + // absent is a BitArray representing which validators in the active + // validator set did not issue votes for the block. + tendermint.libs.bits.BitArray absent = 2; + + // yea_signature is an aggregated signature produced from all of the vote + // signatures for the block. 
+ repeated bytes yea_signature = 3; + + // nay_signature is an aggregated signature produced from all of the vote + // signatures from votes for 'nil' for this block. + // nay_signature should be made from all of the validators that were both not + // in the 'yeas' BitArray and not in the 'absent' BitArray. + repeated bytes nay_signature = 4; +} +``` + +Adding this new field as a soft upgrade would mean hashing this data structure +into the blockID along with the old `Commit.Signatures` when both are present +as well as ensuring that the voting power represented in the new +`AggregatedSignature` and `Signatures` field was enough to commit the block +during block validation. One can certainly imagine other possible schemes for +implementing this but the above should serve as a simple enough proof of concept. + +### Implementing vote-time and commit-time signature aggregation separately + +Implementing aggregated BLS signatures as part of the block structure can easily be +achieved without implementing any 'vote-time' signature aggregation. +The block proposer would gather all of the votes, complete with signatures, +as it does now, and produce a set of aggregate signatures from all of the +individual vote signatures. + +Implementing 'vote-time' signature aggregation cannot be achieved without +also implementing commit-time signature aggregation. This is because such +signatures cannot be dis-aggregated into their constituent pieces. Therefore, +in order to implement 'vote-time' signature aggregation, we would need to +either first implement 'commit-time' signature aggregation, or implement both +'vote-time' signature aggregation while also updating the block creation and +verification protocols to allow for aggregated signatures. + +### Updating IBC clients + +In order for IBC clients to function, they must be able to perform light-client +verification of blocks on counterparty chains. 
Because BLS signatures are not
+currently part of light-clients, chains that transmit messages over IBC
+cannot update to using BLS signatures without their counterparties first
+being upgraded to parse and verify BLS. If chains upgrade without their
+counterparties first updating, they will lose the ability to interoperate with
+non-updated chains.
+
+### New attack surfaces
+
+BLS signatures and signature aggregation come with a new set of attack surfaces.
+Additionally, it's not clear that all possible major attacks are currently known
+on the BLS aggregation schemes since new ones have been discovered since the IETF
+draft standard was written. The known attacks are manageable and are listed below.
+Our implementation would need to protect against these but this does not appear
+to present a significant hurdle to implementation.
+
+#### Rogue key attack prevention
+
+Generating an aggregated signature requires guarding against what is called
+a [rogue key attack][bls-ietf-terms]. A rogue key attack is one in which a
+malicious actor can craft an _aggregate_ key that can produce signatures that
+appear to include a signature from a private key that the malicious actor
+does not actually know. In Tendermint terms, this would look like a Validator
+producing a vote signed by both itself and some other validator where the other
+validator did not actually produce the vote itself.
+
+The main mechanisms for preventing this require that each entity prove that it
+can sign data with just their private key. The options involve either
+ensuring that each entity sign a _different_ message when producing every
+signature _or_ producing a [proof of possession][bls-ietf-pop] (PoP) when announcing
+their key to the network.
+
+A PoP is a message that demonstrates ownership of a private
+key.
A simple scheme for PoP is one where the entity announcing
+its new public key to the network includes a digital signature over the bytes
+of the public key generated using the associated private key. Everyone receiving
+the public key and associated proof-of-possession can easily verify the
+signature and be sure the entity owns the private key.
+
+This PoP scheme suits the Tendermint use case quite well since
+validator keys change infrequently so the associated PoPs would not be onerous
+to produce, verify, and store. Using this scheme allows signature verification
+to proceed more quickly, since all signatures are over identical data and
+can therefore be checked using an aggregated public key instead of one at a
+time, public key by public key.
+
+#### Summing Zero Attacks
+
+[Summing zero attacks][summing-zero-paper] are attacks that rely on using the '0' point of an
+elliptic curve. For BLS signatures, if the point 0 is chosen as the private
+key, then the 0 point will also always be the public key and all signatures
+produced by the key will also be the 0 point. This is easy enough to
+detect when verifying each signature individually.
+
+However, because BLS signature aggregation creates an aggregated signature and
+an aggregated public key, a set of colluding signers can create a pair or set
+of signatures that are non-zero but which aggregate ("sum") to 0. The signatures that sum zero along with the
+summed public key of the colluding signers will verify any message. This would
+allow the colluding signers to sign any block or message with the same signature.
+This would be reasonably easy to detect and create evidence for because, in
+all other cases, the same signature should not verify more than one message. It's
+not exactly clear how such an attack would advantage the colluding validators
+because the normal mechanisms of evidence gathering would still detect the
+double signing, regardless of the signatures on both blocks being identical.
+ +### Backwards Compatibility + +Backwards compatibility is an important consideration for signature verification. +Specifically, it is important to consider whether chains using current versions +of IBC would be able to interact with chains adopting BLS. + +Because the `Block` shared by IBC and Tendermint is produced and parsed using +protobuf, new structures can be added to the Block without breaking the +ability of legacy users to parse the new structure. Breaking changes between +current users of IBC and new Tendermint blocks only occur if data that is +relied upon by the current users is no longer included in the current fields. + +For the case of BLS aggregated signatures, a new `AggregatedSignature` field +can therefore be added to the `Commit` field without breaking current users. +Current users will be broken when counterparty chains upgrade to the new version +and _begin using_ BLS signatures. Once counterparty chains begin using BLS +signatures, the BlockID hashes will include hashes of the `AggregatedSignature` +data structure that the legacy users will not be able to compute. Additionally, +the legacy software will not be able to parse and verify the signatures to +ensure that a supermajority of validators from the counterparty chain signed +the block. + +### Library Support + +Libraries for BLS signature creation are limited in number, although active +development appears to be ongoing. Cryptographic algorithms are difficult to +implement correctly and correctness issues are extremely serious and dangerous. +No further exploration of BLS should be undertaken without strong assurance of +a well-tested library with continuing support for creating and verifying BLS +signatures. + +At the moment, there is one candidate, `blst`, that appears to be the most +mature and well vetted. While this library is undergoing continuing auditing +and is supported by funds from the Ethereum foundation, adopting a new cryptographic +library presents some serious risks. 
Namely, if the support for the library were +to be discontinued, Tendermint may become saddled with the requirement of supporting +a very complex piece of software or force a massive ecosystem-wide migration away +from BLS signatures. + +This is one of the more serious reasons to avoid adopting BLS signatures at this +time. There is no gold standard library. Some projects look promising, but no +project has been formally verified with a long term promise of being supported +well into the future. + +#### Go Standard Library + +The Go Standard library has no implementation of BLS signatures. + +#### BLST + +[blst][blst], or 'blast' is an implementation of BLS signatures written in C +that provides bindings into Go as part of the repository. This library is +actively undergoing formal verification by Galois and previously received an +initial audit by NCC group, a firm I'd never heard of. + +`blst` is [targeted for use in prysm][prysm-blst], the golang implementation of Ethereum 2.0. + +#### Gnark-Crypto + +[Gnark-Crypto][gnark] is a Go-native implementation of elliptic-curve pairing-based +cryptography. It is not audited and is documented as 'as-is', although +development appears to be active so formal verification may be forthcoming. + +#### CIRCL + +[CIRCL][circl] is a go-native implementation of several cryptographic primitives, +bls12-381 among them. The library is written and maintained by Cloudflare and +appears to receive frequent contributions. However, it lists itself as experimental +and urges users to take caution before using it in production. + +### Added complexity to light client verification + +Implementing BLS signature aggregation in Tendermint would pose issues for the +light client. The light client currently validates a subset of the signatures +on a block when performing the verification algorithm. This is no longer possible +with an aggregated signature. Aggregated signature verification is all-or-nothing. 
+The light client could no longer check that a subset of validators from some +set of validators is represented in the signature. Instead, it would need to create +a new aggregated key with all the stated signers for each height it verified where +the validator set changed. + +This means that the speed advantages gained by using BLS cannot be fully realized +by the light client since the client needs to perform the expensive operation +of re-aggregating the public key. Aggregation is _not_ constant time in the +number of keys and instead grows linearly. When [benchmarked locally][blst-verify-bench-agg], +blst public key aggregation of 128 keys took 2.43 milliseconds. This, along with +the 1.5 milliseconds to verify a signature would raise light client signature +verification time to 3.9 milliseconds, a time above the previously mentioned +batch verification time using our ed25519 library of 2.0 milliseconds. + +Schemes to cache aggregated subsets of keys could certainly cut this time down at the +cost of adding complexity to the light client. + +### Added complexity to evidence handling + +Implementing BLS signature aggregation in Tendermint would add complexity to +the evidence handling within Tendermint. Currently, the light client can submit +evidence of a fork attempt to the chain. This evidence consists of the set of +validators that double-signed, including their public keys, with the conflicting +block. + +We can quickly check that the listed validators double signed by verifying +that each of their signatures are in the submitted conflicting block. A BLS +signature scheme would change this by requiring the light client to submit +the public keys of all of the validators that signed the conflicting block so +that the aggregated signature may be checked against the full signature set. +Again, aggregated signature verification is all-or-nothing, so without all of +the public keys, we cannot verify the signature at all. These keys would be +retrievable. 
Any party that wanted to create a fork would want to convince a +network that its fork is legitimate, so it would need to gossip the public keys. +This does not hamper the feasibility of implementing BLS signature aggregation +into Tendermint, but does represent yet another piece of added complexity to +the associated protocols. + +## Open Questions + +* *Q*: Can you aggregate Ed25519 signatures in Tendermint? + * There is a suggested scheme in github issue [7892][suggested-ed25519-agg], +but additional rigor would be required to fully verify its correctness. + +## Current Consideration + +Adopting a signature aggregation scheme presents some serious risks and costs +to the Tendermint project. It requires multiple backwards-incompatible changes +to the code, namely a change in the structure of the block and a new backwards-incompatible +signature and key type. It risks adding a new signature type for which new attack +types are still being discovered _and_ for which no industry standard, battle-tested +library yet exists. + +The gains boasted by this new signing scheme are modest: Verification time is +marginally faster and block sizes shrink by a few kilobytes. These are relatively +minor gains in exchange for the complexity of the change and the listed risks of the technology. +We should take a wait-and-see approach to BLS signature aggregation, monitoring +the up-and-coming projects and consider implementing it as the libraries and +standards develop. 
+ +### References + +[line-ostracon-repo]: https://github.com/line/ostracon +[line-ostracon-pr]: https://github.com/line/ostracon/pull/117 +[mit-BLS-lecture]: https://youtu.be/BFwc2XA8rSk?t=2521 +[gcp-storage-pricing]: https://cloud.google.com/storage/pricing#north-america_2 +[yubi-key-bls-support]: https://github.com/Yubico/yubihsm-shell/issues/66 +[cloud-hsm-support]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/pkcs11-key-types.html +[bls-ietf]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04 +[bls-ietf-terms]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-1.3 +[bls-ietf-pop]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-3.3 +[multi-signatures-smaller-blockchains]: https://eprint.iacr.org/2018/483.pdf +[ibc-tendermint]: https://github.com/cosmos/ibc/tree/master/spec/client/ics-007-tendermint-client +[zcash-adoption]: https://github.com/zcash/zcash/issues/2502 +[chia-adoption]: https://github.com/Chia-Network/chia-blockchain#chia-blockchain +[bls-ietf-ecdsa-compare]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-1.1 +[voi-ed25519-perf]: https://github.com/williambanfield/curve25519-voi/blob/benchmark/primitives/ed25519/PERFORMANCE.txt#L79 +[blst-verify-bench]: https://github.com/williambanfield/blst/blame/bench/bindings/go/PERFORMANCE.md#L9 +[blst-verify-bench-agg]: https://github.com/williambanfield/blst/blame/bench/bindings/go/PERFORMANCE.md#L23 +[vitalik-pairing-post]: https://medium.com/@VitalikButerin/exploring-elliptic-curve-pairings-c73c1864e627 +[ledger-bls-announce]: https://www.ledger.com/first-ever-firmware-update-coming-to-the-ledger-nano-x +[commit-proto]: https://github.com/tendermint/tendermint/blob/be7cb50bb3432ee652f88a443e8ee7b8ef7122bc/proto/tendermint/types/types.proto#L121 +[canonical-vote-proto]: https://github.com/tendermint/tendermint/blob/be7cb50bb3432ee652f88a443e8ee7b8ef7122bc/spec/core/encoding.md#L283 
+[blst]: https://github.com/supranational/blst +[prysm-blst]: https://github.com/prysmaticlabs/prysm/blob/develop/go.mod#L75 +[gnark]: https://github.com/ConsenSys/gnark-crypto/ +[eth-2-adoption]: https://notes.ethereum.org/@GW1ZUbNKR5iRjjKYx6_dJQ/Skxf3tNcg_ +[bls-weil-pairing]: https://www.iacr.org/archive/asiacrypt2001/22480516.pdf +[summing-zero-paper]: https://eprint.iacr.org/2021/323.pdf +[circl]: https://github.com/cloudflare/circl +[light-client-evidence]: https://github.com/tendermint/tendermint/blob/a6fd1fe20116d4b1f7e819cded81cece8e5c1ac7/types/evidence.go#L245 +[suggested-ed25519-agg]: https://github.com/tendermint/tendermint/issues/7892 diff --git a/docs/rfc/rfc-020-onboarding-projects.rst b/docs/rfc/rfc-020-onboarding-projects.rst new file mode 100644 index 0000000000..dc18de65d7 --- /dev/null +++ b/docs/rfc/rfc-020-onboarding-projects.rst @@ -0,0 +1,240 @@ +======================================= +RFC 020: Tendermint Onboarding Projects +======================================= + +.. contents:: + :backlinks: none + +Changelog +--------- + +- 2022-03-30: Initial draft. (@tychoish) +- 2022-04-25: Imported document to tendermint repository. (@tychoish) + +Overview +-------- + +This document describes a collection of projects that might be good for new +engineers joining the Tendermint Core team. These projects mostly describe +features that we'd be very excited to see land in the code base, but that are +intentionally outside of the critical path of a release on the roadmap, and +have the following properties that we think make good on-boarding projects: + +- require relatively little context for the project or its history beyond a + more isolated area of the code. + +- provide exposure to different areas of the codebase, so new team members + will have reason to explore the code base, build relationships with people + on the team, and gain experience with more than one area of the system. 
+
+- be of moderate size, striking a healthy balance between trivial or
+  mechanical changes (which provide little insight) and large intractable
+  changes that require deeper insight than is available during onboarding to
+  address well. A good size project should have natural touchpoints or
+  check-ins.
+
+Projects
+--------
+
+Before diving into one of these projects, have a conversation about the
+project or aspects of Tendermint that you're excited to work on with your
+onboarding buddy. This will help make sure that these issues are still
+relevant, help you get context, understand known pitfalls, and confirm a
+high level approach or design (if relevant). On-boarding buddies
+should be prepared to do some design work before someone joins the team.
+
+The descriptions that follow provide some basic background and attempt to
+describe the user stories and the potential impact of these projects.
+
+E2E Test Systems
+~~~~~~~~~~~~~~~~
+
+Tendermint's E2E framework makes it possible to run small test networks with
+different Tendermint configurations, and make sure that the system works. The
+tests run Tendermint in a separate binary, and the system provides some very
+high level protection against making changes that could break Tendermint in
+otherwise difficult to detect ways.
+
+Working on the E2E system is a good place to get introduced to the Tendermint
+codebase, particularly for developers who are newer to Go, as the E2E
+system (generator, runner, etc.) is distinct from the rest of Tendermint and
+comparatively quite small, so it may be easier to begin making changes in this
+area. At the same time, because the E2E system exercises *all* of Tendermint,
+work in this area is a good way to get introduced to various components of the
+system.
+
+Configurable E2E Workloads
+++++++++++++++++++++++++++
+
+All E2E tests use the same workload (e.g. 
generated transactions, submitted to
+different nodes in the network), which has been tuned empirically to provide a
+gentle but consistent parallel load that all E2E tests can pass. Ideally, the
+workload generator could be configurable to have different shapes of work
+(bursty, different transaction sizes, weighted to different nodes, etc.) and
+even perhaps further parameterized within a basic shape, which would make it
+possible to use our existing test infrastructure to answer different questions
+about the performance or capability of the system.
+
+The work would involve adding a new parameter to the E2E test manifest,
+creating an option (e.g. "legacy") for the current load generation model,
+extracting configuration options for the current load generation, then
+prototyping implementations of alternate load generation, and also running
+some preliminary experiments using the tools.
+
+Byzantine E2E Workloads
++++++++++++++++++++++++
+
+There are two main kinds of integration tests in Tendermint: the E2E test
+framework, and then a collection of integration tests that masquerade as
+unit-tests. While some of this expansion of test scope is (potentially)
+inevitable, the masquerading unit tests (e.g. ``consensus.byzantine_test.go``)
+end up being difficult to understand, difficult to maintain, and unreliable.
+
+One solution to this would be to modify the E2E ABCI application to allow it
+to inject byzantine behavior, and then have this be a configurable aspect of
+a test network to be able to provoke Byzantine behavior in a "real" system and
+then observe that evidence is constructed. This would make it possible to
+remove the legacy tests entirely once the new tests have proven themselves. 
+
+Abstract Orchestration Framework
+++++++++++++++++++++++++++++++++
+
+The orchestration of e2e test processes is presently done using docker
+compose, which works well, but has proven a bit limiting as all processes need
+to run on a single machine, and the log aggregation functions are confusing at
+best.
+
+This project would replace the current orchestration with something more
+generic, potentially maintaining the current system, but also allowing the e2e
+tests to manage processes using k8s. There are a few "local" k8s frameworks
+(e.g. kind and k3s), which might be useful for our current testing
+model, but hopefully, we could use this new implementation with other k8s
+systems for more flexible distributed test orchestration.
+
+Improve Operational Experience of ``run-multiple.sh``
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+The e2e test runner currently runs a single test, and in most cases we manage
+the test cases using a shell script that ensures cleanup of entire test
+suites. This is a bit difficult to maintain and makes reproduction of test
+cases more awkward than it should be. The e2e ``runner`` itself should provide
+equivalent functionality to ``run-multiple.sh``: ensure cleanup of test cases,
+collect and process output, and be able to manage entire suites of cases.
+
+It might also be useful to implement an e2e test orchestrator that runs all
+tendermint instances in a single process, using "real" networks for faster
+feedback and iteration during development.
+
+In addition to being a bit easier to maintain, having a more capable runner
+implementation would make it easier to collect data from test runs, improve
+debugability and reporting.
+
+Fan-Out For CI E2E Tests
+++++++++++++++++++++++++
+
+While there is some parallelism in the execution of e2e tests, each e2e test
+job must build a tendermint e2e image, which takes about 5 minutes of CPU time
+per-task, which, given the number of runs, amounts to significant overhead. 
+
+We'd like to be able to reduce the amount of overhead per-e2e tests while
+keeping the cycle time for working with the tests very low, while also
+maintaining a reasonable level of test coverage. This is an impossible
+tradeoff, in some ways, and the percentage of overhead at the moment is large
+enough that we can make some material progress with a moderate amount of time.
+
+Most of this work has to do with modifying github actions configuration and
+e2e artifact (docker) building to reduce redundant work. Eventually, when we
+can drop the requirement for CGo storage engines, it will be possible to
+(cross) compile tendermint locally, and then inject the binary into the docker
+container, which would reduce a lot of the build-time complexity, although we
+can move more in this direction or have runtime flags to disable CGo
+dependencies for local development.
+
+Remove Panics
+~~~~~~~~~~~~~
+
+There are lots of places in the code base which can panic, and would not be
+particularly well handled. While in some cases, panics are the right answer,
+in many cases the panics were just added to simplify downstream error
+checking, and could easily be converted to errors.
+
+The `Don't Panic RFC
+`_
+covers some of the background and approach.
+
+While the changes in this project are relatively rote, this will provide
+exposure to lots of different areas of the codebase as well as insight into
+how different areas of the codebase interact with each other, as well as
+experience with the test suites and infrastructure.
+
+Implement more Expressive ABCI Applications
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Tendermint maintains two very simple ABCI applications (a KV application used
+for basic testing, and a slightly more advanced test application used in the
+end-to-end tests). Writing an application would provide a new engineer with
+useful experiences using Tendermint that mirror the experience of downstream
+users. 
+
+This is more of an exploratory project, but could include providing common
+interfaces on top of Tendermint consensus for other well known protocols or
+tools (e.g. ``etcd``) or a DNS server or some other tool.
+
+Self-Regulating Reactors
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Currently reactors (the internal processes that are responsible for the higher
+level behavior of Tendermint) can be started and stopped, but have no
+provision for being paused. These additional semantics may allow Tendermint to
+pause reactors (and avoid processing their messages, etc.) and allow better
+coordination in the future.
+
+While this is a big project, it's possible to break this apart into many
+smaller projects: make p2p channels pauseable, add pause/un-pause hooks to the
+service implementation and machinery, and finally modify the reactor
+implementations to take advantage of these additional semantics.
+
+This project would give an engineer some exposure to the p2p layer of the
+code, as well as to various aspects of the reactor implementations.
+
+Metrics
+~~~~~~~
+
+Tendermint has a metrics system that is relatively underutilized, and figuring
+out ways to capture and organize the metrics to provide value to users might
+provide an interesting set of projects for new engineers on Tendermint.
+
+Convert Logs to Metrics
++++++++++++++++++++++++
+
+Because the tendermint logs tend to be quite verbose and not particularly
+actionable, most users largely ignore the logging or run at very low
+verbosity. While the log statements in the code do describe useful events,
+taken as a whole the system is not particularly tractable, and particularly at
+the Debug level, not useful. One solution to this problem is to identify log
+messages that might be better represented as metrics (e.g. 
increment a counter for certain kinds of errors).
+
+One approach might be to look at various logging statements, particularly
+debug statements or errors that are logged but not returned, and see if
+they're convertible to counters or other metrics.
+
+Expose Metrics to Tests
++++++++++++++++++++++++
+
+The existing Tendermint test suites replace the metrics infrastructure with
+no-op implementations, which means that tests can neither verify that metrics
+are ever recorded, nor can tests use metrics to observe events in the
+system. Writing an implementation, for testing, that makes it possible to
+record metrics and provides an API for introspecting this data, as well as
+potentially writing tests that take advantage of this type, could be useful.
+
+Logging Metrics
++++++++++++++++
+
+In some systems, the logging system itself can provide some interesting
+insights for operators: having metrics that track the number of messages at
+different levels as well as the total number of messages, can act as a canary
+for the system as a whole.
+
+This should be achievable by adding an interceptor layer within the logging
+package itself that can add metrics to the existing system.
diff --git a/docs/rfc/rfc-021-socket-protocol.md b/docs/rfc/rfc-021-socket-protocol.md
new file mode 100644
index 0000000000..74034d20a6
--- /dev/null
+++ b/docs/rfc/rfc-021-socket-protocol.md
@@ -0,0 +1,266 @@
+# RFC 021: The Future of the Socket Protocol
+
+## Changelog
+
+- 19-May-2022: Initial draft (@creachadair)
+- 19-Jul-2022: Converted from ADR to RFC (@creachadair)
+
+## Abstract
+
+This RFC captures some technical discussion about the ABCI socket protocol that
+was originally documented to solicit an architectural decision. This topic was
+not high-enough priority as of this writing to justify making a final decision.
+
+For that reason, the text of this RFC has the general structure of an ADR, but
+should be viewed primarily as a record of the issue for future reference. 
+ +## Background + +The [Application Blockchain Interface (ABCI)][abci] is a client-server protocol +used by the Tendermint consensus engine to communicate with the application on +whose behalf it performs state replication. There are currently three transport +options available for ABCI applications: + +1. **In-process**: Applications written in Go can be linked directly into the + same binary as the consensus node. Such applications use a "local" ABCI + connection, which exposes application methods to the node as direct function + calls. + +2. **Socket protocol**: Out-of-process applications may export the ABCI service + via a custom socket protocol that sends requests and responses over a + Unix-domain or TCP socket connection as length-prefixed protocol buffers. + In Tendermint, this is handled by the [socket client][socket-client]. + +3. **gRPC**: Out-of-process applications may export the ABCI service via gRPC. + In Tendermint, this is handled by the [gRPC client][grpc-client]. + +Both the out-of-process options (2) and (3) have a long history in Tendermint. +The beginnings of the gRPC client were added in [May 2016][abci-start] when +ABCI was still hosted in a separate repository, and the socket client (formerly +called the "remote client") was part of ABCI from its inception in November +2015. + +At that time when ABCI was first being developed, the gRPC project was very new +(it launched Q4 2015) and it was not an obvious choice for use in Tendermint. +It took a while before the language coverage and quality of gRPC reached a +point where it could be a viable solution for out-of-process applications. For +that reason, it made sense for the initial design of ABCI to focus on a custom +protocol for out-of-process applications. + +## Problem Statement + +For practical reasons, ABCI needs an interprocess communication option to +support applications not written in Go. 
The two practical options are RPC and +FFI, and for operational reasons an RPC mechanism makes more sense. + +The socket protocol has not changed all that substantially since its original +design, and has the advantage of being simple to implement in almost any +reasonable language. However, its simplicity includes some limitations that +have had a negative impact on the stability and performance of out-of-process +applications using it. In particular: + +- The protocol lacks request identifiers, so the client and server must return + responses in strict FIFO order. Even if the client issues requests that have + no dependency on each other, the protocol has no way except order of issue to + map responses to requests. + + This reduces (in some cases substantially) the concurrency an application can + exploit, since the parallelism of requests in flight is gated by the slowest + active request at any moment. There have been complaints from some network + operators on that basis. + +- The protocol lacks method identifiers, so the only way for the client and + server to understand which operation is requested is to dispatch on the type + of the request and response payloads. For responses, this means that [any + error condition is terminal not only to the request, but to the entire ABCI + client](https://github.com/tendermint/tendermint/blob/master/abci/client/socket_client.go#L149). + + The historical intent of terminating for any error seems to have been that + all ABCI errors are unrecoverable and hence protocol fatal + (see [Note 1](#note1)). In practice, however, this greatly complicates + debugging a faulty node, since the only way to respond to errors is to panic + the node which loses valuable context that could have been logged. 
+ +- There are subtle concurrency management dependencies between the client and + the server that are not clearly documented anywhere, and it is very easy for + small changes in both the client and the server to lead to tricky deadlocks, + panics, race conditions, and slowdowns. As a recent example of this, see + https://github.com/tendermint/tendermint/pull/8581. + +These limitations are fixable, but one important question is whether it is +worthwhile to fix them. We can add request and method identifiers, for +example, but doing so would be a breaking change to the protocol requiring +every application using it to update. If applications have to migrate anyway, +the stability and language coverage of gRPC have improved a lot, and today it +is probably simpler to set up and maintain an application using gRPC transport +than to reimplement the Tendermint socket protocol. + +Moreover, gRPC addresses all the above issues out-of-the-box, and requires +(much) less custom code for both the server (i.e., the application) and the +client. The project is well-funded and widely-used, which makes it a safe bet +for a dependency. + +## Decision + +There is a set of related alternatives to consider: + +- Question 1: Designate a single IPC standard for out-of-process applications? + + Claim: We should converge on one (and only one) IPC option for out-of-process + applications. We should choose an option that, after a suitable period of + deprecation for alternatives, will address most or all the highest-impact + uses of Tendermint. Maintaining multiple options increases the surface area + for bugs and vulnerabilities, and we should not have multiple options for + basic interfaces without a clear and well-documented reason. + +- Question 2a: Choose gRPC and deprecate/remove the socket protocol? + + Claim: Maintaining and improving a custom RPC protocol is a substantial + project and not directly relevant to the requirements of consensus. 
We would + be better served by depending on a well-maintained open-source library like + gRPC. + +- Question 2b: Improve the socket protocol and deprecate/remove gRPC? + + Claim: If we find meaningful advantages to maintaining our own custom RPC + protocol in Tendermint, we should treat it as a first-class project within + the core and invest in making it good enough that we do not require other + options. + +**One important consideration** when discussing these questions is that _any +outcome which includes keeping the socket protocol will have eventual migration +impacts for out-of-process applications_ regardless. To fix the limitations of +the socket protocol as it is currently designed will require making _breaking +changes_ to the protocol. So, while we may put off a migration cost for +out-of-process applications by retaining the socket protocol in the short term, +we will eventually have to pay those costs to fix the problems in its current +design. + +## Detailed Design + +1. If we choose to standardize on gRPC, the main work in Tendermint core will + be removing and cleaning up the code for the socket client and server. + + Besides the code cleanup, we will also need to clearly document a + deprecation schedule, and invest time in making the migration easier for + applications currently using the socket protocol. + + > **Point for discussion:** Migrating from the socket protocol to gRPC + > should mostly be a plumbing change, as long as we do it during a release + > in which we are not making other breaking changes to ABCI. However, the + > effort may be more or less depending on how gRPC integration works in the + > application's implementation language, and would have to be sure networks + > have plenty of time not only to make the change but to verify that it + > preserves the function of the network. + > + > What questions should we be asking node operators and application + > developers to understand the migration costs better? + +2. 
If we choose to keep only the socket protocol, we will need to follow up + with a more detailed design for extending and upgrading the protocol to fix + the existing performance and operational issues with the protocol. + + Moreover, since the gRPC interface has been around for a long time we will + also need a deprecation plan for it. + +3. If we choose to keep both options, we will still need to do all the work of + (2), but the gRPC implementation should not require any immediate changes. + + +## Alternatives Considered + +- **FFI**. Another approach we could take is to use a C-based FFI interface so + that applications written in other languages are linked directly with the + consensus node, an option currently only available for Go applications. + + An FFI interface is possible for a lot of languages, but FFI support varies + widely in coverage and quality across languages and the points of friction + can be tricky to work around. Moreover, it's much harder to add FFI support + to a language where it's missing after-the-fact for an application developer. + + Although a basic FFI interface is not too difficult on the Go side, the C + shims for an FFI can get complicated if there's a lot of variability in the + runtime environment on the other end. + + If we want to have one answer for non-Go applications, we are better off + picking an IPC-based solution (whether that's gRPC or an extension of our + custom socket protocol or something else). + +## Consequences + +- **Standardize on gRPC** + + - ✅ Addresses existing performance and operational issues. + - ✅ Replaces custom code with a well-maintained widely-used library. + - ✅ Aligns with Cosmos SDK, which already uses gRPC extensively. + - ✅ Aligns with priv validator interface, for which the socket protocol is already deprecated for gRPC. + - ❓ Applications will be hard to implement in a language without gRPC support. 
+ - ⛔ All users of the socket protocol have to migrate to gRPC, and we believe most current out-of-process applications use the socket protocol. + +- **Standardize on socket protocol** + + - ✅ Less immediate impact for existing users (but see below). + - ✅ Simplifies ABCI API surface by removing gRPC. + - ❓ Users of the socket protocol will have a (smaller) migration. + - ❓ Potentially easier to implement for languages that do not have support. + - ⛔ Need to do all the work to fix the socket protocol (which will require existing users to update anyway later). + - ⛔ Ongoing maintenance burden for per-language server implementations. + +- **Keep both options** + + - ✅ Less immediate impact for existing users (but see below). + - ❓ Users of the socket protocol will have a (smaller) migration. + - ⛔ Still need to do all the work to fix the socket protocol (which will require existing users to update anyway later). + - ⛔ Requires ongoing maintenance and support of both gRPC and socket protocol integrations. + + +## References + +- [Application Blockchain Interface (ABCI)][abci] +- [Tendermint ABCI socket client][socket-client] +- [Tendermint ABCI gRPC client][grpc-client] +- [Initial commit of gRPC client][abci-start] + +[abci]: https://github.com/tendermint/spec/tree/master/spec/abci +[socket-client]: https://github.com/tendermint/tendermint/blob/master/abci/client/socket_client.go +[socket-server]: https://github.com/tendermint/tendermint/blob/master/abci/server/socket_server.go +[grpc-client]: https://github.com/tendermint/tendermint/blob/master/abci/client/grpc_client.go +[abci-start]: https://github.com/tendermint/abci/commit/1ab3c747182aaa38418258679c667090c2bb1e0d + +## Notes + +- **Note 1**: The choice to make all ABCI errors protocol-fatal + was intended to avoid the risk that recovering an application error could + cause application state to diverge. Divergence can break consensus, so it's + essential to avoid it. 
+
+  This is a sound principle, but conflates protocol errors with "mechanical"
+  errors such as timeouts, resource exhaustion, failed connections, and so on.
+  Because the protocol has no way to distinguish these conditions, the only way
+  for an application to report an error is to panic or crash.
+
+  Whether a node is running in the same process as the application or as a
+  separate process, application errors should not be suppressed or hidden.
+  However, it's important to ensure that errors are handled at a consistent and
+  well-defined point in the protocol: Having the application panic or crash
+  rather than reporting an error means the node sees different results
+  depending on whether the application runs in-process or out-of-process, even
+  if the application logic is otherwise identical.
+
+## Appendix: Known Implementations of ABCI Socket Protocol
+
+This is a list of known implementations of the Tendermint custom socket
+protocol. Note that in most cases I have not checked how complete or correct
+these implementations are; these are based on search results and a cursory
+visual inspection. 
+
+- Tendermint Core (Go): [client][socket-client], [server][socket-server]
+- Informal Systems [tendermint-rs](https://github.com/informalsystems/tendermint-rs) (Rust): [client](https://github.com/informalsystems/tendermint-rs/blob/master/abci/src/client.rs), [server](https://github.com/informalsystems/tendermint-rs/blob/master/abci/src/server.rs)
+- Tendermint [js-abci](https://github.com/tendermint/js-abci) (JS): [server](https://github.com/tendermint/js-abci/blob/master/src/server.js)
+- [Hotmoka](https://github.com/Hotmoka/hotmoka) ABCI (Java): [server](https://github.com/Hotmoka/hotmoka/blob/master/io-hotmoka-tendermint-abci/src/main/java/io/hotmoka/tendermint_abci/Server.java)
+- [Tower ABCI](https://github.com/penumbra-zone/tower-abci) (Rust): [server](https://github.com/penumbra-zone/tower-abci/blob/main/src/server.rs)
+- [abci-host](https://github.com/datopia/abci-host) (Clojure): [server](https://github.com/datopia/abci-host/blob/master/src/abci/host.clj)
+- [abci_server](https://github.com/KrzysiekJ/abci_server) (Erlang): [server](https://github.com/KrzysiekJ/abci_server/blob/master/src/abci_server.erl)
+- [py-abci](https://github.com/davebryson/py-abci) (Python): [server](https://github.com/davebryson/py-abci/blob/master/src/abci/server.py)
+- [scala-tendermint-server](https://github.com/intechsa/scala-tendermint-server) (Scala): [server](https://github.com/InTechSA/scala-tendermint-server/blob/master/src/main/scala/lu/intech/tendermint/Server.scala)
+- [kepler](https://github.com/f-o-a-m/kepler) (Haskell): [server](https://github.com/f-o-a-m/kepler/blob/master/hs-abci-server/src/Network/ABCI/Server.hs)
diff --git a/docs/roadmap/roadmap.md b/docs/roadmap/roadmap.md
index 90274ca1d9..b95b43a606 100644
--- a/docs/roadmap/roadmap.md
+++ b/docs/roadmap/roadmap.md
@@ -90,7 +90,7 @@ Has the same feature set as V0.37 but with a focus towards testing, protocol cor
 
 ## Post 1.0 Work
 
-- Improved block propagation with erasure coding and/or compact blocks. 
[More](https://github.com/tendermint/spec/issues/347) +- Improved block propagation with erasure coding and/or compact blocks. [More](https://github.com/tendermint/tendermint/issues/7932) - Consensus engine refactor - Fork accountability protocol - Bidirectional ABCI diff --git a/docs/tendermint-core/consensus/proposer-based-timestamps.md b/docs/tendermint-core/consensus/proposer-based-timestamps.md index 7f98f10d6b..17036a9f2e 100644 --- a/docs/tendermint-core/consensus/proposer-based-timestamps.md +++ b/docs/tendermint-core/consensus/proposer-based-timestamps.md @@ -13,14 +13,15 @@ order: 3 The PBTS algorithm defines a way for a Tendermint blockchain to create block timestamps that are within a reasonable bound of the clocks of the validators on the network. This replaces the original BFTTime algorithm for timestamp -assignment that relied on the timestamps included in precommit messages. +assignment that computed a timestamp using the timestamps included in precommit +messages. ## Algorithm Parameters The functionality of the PBTS algorithm is governed by two parameters within Tendermint. These two parameters are [consensus parameters](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#L291), -meaning they are configured by the ABCI application and are expected to be the +meaning they are configured by the ABCI application and are therefore the same across all nodes on the network. ### `Precision` @@ -51,7 +52,7 @@ useful for the protocols and applications built on top of Tendermint. The following protocols and application features require a reliable source of time: * Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification. 
-* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification). +* Tendermint Evidence expiration is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification). * Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime). * IBC packets can use either a [timestamp or a height to timeout packet diff --git a/docs/tendermint-core/mempool/config.md b/docs/tendermint-core/mempool/config.md index 4e8a9ec73d..4a904ef253 100644 --- a/docs/tendermint-core/mempool/config.md +++ b/docs/tendermint-core/mempool/config.md @@ -14,9 +14,8 @@ Config: ```toml [mempool] -recheck = true +# Set true to broadcast transactions in the mempool to other nodes broadcast = true -wal-dir = "" # Maximum number of transactions in the mempool size = 5000 @@ -44,20 +43,6 @@ max-tx-bytes = 1048576 max-batch-bytes = 0 ``` - - -## Recheck - -Recheck determines if the mempool rechecks all pending -transactions after a block was committed. Once a block -is committed, the mempool removes all valid transactions -that were successfully included in the block. - -If `recheck` is true, then it will rerun CheckTx on -all remaining transactions with the new block state. - ## Broadcast Determines whether this node gossips any valid transactions @@ -92,7 +77,7 @@ Cache size determines the size of the cache holding transactions we have already ## Keep Invalid Transactions In Cache -Keep invalid transactions in cache determines wether a transaction in the cache, which is invalid, should be evicted. An invalid transaction here may mean that the transaction may rely on a different tx that has not been included in a block. 
+Keep invalid transactions in cache determines whether a transaction in the cache, which is invalid, should be evicted. An invalid transaction here may mean that the transaction may rely on a different tx that has not been included in a block. ## Max Transaction Bytes diff --git a/docs/tendermint-core/subscription.md b/docs/tendermint-core/subscription.md index 0f452c5633..84979f61a4 100644 --- a/docs/tendermint-core/subscription.md +++ b/docs/tendermint-core/subscription.md @@ -2,74 +2,228 @@ order: 7 --- -# Subscribing to events via Websocket +# Subscribing to Events -Tendermint emits different events, which you can subscribe to via -[Websocket](https://en.wikipedia.org/wiki/WebSocket). This can be useful -for third-party applications (for analysis) or for inspecting state. +A Tendermint node emits events about important state transitions during +consensus. These events can be queried by clients via the [RPC interface][rpc] +on nodes that enable it. The [list of supported event types][event-types] can +be found in the tendermint/types Go package. -[List of events](https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants) +In Tendermint v0.36 there are two APIs to query events: -To connect to a node via websocket from the CLI, you can use a tool such as -[wscat](https://github.com/websockets/wscat) and run: +- The [**legacy streaming API**](#legacy-streaming-api), comprising the + `subscribe`, `unsubscribe`, and `unsubscribe_all` RPC methods over websocket. + +- The [**event log API**](#event-log-api), comprising the `events` RPC method. + +The legacy streaming API is deprecated in Tendermint v0.36, and will be removed +in Tendermint v0.37. Clients are strongly encouraged to migrate to the new +event log API as soon as is practical. + +[rpc]: https://docs.tendermint.com/master/rpc +[event-types]: https://godoc.org/github.com/tendermint/tendermint/types#EventNewBlockValue + +## Filter Queries + +Event requests take a [filter query][query] parameter. 
A filter query is a +string that describes a subset of available event items to return. An empty +query matches all events; otherwise a query comprises one or more *terms* +comparing event metadata to target values. + +For example, to select new block events, use the term: -```sh -wscat ws://127.0.0.1:26657/websocket ``` +tm.event = 'NewBlock' +``` + +Multiple terms can be combined with `AND` (case matters), for example to match +the transaction event with a given hash, use the query: + +``` +tm.event = 'Tx' AND tx.hash = 'EA7B33F' +``` + +Operands may be strings in single quotes (`'Tx'`), numbers (`45`), dates, or +timestamps. + +The comparison operators include `=`, `<`, `<=`, `>`, `>=`, and `CONTAINS` (for +substring match). In addition, the `EXISTS` operator checks for the presence +of an attribute regardless of its value. + +### Attributes + +Tendermint implicitly defines a string-valued `tm.event` attribute for all +event types. Transaction items (type `Tx`) are also assigned `tx.hash` +(string), giving the hash of the transaction, and `tx.height` (number) +giving the height of the block containing the transaction. For `NewBlock` and +`NewBlockHeader` events, Tendermint defines a `block.height` attribute giving +the height of the block. + +Additional attributes can be provided by the application as [ABCI `Event` +records][abci-event] in response to the `FinalizeBlock` request. The full name +of the attribute in the query is formed by combining the `type` and attribute +`key` with a period. 
+ +For example, given the events + +```go +[]abci.Event{{ + Type: "reward", + Attributes: []abci.EventAttribute{ + {Key: "address", Value: "cosmos1xyz012pdq"}, + {Key: "amount", Value: "45.62"}, + {Key: "balance", Value: "100.390001"}, + }, +}} +``` + +a query may refer to the names `reward.address`, `reward.amount`, and `reward.balance`, as in: + +``` +reward.address EXISTS AND reward.balance > 45 +``` + +Certain application-specific metadata are also indexed for offline queries. +See [Indexing transactions](../app-dev/indexing-transactions.md) for more details. + +[query]: https://godoc.org/github.com/tendermint/tendermint/internal/pubsub/query/syntax +[abci-event]: https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto#L397 + +## Event Log API + +Starting in Tendermint v0.36, when the `rpc.event-log-window-size` +configuration is enabled, the node maintains a log of all events +within this operator-defined time window. This API supersedes the websocket +subscription API described below. -You can subscribe to any of the events above by calling the `subscribe` RPC -method via Websocket along with a valid query. +Clients can query these events by long-polling the `/events` RPC method, +which returns the most recent items from the log that match the [request +parameters][reqevents]. Each item returned includes a cursor that marks its +location in the log. Cursors can be passed via the `before` and `after` +parameters to fetch events earlier in the log. + +For example, this request: ```json { "jsonrpc": "2.0", - "method": "subscribe", - "id": 0, + "id": 1, + "method": "events", "params": { - "query": "tm.event='NewBlock'" + "filter": { + "query": "tm.event = 'Tx' AND app.key = 'applesauce'" + }, + "maxItems": 1, + "after": "" } } ``` -Check out [API docs](https://docs.tendermint.com/master/rpc/) for -more information on query syntax and other options. 
+will return a result similar to the following: + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "items": [ + { + "cursor": "16ee3d5e65be53d8-03d5", + "event": "Tx", + "data": { + "type": "tendermint/event/Tx", + "value": { + "height": 70, + "tx": "YXBwbGVzYXVjZT1zeXJ1cA==", + "result": { + "events": [ + { + "type": "app", + "attributes": [ + { + "key": "creator", + "value": "Cosmoshi Netowoko", + "index": true + }, + { + "key": "key", + "value": "applesauce", + "index": true + }, + { + "key": "index_key", + "value": "index is working", + "index": true + }, + { + "key": "noindex_key", + "value": "index is working", + "index": false + } + ] + } + ] + } + } + } + } + ], + "more": false, + "oldest": "16ee3d4c471c3b00-0001", + "newest": "16ee3d5f2e05a4e0-0400" + } +} +``` + +The `"items"` array gives the matching items (up to the requested +`"maxResults"`) in decreasing time order (i.e., newest to oldest). In this +case, there is only one result, but if there are additional results that were +not returned, the `"more"` flag will be true. Calling `/events` again with the +same query and `"after"` set to the cursor of the newest result (in this +example, `"16ee3d5e65be53d8-03d5"`) will fetch newer results. + +Go clients can use the [`eventstream`][eventstream] package to simplify the use +of this method. The `eventstream.Stream` automatically handles polling for new +events, updating the cursor, and reporting any missed events. -You can also use tags, given you had included them into DeliverTx -response, to query transaction results. See [Indexing -transactions](../app-dev/indexing-transactions.md) for details. +[reqevents]: https://pkg.go.dev/github.com/tendermint/tendermint@master/rpc/coretypes#RequestEvents +[eventstream]: https://godoc.org/github.com/tendermint/tendermint/rpc/client/eventstream -## ValidatorSetUpdates +## Legacy Streaming API -When validator set changes, ValidatorSetUpdates event is published. 
The -event carries a list of pubkey/power pairs. The list is the same -Tendermint receives from ABCI application (see [EndBlock -section](https://github.com/tendermint/tendermint/blob/master/spec/abci/abci.md#endblock) in -the ABCI spec). +- **Note:** This API is deprecated in Tendermint v0.36, and will be removed in + Tendermint v0.37. New clients and existing users should use the [event log + API](#event-log-api) instead. See [ADR 075][adr075] for more details. -Response: +To subscribe to events in the streaming API, you must connect to the node RPC +service using a [websocket][ws]. From the command line you can use a tool such +as [wscat][wscat], for example: + +```sh +wscat ws://127.0.0.1:26657/websocket +``` + +[ws]: https://en.wikipedia.org/wiki/WebSocket +[wscat]: https://github.com/websockets/wscat + +To subscribe to events, call the `subscribe` JSON-RPC method passing in +a [filter query][query] for the events you wish to receive: ```json { "jsonrpc": "2.0", - "id": 0, - "result": { - "query": "tm.event='ValidatorSetUpdates'", - "data": { - "type": "tendermint/event/ValidatorSetUpdates", - "value": { - "validator_updates": [ - { - "address": "09EAD022FD25DE3A02E64B0FE9610B1417183EE4", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "ww0z4WaZ0Xg+YI10w43wTWbBmM3dpVza4mmSQYsd0ck=" - }, - "voting_power": "10", - "proposer_priority": "0" - } - ] - } - } + "method": "subscribe", + "id": 1, + "params": { + "query": "tm.event='NewBlock'" } } ``` + +The subscribe method returns an initial response confirming the subscription, +then sends additional JSON-RPC response messages containing the matching events +as they are published. The subscription continues until either the client +explicitly cancels the subscription (by calling `unsubscribe` or +`unsubscribe_all`) or until the websocket connection is terminated. 
+ +[adr075]: https://tinyurl.com/adr075 diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md index dce6ae05cb..c51b22c6b0 100644 --- a/docs/tendermint-core/using-tendermint.md +++ b/docs/tendermint-core/using-tendermint.md @@ -261,7 +261,7 @@ afford to lose all blockchain data! To reset a blockchain, stop the node and run: ```sh -tenderdash unsafe_reset_all +tendermint unsafe-reset-all ``` This command will remove the data directory and reset private validator and diff --git a/docs/tools/debugging/proposer-based-timestamps-runbook.md b/docs/tools/debugging/proposer-based-timestamps-runbook.md index a817bd29eb..cb32248dd0 100644 --- a/docs/tools/debugging/proposer-based-timestamps-runbook.md +++ b/docs/tools/debugging/proposer-based-timestamps-runbook.md @@ -213,4 +213,4 @@ documentation](https://hub.cosmos.network/main/governance/submitting.html#sendin If the application does not implement a way to update the consensus parameters programatically, then the application itself must be updated to do so. More information on updating -the consensus parameters via ABCI can be found in the [FinalizeBlock documentation](https://github.com/tendermint/tendermint/blob/master/spec/abci++/abci++_methods_002_draft.md#finalizeblock). +the consensus parameters via ABCI can be found in the [FinalizeBlock documentation](../../../spec/abci++/abci%2B%2B_methods.md#finalizeblock). diff --git a/docs/tutorials/go-built-in.md b/docs/tutorials/go-built-in.md index f0ea5beebe..314a6a20fa 100644 --- a/docs/tutorials/go-built-in.md +++ b/docs/tutorials/go-built-in.md @@ -26,8 +26,6 @@ yourself with the syntax. By following along with this guide, you'll create a Tendermint Core application called kvstore, a (very) simple distributed BFT key-value store. -> Note: please use a released version of Tendermint with this guide. The guides will work with the latest version. Please, do not use master. 
- ## Built-in app vs external app Running your application inside the same process as Tendermint Core will give @@ -39,29 +37,28 @@ through a TCP, Unix domain socket or gRPC. ## 1.1 Installing Go Please refer to [the official guide for installing -Go](https://golang.org/doc/install). +Go](https://go.dev/doc/install). Verify that you have the latest version of Go installed: -```bash +```sh $ go version -go version go1.18.1 darwin/amd64 +go version go1.18.x darwin/amd64 ``` Note that the exact patch number may differ as Go releases come out. ## 1.2 Creating a new Go project -We'll start by creating a new Go project. +We'll start by creating a new Go project. First, initialize the project folder with `go mod init`. Running this command should create the `go.mod` file. -```bash -mkdir kvstore -cd kvstore -go mod init github.com// +```sh +$ mkdir kvstore +$ cd kvstore +$ go mod init github.com//kvstore +go: creating new go.mod: module github.com//kvstore ``` -Inside the example directory create a `main.go` file with the following content: - -> Note: there is no need to clone or fork Tendermint in this tutorial. +Inside the project directory, create a `main.go` file with the following content: ```go package main @@ -77,7 +74,7 @@ func main() { When run, this should print "Hello, Tendermint Core" to the standard output. -```bash +```sh $ go run main.go Hello, Tendermint Core ``` @@ -159,64 +156,43 @@ func (app *KVStoreApplication) ApplySnapshotChunk(abcitypes.RequestApplySnapshot } ``` -### 1.3.1 Add a persistent data store - -Our application will need to write its state out to persistent storage so that it -can stop and start without losing all of its data. - -For this tutorial, we will use [BadgerDB](https://github.com/dgraph-io/badger). -Badger is a fast embedded key-value store. - -First, add Badger as a dependency of your go module using the `go get` command: +Now, we will go through each method and explain when it is executed while adding +required business logic. 
-`go get github.com/dgraph-io/badger/v3` +### 1.3.1 Key-value store setup -Next, let's update the application and its constructor to receive a handle to the -database. +For the underlying key-value store we'll use the latest version of [badger](https://github.com/dgraph-io/badger), which is an embeddable, persistent and fast key-value (KV) database. -Update the application struct as follows: +```sh +$ go get github.com/dgraph-io/badger/v3 +go: added github.com/dgraph-io/badger/v3 v3.2103.2 +``` ```go +import "github.com/dgraph-io/badger/v3" + type KVStoreApplication struct { - db *badger.DB - pendingBlock *badger.Txn + db *badger.DB + currentBatch *badger.Txn } -``` -And change the constructor to set the appropriate field when creating the application: - -```go func NewKVStoreApplication(db *badger.DB) *KVStoreApplication { - return &KVStoreApplication{db: db} + return &KVStoreApplication{ + db: db, + } } ``` -The `pendingBlock` keeps track of the transactions that will update the application's -state when a block is completed. Don't worry about it for now, we'll get to that later. +### 1.3.2 CheckTx -Finally, update the `import` stanza at the top to include the `Badger` library: +When a new transaction is added to the Tendermint Core, it will ask the application to check it (validate the format, signatures, etc.). ```go -import( - "github.com/dgraph-io/badger/v3" - abcitypes "github.com/tendermint/tendermint/abci/types" -) -``` - -### 1.3.1 CheckTx - -When Tendermint Core receives a new transaction, Tendermint asks the application -if the transaction is acceptable. In our new application, let's implement some -basic validation for the transactions it will receive. - -For our KV store application, a transaction is a string with the form `key=value`, -indicating a key and value to write to the store. 
- -Add the following helper method to `app.go`: +import ( + "bytes" -```go -func (app *KVStoreApplication) validateTx(tx []byte) uint32 { - parts := bytes.SplitN(tx, []byte("="), 2) + ... +) // check that the transaction is not malformed if len(parts) != 2 || len(parts[0]) == 0 { @@ -249,24 +225,7 @@ Valid transactions will eventually be committed given they are not too big and have enough gas. To learn more about gas, check out ["the specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas). -For the underlying key-value store we'll use -[badger](https://github.com/dgraph-io/badger), which is an embeddable, -persistent and fast key-value (KV) database. - -```go -import( - "bytes" - - "github.com/dgraph-io/badger/v3" - abcitypes "github.com/tendermint/tendermint/abci/types" -) -``` - - -While this `CheckTx` is simple and only validates that the transaction is well-formed, -it is very common for `CheckTx` to make more complex use of the state of an application. - -### 1.3.2 BeginBlock -> DeliverTx -> EndBlock -> Commit +### 1.3.3 BeginBlock -> DeliverTx -> EndBlock -> Commit When the Tendermint consensus engine has decided on the block, the block is transferred to the application over three ABCI method calls: `BeginBlock`, `DeliverTx`, and `EndBlock`. 
@@ -336,12 +295,7 @@ func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit { } ``` -Finally, make sure to add the `log` library to the `import` stanza as well: - -```go -import ( - "bytes" - "log" +### 1.3.4 Query "github.com/dgraph-io/badger/v3" abcitypes "github.com/tendermint/tendermint/abci/types" @@ -407,13 +361,15 @@ import ( "path/filepath" "syscall" - "github.com/dgraph-io/badger/v3" - "github.com/spf13/viper" - abciclient "github.com/tendermint/tendermint/abci/client" - cfg "github.com/tendermint/tendermint/config" - tmlog "github.com/tendermint/tendermint/libs/log" - nm "github.com/tendermint/tendermint/node" - "github.com/tendermint/tendermint/types" + "github.com/dgraph-io/badger/v3" + "github.com/spf13/viper" + + abciclient "github.com/tendermint/tendermint/abci/client" + abcitypes "github.com/tendermint/tendermint/abci/types" + tmconfig "github.com/tendermint/tendermint/config" + tmlog "github.com/tendermint/tendermint/libs/log" + tmservice "github.com/tendermint/tendermint/libs/service" + tmnode "github.com/tendermint/tendermint/node" ) var homeDir string @@ -479,29 +435,42 @@ func main() { This is a huge blob of code, so let's break down what it's doing. -First, we load in the Tendermint Core configuration files: +func newTendermint(app abcitypes.Application, configFile string) (tmservice.Service, error) { + // read config + config := tmconfig.DefaultValidatorConfig() + config.SetRoot(filepath.Dir(filepath.Dir(configFile))) -```go -... 
- config := cfg.DefaultValidatorConfig() - - config.SetRoot(homeDir) - - viper.SetConfigFile(fmt.Sprintf("%s/%s", homeDir, "config/config.toml")) + viper.SetConfigFile(configFile) if err := viper.ReadInConfig(); err != nil { - log.Fatalf("Reading config: %v", err) + return nil, fmt.Errorf("viper failed to read config file: %w", err) } if err := viper.Unmarshal(config); err != nil { - log.Fatalf("Decoding config: %v", err) + return nil, fmt.Errorf("viper failed to unmarshal config: %w", err) } if err := config.ValidateBasic(); err != nil { - log.Fatalf("Invalid configuration data: %v", err) + return nil, fmt.Errorf("config is invalid: %w", err) } - gf, err := types.GenesisDocFromFile(config.GenesisFile()) + + // create logger + logger, err := tmlog.NewDefaultLogger(tmlog.LogFormatPlain, config.LogLevel, false) if err != nil { - log.Fatalf("Loading genesis document: %v", err) + return nil, fmt.Errorf("failed to create logger: %w", err) } -... + + // create node + node, err := tmnode.New( + config, + logger, + abciclient.NewLocalCreator(app), + nil, + ) + if err != nil { + return nil, fmt.Errorf("failed to create new Tendermint node: %w", err) + } + + return node, nil +} + ``` Next, we create a database handle and use it to construct our ABCI application: @@ -530,30 +499,67 @@ Then we construct a logger: ... ``` -Now we have everything setup to run the Tendermint node. We construct -a node by passing it the configuration, the logger, a handle to our application and -the genesis file: +Then we use it to create a Tendermint Core [Service](https://github.com/tendermint/tendermint/blob/v0.35.8/libs/service/service.go#L24) instance: ```go ... - node, err := nm.New(config, logger, acc, gf) - if err != nil { - log.Fatalf("Creating node: %v", err) - } -... 
+ +// create node +node, err := tmnode.New( + config, + logger, + abciclient.NewLocalCreator(app), + nil, +) +if err != nil { + return nil, fmt.Errorf("failed to create new Tendermint node: %w", err) +} ``` -Finally, we start the node: +[tmnode.New](https://github.com/tendermint/tendermint/blob/v0.35.8/node/public.go#L29) requires a few things including a configuration file, a logger and a few others in order to construct the full node. + +Note that we use [abciclient.NewLocalCreator](https://github.com/tendermint/tendermint/blob/v0.35.8/abci/client/creators.go#L15) here to create a local client instead of one communicating through a socket or gRPC. + +[viper](https://github.com/spf13/viper) is being used for reading the config, +which we will generate later using the `tendermint init` command. + ```go -... - node.Start() - defer func() { - node.Stop() - node.Wait() - }() -... +// read config +config := tmconfig.DefaultValidatorConfig() +config.SetRoot(filepath.Dir(filepath.Dir(configFile))) +viper.SetConfigFile(configFile) +if err := viper.ReadInConfig(); err != nil { + return nil, fmt.Errorf("viper failed to read config file: %w", err) +} +if err := viper.Unmarshal(config); err != nil { + return nil, fmt.Errorf("viper failed to unmarshal config: %w", err) +} +if err := config.ValidateBasic(); err != nil { + return nil, fmt.Errorf("config is invalid: %w", err) +} +``` + +As for the logger, we use the built-in library, which provides a nice +abstraction over [zerolog](https://github.com/rs/zerolog). + +```go +// create logger +logger, err := tmlog.NewDefaultLogger(tmlog.LogFormatPlain, config.LogLevel, true) +if err != nil { + return nil, fmt.Errorf("failed to create logger: %w", err) +} ``` +Finally, we start the node and add some signal handling to gracefully stop it +upon receiving SIGTERM or Ctrl-C. + +```go +node.Start() +defer func() { + node.Stop() + node.Wait() +}() + The additional logic at the end of the file allows the program to catch `SIGTERM`. 
This means that the node can shutdown gracefully when an operator tries to kill the program: @@ -565,106 +571,128 @@ signal.Notify(c, os.Interrupt, syscall.SIGTERM) ... ``` -## 1.5 Getting Up and Running +## 1.5 Getting up and running + +Make sure to enable [Go modules](https://github.com/golang/go/wiki/Modules). Run `go mod tidy` to download and add dependencies in `go.mod` file. -Our application is almost ready to run. -Let's install the latest release version of the Tendermint library. +```sh +$ go mod tidy +... +``` -From inside of the project directory, run: +Let's make sure we're using the latest version of Tendermint (currently `v0.35.8`). ```sh -go get github.com/dashevo/tenderdash@master +$ go get github.com/tendermint/tendermint@latest +... ``` -Next, we'll need to populate the Tendermint Core configuration files. -This command will create a `tendermint-home` directory in your project and add a basic set of configuration -files in `tendermint-home/config/`. For more information on what these files contain -see [the configuration documentation](https://github.com/tendermint/tendermint/blob/v0.35.0/docs/nodes/configuration.md). +This will populate the `go.mod` with a release number followed by a hash for Tendermint. + +```go +module github.com//kvstore + +go 1.18 -From the root of your project, run: -```bash -go run github.com/tendermint/tendermint/cmd/tendermint@v0.35.0 init validator --home ./tendermint-home +require ( + github.com/dgraph-io/badger/v3 v3.2103.2 + github.com/tendermint/tendermint v0.35.8 + ... +) ``` -Next, build the application: -```bash -go build -mod=mod -o my-app # use -mod=mod to automatically update go.sum +Now, we can build the binary: + +```sh +$ go build +... ``` Everything is now in place to run your application. 
-Run: -```bash -$ rm -rf /tmp/example -$ TMHOME="/tmp/example" tenderdash init validator +```sh +$ rm -rf /tmp/kvstore /tmp/badger +$ TMHOME="/tmp/kvstore" tendermint init validator -I[2019-07-16|18:40:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json -I[2019-07-16|18:40:36.481] Generated node key module=main path=/tmp/example/config/node_key.json -I[2019-07-16|18:40:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json -I[2019-07-16|18:40:36.483] Generated config module=main mode=validator +2022-07-20T17:04:41+08:00 INFO Generated private validator keyFile=/tmp/kvstore/config/priv_validator_key.json module=main stateFile=/tmp/kvstore/data/priv_validator_state.json +2022-07-20T17:04:41+08:00 INFO Generated node key module=main path=/tmp/kvstore/config/node_key.json +2022-07-20T17:04:41+08:00 INFO Generated genesis file module=main path=/tmp/kvstore/config/genesis.json +2022-07-20T17:04:41+08:00 INFO Generated config mode=validator module=main ``` -The application will begin producing blocks and you can see this reflected in -the log output. - -You now have successfully started running an application using Tendermint Core 🎉🎉. - -## 1.6 Using the application +Feel free to explore the generated files, which can be found at +`/tmp/kvstore/config` directory. Documentation on the config can be found +[here](https://docs.tendermint.com/master/tendermint-core/configuration.html). -Your application is now running and emitting logs to the terminal. -Now it's time to see what this application can do! +We are ready to start our application: -Let's try submitting a transaction to our new application. 
+```sh +$ ./kvstore -config "/tmp/kvstore/config/config.toml" + +badger 2022/07/16 13:55:59 INFO: All 0 tables opened in 0s +badger 2022/07/16 13:55:59 INFO: Replaying file id: 0 at offset: 0 +badger 2022/07/16 13:55:59 INFO: Replay took: 3.052µs +badger 2022/07/16 13:55:59 DEBUG: Value log discard stats empty +2022-07-16T13:55:59+08:00 INFO starting service impl=multiAppConn module=proxy service=multiAppConn +2022-07-16T13:55:59+08:00 INFO starting service connection=query impl=localClient module=abci-client service=localClient +2022-07-16T13:55:59+08:00 INFO starting service connection=snapshot impl=localClient module=abci-client service=localClient +2022-07-16T13:55:59+08:00 INFO starting service connection=mempool impl=localClient module=abci-client service=localClient +2022-07-16T13:55:59+08:00 INFO starting service connection=consensus impl=localClient module=abci-client service=localClient +2022-07-16T13:55:59+08:00 INFO starting service impl=EventBus module=events service=EventBus +2022-07-16T13:55:59+08:00 INFO starting service impl=PubSub module=pubsub service=PubSub +2022-07-16T13:55:59+08:00 INFO starting service impl=IndexerService module=txindex service=IndexerService +2022-07-16T13:55:59+08:00 INFO ABCI Handshake App Info hash= height=0 module=consensus protocol-version=0 software-version= +2022-07-16T13:55:59+08:00 INFO ABCI Replay Blocks appHeight=0 module=consensus stateHeight=0 storeHeight=0 +2022-07-16T13:55:59+08:00 INFO Completed ABCI Handshake - Tendermint and App are synced appHash= appHeight=0 module=consensus +2022-07-16T13:55:59+08:00 INFO Version info block=11 mode=validator p2p=8 tmVersion=0.35.8 +``` -Open another terminal window and run the following curl command: +Let's try sending a transaction. Open another terminal and execute the below command. -```bash +```sh $ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' { - "check_tx": { - "gasWanted": "1", - ... - }, - "deliver_tx": { ... 
}, - "hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6", - "height": "128" + ... + "result": { + "check_tx": { + ... + "gas_wanted": "1", + ... + }, + "deliver_tx": {...}, + "hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6", + "height": "91" + } } ``` If everything went well, you should see a response indicating which height the transaction was included in the blockchain. -Finally, let's make sure that transaction really was persisted by the application. - -Run the following command: +Let's check if the given key now exists and its value: -```bash +```sh $ curl -s 'localhost:26657/abci_query?data="tendermint"' { - "response": { - "code": 0, - "log": "exists", - "info": "", - "index": "0", - "key": "dGVuZGVybWludA==", - "value": "cm9ja3M=", - "proofOps": null, - "height": "6", - "codespace": "" + ... + "result": { + "response": { + "code": 0, + "log": "exists", + "info": "", + "index": "0", + "key": "dGVuZGVybWludA==", + "value": "cm9ja3M=", + "proofOps": null, + "height": "0", + "codespace": "" + } } } ``` -Those values don't look like the `key` and `value` we sent to Tendermint, -what's going on here? - -The response contain a `base64` encoded representation of the data we submitted. -To get the original value out of this data, we can use the `base64` command line utility. - -Run: -``` -echo cm9ja3M=" | base64 -d -``` +`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of +"tendermint" and "rocks" accordingly. ## Outro diff --git a/docs/tutorials/go.md b/docs/tutorials/go.md index 91e03b3e01..8cd3edaa56 100644 --- a/docs/tutorials/go.md +++ b/docs/tutorials/go.md @@ -37,25 +37,27 @@ Core will not have access to application's state. ## 1.1 Installing Go Please refer to [the official guide for installing -Go](https://golang.org/doc/install). +Go](https://go.dev/doc/install). 
Verify that you have the latest version of Go installed: -```bash +```sh $ go version -go version go1.18 darwin/amd64 +go version go1.18.x darwin/amd64 ``` ## 1.2 Creating a new Go project -We'll start by creating a new Go project. +We'll start by creating a new Go project. Initialize the folder with `go mod init`. Running this command should create the `go.mod` file. -```bash -mkdir kvstore -cd kvstore +```sh +$ mkdir kvstore +$ cd kvstore +$ go mod init github.com//kvstore +go: creating new go.mod: module github.com//kvstore ``` -Inside the example directory create a `main.go` file with the following content: +Inside the project directory, create a `main.go` file with the following content: ```go package main @@ -71,8 +73,8 @@ func main() { When run, this should print "Hello, Tendermint Core" to the standard output. -```bash -go run main.go +```sh +$ go run main.go Hello, Tendermint Core ``` @@ -150,10 +152,34 @@ func (KVStoreApplication) ApplySnapshotChunk(abcitypes.RequestApplySnapshotChunk } ``` -Now I will go through each method explaining when it's called and adding +Now, we will go through each method and explain when it is executed while adding required business logic. -### 1.3.1 CheckTx +### 1.3.1 Key-value store setup + +For the underlying key-value store we'll use the latest version of [badger](https://github.com/dgraph-io/badger), which is an embeddable, persistent and fast key-value (KV) database. + +```sh +$ go get github.com/dgraph-io/badger/v3 +go: added github.com/dgraph-io/badger/v3 v3.2103.2 +``` + +```go +import "github.com/dgraph-io/badger/v3" + +type KVStoreApplication struct { + db *badger.DB + currentBatch *badger.Txn +} + +func NewKVStoreApplication(db *badger.DB) *KVStoreApplication { + return &KVStoreApplication{ + db: db, + } +} +``` + +### 1.3.2 CheckTx When a new transaction is added to the Tendermint Core, it will ask the application to check it (validate the format, signatures, etc.). 
@@ -212,26 +238,8 @@ Valid transactions will eventually be committed given they are not too big and have enough gas. To learn more about gas, check out ["the specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas). -For the underlying key-value store we'll use -[badger](https://github.com/dgraph-io/badger), which is an embeddable, -persistent and fast key-value (KV) database. - -```go -import "github.com/dgraph-io/badger" - -type KVStoreApplication struct { - db *badger.DB - currentBatch *badger.Txn -} - -func NewKVStoreApplication(db *badger.DB) *KVStoreApplication { - return &KVStoreApplication{ - db: db, - } -} -``` -### 1.3.2 BeginBlock -> DeliverTx -> EndBlock -> Commit +### 1.3.3 BeginBlock -> DeliverTx -> EndBlock -> Commit When Tendermint Core has decided on the block, it's transferred to the application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and @@ -287,7 +295,7 @@ func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit { } ``` -### 1.3.3 Query +### 1.3.4 Query Now, when the client wants to know whenever a particular key/value exist, it will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call @@ -344,7 +352,7 @@ import ( "os/signal" "syscall" - "github.com/dgraph-io/badger" + "github.com/dgraph-io/badger/v3" abciserver "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/libs/log" @@ -353,7 +361,7 @@ import ( var socketAddr string func init() { - flag.StringVar(&socketAddr, "socket-addr", "unix://example.sock", "Unix domain socket address") + flag.StringVar(&socketAddr, "socket-addr", "unix://kvstore.sock", "Unix domain socket address") } func main() { @@ -426,39 +434,40 @@ signal.Notify(c, os.Interrupt, syscall.SIGTERM) <-c ``` -## 1.5 Getting Up and Running +## 1.5 Getting up and running -We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for -dependency management. 
+Make sure to enable [Go modules](https://github.com/golang/go/wiki/Modules). Run `go mod tidy` to download and add dependencies in `go.mod` file. -```bash -go mod init github.com/me/example +```sh +$ go mod tidy +... ``` -This should create a `go.mod` file. The current tutorial only works with -the master branch of Tendermint, so let's make sure we're using the latest version: +Let's make sure we're using the latest version of Tendermint (currently `v0.35.8`). ```sh -go get github.com/dashevo/tenderdash@master +go get github.com/dashevo/tenderdash@latest ``` This will populate the `go.mod` with a release number followed by a hash for Tendermint. ```go -module github.com/me/example +module github.com/<github_username>/kvstore -go 1.16 +go 1.18 require ( - github.com/dgraph-io/badger v1.6.2 - github.com/tendermint/tendermint + github.com/dgraph-io/badger/v3 v3.2103.2 + github.com/tendermint/tendermint v0.35.8 + ... ) ``` -Now we can build the binary: -```bash -go build +```sh +$ go build +... ``` To create a default configuration, nodeKey and private validator files, let's @@ -473,90 +482,108 @@ major version. 
rm -rf /tmp/example TMHOME="/tmp/example" tenderdash init validator -I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json -I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json -I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json -I[2019-07-16|18:20:36.483] Generated config module=main mode=validator +2022-07-20T17:04:41+08:00 INFO Generated private validator keyFile=/tmp/kvstore/config/priv_validator_key.json module=main stateFile=/tmp/kvstore/data/priv_validator_state.json +2022-07-20T17:04:41+08:00 INFO Generated node key module=main path=/tmp/kvstore/config/node_key.json +2022-07-20T17:04:41+08:00 INFO Generated genesis file module=main path=/tmp/kvstore/config/genesis.json +2022-07-20T17:04:41+08:00 INFO Generated config mode=validator module=main ``` Feel free to explore the generated files, which can be found at -`/tmp/example/config` directory. Documentation on the config can be found +`/tmp/kvstore/config` directory. Documentation on the config can be found [here](https://docs.tendermint.com/master/tendermint-core/configuration.html). 
We are ready to start our application: -```bash -rm example.sock -./example - -badger 2019/07/16 18:25:11 INFO: All 0 tables opened in 0s -badger 2019/07/16 18:25:11 INFO: Replaying file id: 0 at offset: 0 -badger 2019/07/16 18:25:11 INFO: Replay took: 300.4s -I[2019-07-16|18:25:11.523] Starting ABCIServer impl=ABCIServ +```sh +$ rm kvstore.sock +$ ./kvstore + +badger 2022/07/20 17:07:17 INFO: All 1 tables opened in 9ms +badger 2022/07/20 17:07:17 INFO: Replaying file id: 0 at offset: 256 +badger 2022/07/20 17:07:17 INFO: Replay took: 9.077µs +badger 2022/07/20 17:07:17 DEBUG: Value log discard stats empty +2022-07-20T17:07:17+08:00 INFO starting service impl=ABCIServer service=ABCIServer +2022-07-20T17:07:17+08:00 INFO Waiting for new connection... ``` -Then we need to start Tendermint Core and point it to our application. Staying -within the application directory execute: +Then, we need to start Tendermint Core and point it to our application. Staying +within the project directory, open another terminal and execute the command below: -```bash -TMHOME="/tmp/example" tendermint node --proxy-app=unix://example.sock - -I[2019-07-16|18:26:20.362] Version info module=main software=0.32.1 block=10 p2p=7 -I[2019-07-16|18:26:20.383] Starting Node module=main impl=Node -E[2019-07-16|18:26:20.392] Couldn't connect to any seeds module=p2p -I[2019-07-16|18:26:20.394] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:8dab80770ae8e295d4ce905d86af78c4ff634b79 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-nIO96P Version:0.32.1 Channels:4020212223303800 Moniker:app48.fun-box.ru Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}" -I[2019-07-16|18:26:21.440] Executed block module=state height=1 validTxs=0 invalidTxs=0 -I[2019-07-16|18:26:21.446] Committed state module=state height=1 txs=0 appHash= +```sh +$ TMHOME="/tmp/kvstore" tendermint node --proxy-app=unix://kvstore.sock + +2022-07-20T17:10:22+08:00 INFO starting service impl=multiAppConn 
module=proxy service=multiAppConn +2022-07-20T17:10:22+08:00 INFO starting service connection=query impl=socketClient module=abci-client service=socketClient +2022-07-20T17:10:22+08:00 INFO starting service connection=snapshot impl=socketClient module=abci-client service=socketClient +2022-07-20T17:10:22+08:00 INFO starting service connection=mempool impl=socketClient module=abci-client service=socketClient +2022-07-20T17:10:22+08:00 INFO starting service connection=consensus impl=socketClient module=abci-client service=socketClient +2022-07-20T17:10:22+08:00 INFO starting service impl=EventBus module=events service=EventBus +2022-07-20T17:10:22+08:00 INFO starting service impl=PubSub module=pubsub service=PubSub +2022-07-20T17:10:22+08:00 INFO starting service impl=IndexerService module=txindex service=IndexerService +... +2022-07-20T17:10:22+08:00 INFO starting service impl=Node module=main service=Node +2022-07-20T17:10:22+08:00 INFO Starting RPC HTTP server on 127.0.0.1:26657 module=rpc-server +2022-07-20T17:10:22+08:00 INFO p2p service legacy_enabled=false module=main +2022-07-20T17:10:22+08:00 INFO starting service impl=router module=p2p service=router +2022-07-20T17:10:22+08:00 INFO starting router channels=402021222330386061626300 listen_addr=tcp://0.0.0.0:26656 module=p2p net_addr={"id":"715727499e94f8fcaef1763192ebcc8460f44666","ip":"0.0.0.0","port":26656} node_id=715727499e94f8fcaef1763192ebcc8460f44666 +... ``` This should start the full node and connect to our ABCI application. ```sh -I[2019-07-16|18:25:11.525] Waiting for new connection... -I[2019-07-16|18:26:20.329] Accepted a new connection -I[2019-07-16|18:26:20.329] Waiting for new connection... -I[2019-07-16|18:26:20.330] Accepted a new connection -I[2019-07-16|18:26:20.330] Waiting for new connection... -I[2019-07-16|18:26:20.330] Accepted a new connection +2022-07-20T17:09:55+08:00 INFO Waiting for new connection... 
+2022-07-20T17:10:22+08:00 INFO Accepted a new connection +2022-07-20T17:10:22+08:00 INFO Waiting for new connection... +2022-07-20T17:10:22+08:00 INFO Accepted a new connection +2022-07-20T17:10:22+08:00 INFO Waiting for new connection... +2022-07-20T17:10:22+08:00 INFO Accepted a new connection ``` -Now open another tab in your terminal and try sending a transaction: +Let's try sending a transaction. Open another terminal and execute the below command. -```json +```sh $ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' { - "check_tx": { - "gasWanted": "1", - ... - }, - "deliver_tx": { ... }, - "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB", - "height": "33" + ... + "result": { + "check_tx": { + ... + "gas_wanted": "1", + ... + }, + "deliver_tx": { ... }, + "hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6", + "height": "15" + } } ``` Response should contain the height where this transaction was committed. -Now let's check if the given key now exists and its value: +Let's check if the given key now exists and its value: -```json +```sh $ curl -s 'localhost:26657/abci_query?data="tendermint"' { - "response": { - "code": 0, - "log": "exists", - "info": "", - "index": "0", - "key": "dGVuZGVybWludA==", - "value": "cm9ja3M=", - "proofOps": null, - "height": "6", - "codespace": "" + ... + "result": { + "response": { + "code": 0, + "log": "exists", + "info": "", + "index": "0", + "key": "dGVuZGVybWludA==", + "value": "cm9ja3M=", + "proofOps": null, + "height": "0", + "codespace": "" + } } } ``` -"dGVuZGVybWludA==" and "cm9ja3M=" are the base64-encoding of the ASCII of +`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of "tendermint" and "rocks" accordingly. 
## Outro diff --git a/go.mod b/go.mod index bd2a49ae6f..3c67a6387d 100644 --- a/go.mod +++ b/go.mod @@ -3,12 +3,11 @@ module github.com/tendermint/tendermint go 1.18 require ( - github.com/BurntSushi/toml v1.1.0 - github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/adlio/schema v1.3.0 + github.com/BurntSushi/toml v1.2.0 + github.com/adlio/schema v1.3.3 github.com/btcsuite/btcd v0.22.1 github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce - github.com/containerd/continuity v0.2.2 // indirect + github.com/containerd/continuity v0.3.0 // indirect github.com/dashevo/dashd-go v0.23.4 github.com/dashevo/dashd-go/btcec/v2 v2.0.6 // indirect github.com/dashpay/bls-signatures/go-bindings v0.0.0-20201127091120-745324b80143 @@ -19,7 +18,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 // indirect - github.com/golangci/golangci-lint v1.46.2 + github.com/golangci/golangci-lint v1.47.2 github.com/google/btree v1.0.1 // indirect github.com/google/gopacket v1.1.19 github.com/google/orderedcode v0.0.1 @@ -27,37 +26,33 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/lib/pq v1.10.5 - github.com/libp2p/go-buffer-pool v0.0.2 + github.com/lib/pq v1.10.6 + github.com/libp2p/go-buffer-pool v0.1.0 github.com/mroth/weightedrand v0.4.1 github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b github.com/ory/dockertest v3.3.5+incompatible - github.com/prometheus/client_golang v1.12.1 + github.com/prometheus/client_golang v1.12.2 github.com/rs/cors v1.8.2 - github.com/rs/zerolog v1.26.1 + github.com/rs/zerolog v1.27.0 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.4.0 - github.com/spf13/viper v1.11.0 - github.com/stretchr/objx v0.3.0 // indirect - github.com/stretchr/testify v1.7.1 + github.com/spf13/cobra v1.5.0 + 
github.com/spf13/viper v1.12.0 + github.com/stretchr/testify v1.8.0 github.com/tendermint/tm-db v0.6.6 - github.com/vektra/mockery/v2 v2.12.1 - golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 - golang.org/x/net v0.0.0-20220412020605-290c469a71a5 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect - golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect - google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac // indirect - google.golang.org/grpc v1.46.0 - gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect - pgregory.net/rapid v0.4.7 + golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e + golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 + golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 + google.golang.org/grpc v1.48.0 + pgregory.net/rapid v0.4.8 ) require ( - github.com/creachadair/atomicfile v0.2.5 + github.com/bufbuild/buf v1.4.0 + github.com/creachadair/atomicfile v0.2.6 github.com/creachadair/taskgroup v0.3.2 github.com/go-pkgz/jrpc v0.2.0 github.com/google/go-cmp v0.5.8 + github.com/vektra/mockery/v2 v2.14.0 gotest.tools v2.2.0+incompatible ) @@ -72,21 +67,24 @@ require ( github.com/go-chi/render v1.0.1 // indirect github.com/go-pkgz/expirable-cache v0.0.3 // indirect github.com/go-pkgz/rest v1.5.0 // indirect - github.com/pelletier/go-toml/v2 v2.0.0 // indirect + github.com/pelletier/go-toml/v2 v2.0.2 // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect ) require ( 4d63.com/gochecknoglobals v0.1.0 // indirect - github.com/Antonboom/errname v0.1.6 // indirect + github.com/Antonboom/errname v0.1.7 // indirect github.com/Antonboom/nilnil v0.1.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/DataDog/zstd v1.4.1 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect + 
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect + github.com/Microsoft/go-winio v0.5.2 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/OpenPeeDeeP/depguard v1.1.0 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/alingse/asasalint v0.0.10 // indirect github.com/ashanbrown/forbidigo v1.3.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -101,8 +99,8 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/charithe/durationcheck v0.0.9 // indirect github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect - github.com/creachadair/tomledit v0.0.19 - github.com/daixiang0/gci v0.3.3 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/daixiang0/gci v0.4.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect github.com/dgraph-io/badger/v2 v2.2007.2 // indirect @@ -113,14 +111,14 @@ require ( github.com/dustin/go-humanize v1.0.0 // indirect github.com/esimonov/ifshort v1.0.4 // indirect github.com/ettle/strcase v0.1.1 // indirect - github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect + github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect - github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect + github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect github.com/fatih/color v1.13.0 // indirect github.com/fatih/structtag v1.2.0 // indirect - github.com/firefart/nonamedreturns v1.0.1 // indirect + github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/fzipp/gocyclo v0.5.1 // indirect + github.com/fzipp/gocyclo v0.6.0 // indirect 
github.com/go-critic/go-critic v0.6.3 // indirect github.com/go-toolsmith/astcast v1.0.0 // indirect github.com/go-toolsmith/astcopy v1.0.0 // indirect @@ -132,6 +130,8 @@ require ( github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/uuid v4.2.0+incompatible // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect @@ -146,28 +146,34 @@ require ( github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-version v1.4.0 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f // indirect + github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/julz/importas v0.1.0 // indirect - github.com/kisielk/errcheck v1.6.0 // indirect + github.com/kisielk/errcheck v1.6.1 // indirect github.com/kisielk/gotool v1.0.0 // indirect - github.com/kulti/thelper 
v0.6.2 // indirect - github.com/kunwardeep/paralleltest v1.0.3 // indirect + github.com/klauspost/compress v1.15.1 // indirect + github.com/klauspost/pgzip v1.2.5 // indirect + github.com/kulti/thelper v0.6.3 // indirect + github.com/kunwardeep/paralleltest v1.0.6 // indirect github.com/kyoh86/exportloopref v0.1.8 // indirect github.com/ldez/gomoddirectives v0.2.3 // indirect github.com/ldez/tagliatelle v0.3.1 // indirect github.com/leonklingele/grouper v1.1.0 // indirect github.com/lufeee/execinquery v1.2.1 // indirect github.com/magiconair/properties v1.8.6 // indirect - github.com/maratori/testpackage v1.0.1 // indirect + github.com/maratori/testpackage v1.1.0 // indirect github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect @@ -180,72 +186,86 @@ require ( github.com/moricho/tparallel v0.2.1 // indirect github.com/nakabonne/nestif v0.3.1 // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect - github.com/nishanths/exhaustive v0.7.11 // indirect + github.com/nishanths/exhaustive v0.8.1 // indirect github.com/nishanths/predeclared v0.2.2 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect - github.com/opencontainers/runc v1.0.3 // indirect + github.com/opencontainers/runc v1.1.3 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pkg/profile v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polyfloyd/go-errorlint v1.0.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect + 
github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.37.0 github.com/prometheus/procfs v0.7.3 // indirect github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a // indirect github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/ryancurrah/gomodguard v1.2.3 // indirect github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect - github.com/securego/gosec/v2 v2.11.0 // indirect + github.com/securego/gosec/v2 v2.12.0 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/sivchari/containedctx v1.0.2 // indirect - github.com/sivchari/tenv v1.5.0 // indirect + github.com/sivchari/nosnakecase v1.5.0 // indirect + github.com/sivchari/tenv v1.7.0 // indirect github.com/sonatard/noctx v0.0.1 // indirect github.com/sourcegraph/go-diff v0.6.1 // indirect github.com/spf13/afero v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect - github.com/subosito/gotenv v1.2.0 // indirect + github.com/stretchr/objx v0.4.0 // indirect + github.com/subosito/gotenv v1.4.0 // indirect github.com/sylvia7788/contextcheck v1.0.4 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca // indirect github.com/tdakkota/asciicheck v0.1.1 // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tetafro/godot v1.4.11 // indirect github.com/timakin/bodyclose 
v0.0.0-20210704033933-f49887972144 // indirect - github.com/tomarrell/wrapcheck/v2 v2.6.1 // indirect + github.com/tomarrell/wrapcheck/v2 v2.6.2 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect github.com/ultraware/funlen v0.0.3 // indirect github.com/ultraware/whitespace v0.0.5 // indirect - github.com/uudashr/gocognit v1.0.5 // indirect + github.com/uudashr/gocognit v1.0.6 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.2.0 // indirect - gitlab.com/bosi/decorder v0.2.1 // indirect + gitlab.com/bosi/decorder v0.2.2 // indirect go.etcd.io/bbolt v1.3.6 // indirect - golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect - golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + go.opencensus.io v0.23.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.21.0 // indirect + golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/sys v0.0.0-20220702020025-31831981b65f // indirect + golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect - golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect + golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1 // indirect + google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect google.golang.org/protobuf v1.28.0 // indirect - gopkg.in/ini.v1 v1.66.4 // indirect + gopkg.in/ini.v1 v1.66.6 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - honnef.co/go/tools v0.3.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + honnef.co/go/tools v0.3.2 // indirect 
mvdan.cc/gofumpt v0.3.1 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect - mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect + mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect +) + +require ( + github.com/creachadair/tomledit v0.0.23 + github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca ) replace github.com/tendermint/tendermint => ./ diff --git a/go.sum b/go.sum index 9910c5222e..458d621870 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,5 @@ 4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0= 4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -22,31 +21,14 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go 
v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -61,31 +43,30 @@ cloud.google.com/go/storage v1.10.0/go.mod 
h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Antonboom/errname v0.1.6 h1:LzIJZlyLOCSu51o3/t2n9Ck7PcoP9wdbrdaW6J8fX24= -github.com/Antonboom/errname v0.1.6/go.mod h1:7lz79JAnuoMNDAWE9MeeIr1/c/VpSUWatBv2FH9NYpI= +github.com/Antonboom/errname v0.1.7 h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako= +github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU= github.com/Antonboom/nilnil v0.1.1 h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q= github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZR+xdJEaI= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= +github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod 
h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 h1:LAPPhJ4KR5Z8aKVZF5S48csJkxL5RMKmE/98fMs1u5M= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0/go.mod h1:LGOGuvEgCfCQsy3JF2tRmpGDpzA53iZfyGEWSPwQ6/4= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0 h1:V9xVvhKbLt7unNEGAruK1xXglyc668Pq3Xx0MNTNqpo= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0/go.mod h1:n/vLeA7V+QY84iYAGwMkkUUp9ooeuftMEvaDrSVch+Q= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -95,8 +76,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/OpenPeeDeeP/depguard v1.1.0 h1:pjK9nLPS1FwQYGGpPxoMYpe7qACHOhAWQMQzV71i49o= github.com/OpenPeeDeeP/depguard v1.1.0/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= github.com/VividCortex/gohistogram v1.0.0 
h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= -github.com/adlio/schema v1.3.0 h1:eSVYLxYWbm/6ReZBCkLw4Fz7uqC+ZNoPvA39bOwi52A= -github.com/adlio/schema v1.3.0/go.mod h1:51QzxkpeFs6lRY11kPye26IaFPOV+HqEj01t5aXXKfs= +github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I= +github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -105,15 +86,12 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alingse/asasalint v0.0.10 h1:qqGPDTV0ff0tWHN/nnIlSdjlU/EwRPaUY4SfpE1rnms= +github.com/alingse/asasalint v0.0.10/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.10/go.mod 
h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/ashanbrown/forbidigo v1.3.0 h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc= github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= @@ -121,12 +99,13 @@ github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvx github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= @@ -155,12 +134,14 @@ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg 
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/bufbuild/buf v1.4.0 h1:GqE3a8CMmcFvWPzuY3Mahf9Kf3S9XgZ/ORpfYFzO+90= +github.com/bufbuild/buf v1.4.0/go.mod h1:mwHG7klTHnX+rM/ym8LXGl7vYpVmnwT96xWoRB4H5QI= github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -170,54 +151,50 @@ github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 h1:tFXjAxje9thrTF4h57Ckik+scJjTWdwAtZqZPtOT48M= github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4/go.mod h1:W8EnPSQ8Nv4fUjc/v1/8tHFqhuOJXnRub0dTfuAQktU= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= 
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA= -github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creachadair/atomicfile v0.2.5 h1:wkOlpsjyJOvJ3Hd8juHKdirJnCSIPacvtY21/3nYjAo= -github.com/creachadair/atomicfile v0.2.5/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/atomicfile v0.2.6 h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg= +github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= -github.com/creachadair/tomledit v0.0.19 h1:zbpfUtYFYFdpRjwJY9HJlto1iZ4M5YwYB6qqc37F6UM= -github.com/creachadair/tomledit v0.0.19/go.mod h1:gvtfnSZLa+YNQD28vaPq0Nk12bRxEhmUdBzAWn+EGF4= +github.com/creachadair/tomledit v0.0.23 h1:ohYJjMsxwzj4dDzKaBWFbWH5J+3LO/8CYnlVY+baBWA= +github.com/creachadair/tomledit v0.0.23/go.mod h1:cIu/4x5L855oSRejIqr+WRFh+mv9g4fWLiUFaApYn/Y= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/daixiang0/gci v0.3.3 h1:55xJKH7Gl9Vk6oQ1cMkwrDWjAkT1D+D1G9kNmRcAIY4= -github.com/daixiang0/gci v0.3.3/go.mod h1:1Xr2bxnQbDxCqqulUOv8qpGqkgRw9RSCGGjEC2LjF8o= +github.com/cyphar/filepath-securejoin 
v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/daixiang0/gci v0.4.3 h1:wf7x0xRjQqTlA2dzHTI0A/xPyp7VcBatBG9nwGatwbQ= +github.com/daixiang0/gci v0.4.3/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c= github.com/dashevo/dashd-go v0.23.0-test.7/go.mod h1:vc45V5rc4L4uA8ccXwyj6uuAcXQ0sdrZZiqKCkAhpps= github.com/dashevo/dashd-go v0.23.0-test.8/go.mod h1:OLSRGjMkJbTVHVDDaAYOJ0ronCLDBe7AV02BzHo40VE= github.com/dashevo/dashd-go v0.23.2/go.mod h1:GaTY1dpsl+KkfQwW6APnMim9YUx78XiyDIwn3aVN4Rk= @@ -253,11 +230,12 @@ github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KP github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/didip/tollbooth/v6 v6.0.1 h1:QvLvRpB1G2bzKvkRze0muMUBlGN9H1z7tJ4DH4ypWOU= github.com/didip/tollbooth/v6 v6.0.1/go.mod h1:j2pKs+JQ5PvU/K4jFnrnwntrmfUbYLJE5oSdxR37FD0= github.com/didip/tollbooth_chi v0.0.0-20200524181329-8b84cd7183d9 h1:gTh8fKuI/yLqQtZEPlDX3ZGsiTPZIe0ADHsxXSbwO1I= github.com/didip/tollbooth_chi v0.0.0-20200524181329-8b84cd7183d9/go.mod h1:YWyIfq3y4ArRfWZ9XksmuusP+7Mad+T0iFZ0kv0XG/M= +github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= +github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -271,46 +249,41 @@ github.com/envoyproxy/go-control-plane 
v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= -github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod 
h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= -github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y= -github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/firefart/nonamedreturns v1.0.1 h1:fSvcq6ZpK/uBAgJEGMvzErlzyM4NELLqqdTofVjVNag= -github.com/firefart/nonamedreturns v1.0.1/go.mod h1:D3dpIBojGGNh5UfElmwPu73SwDCm+VKhHYqwlNOk2uQ= +github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= +github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= 
-github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= -github.com/fzipp/gocyclo v0.5.1 h1:L66amyuYogbxl0j2U+vGqJXusPF2IkduvXLnYD5TFgw= -github.com/fzipp/gocyclo v0.5.1/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= @@ -327,9 +300,11 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-pkgz/expirable-cache v0.0.3 h1:rTh6qNPp78z0bQE6HDhXBHUwqnV9i09Vm6dksJLXQDc= github.com/go-pkgz/expirable-cache v0.0.3/go.mod h1:+IauqN00R2FqNRLCLA+X5YljQJrwB179PfiAoMPlTlQ= github.com/go-pkgz/jrpc v0.2.0 h1:CLy/eZyekjraVrxZV18N2R1mYLMJ/nWrgdfyIOGPY/E= @@ -364,8 +339,11 @@ github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzz github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -376,10 +354,10 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZ github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache 
v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -388,8 +366,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -406,11 +382,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= @@ -421,8 +395,8 @@ github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6 github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.46.2 h1:o90t/Xa6dhJbvy8Bz2RpzUXqrkigp19DLStMolTZbyo= -github.com/golangci/golangci-lint v1.46.2/go.mod h1:3DkdHnxn9eoTTrpT2gB0TEv8KSziuoqe9FitgQLHvAY= +github.com/golangci/golangci-lint v1.47.2 h1:qvMDVv49Hrx3PSEXZ0bD/yhwSbhsOihQjFYCKieegIw= +github.com/golangci/golangci-lint v1.47.2/go.mod h1:lpS2pjBZtRyXewUcOY7yUL3K4KfpoWz072yRN8AuhHg= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca 
h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= @@ -451,7 +425,6 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -460,7 +433,6 @@ github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -474,13 +446,9 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof 
v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -490,19 +458,14 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod 
h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo= +github.com/gookit/color v1.5.1/go.mod h1:wZFzea4X8qN6vHOSP2apMb4/+w/orMznEzYsIHPaqKM= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U= github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -522,75 +485,56 @@ github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware 
v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= -github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= 
-github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= -github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a h1:d4+I1YEKVmWZrgkt6jpXBnLgV2ZjO0YxEtLDdfIZfH4= +github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a/go.mod h1:Zi/ZFkEqFHTm7qkjyNJjaWH4LQA9LQhGJyF0lTYGpxw= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= +github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= +github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= +github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f h1:BNuUg9k2EiJmlMwjoef3e8vZLHplbVw6DrjGFjLL+Yo= +github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f/go.mod h1:qr2b5kx4HbFS7/g4uYO5qv9ei8303JMsC7ESbYiqr2Q= github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= +github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= +github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753 h1:uFlcJKZPLQd7rmOY/RrvBuUaYmAFnlFHKLivhO6cOy8= +github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= 
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= @@ -607,7 +551,6 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -622,29 +565,32 @@ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.0 h1:YTDO4pNy7AUN/021p+JGHycQyYNIyMoenM1YDVK6RlY= -github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.1 h1:cErYo+J4SmEjdXZrVXGwLJCE2sB06s23LpkcyWNrT+s= +github.com/kisielk/errcheck v1.6.1/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kkdai/bstream v1.0.0/go.mod 
h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA= +github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= +github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.6.2 h1:K4xulKkwOCnT1CDms6Ex3uG1dvSMUUQe9zxgYQgbRXs= -github.com/kulti/thelper v0.6.2/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= -github.com/kunwardeep/paralleltest v1.0.3 h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI= -github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= +github.com/kulti/thelper v0.6.3 
h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g= +github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= @@ -658,36 +604,30 @@ github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ= -github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= -github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= +github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= -github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= 
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= +github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q= +github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod 
h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= @@ -705,25 +645,19 @@ github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aks github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= github.com/mgechev/revive v1.2.1 h1:GjFml7ZsoR0IrQ2E2YIvWFNS5GPDV7xNwvA5GM1HZC4= github.com/mgechev/revive v1.2.1/go.mod h1:+Ro3wqY4vakcYNtkBWdZC7dBg1xSB6sp054wWwmeFm0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -747,8 +681,8 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6Fx github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.7.11 h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA= -github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI= +github.com/nishanths/exhaustive v0.8.1 h1:0QKNascWv9qIHY7zRoZSxeRr6kuk5aAT3YXLTiDmjTo= +github.com/nishanths/exhaustive v0.8.1/go.mod 
h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= @@ -757,7 +691,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b h1:MKwruh+HeCSKWphkxuzvRzU4QzDkg7yiPkDVV0cDFgI= github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b/go.mod h1:TLJifjWF6eotcfzDjKZsDqWJ+73Uvj/N85MvVyrvynM= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= @@ -770,89 +703,80 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/gomega 
v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k= -github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= 
github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.0.0 h1:P7Bq0SaI8nsexyay5UAyDo+ICWy5MQPgEZ5+l8JQTKo= -github.com/pelletier/go-toml/v2 v2.0.0/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= +github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= 
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/profile v1.6.0 h1:hUDfIISABYI59DyeB3OTay/HxSRwTQ8rB/H83k6r5dM= +github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polyfloyd/go-errorlint v1.0.0 h1:pDrQG0lrh68e602Wfp68BlUTRFoHn8PZYAjLgt2LFsM= github.com/polyfloyd/go-errorlint v1.0.0/go.mod h1:KZy4xxPJyy88/gldCe5OdW6OQRtNO3EZE7hXzmnebgA= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= 
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 
h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= @@ -872,30 +796,25 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rs/cors 
v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc= -github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc= +github.com/rs/zerolog v1.27.0 h1:1T7qCieN22GVc8S4Q2yuexzBb1EqjbgjSH9RohbMjKs= +github.com/rs/zerolog v1.27.0/go.mod h1:7frBqO0oezxmnO7GF86FY++uy8I0Tk/If5ni1G9Qc0U= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8= github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= -github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= -github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA= github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/sean-/seed 
v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/securego/gosec/v2 v2.11.0 h1:+PDkpzR41OI2jrw1q6AdXZCbsNGNGT7pQjal0H0cArI= -github.com/securego/gosec/v2 v2.11.0/go.mod h1:SX8bptShuG8reGC0XS09+a4H2BoWSJi+fscA+Pulbpo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/securego/gosec/v2 v2.12.0 h1:CQWdW7ATFpvLSohMVsajscfyHJ5rsGmEXmsNcsDNmAg= +github.com/securego/gosec/v2 v2.12.0/go.mod h1:iTpT+eKTw59bSgklBHlSnH5O2tNygHMDxfvMubA4i7I= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= @@ -910,8 +829,10 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI= github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= -github.com/sivchari/tenv v1.5.0 h1:wxW0mFpKI6DIb3s6m1jCDYvkWXCskrimXMuGd0K/kSQ= -github.com/sivchari/tenv v1.5.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/sivchari/nosnakecase v1.5.0 h1:ZBvAu1H3uteN0KQ0IsLpIFOwYgPEhKLyv2ahrVkub6M= +github.com/sivchari/nosnakecase v1.5.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= +github.com/sivchari/tenv v1.7.0 h1:d4laZMBK6jpe5PWepxlV9S+LC0yXqvYHiq8E6ceoVVE= +github.com/sivchari/tenv v1.7.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY= 
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -923,20 +844,15 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= -github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= 
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -945,19 +861,16 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= -github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= -github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44= -github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= -github.com/stretchr/objx v0.3.0/go.mod 
h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -966,10 +879,12 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= +github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04= github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -992,14 
+907,11 @@ github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiff github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.6.1 h1:Cf4a/iwuMp9s7kKrh74GTgijRVim0wEpKjgAsT7Wctw= -github.com/tomarrell/wrapcheck/v2 v2.6.1/go.mod h1:Eo+Opt6pyMW1b6cNllOcDSSoHO0aTJ+iF6BfCUbHltA= +github.com/tomarrell/wrapcheck/v2 v2.6.2 h1:3dI6YNcrJTQ/CJQ6M/DUkc0gnqYSIk6o0rChn9E/D0M= +github.com/tomarrell/wrapcheck/v2 v2.6.2/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s= github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= @@ -1007,15 +919,18 @@ github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqz github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v1.20.0/go.mod 
h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4= -github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= -github.com/vektra/mockery/v2 v2.12.1 h1:BAJk2fGjVg/P9Fi+BxZD1/ZeKTOclpeAb/SKCc12zXc= -github.com/vektra/mockery/v2 v2.12.1/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U= +github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= +github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= +github.com/vektra/mockery/v2 v2.14.0 h1:KZ1p5Hrn8tiY+LErRMr14HHle6khxo+JKOXLBW/yfqs= +github.com/vektra/mockery/v2 v2.14.0/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod 
h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -1031,22 +946,14 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -gitlab.com/bosi/decorder v0.2.1 h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w= -gitlab.com/bosi/decorder v0.2.1/go.mod h1:6C/nhLSbF6qZbYD8bRmISBwc6vcWdNsiIBkRvjJFrH0= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +gitlab.com/bosi/decorder v0.2.2 h1:LRfb3lP6mZWjUzpMOCLTVjcnl/SqZWBWmKNqQvMocQs= +gitlab.com/bosi/decorder v0.2.2/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= -go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI= go.mozilla.org/mozlog 
v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -1054,44 +961,45 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools 
v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod 
h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1103,8 +1011,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= 
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM= -golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d h1:+W8Qf4iJtMGKkyAygcKohjxTk4JPsL9DpzApJ22m5Ic= +golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1130,15 +1038,14 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1147,7 +1054,6 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1178,23 +1084,16 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc= +golang.org/x/net 
v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1204,17 +1103,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 
v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1226,13 +1116,12 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1250,7 +1139,6 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1259,13 +1147,11 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1283,7 +1169,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1292,50 +1177,34 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220702020025-31831981b65f h1:xdsejrW/0Wf2diT5CPp3XmKUNbr7Xvw8kYilQ+6qjRY= +golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1351,8 +1220,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs= -golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1373,7 +1242,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1405,7 +1273,6 @@ golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWc 
golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1441,23 +1308,17 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a 
h1:ofrrl6c6NG5/IOSx/R1cyiQxxjqlur0h/TvbUhkH0II= -golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1 h1:NHLFZ56qCjD+0hYY3kE5Wl40Z7q4Gn9Ln/7YU0lsGko= +golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1478,24 +1339,6 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= 
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1546,49 +1389,12 @@ google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto 
v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= 
-google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac h1:qSNTkEN+L2mvWcLgJOR+8bdHX9rN/IdU3A1Ghpfb1Rg= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -1608,21 +1414,11 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= 
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1635,6 +1431,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= @@ -1650,10 +1447,8 @@ gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= -gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= +gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1670,8 +1465,9 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 
v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1681,18 +1477,18 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.3.1 h1:1kJlrWJLkaGXgcaeosRXViwviqjI7nkBvU2+sZW0AYc= -honnef.co/go/tools v0.3.1/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70= +honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34= +honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio= -mvdan.cc/unparam 
v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY= -pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= -pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= +mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 h1:seuXWbRB1qPrS3NQnHmFKLJLtskWyueeIzmLXghMGgk= +mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2YjFf5CaW0Bw4RL8RfbEf4GRggJk= +pgregory.net/rapid v0.4.8 h1:d+5SGZWUbJPbl3ss6tmPFqnNeQR6VDOFly+eTjwPiEw= +pgregory.net/rapid v0.4.8/go.mod h1:Z5PbWqjvWR1I3UGjvboUuan4fe4ZYEYNLNQLExzCoUs= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index f00a2fab5b..0ac9a124f4 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -200,16 +200,20 @@ func (pool *BlockPool) IsCaughtUp() bool { return pool.height >= (pool.maxPeerHeight - 1) } -// PeekTwoBlocks returns blocks at pool.height and pool.height+1. -// We need to see the second block's Commit to validate the first block. -// So we peek two blocks at a time. +// PeekTwoBlocks returns blocks at pool.height and pool.height+1. We need to +// see the second block's Commit to validate the first block. So we peek two +// blocks at a time. We return an extended commit, containing vote extensions +// and their associated signatures, as this is critical to consensus in ABCI++ +// as we switch from block sync to consensus mode. +// // The caller will verify the commit. 
-func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) { +func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstCommit *types.Commit) { pool.mtx.RLock() defer pool.mtx.RUnlock() if r := pool.requesters[pool.height]; r != nil { first = r.getBlock() + firstCommit = r.getCommit() } if r := pool.requesters[pool.height+1]; r != nil { second = r.getBlock() @@ -218,7 +222,8 @@ func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) } // PopRequest pops the first block at pool.height. -// It must have been validated by 'second'.Commit from PeekTwoBlocks(). +// It must have been validated by the second Commit from PeekTwoBlocks. +// TODO(thane): (?) and its corresponding Commit. func (pool *BlockPool) PopRequest() { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -262,16 +267,25 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID { return peerID } -// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it. +// AddBlock validates that the block comes from the peer it was expected from +// and calls the requester to store it. +// +// This requires an extended commit at the same height as the supplied block - +// the block contains the last commit, but we need the latest commit in case we +// need to switch over from block sync to consensus at this height. If the +// height of the extended commit and the height of the block do not match, we +// do not add the block and return an error. // TODO: ensure that blocks come in order for each peer. 
-func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) { +func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, commit *types.Commit, blockSize int) error { pool.mtx.Lock() defer pool.mtx.Unlock() + if commit != nil && block.Height != commit.Height { + return fmt.Errorf("heights don't match, not adding block (block height: %d, commit height: %d)", block.Height, commit.Height) + } + requester := pool.requesters[block.Height] if requester == nil { - pool.logger.Error("peer sent us a block we didn't expect", - "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height) diff := pool.height - block.Height if diff < 0 { diff *= -1 @@ -279,10 +293,10 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSi if diff > maxDiffBetweenCurrentAndReceivedBlockHeight { pool.sendError(errors.New("peer sent us a block we didn't expect with a height too far ahead/behind"), peerID) } - return + return fmt.Errorf("peer sent us a block we didn't expect (peer: %s, current height: %d, block height: %d)", peerID, pool.height, block.Height) } - if requester.setBlock(block, peerID) { + if requester.setBlock(block, commit, peerID) { atomic.AddInt32(&pool.numPending, -1) peer := pool.peers[peerID] if peer != nil { @@ -290,9 +304,11 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSi } } else { err := errors.New("requester is different or block already exists") - pool.logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height) pool.sendError(err, peerID) + return fmt.Errorf("%w (peer: %s, requester: %s, block height: %d)", err, peerID, requester.getPeerID(), block.Height) } + + return nil } // MaxPeerHeight returns the highest reported height. 
@@ -456,6 +472,7 @@ func (pool *BlockPool) debug() string { } else { str += fmt.Sprintf("H(%v):", h) str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil) + str += fmt.Sprintf("C?(%v) ", pool.requesters[h].commit != nil) } } return str @@ -547,6 +564,7 @@ type bpRequester struct { mtx sync.Mutex peerID types.NodeID block *types.Block + commit *types.Commit } func newBPRequester(logger log.Logger, pool *BlockPool, height int64) *bpRequester { @@ -572,13 +590,16 @@ func (bpr *bpRequester) OnStart(ctx context.Context) error { func (*bpRequester) OnStop() {} // Returns true if the peer matches and block doesn't already exist. -func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool { +func (bpr *bpRequester) setBlock(block *types.Block, commit *types.Commit, peerID types.NodeID) bool { bpr.mtx.Lock() if bpr.block != nil || bpr.peerID != peerID { bpr.mtx.Unlock() return false } bpr.block = block + if commit != nil { + bpr.commit = commit + } bpr.mtx.Unlock() select { @@ -594,6 +615,12 @@ func (bpr *bpRequester) getBlock() *types.Block { return bpr.block } +func (bpr *bpRequester) getCommit() *types.Commit { + bpr.mtx.Lock() + defer bpr.mtx.Unlock() + return bpr.commit +} + func (bpr *bpRequester) getPeerID() types.NodeID { bpr.mtx.Lock() defer bpr.mtx.Unlock() @@ -611,6 +638,7 @@ func (bpr *bpRequester) reset() { bpr.peerID = "" bpr.block = nil + bpr.commit = nil } // Tells bpRequester to pick another peer and try again. diff --git a/internal/blocksync/pool_test.go b/internal/blocksync/pool_test.go index 1cb8cca40c..c12347e6e4 100644 --- a/internal/blocksync/pool_test.go +++ b/internal/blocksync/pool_test.go @@ -43,7 +43,10 @@ func (p testPeer) runInputRoutine() { // Request desired, pretend like we got the block immediately. 
func (p testPeer) simulateInput(input inputData) { block := &types.Block{Header: types.Header{Height: input.request.Height}} - input.pool.AddBlock(input.request.PeerID, block, 123) + extCommit := &types.Commit{ + Height: input.request.Height, + } + _ = input.pool.AddBlock(input.request.PeerID, block, extCommit, 123) // TODO: uncommenting this creates a race which is detected by: // https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856 // see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890 @@ -110,7 +113,7 @@ func TestBlockPoolBasic(t *testing.T) { if !pool.IsRunning() { return } - first, second := pool.PeekTwoBlocks() + first, second, _ := pool.PeekTwoBlocks() if first != nil && second != nil { pool.PopRequest() } else { @@ -164,7 +167,7 @@ func TestBlockPoolTimeout(t *testing.T) { if !pool.IsRunning() { return } - first, second := pool.PeekTwoBlocks() + first, second, _ := pool.PeekTwoBlocks() if first != nil && second != nil { pool.PopRequest() } else { diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 39b23ba9b0..448dbbd7e1 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -77,7 +77,7 @@ type Reactor struct { stateStore sm.Store blockExec *sm.BlockExecutor - store *store.BlockStore + store sm.BlockStore pool *BlockPool consReactor consensusReactor blockSync *atomicBool @@ -140,7 +140,7 @@ func (r *Reactor) OnStart(ctx context.Context) error { if err != nil { return err } - r.chCreator = func(context.Context, *conn.ChannelDescriptor) (*p2p.Channel, error) { return blockSyncCh, nil } + r.chCreator = func(context.Context, *conn.ChannelDescriptor) (p2p.Channel, error) { return blockSyncCh, nil } state, err := r.stateStore.Load() if err != nil { @@ -188,33 +188,40 @@ func (r *Reactor) OnStop() { // respondToPeer loads a block and sends it to the requesting peer, if we have it. // Otherwise, we'll respond saying we do not have it. 
-func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh *p2p.Channel) error { +func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh p2p.Channel) error { block := r.store.LoadBlock(msg.Height) - if block != nil { - blockProto, err := block.ToProto() - if err != nil { - r.logger.Error("failed to convert msg to protobuf", "err", err) - return err - } - + if block == nil { + r.logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) return blockSyncCh.Send(ctx, p2p.Envelope{ To: peerID, - Message: &bcproto.BlockResponse{Block: blockProto}, + Message: &bcproto.NoBlockResponse{Height: msg.Height}, }) } - r.logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) + commit := r.store.LoadSeenCommitAt(msg.Height) + if commit == nil { + return fmt.Errorf("found block in store with no commit: %v", block) + } + + blockProto, err := block.ToProto() + if err != nil { + return fmt.Errorf("failed to convert block to protobuf: %w", err) + } return blockSyncCh.Send(ctx, p2p.Envelope{ - To: peerID, - Message: &bcproto.NoBlockResponse{Height: msg.Height}, + To: peerID, + Message: &bcproto.BlockResponse{ + Block: blockProto, + Commit: commit.ToProto(), + }, }) + } // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. 
-func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blockSyncCh *p2p.Channel) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blockSyncCh p2p.Channel) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -241,8 +248,21 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blo "err", err) return err } + var commit *types.Commit + if msg.Commit != nil { + var err error + commit, err = types.CommitFromProto(msg.Commit) + if err != nil { + r.logger.Error("failed to convert extended commit from proto", + "peer", envelope.From, + "err", err) + return err + } + } - r.pool.AddBlock(envelope.From, block, block.Size()) + if err := r.pool.AddBlock(envelope.From, block, commit, block.Size()); err != nil { + r.logger.Error("failed to add block", "err", err) + } case *bcproto.StatusRequest: return blockSyncCh.Send(ctx, p2p.Envelope{ @@ -276,7 +296,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blo // message execution will result in a PeerError being sent on the BlockSyncChannel. // When the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. -func (r *Reactor) processBlockSyncCh(ctx context.Context, blockSyncCh *p2p.Channel) { +func (r *Reactor) processBlockSyncCh(ctx context.Context, blockSyncCh p2p.Channel) { iter := blockSyncCh.Receive(ctx) for iter.Next(ctx) { envelope := iter.Envelope() @@ -297,7 +317,7 @@ func (r *Reactor) processBlockSyncCh(ctx context.Context, blockSyncCh *p2p.Chann } // processPeerUpdate processes a PeerUpdate. 
-func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, blockSyncCh *p2p.Channel) { +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, blockSyncCh p2p.Channel) { r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) // XXX: Pool#RedoRequest can sometimes give us an empty peer. @@ -332,7 +352,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, blockSyncCh *p2p.Channel) { +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, blockSyncCh p2p.Channel) { for { select { case <-ctx.Done(): @@ -374,7 +394,7 @@ func (r *Reactor) SwitchToBlockSync(ctx context.Context, state sm.State) error { return nil } -func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh *p2p.Channel) { +func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh p2p.Channel) { statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) defer statusUpdateTicker.Stop() @@ -416,7 +436,7 @@ func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh *p2p.Channel) // do. // // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! 
-func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh *p2p.Channel) { +func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh p2p.Channel) { var ( trySyncTicker = time.NewTicker(trySyncIntervalMS * time.Millisecond) switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second) @@ -445,14 +465,13 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh lastAdvance = r.pool.LastAdvance() ) - r.logger.Debug( - "consensus ticker", + r.logger.Debug("consensus ticker", "num_pending", numPending, "total", lenRequesters, - "height", height, - ) + "height", height) switch { + case r.pool.IsCaughtUp(): r.logger.Info("switching to consensus reactor", "height", height) @@ -495,15 +514,19 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh // TODO: Uncouple from request routine. // see if there are any blocks to sync - first, second := r.pool.PeekTwoBlocks() - if first == nil || second == nil { - // we need both to sync the first block + first, second, commit := r.pool.PeekTwoBlocks() + if first != nil && commit == nil { + // See https://github.com/tendermint/tendermint/pull/8433#discussion_r866790631 + panic(fmt.Errorf("peeked first block without extended commit at height %d - possible node store corruption", first.Height)) + } else if first == nil || second == nil { + // we need to have fetched two consecutive blocks in order to + // perform blocksync verification continue - } else { - // try again quickly next loop - didProcessCh <- struct{}{} } + // try again quickly next loop + didProcessCh <- struct{}{} + firstParts, err := first.MakePartSet(types.BlockPartSizeBytes) if err != nil { r.logger.Error("failed to make ", @@ -526,7 +549,15 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh // NOTE: We can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, 
so MakePartSet() is // currently necessary. + // TODO(sergio): Should we also validate against the extended commit? err = state.Validators.VerifyCommit(chainID, firstID, stateID, first.Height, second.LastCommit) + + if err == nil { + // validate the block before we persist it + err = r.blockExec.ValidateBlock(ctx, state, first) + } + // If either of the checks failed we log the error and request for a new block + // at that height if err != nil { err = fmt.Errorf("invalid last commit: %w", err) r.logger.Error( @@ -555,37 +586,39 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh return } } - } else { - r.pool.PopRequest() + return + } - // TODO: batch saves so we do not persist to disk every block - r.store.SaveBlock(first, firstParts, second.LastCommit) + r.pool.PopRequest() - var err error + r.store.SaveBlock(first, firstParts, commit) + // We use LastCommit here instead of commit. commit is not + // guaranteed to be populated by the peer if extensions are not enabled. + // Currently, the peer should provide an commit even if the vote extension data are absent + // but this may change so using second.LastCommit is safer. + //r.store.SaveBlock(first, firstParts, second.LastCommit) - // TODO: Same thing for app - but we would need a way to get the hash - // without persisting the state. - state, err = r.blockExec.ApplyBlock(ctx, state, r.nodeProTxHash, firstID, first) - if err != nil { - // TODO: This is bad, are we zombie? - panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) - } + // TODO: Same thing for app - but we would need a way to get the hash + // without persisting the state. 
+ state, err = r.blockExec.ApplyBlock(ctx, state, r.nodeProTxHash, firstID, first) + if err != nil { + panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) + } - r.metrics.RecordConsMetrics(first) + r.metrics.RecordConsMetrics(first) - blocksSynced++ + blocksSynced++ - if blocksSynced%100 == 0 { - lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) - r.logger.Info( - "block sync rate", - "height", r.pool.height, - "max_peer_height", r.pool.MaxPeerHeight(), - "blocks/s", lastRate, - ) + if blocksSynced%100 == 0 { + lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) + r.logger.Info( + "block sync rate", + "height", r.pool.height, + "max_peer_height", r.pool.MaxPeerHeight(), + "blocks/s", lastRate, + ) - lastHundred = time.Now() - } + lastHundred = time.Now() } } } diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 63310efd06..67deee3b18 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -37,11 +37,9 @@ type reactorTestSuite struct { reactors map[types.NodeID]*Reactor app map[types.NodeID]abciclient.Client - blockSyncChannels map[types.NodeID]*p2p.Channel + blockSyncChannels map[types.NodeID]p2p.Channel peerChans map[types.NodeID]chan p2p.PeerUpdate peerUpdates map[types.NodeID]*p2p.PeerUpdates - - blockSync bool } func setup( @@ -65,10 +63,9 @@ func setup( nodes: make([]types.NodeID, 0, numNodes), reactors: make(map[types.NodeID]*Reactor, numNodes), app: make(map[types.NodeID]abciclient.Client, numNodes), - blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes), + blockSyncChannels: make(map[types.NodeID]p2p.Channel, numNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), - blockSync: true, } chDesc := &p2p.ChannelDescriptor{ID: BlockSyncChannel, MessageType: new(bcproto.Message)} @@ -96,27 +93,25 @@ func setup( 
return rts } -func (rts *reactorTestSuite) addNode( +func makeReactor( ctx context.Context, t *testing.T, + proTxHash types.ProTxHash, nodeID types.NodeID, genDoc *types.GenesisDoc, privVal types.PrivValidator, - maxBlockHeight int64, -) { - t.Helper() + channelCreator p2p.ChannelCreator, + peerEvents p2p.PeerEventSubscriber) *Reactor { logger := log.NewNopLogger() - rts.nodes = append(rts.nodes, nodeID) - rts.app[nodeID] = proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics()) - require.NoError(t, rts.app[nodeID].Start(ctx)) + app := proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics()) + require.NoError(t, app.Start(ctx)) blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(blockDB) - proTxHash := rts.network.Nodes[nodeID].NodeInfo.ProTxHash state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) @@ -131,6 +126,7 @@ func (rts *reactorTestSuite) addNode( mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) eventBus := eventbus.NewDefault(logger) @@ -138,8 +134,8 @@ func (rts *reactorTestSuite) addNode( blockExec := sm.NewBlockExecutor( stateStore, - logger, - rts.app[nodeID], + log.NewNopLogger(), + app, mp, sm.EmptyEvidencePool{}, blockStore, @@ -147,74 +143,110 @@ func (rts *reactorTestSuite) addNode( sm.NopMetrics(), ) - for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, types.StateID{}, nil) - - if blockHeight > 1 { - lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) - lastBlock := blockStore.LoadBlock(blockHeight - 1) - - vote, err := factory.MakeVote( - ctx, - privVal, - state.Validators, - lastBlock.Header.ChainID, 0, - lastBlock.Header.Height, 0, 2, - lastBlockMeta.BlockID, - state.LastStateID, - ) - require.NoError(t, err) - lastCommit = types.NewCommit( - 
vote.Height, - vote.Round, - lastBlockMeta.BlockID, - state.LastStateID, - &types.CommitSigns{ - QuorumSigns: types.QuorumSigns{ - BlockSign: vote.BlockSignature, - StateSign: vote.StateSignature, - ExtensionSigns: types.MakeThresholdExtensionSigns(vote.VoteExtensions), - }, - QuorumHash: state.Validators.QuorumHash, - }, - ) - } + return NewReactor( + logger, + stateStore, + blockExec, + blockStore, + proTxHash, + nil, + channelCreator, + peerEvents, + true, + consensus.NopMetrics(), + nil, // eventbus, can be nil + ) +} - thisBlock, err := sf.MakeBlock(state, blockHeight, lastCommit, nil, 0) - require.NoError(t, err) - thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes) - require.NoError(t, err) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} +func (rts *reactorTestSuite) addNode( + ctx context.Context, + t *testing.T, + nodeID types.NodeID, + genDoc *types.GenesisDoc, + privVal types.PrivValidator, + maxBlockHeight int64, +) { + t.Helper() - state, err = blockExec.ApplyBlock(ctx, state, proTxHash, blockID, thisBlock) - require.NoError(t, err) + logger := log.NewNopLogger() - blockStore.SaveBlock(thisBlock, thisParts, lastCommit) - } + rts.nodes = append(rts.nodes, nodeID) + rts.app[nodeID] = proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics()) + require.NoError(t, rts.app[nodeID].Start(ctx)) rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) - chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (p2p.Channel, error) { return rts.blockSyncChannels[nodeID], nil } - rts.reactors[nodeID] = NewReactor( - rts.logger.With("nodeID", nodeID), - stateStore, - blockExec, - blockStore, - proTxHash, - nil, - chCreator, - 
func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] }, - rts.blockSync, - consensus.NopMetrics(), - nil, // eventbus, can be nil - ) - require.NoError(t, rts.reactors[nodeID].Start(ctx)) - require.True(t, rts.reactors[nodeID].IsRunning()) + proTxHash := rts.network.Nodes[nodeID].NodeInfo.ProTxHash + peerEvents := func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] } + reactor := makeReactor(ctx, t, proTxHash, nodeID, genDoc, privVal, chCreator, peerEvents) + + commit := types.NewCommit(0, 0, types.BlockID{}, types.StateID{}, nil) + + state, err := reactor.stateStore.Load() + require.NoError(t, err) + for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { + block, blockID, partSet, seenCommit := makeNextBlock(ctx, t, state, privVal, blockHeight, commit) + + state, err = reactor.blockExec.ApplyBlock(ctx, state, proTxHash, blockID, block) + require.NoError(t, err) + + reactor.store.SaveBlock(block, partSet, seenCommit) + commit = seenCommit + } + + rts.reactors[nodeID] = reactor + require.NoError(t, reactor.Start(ctx)) + require.True(t, reactor.IsRunning()) +} + +func makeNextBlock(ctx context.Context, + t *testing.T, + state sm.State, + signer types.PrivValidator, + height int64, + commit *types.Commit) (*types.Block, types.BlockID, *types.PartSet, *types.Commit) { + + block, err := sf.MakeBlock(state, height, commit, nil, 0) + require.NoError(t, err) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: partSet.Header()} + + // Simulate a commit for the current height + vote, err := factory.MakeVote( + ctx, + signer, + state.Validators, + block.Header.ChainID, + 0, + block.Header.Height, + 0, + 2, + blockID, + state.StateID(), + ) + require.NoError(t, err) + seenCommit := types.NewCommit( + vote.Height, + vote.Round, + blockID, + state.StateID(), + &types.CommitSigns{ + QuorumSigns: types.QuorumSigns{ + 
BlockSign: vote.BlockSignature, + StateSign: vote.StateSignature, + ExtensionSigns: types.MakeThresholdExtensionSigns(vote.VoteExtensions), + }, + QuorumHash: state.Validators.QuorumHash, + }, + ) + return block, blockID, partSet, seenCommit } func (rts *reactorTestSuite) start(ctx context.Context, t *testing.T) { @@ -417,3 +449,35 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { len(rts.reactors[newNode.NodeID].pool.peers), ) } + +/* +func TestReactorReceivesNoExtendedCommit(t *testing.T) { + blockDB := dbm.NewMemDB() + stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) + blockStore := store.NewBlockStore(blockDB) + blockExec := sm.NewBlockExecutor( + stateStore, + log.NewNopLogger(), + rts.app[nodeID], + mp, + sm.EmptyEvidencePool{}, + blockStore, + eventbus, + sm.NopMetrics(), + ) + NewReactor( + log.NewNopLogger(), + stateStore, + blockExec, + blockStore, + nil, + chCreator, + func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] }, + rts.blockSync, + consensus.NopMetrics(), + nil, // eventbus, can be nil + ) + +} +*/ diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 32025c600d..be0672db7a 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -33,6 +33,10 @@ import ( // Byzantine node sends two different prevotes (nil and blockID) to the same // validator. func TestByzantinePrevoteEquivocation(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // empirically, this test either passes in <1s or hits some // kind of deadlock and hit the larger timeout. This timeout // can be extended a bunch if needed, but it's good to avoid @@ -185,6 +189,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // The commit is empty, but not nil. 
commit = types.NewCommit(0, 0, types.BlockID{}, types.StateID{}, nil) case lazyNodeState.LastCommit != nil: + // Make the commit from LastCommit commit = lazyNodeState.LastCommit default: // This shouldn't happen. lazyNodeState.logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index f7e13ec35f..1b1757fa13 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -159,7 +159,7 @@ func signVote( quorumHash crypto.QuorumHash) *types.Vote { exts := make(types.VoteExtensions) - if voteType == tmproto.PrecommitType { + if voteType == tmproto.PrecommitType && !blockID.IsNil() { exts.Add(tmproto.VoteExtensionType_DEFAULT, []byte("extension")) } v, err := vs.signVote(ctx, voteType, chainID, blockID, lastAppHash, quorumType, quorumHash, exts) @@ -525,10 +525,11 @@ func loadPrivValidator(t *testing.T, cfg *config.Config) *privval.FilePV { } type makeStateArgs struct { - config *config.Config - logger log.Logger - validators int - application abci.Application + config *config.Config + consensusParams *types.ConsensusParams + logger log.Logger + validators int + application abci.Application } func makeState(ctx context.Context, t *testing.T, args makeStateArgs) (*State, []*validatorStub) { @@ -549,15 +550,18 @@ func makeState(ctx context.Context, t *testing.T, args makeStateArgs) (*State, [ if args.logger == nil { args.logger = log.NewNopLogger() } + c := factory.ConsensusParams() + if args.consensusParams != nil { + c = args.consensusParams + } - consensusParams := factory.ConsensusParams() // vote timeout increased because of bls12381 signing/verifying operations are longer performed than ed25519 // and 10ms (previous value) is not enough - consensusParams.Timeout.Vote = 50 * time.Millisecond - consensusParams.Timeout.VoteDelta = 5 * time.Millisecond + c.Timeout.Vote = 50 * time.Millisecond + c.Timeout.VoteDelta = 5 * 
time.Millisecond state, privVals := makeGenesisState(ctx, t, args.config, genesisStateArgs{ - Params: consensusParams, + Params: c, Validators: validators, }) diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index fcbd5ce3e9..bd3dc7200d 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -107,7 +107,7 @@ func invalidDoPrevoteFunc( round int32, cs *State, r *Reactor, - voteCh *p2p.Channel, + voteCh p2p.Channel, pv types.PrivValidator, ) { // routine to: diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 71be7c7a1b..4ca7627e85 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "fmt" "os" + "sync" "testing" "time" @@ -142,8 +143,10 @@ func checkTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) for i := start; i < end; i++ { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, nil, mempool.TxInfo{}) + var rCode uint32 + err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, func(r *abci.ResponseCheckTx) { rCode = r.Code }, mempool.TxInfo{}) require.NoError(t, err, "error after checkTx") + require.Equal(t, code.CodeTypeOK, rCode, "checkTx code is error, txBytes %X", txBytes) } } @@ -179,6 +182,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { case msg := <-newBlockHeaderCh: headerEvent := msg.Data().(types.EventDataNewBlockHeader) n += headerEvent.NumTxs + logger.Info("new transactions", "nTxs", headerEvent.NumTxs, "total", n) case <-time.After(30 * time.Second): t.Fatal("Timed out waiting 30s to commit blocks with transactions") } @@ -208,20 +212,21 @@ func TestMempoolRmBadTx(t *testing.T) { resFinalize, err := app.FinalizeBlock(ctx, &abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}}) require.NoError(t, err) assert.False(t, resFinalize.TxResults[0].IsErr(), 
fmt.Sprintf("expected no error. got %v", resFinalize)) + assert.True(t, len(resFinalize.AppHash) > 0) - resCommit, err := app.Commit(ctx) + _, err = app.Commit(ctx) require.NoError(t, err) - assert.True(t, len(resCommit.Data) > 0) emptyMempoolCh := make(chan struct{}) checkTxRespCh := make(chan struct{}) go func() { - // Try to send the tx through the mempool. + // Try to send an out-of-sequence transaction through the mempool. // CheckTx should not err, but the app should return a bad abci code // and the tx should get removed from the pool + binary.BigEndian.PutUint64(txBytes, uint64(5)) err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, func(r *abci.ResponseCheckTx) { if r.Code != code.CodeTypeBadNonce { - t.Errorf("expected checktx to return bad nonce, got %v", r) + t.Errorf("expected checktx to return bad nonce, got %#v", r) return } checkTxRespCh <- struct{}{} @@ -269,6 +274,7 @@ type CounterApplication struct { txCount int mempoolTxCount int + mu sync.Mutex } func NewCounterApplication() *CounterApplication { @@ -276,10 +282,16 @@ func NewCounterApplication() *CounterApplication { } func (app *CounterApplication) Info(_ context.Context, req *abci.RequestInfo) (*abci.ResponseInfo, error) { + app.mu.Lock() + defer app.mu.Unlock() + return &abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}, nil } func (app *CounterApplication) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { + app.mu.Lock() + defer app.mu.Unlock() + respTxs := make([]*abci.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { txValue := txAsUint64(tx) @@ -293,18 +305,29 @@ func (app *CounterApplication) FinalizeBlock(_ context.Context, req *abci.Reques app.txCount++ respTxs[i] = &abci.ExecTxResult{Code: code.CodeTypeOK} } - return &abci.ResponseFinalizeBlock{TxResults: respTxs}, nil + + res := &abci.ResponseFinalizeBlock{TxResults: respTxs} + + if app.txCount > 0 { + res.AppHash = make([]byte, 32) + 
binary.BigEndian.PutUint64(res.AppHash[24:], uint64(app.txCount)) + } + + return res, nil } func (app *CounterApplication) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { - txValue := txAsUint64(req.Tx) - if txValue != uint64(app.mempoolTxCount) { - return &abci.ResponseCheckTx{ - Code: code.CodeTypeBadNonce, - Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue), - }, nil + app.mu.Lock() + defer app.mu.Unlock() + if req.Type == abci.CheckTxType_New { + txValue := txAsUint64(req.Tx) + if txValue != uint64(app.mempoolTxCount) { + return &abci.ResponseCheckTx{ + Code: code.CodeTypeBadNonce, + }, nil + } + app.mempoolTxCount++ } - app.mempoolTxCount++ return &abci.ResponseCheckTx{Code: code.CodeTypeOK}, nil } @@ -315,14 +338,7 @@ func txAsUint64(tx []byte) uint64 { } func (app *CounterApplication) Commit(context.Context) (*abci.ResponseCommit, error) { - app.mempoolTxCount = app.txCount - if app.txCount == 0 { - return &abci.ResponseCommit{}, nil - } - - hash := make([]byte, 32) - binary.BigEndian.PutUint64(hash, uint64(app.txCount)) - return &abci.ResponseCommit{Data: hash}, nil + return &abci.ResponseCommit{}, nil } func (app *CounterApplication) PrepareProposal(_ context.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { diff --git a/internal/consensus/metrics.gen.go b/internal/consensus/metrics.gen.go new file mode 100644 index 0000000000..55cc59f6c5 --- /dev/null +++ b/internal/consensus/metrics.gen.go @@ -0,0 +1,248 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package consensus + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "height", + Help: "Height of the chain.", + }, labels).With(labelsAndValues...), + ValidatorLastSignedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_last_signed_height", + Help: "Last height signed by this validator if the node is a validator.", + }, append(labels, "validator_address")).With(labelsAndValues...), + Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "rounds", + Help: "Number of rounds.", + }, labels).With(labelsAndValues...), + RoundDuration: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "round_duration", + Help: "Histogram of round duration.", + + Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), + }, labels).With(labelsAndValues...), + Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validators", + Help: "Number of validators.", + }, labels).With(labelsAndValues...), + ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validators_power", + Help: "Total power of all validators.", + }, labels).With(labelsAndValues...), + ValidatorPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: 
MetricsSubsystem, + Name: "validator_power", + Help: "Power of a validator.", + }, append(labels, "validator_address")).With(labelsAndValues...), + ValidatorMissedBlocks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_missed_blocks", + Help: "Amount of blocks missed per validator.", + }, append(labels, "validator_address")).With(labelsAndValues...), + MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "missing_validators", + Help: "Number of validators who did not sign.", + }, labels).With(labelsAndValues...), + MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "missing_validators_power", + Help: "Total power of the missing validators.", + }, labels).With(labelsAndValues...), + ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "byzantine_validators", + Help: "Number of validators who tried to double sign.", + }, labels).With(labelsAndValues...), + ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "byzantine_validators_power", + Help: "Total power of the byzantine validators.", + }, labels).With(labelsAndValues...), + BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_interval_seconds", + Help: "Time between this and the last block.", + }, labels).With(labelsAndValues...), + NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "num_txs", + Help: "Number of transactions.", + }, labels).With(labelsAndValues...), + BlockSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: 
namespace, + Subsystem: MetricsSubsystem, + Name: "block_size_bytes", + Help: "Size of the block.", + }, labels).With(labelsAndValues...), + TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "total_txs", + Help: "Total number of transactions.", + }, labels).With(labelsAndValues...), + CommittedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "latest_block_height", + Help: "The latest block height.", + }, labels).With(labelsAndValues...), + BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_syncing", + Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.", + }, labels).With(labelsAndValues...), + StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "state_syncing", + Help: "Whether or not a node is state syncing. 
1 if yes, 0 if no.", + }, labels).With(labelsAndValues...), + BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_parts", + Help: "Number of block parts transmitted by each peer.", + }, append(labels, "peer_id")).With(labelsAndValues...), + StepDuration: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "step_duration", + Help: "Histogram of durations for each step in the consensus protocol.", + + Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), + }, append(labels, "step")).With(labelsAndValues...), + BlockGossipReceiveLatency: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_gossip_receive_latency", + Help: "Histogram of time taken to receive a block in seconds, measured between when a new block is first discovered to when the block is completed.", + + Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), + }, labels).With(labelsAndValues...), + BlockGossipPartsReceived: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_gossip_parts_received", + Help: "Number of block parts received by the node, separated by whether the part was relevant to the block the node is trying to gather or not.", + }, append(labels, "matches_current")).With(labelsAndValues...), + QuorumPrevoteDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "quorum_prevote_delay", + Help: "Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum.", + }, append(labels, "proposer_address")).With(labelsAndValues...), + FullPrevoteDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: 
"full_prevote_delay", + Help: "Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted.", + }, append(labels, "proposer_address")).With(labelsAndValues...), + ProposalTimestampDifference: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "proposal_timestamp_difference", + Help: "Difference between the timestamp in the proposal message and the local time of the validator at the time it received the message.", + + Buckets: []float64{-10, -.5, -.025, 0, .1, .5, 1, 1.5, 2, 10}, + }, append(labels, "is_timely")).With(labelsAndValues...), + VoteExtensionReceiveCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "vote_extension_receive_count", + Help: "Number of vote extensions received labeled by application response status.", + }, append(labels, "status")).With(labelsAndValues...), + ProposalReceiveCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "proposal_receive_count", + Help: "Total number of proposals received by the node since process start labeled by application response status.", + }, append(labels, "status")).With(labelsAndValues...), + ProposalCreateCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "proposal_create_count", + Help: "Total number of proposals created by the node since process start.", + }, labels).With(labelsAndValues...), + RoundVotingPowerPercent: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "round_voting_power_percent", + Help: "A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round.", + }, append(labels, "vote_type")).With(labelsAndValues...), + LateVotes: 
prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "late_votes", + Help: "Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in.", + }, append(labels, "vote_type")).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + Height: discard.NewGauge(), + ValidatorLastSignedHeight: discard.NewGauge(), + Rounds: discard.NewGauge(), + RoundDuration: discard.NewHistogram(), + Validators: discard.NewGauge(), + ValidatorsPower: discard.NewGauge(), + ValidatorPower: discard.NewGauge(), + ValidatorMissedBlocks: discard.NewGauge(), + MissingValidators: discard.NewGauge(), + MissingValidatorsPower: discard.NewGauge(), + ByzantineValidators: discard.NewGauge(), + ByzantineValidatorsPower: discard.NewGauge(), + BlockIntervalSeconds: discard.NewHistogram(), + NumTxs: discard.NewGauge(), + BlockSizeBytes: discard.NewHistogram(), + TotalTxs: discard.NewGauge(), + CommittedHeight: discard.NewGauge(), + BlockSyncing: discard.NewGauge(), + StateSyncing: discard.NewGauge(), + BlockParts: discard.NewCounter(), + StepDuration: discard.NewHistogram(), + BlockGossipReceiveLatency: discard.NewHistogram(), + BlockGossipPartsReceived: discard.NewCounter(), + QuorumPrevoteDelay: discard.NewGauge(), + FullPrevoteDelay: discard.NewGauge(), + ProposalTimestampDifference: discard.NewHistogram(), + VoteExtensionReceiveCount: discard.NewCounter(), + ProposalReceiveCount: discard.NewCounter(), + ProposalCreateCount: discard.NewCounter(), + RoundVotingPowerPercent: discard.NewGauge(), + LateVotes: discard.NewCounter(), + } +} diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go index 2cf8f2f73c..bdf0eb412c 100644 --- a/internal/consensus/metrics.go +++ b/internal/consensus/metrics.go @@ -5,11 +5,9 @@ import ( "time" "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - 
"github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -19,28 +17,30 @@ const ( MetricsSubsystem = "consensus" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. type Metrics struct { // Height of the chain. Height metrics.Gauge - // ValidatorLastSignedHeight of a validator. - ValidatorLastSignedHeight metrics.Gauge + // Last height signed by this validator if the node is a validator. + ValidatorLastSignedHeight metrics.Gauge `metrics_labels:"validator_address"` // Number of rounds. Rounds metrics.Gauge // Histogram of round duration. - RoundDuration metrics.Histogram + RoundDuration metrics.Histogram `metrics_buckettype:"exprange" metrics_bucketsizes:"0.1, 100, 8"` // Number of validators. Validators metrics.Gauge // Total power of all validators. ValidatorsPower metrics.Gauge // Power of a validator. - ValidatorPower metrics.Gauge - // Amount of blocks missed by a validator. - ValidatorMissedBlocks metrics.Gauge + ValidatorPower metrics.Gauge `metrics_labels:"validator_address"` + // Amount of blocks missed per validator. + ValidatorMissedBlocks metrics.Gauge `metrics_labels:"validator_address"` // Number of validators who did not sign. MissingValidators metrics.Gauge // Total power of the missing validators. @@ -60,27 +60,27 @@ type Metrics struct { // Total number of transactions. TotalTxs metrics.Gauge // The latest block height. - CommittedHeight metrics.Gauge + CommittedHeight metrics.Gauge `metrics_name:"latest_block_height"` // Whether or not a node is block syncing. 1 if yes, 0 if no. BlockSyncing metrics.Gauge // Whether or not a node is state syncing. 1 if yes, 0 if no. StateSyncing metrics.Gauge - // Number of blockparts transmitted by peer. 
- BlockParts metrics.Counter + // Number of block parts transmitted by each peer. + BlockParts metrics.Counter `metrics_labels:"peer_id"` - // Histogram of step duration. - StepDuration metrics.Histogram + // Histogram of durations for each step in the consensus protocol. + StepDuration metrics.Histogram `metrics_labels:"step" metrics_buckettype:"exprange" metrics_bucketsizes:"0.1, 100, 8"` stepStart time.Time // Histogram of time taken to receive a block in seconds, measured between when a new block is first // discovered to when the block is completed. - BlockGossipReceiveLatency metrics.Histogram + BlockGossipReceiveLatency metrics.Histogram `metrics_buckettype:"exprange" metrics_bucketsizes:"0.1, 100, 8"` blockGossipStart time.Time // Number of block parts received by the node, separated by whether the part // was relevant to the block the node is trying to gather or not. - BlockGossipPartsReceived metrics.Counter + BlockGossipPartsReceived metrics.Counter `metrics_labels:"matches_current"` // QuroumPrevoteMessageDelay is the interval in seconds between the proposal // timestamp and the timestamp of the earliest prevote that achieved a quorum @@ -91,232 +91,50 @@ type Metrics struct { // be above 2/3 of the total voting power of the network defines the endpoint // the endpoint of the interval. Subtract the proposal timestamp from this endpoint // to obtain the quorum delay. - QuorumPrevoteDelay metrics.Gauge + //metrics:Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum. + QuorumPrevoteDelay metrics.Gauge `metrics_labels:"proposer_address"` // FullPrevoteDelay is the interval in seconds between the proposal // timestamp and the timestamp of the latest prevote in a round where 100% // of the voting power on the network issued prevotes. 
- FullPrevoteDelay metrics.Gauge + //metrics:Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted. + FullPrevoteDelay metrics.Gauge `metrics_labels:"proposer_address"` // ProposalTimestampDifference is the difference between the timestamp in // the proposal message and the local time of the validator at the time // that the validator received the message. - ProposalTimestampDifference metrics.Histogram -} - -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). -func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "height", - Help: "Height of the chain.", - }, labels).With(labelsAndValues...), - Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "rounds", - Help: "Number of rounds.", - }, labels).With(labelsAndValues...), - RoundDuration: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "round_duration", - Help: "Time spent in a round.", - Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), - }, labels).With(labelsAndValues...), - Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "validators", - Help: "Number of validators.", - }, labels).With(labelsAndValues...), - ValidatorLastSignedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "validator_last_signed_height", - Help: "Last signed height for a validator", - 
}, append(labels, "validator_address")).With(labelsAndValues...), - ValidatorMissedBlocks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "validator_missed_blocks", - Help: "Total missed blocks for a validator", - }, append(labels, "validator_address")).With(labelsAndValues...), - ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "validators_power", - Help: "Total power of all validators.", - }, labels).With(labelsAndValues...), - ValidatorPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "validator_power", - Help: "Power of a validator", - }, append(labels, "validator_address")).With(labelsAndValues...), - MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "missing_validators", - Help: "Number of validators who did not sign.", - }, labels).With(labelsAndValues...), - MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "missing_validators_power", - Help: "Total power of the missing validators.", - }, labels).With(labelsAndValues...), - ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "byzantine_validators", - Help: "Number of validators who tried to double sign.", - }, labels).With(labelsAndValues...), - ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "byzantine_validators_power", - Help: "Total power of the byzantine validators.", - }, labels).With(labelsAndValues...), - BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: 
"block_interval_seconds", - Help: "Time between this and the last block.", - }, labels).With(labelsAndValues...), - NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "num_txs", - Help: "Number of transactions.", - }, labels).With(labelsAndValues...), - BlockSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_size_bytes", - Help: "Size of the block.", - }, labels).With(labelsAndValues...), - TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "total_txs", - Help: "Total number of transactions.", - }, labels).With(labelsAndValues...), - CommittedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "latest_block_height", - Help: "The latest block height.", - }, labels).With(labelsAndValues...), - BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_syncing", - Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.", - }, labels).With(labelsAndValues...), - StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "state_syncing", - Help: "Whether or not a node is state syncing. 
1 if yes, 0 if no.", - }, labels).With(labelsAndValues...), - BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_parts", - Help: "Number of blockparts transmitted by peer.", - }, append(labels, "peer_id")).With(labelsAndValues...), - BlockGossipReceiveLatency: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_gossip_receive_latency", - Help: "Difference in seconds between when the validator learns of a new block" + - "and when the validator receives the last piece of the block.", - Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), - }, labels).With(labelsAndValues...), - BlockGossipPartsReceived: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_gossip_parts_received", - Help: "Number of block parts received by the node, labeled by whether the " + - "part was relevant to the block the node was currently gathering or not.", - }, append(labels, "matches_current")).With(labelsAndValues...), - StepDuration: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "step_duration", - Help: "Time spent per step.", - Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), - }, append(labels, "step")).With(labelsAndValues...), - QuorumPrevoteDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "quorum_prevote_delay", - Help: "Difference in seconds between the proposal timestamp and the timestamp " + - "of the latest prevote that achieved a quorum in the prevote step.", - }, append(labels, "proposer_address")).With(labelsAndValues...), - FullPrevoteDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "full_prevote_delay", - Help: 
"Difference in seconds between the proposal timestamp and the timestamp " + - "of the latest prevote that achieved 100% of the voting power in the prevote step.", - }, append(labels, "proposer_address")).With(labelsAndValues...), - ProposalTimestampDifference: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "proposal_timestamp_difference", - Help: "Difference in seconds between the timestamp in the proposal " + - "message and the local time when the message was received. " + - "Only calculated when a new block is proposed.", - Buckets: []float64{-10, -.5, -.025, 0, .1, .5, 1, 1.5, 2, 10}, - }, append(labels, "is_timely")).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - Height: discard.NewGauge(), - - ValidatorLastSignedHeight: discard.NewGauge(), - - Rounds: discard.NewGauge(), - RoundDuration: discard.NewHistogram(), - StepDuration: discard.NewHistogram(), - - Validators: discard.NewGauge(), - ValidatorsPower: discard.NewGauge(), - ValidatorPower: discard.NewGauge(), - ValidatorMissedBlocks: discard.NewGauge(), - MissingValidators: discard.NewGauge(), - MissingValidatorsPower: discard.NewGauge(), - ByzantineValidators: discard.NewGauge(), - ByzantineValidatorsPower: discard.NewGauge(), - - BlockIntervalSeconds: discard.NewHistogram(), - - NumTxs: discard.NewGauge(), - BlockSizeBytes: discard.NewHistogram(), - TotalTxs: discard.NewGauge(), - CommittedHeight: discard.NewGauge(), - BlockSyncing: discard.NewGauge(), - StateSyncing: discard.NewGauge(), - BlockParts: discard.NewCounter(), - BlockGossipReceiveLatency: discard.NewHistogram(), - BlockGossipPartsReceived: discard.NewCounter(), - QuorumPrevoteDelay: discard.NewGauge(), - FullPrevoteDelay: discard.NewGauge(), - ProposalTimestampDifference: discard.NewHistogram(), - } + //metrics:Difference between the timestamp in the proposal message and the local time of the 
validator at the time it received the message. + ProposalTimestampDifference metrics.Histogram `metrics_labels:"is_timely" metrics_bucketsizes:"-10, -.5, -.025, 0, .1, .5, 1, 1.5, 2, 10"` + + // VoteExtensionReceiveCount is the number of vote extensions received by this + // node. The metric is annotated by the status of the vote extension from the + // application, either 'accepted' or 'rejected'. + //metrics:Number of vote extensions received labeled by application response status. + VoteExtensionReceiveCount metrics.Counter `metrics_labels:"status"` + + // ProposalReceiveCount is the total number of proposals received by this node + // since process start. + // The metric is annotated by the status of the proposal from the application, + // either 'accepted' or 'rejected'. + //metrics:Total number of proposals received by the node since process start labeled by application response status. + ProposalReceiveCount metrics.Counter `metrics_labels:"status"` + + // ProposalCreationCount is the total number of proposals created by this node + // since process start. + //metrics:Total number of proposals created by the node since process start. + ProposalCreateCount metrics.Counter + + // RoundVotingPowerPercent is the percentage of the total voting power received + // with a round. The value begins at 0 for each round and approaches 1.0 as + // additional voting power is observed. The metric is labeled by vote type. + //metrics:A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round. + RoundVotingPowerPercent metrics.Gauge `metrics_labels:"vote_type"` + + // LateVotes stores the number of votes that were received by this node that + // correspond to earlier heights and rounds than this node is currently + // in. + //metrics:Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. 
+ LateVotes metrics.Counter `metrics_labels:"vote_type"` } // RecordConsMetrics uses for recording the block related metrics during fast-sync. @@ -335,10 +153,45 @@ func (m *Metrics) MarkBlockGossipComplete() { m.BlockGossipReceiveLatency.Observe(time.Since(m.blockGossipStart).Seconds()) } +func (m *Metrics) MarkProposalProcessed(accepted bool) { + status := "accepted" + if !accepted { + status = "rejected" + } + m.ProposalReceiveCount.With("status", status).Add(1) +} + +func (m *Metrics) MarkVoteExtensionReceived(accepted bool) { + status := "accepted" + if !accepted { + status = "rejected" + } + m.VoteExtensionReceiveCount.With("status", status).Add(1) +} + +func (m *Metrics) MarkVoteReceived(vt tmproto.SignedMsgType, power, totalPower int64) { + p := float64(power) / float64(totalPower) + n := strings.ToLower(strings.TrimPrefix(vt.String(), "SIGNED_MSG_TYPE_")) + m.RoundVotingPowerPercent.With("vote_type", n).Add(p) +} + func (m *Metrics) MarkRound(r int32, st time.Time) { m.Rounds.Set(float64(r)) roundTime := time.Since(st).Seconds() m.RoundDuration.Observe(roundTime) + + pvt := tmproto.PrevoteType + pvn := strings.ToLower(strings.TrimPrefix(pvt.String(), "SIGNED_MSG_TYPE_")) + m.RoundVotingPowerPercent.With("vote_type", pvn).Set(0) + + pct := tmproto.PrecommitType + pcn := strings.ToLower(strings.TrimPrefix(pct.String(), "SIGNED_MSG_TYPE_")) + m.RoundVotingPowerPercent.With("vote_type", pcn).Set(0) +} + +func (m *Metrics) MarkLateVote(vt tmproto.SignedMsgType) { + n := strings.ToLower(strings.TrimPrefix(vt.String(), "SIGNED_MSG_TYPE_")) + m.LateVotes.With("vote_type", n).Add(1) } func (m *Metrics) MarkStep(s cstypes.RoundStepType) { diff --git a/internal/consensus/msgs.go b/internal/consensus/msgs.go index 3085dbb4bf..21ba89e2ed 100644 --- a/internal/consensus/msgs.go +++ b/internal/consensus/msgs.go @@ -223,11 +223,7 @@ func (*VoteMessage) TypeTag() string { return "tendermint/Vote" } // ValidateBasic checks whether the vote within the message is well-formed. 
func (m *VoteMessage) ValidateBasic() error { - // Here we validate votes with vote extensions, since we require vote - // extensions to be sent in precommit messages during consensus. Prevote - // messages should never have vote extensions, and this is also validated - // here. - return m.Vote.ValidateWithExtension() + return m.Vote.ValidateBasic() } // String returns a string representation. diff --git a/internal/consensus/msgs_test.go b/internal/consensus/msgs_test.go index dbd72415f6..7f4f80bec0 100644 --- a/internal/consensus/msgs_test.go +++ b/internal/consensus/msgs_test.go @@ -81,7 +81,7 @@ func TestMsgToProto(t *testing.T) { 1, 1, 2, - types.BlockID{}, + bi, types.StateID{}, ) require.NoError(t, err) diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index d09bd55b8a..a090c32329 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -12,6 +12,7 @@ import ( cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/eventbus" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/bits" @@ -162,10 +163,10 @@ func NewReactor( } type channelBundle struct { - state *p2p.Channel - data *p2p.Channel - vote *p2p.Channel - votSet *p2p.Channel + state p2p.Channel + data p2p.Channel + vote p2p.Channel + votSet p2p.Channel } // OnStart starts separate go routines for each p2p Channel and listens for @@ -213,6 +214,8 @@ func (r *Reactor) OnStart(ctx context.Context) error { if err := r.state.Start(ctx); err != nil { return err } + } else if err := r.state.updateStateFromStore(); err != nil { + return err } go r.updateRoundStateRoutine(ctx) @@ -250,7 +253,7 @@ func (r *Reactor) WaitSync() bool { func (r *Reactor) SwitchToConsensus(ctx context.Context, state sm.State, skipWAL bool) { r.logger.Info("switching to 
consensus") - // We have no votes, so reconstruct LastPrecommits from SeenCommit. + // we have no votes, so reconstruct LastCommit from SeenCommit if state.LastBlockHeight > 0 { r.state.reconstructLastCommit(state) } @@ -308,7 +311,7 @@ func (r *Reactor) GetPeerState(peerID types.NodeID) (*PeerState, bool) { // subscribeToBroadcastEvents subscribes for new round steps and votes using the // internal pubsub defined in the consensus state to broadcast them to peers // upon receiving. -func (r *Reactor) subscribeToBroadcastEvents(ctx context.Context, stateCh *p2p.Channel) { +func (r *Reactor) subscribeToBroadcastEvents(ctx context.Context, stateCh p2p.Channel) { onStopCh := r.state.getOnStopCh() err := r.state.evsw.AddListenerForEvent( @@ -459,7 +462,7 @@ func (r *Reactor) gossipDataForCatchup(ctx context.Context, rs *cstypes.RoundSta time.Sleep(r.state.config.PeerGossipSleepDuration) } -func (r *Reactor) gossipDataRoutine(ctx context.Context, ps *PeerState, dataCh *p2p.Channel, chans channelBundle) { +func (r *Reactor) gossipDataRoutine(ctx context.Context, ps *PeerState, dataCh p2p.Channel, chans channelBundle) { logger := r.logger.With("peer", ps.peerID) timer := time.NewTimer(0) @@ -471,10 +474,12 @@ OUTER_LOOP: return } + timer.Reset(r.state.config.PeerGossipSleepDuration) + select { case <-ctx.Done(): return - default: + case <-timer.C: } rs := r.getRoundState() @@ -518,13 +523,6 @@ OUTER_LOOP: "blockstoreBase", blockStoreBase, "blockstoreHeight", r.state.blockStore.Height(), ) - - timer.Reset(r.state.config.PeerGossipSleepDuration) - select { - case <-timer.C: - case <-ctx.Done(): - return - } } else { ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader) } @@ -540,12 +538,6 @@ OUTER_LOOP: // if height and round don't match, sleep if (rs.Height != prs.Height) || (rs.Round != prs.Round) { - timer.Reset(r.state.config.PeerGossipSleepDuration) - select { - case <-timer.C: - case <-ctx.Done(): - return - } continue OUTER_LOOP } @@ -584,22 +576,11 @@ 
OUTER_LOOP: }) r.logResult(err, logger, "sending POL", "height", prs.Height, "round", prs.Round) } - - continue OUTER_LOOP - } - - // nothing to do -- sleep - timer.Reset(r.state.config.PeerGossipSleepDuration) - select { - case <-timer.C: - case <-ctx.Done(): - return } - continue OUTER_LOOP } } -func (r *Reactor) sendProposalBlockPart(ctx context.Context, dataCh *p2p.Channel, ps *PeerState, part *types.Part, height int64, round int32) error { +func (r *Reactor) sendProposalBlockPart(ctx context.Context, dataCh p2p.Channel, ps *PeerState, part *types.Part, height int64, round int32) error { partProto, err := part.ToProto() if err != nil { return fmt.Errorf("failed to convert block part to proto, error: %w", err) @@ -620,7 +601,7 @@ func (r *Reactor) sendProposalBlockPart(ctx context.Context, dataCh *p2p.Channel // pickSendVote picks a vote and sends it to the peer. It will return true if // there is a vote to send and false otherwise. -func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.VoteSetReader, voteCh *p2p.Channel) (bool, error) { +func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.VoteSetReader, voteCh p2p.Channel) (bool, error) { vote, ok := ps.PickVoteToSend(votes) if !ok { return false, nil @@ -655,7 +636,7 @@ func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.V return true, nil } -func (r *Reactor) sendCommit(ctx context.Context, ps *PeerState, commit *types.Commit, voteCh *p2p.Channel) error { +func (r *Reactor) sendCommit(ctx context.Context, ps *PeerState, commit *types.Commit, voteCh p2p.Channel) error { if commit == nil { return fmt.Errorf("attempt to send nil commit to peer %s", ps.peerID) } @@ -670,7 +651,7 @@ func (r *Reactor) sendCommit(ctx context.Context, ps *PeerState, commit *types.C // send sends a message to provided channel. // If to is nil, message will be broadcasted. 
-func (r *Reactor) send(ctx context.Context, ps *PeerState, channel *p2p.Channel, msg proto.Message) error { +func (r *Reactor) send(ctx context.Context, ps *PeerState, channel p2p.Channel, msg proto.Message) error { select { case <-ctx.Done(): return errReactorClosed @@ -683,7 +664,7 @@ func (r *Reactor) send(ctx context.Context, ps *PeerState, channel *p2p.Channel, } // broadcast sends a broadcast message to all peers connected to the `channel`. -func (r *Reactor) broadcast(ctx context.Context, channel *p2p.Channel, msg proto.Message) error { +func (r *Reactor) broadcast(ctx context.Context, channel p2p.Channel, msg proto.Message) error { select { case <-ctx.Done(): return errReactorClosed @@ -712,11 +693,11 @@ func (r *Reactor) gossipVotesForHeight( rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState, - voteCh *p2p.Channel, + voteCh p2p.Channel, ) (bool, error) { logger := r.logger.With("height", prs.Height).With("peer", ps.peerID) - // If there are lastPrecommits to send... + // if there are lastPrecommits to send... 
if prs.Step == cstypes.RoundStepNewHeight { if ok, err := r.pickSendVote(ctx, ps, rs.LastPrecommits, voteCh); err != nil { logger.Debug("picked previous precommit vote to send") @@ -785,7 +766,7 @@ func (r *Reactor) gossipVotesForHeight( } // gossipCommit sends a commit to the peer -func (r *Reactor) gossipCommit(ctx context.Context, voteCh *p2p.Channel, rs *cstypes.RoundState, ps *PeerState, prs *cstypes.PeerRoundState) error { +func (r *Reactor) gossipCommit(ctx context.Context, voteCh p2p.Channel, rs *cstypes.RoundState, ps *PeerState, prs *cstypes.PeerRoundState) error { // logger := r.Logger.With("height", rs.Height, "peer_height", prs.Height, "peer", ps.peerID) var commit *types.Commit blockStoreBase := r.state.blockStore.Base() @@ -816,7 +797,7 @@ func (r *Reactor) gossipCommit(ctx context.Context, voteCh *p2p.Channel, rs *cst return nil // success } -func (r *Reactor) gossipVotesAndCommitRoutine(ctx context.Context, voteCh *p2p.Channel, ps *PeerState) { +func (r *Reactor) gossipVotesAndCommitRoutine(ctx context.Context, voteCh p2p.Channel, ps *PeerState) { logger := r.logger.With("peer", ps.peerID) timer := time.NewTimer(0) @@ -879,7 +860,7 @@ func (r *Reactor) gossipVotesAndCommitRoutine(ctx context.Context, voteCh *p2p.C // NOTE: `queryMaj23Routine` has a simple crude design since it only comes // into play for liveness when there's a signature DDoS attack happening. -func (r *Reactor) queryMaj23Routine(ctx context.Context, stateCh *p2p.Channel, ps *PeerState) { +func (r *Reactor) queryMaj23Routine(ctx context.Context, stateCh p2p.Channel, ps *PeerState) { timer := time.NewTimer(0) defer timer.Stop() @@ -1130,7 +1111,7 @@ func (r *Reactor) peerDown(ctx context.Context, peerUpdate p2p.PeerUpdate, chans // If we fail to find the peer state for the envelope sender, we perform a no-op // and return. This can happen when we process the envelope after the peer is // removed. 
-func (r *Reactor) handleStateMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message, voteSetCh *p2p.Channel) error { +func (r *Reactor) handleStateMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message, voteSetCh p2p.Channel) error { ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { r.logger.Debug("failed to find peer state", "peer", envelope.From, "ch_id", "StateChannel") @@ -1228,7 +1209,7 @@ func (r *Reactor) handleDataMessage(ctx context.Context, envelope *p2p.Envelope, } if r.WaitSync() { - logger.Info("ignoring message received during sync", "msg", fmt.Sprintf("%T", msgI)) + logger.Info("ignoring message received during sync", "msg", tmstrings.LazySprintf("%T", msgI)) return nil } @@ -1437,7 +1418,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, cha // Any error encountered during message execution will result in a PeerError being sent // on the StateChannel or DataChannel or VoteChannel or VoteSetBitsChannel. // When the reactor is stopped, we will catch the signal and close the p2p Channel gracefully. 
-func (r *Reactor) processMsgCh(ctx context.Context, msgCh *p2p.Channel, chBundle channelBundle) { +func (r *Reactor) processMsgCh(ctx context.Context, msgCh p2p.Channel, chBundle channelBundle) { iter := msgCh.Receive(ctx) for iter.Next(ctx) { envelope := iter.Envelope() diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index f6e3661bd8..466b864bb5 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -46,10 +46,10 @@ type reactorTestSuite struct { reactors map[types.NodeID]*Reactor subs map[types.NodeID]eventbus.Subscription blocksyncSubs map[types.NodeID]eventbus.Subscription - stateChannels map[types.NodeID]*p2p.Channel - dataChannels map[types.NodeID]*p2p.Channel - voteChannels map[types.NodeID]*p2p.Channel - voteSetBitsChannels map[types.NodeID]*p2p.Channel + stateChannels map[types.NodeID]p2p.Channel + dataChannels map[types.NodeID]p2p.Channel + voteChannels map[types.NodeID]p2p.Channel + voteSetBitsChannels map[types.NodeID]p2p.Channel } func chDesc(chID p2p.ChannelID, size int) *p2p.ChannelDescriptor { @@ -90,7 +90,7 @@ func setup( t.Cleanup(cancel) chCreator := func(nodeID types.NodeID) p2p.ChannelCreator { - return func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + return func(ctx context.Context, desc *p2p.ChannelDescriptor) (p2p.Channel, error) { switch desc.ID { case StateChannel: return rts.stateChannels[nodeID], nil @@ -671,6 +671,10 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { } func TestReactorValidatorSetChanges(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 591597474f..3b7e250ae6 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -505,11 +505,11 @@ func (h *Handshaker) ReplayBlocks( case 
appBlockHeight == storeBlockHeight: // We ran Commit, but didn't save the state, so replayBlock with mock app. - abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight) + finalizeBlockResponses, err := h.stateStore.LoadFinalizeBlockResponses(storeBlockHeight) if err != nil { return nil, err } - mockApp, err := newMockProxyApp(h.logger, appHash, abciResponses) + mockApp, err := newMockProxyApp(h.logger, appHash, finalizeBlockResponses) if err != nil { return nil, err } diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 3cd5bdac03..60b96ae0c5 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -9,7 +9,6 @@ import ( "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -35,6 +34,7 @@ func (emptyMempool) Update( _ []*abci.ExecTxResult, _ mempool.PreCheckFunc, _ mempool.PostCheckFunc, + _ bool, ) error { return nil } @@ -51,7 +51,7 @@ func (emptyMempool) InitWAL() error { return nil } func (emptyMempool) CloseWAL() {} //----------------------------------------------------------------------------- -// mockProxyApp uses ABCIResponses to give the right results. +// mockProxyApp uses Responses to FinalizeBlock to give the right results. // // Useful because we don't want to call Commit() twice for the same block on // the real app. 
@@ -59,24 +59,24 @@ func (emptyMempool) CloseWAL() {} func newMockProxyApp( logger log.Logger, appHash []byte, - abciResponses *tmstate.ABCIResponses, + finalizeBlockResponses *abci.ResponseFinalizeBlock, ) (abciclient.Client, error) { return proxy.New(abciclient.NewLocalClient(logger, &mockProxyApp{ - appHash: appHash, - abciResponses: abciResponses, + appHash: appHash, + finalizeBlockResponses: finalizeBlockResponses, }), logger, proxy.NopMetrics()), nil } type mockProxyApp struct { abci.BaseApplication - appHash []byte - txCount int - abciResponses *tmstate.ABCIResponses + appHash []byte + txCount int + finalizeBlockResponses *abci.ResponseFinalizeBlock } func (mock *mockProxyApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - r := mock.abciResponses.FinalizeBlock + r := mock.finalizeBlockResponses mock.txCount++ if r == nil { return &abci.ResponseFinalizeBlock{}, nil @@ -85,5 +85,5 @@ func (mock *mockProxyApp) FinalizeBlock(_ context.Context, req *abci.RequestFina } func (mock *mockProxyApp) Commit(context.Context) (*abci.ResponseCommit, error) { - return &abci.ResponseCommit{Data: mock.appHash}, nil + return &abci.ResponseCommit{}, nil } diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 988546ef5b..acbee3e133 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -116,6 +116,10 @@ func sendTxs(ctx context.Context, t *testing.T, cs *State) { // TestWALCrash uses crashing WAL to test we can recover from any WAL failure. 
func TestWALCrash(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + testCases := []struct { name string initFn func(dbm.DB, *State, context.Context) @@ -798,8 +802,8 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { } ensureNewRound(t, newRoundCh, height+1, 0) - sim.Chain = make([]*types.Block, 0) - sim.Commits = make([]*types.Commit, 0) + sim.Chain = []*types.Block{} + sim.Commits = []*types.Commit{} for i := 1; i <= numBlocks; i++ { sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i))) sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i))) @@ -1310,16 +1314,16 @@ type badApp struct { onlyLastHashIsWrong bool } -func (app *badApp) Commit(context.Context) (*abci.ResponseCommit, error) { +func (app *badApp) FinalizeBlock(_ context.Context, _ *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { app.height++ if app.onlyLastHashIsWrong { if app.height == app.numBlocks { - return &abci.ResponseCommit{Data: tmrand.Bytes(32)}, nil + return &abci.ResponseFinalizeBlock{AppHash: tmrand.Bytes(32)}, nil } - return &abci.ResponseCommit{Data: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + return &abci.ResponseFinalizeBlock{AppHash: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, app.height}}, nil } else if app.allHashesAreWrong { - return &abci.ResponseCommit{Data: tmrand.Bytes(32)}, nil + return &abci.ResponseFinalizeBlock{AppHash: tmrand.Bytes(32)}, nil } panic("either allHashesAreWrong or onlyLastHashIsWrong must be set") @@ -1484,6 +1488,8 @@ type mockBlockStore struct { coreChainLockedHeight uint32 } +var _ sm.BlockStore = &mockBlockStore{} + // TODO: NewBlockStore(db.NewMemDB) ... 
func newMockBlockStore(t *testing.T, cfg *config.Config, params types.ConsensusParams) *mockBlockStore { return &mockBlockStore{ @@ -1516,19 +1522,22 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { } } func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } - func (bs *mockBlockStore) SaveBlock( block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit, ) { } + func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { return bs.commits[height-1] } func (bs *mockBlockStore) LoadSeenCommit() *types.Commit { return bs.commits[len(bs.commits)-1] } +func (bs *mockBlockStore) LoadSeenCommitAt(height int64) *types.Commit { + return bs.commits[height-1] +} func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { pruned := uint64(0) diff --git a/internal/consensus/state.go b/internal/consensus/state.go index b3a8ddde3e..f5acf4d84c 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -20,6 +20,7 @@ import ( "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/jsontypes" "github.com/tendermint/tendermint/internal/libs/autofile" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" sm "github.com/tendermint/tendermint/internal/state" tmbytes "github.com/tendermint/tendermint/libs/bytes" tmevents "github.com/tendermint/tendermint/libs/events" @@ -126,9 +127,8 @@ type State struct { // store blocks and commits blockStore sm.BlockStore - stateStore sm.Store - initialStatePopulated bool - skipBootstrapping bool + stateStore sm.Store + skipBootstrapping bool // create and execute blocks blockExec *sm.BlockExecutor @@ -260,9 +260,6 @@ func (cs *State) SetProposedAppVersion(ver uint64) { } func (cs *State) updateStateFromStore() error { - if cs.initialStatePopulated { - return nil - } state, err := cs.stateStore.Load() if err != nil { return fmt.Errorf("loading state: %w", err) @@ -271,14 +268,22 @@ 
func (cs *State) updateStateFromStore() error { return nil } - // We have no votes, so reconstruct LastPrecommits from SeenCommit. + eq, err := state.Equals(cs.state) + if err != nil { + return fmt.Errorf("comparing state: %w", err) + } + // if the new state is equivalent to the old state, we should not trigger a state update. + if eq { + return nil + } + + // We have no votes, so reconstruct LastCommit from SeenCommit. if state.LastBlockHeight > 0 { cs.reconstructLastCommit(state) } cs.updateToState(state, nil) - cs.initialStatePopulated = true return nil } @@ -740,22 +745,27 @@ func (cs *State) sendInternalMessage(ctx context.Context, mi msgInfo) { } } -// Reconstruct LastCommit from SeenCommit, which we saved along with the block, -// (which happens even before saving the state) +// Reconstruct the LastCommit from either SeenCommit or the ExtendedCommit. SeenCommit +// and ExtendedCommit are saved along with the block. If VoteExtensions are required +// the method will panic on an absent ExtendedCommit or an ExtendedCommit without +// extension data. 
func (cs *State) reconstructLastCommit(state sm.State) { - seenCommit := cs.blockStore.LoadSeenCommit() - if seenCommit == nil || seenCommit.Height != state.LastBlockHeight { - seenCommit = cs.blockStore.LoadBlockCommit(state.LastBlockHeight) + commit, err := cs.loadLastCommit(state) + if err != nil { + panic(fmt.Sprintf("failed to reconstruct last commit; %s", err)) } + cs.LastCommit = commit +} - if seenCommit == nil { - panic(fmt.Sprintf( - "failed to reconstruct last commit; seen commit for height %v not found", - state.LastBlockHeight, - )) +func (cs *State) loadLastCommit(state sm.State) (*types.Commit, error) { + commit := cs.blockStore.LoadSeenCommit() + if commit == nil || commit.Height != state.LastBlockHeight { + commit = cs.blockStore.LoadBlockCommit(state.LastBlockHeight) } - - cs.LastCommit = seenCommit + if commit == nil { + return nil, fmt.Errorf("commit for height %v not found", state.LastBlockHeight) + } + return commit, nil } // Updates State and increments height to match that of state. @@ -790,11 +800,9 @@ func (cs *State) updateToState(state sm.State, commit *types.Commit) { // signal the new round step, because other services (eg. txNotifier) // depend on having an up-to-date peer state! 
if state.LastBlockHeight <= cs.state.LastBlockHeight { - cs.logger.Debug( - "ignoring updateToState()", + cs.logger.Debug("ignoring updateToState()", "new_height", state.LastBlockHeight+1, - "old_height", cs.state.LastBlockHeight+1, - ) + "old_height", cs.state.LastBlockHeight+1) cs.newStep() return } @@ -1066,8 +1074,7 @@ func (cs *State) handleMsg(ctx context.Context, mi msgInfo, fromReplay bool) { } if err != nil && msg.Round != cs.Round { - cs.logger.Debug( - "received block part from wrong round", + cs.logger.Debug("received block part from wrong round", "height", cs.Height, "cs_round", cs.Round, "block_height", msg.Height, @@ -1140,7 +1147,7 @@ func (cs *State) handleMsg(ctx context.Context, mi msgInfo, fromReplay bool) { "error", err, ) default: - cs.logger.Error("unknown msg type", "type", fmt.Sprintf("%T", msg)) + cs.logger.Error("unknown msg type", "type", tmstrings.LazySprintf("%T", msg)) return } @@ -1252,10 +1259,10 @@ func (cs *State) enterNewRound(ctx context.Context, height int64, round int32) { logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { - logger.Debug( - "entering new round with invalid args", - "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), - ) + logger.Debug("entering new round with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) return } @@ -1263,7 +1270,10 @@ func (cs *State) enterNewRound(ctx context.Context, height int64, round int32) { logger.Debug("need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now) } - logger.Debug("entering new round", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering new round", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) // increment validators if necessary validators := cs.Validators @@ -1360,10 +1370,10 @@ func (cs *State) enterPropose(ctx 
context.Context, height int64, round int32) { logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { - logger.Debug( - "entering propose step with invalid args", - "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), - ) + logger.Debug("entering propose step with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) return } @@ -1377,7 +1387,10 @@ func (cs *State) enterPropose(ctx context.Context, height int64, round int32) { } } - logger.Debug("entering propose step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering propose step", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) defer func() { // Done enterPropose: @@ -1473,6 +1486,7 @@ func (cs *State) defaultDecideProposal(ctx context.Context, height int64, round } else if block == nil { return } + cs.metrics.ProposalCreateCount.Add(1) blockParts, err = block.MakePartSet(types.BlockPartSizeBytes) if err != nil { cs.logger.Error("unable to create proposal block part set", "error", err) @@ -1581,6 +1595,7 @@ func (cs *State) createProposalBlock(ctx context.Context) (*types.Block, error) return nil, errors.New("entered createProposalBlock with privValidator being nil") } + // TODO(sergio): wouldn't it be easier if CreateProposalBlock accepted cs.LastCommit directly? 
var commit *types.Commit switch { case cs.Height == cs.state.InitialHeight: @@ -1622,10 +1637,10 @@ func (cs *State) enterPrevote(ctx context.Context, height int64, round int32, al logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { - logger.Debug( - "entering prevote step with invalid args", - "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), - ) + logger.Debug("entering prevote step with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) return } @@ -1635,7 +1650,10 @@ func (cs *State) enterPrevote(ctx context.Context, height int64, round int32, al cs.newStep() }() - logger.Debug("entering prevote step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering prevote step", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) // Sign and broadcast vote as necessary cs.doPrevote(ctx, height, round, allowOldBlocks) @@ -1672,16 +1690,13 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32 } sp := cs.state.ConsensusParams.Synchrony.SynchronyParamsOrDefaults() - if cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() { + //TODO: Remove this temporary fix when the complete solution is ready. 
See #8739 + if !cs.replayMode && cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() { logger.Debug("prevote step: Proposal is not timely; prevoting nil", - "proposed", - tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano), - "received", - tmtime.Canonical(cs.ProposalReceiveTime).Format(time.RFC3339Nano), - "msg_delay", - sp.MessageDelay, - "precision", - sp.Precision) + "proposed", tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano), + "received", tmtime.Canonical(cs.ProposalReceiveTime).Format(time.RFC3339Nano), + "msg_delay", sp.MessageDelay, + "precision", sp.Precision) cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) return } @@ -1710,6 +1725,7 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32 if err != nil { panic(fmt.Sprintf("ProcessProposal: %v", err)) } + cs.metrics.MarkProposalProcessed(isAppValid) // Vote nil if the Application rejected the block if !isAppValid { @@ -1765,8 +1781,8 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32 blockID, ok := cs.Votes.Prevotes(cs.Proposal.POLRound).TwoThirdsMajority() if ok && cs.ProposalBlock.HashesTo(blockID.Hash) && cs.Proposal.POLRound >= 0 && cs.Proposal.POLRound < cs.Round { if cs.LockedRound <= cs.Proposal.POLRound { - logger.Debug("prevote step: ProposalBlock is valid and received a 2/3" + - "majority in a round later than the locked round; prevoting the proposal") + logger.Debug("prevote step: ProposalBlock is valid and received a 2/3 majority in a round later than the locked round", + "outcome", "prevoting the proposal") cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) return } @@ -1807,10 +1823,10 @@ func (cs *State) enterPrevoteWait(height int64, round int32) { logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= 
cs.Step) { - logger.Debug( - "entering prevote wait step with invalid args", - "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), - ) + logger.Debug("entering prevote wait step with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) return } @@ -1821,7 +1837,10 @@ func (cs *State) enterPrevoteWait(height int64, round int32) { )) } - logger.Debug("entering prevote wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering prevote wait step", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) defer func() { // Done enterPrevoteWait: @@ -1839,18 +1858,21 @@ func (cs *State) enterPrevoteWait(height int64, round int32) { // Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round) // else, precommit nil otherwise. func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32) { - logger := cs.logger.With("height", height, "round", round) + logger := cs.logger.With("new_height", height, "new_round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { - logger.Debug( - "entering precommit step with invalid args", - "current", fmt.Sprintf("height state:%v value:%v / round state:%v value:%v / state step:%v", - cs.Height, height, cs.Round, round, cs.Step), - ) + logger.Debug("entering precommit step with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) + return } - logger.Debug("entering precommit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering precommit step", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) defer func() { // Done enterPrecommit: @@ -1957,14 +1979,13 @@ func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32) // Enter: any +2/3 precommits for next round. 
func (cs *State) enterPrecommitWait(height int64, round int32) { - logger := cs.logger.With("height", height, "round", round) + logger := cs.logger.With("new_height", height, "new_round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) { - logger.Debug( - "entering precommit wait step with invalid args", + logger.Debug("entering precommit wait step with invalid args", "triggered_timeout", cs.TriggeredTimeoutPrecommit, - "current", fmt.Sprintf("%v/%v", cs.Height, cs.Round), - ) + "height", cs.Height, + "round", cs.Round) return } @@ -1975,7 +1996,10 @@ func (cs *State) enterPrecommitWait(height int64, round int32) { )) } - logger.Debug("entering precommit wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering precommit wait step", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) defer func() { // Done enterPrecommitWait: @@ -1989,17 +2013,20 @@ func (cs *State) enterPrecommitWait(height int64, round int32) { // Enter: +2/3 precommits for block func (cs *State) enterCommit(ctx context.Context, height int64, commitRound int32) { - logger := cs.logger.With("height", height, "commit_round", commitRound) + logger := cs.logger.With("new_height", height, "commit_round", commitRound) if cs.Height != height || cstypes.RoundStepApplyCommit <= cs.Step { - logger.Debug( - "entering commit step with invalid args", - "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), - ) + logger.Debug("entering commit step with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) return } - logger.Info("Entering commit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering commit step", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) defer func() { // Done enterCommit: @@ -2057,12 +2084,12 @@ func (cs *State) updateProposalBlockAndPartsBeforeCommit(blockID types.BlockID, // If we 
have the block AND +2/3 commits for it, finalize. func (cs *State) tryFinalizeCommit(ctx context.Context, height int64) { - logger := cs.logger.With("height", height) - if cs.Height != height { panic(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) } + logger := cs.logger.With("height", height) + blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() if !ok || blockID.IsNil() { logger.Error("failed attempt to finalize commit; there was no +2/3 majority or +2/3 was for nil") @@ -2072,9 +2099,8 @@ func (cs *State) tryFinalizeCommit(ctx context.Context, height int64) { if !cs.ProposalBlock.HashesTo(blockID.Hash) { // TODO: this happens every time if we're not a validator (ugly logs) // TODO: ^^ wait, why does it matter that we're a validator? - logger.Debug( - "failed attempt to finalize commit; we do not have the commit block", - "proposal_block", cs.ProposalBlock.Hash(), + logger.Debug("failed attempt to finalize commit; we do not have the commit block", + "proposal_block", tmstrings.LazyBlockHash(cs.ProposalBlock), "commit_block", blockID.Hash, ) return @@ -2114,11 +2140,10 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) { logger.Info( "finalizing commit of block", - "hash", block.Hash(), + "hash", tmstrings.LazyBlockHash(block), "root", block.AppHash, "num_txs", len(block.Txs), ) - logger.Debug(fmt.Sprintf("%v", block)) // Save to blockStore. 
if cs.blockStore.Height() < block.Height { @@ -2649,11 +2674,9 @@ func (cs *State) handleCompleteProposal(ctx context.Context, height int64, fromR blockID, hasTwoThirds := prevotes.TwoThirdsMajority() if hasTwoThirds && !blockID.IsNil() && (cs.ValidRound < cs.Round) { if cs.ProposalBlock.HashesTo(blockID.Hash) { - cs.logger.Debug( - "updating valid block to new proposal block", + cs.logger.Debug("updating valid block to new proposal block", "valid_round", cs.Round, - "valid_block_hash", cs.ProposalBlock.Hash(), - ) + "valid_block_hash", tmstrings.LazyBlockHash(cs.ProposalBlock)) cs.ValidRound = cs.Round cs.ValidBlock = cs.ProposalBlock @@ -2708,19 +2731,16 @@ func (cs *State) tryAddVote(ctx context.Context, vote *types.Vote, peerID types. "found conflicting vote from ourselves; did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, - "type", vote.Type, - ) + "type", vote.Type) return added, err } // report conflicting votes to the evidence pool cs.evpool.ReportConflictingVotes(voteErr.VoteA, voteErr.VoteB) - cs.logger.Debug( - "found and sent conflicting votes to the evidence pool", + cs.logger.Debug("found and sent conflicting votes to the evidence pool", "vote_a", voteErr.VoteA, - "vote_b", voteErr.VoteB, - ) + "vote_b", voteErr.VoteB) return added, err } else if errors.Is(err, types.ErrVoteNonDeterministicSignature) { @@ -2802,9 +2822,24 @@ func (cs *State) addVote( return } - // Verify VoteExtension if precommit - if vote.Type == tmproto.PrecommitType { - if err = cs.blockExec.VerifyVoteExtension(ctx, vote); err != nil { + // Verify VoteExtension if precommit and not nil + // https://github.com/tendermint/tendermint/issues/8487 + if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() && + !bytes.Equal(vote.ValidatorProTxHash, cs.privValidatorProTxHash) { // Skip the VerifyVoteExtension call if the vote was issued by this validator. 
+ + // The core fields of the vote message were already validated in the + // consensus reactor when the vote was received. + // Here, we verify the signature of the vote extension included in the vote + // message. + _, val := cs.state.Validators.GetByIndex(vote.ValidatorIndex) + qt, qh := cs.state.Validators.QuorumType, cs.state.Validators.QuorumHash + if err := vote.VerifyExtensionSign(cs.state.ChainID, val.PubKey, qt, qh); err != nil { + return false, err + } + + err := cs.blockExec.VerifyVoteExtension(ctx, vote) + cs.metrics.MarkVoteExtensionReceived(err == nil) + if err != nil { return false, err } } @@ -2838,6 +2873,11 @@ func (cs *State) addVote( // Either duplicate, or error upon cs.Votes.AddByIndex() return } + if vote.Round == cs.Round { + vals := cs.state.Validators + _, val := vals.GetByIndex(vote.ValidatorIndex) + cs.metrics.MarkVoteReceived(vote.Type, val.VotingPower, vals.TotalVotingPower()) + } if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil { return added, err @@ -2862,11 +2902,9 @@ func (cs *State) addVote( cs.ValidBlock = cs.ProposalBlock cs.ValidBlockParts = cs.ProposalBlockParts } else { - cs.logger.Debug( - "valid block we do not know about; set ProposalBlock=nil", - "proposal", cs.ProposalBlock.Hash(), - "block_id", blockID.Hash, - ) + cs.logger.Debug("valid block we do not know about; set ProposalBlock=nil", + "proposal", tmstrings.LazyBlockHash(cs.ProposalBlock), + "block_id", blockID.Hash) // we're getting the wrong block cs.ProposalBlock = nil @@ -2973,18 +3011,15 @@ func (cs *State) signVote( // If the signedMessageType is for precommit, // use our local precommit Timeout as the max wait time for getting a singed commit. The same goes for prevote. 
- timeout := cs.voteTimeout(cs.Round)
-
- switch msgType {
- case tmproto.PrecommitType:
+ timeout := time.Second
+ if msgType == tmproto.PrecommitType && !vote.BlockID.IsNil() {
+ timeout = cs.voteTimeout(cs.Round)
 // if the signedMessage type is for a precommit, add VoteExtension
 exts, err := cs.blockExec.ExtendVote(ctx, vote)
 if err != nil {
 return nil, err
 }
 vote.VoteExtensions = types.NewVoteExtensionsFromABCIExtended(exts)
- default:
- timeout = time.Second
 }
 
 v := vote.ToProto()
@@ -3031,17 +3066,16 @@ func (cs *State) signAddVote(
 // TODO: pass pubKey to signVote
 start := time.Now()
 vote, err := cs.signVote(ctx, msgType, hash, header)
- if err == nil {
- cs.sendInternalMessage(ctx, msgInfo{&VoteMessage{vote}, "", tmtime.Now()})
- cs.logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "took", time.Since(start).String())
- return vote
+ if err != nil {
+ cs.logger.Error("failed signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
+ return nil
 }
-
- cs.logger.Error("failed signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
- return nil
+ cs.sendInternalMessage(ctx, msgInfo{&VoteMessage{vote}, "", tmtime.Now()})
+ cs.logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "took", time.Since(start).String())
+ return vote
 }
 
-// updatePrivValidatorProTxHash get's the private validator proTxHash and
+// updatePrivValidatorProTxHash gets the private validator proTxHash and
 // memoizes it. This func returns an error if the private validator is not
 // responding or responds with an error.
func (cs *State) updatePrivValidatorProTxHash(ctx context.Context) error { diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 6ac9f499b7..e8ef52c21b 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -2047,11 +2047,16 @@ func TestFinalizeBlockCalled(t *testing.T) { Status: abci.ResponseProcessProposal_ACCEPT, }, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, - }, nil) - m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() - m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil) + // We only expect VerifyVoteExtension to be called on non-nil precommits. + // https://github.com/tendermint/tendermint/issues/8487 + if !testCase.voteNil { + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil) + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ + Status: abci.ResponseVerifyVoteExtension_ACCEPT, + }, nil) + } + r := &abci.ResponseFinalizeBlock{AppHash: []byte("the_hash")} + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(r, nil).Maybe() m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) @@ -2098,32 +2103,32 @@ func TestFinalizeBlockCalled(t *testing.T) { } } -// TestExtendVoteCalled tests that the vote extension methods are called at the -// correct point in the consensus algorithm. 
-func TestExtendVoteCalled(t *testing.T) { +func TestExtendVote(t *testing.T) { config := configSetup(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() + voteExtensions := []*abci.ExtendVoteExtension{ + { + Type: tmproto.VoteExtensionType_DEFAULT, + Extension: []byte("extension"), + }, + } + m := abcimocks.NewApplication(t) m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) - m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ - VoteExtensions: []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - }, - }, nil) - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, - }, nil) m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) height, round := cs1.Height, cs1.Round + proTxHashMap := make(map[string]struct{}) + for _, vs := range vss { + pth, _ := vs.GetProTxHash(ctx) + proTxHashMap[pth.String()] = struct{}{} + } + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) proTxHash, err := cs1.privValidator.GetProTxHash(ctx) @@ -2142,49 +2147,33 @@ func TestExtendVoteCalled(t *testing.T) { Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header(), } + reqExtendVoteFunc := mock.MatchedBy(func(req *abci.RequestExtendVote) bool { + return assert.Equal(t, req.Height, height) && assert.Equal(t, []byte(blockID.Hash), req.Hash) + }) + m.On("ExtendVote", mock.Anything, 
reqExtendVoteFunc).Return(&abci.ResponseExtendVote{ + VoteExtensions: voteExtensions, + }, nil) + reqVerifyVoteExtFunc := mock.MatchedBy(func(req *abci.RequestVerifyVoteExtension) bool { + _, ok := proTxHashMap[types.ProTxHash(req.ValidatorProTxHash).String()] + return assert.Equal(t, req.Hash, blockID.Hash.Bytes()) && + assert.Equal(t, req.Height, height) && + assert.Equal(t, req.VoteExtensions, voteExtensions) && + assert.True(t, ok) + }) + m.On("VerifyVoteExtension", mock.Anything, reqVerifyVoteExtFunc). + Return(&abci.ResponseVerifyVoteExtension{ + Status: abci.ResponseVerifyVoteExtension_ACCEPT, + }, nil) signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) ensurePrecommit(t, voteCh, height, round) - m.AssertCalled(t, "ExtendVote", ctx, &abci.RequestExtendVote{ - Height: height, - Hash: blockID.Hash, - }) - - m.AssertCalled(t, "VerifyVoteExtension", ctx, &abci.RequestVerifyVoteExtension{ - Hash: blockID.Hash, - ValidatorProTxHash: proTxHash, - Height: height, - VoteExtensions: []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - }, - }) signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...) ensureNewRound(t, newRoundCh, height+1, 0) m.AssertExpectations(t) - // Only 3 of the vote extensions are seen, as consensus proceeds as soon as the +2/3 threshold - // is observed by the consensus engine. 
- for _, pv := range vss[:3] { - proTxHash, err := pv.GetProTxHash(ctx) - require.NoError(t, err) - m.AssertCalled(t, "VerifyVoteExtension", ctx, &abci.RequestVerifyVoteExtension{ - Hash: blockID.Hash, - ValidatorProTxHash: proTxHash, - Height: height, - VoteExtensions: []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - }, - }) - } - + mock.AssertExpectationsForObjects(t, m) } // TestVerifyVoteExtensionNotCalledOnAbsentPrecommit tests that the VerifyVoteExtension @@ -2193,25 +2182,28 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { config := configSetup(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - + voteExtensions := []*abci.ExtendVoteExtension{ + { + Type: tmproto.VoteExtensionType_DEFAULT, + Extension: []byte("extension"), + }, + } m := abcimocks.NewApplication(t) m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ - VoteExtensions: []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - }, - }, nil) - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, + VoteExtensions: voteExtensions, }, nil) m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) height, round := cs1.Height, cs1.Round + proTxHashMap := make(map[string]struct{}) + for _, vs := range vss { + pth, _ := vs.GetProTxHash(ctx) + proTxHashMap[pth.String()] = struct{}{} + } + proposalCh := subscribe(ctx, t, cs1.eventBus, 
types.EventQueryCompleteProposal) newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) proTxHash, err := cs1.privValidator.GetProTxHash(ctx) @@ -2227,7 +2219,7 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header(), } - signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[2:]...) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss...) ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) ensurePrecommit(t, voteCh, height, round) @@ -2236,18 +2228,17 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { Height: height, Hash: blockID.Hash, }) - - m.AssertCalled(t, "VerifyVoteExtension", mock.Anything, &abci.RequestVerifyVoteExtension{ - Hash: blockID.Hash, - ValidatorProTxHash: proTxHash, - Height: height, - VoteExtensions: []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - }, + reqVerifyVoteExtFunc := mock.MatchedBy(func(req *abci.RequestVerifyVoteExtension) bool { + _, ok := proTxHashMap[types.ProTxHash(req.ValidatorProTxHash).String()] + return assert.Equal(t, req.Hash, blockID.Hash.Bytes()) && + assert.Equal(t, req.Height, height) && + assert.Equal(t, req.VoteExtensions, voteExtensions) && + assert.True(t, ok) }) + m.On("VerifyVoteExtension", mock.Anything, reqVerifyVoteExtFunc). + Return(&abci.ResponseVerifyVoteExtension{ + Status: abci.ResponseVerifyVoteExtension_ACCEPT, + }, nil) m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[2:]...) 
diff --git a/internal/consensus/ticker.go b/internal/consensus/ticker.go index 9d7a94fefc..59eaa800b0 100644 --- a/internal/consensus/ticker.go +++ b/internal/consensus/ticker.go @@ -115,9 +115,9 @@ func (t *timeoutTicker) timeoutRoutine(ctx context.Context) { // NOTE time.Timer allows duration to be non-positive ti = newti t.timer.Reset(ti.Duration) - t.logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step.String()) + t.logger.Debug("Internal state machine timeout scheduled", "duration", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step.String()) case <-t.timer.C: - t.logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step.String()) + t.logger.Debug("Internal state machine timeout elapsed", "duration", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step.String()) // go routine here guarantees timeoutRoutine doesn't block. // Determinism comes from playback in the receiveRoutine. // We can eliminate it by merging the timeoutRoutine into receiveRoutine diff --git a/internal/consensus/types/height_vote_set.go b/internal/consensus/types/height_vote_set.go index 29e159f6bd..dcae57a7b8 100644 --- a/internal/consensus/types/height_vote_set.go +++ b/internal/consensus/types/height_vote_set.go @@ -41,8 +41,8 @@ One for their LastPrecommits round, and another for the official commit round. type HeightVoteSet struct { chainID string height int64 - stateID types.StateID // State ID describing current state (eg. previous height and previous app hash) valSet *types.ValidatorSet + stateID types.StateID // State ID describing current state (eg. 
previous height and previous app hash) mtx sync.Mutex round int32 // max tracked round @@ -50,11 +50,7 @@ type HeightVoteSet struct { peerCatchupRounds map[types.NodeID][]int32 // keys: peer.ID; values: at most 2 rounds } -func NewHeightVoteSet( - chainID string, - height int64, - stateID types.StateID, - valSet *types.ValidatorSet) *HeightVoteSet { +func NewHeightVoteSet(chainID string, height int64, stateID types.StateID, valSet *types.ValidatorSet) *HeightVoteSet { hvs := &HeightVoteSet{ chainID: chainID, stateID: stateID, diff --git a/internal/eventlog/eventlog.go b/internal/eventlog/eventlog.go index b507f79bc1..31c7d14fec 100644 --- a/internal/eventlog/eventlog.go +++ b/internal/eventlog/eventlog.go @@ -24,9 +24,9 @@ import ( // any number of readers. type Log struct { // These values do not change after construction. - windowSize time.Duration - maxItems int - numItemsGauge gauge + windowSize time.Duration + maxItems int + metrics *Metrics // Protects access to the fields below. Lock to modify the values of these // fields, or to read or snapshot the values. @@ -45,14 +45,14 @@ func New(opts LogSettings) (*Log, error) { return nil, errors.New("window size must be positive") } lg := &Log{ - windowSize: opts.WindowSize, - maxItems: opts.MaxItems, - numItemsGauge: discard{}, - ready: make(chan struct{}), - source: opts.Source, + windowSize: opts.WindowSize, + maxItems: opts.MaxItems, + metrics: NopMetrics(), + ready: make(chan struct{}), + source: opts.Source, } if opts.Metrics != nil { - lg.numItemsGauge = opts.Metrics.numItemsGauge + lg.metrics = opts.Metrics } return lg, nil } diff --git a/internal/eventlog/metrics.gen.go b/internal/eventlog/metrics.gen.go new file mode 100644 index 0000000000..d9d86b2b9e --- /dev/null +++ b/internal/eventlog/metrics.gen.go @@ -0,0 +1,30 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package eventlog + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + numItems: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "num_items", + Help: "Number of items currently resident in the event log.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + numItems: discard.NewGauge(), + } +} diff --git a/internal/eventlog/metrics.go b/internal/eventlog/metrics.go index cc319032ee..fb7ccf694e 100644 --- a/internal/eventlog/metrics.go +++ b/internal/eventlog/metrics.go @@ -1,39 +1,14 @@ package eventlog -import ( - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" -) +import "github.com/go-kit/kit/metrics" -// gauge is the subset of the Prometheus gauge interface used here. -type gauge interface { - Set(float64) -} +const MetricsSubsystem = "eventlog" + +//go:generate go run ../../scripts/metricsgen -struct=Metrics // Metrics define the metrics exported by the eventlog package. type Metrics struct { - numItemsGauge gauge -} - -// discard is a no-op implementation of the gauge interface. -type discard struct{} - -func (discard) Set(float64) {} - -const eventlogSubsystem = "eventlog" -// PrometheusMetrics returns a collection of eventlog metrics for Prometheus. 
-func PrometheusMetrics(ns string, fields ...string) *Metrics { - var labels []string - for i := 0; i < len(fields); i += 2 { - labels = append(labels, fields[i]) - } - return &Metrics{ - numItemsGauge: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: ns, - Subsystem: eventlogSubsystem, - Name: "num_items", - Help: "Number of items currently resident in the event log.", - }, labels).With(fields...), - } + // Number of items currently resident in the event log. + numItems metrics.Gauge } diff --git a/internal/eventlog/prune.go b/internal/eventlog/prune.go index 4c3c1f0d0a..062e91bd2b 100644 --- a/internal/eventlog/prune.go +++ b/internal/eventlog/prune.go @@ -12,7 +12,7 @@ func (lg *Log) checkPrune(head *logEntry, size int, age time.Duration) error { const windowSlop = 30 * time.Second if age < (lg.windowSize+windowSlop) && (lg.maxItems <= 0 || size <= lg.maxItems) { - lg.numItemsGauge.Set(float64(lg.numItems)) + lg.metrics.numItems.Set(float64(lg.numItems)) return nil // no pruning is needed } @@ -46,7 +46,7 @@ func (lg *Log) checkPrune(head *logEntry, size int, age time.Duration) error { lg.mu.Lock() defer lg.mu.Unlock() lg.numItems = newState.size - lg.numItemsGauge.Set(float64(newState.size)) + lg.metrics.numItems.Set(float64(newState.size)) lg.oldestCursor = newState.oldest lg.head = newState.head return err diff --git a/internal/evidence/metrics.gen.go b/internal/evidence/metrics.gen.go new file mode 100644 index 0000000000..f2eb7dfa8f --- /dev/null +++ b/internal/evidence/metrics.gen.go @@ -0,0 +1,30 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package evidence + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + NumEvidence: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "num_evidence", + Help: "Number of pending evidence in the evidence pool.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + NumEvidence: discard.NewGauge(), + } +} diff --git a/internal/evidence/metrics.go b/internal/evidence/metrics.go index 59efc23f91..adb0260f2d 100644 --- a/internal/evidence/metrics.go +++ b/internal/evidence/metrics.go @@ -2,9 +2,6 @@ package evidence import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -13,35 +10,11 @@ const ( MetricsSubsystem = "evidence_pool" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. // see MetricsProvider for descriptions. type Metrics struct { - // Number of evidence in the evidence pool + // Number of pending evidence in the evidence pool. NumEvidence metrics.Gauge } - -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). 
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - - NumEvidence: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "num_evidence", - Help: "Number of pending evidence in evidence pool.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - NumEvidence: discard.NewGauge(), - } -} diff --git a/internal/evidence/mocks/block_store.go b/internal/evidence/mocks/block_store.go index e45b281b90..e61c4e0aeb 100644 --- a/internal/evidence/mocks/block_store.go +++ b/internal/evidence/mocks/block_store.go @@ -3,10 +3,7 @@ package mocks import ( - testing "testing" - mock "github.com/stretchr/testify/mock" - types "github.com/tendermint/tendermint/types" ) @@ -61,8 +58,13 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return r0 } -// NewBlockStore creates a new instance of BlockStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockStore(t testing.TB) *BlockStore { +type mockConstructorTestingTNewBlockStore interface { + mock.TestingT + Cleanup(func()) +} + +// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore { mock := &BlockStore{} mock.Mock.Test(t) diff --git a/internal/evidence/pool.go b/internal/evidence/pool.go index 53d2e54ba1..43f62d9cba 100644 --- a/internal/evidence/pool.go +++ b/internal/evidence/pool.go @@ -22,6 +22,16 @@ import ( "github.com/tendermint/tendermint/types" ) +// key prefixes +// NB: Before modifying these, cross-check them with those in +// * internal/store/store.go [0..4, 13] +// * internal/state/store.go [5..8, 14] +// * internal/evidence/pool.go [9..10] +// * light/store/db/db.go [11..12] +// TODO(sergio): Move all these to their own package. +// TODO: what about these (they already collide): +// * scripts/scmigrate/migrate.go [3] +// * internal/p2p/peermanager.go [1] const ( // prefixes are unique across all tm db's prefixCommitted = int64(9) diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index 5bbaff5366..3a5ed3fd3b 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -159,7 +159,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope) (er // processEvidenceCh implements a blocking event loop where we listen for p2p // Envelope messages from the evidenceCh. -func (r *Reactor) processEvidenceCh(ctx context.Context, evidenceCh *p2p.Channel) { +func (r *Reactor) processEvidenceCh(ctx context.Context, evidenceCh p2p.Channel) { iter := evidenceCh.Receive(ctx) for iter.Next(ctx) { envelope := iter.Envelope() @@ -186,7 +186,7 @@ func (r *Reactor) processEvidenceCh(ctx context.Context, evidenceCh *p2p.Channel // connects/disconnects frequently from the broadcasting peer(s). 
// // REF: https://github.com/tendermint/tendermint/issues/4727 -func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, evidenceCh *p2p.Channel) { +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, evidenceCh p2p.Channel) { r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() @@ -227,7 +227,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, evidenceCh *p2p.Channel) { +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, evidenceCh p2p.Channel) { for { select { case peerUpdate := <-peerUpdates.Updates(): @@ -249,7 +249,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerU // that the peer has already received or may not be ready for. 
// // REF: https://github.com/tendermint/tendermint/issues/4727 -func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID, evidenceCh *p2p.Channel) { +func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID, evidenceCh p2p.Channel) { var next *clist.CElement defer func() { diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index 0f2ac2b19c..22f093efce 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -39,7 +39,7 @@ type reactorTestSuite struct { logger log.Logger reactors map[types.NodeID]*evidence.Reactor pools map[types.NodeID]*evidence.Pool - evidenceChannels map[types.NodeID]*p2p.Channel + evidenceChannels map[types.NodeID]p2p.Channel peerUpdates map[types.NodeID]*p2p.PeerUpdates peerChans map[types.NodeID]chan p2p.PeerUpdate nodes []*p2ptest.Node @@ -97,7 +97,7 @@ func setup(ctx context.Context, t *testing.T, stateStores []sm.Store) *reactorTe rts.network.Nodes[nodeID].PeerManager.Register(ctx, pu) rts.nodes = append(rts.nodes, rts.network.Nodes[nodeID]) - chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (p2p.Channel, error) { return rts.evidenceChannels[nodeID], nil } diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go index 6381ea888a..573b63f406 100644 --- a/internal/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -10,13 +10,13 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/inspect/rpc" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/indexer/sink" 
"github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" - tmstrings "github.com/tendermint/tendermint/libs/strings" "github.com/tendermint/tendermint/types" "golang.org/x/sync/errgroup" diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index 4085dcb805..2425c3a6c2 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -23,7 +23,6 @@ import ( indexermocks "github.com/tendermint/tendermint/internal/state/indexer/mocks" statemocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/proto/tendermint/state" httpclient "github.com/tendermint/tendermint/rpc/client/http" "github.com/tendermint/tendermint/types" ) @@ -263,12 +262,10 @@ func TestBlockResults(t *testing.T) { testGasUsed := int64(100) stateStoreMock := &statemocks.Store{} // tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{ - FinalizeBlock: &abcitypes.ResponseFinalizeBlock{ - TxResults: []*abcitypes.ExecTxResult{ - { - GasUsed: testGasUsed, - }, + stateStoreMock.On("LoadFinalizeBlockResponses", testHeight).Return(&abcitypes.ResponseFinalizeBlock{ + TxResults: []*abcitypes.ExecTxResult{ + { + GasUsed: testGasUsed, }, }, }, nil) diff --git a/internal/inspect/rpc/rpc.go b/internal/inspect/rpc/rpc.go index 00c3e52efa..d706168346 100644 --- a/internal/inspect/rpc/rpc.go +++ b/internal/inspect/rpc/rpc.go @@ -125,7 +125,8 @@ func serverRPCConfig(r *config.RPCConfig) *server.Config { // If necessary adjust global WriteTimeout to ensure it's greater than // TimeoutBroadcastTxCommit. // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= r.TimeoutBroadcastTxCommit { + // Note we don't need to adjust anything if the timeout is already unlimited. 
+ if cfg.WriteTimeout > 0 && cfg.WriteTimeout <= r.TimeoutBroadcastTxCommit { cfg.WriteTimeout = r.TimeoutBroadcastTxCommit + 1*time.Second } return cfg diff --git a/internal/libs/clist/bench_test.go b/internal/libs/clist/bench_test.go index ee5d836a7a..95973cc767 100644 --- a/internal/libs/clist/bench_test.go +++ b/internal/libs/clist/bench_test.go @@ -12,7 +12,7 @@ func BenchmarkDetaching(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { start.removed = true - start.detachNext() + start.DetachNext() start.DetachPrev() tmp := nxt nxt = nxt.Next() diff --git a/internal/libs/clist/clist.go b/internal/libs/clist/clist.go index 3969c94cce..9a0e5bcc84 100644 --- a/internal/libs/clist/clist.go +++ b/internal/libs/clist/clist.go @@ -103,7 +103,7 @@ func (e *CElement) Removed() bool { return isRemoved } -func (e *CElement) detachNext() { +func (e *CElement) DetachNext() { e.mtx.Lock() if !e.removed { e.mtx.Unlock() diff --git a/internal/libs/confix/confix.go b/internal/libs/confix/confix.go new file mode 100644 index 0000000000..a9449fa228 --- /dev/null +++ b/internal/libs/confix/confix.go @@ -0,0 +1,155 @@ +// Package confix applies changes to a Tendermint TOML configuration file, to +// update configurations created with an older version of Tendermint to a +// compatible format for a newer version. +package confix + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + + "github.com/creachadair/atomicfile" + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/transform" + "github.com/spf13/viper" + + "github.com/tendermint/tendermint/config" +) + +// Upgrade reads the configuration file at configPath and applies any +// transformations necessary to upgrade it to the current version. If this +// succeeds, the transformed output is written to outputPath. As a special +// case, if outputPath == "" the output is written to stdout. +// +// It is safe if outputPath == inputPath. 
If a regular file outputPath already +// exists, it is overwritten. In case of error, the output is not written. +// +// Upgrade is a convenience wrapper for calls to LoadConfig, ApplyFixes, and +// CheckValid. If the caller requires more control over the behavior of the +// upgrade, call those functions directly. +func Upgrade(ctx context.Context, configPath, outputPath string) error { + if configPath == "" { + return errors.New("empty input configuration path") + } + + doc, err := LoadConfig(configPath) + if err != nil { + return fmt.Errorf("loading config: %v", err) + } + + if err := ApplyFixes(ctx, doc); err != nil { + return fmt.Errorf("updating %q: %v", configPath, err) + } + + var buf bytes.Buffer + if err := tomledit.Format(&buf, doc); err != nil { + return fmt.Errorf("formatting config: %v", err) + } + + // Verify that Tendermint can parse the results after our edits. + if err := CheckValid(buf.Bytes()); err != nil { + return fmt.Errorf("updated config is invalid: %v", err) + } + + if outputPath == "" { + _, err = os.Stdout.Write(buf.Bytes()) + } else { + err = atomicfile.WriteData(outputPath, buf.Bytes(), 0600) + } + return err +} + +// ApplyFixes transforms doc and reports whether it succeeded. +func ApplyFixes(ctx context.Context, doc *tomledit.Document) error { + // Check what version of Tendermint might have created this config file, as + // a safety check for the updates we are about to make. + tmVersion := GuessConfigVersion(doc) + if tmVersion == vUnknown { + return errors.New("cannot tell what Tendermint version created this config") + } else if tmVersion < v34 || tmVersion > v36 { + // TODO(creachadair): Add in rewrites for older versions. This will + // require some digging to discover what the changes were. The upgrade + // instructions do not give specifics. + return fmt.Errorf("unable to update version %s config", tmVersion) + } + return plan.Apply(ctx, doc) +} + +// LoadConfig loads and parses the TOML document from path. 
+func LoadConfig(path string) (*tomledit.Document, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return tomledit.Parse(f) +} + +const ( + vUnknown = "" + v32 = "v0.32" + v33 = "v0.33" + v34 = "v0.34" + v35 = "v0.35" + v36 = "v0.36" +) + +// GuessConfigVersion attempts to figure out which version of Tendermint +// created the specified config document. It returns "" if the creating version +// cannot be determined, otherwise a string of the form "vX.YY". +func GuessConfigVersion(doc *tomledit.Document) string { + hasDisableWS := doc.First("rpc", "experimental-disable-websocket") != nil + hasUseLegacy := doc.First("p2p", "use-legacy") != nil // v0.35 only + if hasDisableWS && !hasUseLegacy { + return v36 + } + + hasBlockSync := transform.FindTable(doc, "blocksync") != nil // add: v0.35 + hasStateSync := transform.FindTable(doc, "statesync") != nil // add: v0.34 + if hasBlockSync && hasStateSync { + return v35 + } else if hasStateSync { + return v34 + } + + hasIndexKeys := doc.First("tx_index", "index_keys") != nil // add: v0.33 + hasIndexTags := doc.First("tx_index", "index_tags") != nil // rem: v0.33 + if hasIndexKeys && !hasIndexTags { + return v33 + } + + hasFastSync := transform.FindTable(doc, "fastsync") != nil // add: v0.32 + if hasIndexTags && hasFastSync { + return v32 + } + + // Something older, probably. + return vUnknown +} + +// CheckValid checks whether the specified config appears to be a valid +// Tendermint config file. This emulates how the node loads the config. +func CheckValid(data []byte) error { + v := viper.New() + v.SetConfigType("toml") + + if err := v.ReadConfig(bytes.NewReader(data)); err != nil { + return fmt.Errorf("reading config: %w", err) + } + + var cfg config.Config + if err := v.Unmarshal(&cfg); err != nil { + return fmt.Errorf("decoding config: %w", err) + } + + return cfg.ValidateBasic() +} + +// WithLogWriter returns a child of ctx with a logger attached that sends +// output to w. 
This is a convenience wrapper for transform.WithLogWriter. +func WithLogWriter(ctx context.Context, w io.Writer) context.Context { + return transform.WithLogWriter(ctx, w) +} diff --git a/scripts/confix/confix_test.go b/internal/libs/confix/confix_test.go similarity index 97% rename from scripts/confix/confix_test.go rename to internal/libs/confix/confix_test.go index ec258f4ca6..dc0042fe59 100644 --- a/scripts/confix/confix_test.go +++ b/internal/libs/confix/confix_test.go @@ -1,4 +1,4 @@ -package main_test +package confix_test import ( "bytes" @@ -9,7 +9,7 @@ import ( "github.com/creachadair/tomledit" "github.com/google/go-cmp/cmp" - confix "github.com/tendermint/tendermint/scripts/confix" + "github.com/tendermint/tendermint/internal/libs/confix" ) func mustParseConfig(t *testing.T, path string) *tomledit.Document { diff --git a/scripts/confix/plan.go b/internal/libs/confix/plan.go similarity index 94% rename from scripts/confix/plan.go rename to internal/libs/confix/plan.go index 653bca9fd2..ac6f7b5a6d 100644 --- a/scripts/confix/plan.go +++ b/internal/libs/confix/plan.go @@ -1,4 +1,4 @@ -package main +package confix import ( "context" @@ -222,4 +222,16 @@ var plan = transform.Plan{ return fmt.Errorf("unrecognized value: %v", idx.KeyValue) }), }, + { + // Since https://github.com/tendermint/tendermint/pull/8514. + Desc: "Remove the recheck option from the [mempool] section", + T: transform.Remove(parser.Key{"mempool", "recheck"}), + ErrorOK: true, + }, + { + // Since https://github.com/tendermint/tendermint/pull/8654. 
+ Desc: "Remove the seeds option from the [p2p] section", + T: transform.Remove(parser.Key{"p2p", "seeds"}), + ErrorOK: true, + }, } diff --git a/scripts/confix/testdata/README.md b/internal/libs/confix/testdata/README.md similarity index 90% rename from scripts/confix/testdata/README.md rename to internal/libs/confix/testdata/README.md index 5bbfa795f3..04f2af2050 100644 --- a/scripts/confix/testdata/README.md +++ b/internal/libs/confix/testdata/README.md @@ -41,12 +41,12 @@ The files named `diff-XX-YY.txt` were generated by using the `condiff` tool on the config samples for versions v0.XX and v0.YY: ```shell -go run ./scripts/confix/condiff -desnake vXX-config vYY-config.toml > diff-XX-YY.txt +go run ./scripts/condiff -desnake vXX-config vYY-config.toml > diff-XX-YY.txt ``` The `baseline.txt` was computed in the same way, but using an empty starting file so that we capture all the settings in the target: ```shell -go run ./scripts/confix/condiff -desnake /dev/null v26-config.toml > baseline.txt +go run ./scripts/condiff -desnake /dev/null v26-config.toml > baseline.txt ``` diff --git a/scripts/confix/testdata/baseline.txt b/internal/libs/confix/testdata/baseline.txt similarity index 100% rename from scripts/confix/testdata/baseline.txt rename to internal/libs/confix/testdata/baseline.txt diff --git a/scripts/confix/testdata/diff-26-27.txt b/internal/libs/confix/testdata/diff-26-27.txt similarity index 100% rename from scripts/confix/testdata/diff-26-27.txt rename to internal/libs/confix/testdata/diff-26-27.txt diff --git a/scripts/confix/testdata/diff-27-28.txt b/internal/libs/confix/testdata/diff-27-28.txt similarity index 100% rename from scripts/confix/testdata/diff-27-28.txt rename to internal/libs/confix/testdata/diff-27-28.txt diff --git a/scripts/confix/testdata/diff-28-29.txt b/internal/libs/confix/testdata/diff-28-29.txt similarity index 100% rename from scripts/confix/testdata/diff-28-29.txt rename to internal/libs/confix/testdata/diff-28-29.txt diff 
--git a/scripts/confix/testdata/diff-29-30.txt b/internal/libs/confix/testdata/diff-29-30.txt similarity index 100% rename from scripts/confix/testdata/diff-29-30.txt rename to internal/libs/confix/testdata/diff-29-30.txt diff --git a/scripts/confix/testdata/diff-30-31.txt b/internal/libs/confix/testdata/diff-30-31.txt similarity index 100% rename from scripts/confix/testdata/diff-30-31.txt rename to internal/libs/confix/testdata/diff-30-31.txt diff --git a/scripts/confix/testdata/diff-31-32.txt b/internal/libs/confix/testdata/diff-31-32.txt similarity index 100% rename from scripts/confix/testdata/diff-31-32.txt rename to internal/libs/confix/testdata/diff-31-32.txt diff --git a/scripts/confix/testdata/diff-32-33.txt b/internal/libs/confix/testdata/diff-32-33.txt similarity index 100% rename from scripts/confix/testdata/diff-32-33.txt rename to internal/libs/confix/testdata/diff-32-33.txt diff --git a/scripts/confix/testdata/diff-33-34.txt b/internal/libs/confix/testdata/diff-33-34.txt similarity index 100% rename from scripts/confix/testdata/diff-33-34.txt rename to internal/libs/confix/testdata/diff-33-34.txt diff --git a/scripts/confix/testdata/diff-34-35.txt b/internal/libs/confix/testdata/diff-34-35.txt similarity index 87% rename from scripts/confix/testdata/diff-34-35.txt rename to internal/libs/confix/testdata/diff-34-35.txt index 13a4432a0e..de08f29652 100644 --- a/scripts/confix/testdata/diff-34-35.txt +++ b/internal/libs/confix/testdata/diff-34-35.txt @@ -8,13 +8,11 @@ +M blocksync.version -S fastsync -M fastsync.version -+M mempool.ttl-duration -+M mempool.ttl-num-blocks -+M mempool.version -M mempool.wal-dir +M p2p.bootstrap-peers +M p2p.max-connections +M p2p.max-incoming-connection-attempts ++M p2p.max-outgoing-connections +M p2p.queue-type -M p2p.seed-mode +M p2p.use-legacy @@ -28,4 +26,3 @@ -M statesync.chunk-fetchers +M statesync.fetchers +M statesync.use-p2p -+M tx-index.psql-conn diff --git a/scripts/confix/testdata/diff-35-36.txt 
b/internal/libs/confix/testdata/diff-35-36.txt similarity index 96% rename from scripts/confix/testdata/diff-35-36.txt rename to internal/libs/confix/testdata/diff-35-36.txt index 13fd268af2..298c53056a 100644 --- a/scripts/confix/testdata/diff-35-36.txt +++ b/internal/libs/confix/testdata/diff-35-36.txt @@ -9,12 +9,14 @@ -M consensus.timeout-prevote-delta -M consensus.timeout-propose -M consensus.timeout-propose-delta +-M mempool.recheck -M mempool.version -M p2p.addr-book-file -M p2p.addr-book-strict -M p2p.max-num-inbound-peers -M p2p.max-num-outbound-peers -M p2p.persistent-peers-max-dial-period +-M p2p.seeds -M p2p.unconditional-peer-ids -M p2p.use-legacy +M rpc.event-log-max-items diff --git a/scripts/confix/testdata/non-config.toml b/internal/libs/confix/testdata/non-config.toml similarity index 100% rename from scripts/confix/testdata/non-config.toml rename to internal/libs/confix/testdata/non-config.toml diff --git a/scripts/confix/testdata/v26-config.toml b/internal/libs/confix/testdata/v26-config.toml similarity index 100% rename from scripts/confix/testdata/v26-config.toml rename to internal/libs/confix/testdata/v26-config.toml diff --git a/scripts/confix/testdata/v27-config.toml b/internal/libs/confix/testdata/v27-config.toml similarity index 100% rename from scripts/confix/testdata/v27-config.toml rename to internal/libs/confix/testdata/v27-config.toml diff --git a/scripts/confix/testdata/v28-config.toml b/internal/libs/confix/testdata/v28-config.toml similarity index 100% rename from scripts/confix/testdata/v28-config.toml rename to internal/libs/confix/testdata/v28-config.toml diff --git a/scripts/confix/testdata/v29-config.toml b/internal/libs/confix/testdata/v29-config.toml similarity index 100% rename from scripts/confix/testdata/v29-config.toml rename to internal/libs/confix/testdata/v29-config.toml diff --git a/scripts/confix/testdata/v30-config.toml b/internal/libs/confix/testdata/v30-config.toml similarity index 100% rename from 
scripts/confix/testdata/v30-config.toml rename to internal/libs/confix/testdata/v30-config.toml diff --git a/scripts/confix/testdata/v31-config.toml b/internal/libs/confix/testdata/v31-config.toml similarity index 100% rename from scripts/confix/testdata/v31-config.toml rename to internal/libs/confix/testdata/v31-config.toml diff --git a/scripts/confix/testdata/v32-config.toml b/internal/libs/confix/testdata/v32-config.toml similarity index 100% rename from scripts/confix/testdata/v32-config.toml rename to internal/libs/confix/testdata/v32-config.toml diff --git a/scripts/confix/testdata/v33-config.toml b/internal/libs/confix/testdata/v33-config.toml similarity index 100% rename from scripts/confix/testdata/v33-config.toml rename to internal/libs/confix/testdata/v33-config.toml diff --git a/scripts/confix/testdata/v34-config.toml b/internal/libs/confix/testdata/v34-config.toml similarity index 93% rename from scripts/confix/testdata/v34-config.toml rename to internal/libs/confix/testdata/v34-config.toml index 6bcffd9541..02d94d1cd3 100644 --- a/scripts/confix/testdata/v34-config.toml +++ b/internal/libs/confix/testdata/v34-config.toml @@ -272,6 +272,11 @@ dial_timeout = "3s" ####################################################### [mempool] +# Mempool version to use: +# 1) "v0" - (default) FIFO mempool. +# 2) "v1" - prioritized mempool. +version = "v0" + recheck = true broadcast = true wal_dir = "" @@ -301,6 +306,22 @@ max_tx_bytes = 1048576 # XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 max_batch_bytes = 0 +# ttl-duration, if non-zero, defines the maximum amount of time a transaction +# can exist for in the mempool. +# +# Note, if ttl-num-blocks is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if it's +# insertion time into the mempool is beyond ttl-duration. 
+ttl-duration = "0s" + +# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction +# can exist for in the mempool. +# +# Note, if ttl-duration is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if +# it's insertion time into the mempool is beyond ttl-duration. +ttl-num-blocks = 0 + ####################################################### ### State Sync Configuration Options ### ####################################################### @@ -405,8 +426,14 @@ peer_query_maj23_sleep_duration = "2s" # 1) "null" # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). # - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. indexer = "kv" +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + ####################################################### ### Instrumentation Configuration Options ### ####################################################### diff --git a/scripts/confix/testdata/v35-config.toml b/internal/libs/confix/testdata/v35-config.toml similarity index 98% rename from scripts/confix/testdata/v35-config.toml rename to internal/libs/confix/testdata/v35-config.toml index ec11e27716..1c360742f0 100644 --- a/scripts/confix/testdata/v35-config.toml +++ b/internal/libs/confix/testdata/v35-config.toml @@ -227,7 +227,9 @@ pprof-laddr = "" # Enable the legacy p2p layer. use-legacy = false -# Select the p2p internal queue +# Select the p2p internal queue. +# Options are: "fifo", "simple-priority", "priority", and "wdrr" +# with the default being "priority". 
queue-type = "priority" # Address to listen for incoming connections @@ -281,6 +283,10 @@ max-num-outbound-peers = 10 # Maximum number of connections (inbound and outbound). max-connections = 64 +# Maximum number of connections reserved for outgoing +# connections. Must be less than max-connections +max-outgoing-connections = 12 + # Rate limits the number of incoming connection attempts per IP address. max-incoming-connection-attempts = 100 diff --git a/scripts/confix/testdata/v36-config.toml b/internal/libs/confix/testdata/v36-config.toml similarity index 97% rename from scripts/confix/testdata/v36-config.toml rename to internal/libs/confix/testdata/v36-config.toml index ae617640f6..8801fb44f1 100644 --- a/scripts/confix/testdata/v36-config.toml +++ b/internal/libs/confix/testdata/v36-config.toml @@ -157,7 +157,7 @@ experimental-disable-websocket = false # the latest (up to EventLogMaxItems) will be available for subscribers to # fetch via the /events method. If 0 (the default) the event log and the # /events RPC method are disabled. -event-log-window-size = "0s" +event-log-window-size = "30s" # The maxiumum number of events that may be retained by the event log. If # this value is 0, no upper limit is set. Otherwise, items in excess of @@ -208,8 +208,10 @@ pprof-laddr = "" ####################################################### [p2p] -# Select the p2p internal queue -queue-type = "priority" +# Select the p2p internal queue. +# Options are: "fifo", "simple-priority", and "priority", +# with the default being "priority". +queue-type = "simple-priority" # Address to listen for incoming connections laddr = "tcp://0.0.0.0:26656" @@ -221,13 +223,6 @@ laddr = "tcp://0.0.0.0:26656" # example: 159.89.10.97:26656 external-address = "" -# Comma separated list of seed nodes to connect to -# We only use these if we can’t connect to peers in the addrbook -# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead. 
-# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 -seeds = "" - # Comma separated list of peers to be added to the peer store # on startup. Either BootstrapPeers or PersistentPeers are # needed for peer discovery @@ -242,6 +237,10 @@ upnp = false # Maximum number of connections (inbound and outbound). max-connections = 64 +# Maximum number of connections reserved for outgoing +# connections. Must be less than max-connections +max-outgoing-connections = 12 + # Rate limits the number of incoming connection attempts per IP address. max-incoming-connection-attempts = 100 @@ -281,7 +280,11 @@ recv-rate = 5120000 ####################################################### [mempool] -recheck = true +# recheck has been moved from a config option to a global +# consensus param in v0.36 +# See https://github.com/tendermint/tendermint/issues/8244 for more information. + +# Set true to broadcast transactions in the mempool to other nodes broadcast = true # Maximum number of transactions in the mempool diff --git a/internal/libs/strings/string.go b/internal/libs/strings/string.go new file mode 100644 index 0000000000..067f31ffc3 --- /dev/null +++ b/internal/libs/strings/string.go @@ -0,0 +1,148 @@ +package strings + +import ( + "fmt" + "strings" + + tmbytes "github.com/tendermint/tendermint/libs/bytes" +) + +type lazyStringf struct { + tmpl string + args []interface{} + out string +} + +func (s *lazyStringf) String() string { + if s.out == "" && s.tmpl != "" { + s.out = fmt.Sprintf(s.tmpl, s.args) + s.args = nil + s.tmpl = "" + } + return s.out +} + +// LazySprintf creates a fmt.Stringer implementation with similar +// semantics as fmt.Sprintf, *except* that the string is built when +// String() is called on the object. This means that format arguments +// are resolved/captured into string format when String() is called, +// and not, as in fmt.Sprintf when that function returns. 
+// +// As a result, if you use this type in go routines or defer +// statements it's possible to pass an argument to LazySprintf which +// has one value at the call site and a different value when the +// String() is evaluated, which may lead to unexpected outcomes. In +// these situations, either be *extremely* careful about the arguments +// passed to this function or use fmt.Sprintf. +// +// The implementation also caches the output of the underlying +// fmt.Sprintf statement when String() is called, so subsequent calls +// will produce the same result. +func LazySprintf(t string, args ...interface{}) fmt.Stringer { + return &lazyStringf{tmpl: t, args: args} +} + +type lazyStringer struct { + val fmt.Stringer + out string +} + +func (l *lazyStringer) String() string { + if l.out == "" && l.val != nil { + l.out = l.val.String() + l.val = nil + } + return l.out +} + +// LazyStringer captures a fmt.Stringer implementation resolving the +// underlying string *only* when the String() method is called and +// caching the result for future use. +func LazyStringer(v fmt.Stringer) fmt.Stringer { return &lazyStringer{val: v} } + +type lazyBlockHash struct { + block interface{ Hash() tmbytes.HexBytes } + out string +} + +// LazyBlockHash defers block Hash until the Stringer interface is invoked. +// This is particularly useful for avoiding calling Sprintf when debugging is not +// active. +// +// As a result, if you use this type in go routines or defer +// statements it's possible to pass an argument to LazyBlockHash that +// has one value at the call site and a different value when the +// String() is evaluated, which may lead to unexpected outcomes. In +// these situations, either be *extremely* careful about the arguments +// passed to this function or use fmt.Sprintf. +// +// The implementation also caches the output of the string form of the +// block hash when String() is called, so subsequent calls will +// produce the same result. 
+func LazyBlockHash(block interface{ Hash() tmbytes.HexBytes }) fmt.Stringer { + return &lazyBlockHash{block: block} +} + +func (l *lazyBlockHash) String() string { + if l.out == "" && l.block != nil { + l.out = l.block.Hash().String() + l.block = nil + } + return l.out +} + +// SplitAndTrimEmpty slices s into all subslices separated by sep and returns a +// slice of the string s with all leading and trailing Unicode code points +// contained in cutset removed. If sep is empty, SplitAndTrim splits after each +// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of +// -1. also filter out empty strings, only return non-empty strings. +func SplitAndTrimEmpty(s, sep, cutset string) []string { + if s == "" { + return []string{} + } + + spl := strings.Split(s, sep) + nonEmptyStrings := make([]string, 0, len(spl)) + + for i := 0; i < len(spl); i++ { + element := strings.Trim(spl[i], cutset) + if element != "" { + nonEmptyStrings = append(nonEmptyStrings, element) + } + } + + return nonEmptyStrings +} + +// ASCIITrim removes spaces from an a ASCII string, erroring if the +// sequence is not an ASCII string. 
+func ASCIITrim(s string) (string, error) { + if len(s) == 0 { + return "", nil + } + r := make([]byte, 0, len(s)) + for _, b := range []byte(s) { + switch { + case b == 32: + continue // skip space + case 32 < b && b <= 126: + r = append(r, b) + default: + return "", fmt.Errorf("non-ASCII (non-tab) char 0x%X", b) + } + } + return string(r), nil +} + +// StringSliceEqual checks if string slices a and b are equal +func StringSliceEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/libs/strings/string_test.go b/internal/libs/strings/string_test.go similarity index 100% rename from libs/strings/string_test.go rename to internal/libs/strings/string_test.go diff --git a/internal/mempool/cache.go b/internal/mempool/cache.go index c69fc80dd4..3986cd5853 100644 --- a/internal/mempool/cache.go +++ b/internal/mempool/cache.go @@ -22,6 +22,10 @@ type TxCache interface { // Remove removes the given raw transaction from the cache. Remove(tx types.Tx) + + // Has reports whether tx is present in the cache. Checking for presence is + // not treated as an access of the value. + Has(tx types.Tx) bool } var _ TxCache = (*LRUTxCache)(nil) @@ -97,6 +101,14 @@ func (c *LRUTxCache) Remove(tx types.Tx) { } } +func (c *LRUTxCache) Has(tx types.Tx) bool { + c.mtx.Lock() + defer c.mtx.Unlock() + + _, ok := c.cacheMap[tx.Key()] + return ok +} + // NopTxCache defines a no-op raw transaction cache. 
type NopTxCache struct{} @@ -105,3 +117,4 @@ var _ TxCache = (*NopTxCache)(nil) func (NopTxCache) Reset() {} func (NopTxCache) Push(types.Tx) bool { return true } func (NopTxCache) Remove(types.Tx) {} +func (NopTxCache) Has(types.Tx) bool { return false } diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index 629fa0bdae..c0da7cef20 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -1,20 +1,21 @@ package mempool import ( - "bytes" "context" - "errors" "fmt" + "runtime" + "sort" "sync" "sync/atomic" "time" + "github.com/creachadair/taskgroup" abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/types" ) @@ -23,73 +24,41 @@ var _ Mempool = (*TxMempool)(nil) // TxMempoolOption sets an optional parameter on the TxMempool. type TxMempoolOption func(*TxMempool) -// TxMempool defines a prioritized mempool data structure used by the v1 mempool -// reactor. It keeps a thread-safe priority queue of transactions that is used -// when a block proposer constructs a block and a thread-safe linked-list that -// is used to gossip transactions to peers in a FIFO manner. +// TxMempool implemements the Mempool interface and allows the application to +// set priority values on transactions in the CheckTx response. When selecting +// transactions to include in a block, higher-priority transactions are chosen +// first. When evicting transactions from the mempool for size constraints, +// lower-priority transactions are evicted sooner. 
+// +// Within the mempool, transactions are ordered by time of arrival, and are +// gossiped to the rest of the network based on that order (gossip order does +// not take priority into account). type TxMempool struct { + // Immutable fields logger log.Logger - metrics *Metrics config *config.MempoolConfig proxyAppConn abciclient.Client + metrics *Metrics + cache TxCache // seen transactions - // txsAvailable fires once for each height when the mempool is not empty - txsAvailable chan struct{} - notifiedTxsAvailable bool - - // height defines the last block height process during Update() - height int64 - - // sizeBytes defines the total size of the mempool (sum of all tx bytes) - sizeBytes int64 - - // cache defines a fixed-size cache of already seen transactions as this - // reduces pressure on the proxyApp. - cache TxCache - - // txStore defines the main storage of valid transactions. Indexes are built - // on top of this store. - txStore *TxStore - - // gossipIndex defines the gossiping index of valid transactions via a - // thread-safe linked-list. We also use the gossip index as a cursor for - // rechecking transactions already in the mempool. - gossipIndex *clist.CList + // Atomically-updated fields + txsBytes int64 // atomic: the total size of all transactions in the mempool, in bytes - // recheckCursor and recheckEnd are used as cursors based on the gossip index - // to recheck transactions that are already in the mempool. Iteration is not - // thread-safe and transaction may be mutated in serial order. - // - // XXX/TODO: It might be somewhat of a codesmell to use the gossip index for - // iterator and cursor management when rechecking transactions. If the gossip - // index changes or is removed in a future refactor, this will have to be - // refactored. Instead, we should consider just keeping a slice of a snapshot - // of the mempool's current transactions during Update and an integer cursor - // into that slice. 
This, however, requires additional O(n) space complexity. - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - - // priorityIndex defines the priority index of valid transactions via a - // thread-safe priority queue. - priorityIndex *TxPriorityQueue - - // heightIndex defines a height-based, in ascending order, transaction index. - // i.e. older transactions are first. - heightIndex *WrappedTxList - - // timestampIndex defines a timestamp-based, in ascending order, transaction - // index. i.e. older transactions are first. - timestampIndex *WrappedTxList - - // A read/write lock is used to safe guard updates, insertions and deletions - // from the mempool. A read-lock is implicitly acquired when executing CheckTx, - // however, a caller must explicitly grab a write-lock via Lock when updating - // the mempool via Update(). - mtx sync.RWMutex - preCheck PreCheckFunc - postCheck PostCheckFunc + // Synchronized fields, protected by mtx. + mtx *sync.RWMutex + notifiedTxsAvailable bool + txsAvailable chan struct{} // one value sent per height when mempool is not empty + preCheck PreCheckFunc + postCheck PostCheckFunc + height int64 // the latest height passed to Update + + txs *clist.CList // valid transactions (passed CheckTx) + txByKey map[types.TxKey]*clist.CElement + txBySender map[string]*clist.CElement // for sender != "" } +// NewTxMempool constructs a new, empty priority mempool at the specified +// initial height and using the given config and options. 
func NewTxMempool( logger log.Logger, cfg *config.MempoolConfig, @@ -98,23 +67,16 @@ func NewTxMempool( ) *TxMempool { txmp := &TxMempool{ - logger: logger, - config: cfg, - proxyAppConn: proxyAppConn, - height: -1, - cache: NopTxCache{}, - metrics: NopMetrics(), - txStore: NewTxStore(), - gossipIndex: clist.New(), - priorityIndex: NewTxPriorityQueue(), - heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }), - timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp) - }), + logger: logger, + config: cfg, + proxyAppConn: proxyAppConn, + metrics: NopMetrics(), + cache: NopTxCache{}, + txs: clist.New(), + mtx: new(sync.RWMutex), + txByKey: make(map[types.TxKey]*clist.CElement), + txBySender: make(map[string]*clist.CElement), } - if cfg.CacheSize > 0 { txmp.cache = NewLRUTxCache(cfg.CacheSize) } @@ -147,45 +109,34 @@ func WithMetrics(metrics *Metrics) TxMempoolOption { // Lock obtains a write-lock on the mempool. A caller must be sure to explicitly // release the lock when finished. -func (txmp *TxMempool) Lock() { - txmp.mtx.Lock() -} +func (txmp *TxMempool) Lock() { txmp.mtx.Lock() } // Unlock releases a write-lock on the mempool. -func (txmp *TxMempool) Unlock() { - txmp.mtx.Unlock() -} +func (txmp *TxMempool) Unlock() { txmp.mtx.Unlock() } // Size returns the number of valid transactions in the mempool. It is // thread-safe. -func (txmp *TxMempool) Size() int { - return txmp.txStore.Size() -} +func (txmp *TxMempool) Size() int { return txmp.txs.Len() } // SizeBytes return the total sum in bytes of all the valid transactions in the // mempool. It is thread-safe. -func (txmp *TxMempool) SizeBytes() int64 { - return atomic.LoadInt64(&txmp.sizeBytes) -} +func (txmp *TxMempool) SizeBytes() int64 { return atomic.LoadInt64(&txmp.txsBytes) } -// FlushAppConn executes FlushSync on the mempool's proxyAppConn. 
+// FlushAppConn executes Flush on the mempool's proxyAppConn. // -// NOTE: The caller must obtain a write-lock prior to execution. +// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before +// calling FlushAppConn. func (txmp *TxMempool) FlushAppConn(ctx context.Context) error { - return txmp.proxyAppConn.Flush(ctx) -} - -// WaitForNextTx returns a blocking channel that will be closed when the next -// valid transaction is available to gossip. It is thread-safe. -func (txmp *TxMempool) WaitForNextTx() <-chan struct{} { - return txmp.gossipIndex.WaitChan() -} + // N.B.: We have to issue the call outside the lock so that its callback can + // fire. It's safe to do this, the flush will block until complete. + // + // We could just not require the caller to hold the lock at all, but the + // semantics of the Mempool interface require the caller to hold it, and we + // can't change that without disrupting existing use. + txmp.mtx.Unlock() + defer txmp.mtx.Lock() -// NextGossipTx returns the next valid transaction to gossip. A caller must wait -// for WaitForNextTx to signal a transaction is available to gossip first. It is -// thread-safe. -func (txmp *TxMempool) NextGossipTx() *clist.CElement { - return txmp.gossipIndex.Front() + return txmp.proxyAppConn.Flush(ctx) } // EnableTxsAvailable enables the mempool to trigger events when transactions @@ -199,228 +150,249 @@ func (txmp *TxMempool) EnableTxsAvailable() { // TxsAvailable returns a channel which fires once for every height, and only // when transactions are available in the mempool. It is thread-safe. -func (txmp *TxMempool) TxsAvailable() <-chan struct{} { - return txmp.txsAvailable -} +func (txmp *TxMempool) TxsAvailable() <-chan struct{} { return txmp.txsAvailable } -// CheckTx executes the ABCI CheckTx method for a given transaction. -// It acquires a read-lock and attempts to execute the application's -// CheckTx ABCI method synchronously. 
We return an error if any of -// the following happen: +// CheckTx adds the given transaction to the mempool if it fits and passes the +// application's ABCI CheckTx method. // -// - The CheckTx execution fails. -// - The transaction already exists in the cache and we've already received the -// transaction from the peer. Otherwise, if it solely exists in the cache, we -// return nil. -// - The transaction size exceeds the maximum transaction size as defined by the -// configuration provided to the mempool. -// - The transaction fails Pre-Check (if it is defined). -// - The proxyAppConn fails, e.g. the buffer is full. +// CheckTx reports an error without adding tx if: // -// If the mempool is full, we still execute CheckTx and attempt to find a lower -// priority transaction to evict. If such a transaction exists, we remove the -// lower priority transaction and add the new one with higher priority. +// - The size of tx exceeds the configured maximum transaction size. +// - The pre-check hook is defined and reports an error for tx. +// - The transaction already exists in the cache. +// - The proxy connection to the application fails. // -// NOTE: -// - The applications' CheckTx implementation may panic. -// - The caller is not to explicitly require any locks for executing CheckTx. +// If tx passes all of the above conditions, it is passed (asynchronously) to +// the application's ABCI CheckTx method and this CheckTx method returns nil. +// If cb != nil, it is called when the ABCI request completes to report the +// application response. +// +// If the application accepts the transaction and the mempool is full, the +// mempool evicts one or more of the lowest-priority transaction whose priority +// is (strictly) lower than the priority of tx and whose size together exceeds +// the size of tx, and adds tx instead. If no such transactions exist, tx is +// discarded. 
func (txmp *TxMempool) CheckTx( ctx context.Context, tx types.Tx, cb func(*abci.ResponseCheckTx), txInfo TxInfo, ) error { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - if txSize := len(tx); txSize > txmp.config.MaxTxBytes { - return types.ErrTxTooLarge{ - Max: txmp.config.MaxTxBytes, - Actual: txSize, + // During the initial phase of CheckTx, we do not need to modify any state. + // A transaction will not actually be added to the mempool until it survives + // a call to the ABCI CheckTx method and size constraint checks. + height, err := func() (int64, error) { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + // Reject transactions in excess of the configured maximum transaction size. + if len(tx) > txmp.config.MaxTxBytes { + return 0, types.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)} } - } - if txmp.preCheck != nil { - if err := txmp.preCheck(tx); err != nil { - return types.ErrPreCheck{Reason: err} + // If a precheck hook is defined, call it before invoking the application. + if txmp.preCheck != nil { + if err := txmp.preCheck(tx); err != nil { + return 0, types.ErrPreCheck{Reason: err} + } } - } - if err := txmp.proxyAppConn.Error(); err != nil { - return err - } + // Early exit if the proxy connection has an error. + if err := txmp.proxyAppConn.Error(); err != nil { + return 0, err + } - txHash := tx.Key() + txKey := tx.Key() - // We add the transaction to the mempool's cache and if the - // transaction is already present in the cache, i.e. false is returned, then we - // check if we've seen this transaction and error if we have. - if !txmp.cache.Push(tx) { - txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID) - return types.ErrTxInCache + // Check for the transaction in the cache. + if !txmp.cache.Push(tx) { + // If the cached transaction is also in the pool, record its sender. 
+ if elt, ok := txmp.txByKey[txKey]; ok { + w := elt.Value.(*WrappedTx) + w.SetPeer(txInfo.SenderID) + } + return 0, types.ErrTxInCache + } + return txmp.height, nil + }() + if err != nil { + return err } - res, err := txmp.proxyAppConn.CheckTx(ctx, &abci.RequestCheckTx{Tx: tx}) + // Invoke an ABCI CheckTx for this transaction. + rsp, err := txmp.proxyAppConn.CheckTx(ctx, &abci.RequestCheckTx{Tx: tx}) if err != nil { txmp.cache.Remove(tx) return err } - - if txmp.recheckCursor != nil { - return errors.New("recheck cursor is non-nil") - } - wtx := &WrappedTx{ tx: tx, - hash: txHash, + hash: tx.Key(), timestamp: time.Now().UTC(), - height: txmp.height, + height: height, } - - txmp.defaultTxCallback(tx, res) - txmp.initTxCallback(wtx, res, txInfo) - + wtx.SetPeer(txInfo.SenderID) if cb != nil { - cb(res) + cb(rsp) } - - return nil + return txmp.addNewTransaction(wtx, rsp) } +// RemoveTxByKey removes the transaction with the specified key from the +// mempool. It reports an error if no such transaction exists. This operation +// does not remove the transaction from the cache. func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { - txmp.Lock() - defer txmp.Unlock() + txmp.mtx.Lock() + defer txmp.mtx.Unlock() + return txmp.removeTxByKey(txKey) +} - // remove the committed transaction from the transaction store and indexes - if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil { - txmp.removeTx(wtx, false) +// removeTxByKey removes the specified transaction key from the mempool. +// The caller must hold txmp.mtx exclusively. 
+func (txmp *TxMempool) removeTxByKey(key types.TxKey) error { + if elt, ok := txmp.txByKey[key]; ok { + w := elt.Value.(*WrappedTx) + delete(txmp.txByKey, key) + delete(txmp.txBySender, w.sender) + txmp.txs.Remove(elt) + elt.DetachPrev() + elt.DetachNext() + atomic.AddInt64(&txmp.txsBytes, -w.Size()) return nil } + return fmt.Errorf("transaction %x not found", key) +} - return errors.New("transaction not found") +// removeTxByElement removes the specified transaction element from the mempool. +// The caller must hold txmp.mtx exclusively. +func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) { + w := elt.Value.(*WrappedTx) + delete(txmp.txByKey, w.tx.Key()) + delete(txmp.txBySender, w.sender) + txmp.txs.Remove(elt) + elt.DetachPrev() + elt.DetachNext() + atomic.AddInt64(&txmp.txsBytes, -w.Size()) } -// Flush empties the mempool. It acquires a read-lock, fetches all the -// transactions currently in the transaction store and removes each transaction -// from the store and all indexes and finally resets the cache. -// -// NOTE: -// - Flushing the mempool may leave the mempool in an inconsistent state. +// Flush purges the contents of the mempool and the cache, leaving both empty. +// The current height is not modified by this operation. func (txmp *TxMempool) Flush() { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - txmp.heightIndex.Reset() - txmp.timestampIndex.Reset() + txmp.mtx.Lock() + defer txmp.mtx.Unlock() - for _, wtx := range txmp.txStore.GetAllTxs() { - txmp.removeTx(wtx, false) + // Remove all the transactions in the list explicitly, so that the sizes + // and indexes get updated properly. + cur := txmp.txs.Front() + for cur != nil { + next := cur.Next() + txmp.removeTxByElement(cur) + cur = next } - - atomic.SwapInt64(&txmp.sizeBytes, 0) txmp.cache.Reset() } -// ReapMaxBytesMaxGas returns a list of transactions within the provided size -// and gas constraints. Transaction are retrieved in priority order. 
-// -// NOTE: -// - Transactions returned are not removed from the mempool transaction -// store or indexes. -func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { +// allEntriesSorted returns a slice of all the transactions currently in the +// mempool, sorted in nonincreasing order by priority with ties broken by +// increasing order of arrival time. +func (txmp *TxMempool) allEntriesSorted() []*WrappedTx { txmp.mtx.RLock() defer txmp.mtx.RUnlock() - var ( - totalGas int64 - totalSize int64 - ) - - // wTxs contains a list of *WrappedTx retrieved from the priority queue that - // need to be re-enqueued prior to returning. - wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs()) - defer func() { - for _, wtx := range wTxs { - txmp.priorityIndex.PushTx(wtx) - } - }() - - txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs()) - for txmp.priorityIndex.NumTxs() > 0 { - wtx := txmp.priorityIndex.PopTx() - txs = append(txs, wtx.tx) - wTxs = append(wTxs, wtx) - size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) - - // Ensure we have capacity for the transaction with respect to the - // transaction size. - if maxBytes > -1 && totalSize+size > maxBytes { - return txs[:len(txs)-1] + all := make([]*WrappedTx, 0, len(txmp.txByKey)) + for _, tx := range txmp.txByKey { + all = append(all, tx.Value.(*WrappedTx)) + } + sort.Slice(all, func(i, j int) bool { + if all[i].priority == all[j].priority { + return all[i].timestamp.Before(all[j].timestamp) } + return all[i].priority > all[j].priority // N.B. higher priorities first + }) + return all +} - totalSize += size - - // ensure we have capacity for the transaction with respect to total gas - gas := totalGas + wtx.gasWanted - if maxGas > -1 && gas > maxGas { - return txs[:len(txs)-1] +// ReapMaxBytesMaxGas returns a slice of valid transactions that fit within the +// size and gas constraints. The results are ordered by nonincreasing priority, +// with ties broken by increasing order of arrival. 
Reaping transactions does +// not remove them from the mempool. +// +// If maxBytes < 0, no limit is set on the total size in bytes. +// If maxGas < 0, no limit is set on the total gas cost. +// +// If the mempool is empty or has no transactions fitting within the given +// constraints, the result will also be empty. +func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { + var totalGas, totalBytes int64 + + var keep []types.Tx //nolint:prealloc + for _, w := range txmp.allEntriesSorted() { + // N.B. When computing byte size, we need to include the overhead for + // encoding as protobuf to send to the application. + totalGas += w.gasWanted + totalBytes += types.ComputeProtoSizeForTxs([]types.Tx{w.tx}) + if (maxGas >= 0 && totalGas > maxGas) || (maxBytes >= 0 && totalBytes > maxBytes) { + break } - - totalGas = gas + keep = append(keep, w.tx) } - - return txs + return keep } -// ReapMaxTxs returns a list of transactions within the provided number of -// transactions bound. Transaction are retrieved in priority order. -// -// NOTE: -// - Transactions returned are not removed from the mempool transaction -// store or indexes. -func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() +// TxsWaitChan returns a channel that is closed when there is at least one +// transaction available to be gossiped. +func (txmp *TxMempool) TxsWaitChan() <-chan struct{} { return txmp.txs.WaitChan() } - numTxs := txmp.priorityIndex.NumTxs() - if max < 0 { - max = numTxs - } +// TxsFront returns the frontmost element of the pending transaction list. +// It will be nil if the mempool is empty. +func (txmp *TxMempool) TxsFront() *clist.CElement { return txmp.txs.Front() } - cap := tmmath.MinInt(numTxs, max) +// ReapMaxTxs returns up to max transactions from the mempool. The results are +// ordered by nonincreasing priority with ties broken by increasing order of +// arrival. 
Reaping transactions does not remove them from the mempool. +// +// If max < 0, all transactions in the mempool are reaped. +// +// The result may have fewer than max elements (possibly zero) if the mempool +// does not have that many transactions available. +func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { + var keep []types.Tx //nolint:prealloc - // wTxs contains a list of *WrappedTx retrieved from the priority queue that - // need to be re-enqueued prior to returning. - wTxs := make([]*WrappedTx, 0, cap) - txs := make([]types.Tx, 0, cap) - for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max { - wtx := txmp.priorityIndex.PopTx() - txs = append(txs, wtx.tx) - wTxs = append(wTxs, wtx) - } - for _, wtx := range wTxs { - txmp.priorityIndex.PushTx(wtx) + for _, w := range txmp.allEntriesSorted() { + if max >= 0 && len(keep) >= max { + break + } + keep = append(keep, w.tx) } - return txs + return keep } -// Update iterates over all the transactions provided by the block producer, -// removes them from the cache (if applicable), and removes -// the transactions from the main transaction store and associated indexes. -// If there are transactions remaining in the mempool, we initiate a -// re-CheckTx for them (if applicable), otherwise, we notify the caller more -// transactions are available. +// Update removes all the given transactions from the mempool and the cache, +// and updates the current block height. The blockTxs and deliverTxResponses +// must have the same length with each response corresponding to the tx at the +// same offset. +// +// If the configuration enables recheck, Update sends each remaining +// transaction after removing blockTxs to the ABCI CheckTx method. Any +// transactions marked as invalid during recheck are also removed. // -// NOTE: -// - The caller must explicitly acquire a write-lock. +// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before +// calling Update. 
func (txmp *TxMempool) Update( ctx context.Context, blockHeight int64, blockTxs types.Txs, - execTxResult []*abci.ExecTxResult, + deliverTxResponses []*abci.ExecTxResult, newPreFn PreCheckFunc, newPostFn PostCheckFunc, + recheck bool, ) error { + // Safety check: Transactions and responses must match in number. + if len(blockTxs) != len(deliverTxResponses) { + panic(fmt.Sprintf("mempool: got %d transactions but %d ExecTx responses", + len(blockTxs), len(deliverTxResponses))) + } + txmp.height = blockHeight txmp.notifiedTxsAvailable = false @@ -432,18 +404,17 @@ func (txmp *TxMempool) Update( } for i, tx := range blockTxs { - if execTxResult[i].Code == abci.CodeTypeOK { - // add the valid committed transaction to the cache (if missing) + // Add successful committed transactions to the cache (if they are not + // already present). Transactions that failed to commit are removed from + // the cache unless the operator has explicitly requested we keep them. + if deliverTxResponses[i].Code == abci.CodeTypeOK { _ = txmp.cache.Push(tx) } else if !txmp.config.KeepInvalidTxsInCache { - // allow invalid transactions to be re-submitted txmp.cache.Remove(tx) } - // remove the committed transaction from the transaction store and indexes - if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil { - txmp.removeTx(wtx, false) - } + // Regardless of success, remove the transaction from the mempool. + _ = txmp.removeTxByKey(tx.Key()) } txmp.purgeExpiredTxs(blockHeight) @@ -451,287 +422,304 @@ func (txmp *TxMempool) Update( // If there any uncommitted transactions left in the mempool, we either // initiate re-CheckTx per remaining transaction or notify that remaining // transactions are left. 
- if txmp.Size() > 0 { - if txmp.config.Recheck { - txmp.logger.Debug( - "executing re-CheckTx for all remaining transactions", - "num_txs", txmp.Size(), - "height", blockHeight, - ) - txmp.updateReCheckTxs(ctx) + size := txmp.Size() + txmp.metrics.Size.Set(float64(size)) + if size > 0 { + if recheck { + txmp.recheckTransactions(ctx) } else { txmp.notifyTxsAvailable() } } - - txmp.metrics.Size.Set(float64(txmp.Size())) return nil } -// initTxCallback is the callback invoked for a new unique transaction after CheckTx -// has been executed by the ABCI application for the first time on that transaction. -// CheckTx can be called again for the same transaction later when re-checking; -// however, this callback will not be called. +// addNewTransaction handles the ABCI CheckTx response for the first time a +// transaction is added to the mempool. A recheck after a block is committed +// goes to handleRecheckResult. // -// initTxCallback runs after the ABCI application executes CheckTx. -// It runs the postCheck hook if one is defined on the mempool. -// If the CheckTx response response code is not OK, or if the postCheck hook -// reports an error, the transaction is rejected. Otherwise, we attempt to insert -// the transaction into the mempool. +// If either the application rejected the transaction or a post-check hook is +// defined and rejects the transaction, it is discarded. // -// When inserting a transaction, we first check if there is sufficient capacity. -// If there is, the transaction is added to the txStore and all indexes. -// Otherwise, if the mempool is full, we attempt to find a lower priority transaction -// to evict in place of the new incoming transaction. If no such transaction exists, -// the new incoming transaction is rejected. +// Otherwise, if the mempool is full, check for lower-priority transactions +// that can be evicted to make room for the new one. 
If no such transactions +// exist, this transaction is logged and dropped; otherwise the selected +// transactions are evicted. // -// NOTE: -// - An explicit lock is NOT required. -func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, txInfo TxInfo) { +// Finally, the new transaction is added and size stats updated. +func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.ResponseCheckTx) error { + txmp.mtx.Lock() + defer txmp.mtx.Unlock() + var err error if txmp.postCheck != nil { - err = txmp.postCheck(wtx.tx, res) + err = txmp.postCheck(wtx.tx, checkTxRes) } - if err != nil || res.Code != abci.CodeTypeOK { - // ignore bad transactions + if err != nil || checkTxRes.Code != abci.CodeTypeOK { txmp.logger.Info( "rejected bad transaction", - "priority", wtx.priority, + "priority", wtx.Priority(), "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "peer_id", txInfo.SenderNodeID, - "code", res.Code, + "peer_id", wtx.peers, + "code", checkTxRes.Code, "post_check_err", err, ) txmp.metrics.FailedTxs.Add(1) + // Remove the invalid transaction from the cache, unless the operator has + // instructed us to keep invalid transactions. if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(wtx.tx) } + if err != nil { - res.MempoolError = err.Error() + return err } - return + // TODO(creachadair): Report an error for an invalid transaction. + // This is an API change, unfortunately, but should be made safe if it isn't. + // fmt.Errorf("invalid transaction: ABCI response code %d", checkTxRes.Code) + return nil } - sender := res.Sender - priority := res.Priority + priority := checkTxRes.Priority + sender := checkTxRes.Sender - if len(sender) > 0 { - if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil { - txmp.logger.Error( - "rejected incoming good transaction; tx already exists for sender", - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + // Disallow multiple concurrent transactions from the same sender assigned + // by the ABCI application. 
As a special case, an empty sender is not + // restricted. + if sender != "" { + elt, ok := txmp.txBySender[sender] + if ok { + w := elt.Value.(*WrappedTx) + txmp.logger.Debug( + "rejected valid incoming transaction; tx already exists for sender", + "tx", fmt.Sprintf("%X", w.tx.Hash()), "sender", sender, ) txmp.metrics.RejectedTxs.Add(1) - return + // TODO(creachadair): Report an error for a duplicate sender. + // This is an API change, unfortunately, but should be made safe if it isn't. + // fmt.Errorf("transaction rejected: tx already exists for sender %q (%X)", sender, w.tx.Hash()) + return nil } } + // At this point the application has ruled the transaction valid, but the + // mempool might be full. If so, find the lowest-priority items with lower + // priority than the application assigned to this new one, and evict as many + // of them as necessary to make room for tx. If no such items exist, we + // discard tx. + if err := txmp.canAddTx(wtx); err != nil { - evictTxs := txmp.priorityIndex.GetEvictableTxs( - priority, - int64(wtx.Size()), - txmp.SizeBytes(), - txmp.config.MaxTxsBytes, - ) - if len(evictTxs) == 0 { - // No room for the new incoming transaction so we just remove it from - // the cache. + var victims []*clist.CElement // eligible transactions for eviction + var victimBytes int64 // total size of victims + for cur := txmp.txs.Front(); cur != nil; cur = cur.Next() { + cw := cur.Value.(*WrappedTx) + if cw.priority < priority { + victims = append(victims, cur) + victimBytes += cw.Size() + } + } + + // If there are no suitable eviction candidates, or the total size of + // those candidates is not enough to make room for the new transaction, + // drop the new one. 
+ if len(victims) == 0 || victimBytes < wtx.Size() { txmp.cache.Remove(wtx.tx) txmp.logger.Error( - "rejected incoming good transaction; mempool full", + "rejected valid incoming transaction; mempool is full", "tx", fmt.Sprintf("%X", wtx.tx.Hash()), "err", err.Error(), ) txmp.metrics.RejectedTxs.Add(1) - return + // TODO(creachadair): Report an error for a full mempool. + // This is an API change, unfortunately, but should be made safe if it isn't. + // fmt.Errorf("transaction rejected: mempool is full (%X)", wtx.tx.Hash()) + return nil } - // evict an existing transaction(s) - // - // NOTE: - // - The transaction, toEvict, can be removed while a concurrent - // reCheckTx callback is being executed for the same transaction. - for _, toEvict := range evictTxs { - txmp.removeTx(toEvict, true) + txmp.logger.Debug("evicting lower-priority transactions", + "new_tx", tmstrings.LazySprintf("%X", wtx.tx.Hash()), + "new_priority", priority, + ) + + // Sort lowest priority items first so they will be evicted first. Break + // ties in favor of newer items (to maintain FIFO semantics in a group). + sort.Slice(victims, func(i, j int) bool { + iw := victims[i].Value.(*WrappedTx) + jw := victims[j].Value.(*WrappedTx) + if iw.Priority() == jw.Priority() { + return iw.timestamp.After(jw.timestamp) + } + return iw.Priority() < jw.Priority() + }) + + // Evict as many of the victims as necessary to make room. 
+ var evictedBytes int64 + for _, vic := range victims { + w := vic.Value.(*WrappedTx) + txmp.logger.Debug( - "evicted existing good transaction; mempool full", - "old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()), - "old_priority", toEvict.priority, - "new_tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "new_priority", wtx.priority, + "evicted valid existing transaction; mempool full", + "old_tx", tmstrings.LazySprintf("%X", w.tx.Hash()), + "old_priority", w.priority, ) + txmp.removeTxByElement(vic) + txmp.cache.Remove(w.tx) txmp.metrics.EvictedTxs.Add(1) + + // We may not need to evict all the eligible transactions. Bail out + // early if we have made enough room. + evictedBytes += w.Size() + if evictedBytes >= wtx.Size() { + break + } } } - wtx.gasWanted = res.GasWanted - wtx.priority = priority - wtx.sender = sender - wtx.peers = map[uint16]struct{}{ - txInfo.SenderID: {}, - } + wtx.SetGasWanted(checkTxRes.GasWanted) + wtx.SetPriority(priority) + wtx.SetSender(sender) + txmp.insertTx(wtx) txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size())) txmp.metrics.Size.Set(float64(txmp.Size())) - - txmp.insertTx(wtx) txmp.logger.Debug( - "inserted good transaction", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "inserted new valid transaction", + "priority", wtx.Priority(), + "tx", tmstrings.LazySprintf("%X", wtx.tx.Hash()), "height", txmp.height, "num_txs", txmp.Size(), ) txmp.notifyTxsAvailable() + return nil } -// defaultTxCallback is the CheckTx application callback used when a -// transaction is being re-checked (if re-checking is enabled). The -// caller must hold a mempool write-lock (via Lock()) and when -// executing Update(), if the mempool is non-empty and Recheck is -// enabled, then all remaining transactions will be rechecked via -// CheckTx. The order transactions are rechecked must be the same as -// the order in which this callback is called. 
-func (txmp *TxMempool) defaultTxCallback(tx types.Tx, res *abci.ResponseCheckTx) { - if txmp.recheckCursor == nil { - return +func (txmp *TxMempool) insertTx(wtx *WrappedTx) { + elt := txmp.txs.PushBack(wtx) + txmp.txByKey[wtx.tx.Key()] = elt + if s := wtx.Sender(); s != "" { + txmp.txBySender[s] = elt } - txmp.metrics.RecheckTimes.Add(1) - - wtx := txmp.recheckCursor.Value.(*WrappedTx) - - // Search through the remaining list of tx to recheck for a transaction that matches - // the one we received from the ABCI application. - for { - if bytes.Equal(tx, wtx.tx) { - // We've found a tx in the recheck list that matches the tx that we - // received from the ABCI application. - // Break, and use this transaction for further checks. - break - } - - txmp.logger.Error( - "re-CheckTx transaction mismatch", - "got", wtx.tx.Hash(), - "expected", tx.Key(), - ) + atomic.AddInt64(&txmp.txsBytes, wtx.Size()) +} - if txmp.recheckCursor == txmp.recheckEnd { - // we reached the end of the recheckTx list without finding a tx - // matching the one we received from the ABCI application. - // Return without processing any tx. - txmp.recheckCursor = nil - return - } +// handleRecheckResult handles the responses from ABCI CheckTx calls issued +// during the recheck phase of a block Update. It removes any transactions +// invalidated by the application. +// +// This method is NOT executed for the initial CheckTx on a new transaction; +// that case is handled by addNewTransaction instead. +func (txmp *TxMempool) handleRecheckResult(tx types.Tx, checkTxRes *abci.ResponseCheckTx) { + txmp.metrics.RecheckTimes.Add(1) + txmp.mtx.Lock() + defer txmp.mtx.Unlock() - txmp.recheckCursor = txmp.recheckCursor.Next() - wtx = txmp.recheckCursor.Value.(*WrappedTx) + // Find the transaction reported by the ABCI callback. It is possible the + // transaction was evicted during the recheck, in which case the transaction + // will be gone. 
+ elt, ok := txmp.txByKey[tx.Key()] + if !ok { + return } + wtx := elt.Value.(*WrappedTx) - // Only evaluate transactions that have not been removed. This can happen - // if an existing transaction is evicted during CheckTx and while this - // callback is being executed for the same evicted transaction. - if !txmp.txStore.IsTxRemoved(wtx.hash) { - var err error - if txmp.postCheck != nil { - err = txmp.postCheck(tx, res) - } - - if res.Code == abci.CodeTypeOK && err == nil { - wtx.priority = res.Priority - } else { - txmp.logger.Debug( - "existing transaction no longer valid; failed re-CheckTx callback", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "err", err, - "code", res.Code, - ) - - if wtx.gossipEl != txmp.recheckCursor { - panic("corrupted reCheckTx cursor") - } - - txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache) - } + // If a postcheck hook is defined, call it before checking the result. + var err error + if txmp.postCheck != nil { + err = txmp.postCheck(tx, checkTxRes) } - // move reCheckTx cursor to next element - if txmp.recheckCursor == txmp.recheckEnd { - txmp.recheckCursor = nil - } else { - txmp.recheckCursor = txmp.recheckCursor.Next() + if checkTxRes.Code == abci.CodeTypeOK && err == nil { + wtx.SetPriority(checkTxRes.Priority) + return // N.B. Size of mempool did not change } - if txmp.recheckCursor == nil { - txmp.logger.Debug("finished rechecking transactions") - - if txmp.Size() > 0 { - txmp.notifyTxsAvailable() - } + txmp.logger.Debug( + "existing transaction no longer valid; failed re-CheckTx callback", + "priority", wtx.Priority(), + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "err", err, + "code", checkTxRes.Code, + ) + txmp.removeTxByElement(elt) + txmp.metrics.FailedTxs.Add(1) + if !txmp.config.KeepInvalidTxsInCache { + txmp.cache.Remove(wtx.tx) } - txmp.metrics.Size.Set(float64(txmp.Size())) } -// updateReCheckTxs updates the recheck cursors using the gossipIndex. 
For -// each transaction, it executes CheckTx. The global callback defined on -// the proxyAppConn will be executed for each transaction after CheckTx is -// executed. +// recheckTransactions initiates re-CheckTx ABCI calls for all the transactions +// currently in the mempool. It reports the number of recheck calls that were +// successfully initiated. // -// NOTE: -// - The caller must have a write-lock when executing updateReCheckTxs. -func (txmp *TxMempool) updateReCheckTxs(ctx context.Context) { +// Precondition: The mempool is not empty. +// The caller must hold txmp.mtx exclusively. +func (txmp *TxMempool) recheckTransactions(ctx context.Context) { if txmp.Size() == 0 { - panic("attempted to update re-CheckTx txs when mempool is empty") + panic("mempool: cannot run recheck on an empty mempool") } + txmp.logger.Debug( + "executing re-CheckTx for all remaining transactions", + "num_txs", txmp.Size(), + "height", txmp.height, + ) - txmp.recheckCursor = txmp.gossipIndex.Front() - txmp.recheckEnd = txmp.gossipIndex.Back() - - for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() { - wtx := e.Value.(*WrappedTx) - - // Only execute CheckTx if the transaction is not marked as removed which - // could happen if the transaction was evicted. - if !txmp.txStore.IsTxRemoved(wtx.hash) { - res, err := txmp.proxyAppConn.CheckTx(ctx, &abci.RequestCheckTx{ - Tx: wtx.tx, - Type: abci.CheckTxType_Recheck, + // Collect transactions currently in the mempool requiring recheck. + wtxs := make([]*WrappedTx, 0, txmp.txs.Len()) + for e := txmp.txs.Front(); e != nil; e = e.Next() { + wtxs = append(wtxs, e.Value.(*WrappedTx)) + } + + // Issue CheckTx calls for each remaining transaction, and when all the + // rechecks are complete signal watchers that transactions may be available. 
+ go func() { + g, start := taskgroup.New(nil).Limit(2 * runtime.NumCPU()) + + for _, wtx := range wtxs { + wtx := wtx + start(func() error { + rsp, err := txmp.proxyAppConn.CheckTx(ctx, &abci.RequestCheckTx{ + Tx: wtx.tx, + Type: abci.CheckTxType_Recheck, + }) + if err != nil { + txmp.logger.Error("failed to execute CheckTx during recheck", + "err", err, "hash", fmt.Sprintf("%x", wtx.tx.Hash())) + } else { + txmp.handleRecheckResult(wtx.tx, rsp) + } + return nil }) - if err != nil { - // no need in retrying since the tx will be rechecked after the next block - txmp.logger.Error("failed to execute CheckTx during rechecking", "err", err) - continue - } - txmp.defaultTxCallback(wtx.tx, res) } - } + if err := txmp.proxyAppConn.Flush(ctx); err != nil { + txmp.logger.Error("failed to flush transactions during recheck", "err", err) + } - if err := txmp.proxyAppConn.Flush(ctx); err != nil { - txmp.logger.Error("failed to flush transactions during rechecking", "err", err) - } + // When recheck is complete, trigger a notification for more transactions. + _ = g.Wait() + txmp.mtx.Lock() + defer txmp.mtx.Unlock() + txmp.notifyTxsAvailable() + }() } // canAddTx returns an error if we cannot insert the provided *WrappedTx into -// the mempool due to mempool configured constraints. If it returns nil, -// the transaction can be inserted into the mempool. +// the mempool due to mempool configured constraints. Otherwise, nil is +// returned and the transaction can be inserted into the mempool. 
func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { - var ( - numTxs = txmp.Size() - sizeBytes = txmp.SizeBytes() - ) + numTxs := txmp.Size() + txBytes := txmp.SizeBytes() - if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes { + if numTxs >= txmp.config.Size || wtx.Size()+txBytes > txmp.config.MaxTxsBytes { return types.ErrMempoolIsFull{ NumTxs: numTxs, MaxTxs: txmp.config.Size, - TxsBytes: sizeBytes, + TxsBytes: txBytes, MaxTxsBytes: txmp.config.MaxTxsBytes, } } @@ -739,96 +727,40 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { return nil } -func (txmp *TxMempool) insertTx(wtx *WrappedTx) { - txmp.txStore.SetTx(wtx) - txmp.priorityIndex.PushTx(wtx) - txmp.heightIndex.Insert(wtx) - txmp.timestampIndex.Insert(wtx) - - // Insert the transaction into the gossip index and mark the reference to the - // linked-list element, which will be needed at a later point when the - // transaction is removed. - gossipEl := txmp.gossipIndex.PushBack(wtx) - wtx.gossipEl = gossipEl - - atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size())) -} - -func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) { - if txmp.txStore.IsTxRemoved(wtx.hash) { - return - } - - txmp.txStore.RemoveTx(wtx) - txmp.priorityIndex.RemoveTx(wtx) - txmp.heightIndex.Remove(wtx) - txmp.timestampIndex.Remove(wtx) - - // Remove the transaction from the gossip index and cleanup the linked-list - // element so it can be garbage collected. - txmp.gossipIndex.Remove(wtx.gossipEl) - wtx.gossipEl.DetachPrev() - - atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size())) - - if removeFromCache { - txmp.cache.Remove(wtx.tx) - } -} - -// purgeExpiredTxs removes all transactions that have exceeded their respective -// height- and/or time-based TTLs from their respective indexes. Every expired -// transaction will be removed from the mempool, but preserved in the cache. 
+// purgeExpiredTxs removes all transactions from the mempool that have exceeded +// their respective height or time-based limits as of the given blockHeight. +// Transactions removed by this operation are not removed from the cache. // -// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which -// the caller has a write-lock on the mempool and so we can safely iterate over -// the height and time based indexes. +// The caller must hold txmp.mtx exclusively. func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { - now := time.Now() - expiredTxs := make(map[types.TxKey]*WrappedTx) - - if txmp.config.TTLNumBlocks > 0 { - purgeIdx := -1 - for i, wtx := range txmp.heightIndex.txs { - if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks { - expiredTxs[wtx.tx.Key()] = wtx - purgeIdx = i - } else { - // since the index is sorted, we know no other txs can be be purged - break - } - } - - if purgeIdx >= 0 { - txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:] - } + if txmp.config.TTLNumBlocks == 0 && txmp.config.TTLDuration == 0 { + return // nothing to do } - if txmp.config.TTLDuration > 0 { - purgeIdx := -1 - for i, wtx := range txmp.timestampIndex.txs { - if now.Sub(wtx.timestamp) > txmp.config.TTLDuration { - expiredTxs[wtx.tx.Key()] = wtx - purgeIdx = i - } else { - // since the index is sorted, we know no other txs can be be purged - break - } - } - - if purgeIdx >= 0 { - txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:] + now := time.Now() + cur := txmp.txs.Front() + for cur != nil { + // N.B. Grab the next element first, since if we remove cur its successor + // will be invalidated. 
+ next := cur.Next() + + w := cur.Value.(*WrappedTx) + if txmp.config.TTLNumBlocks > 0 && (blockHeight-w.height) > txmp.config.TTLNumBlocks { + txmp.removeTxByElement(cur) + txmp.cache.Remove(w.tx) + txmp.metrics.EvictedTxs.Add(1) + } else if txmp.config.TTLDuration > 0 && now.Sub(w.timestamp) > txmp.config.TTLDuration { + txmp.removeTxByElement(cur) + txmp.cache.Remove(w.tx) + txmp.metrics.EvictedTxs.Add(1) } - } - - for _, wtx := range expiredTxs { - txmp.removeTx(wtx, false) + cur = next } } func (txmp *TxMempool) notifyTxsAvailable() { if txmp.Size() == 0 { - panic("attempt to notify txs available but mempool is empty!") + return // nothing to do } if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index 946377b1cd..3505d7040a 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -86,9 +86,19 @@ func setup(t testing.TB, app abciclient.Client, cacheSize int, options ...TxMemp return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, app, options...) } -func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { - t.Helper() +// mustCheckTx invokes txmp.CheckTx for the given transaction and waits until +// its callback has finished executing. It fails t if CheckTx fails. 
+func mustCheckTx(ctx context.Context, t *testing.T, txmp *TxMempool, spec string) { + done := make(chan struct{}) + if err := txmp.CheckTx(ctx, []byte(spec), func(*abci.ResponseCheckTx) { + close(done) + }, TxInfo{}); err != nil { + t.Fatalf("CheckTx for %q failed: %v", spec, err) + } + <-done +} +func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { txs := make([]testTx, numTxs) txInfo := TxInfo{SenderID: peerID} @@ -122,6 +132,10 @@ func convertTex(in []testTx) types.Txs { } func TestTxMempool_TxsAvailable(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -173,7 +187,7 @@ func TestTxMempool_TxsAvailable(t *testing.T) { // commit half the transactions and ensure we fire an event txmp.Lock() - require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil, true)) txmp.Unlock() ensureTxFire() ensureNoTxFire() @@ -210,13 +224,94 @@ func TestTxMempool_Size(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil, true)) txmp.Unlock() require.Equal(t, len(rawTxs)/2, txmp.Size()) require.Equal(t, int64(2850), txmp.SizeBytes()) } +func TestTxMempool_Eviction(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + + txmp := setup(t, client, 1000) + txmp.config.Size = 5 + txmp.config.MaxTxsBytes = 60 + txExists := func(spec string) bool { + txmp.Lock() + defer txmp.Unlock() + key := types.Tx(spec).Key() + _, ok := txmp.txByKey[key] + return ok + } + t.Cleanup(client.Wait) + + // 
A transaction bigger than the mempool should be rejected even when there + // are slots available. + mustCheckTx(ctx, t, txmp, "big=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef=1") + require.Equal(t, 0, txmp.Size()) + + // Nearly-fill the mempool with a low-priority transaction, to show that it + // is evicted even when slots are available for a higher-priority tx. + const bigTx = "big=0123456789abcdef0123456789abcdef0123456789abcdef01234=2" + mustCheckTx(ctx, t, txmp, bigTx) + require.Equal(t, 1, txmp.Size()) // bigTx is the only element + require.True(t, txExists(bigTx)) + require.Equal(t, int64(len(bigTx)), txmp.SizeBytes()) + + // The next transaction should evict bigTx, because it is higher priority + // but does not fit on size. + mustCheckTx(ctx, t, txmp, "key1=0000=25") + require.True(t, txExists("key1=0000=25")) + require.False(t, txExists(bigTx)) + require.False(t, txmp.cache.Has([]byte(bigTx))) + require.Equal(t, int64(len("key1=0000=25")), txmp.SizeBytes()) + + // Now fill up the rest of the slots with other transactions. + mustCheckTx(ctx, t, txmp, "key2=0001=5") + mustCheckTx(ctx, t, txmp, "key3=0002=10") + mustCheckTx(ctx, t, txmp, "key4=0003=3") + mustCheckTx(ctx, t, txmp, "key5=0004=3") + + // A new transaction with low priority should be discarded. + mustCheckTx(ctx, t, txmp, "key6=0005=1") + require.False(t, txExists("key6=0005=1")) + + // A new transaction with higher priority should evict key5, which is the + // newest of the two transactions with lowest priority. + mustCheckTx(ctx, t, txmp, "key7=0006=7") + require.True(t, txExists("key7=0006=7")) // new transaction added + require.False(t, txExists("key5=0004=3")) // newest low-priority tx evicted + require.True(t, txExists("key4=0003=3")) // older low-priority tx retained + + // Another new transaction evicts the other low-priority element. 
+ mustCheckTx(ctx, t, txmp, "key8=0007=20") + require.True(t, txExists("key8=0007=20")) + require.False(t, txExists("key4=0003=3")) + + // Now the lowest-priority tx is 5, so that should be the next to go. + mustCheckTx(ctx, t, txmp, "key9=0008=9") + require.True(t, txExists("key9=0008=9")) + require.False(t, txExists("k3y2=0001=5")) + + // Add a transaction that requires eviction of multiple lower-priority + // entries, in order to fit the size of the element. + mustCheckTx(ctx, t, txmp, "key10=0123456789abcdef=11") // evict 10, 9, 7; keep 25, 20, 11 + require.True(t, txExists("key1=0000=25")) + require.True(t, txExists("key8=0007=20")) + require.True(t, txExists("key10=0123456789abcdef=11")) + require.False(t, txExists("key3=0002=10")) + require.False(t, txExists("key9=0008=9")) + require.False(t, txExists("key7=0006=7")) +} + func TestTxMempool_Flush(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -243,7 +338,7 @@ func TestTxMempool_Flush(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil, true)) txmp.Unlock() txmp.Flush() @@ -449,6 +544,10 @@ func TestTxMempool_CheckTxSameSender(t *testing.T) { } func TestTxMempool_ConcurrentTxs(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -501,7 +600,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(ctx, height, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, height, reapedTxs, responses, nil, nil, true)) txmp.Unlock() height++ @@ -537,7 +636,6 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { tTxs := checkTxs(ctx, t, txmp, 100, 0) require.Equal(t, len(tTxs), txmp.Size()) - require.Equal(t, 100, txmp.heightIndex.Size()) // reap 5 txs at the next height -- no txs 
should expire reapedTxs := txmp.ReapMaxTxs(5) @@ -547,16 +645,14 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(ctx, txmp.height+1, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, txmp.height+1, reapedTxs, responses, nil, nil, true)) txmp.Unlock() require.Equal(t, 95, txmp.Size()) - require.Equal(t, 95, txmp.heightIndex.Size()) // check more txs at height 101 _ = checkTxs(ctx, t, txmp, 50, 1) require.Equal(t, 145, txmp.Size()) - require.Equal(t, 145, txmp.heightIndex.Size()) // Reap 5 txs at a height that would expire all the transactions from before // the previous Update (height 100). @@ -573,11 +669,10 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(ctx, txmp.height+10, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, txmp.height+10, reapedTxs, responses, nil, nil, true)) txmp.Unlock() require.GreaterOrEqual(t, txmp.Size(), 45) - require.GreaterOrEqual(t, txmp.heightIndex.Size(), 45) } func TestTxMempool_CheckTxPostCheckError(t *testing.T) { @@ -622,10 +717,17 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { expectedErrString := "" if testCase.err != nil { expectedErrString = testCase.err.Error() + require.Equal(t, expectedErrString, txmp.postCheck(tx, res).Error()) + } else { + require.Equal(t, nil, txmp.postCheck(tx, res)) } - require.Equal(t, expectedErrString, res.MempoolError) } - require.NoError(t, txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0})) + if testCase.err == nil { + require.NoError(t, txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0})) + } else { + err = txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0}) + require.EqualError(t, err, "test error") + } }) } } diff --git a/internal/mempool/metrics.gen.go b/internal/mempool/metrics.gen.go new file mode 100644 index 0000000000..100c5e71cb --- /dev/null +++ b/internal/mempool/metrics.gen.go @@ -0,0 +1,67 @@ +// Code 
generated by metricsgen. DO NOT EDIT. + +package mempool + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "size", + Help: "Number of uncommitted transactions in the mempool.", + }, labels).With(labelsAndValues...), + TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "tx_size_bytes", + Help: "Histogram of transaction sizes in bytes.", + + Buckets: stdprometheus.ExponentialBuckets(1, 3, 7), + }, labels).With(labelsAndValues...), + FailedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "failed_txs", + Help: "Number of failed transactions.", + }, labels).With(labelsAndValues...), + RejectedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "rejected_txs", + Help: "Number of rejected transactions.", + }, labels).With(labelsAndValues...), + EvictedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "evicted_txs", + Help: "Number of evicted transactions.", + }, labels).With(labelsAndValues...), + RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "recheck_times", + Help: "Number of times transactions are rechecked in the mempool.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + Size: 
discard.NewGauge(), + TxSizeBytes: discard.NewHistogram(), + FailedTxs: discard.NewCounter(), + RejectedTxs: discard.NewCounter(), + EvictedTxs: discard.NewCounter(), + RecheckTimes: discard.NewCounter(), + } +} diff --git a/internal/mempool/metrics.go b/internal/mempool/metrics.go index 5d3022e80e..5323076351 100644 --- a/internal/mempool/metrics.go +++ b/internal/mempool/metrics.go @@ -2,9 +2,6 @@ package mempool import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -13,14 +10,16 @@ const ( MetricsSubsystem = "mempool" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. // see MetricsProvider for descriptions. type Metrics struct { - // Size of the mempool. + // Number of uncommitted transactions in the mempool. Size metrics.Gauge - // Histogram of transaction sizes, in bytes. - TxSizeBytes metrics.Histogram + // Histogram of transaction sizes in bytes. + TxSizeBytes metrics.Histogram `metrics_buckettype:"exp" metrics_bucketsizes:"1,3,7"` // Number of failed transactions. FailedTxs metrics.Counter @@ -29,80 +28,16 @@ type Metrics struct { // transactions that passed CheckTx but failed to make it into the mempool // due to resource limits, e.g. mempool is full and no lower priority // transactions exist in the mempool. + //metrics:Number of rejected transactions. RejectedTxs metrics.Counter // EvictedTxs defines the number of evicted transactions. These are valid // transactions that passed CheckTx and existed in the mempool but were later // evicted to make room for higher priority valid transactions that passed // CheckTx. + //metrics:Number of evicted transactions. EvictedTxs metrics.Counter // Number of times transactions are rechecked in the mempool. 
RecheckTimes metrics.Counter } - -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). -func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "size", - Help: "Size of the mempool (number of uncommitted transactions).", - }, labels).With(labelsAndValues...), - - TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "tx_size_bytes", - Help: "Transaction sizes in bytes.", - Buckets: stdprometheus.ExponentialBuckets(1, 3, 17), - }, labels).With(labelsAndValues...), - - FailedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "failed_txs", - Help: "Number of failed transactions.", - }, labels).With(labelsAndValues...), - - RejectedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "rejected_txs", - Help: "Number of rejected transactions.", - }, labels).With(labelsAndValues...), - - EvictedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "evicted_txs", - Help: "Number of evicted transactions.", - }, labels).With(labelsAndValues...), - - RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "recheck_times", - Help: "Number of times transactions are rechecked in the mempool.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. 
-func NopMetrics() *Metrics { - return &Metrics{ - Size: discard.NewGauge(), - TxSizeBytes: discard.NewHistogram(), - FailedTxs: discard.NewCounter(), - RejectedTxs: discard.NewCounter(), - EvictedTxs: discard.NewCounter(), - RecheckTimes: discard.NewCounter(), - } -} diff --git a/internal/mempool/mocks/mempool.go b/internal/mempool/mocks/mempool.go index b82d7d63e8..b699ef3c9b 100644 --- a/internal/mempool/mocks/mempool.go +++ b/internal/mempool/mocks/mempool.go @@ -11,8 +11,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -159,13 +157,13 @@ func (_m *Mempool) Unlock() { _m.Called() } -// Update provides a mock function with given fields: ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn -func (_m *Mempool) Update(ctx context.Context, blockHeight int64, blockTxs types.Txs, txResults []*abcitypes.ExecTxResult, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc) error { - ret := _m.Called(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn) +// Update provides a mock function with given fields: ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn, recheck +func (_m *Mempool) Update(ctx context.Context, blockHeight int64, blockTxs types.Txs, txResults []*abcitypes.ExecTxResult, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc, recheck bool) error { + ret := _m.Called(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn, recheck) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, int64, types.Txs, []*abcitypes.ExecTxResult, mempool.PreCheckFunc, mempool.PostCheckFunc) error); ok { - r0 = rf(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn) + if rf, ok := ret.Get(0).(func(context.Context, int64, types.Txs, []*abcitypes.ExecTxResult, mempool.PreCheckFunc, mempool.PostCheckFunc, bool) error); ok { + r0 = rf(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn, recheck) } else { r0 = ret.Error(0) } @@ -173,8 
+171,13 @@ func (_m *Mempool) Update(ctx context.Context, blockHeight int64, blockTxs types return r0 } -// NewMempool creates a new instance of Mempool. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewMempool(t testing.TB) *Mempool { +type mockConstructorTestingTNewMempool interface { + mock.TestingT + Cleanup(func()) +} + +// NewMempool creates a new instance of Mempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewMempool(t mockConstructorTestingTNewMempool) *Mempool { mock := &Mempool{} mock.Mock.Test(t) diff --git a/internal/mempool/priority_queue.go b/internal/mempool/priority_queue.go deleted file mode 100644 index e31997397e..0000000000 --- a/internal/mempool/priority_queue.go +++ /dev/null @@ -1,158 +0,0 @@ -package mempool - -import ( - "container/heap" - "sort" - "sync" -) - -var _ heap.Interface = (*TxPriorityQueue)(nil) - -// TxPriorityQueue defines a thread-safe priority queue for valid transactions. -type TxPriorityQueue struct { - mtx sync.RWMutex - txs []*WrappedTx -} - -func NewTxPriorityQueue() *TxPriorityQueue { - pq := &TxPriorityQueue{ - txs: make([]*WrappedTx, 0), - } - - heap.Init(pq) - - return pq -} - -// GetEvictableTxs attempts to find and return a list of *WrappedTx than can be -// evicted to make room for another *WrappedTx with higher priority. If no such -// list of *WrappedTx exists, nil will be returned. The returned list of *WrappedTx -// indicate that these transactions can be removed due to them being of lower -// priority and that their total sum in size allows room for the incoming -// transaction according to the mempool's configured limits. 
-func (pq *TxPriorityQueue) GetEvictableTxs(priority, txSize, totalSize, cap int64) []*WrappedTx { - pq.mtx.RLock() - defer pq.mtx.RUnlock() - - txs := make([]*WrappedTx, len(pq.txs)) - copy(txs, pq.txs) - - sort.Slice(txs, func(i, j int) bool { - return txs[i].priority < txs[j].priority - }) - - var ( - toEvict []*WrappedTx - i int - ) - - currSize := totalSize - - // Loop over all transactions in ascending priority order evaluating those - // that are only of less priority than the provided argument. We continue - // evaluating transactions until there is sufficient capacity for the new - // transaction (size) as defined by txSize. - for i < len(txs) && txs[i].priority < priority { - toEvict = append(toEvict, txs[i]) - currSize -= int64(txs[i].Size()) - - if currSize+txSize <= cap { - return toEvict - } - - i++ - } - - return nil -} - -// NumTxs returns the number of transactions in the priority queue. It is -// thread safe. -func (pq *TxPriorityQueue) NumTxs() int { - pq.mtx.RLock() - defer pq.mtx.RUnlock() - - return len(pq.txs) -} - -// RemoveTx removes a specific transaction from the priority queue. -func (pq *TxPriorityQueue) RemoveTx(tx *WrappedTx) { - pq.mtx.Lock() - defer pq.mtx.Unlock() - - if tx.heapIndex < len(pq.txs) { - heap.Remove(pq, tx.heapIndex) - } -} - -// PushTx adds a valid transaction to the priority queue. It is thread safe. -func (pq *TxPriorityQueue) PushTx(tx *WrappedTx) { - pq.mtx.Lock() - defer pq.mtx.Unlock() - - heap.Push(pq, tx) -} - -// PopTx removes the top priority transaction from the queue. It is thread safe. -func (pq *TxPriorityQueue) PopTx() *WrappedTx { - pq.mtx.Lock() - defer pq.mtx.Unlock() - - x := heap.Pop(pq) - if x != nil { - return x.(*WrappedTx) - } - - return nil -} - -// Push implements the Heap interface. -// -// NOTE: A caller should never call Push. Use PushTx instead. 
-func (pq *TxPriorityQueue) Push(x interface{}) { - n := len(pq.txs) - item := x.(*WrappedTx) - item.heapIndex = n - pq.txs = append(pq.txs, item) -} - -// Pop implements the Heap interface. -// -// NOTE: A caller should never call Pop. Use PopTx instead. -func (pq *TxPriorityQueue) Pop() interface{} { - old := pq.txs - n := len(old) - item := old[n-1] - old[n-1] = nil // avoid memory leak - item.heapIndex = -1 // for safety - pq.txs = old[0 : n-1] - return item -} - -// Len implements the Heap interface. -// -// NOTE: A caller should never call Len. Use NumTxs instead. -func (pq *TxPriorityQueue) Len() int { - return len(pq.txs) -} - -// Less implements the Heap interface. It returns true if the transaction at -// position i in the queue is of less priority than the transaction at position j. -func (pq *TxPriorityQueue) Less(i, j int) bool { - // If there exists two transactions with the same priority, consider the one - // that we saw the earliest as the higher priority transaction. - if pq.txs[i].priority == pq.txs[j].priority { - return pq.txs[i].timestamp.Before(pq.txs[j].timestamp) - } - - // We want Pop to give us the highest, not lowest, priority so we use greater - // than here. - return pq.txs[i].priority > pq.txs[j].priority -} - -// Swap implements the Heap interface. It swaps two transactions in the queue. 
-func (pq *TxPriorityQueue) Swap(i, j int) { - pq.txs[i], pq.txs[j] = pq.txs[j], pq.txs[i] - pq.txs[i].heapIndex = i - pq.txs[j].heapIndex = j -} diff --git a/internal/mempool/priority_queue_test.go b/internal/mempool/priority_queue_test.go deleted file mode 100644 index ddc84806da..0000000000 --- a/internal/mempool/priority_queue_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package mempool - -import ( - "math/rand" - "sort" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestTxPriorityQueue(t *testing.T) { - pq := NewTxPriorityQueue() - numTxs := 1000 - - priorities := make([]int, numTxs) - - var wg sync.WaitGroup - for i := 1; i <= numTxs; i++ { - priorities[i-1] = i - wg.Add(1) - - go func(i int) { - pq.PushTx(&WrappedTx{ - priority: int64(i), - timestamp: time.Now(), - }) - - wg.Done() - }(i) - } - - sort.Sort(sort.Reverse(sort.IntSlice(priorities))) - - wg.Wait() - require.Equal(t, numTxs, pq.NumTxs()) - - // Wait a second and push a tx with a duplicate priority - time.Sleep(time.Second) - now := time.Now() - pq.PushTx(&WrappedTx{ - priority: 1000, - timestamp: now, - }) - require.Equal(t, 1001, pq.NumTxs()) - - tx := pq.PopTx() - require.Equal(t, 1000, pq.NumTxs()) - require.Equal(t, int64(1000), tx.priority) - require.NotEqual(t, now, tx.timestamp) - - gotPriorities := make([]int, 0) - for pq.NumTxs() > 0 { - gotPriorities = append(gotPriorities, int(pq.PopTx().priority)) - } - - require.Equal(t, priorities, gotPriorities) -} - -func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) { - pq := NewTxPriorityQueue() - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - - values := make([]int, 1000) - - for i := 0; i < 1000; i++ { - tx := make([]byte, 5) // each tx is 5 bytes - _, err := rng.Read(tx) - require.NoError(t, err) - - x := rng.Intn(100000) - pq.PushTx(&WrappedTx{ - tx: tx, - priority: int64(x), - }) - - values[i] = x - } - - sort.Ints(values) - - max := values[len(values)-1] - min := values[0] - totalSize := 
int64(len(values) * 5) - - testCases := []struct { - name string - priority, txSize, totalSize, cap int64 - expectedLen int - }{ - { - name: "larest priority; single tx", - priority: int64(max + 1), - txSize: 5, - totalSize: totalSize, - cap: totalSize, - expectedLen: 1, - }, - { - name: "larest priority; multi tx", - priority: int64(max + 1), - txSize: 17, - totalSize: totalSize, - cap: totalSize, - expectedLen: 4, - }, - { - name: "larest priority; out of capacity", - priority: int64(max + 1), - txSize: totalSize + 1, - totalSize: totalSize, - cap: totalSize, - expectedLen: 0, - }, - { - name: "smallest priority; no tx", - priority: int64(min - 1), - txSize: 5, - totalSize: totalSize, - cap: totalSize, - expectedLen: 0, - }, - { - name: "small priority; no tx", - priority: int64(min), - txSize: 5, - totalSize: totalSize, - cap: totalSize, - expectedLen: 0, - }, - } - - for _, tc := range testCases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - evictTxs := pq.GetEvictableTxs(tc.priority, tc.txSize, tc.totalSize, tc.cap) - require.Len(t, evictTxs, tc.expectedLen) - }) - } -} - -func TestTxPriorityQueue_RemoveTx(t *testing.T) { - pq := NewTxPriorityQueue() - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - numTxs := 1000 - - values := make([]int, numTxs) - - for i := 0; i < numTxs; i++ { - x := rng.Intn(100000) - pq.PushTx(&WrappedTx{ - priority: int64(x), - }) - - values[i] = x - } - - require.Equal(t, numTxs, pq.NumTxs()) - - sort.Ints(values) - max := values[len(values)-1] - - wtx := pq.txs[pq.NumTxs()/2] - pq.RemoveTx(wtx) - require.Equal(t, numTxs-1, pq.NumTxs()) - require.Equal(t, int64(max), pq.PopTx().priority) - require.Equal(t, numTxs-2, pq.NumTxs()) - - require.NotPanics(t, func() { - pq.RemoveTx(&WrappedTx{heapIndex: numTxs}) - pq.RemoveTx(&WrappedTx{heapIndex: numTxs + 1}) - }) - require.Equal(t, numTxs-2, pq.NumTxs()) -} diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index ea199b28b0..78afd150f1 100644 --- 
a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -6,10 +6,10 @@ import ( "fmt" "runtime/debug" "sync" - "time" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ -22,13 +22,6 @@ var ( _ p2p.Wrapper = (*protomem.Message)(nil) ) -// PeerManager defines the interface contract required for getting necessary -// peer information. This should eventually be replaced with a message-oriented -// approach utilizing the p2p stack. -type PeerManager interface { - GetHeight(types.NodeID) int64 -} - // Reactor implements a service that contains mempool of txs that are broadcasted // amongst peers. It maintains a map from peer ID to counter, to prevent gossiping // txs to the peers you received it from. @@ -40,9 +33,8 @@ type Reactor struct { mempool *TxMempool ids *IDs - getPeerHeight func(types.NodeID) int64 - peerEvents p2p.PeerEventSubscriber - chCreator p2p.ChannelCreator + peerEvents p2p.PeerEventSubscriber + chCreator p2p.ChannelCreator // observePanic is a function for observing panics that were recovered in methods on // Reactor. observePanic is called with the recovered value. 
@@ -59,18 +51,16 @@ func NewReactor( txmp *TxMempool, chCreator p2p.ChannelCreator, peerEvents p2p.PeerEventSubscriber, - getPeerHeight func(types.NodeID) int64, ) *Reactor { r := &Reactor{ - logger: logger, - cfg: cfg, - mempool: txmp, - ids: NewMempoolIDs(), - chCreator: chCreator, - peerEvents: peerEvents, - getPeerHeight: getPeerHeight, - peerRoutines: make(map[types.NodeID]context.CancelFunc), - observePanic: defaultObservePanic, + logger: logger, + cfg: cfg, + mempool: txmp, + ids: NewMempoolIDs(), + chCreator: chCreator, + peerEvents: peerEvents, + peerRoutines: make(map[types.NodeID]context.CancelFunc), + observePanic: defaultObservePanic, } r.BaseService = *service.NewBaseService(logger, "Mempool", r) @@ -153,6 +143,15 @@ func (r *Reactor) handleMempoolMessage(ctx context.Context, envelope *p2p.Envelo // problem. continue } + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + // Do not propagate context + // cancellation errors, but do + // not continue to check + // transactions from this + // message if we are shutting down. + return nil + } + logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) @@ -196,7 +195,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope) (er // processMempoolCh implements a blocking event loop where we listen for p2p // Envelope messages from the mempoolCh. -func (r *Reactor) processMempoolCh(ctx context.Context, mempoolCh *p2p.Channel) { +func (r *Reactor) processMempoolCh(ctx context.Context, mempoolCh p2p.Channel) { iter := mempoolCh.Receive(ctx) for iter.Next(ctx) { envelope := iter.Envelope() @@ -217,7 +216,7 @@ func (r *Reactor) processMempoolCh(ctx context.Context, mempoolCh *p2p.Channel) // goroutine or not. If not, we start one for the newly added peer. For down or // removed peers, we remove the peer from the mempool peer ID set and signal to // stop the tx broadcasting goroutine. 
-func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, mempoolCh *p2p.Channel) { +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, mempoolCh p2p.Channel) { r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() @@ -266,7 +265,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, mempoolCh *p2p.Channel) { +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, mempoolCh p2p.Channel) { for { select { case <-ctx.Done(): @@ -277,7 +276,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerU } } -func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, mempoolCh *p2p.Channel) { +func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, mempoolCh p2p.Channel) { peerMempoolID := r.ids.GetForPeer(peerID) var nextGossipTx *clist.CElement @@ -309,8 +308,8 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, m select { case <-ctx.Done(): return - case <-r.mempool.WaitForNextTx(): // wait until a tx is available - if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil { + case <-r.mempool.TxsWaitChan(): // wait until a tx is available + if nextGossipTx = r.mempool.TxsFront(); nextGossipTx == nil { continue } } @@ -318,18 +317,9 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, m memTx := nextGossipTx.Value.(*WrappedTx) - if r.getPeerHeight != nil { - height := r.getPeerHeight(peerID) - if height > 0 && height < memTx.height-1 { - // allow for a lag of one block - 
time.Sleep(PeerCatchupSleepIntervalMS * time.Millisecond) - continue - } - } - // NOTE: Transaction batching was disabled due to: // https://github.com/tendermint/tendermint/issues/5796 - if ok := r.mempool.txStore.TxHasPeer(memTx.hash, peerMempoolID); !ok { + if !memTx.HasPeer(peerMempoolID) { // Send the mempool tx to the corresponding peer. Note, the peer may be // behind and thus would not be able to process the mempool tx correctly. if err := mempoolCh.Send(ctx, p2p.Envelope{ @@ -341,9 +331,8 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, m return } - r.logger.Debug( - "gossiped tx to peer", - "tx", fmt.Sprintf("%X", memTx.tx.Hash()), + r.logger.Debug("gossiped tx to peer", + "tx", tmstrings.LazySprintf("%X", memTx.tx.Hash()), "peer", peerID, ) } diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index 8ceae20135..bd6ccf8b22 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -30,7 +30,7 @@ type reactorTestSuite struct { logger log.Logger reactors map[types.NodeID]*Reactor - mempoolChannels map[types.NodeID]*p2p.Channel + mempoolChannels map[types.NodeID]p2p.Channel mempools map[types.NodeID]*TxMempool kvstores map[types.NodeID]*kvstore.Application @@ -51,7 +51,7 @@ func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNode logger: log.NewNopLogger().With("testCase", t.Name()), network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), reactors: make(map[types.NodeID]*Reactor, numNodes), - mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes), + mempoolChannels: make(map[types.NodeID]p2p.Channel, numNodes), mempools: make(map[types.NodeID]*TxMempool, numNodes), kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), @@ -68,14 +68,14 @@ func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNode require.NoError(t, 
client.Start(ctx)) t.Cleanup(client.Wait) - mempool := setup(t, client, 0) + mempool := setup(t, client, 1<<20) rts.mempools[nodeID] = mempool rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf) rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) - chCreator := func(ctx context.Context, chDesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(ctx context.Context, chDesc *p2p.ChannelDescriptor) (p2p.Channel, error) { return rts.mempoolChannels[nodeID], nil } @@ -85,7 +85,6 @@ func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNode mempool, chCreator, func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] }, - rts.network.Nodes[nodeID].PeerManager.GetHeight, ) rts.nodes = append(rts.nodes, nodeID) @@ -171,7 +170,9 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) { secondaryReactor.observePanic = observePanic firstTx := &WrappedTx{} + primaryMempool.Lock() primaryMempool.insertTx(firstTx) + primaryMempool.Unlock() // run the router rts.start(ctx, t) @@ -184,6 +185,8 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) { wg.Add(1) go func() { defer wg.Done() + primaryMempool.Lock() + defer primaryMempool.Unlock() primaryMempool.insertTx(next) }() } @@ -254,7 +257,7 @@ func TestReactorConcurrency(t *testing.T) { deliverTxResponses[i] = &abci.ExecTxResult{Code: 0} } - require.NoError(t, mempool.Update(ctx, 1, convertTex(txs), deliverTxResponses, nil, nil)) + require.NoError(t, mempool.Update(ctx, 1, convertTex(txs), deliverTxResponses, nil, nil, true)) }() // 1. 
submit a bunch of txs @@ -268,7 +271,7 @@ func TestReactorConcurrency(t *testing.T) { mempool.Lock() defer mempool.Unlock() - err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ExecTxResult, 0), nil, nil) + err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ExecTxResult, 0), nil, nil, true) require.NoError(t, err) }() } diff --git a/internal/mempool/tx.go b/internal/mempool/tx.go index c7113c9513..1a221e2c3c 100644 --- a/internal/mempool/tx.go +++ b/internal/mempool/tx.go @@ -1,11 +1,9 @@ package mempool import ( - "sort" "sync" "time" - "github.com/tendermint/tendermint/internal/libs/clist" "github.com/tendermint/tendermint/types" ) @@ -24,270 +22,78 @@ type TxInfo struct { // WrappedTx defines a wrapper around a raw transaction with additional metadata // that is used for indexing. type WrappedTx struct { - // tx represents the raw binary transaction data - tx types.Tx - - // hash defines the transaction hash and the primary key used in the mempool - hash types.TxKey - - // height defines the height at which the transaction was validated at - height int64 - - // gasWanted defines the amount of gas the transaction sender requires - gasWanted int64 - - // priority defines the transaction's priority as specified by the application - // in the ResponseCheckTx response. - priority int64 - - // sender defines the transaction's sender as specified by the application in - // the ResponseCheckTx response. - sender string - - // timestamp is the time at which the node first received the transaction from - // a peer. It is used as a second dimension is prioritizing transactions when - // two transactions have the same priority. 
- timestamp time.Time - - // peers records a mapping of all peers that sent a given transaction - peers map[uint16]struct{} - - // heapIndex defines the index of the item in the heap - heapIndex int - - // gossipEl references the linked-list element in the gossip index - gossipEl *clist.CElement - - // removed marks the transaction as removed from the mempool. This is set - // during RemoveTx and is needed due to the fact that a given existing - // transaction in the mempool can be evicted when it is simultaneously having - // a reCheckTx callback executed. - removed bool -} - -func (wtx *WrappedTx) Size() int { - return len(wtx.tx) -} - -// TxStore implements a thread-safe mapping of valid transaction(s). -// -// NOTE: -// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative -// access is not allowed. Regardless, it is not expected for the mempool to -// need mutative access. -type TxStore struct { - mtx sync.RWMutex - hashTxs map[types.TxKey]*WrappedTx // primary index - senderTxs map[string]*WrappedTx // sender is defined by the ABCI application -} - -func NewTxStore() *TxStore { - return &TxStore{ - senderTxs: make(map[string]*WrappedTx), - hashTxs: make(map[types.TxKey]*WrappedTx), - } -} - -// Size returns the total number of transactions in the store. -func (txs *TxStore) Size() int { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return len(txs.hashTxs) -} - -// GetAllTxs returns all the transactions currently in the store. -func (txs *TxStore) GetAllTxs() []*WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wTxs := make([]*WrappedTx, len(txs.hashTxs)) - i := 0 - for _, wtx := range txs.hashTxs { - wTxs[i] = wtx - i++ - } - - return wTxs -} - -// GetTxBySender returns a *WrappedTx by the transaction's sender property -// defined by the ABCI application. 
-func (txs *TxStore) GetTxBySender(sender string) *WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return txs.senderTxs[sender] -} - -// GetTxByHash returns a *WrappedTx by the transaction's hash. -func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return txs.hashTxs[hash] -} - -// IsTxRemoved returns true if a transaction by hash is marked as removed and -// false otherwise. -func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wtx, ok := txs.hashTxs[hash] - if ok { - return wtx.removed + tx types.Tx // the original transaction data + hash types.TxKey // the transaction hash + height int64 // height when this transaction was initially checked (for expiry) + timestamp time.Time // time when transaction was entered (for TTL) + + mtx sync.Mutex + gasWanted int64 // app: gas required to execute this transaction + priority int64 // app: priority value for this transaction + sender string // app: assigned sender label + peers map[uint16]bool // peer IDs who have sent us this transaction +} + +// Size reports the size of the raw transaction in bytes. +func (w *WrappedTx) Size() int64 { return int64(len(w.tx)) } + +// SetPeer adds the specified peer ID as a sender of w. +func (w *WrappedTx) SetPeer(id uint16) { + w.mtx.Lock() + defer w.mtx.Unlock() + if w.peers == nil { + w.peers = map[uint16]bool{id: true} + } else { + w.peers[id] = true } - - return false } -// SetTx stores a *WrappedTx by it's hash. If the transaction also contains a -// non-empty sender, we additionally store the transaction by the sender as -// defined by the ABCI application. -func (txs *TxStore) SetTx(wtx *WrappedTx) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - if len(wtx.sender) > 0 { - txs.senderTxs[wtx.sender] = wtx - } - - txs.hashTxs[wtx.tx.Key()] = wtx -} - -// RemoveTx removes a *WrappedTx from the transaction store. It deletes all -// indexes of the transaction. 
-func (txs *TxStore) RemoveTx(wtx *WrappedTx) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - if len(wtx.sender) > 0 { - delete(txs.senderTxs, wtx.sender) - } - - delete(txs.hashTxs, wtx.tx.Key()) - wtx.removed = true -} - -// TxHasPeer returns true if a transaction by hash has a given peer ID and false -// otherwise. If the transaction does not exist, false is returned. -func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wtx := txs.hashTxs[hash] - if wtx == nil { - return false - } - - _, ok := wtx.peers[peerID] +// HasPeer reports whether the specified peer ID is a sender of w. +func (w *WrappedTx) HasPeer(id uint16) bool { + w.mtx.Lock() + defer w.mtx.Unlock() + _, ok := w.peers[id] return ok } -// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the -// given peerID to the WrappedTx's set of peers that sent us this transaction. -// We return true if we've already recorded the given peer for this transaction -// and false otherwise. If the transaction does not exist by hash, we return -// (nil, false). -func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - wtx := txs.hashTxs[hash] - if wtx == nil { - return nil, false - } - - if wtx.peers == nil { - wtx.peers = make(map[uint16]struct{}) - } - - if _, ok := wtx.peers[peerID]; ok { - return wtx, true - } - - wtx.peers[peerID] = struct{}{} - return wtx, false -} - -// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be -// used to build generic transaction indexes in the mempool. It accepts a -// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx -// references which is used during Insert in order to determine sorted order. If -// less returns true, a <= b. 
-type WrappedTxList struct { - mtx sync.RWMutex - txs []*WrappedTx - less func(*WrappedTx, *WrappedTx) bool +// SetGasWanted sets the application-assigned gas requirement of w. +func (w *WrappedTx) SetGasWanted(gas int64) { + w.mtx.Lock() + defer w.mtx.Unlock() + w.gasWanted = gas } -func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList { - return &WrappedTxList{ - txs: make([]*WrappedTx, 0), - less: less, - } +// GasWanted reports the application-assigned gas requirement of w. +func (w *WrappedTx) GasWanted() int64 { + w.mtx.Lock() + defer w.mtx.Unlock() + return w.gasWanted } -// Size returns the number of WrappedTx objects in the list. -func (wtl *WrappedTxList) Size() int { - wtl.mtx.RLock() - defer wtl.mtx.RUnlock() - - return len(wtl.txs) +// SetSender sets the application-assigned sender of w. +func (w *WrappedTx) SetSender(sender string) { + w.mtx.Lock() + defer w.mtx.Unlock() + w.sender = sender } -// Reset resets the list of transactions to an empty list. -func (wtl *WrappedTxList) Reset() { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - wtl.txs = make([]*WrappedTx, 0) +// Sender reports the application-assigned sender of w. +func (w *WrappedTx) Sender() string { + w.mtx.Lock() + defer w.mtx.Unlock() + return w.sender } -// Insert inserts a WrappedTx reference into the sorted list based on the list's -// comparator function. -func (wtl *WrappedTxList) Insert(wtx *WrappedTx) { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - i := sort.Search(len(wtl.txs), func(i int) bool { - return wtl.less(wtl.txs[i], wtx) - }) - - if i == len(wtl.txs) { - // insert at the end - wtl.txs = append(wtl.txs, wtx) - return - } - - // Make space for the inserted element by shifting values at the insertion - // index up one index. - // - // NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs). - wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...) - wtl.txs[i] = wtx +// SetPriority sets the application-assigned priority of w. 
+func (w *WrappedTx) SetPriority(p int64) { + w.mtx.Lock() + defer w.mtx.Unlock() + w.priority = p } -// Remove attempts to remove a WrappedTx from the sorted list. -func (wtl *WrappedTxList) Remove(wtx *WrappedTx) { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - i := sort.Search(len(wtl.txs), func(i int) bool { - return wtl.less(wtl.txs[i], wtx) - }) - - // Since the list is sorted, we evaluate all elements starting at i. Note, if - // the element does not exist, we may potentially evaluate the entire remainder - // of the list. However, a caller should not be expected to call Remove with a - // non-existing element. - for i < len(wtl.txs) { - if wtl.txs[i] == wtx { - wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...) - return - } - - i++ - } +// Priority reports the application-assigned priority of w. +func (w *WrappedTx) Priority() int64 { + w.mtx.Lock() + defer w.mtx.Unlock() + return w.priority } diff --git a/internal/mempool/tx_test.go b/internal/mempool/tx_test.go deleted file mode 100644 index c6d494b047..0000000000 --- a/internal/mempool/tx_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package mempool - -import ( - "fmt" - "math/rand" - "sort" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/types" -) - -func TestTxStore_GetTxBySender(t *testing.T) { - txs := NewTxStore() - wtx := &WrappedTx{ - tx: []byte("test_tx"), - sender: "foo", - priority: 1, - timestamp: time.Now(), - } - - res := txs.GetTxBySender(wtx.sender) - require.Nil(t, res) - - txs.SetTx(wtx) - - res = txs.GetTxBySender(wtx.sender) - require.NotNil(t, res) - require.Equal(t, wtx, res) -} - -func TestTxStore_GetTxByHash(t *testing.T) { - txs := NewTxStore() - wtx := &WrappedTx{ - tx: []byte("test_tx"), - sender: "foo", - priority: 1, - timestamp: time.Now(), - } - - key := wtx.tx.Key() - res := txs.GetTxByHash(key) - require.Nil(t, res) - - txs.SetTx(wtx) - - res = txs.GetTxByHash(key) - require.NotNil(t, res) - require.Equal(t, wtx, res) -} - -func 
TestTxStore_SetTx(t *testing.T) { - txs := NewTxStore() - wtx := &WrappedTx{ - tx: []byte("test_tx"), - priority: 1, - timestamp: time.Now(), - } - - key := wtx.tx.Key() - txs.SetTx(wtx) - - res := txs.GetTxByHash(key) - require.NotNil(t, res) - require.Equal(t, wtx, res) - - wtx.sender = "foo" - txs.SetTx(wtx) - - res = txs.GetTxByHash(key) - require.NotNil(t, res) - require.Equal(t, wtx, res) -} - -func TestTxStore_GetOrSetPeerByTxHash(t *testing.T) { - txs := NewTxStore() - wtx := &WrappedTx{ - tx: []byte("test_tx"), - priority: 1, - timestamp: time.Now(), - } - - key := wtx.tx.Key() - txs.SetTx(wtx) - - res, ok := txs.GetOrSetPeerByTxHash(types.Tx([]byte("test_tx_2")).Key(), 15) - require.Nil(t, res) - require.False(t, ok) - - res, ok = txs.GetOrSetPeerByTxHash(key, 15) - require.NotNil(t, res) - require.False(t, ok) - - res, ok = txs.GetOrSetPeerByTxHash(key, 15) - require.NotNil(t, res) - require.True(t, ok) - - require.True(t, txs.TxHasPeer(key, 15)) - require.False(t, txs.TxHasPeer(key, 16)) -} - -func TestTxStore_RemoveTx(t *testing.T) { - txs := NewTxStore() - wtx := &WrappedTx{ - tx: []byte("test_tx"), - priority: 1, - timestamp: time.Now(), - } - - txs.SetTx(wtx) - - key := wtx.tx.Key() - res := txs.GetTxByHash(key) - require.NotNil(t, res) - - txs.RemoveTx(res) - - res = txs.GetTxByHash(key) - require.Nil(t, res) -} - -func TestTxStore_Size(t *testing.T) { - txStore := NewTxStore() - numTxs := 1000 - - for i := 0; i < numTxs; i++ { - txStore.SetTx(&WrappedTx{ - tx: []byte(fmt.Sprintf("test_tx_%d", i)), - priority: int64(i), - timestamp: time.Now(), - }) - } - - require.Equal(t, numTxs, txStore.Size()) -} - -func TestWrappedTxList_Reset(t *testing.T) { - list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }) - - require.Zero(t, list.Size()) - - for i := 0; i < 100; i++ { - list.Insert(&WrappedTx{height: int64(i)}) - } - - require.Equal(t, 100, list.Size()) - - list.Reset() - require.Zero(t, list.Size()) -} - 
-func TestWrappedTxList_Insert(t *testing.T) { - list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }) - - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - - var expected []int - for i := 0; i < 100; i++ { - height := rng.Int63n(10000) - expected = append(expected, int(height)) - list.Insert(&WrappedTx{height: height}) - - if i%10 == 0 { - list.Insert(&WrappedTx{height: height}) - expected = append(expected, int(height)) - } - } - - got := make([]int, list.Size()) - for i, wtx := range list.txs { - got[i] = int(wtx.height) - } - - sort.Ints(expected) - require.Equal(t, expected, got) -} - -func TestWrappedTxList_Remove(t *testing.T) { - list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }) - - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - - var txs []*WrappedTx - for i := 0; i < 100; i++ { - height := rng.Int63n(10000) - tx := &WrappedTx{height: height} - - txs = append(txs, tx) - list.Insert(tx) - - if i%10 == 0 { - tx = &WrappedTx{height: height} - list.Insert(tx) - txs = append(txs, tx) - } - } - - // remove a tx that does not exist - list.Remove(&WrappedTx{height: 20000}) - - // remove a tx that exists (by height) but not referenced - list.Remove(&WrappedTx{height: txs[0].height}) - - // remove a few existing txs - for i := 0; i < 25; i++ { - j := rng.Intn(len(txs)) - list.Remove(txs[j]) - txs = append(txs[:j], txs[j+1:]...) 
- } - - expected := make([]int, len(txs)) - for i, tx := range txs { - expected[i] = int(tx.height) - } - - got := make([]int, list.Size()) - for i, wtx := range list.txs { - got[i] = int(wtx.height) - } - - sort.Ints(expected) - require.Equal(t, expected, got) -} diff --git a/internal/mempool/types.go b/internal/mempool/types.go index a51d286e28..481ced3fab 100644 --- a/internal/mempool/types.go +++ b/internal/mempool/types.go @@ -71,6 +71,7 @@ type Mempool interface { txResults []*abci.ExecTxResult, newPreFn PreCheckFunc, newPostFn PostCheckFunc, + recheck bool, ) error // FlushAppConn flushes the mempool connection to ensure async callback calls diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go index 8e6774612e..394656632d 100644 --- a/internal/p2p/channel.go +++ b/internal/p2p/channel.go @@ -2,6 +2,7 @@ package p2p import ( "context" + "errors" "fmt" "sync" @@ -19,6 +20,10 @@ type Envelope struct { ChannelID ChannelID } +func (e Envelope) IsZero() bool { + return e.From == "" && e.To == "" && e.Message == nil +} + // Wrapper is a Protobuf message that can contain a variety of inner messages // (e.g. via oneof fields). If a Channel's message type implements Wrapper, the // Router will automatically wrap outbound messages and unwrap inbound messages, @@ -33,6 +38,16 @@ type Wrapper interface { Unwrap() (proto.Message, error) } +type Channel interface { + fmt.Stringer + + Err() error + + Send(context.Context, Envelope) error + SendError(context.Context, PeerError) error + Receive(context.Context) *ChannelIterator +} + // PeerError is a peer error reported via Channel.Error. // // FIXME: This currently just disconnects the peer, which is too simplistic. 
@@ -46,44 +61,38 @@ type Wrapper interface { type PeerError struct { NodeID types.NodeID Err error + Fatal bool } func (pe PeerError) Error() string { return fmt.Sprintf("peer=%q: %s", pe.NodeID, pe.Err.Error()) } func (pe PeerError) Unwrap() error { return pe.Err } -// Channel is a bidirectional channel to exchange Protobuf messages with peers. +// legacyChannel is a bidirectional channel to exchange Protobuf messages with peers. // Each message is wrapped in an Envelope to specify its sender and receiver. -type Channel struct { +type legacyChannel struct { ID ChannelID inCh <-chan Envelope // inbound messages (peers to reactors) outCh chan<- Envelope // outbound messages (reactors to peers) errCh chan<- PeerError // peer error reporting - messageType proto.Message // the channel's message type, used for unmarshaling - name string + name string } // NewChannel creates a new channel. It is primarily for internal and test // use, reactors should use Router.OpenChannel(). -func NewChannel( - id ChannelID, - messageType proto.Message, - inCh <-chan Envelope, - outCh chan<- Envelope, - errCh chan<- PeerError, -) *Channel { - return &Channel{ - ID: id, - messageType: messageType, - inCh: inCh, - outCh: outCh, - errCh: errCh, +func NewChannel(id ChannelID, name string, inCh <-chan Envelope, outCh chan<- Envelope, errCh chan<- PeerError) Channel { + return &legacyChannel{ + ID: id, + name: name, + inCh: inCh, + outCh: outCh, + errCh: errCh, } } // Send blocks until the envelope has been sent, or until ctx ends. // An error only occurs if the context ends before the send completes. 
-func (ch *Channel) Send(ctx context.Context, envelope Envelope) error { +func (ch *legacyChannel) Send(ctx context.Context, envelope Envelope) error { select { case <-ctx.Done(): return ctx.Err() @@ -92,9 +101,15 @@ func (ch *Channel) Send(ctx context.Context, envelope Envelope) error { } } +func (ch *legacyChannel) Err() error { return nil } + // SendError blocks until the given error has been sent, or ctx ends. // An error only occurs if the context ends before the send completes. -func (ch *Channel) SendError(ctx context.Context, pe PeerError) error { +func (ch *legacyChannel) SendError(ctx context.Context, pe PeerError) error { + if errors.Is(pe.Err, context.Canceled) || errors.Is(pe.Err, context.DeadlineExceeded) { + return nil + } + select { case <-ctx.Done(): return ctx.Err() @@ -103,18 +118,29 @@ func (ch *Channel) SendError(ctx context.Context, pe PeerError) error { } } -func (ch *Channel) String() string { return fmt.Sprintf("p2p.Channel<%d:%s>", ch.ID, ch.name) } +func (ch *legacyChannel) String() string { return fmt.Sprintf("p2p.Channel<%d:%s>", ch.ID, ch.name) } // Receive returns a new unbuffered iterator to receive messages from ch. // The iterator runs until ctx ends. 
-func (ch *Channel) Receive(ctx context.Context) *ChannelIterator { +func (ch *legacyChannel) Receive(ctx context.Context) *ChannelIterator { iter := &ChannelIterator{ pipe: make(chan Envelope), // unbuffered } - go func() { + go func(pipe chan<- Envelope) { defer close(iter.pipe) - iteratorWorker(ctx, ch, iter.pipe) - }() + for { + select { + case <-ctx.Done(): + return + case envelope := <-ch.inCh: + select { + case <-ctx.Done(): + return + case pipe <- envelope: + } + } + } + }(iter.pipe) return iter } @@ -129,21 +155,6 @@ type ChannelIterator struct { current *Envelope } -func iteratorWorker(ctx context.Context, ch *Channel, pipe chan Envelope) { - for { - select { - case <-ctx.Done(): - return - case envelope := <-ch.inCh: - select { - case <-ctx.Done(): - return - case pipe <- envelope: - } - } - } -} - // Next returns true when the Envelope value has advanced, and false // when the context is canceled or iteration should stop. If an iterator has returned false, // it will never return true again. @@ -182,7 +193,7 @@ func (iter *ChannelIterator) Envelope() *Envelope { return iter.current } // // This allows the caller to consume messages from multiple channels // without needing to manage the concurrency separately. 
-func MergedChannelIterator(ctx context.Context, chs ...*Channel) *ChannelIterator { +func MergedChannelIterator(ctx context.Context, chs ...Channel) *ChannelIterator { iter := &ChannelIterator{ pipe: make(chan Envelope), // unbuffered } @@ -190,10 +201,17 @@ func MergedChannelIterator(ctx context.Context, chs ...*Channel) *ChannelIterato for _, ch := range chs { wg.Add(1) - go func(ch *Channel) { + go func(ch Channel, pipe chan<- Envelope) { defer wg.Done() - iteratorWorker(ctx, ch, iter.pipe) - }(ch) + iter := ch.Receive(ctx) + for iter.Next(ctx) { + select { + case <-ctx.Done(): + return + case pipe <- *iter.Envelope(): + } + } + }(ch, iter.pipe) } done := make(chan struct{}) diff --git a/internal/p2p/channel_test.go b/internal/p2p/channel_test.go index e06e3e77ea..eeaf77db25 100644 --- a/internal/p2p/channel_test.go +++ b/internal/p2p/channel_test.go @@ -16,13 +16,13 @@ type channelInternal struct { Error chan PeerError } -func testChannel(size int) (*channelInternal, *Channel) { +func testChannel(size int) (*channelInternal, *legacyChannel) { in := &channelInternal{ In: make(chan Envelope, size), Out: make(chan Envelope, size), Error: make(chan PeerError, size), } - ch := &Channel{ + ch := &legacyChannel{ inCh: in.In, outCh: in.Out, errCh: in.Error, diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index c8fc211888..adc287328f 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -100,7 +100,8 @@ type MConnection struct { // used to ensure FlushStop and OnStop // are safe to call concurrently. 
- stopMtx sync.Mutex + stopMtx sync.Mutex + stopSignal <-chan struct{} cancel context.CancelFunc @@ -207,6 +208,7 @@ func (c *MConnection) OnStart(ctx context.Context) error { c.quitSendRoutine = make(chan struct{}) c.doneSendRoutine = make(chan struct{}) c.quitRecvRoutine = make(chan struct{}) + c.stopSignal = ctx.Done() c.setRecvLastMsgAt(time.Now()) go c.sendRoutine(ctx) go c.recvRoutine(ctx) @@ -311,7 +313,7 @@ func (c *MConnection) Send(chID ChannelID, msgBytes []byte) bool { // Send message to channel. channel, ok := c.channelsIdx[chID] if !ok { - c.logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) + c.logger.Error("Cannot send bytes to unknown channel", "channel", chID) return false } @@ -681,6 +683,8 @@ func (ch *channel) sendBytes(bytes []byte) bool { return true case <-time.After(defaultSendTimeout): return false + case <-ch.conn.stopSignal: + return false } } diff --git a/internal/p2p/conn/connection_test.go b/internal/p2p/conn/connection_test.go index 4da3901fa0..d0802a3c63 100644 --- a/internal/p2p/conn/connection_test.go +++ b/internal/p2p/conn/connection_test.go @@ -315,6 +315,10 @@ func TestMConnectionMultiplePings(t *testing.T) { } func TestMConnectionPingPongs(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // check that we are not leaking any go-routines t.Cleanup(leaktest.CheckTimeout(t, 10*time.Second)) @@ -558,6 +562,10 @@ func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { } func TestMConnectionTrySend(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + server, client := net.Pipe() t.Cleanup(closeAll(t, client, server)) ctx, cancel := context.WithCancel(context.Background()) @@ -606,6 +614,10 @@ func TestConnVectors(t *testing.T) { } func TestMConnectionChannelOverflow(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + chOnErr := make(chan struct{}) chOnRcv := make(chan struct{}) diff --git 
a/internal/p2p/metrics.gen.go b/internal/p2p/metrics.gen.go new file mode 100644 index 0000000000..9cffbc46b6 --- /dev/null +++ b/internal/p2p/metrics.gen.go @@ -0,0 +1,135 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package p2p + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + PeersConnected: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_connected", + Help: "Number of peers connected.", + }, labels).With(labelsAndValues...), + PeersStored: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_stored", + Help: "Nomber of peers in the peer store database.", + }, labels).With(labelsAndValues...), + PeersInactivated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_inactivated", + Help: "Number of inactive peers stored.", + }, labels).With(labelsAndValues...), + PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_receive_bytes_total", + Help: "Number of bytes per channel received from a given peer.", + }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), + PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_send_bytes_total", + Help: "Number of bytes per channel sent to a given peer.", + }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), + PeerPendingSendBytes: 
prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_pending_send_bytes", + Help: "Number of bytes pending being sent to a given peer.", + }, append(labels, "peer_id")).With(labelsAndValues...), + PeersConnectedSuccess: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_connected_success", + Help: "Number of successful connection attempts", + }, labels).With(labelsAndValues...), + PeersConnectedFailure: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_connected_failure", + Help: "Number of failed connection attempts", + }, labels).With(labelsAndValues...), + PeersConnectedIncoming: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_connected_incoming", + Help: "Number of peers connected as a result of the peer dialing this node.", + }, labels).With(labelsAndValues...), + PeersConnectedOutgoing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_connected_outgoing", + Help: "Number of peers connected as a result of dialing the peer.", + }, labels).With(labelsAndValues...), + PeersEvicted: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_evicted", + Help: "Number of peers evicted by this node.", + }, labels).With(labelsAndValues...), + RouterPeerQueueRecv: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "router_peer_queue_recv", + Help: "The time taken to read off of a peer's queue before sending on the connection.", + }, labels).With(labelsAndValues...), + RouterPeerQueueSend: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: 
MetricsSubsystem, + Name: "router_peer_queue_send", + Help: "The time taken to send on a peer's queue which will later be read and sent on the connection.", + }, labels).With(labelsAndValues...), + RouterChannelQueueSend: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "router_channel_queue_send", + Help: "The time taken to send on a p2p channel's queue which will later be consumed by the corresponding reactor/service.", + }, labels).With(labelsAndValues...), + PeerQueueDroppedMsgs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "router_channel_queue_dropped_msgs", + Help: "The number of messages dropped from a peer's queue for a specific p2p Channel.", + }, append(labels, "ch_id")).With(labelsAndValues...), + PeerQueueMsgSize: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_queue_msg_size", + Help: "The size of messages sent over a peer's queue for a specific p2p Channel.", + }, append(labels, "ch_id")).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + PeersConnected: discard.NewGauge(), + PeersStored: discard.NewGauge(), + PeersInactivated: discard.NewGauge(), + PeerReceiveBytesTotal: discard.NewCounter(), + PeerSendBytesTotal: discard.NewCounter(), + PeerPendingSendBytes: discard.NewGauge(), + PeersConnectedSuccess: discard.NewCounter(), + PeersConnectedFailure: discard.NewCounter(), + PeersConnectedIncoming: discard.NewGauge(), + PeersConnectedOutgoing: discard.NewGauge(), + PeersEvicted: discard.NewCounter(), + RouterPeerQueueRecv: discard.NewHistogram(), + RouterPeerQueueSend: discard.NewHistogram(), + RouterChannelQueueSend: discard.NewHistogram(), + PeerQueueDroppedMsgs: discard.NewCounter(), + PeerQueueMsgSize: discard.NewGauge(), + } +} diff --git a/internal/p2p/metrics.go b/internal/p2p/metrics.go index 
2780d221ef..bc233f691f 100644 --- a/internal/p2p/metrics.go +++ b/internal/p2p/metrics.go @@ -7,9 +7,6 @@ import ( "sync" "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -25,140 +22,74 @@ var ( valueToLabelRegexp = regexp.MustCompile(`\*?(\w+)\.(.*)`) ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. type Metrics struct { - // Number of peers. - Peers metrics.Gauge - // Number of bytes received from a given peer. - PeerReceiveBytesTotal metrics.Counter - // Number of bytes sent to a given peer. - PeerSendBytesTotal metrics.Counter - // Pending bytes to be sent to a given peer. - PeerPendingSendBytes metrics.Gauge + // Number of peers connected. + PeersConnected metrics.Gauge + // Number of peers in the peer store database. + PeersStored metrics.Gauge + // Number of inactive peers stored. + PeersInactivated metrics.Gauge + // Number of bytes per channel received from a given peer. + PeerReceiveBytesTotal metrics.Counter `metrics_labels:"peer_id, chID, message_type"` + // Number of bytes per channel sent to a given peer. + PeerSendBytesTotal metrics.Counter `metrics_labels:"peer_id, chID, message_type"` + // Number of bytes pending being sent to a given peer. + PeerPendingSendBytes metrics.Gauge `metrics_labels:"peer_id"` + + // Number of successful connection attempts + PeersConnectedSuccess metrics.Counter + // Number of failed connection attempts + PeersConnectedFailure metrics.Counter + + // Number of peers connected as a result of the peer dialing + // this node. + PeersConnectedIncoming metrics.Gauge + // Number of peers connected as a result of dialing the + // peer. + PeersConnectedOutgoing metrics.Gauge + + // Number of peers evicted by this node. 
+ PeersEvicted metrics.Counter // RouterPeerQueueRecv defines the time taken to read off of a peer's queue // before sending on the connection. + //metrics:The time taken to read off of a peer's queue before sending on the connection. RouterPeerQueueRecv metrics.Histogram // RouterPeerQueueSend defines the time taken to send on a peer's queue which // will later be read and sent on the connection (see RouterPeerQueueRecv). + //metrics:The time taken to send on a peer's queue which will later be read and sent on the connection. RouterPeerQueueSend metrics.Histogram // RouterChannelQueueSend defines the time taken to send on a p2p channel's // queue which will later be consued by the corresponding reactor/service. + //metrics:The time taken to send on a p2p channel's queue which will later be consumed by the corresponding reactor/service. RouterChannelQueueSend metrics.Histogram // PeerQueueDroppedMsgs defines the number of messages dropped from a peer's // queue for a specific flow (i.e. Channel). - PeerQueueDroppedMsgs metrics.Counter + //metrics:The number of messages dropped from a peer's queue for a specific p2p Channel. + PeerQueueDroppedMsgs metrics.Counter `metrics_labels:"ch_id" metrics_name:"router_channel_queue_dropped_msgs"` // PeerQueueMsgSize defines the average size of messages sent over a peer's // queue for a specific flow (i.e. Channel). - PeerQueueMsgSize metrics.Gauge + //metrics:The size of messages sent over a peer's queue for a specific p2p Channel. + PeerQueueMsgSize metrics.Gauge `metrics_labels:"ch_id" metric_name:"router_channel_queue_msg_size"` +} +type metricsLabelCache struct { mtx *sync.RWMutex messageLabelNames map[reflect.Type]string } -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). 
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "peers", - Help: "Number of peers.", - }, labels).With(labelsAndValues...), - - PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "peer_receive_bytes_total", - Help: "Number of bytes received from a given peer.", - }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), - - PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "peer_send_bytes_total", - Help: "Number of bytes sent to a given peer.", - }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), - - PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "peer_pending_send_bytes", - Help: "Number of pending bytes to be sent to a given peer.", - }, append(labels, "peer_id")).With(labelsAndValues...), - - RouterPeerQueueRecv: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "router_peer_queue_recv", - Help: "The time taken to read off of a peer's queue before sending on the connection.", - }, labels).With(labelsAndValues...), - - RouterPeerQueueSend: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "router_peer_queue_send", - Help: "The time taken to send on a peer's queue which will later be read and sent on the connection (see RouterPeerQueueRecv).", - }, labels).With(labelsAndValues...), - - RouterChannelQueueSend: 
prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "router_channel_queue_send", - Help: "The time taken to send on a p2p channel's queue which will later be consued by the corresponding reactor/service.", - }, labels).With(labelsAndValues...), - - PeerQueueDroppedMsgs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "router_channel_queue_dropped_msgs", - Help: "The number of messages dropped from a peer's queue for a specific p2p Channel.", - }, append(labels, "ch_id")).With(labelsAndValues...), - - PeerQueueMsgSize: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "router_channel_queue_msg_size", - Help: "The size of messages sent over a peer's queue for a specific p2p Channel.", - }, append(labels, "ch_id")).With(labelsAndValues...), - - mtx: &sync.RWMutex{}, - messageLabelNames: map[reflect.Type]string{}, - } -} - -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - Peers: discard.NewGauge(), - PeerReceiveBytesTotal: discard.NewCounter(), - PeerSendBytesTotal: discard.NewCounter(), - PeerPendingSendBytes: discard.NewGauge(), - RouterPeerQueueRecv: discard.NewHistogram(), - RouterPeerQueueSend: discard.NewHistogram(), - RouterChannelQueueSend: discard.NewHistogram(), - PeerQueueDroppedMsgs: discard.NewCounter(), - PeerQueueMsgSize: discard.NewGauge(), - mtx: &sync.RWMutex{}, - messageLabelNames: map[reflect.Type]string{}, - } -} - // ValueToMetricLabel is a method that is used to produce a prometheus label value of the golang // type that is passed in. // This method uses a map on the Metrics struct so that each label name only needs // to be produced once to prevent expensive string operations. 
-func (m *Metrics) ValueToMetricLabel(i interface{}) string { +func (m *metricsLabelCache) ValueToMetricLabel(i interface{}) string { t := reflect.TypeOf(i) m.mtx.RLock() @@ -176,3 +107,10 @@ func (m *Metrics) ValueToMetricLabel(i interface{}) string { m.messageLabelNames[t] = l return l } + +func newMetricsLabelCache() *metricsLabelCache { + return &metricsLabelCache{ + mtx: &sync.RWMutex{}, + messageLabelNames: map[reflect.Type]string{}, + } +} diff --git a/internal/p2p/metrics_test.go b/internal/p2p/metrics_test.go index 839786d919..98523fe822 100644 --- a/internal/p2p/metrics_test.go +++ b/internal/p2p/metrics_test.go @@ -9,12 +9,12 @@ import ( ) func TestValueToMetricsLabel(t *testing.T) { - m := NopMetrics() + lc := newMetricsLabelCache() r := &p2p.PexResponse{} - str := m.ValueToMetricLabel(r) + str := lc.ValueToMetricLabel(r) assert.Equal(t, "p2p_PexResponse", str) // subsequent calls to the function should produce the same result - str = m.ValueToMetricLabel(r) + str = lc.ValueToMetricLabel(r) assert.Equal(t, "p2p_PexResponse", str) } diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 73b6cfc3b3..5a317bcf1c 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -13,7 +13,7 @@ import ( p2p "github.com/tendermint/tendermint/internal/p2p" - testing "testing" + time "time" types "github.com/tendermint/tendermint/types" ) @@ -37,20 +37,20 @@ func (_m *Connection) Close() error { return r0 } -// Handshake provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) { - ret := _m.Called(_a0, _a1, _a2) +// Handshake provides a mock function with given fields: _a0, _a1, _a2, _a3 +func (_m *Connection) Handshake(_a0 context.Context, _a1 time.Duration, _a2 types.NodeInfo, _a3 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) { + ret := _m.Called(_a0, _a1, _a2, _a3) 
var r0 types.NodeInfo - if rf, ok := ret.Get(0).(func(context.Context, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok { - r0 = rf(_a0, _a1, _a2) + if rf, ok := ret.Get(0).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok { + r0 = rf(_a0, _a1, _a2, _a3) } else { r0 = ret.Get(0).(types.NodeInfo) } var r1 crypto.PubKey - if rf, ok := ret.Get(1).(func(context.Context, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok { - r1 = rf(_a0, _a1, _a2) + if rf, ok := ret.Get(1).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok { + r1 = rf(_a0, _a1, _a2, _a3) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(crypto.PubKey) @@ -58,8 +58,8 @@ func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 cry } var r2 error - if rf, ok := ret.Get(2).(func(context.Context, types.NodeInfo, crypto.PrivKey) error); ok { - r2 = rf(_a0, _a1, _a2) + if rf, ok := ret.Get(2).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) error); ok { + r2 = rf(_a0, _a1, _a2, _a3) } else { r2 = ret.Error(2) } @@ -153,8 +153,13 @@ func (_m *Connection) String() string { return r0 } -// NewConnection creates a new instance of Connection. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewConnection(t testing.TB) *Connection { +type mockConstructorTestingTNewConnection interface { + mock.TestingT + Cleanup(func()) +} + +// NewConnection creates a new instance of Connection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewConnection(t mockConstructorTestingTNewConnection) *Connection { mock := &Connection{} mock.Mock.Test(t) diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go index 34ebec20e7..e89f0e25a3 100644 --- a/internal/p2p/mocks/transport.go +++ b/internal/p2p/mocks/transport.go @@ -10,8 +10,6 @@ import ( mock "github.com/stretchr/testify/mock" p2p "github.com/tendermint/tendermint/internal/p2p" - - testing "testing" ) // Transport is an autogenerated mock type for the Transport type @@ -151,8 +149,13 @@ func (_m *Transport) String() string { return r0 } -// NewTransport creates a new instance of Transport. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewTransport(t testing.TB) *Transport { +type mockConstructorTestingTNewTransport interface { + mock.TestingT + Cleanup(func()) +} + +// NewTransport creates a new instance of Transport. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewTransport(t mockConstructorTestingTNewTransport) *Transport { mock := &Transport{} mock.Mock.Test(t) diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index fc4657596d..12804f873d 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -164,8 +164,8 @@ func (n *Network) MakeChannels( ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, -) map[types.NodeID]*p2p.Channel { - channels := map[types.NodeID]*p2p.Channel{} +) map[types.NodeID]p2p.Channel { + channels := map[types.NodeID]p2p.Channel{} for _, node := range n.Nodes { channels[node.NodeID] = node.MakeChannel(ctx, t, chDesc) } @@ -179,8 +179,8 @@ func (n *Network) MakeChannelsNoCleanup( ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, -) map[types.NodeID]*p2p.Channel { - channels := map[types.NodeID]*p2p.Channel{} +) map[types.NodeID]p2p.Channel { + channels := map[types.NodeID]p2p.Channel{} for _, node := range n.Nodes { channels[node.NodeID] = node.MakeChannelNoCleanup(ctx, t, chDesc) } @@ -271,11 +271,13 @@ func (n *Network) MakeNode(ctx context.Context, t *testing.T, proTxHash crypto.P require.NotNil(t, ep, "transport not listening an endpoint") peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - MinRetryTime: 10 * time.Millisecond, - MaxRetryTime: 100 * time.Millisecond, - RetryTimeJitter: time.Millisecond, - MaxPeers: opts.MaxPeers, - MaxConnected: opts.MaxConnected, + MinRetryTime: 10 * time.Millisecond, + DisconnectCooldownPeriod: 10 * time.Millisecond, + MaxRetryTime: 100 * time.Millisecond, + RetryTimeJitter: time.Millisecond, + MaxPeers: opts.MaxPeers, + MaxConnected: opts.MaxConnected, + Metrics: p2p.NopMetrics(), }) require.NoError(t, err) @@ -287,7 +289,7 @@ func (n *Network) MakeNode(ctx context.Context, t *testing.T, proTxHash crypto.P func() *types.NodeInfo { return &nodeInfo }, transport, ep, - p2p.RouterOptions{DialSleep: func(_ context.Context) {}}, + 
p2p.RouterOptions{}, ) require.NoError(t, err) @@ -321,7 +323,7 @@ func (n *Node) MakeChannel( ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, -) *p2p.Channel { +) p2p.Channel { ctx, cancel := context.WithCancel(ctx) channel, err := n.Router.OpenChannel(ctx, chDesc) require.NoError(t, err) @@ -338,7 +340,7 @@ func (n *Node) MakeChannelNoCleanup( ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, -) *p2p.Channel { +) p2p.Channel { channel, err := n.Router.OpenChannel(ctx, chDesc) require.NoError(t, err) return channel diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index 885e080d40..276bff390a 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -15,7 +15,7 @@ import ( ) // RequireEmpty requires that the given channel is empty. -func RequireEmpty(ctx context.Context, t *testing.T, channels ...*p2p.Channel) { +func RequireEmpty(ctx context.Context, t *testing.T, channels ...p2p.Channel) { t.Helper() ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) @@ -32,7 +32,7 @@ func RequireEmpty(ctx context.Context, t *testing.T, channels ...*p2p.Channel) { } // RequireReceive requires that the given envelope is received on the channel. -func RequireReceive(ctx context.Context, t *testing.T, channel *p2p.Channel, expect p2p.Envelope) { +func RequireReceive(ctx context.Context, t *testing.T, channel p2p.Channel, expect p2p.Envelope) { t.Helper() ctx, cancel := context.WithTimeout(ctx, time.Second) @@ -54,7 +54,7 @@ func RequireReceive(ctx context.Context, t *testing.T, channel *p2p.Channel, exp // RequireReceiveUnordered requires that the given envelopes are all received on // the channel, ignoring order. 
-func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel *p2p.Channel, expect []*p2p.Envelope) { +func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel p2p.Channel, expect []*p2p.Envelope) { ctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() @@ -75,7 +75,7 @@ func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel *p2p.Cha } // RequireSend requires that the given envelope is sent on the channel. -func RequireSend(ctx context.Context, t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) { +func RequireSend(ctx context.Context, t *testing.T, channel p2p.Channel, envelope p2p.Envelope) { tctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() @@ -93,7 +93,7 @@ func RequireSend(ctx context.Context, t *testing.T, channel *p2p.Channel, envelo func RequireSendReceive( ctx context.Context, t *testing.T, - channel *p2p.Channel, + channel p2p.Channel, peerID types.NodeID, send proto.Message, receive proto.Message, @@ -116,7 +116,7 @@ func RequireNoUpdates(ctx context.Context, t *testing.T, peerUpdates *p2p.PeerUp } // RequireError requires that the given peer error is submitted for a peer. -func RequireError(ctx context.Context, t *testing.T, channel *p2p.Channel, peerError p2p.PeerError) { +func RequireError(ctx context.Context, t *testing.T, channel p2p.Channel, peerError p2p.PeerError) { tctx, tcancel := context.WithTimeout(ctx, time.Second) defer tcancel() diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 452e0df96c..cd0a63cb1f 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -38,11 +38,18 @@ const ( PeerStatusBad PeerStatus = "bad" // peer observed as bad ) +type peerConnectionDirection int + +const ( + peerConnectionIncoming peerConnectionDirection = iota + 1 + peerConnectionOutgoing +) + // PeerScore is a numeric score assigned to a peer (higher is better). 
-type PeerScore uint8 +type PeerScore int16 const ( - PeerScorePersistent PeerScore = math.MaxUint8 // persistent peers + PeerScorePersistent PeerScore = math.MaxInt16 // persistent peers MaxPeerScoreNotPersistent PeerScore = PeerScorePersistent - 1 ) @@ -108,6 +115,13 @@ type PeerManagerOptions struct { // outbound). 0 means no limit. MaxConnected uint16 + // MaxOutgoingConnections specifies how many outgoing + // connections a node will maintain. It must be lower than MaxConnected. If it is + // 0, then all connections can be outgoing. Once this limit is + // reached, the node will not dial peers, allowing the + // remaining peer connections to be used by incoming connections. + MaxOutgoingConnections uint16 + // MaxConnectedUpgrade is the maximum number of additional connections to // use for probing any better-scored peers to upgrade to when all connection // slots are full. 0 disables peer upgrading. @@ -137,6 +151,10 @@ type PeerManagerOptions struct { // retry times, to avoid thundering herds. 0 disables jitter. RetryTimeJitter time.Duration + // DisconnectCooldownPeriod is the amount of time after we + // disconnect from a peer before we'll consider dialing a new peer + DisconnectCooldownPeriod time.Duration + // PeerScores sets fixed scores for specific peers. It is mainly used // for testing. A score of 0 is ignored. PeerScores map[types.NodeID]PeerScore @@ -152,6 +170,9 @@ type PeerManagerOptions struct { // persistentPeers provides fast PersistentPeers lookups. It is built // by optimize(). persistentPeers map[types.NodeID]bool + + // Peer Metrics + Metrics *Metrics } // Validate validates the options. @@ -200,14 +221,18 @@ func (o *PeerManagerOptions) Validate() error { } } + if o.MaxOutgoingConnections > 0 && o.MaxConnected < o.MaxOutgoingConnections { + return errors.New("cannot set MaxOutgoingConnections to a value larger than MaxConnected") + } + return nil } -// isPersistentPeer checks if a peer is in PersistentPeers. 
It will panic +// isPersistent checks if a peer is in PersistentPeers. It will panic // if called before optimize(). func (o *PeerManagerOptions) isPersistent(id types.NodeID) bool { if o.persistentPeers == nil { - panic("isPersistentPeer() called before optimize()") + panic("isPersistent() called before optimize()") } return o.persistentPeers[id] } @@ -268,19 +293,20 @@ func (o *PeerManagerOptions) optimize() { type PeerManager struct { selfID types.NodeID options PeerManagerOptions + metrics *Metrics rand *rand.Rand dialWaker *tmsync.Waker // wakes up DialNext() on relevant peer changes evictWaker *tmsync.Waker // wakes up EvictNext() on relevant peer changes mtx sync.Mutex store *peerStore - subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address) - dialing map[types.NodeID]bool // peers being dialed (DialNext → Dialed/DialFail) - upgrading map[types.NodeID]types.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail) - connected map[types.NodeID]bool // connected peers (Dialed/Accepted → Disconnected) - ready map[types.NodeID]bool // ready peers (Ready → Disconnected) - evict map[types.NodeID]bool // peers scheduled for eviction (Connected → EvictNext) - evicting map[types.NodeID]bool // peers being evicted (EvictNext → Disconnected) + subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address) + dialing map[types.NodeID]bool // peers being dialed (DialNext → Dialed/DialFail) + upgrading map[types.NodeID]types.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail) + connected map[types.NodeID]peerConnectionDirection // connected peers (Dialed/Accepted → Disconnected) + ready map[types.NodeID]bool // ready peers (Ready → Disconnected) + evict map[types.NodeID]bool // peers scheduled for eviction (Connected → EvictNext) + evicting map[types.NodeID]bool // peers being evicted (EvictNext → Disconnected) } // NewPeerManager creates a new peer manager. 
@@ -305,16 +331,22 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec dialWaker: tmsync.NewWaker(), evictWaker: tmsync.NewWaker(), + metrics: NopMetrics(), store: store, dialing: map[types.NodeID]bool{}, upgrading: map[types.NodeID]types.NodeID{}, - connected: map[types.NodeID]bool{}, + connected: map[types.NodeID]peerConnectionDirection{}, ready: map[types.NodeID]bool{}, evict: map[types.NodeID]bool{}, evicting: map[types.NodeID]bool{}, subscriptions: map[*PeerUpdates]*PeerUpdates{}, } + + if options.Metrics != nil { + peerManager.metrics = options.Metrics + } + if err = peerManager.configurePeers(); err != nil { return nil, err } @@ -346,6 +378,9 @@ func (m *PeerManager) configurePeers() error { } } } + + m.metrics.PeersStored.Add(float64(m.store.Size())) + return nil } @@ -375,20 +410,45 @@ func (m *PeerManager) prunePeers() error { ranked := m.store.Ranked() for i := len(ranked) - 1; i >= 0; i-- { peerID := ranked[i].ID + switch { case m.store.Size() <= int(m.options.MaxPeers): return nil case m.dialing[peerID]: - case m.connected[peerID]: + case m.isConnected(peerID): default: if err := m.store.Delete(peerID); err != nil { return err } + m.metrics.PeersStored.Add(-1) } } return nil } +func (m *PeerManager) isConnected(peerID types.NodeID) bool { + _, ok := m.connected[peerID] + return ok +} + +type connectionStats struct { + incoming uint16 + outgoing uint16 +} + +func (m *PeerManager) getConnectedInfo() connectionStats { + out := connectionStats{} + for _, direction := range m.connected { + switch direction { + case peerConnectionIncoming: + out.incoming++ + case peerConnectionOutgoing: + out.outgoing++ + } + } + return out +} + // Add adds a peer to the manager, given as an address. If the peer already // exists, the address is added to it if it isn't already present. 
This will push // low scoring peers out of the address book if it exceeds the maximum size. @@ -412,12 +472,17 @@ func (m *PeerManager) Add(address NodeAddress) (bool, error) { if ok { return false, nil } + if peer.Inactive { + return false, nil + } // else add the new address peer.AddressInfo[address] = &peerAddressInfo{Address: address} if err := m.store.Set(peer); err != nil { return false, err } + + m.metrics.PeersStored.Add(1) if err := m.prunePeers(); err != nil { return true, err } @@ -437,18 +502,35 @@ func (m *PeerManager) PeerRatio() float64 { return float64(m.store.Size()) / float64(m.options.MaxPeers) } +func (m *PeerManager) HasMaxPeerCapacity() bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + return len(m.connected) >= int(m.options.MaxConnected) +} + +func (m *PeerManager) HasDialedMaxPeers() bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + stats := m.getConnectedInfo() + + return stats.outgoing >= m.options.MaxOutgoingConnections +} + // DialNext finds an appropriate peer address to dial, and marks it as dialing. // If no peer is found, or all connection slots are full, it blocks until one // becomes available. The caller must call Dialed() or DialFailed() for the // returned peer. func (m *PeerManager) DialNext(ctx context.Context) (NodeAddress, error) { for { - address, err := m.TryDialNext() - if err != nil || (address != NodeAddress{}) { - return address, err + if address := m.TryDialNext(); (address != NodeAddress{}) { + return address, nil } + select { case <-m.dialWaker.Sleep(): + continue case <-ctx.Done(): return NodeAddress{}, ctx.Err() } @@ -457,20 +539,28 @@ func (m *PeerManager) DialNext(ctx context.Context) (NodeAddress, error) { // TryDialNext is equivalent to DialNext(), but immediately returns an empty // address if no peers or connection slots are available. 
-func (m *PeerManager) TryDialNext() (NodeAddress, error) { +func (m *PeerManager) TryDialNext() NodeAddress { m.mtx.Lock() defer m.mtx.Unlock() // We allow dialing MaxConnected+MaxConnectedUpgrade peers. Including // MaxConnectedUpgrade allows us to probe additional peers that have a // higher score than any other peers, and if successful evict it. - if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >= - int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { - return NodeAddress{}, nil + if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { + return NodeAddress{} + } + + cinfo := m.getConnectedInfo() + if m.options.MaxOutgoingConnections > 0 && cinfo.outgoing >= m.options.MaxOutgoingConnections { + return NodeAddress{} } for _, peer := range m.store.Ranked() { - if m.dialing[peer.ID] || m.connected[peer.ID] { + if m.dialing[peer.ID] || m.isConnected(peer.ID) { + continue + } + + if !peer.LastDisconnected.IsZero() && time.Since(peer.LastDisconnected) < m.options.DisconnectCooldownPeriod { continue } @@ -479,6 +569,10 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) { continue } + if id, ok := m.store.Resolve(addressInfo.Address); ok && (m.isConnected(id) || m.dialing[id]) { + continue + } + // We now have an eligible address to dial. If we're full but have // upgrade capacity (as checked above), we find a lower-scored peer // we can replace and mark it as upgrading so noone else claims it. 
@@ -489,25 +583,24 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) { if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) { upgradeFromPeer := m.findUpgradeCandidate(peer.ID, peer.Score()) if upgradeFromPeer == "" { - return NodeAddress{}, nil + return NodeAddress{} } m.upgrading[upgradeFromPeer] = peer.ID } m.dialing[peer.ID] = true - return addressInfo.Address, nil + return addressInfo.Address } } - return NodeAddress{}, nil + return NodeAddress{} } // DialFailed reports a failed dial attempt. This will make the peer available // for dialing again when appropriate (possibly after a retry timeout). -// -// FIXME: This should probably delete or mark bad addresses/peers after some time. func (m *PeerManager) DialFailed(ctx context.Context, address NodeAddress) error { m.mtx.Lock() defer m.mtx.Unlock() + m.metrics.PeersConnectedFailure.Add(1) delete(m.dialing, address.NodeID) for from, to := range m.upgrading { @@ -527,6 +620,7 @@ func (m *PeerManager) DialFailed(ctx context.Context, address NodeAddress) error addressInfo.LastDialFailure = time.Now().UTC() addressInfo.DialFailures++ + if err := m.store.Set(peer); err != nil { return err } @@ -560,6 +654,8 @@ func (m *PeerManager) Dialed(address NodeAddress, peerOpts ...func(*peerInfo)) e m.mtx.Lock() defer m.mtx.Unlock() + m.metrics.PeersConnectedSuccess.Add(1) + delete(m.dialing, address.NodeID) var upgradeFromPeer types.NodeID @@ -574,12 +670,11 @@ func (m *PeerManager) Dialed(address NodeAddress, peerOpts ...func(*peerInfo)) e if address.NodeID == m.selfID { return fmt.Errorf("rejecting connection to self (%v)", address.NodeID) } - if m.connected[address.NodeID] { + if m.isConnected(address.NodeID) { return fmt.Errorf("peer %v is already connected", address.NodeID) } if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) { - if upgradeFromPeer == "" || len(m.connected) >= - int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { + if 
upgradeFromPeer == "" || len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { return fmt.Errorf("already connected to maximum number of peers") } } @@ -589,6 +684,11 @@ func (m *PeerManager) Dialed(address NodeAddress, peerOpts ...func(*peerInfo)) e return fmt.Errorf("peer %q was removed while dialing", address.NodeID) } now := time.Now().UTC() + if peer.Inactive { + m.metrics.PeersInactivated.Add(-1) + } + peer.Inactive = false + peer.LastConnected = now for _, opt := range peerOpts { opt(&peer) @@ -603,8 +703,7 @@ func (m *PeerManager) Dialed(address NodeAddress, peerOpts ...func(*peerInfo)) e return err } - if upgradeFromPeer != "" && m.options.MaxConnected > 0 && - len(m.connected) >= int(m.options.MaxConnected) { + if upgradeFromPeer != "" && m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) { // Look for an even lower-scored peer that may have appeared since we // started the upgrade. if p, ok := m.store.Get(upgradeFromPeer); ok { @@ -613,9 +712,11 @@ func (m *PeerManager) Dialed(address NodeAddress, peerOpts ...func(*peerInfo)) e } } m.evict[upgradeFromPeer] = true + m.evictWaker.Wake() } - m.connected[peer.ID] = true - m.evictWaker.Wake() + + m.metrics.PeersConnectedOutgoing.Add(1) + m.connected[peer.ID] = peerConnectionOutgoing return nil } @@ -644,11 +745,10 @@ func (m *PeerManager) Accepted(peerID types.NodeID, peerOpts ...func(*peerInfo)) if peerID == m.selfID { return fmt.Errorf("rejecting connection from self (%v)", peerID) } - if m.connected[peerID] { + if m.isConnected(peerID) { return fmt.Errorf("peer %q is already connected", peerID) } - if m.options.MaxConnected > 0 && - len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { + if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { return fmt.Errorf("already connected to maximum number of peers") } @@ -673,6 +773,10 @@ func (m *PeerManager) 
Accepted(peerID types.NodeID, peerOpts ...func(*peerInfo)) } } + if peer.Inactive { + m.metrics.PeersInactivated.Add(-1) + } + peer.Inactive = false peer.LastConnected = time.Now().UTC() for _, opt := range peerOpts { opt(&peer) @@ -681,7 +785,8 @@ func (m *PeerManager) Accepted(peerID types.NodeID, peerOpts ...func(*peerInfo)) return err } - m.connected[peerID] = true + m.metrics.PeersConnectedIncoming.Add(1) + m.connected[peerID] = peerConnectionIncoming if upgradeFromPeer != "" { m.evict[upgradeFromPeer] = true } @@ -700,7 +805,7 @@ func (m *PeerManager) Ready(ctx context.Context, peerID types.NodeID, channels C m.mtx.Lock() defer m.mtx.Unlock() - if m.connected[peerID] { + if m.isConnected(peerID) { m.ready[peerID] = true pu := PeerUpdate{ NodeID: peerID, @@ -741,7 +846,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) { // random one. for peerID := range m.evict { delete(m.evict, peerID) - if m.connected[peerID] && !m.evicting[peerID] { + if m.isConnected(peerID) && !m.evicting[peerID] { m.evicting[peerID] = true return peerID, nil } @@ -758,7 +863,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) { ranked := m.store.Ranked() for i := len(ranked) - 1; i >= 0; i-- { peer := ranked[i] - if m.connected[peer.ID] && !m.evicting[peer.ID] { + if m.isConnected(peer.ID) && !m.evicting[peer.ID] { m.evicting[peer.ID] = true return peer.ID, nil } @@ -773,6 +878,13 @@ func (m *PeerManager) Disconnected(ctx context.Context, peerID types.NodeID) { m.mtx.Lock() defer m.mtx.Unlock() + switch m.connected[peerID] { + case peerConnectionIncoming: + m.metrics.PeersConnectedIncoming.Add(-1) + case peerConnectionOutgoing: + m.metrics.PeersConnectedOutgoing.Add(-1) + } + ready := m.ready[peerID] delete(m.connected, peerID) @@ -781,6 +893,22 @@ func (m *PeerManager) Disconnected(ctx context.Context, peerID types.NodeID) { delete(m.evicting, peerID) delete(m.ready, peerID) + if peer, ok := m.store.Get(peerID); ok { + peer.LastDisconnected = time.Now() + _ = 
m.store.Set(peer) + // launch a thread to ping the dialWaker when the + // disconnected peer can be dialed again. + go func() { + timer := time.NewTimer(m.options.DisconnectCooldownPeriod) + defer timer.Stop() + select { + case <-timer.C: + m.dialWaker.Wake() + case <-ctx.Done(): + } + }() + } + if ready { pu := PeerUpdate{ NodeID: peerID, @@ -808,17 +936,34 @@ func (m *PeerManager) Errored(peerID types.NodeID, err error) { m.mtx.Lock() defer m.mtx.Unlock() - if m.connected[peerID] { + if m.isConnected(peerID) { m.evict[peerID] = true } m.evictWaker.Wake() } +// Inactivate marks a peer as inactive which means we won't attempt to +// dial this peer again. A peer can be reactivated by successfully +// dialing and connecting to the node. +func (m *PeerManager) Inactivate(peerID types.NodeID) error { + m.mtx.Lock() + defer m.mtx.Unlock() + + peer, ok := m.store.peers[peerID] + if !ok { + return nil + } + + peer.Inactive = true + m.metrics.PeersInactivated.Add(1) + return m.store.Set(*peer) +} + // Advertise returns a list of peer addresses to advertise to a peer. // -// FIXME: This is fairly naïve and only returns the addresses of the -// highest-ranked peers. +// It sorts all peers in the peer store, and assembles a list of peers +// that is most likely to include the highest priority of peers. 
func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress { m.mtx.Lock() defer m.mtx.Unlock() @@ -831,19 +976,98 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress addresses = append(addresses, m.options.SelfAddress) } - for _, peer := range m.store.Ranked() { + var numAddresses int + var totalAbsScore int + ranked := m.store.Ranked() + seenAddresses := map[NodeAddress]struct{}{} + scores := map[types.NodeID]int{} + + // get the total number of possible addresses + for _, peer := range ranked { if peer.ID == peerID { continue } + score := int(peer.Score()) + if score < 0 { + totalAbsScore += -score + } else { + totalAbsScore += score + } + + scores[peer.ID] = score + for addr := range peer.AddressInfo { + if _, ok := m.options.PrivatePeers[addr.NodeID]; !ok { + numAddresses++ + } + } + } + + meanAbsScore := (totalAbsScore + 1) / (len(scores) + 1) + + var attempts uint16 + var addedLastIteration bool + + // if the number of addresses is less than the number of peers + // to advertise, adjust the limit downwards + if numAddresses < int(limit) { + limit = uint16(numAddresses) + } + + // collect addresses until we have the number requested + // (limit), or we've added all known addresses, or we've tried + // at least 256 times and the last time we iterated over + // remaining addresses we added no new candidates. 
+	for len(addresses) < int(limit) && (attempts < (limit*2) || !addedLastIteration) {
+		attempts++
+		addedLastIteration = false
+
+		for idx, peer := range ranked {
+			if peer.ID == peerID {
+				continue
+			}
 
-		for nodeAddr, addressInfo := range peer.AddressInfo {
 			if len(addresses) >= int(limit) {
-				return addresses
+				break
 			}
 
-			// only add non-private NodeIDs
-			if _, ok := m.options.PrivatePeers[nodeAddr.NodeID]; !ok {
-				addresses = append(addresses, addressInfo.Address)
+			for nodeAddr, addressInfo := range peer.AddressInfo {
+				if len(addresses) >= int(limit) {
+					break
+				}
+
+				// only look at each address once, by
+				// tracking a set of addresses seen
+				if _, ok := seenAddresses[addressInfo.Address]; ok {
+					continue
+				}
+
+				// only add non-private NodeIDs
+				if _, ok := m.options.PrivatePeers[nodeAddr.NodeID]; !ok {
+					// add the peer if the total number of ranked addresses
+					// will fit within the limit, or otherwise add
+					// addresses based on a coin flip.
+
+					// the coin flip is based on the score, commonly, but
+					// 10% of the time we'll randomly insert a "losing"
+					// peer.
+
+					// nolint:gosec // G404: Use of weak random number generator
+					if numAddresses <= int(limit) || rand.Intn((meanAbsScore*2)+1) <= scores[peer.ID]+1 || rand.Intn((idx+1)*10) <= idx+1 {
+						addresses = append(addresses, addressInfo.Address)
+						addedLastIteration = true
+						seenAddresses[addressInfo.Address] = struct{}{}
+					}
+				} else {
+					seenAddresses[addressInfo.Address] = struct{}{}
+					// if the number of addresses
+					// is the same as the limit,
+					// we should remove private
+					// addresses from the limit so
+					// we can still return early.
+ if numAddresses == int(limit) { + limit-- + } + } } } } @@ -917,8 +1141,14 @@ func (m *PeerManager) processPeerEvent(ctx context.Context, pu PeerUpdate) { switch pu.Status { case PeerStatusBad: + if m.store.peers[pu.NodeID].MutableScore == math.MinInt16 { + return + } m.store.peers[pu.NodeID].MutableScore-- case PeerStatusGood: + if m.store.peers[pu.NodeID].MutableScore == math.MaxInt16 { + return + } m.store.peers[pu.NodeID].MutableScore++ } } @@ -1009,9 +1239,11 @@ func (m *PeerManager) findUpgradeCandidate(id types.NodeID, score PeerScore) typ for i := len(ranked) - 1; i >= 0; i-- { candidate := ranked[i] switch { + case candidate.ID == id: + continue case candidate.Score() >= score: return "" // no further peers can be scored lower, due to sorting - case !m.connected[candidate.ID]: + case !m.isConnected(candidate.ID): case m.evict[candidate.ID]: case m.evicting[candidate.ID]: case m.upgrading[candidate.ID] != "": @@ -1050,44 +1282,6 @@ func (m *PeerManager) retryDelay(failures uint32, persistent bool) time.Duration return delay } -// GetHeight returns a peer's height, as reported via SetHeight, or 0 if the -// peer or height is unknown. -// -// FIXME: This is a temporary workaround to share state between the consensus -// and mempool reactors, carried over from the legacy P2P stack. Reactors should -// not have dependencies on each other, instead tracking this themselves. -func (m *PeerManager) GetHeight(peerID types.NodeID) int64 { - m.mtx.Lock() - defer m.mtx.Unlock() - - peer, _ := m.store.Get(peerID) - return peer.Height -} - -// SetHeight stores a peer's height, making it available via GetHeight. -// -// FIXME: This is a temporary workaround to share state between the consensus -// and mempool reactors, carried over from the legacy P2P stack. Reactors should -// not have dependencies on each other, instead tracking this themselves. 
-func (m *PeerManager) SetHeight(peerID types.NodeID, height int64) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - peer, ok := m.store.Get(peerID) - if !ok { - peer = m.newPeerInfo(peerID) - } - peer.Height = height - return m.store.Set(peer) -} - -// SetProTxHashToPeerInfo sets a proTxHash in peerInfo.proTxHash to keep this value in a store -func SetProTxHashToPeerInfo(proTxHash types.ProTxHash) func(info *peerInfo) { - return func(info *peerInfo) { - info.ProTxHash = proTxHash.Copy() - } -} - // peerStore stores information about peers. It is not thread-safe, assuming it // is only used by PeerManager which handles concurrency control. This allows // the manager to execute multiple operations atomically via its own mutex. @@ -1098,6 +1292,7 @@ func SetProTxHashToPeerInfo(proTxHash types.ProTxHash) func(info *peerInfo) { type peerStore struct { db dbm.DB peers map[types.NodeID]*peerInfo + index map[NodeAddress]types.NodeID ranked []*peerInfo // cache for Ranked(), nil invalidates cache } @@ -1117,6 +1312,7 @@ func newPeerStore(db dbm.DB) (*peerStore, error) { // loadPeers loads all peers from the database into memory. func (s *peerStore) loadPeers() error { peers := map[types.NodeID]*peerInfo{} + addrs := map[NodeAddress]types.NodeID{} start, end := keyPeerInfoRange() iter, err := s.db.Iterator(start, end) @@ -1136,11 +1332,18 @@ func (s *peerStore) loadPeers() error { return fmt.Errorf("invalid peer data: %w", err) } peers[peer.ID] = peer + for addr := range peer.AddressInfo { + // TODO maybe check to see if we've seen this + // addr before for a different peer, there + // could be duplicates. + addrs[addr] = peer.ID + } } if iter.Error() != nil { return iter.Error() } s.peers = peers + s.index = addrs s.ranked = nil // invalidate cache if populated return nil } @@ -1152,6 +1355,12 @@ func (s *peerStore) Get(id types.NodeID) (peerInfo, bool) { return peer.Copy(), ok } +// Resolve returns the peer ID for a given node address if known. 
+func (s *peerStore) Resolve(addr NodeAddress) (types.NodeID, bool) { + id, ok := s.index[addr] + return id, ok +} + // Set stores peer data. The input data will be copied, and can safely be reused // by the caller. func (s *peerStore) Set(peer peerInfo) error { @@ -1180,20 +1389,29 @@ func (s *peerStore) Set(peer peerInfo) error { // update the existing pointer address. *current = peer } + for addr := range peer.AddressInfo { + s.index[addr] = peer.ID + } return nil } // Delete deletes a peer, or does nothing if it does not exist. func (s *peerStore) Delete(id types.NodeID) error { - if _, ok := s.peers[id]; !ok { + peer, ok := s.peers[id] + if !ok { return nil } - if err := s.db.Delete(keyPeerInfo(id)); err != nil { - return err + for _, addr := range peer.AddressInfo { + delete(s.index, addr.Address) } delete(s.peers, id) s.ranked = nil + + if err := s.db.Delete(keyPeerInfo(id)); err != nil { + return err + } + return nil } @@ -1229,8 +1447,6 @@ func (s *peerStore) Ranked() []*peerInfo { s.ranked = append(s.ranked, peer) } sort.Slice(s.ranked, func(i, j int) bool { - // FIXME: If necessary, consider precomputing scores before sorting, - // to reduce the number of Score() calls. return s.ranked[i].Score() > s.ranked[j].Score() }) return s.ranked @@ -1243,17 +1459,18 @@ func (s *peerStore) Size() int { // peerInfo contains peer information stored in a peerStore. type peerInfo struct { - ID types.NodeID - AddressInfo map[NodeAddress]*peerAddressInfo - LastConnected time.Time + ID types.NodeID + AddressInfo map[NodeAddress]*peerAddressInfo + LastConnected time.Time + LastDisconnected time.Time // These fields are ephemeral, i.e. not persisted to the database. 
Persistent bool - Seed bool Height int64 FixedScore PeerScore // mainly for tests MutableScore int64 // updated by router + Inactive bool ProTxHash types.ProTxHash } @@ -1264,6 +1481,7 @@ func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) { p := &peerInfo{ ID: types.NodeID(msg.ID), AddressInfo: map[NodeAddress]*peerAddressInfo{}, + Inactive: msg.Inactive, } if msg.LastConnected != nil { p.LastConnected = *msg.LastConnected @@ -1286,6 +1504,7 @@ func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) { func (p *peerInfo) ToProto() *p2pproto.PeerInfo { msg := &p2pproto.PeerInfo{ ID: string(p.ID), + Inactive: p.Inactive, LastConnected: &p.LastConnected, ProTxHash: p.ProTxHash, } @@ -1295,6 +1514,7 @@ func (p *peerInfo) ToProto() *p2pproto.PeerInfo { if msg.LastConnected.IsZero() { msg.LastConnected = nil } + return msg } @@ -1312,6 +1532,46 @@ func (p *peerInfo) Copy() peerInfo { return c } +// LastDialed returns when the peer was last dialed, and if that dial +// attempt was successful. If the peer was never dialed the time stamp +// is zero time. +func (p *peerInfo) LastDialed() (time.Time, bool) { + var ( + last time.Time + success bool + ) + last = last.Add(-1) // so it's after the epoch + + for _, addr := range p.AddressInfo { + if addr.LastDialFailure.Equal(addr.LastDialSuccess) { + if addr.LastDialFailure.IsZero() { + continue + } + if last.After(addr.LastDialSuccess) { + continue + } + success = true + last = addr.LastDialSuccess + } + if addr.LastDialFailure.After(last) { + success = false + last = addr.LastDialFailure + } + if addr.LastDialSuccess.After(last) || last.Equal(addr.LastDialSuccess) { + success = true + last = addr.LastDialSuccess + } + } + + // if we never modified last, then we should return it to the + // zero value + if last.Add(1).IsZero() { + return time.Time{}, success + } + + return last, success +} + // Score calculates a score for the peer. Higher-scored peers will be // preferred over lower scores. 
func (p *peerInfo) Score() PeerScore { @@ -1333,8 +1593,8 @@ func (p *peerInfo) Score() PeerScore { score -= int64(addr.DialFailures) } - if score <= 0 { - return 0 + if score < math.MinInt16 { + score = math.MinInt16 } return PeerScore(score) @@ -1455,5 +1715,13 @@ func (m *PeerManager) UpdatePeerInfo(nodeID types.NodeID, modifier func(peerInfo func (m *PeerManager) IsDialingOrConnected(nodeID types.NodeID) bool { m.mtx.Lock() defer m.mtx.Unlock() - return m.dialing[nodeID] || m.connected[nodeID] + _, ok := m.connected[nodeID] + return m.dialing[nodeID] || ok +} + +// SetProTxHashToPeerInfo sets a proTxHash in peerInfo.proTxHash to keep this value in a store +func SetProTxHashToPeerInfo(proTxHash types.ProTxHash) func(info *peerInfo) { + return func(info *peerInfo) { + info.ProTxHash = proTxHash.Copy() + } } diff --git a/internal/p2p/peermanager_scoring_test.go b/internal/p2p/peermanager_scoring_test.go index a45df0b728..b454da151f 100644 --- a/internal/p2p/peermanager_scoring_test.go +++ b/internal/p2p/peermanager_scoring_test.go @@ -34,7 +34,7 @@ func TestPeerScoring(t *testing.T) { t.Run("Synchronous", func(t *testing.T) { // update the manager and make sure it's correct - require.EqualValues(t, 0, peerManager.Scores()[id]) + require.Zero(t, peerManager.Scores()[id]) // add a bunch of good status updates and watch things increase. 
for i := 1; i < 10; i++ { @@ -97,3 +97,173 @@ func TestPeerScoring(t *testing.T) { } }) } + +func makeMockPeerStore(t *testing.T, peers ...peerInfo) *peerStore { + t.Helper() + s, err := newPeerStore(dbm.NewMemDB()) + if err != nil { + t.Fatal(err) + } + for idx := range peers { + if err := s.Set(peers[idx]); err != nil { + t.Fatal(err) + } + } + return s +} + +func TestPeerRanking(t *testing.T) { + t.Run("InactiveSecond", func(t *testing.T) { + t.Skip("inactive status is not currently factored into peer rank.") + + store := makeMockPeerStore(t, + peerInfo{ID: "second", Inactive: true}, + peerInfo{ID: "first", Inactive: false}, + ) + + ranked := store.Ranked() + if len(ranked) != 2 { + t.Fatal("missing peer in ranked output") + } + if ranked[0].ID != "first" { + t.Error("inactive peer is first") + } + if ranked[1].ID != "second" { + t.Error("active peer is second") + } + }) + t.Run("ScoreOrder", func(t *testing.T) { + for _, test := range []struct { + Name string + First int64 + Second int64 + }{ + { + Name: "Mirror", + First: 100, + Second: -100, + }, + { + Name: "VeryLow", + First: 0, + Second: -100, + }, + { + Name: "High", + First: 300, + Second: 256, + }, + } { + t.Run(test.Name, func(t *testing.T) { + store := makeMockPeerStore(t, + peerInfo{ + ID: "second", + MutableScore: test.Second, + }, + peerInfo{ + ID: "first", + MutableScore: test.First, + }) + + ranked := store.Ranked() + if len(ranked) != 2 { + t.Fatal("missing peer in ranked output") + } + if ranked[0].ID != "first" { + t.Error("higher peer is first") + } + if ranked[1].ID != "second" { + t.Error("higher peer is second") + } + }) + } + }) +} + +func TestLastDialed(t *testing.T) { + t.Run("Zero", func(t *testing.T) { + p := &peerInfo{} + ts, ok := p.LastDialed() + if !ts.IsZero() { + t.Error("timestamp should be zero:", ts) + } + if ok { + t.Error("peer reported success, despite none") + } + }) + t.Run("NeverDialed", func(t *testing.T) { + p := &peerInfo{ + AddressInfo: 
map[NodeAddress]*peerAddressInfo{ + {NodeID: "kip"}: {}, + {NodeID: "merlin"}: {}, + }, + } + ts, ok := p.LastDialed() + if !ts.IsZero() { + t.Error("timestamp should be zero:", ts) + } + if ok { + t.Error("peer reported success, despite none") + } + }) + t.Run("Ordered", func(t *testing.T) { + base := time.Now() + for _, test := range []struct { + Name string + SuccessTime time.Time + FailTime time.Time + ExpectedSuccess bool + }{ + { + Name: "Zero", + }, + { + Name: "Success", + SuccessTime: base.Add(time.Hour), + FailTime: base, + ExpectedSuccess: true, + }, + { + Name: "Equal", + SuccessTime: base, + FailTime: base, + ExpectedSuccess: true, + }, + { + Name: "Failure", + SuccessTime: base, + FailTime: base.Add(time.Hour), + ExpectedSuccess: false, + }, + } { + t.Run(test.Name, func(t *testing.T) { + p := &peerInfo{ + AddressInfo: map[NodeAddress]*peerAddressInfo{ + {NodeID: "kip"}: {LastDialSuccess: test.SuccessTime}, + {NodeID: "merlin"}: {LastDialFailure: test.FailTime}, + }, + } + ts, ok := p.LastDialed() + if test.ExpectedSuccess && !ts.Equal(test.SuccessTime) { + if !ts.Equal(test.FailTime) { + t.Fatal("got unexpected timestamp:", ts) + } + + t.Error("last dialed time reported incorrect value:", ts) + } + if !test.ExpectedSuccess && !ts.Equal(test.FailTime) { + if !ts.Equal(test.SuccessTime) { + t.Fatal("got unexpected timestamp:", ts) + } + + t.Error("last dialed time reported incorrect value:", ts) + } + if test.ExpectedSuccess != ok { + t.Error("test reported incorrect outcome for last dialed type") + } + }) + } + + }) + +} diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index 82d1e26932..3e72c333b2 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -296,6 +296,10 @@ func TestPeerManager_DialNext(t *testing.T) { } func TestPeerManager_DialNext_Retry(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() @@ -384,16 +388,14 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) // Add b. We shouldn't be able to dial it, due to MaxConnected. added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) // Spawn a goroutine to fail a's dial attempt. @@ -427,8 +429,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.DialFailed(ctx, dial)) failed := time.Now() @@ -458,8 +459,7 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { err = peerManager.Accepted(a.NodeID) require.NoError(t, err) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Zero(t, dial) dctx, dcancel := context.WithTimeout(ctx, 300*time.Millisecond) @@ -490,8 +490,7 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.Dialed(a)) @@ -499,16 +498,14 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, b, dial) // At 
this point, adding c will not allow dialing it. added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) } @@ -524,11 +521,11 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ PeerScores: map[types.NodeID]p2p.PeerScore{ - a.NodeID: 0, - b.NodeID: 1, - c.NodeID: 2, - d.NodeID: 3, - e.NodeID: 0, + a.NodeID: p2p.PeerScore(0), + b.NodeID: p2p.PeerScore(1), + c.NodeID: p2p.PeerScore(2), + d.NodeID: p2p.PeerScore(3), + e.NodeID: p2p.PeerScore(0), }, PersistentPeers: []types.NodeID{c.NodeID, d.NodeID}, MaxConnected: 2, @@ -540,7 +537,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() + dial := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, a, dial) require.NoError(t, peerManager.Dialed(a)) @@ -549,8 +546,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, b, dial) // Even though we are at capacity, we should be allowed to dial c for an @@ -558,8 +554,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, c, dial) // However, since we're using all upgrade slots now, we can't add and dial @@ -567,24 +562,20 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { added, err = peerManager.Add(d) require.NoError(t, err) 
require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) // We go through with c's upgrade. require.NoError(t, peerManager.Dialed(c)) // Still can't dial d. - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) // Now, if we disconnect a, we should be allowed to dial d because we have a // free upgrade slot. + require.Error(t, peerManager.Dialed(d)) peerManager.Disconnected(ctx, a.NodeID) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) - require.Equal(t, d, dial) require.NoError(t, peerManager.Dialed(d)) // However, if we disconnect b (such that only c and d are connected), we @@ -594,8 +585,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { added, err = peerManager.Add(e) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) } @@ -605,7 +595,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) { c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, + PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1), c.NodeID: 1}, MaxConnected: 1, MaxConnectedUpgrade: 2, }) @@ -615,8 +605,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.Dialed(a)) @@ -624,8 +613,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, 
err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, b, dial) // Adding c and dialing it will fail, because a is the only connected @@ -633,8 +621,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Empty(t, dial) } @@ -655,22 +642,19 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) // Adding a's TCP address will not dispense a, since it's already dialing. added, err = peerManager.Add(aTCP) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) // Marking a as dialed will still not dispense it. require.NoError(t, peerManager.Dialed(a)) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) // Adding b and accepting a connection from it will not dispense it either. @@ -678,8 +662,7 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(bID)) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) } @@ -708,16 +691,14 @@ func TestPeerManager_TryDialNext_Multiple(t *testing.T) { // All addresses should be dispensed as long as dialing them has failed. 
dial := []p2p.NodeAddress{} for range addresses { - address, err := peerManager.TryDialNext() - require.NoError(t, err) + address := peerManager.TryDialNext() require.NotZero(t, address) require.NoError(t, peerManager.DialFailed(ctx, address)) dial = append(dial, address) } require.ElementsMatch(t, dial, addresses) - address, err := peerManager.TryDialNext() - require.NoError(t, err) + address := peerManager.TryDialNext() require.Zero(t, address) } @@ -742,15 +723,14 @@ func TestPeerManager_DialFailed(t *testing.T) { // Dialing and then calling DialFailed with a different address (same // NodeID) should unmark as dialing and allow us to dial the other address // again, but not register the failed address. - dial, err := peerManager.TryDialNext() + dial := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, a, dial) require.NoError(t, peerManager.DialFailed(ctx, p2p.NodeAddress{ Protocol: "tcp", NodeID: aID, Hostname: "localhost"})) require.Equal(t, []p2p.NodeAddress{a}, peerManager.Addresses(aID)) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, a, dial) // Calling DialFailed on same address twice should be fine. 
@@ -771,7 +751,10 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, + PeerScores: map[types.NodeID]p2p.PeerScore{ + b.NodeID: p2p.PeerScore(1), + c.NodeID: p2p.PeerScore(2), + }, MaxConnected: 1, MaxConnectedUpgrade: 2, }) @@ -781,8 +764,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.Dialed(a)) @@ -790,8 +772,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, b, dial) // Adding c and dialing it will fail, even though it could upgrade a and we @@ -800,14 +781,12 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Empty(t, dial) // Failing b's dial will now make c available for dialing. 
require.NoError(t, peerManager.DialFailed(ctx, b)) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, c, dial) } @@ -822,8 +801,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.Dialed(a)) @@ -833,8 +811,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, b, dial) require.NoError(t, peerManager.Accepted(b.NodeID)) @@ -863,8 +840,7 @@ func TestPeerManager_Dialed_MaxConnected(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) // Marking b as dialed in the meanwhile (even without TryDialNext) @@ -887,7 +863,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 2, MaxConnectedUpgrade: 1, - PeerScores: map[types.NodeID]p2p.PeerScore{c.NodeID: 1, d.NodeID: 1}, + PeerScores: map[types.NodeID]p2p.PeerScore{c.NodeID: p2p.PeerScore(1), d.NodeID: 1}, }) require.NoError(t, err) @@ -906,8 +882,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, c, dial) require.NoError(t, peerManager.Dialed(c)) @@ -937,7 +912,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) { 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, MaxConnectedUpgrade: 2, - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, + PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1), c.NodeID: 1}, }) require.NoError(t, err) @@ -951,8 +926,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, b, dial) require.NoError(t, peerManager.Dialed(b)) @@ -961,8 +935,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Empty(t, dial) // a should now be evicted. @@ -984,10 +957,10 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { MaxConnected: 2, MaxConnectedUpgrade: 1, PeerScores: map[types.NodeID]p2p.PeerScore{ - a.NodeID: 3, - b.NodeID: 2, - c.NodeID: 10, - d.NodeID: 1, + a.NodeID: p2p.PeerScore(3), + b.NodeID: p2p.PeerScore(2), + c.NodeID: p2p.PeerScore(10), + d.NodeID: p2p.PeerScore(1), }, }) require.NoError(t, err) @@ -1008,8 +981,7 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, c, dial) // In the meanwhile, a disconnects and d connects. 
d is even lower-scored @@ -1040,9 +1012,9 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { MaxConnected: 2, MaxConnectedUpgrade: 1, PeerScores: map[types.NodeID]p2p.PeerScore{ - a.NodeID: 1, - b.NodeID: 2, - c.NodeID: 3, + a.NodeID: p2p.PeerScore(1), + b.NodeID: p2p.PeerScore(2), + c.NodeID: p2p.PeerScore(3), }, }) require.NoError(t, err) @@ -1062,7 +1034,7 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() + dial := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, c, dial) @@ -1108,8 +1080,7 @@ func TestPeerManager_Accepted(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, c, dial) require.NoError(t, peerManager.Accepted(c.NodeID)) require.Error(t, peerManager.Dialed(c)) @@ -1118,8 +1089,7 @@ func TestPeerManager_Accepted(t *testing.T) { added, err = peerManager.Add(d) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, d, dial) require.NoError(t, peerManager.Dialed(d)) require.Error(t, peerManager.Accepted(d.NodeID)) @@ -1161,8 +1131,8 @@ func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ PeerScores: map[types.NodeID]p2p.PeerScore{ - c.NodeID: 1, - d.NodeID: 2, + c.NodeID: p2p.PeerScore(1), + d.NodeID: p2p.PeerScore(2), }, MaxConnected: 1, MaxConnectedUpgrade: 1, @@ -1209,8 +1179,8 @@ func TestPeerManager_Accepted_Upgrade(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ PeerScores: map[types.NodeID]p2p.PeerScore{ - b.NodeID: 1, - c.NodeID: 1, + b.NodeID: p2p.PeerScore(1), + 
c.NodeID: p2p.PeerScore(1), }, MaxConnected: 1, MaxConnectedUpgrade: 2, @@ -1252,8 +1222,8 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ PeerScores: map[types.NodeID]p2p.PeerScore{ - b.NodeID: 1, - c.NodeID: 1, + b.NodeID: p2p.PeerScore(1), + c.NodeID: p2p.PeerScore(1), }, MaxConnected: 1, MaxConnectedUpgrade: 2, @@ -1270,8 +1240,7 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, b, dial) // a has already been claimed as an upgrade of a, so accepting @@ -1428,7 +1397,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, MaxConnectedUpgrade: 1, - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1}, + PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1)}, }) require.NoError(t, err) @@ -1445,8 +1414,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { added, err := peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, b, dial) require.NoError(t, peerManager.Dialed(b)) }() @@ -1469,7 +1437,9 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, MaxConnectedUpgrade: 1, - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1}, + PeerScores: map[types.NodeID]p2p.PeerScore{ + b.NodeID: p2p.PeerScore(1), + }, }) require.NoError(t, err) @@ -1578,13 +1548,11 @@ func TestPeerManager_Disconnected(t *testing.T) { // Disconnecting a dialing peer does not 
unmark it as dialing, to avoid // dialing it multiple times in parallel. - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) peerManager.Disconnected(ctx, a.NodeID) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) } @@ -1657,8 +1625,7 @@ func TestPeerManager_Subscribe(t *testing.T) { require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates()) // Outbound connection with peer error and eviction. - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.Empty(t, sub.Updates()) @@ -1681,8 +1648,7 @@ func TestPeerManager_Subscribe(t *testing.T) { require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates()) // Outbound connection with dial failure. - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, a, dial) require.Empty(t, sub.Updates()) @@ -1787,8 +1753,7 @@ func TestPeerManager_Close(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.DialFailed(ctx, a)) } @@ -1833,6 +1798,7 @@ func TestPeerManager_Advertise(t *testing.T) { require.NoError(t, err) require.True(t, added) + require.Len(t, peerManager.Advertise(dID, 100), 6) // d should get all addresses. require.ElementsMatch(t, []p2p.NodeAddress{ aTCP, aMem, bTCP, bMem, cTCP, cMem, @@ -1846,10 +1812,18 @@ func TestPeerManager_Advertise(t *testing.T) { // Asking for 0 addresses should return, well, 0. require.Empty(t, peerManager.Advertise(aID, 0)) - // Asking for 2 addresses should get the highest-rated ones, i.e. a. 
- require.ElementsMatch(t, []p2p.NodeAddress{ - aTCP, aMem, - }, peerManager.Advertise(dID, 2)) + // Asking for 2 addresses should get two addresses + // the content of the list when there are two + addrs := peerManager.Advertise(dID, 2) + require.Len(t, addrs, 2) + for _, addr := range addrs { + if dID == addr.NodeID { + t.Fatal("never advertise self") + } + if cID == addr.NodeID { + t.Fatal("should not have returned the lowest ranked peer") + } + } } func TestPeerManager_Advertise_Self(t *testing.T) { @@ -1868,38 +1842,3 @@ func TestPeerManager_Advertise_Self(t *testing.T) { self, }, peerManager.Advertise(dID, 100)) } - -func TestPeerManager_SetHeight_GetHeight(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - - db := dbm.NewMemDB() - peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{}) - require.NoError(t, err) - - // Getting a height should default to 0, for unknown peers and - // for known peers without height. - added, err := peerManager.Add(a) - require.NoError(t, err) - require.True(t, added) - require.EqualValues(t, 0, peerManager.GetHeight(a.NodeID)) - require.EqualValues(t, 0, peerManager.GetHeight(b.NodeID)) - - // Setting a height should work for a known node. - require.NoError(t, peerManager.SetHeight(a.NodeID, 3)) - require.EqualValues(t, 3, peerManager.GetHeight(a.NodeID)) - - // Setting a height should add an unknown node. - require.Equal(t, []types.NodeID{a.NodeID}, peerManager.Peers()) - require.NoError(t, peerManager.SetHeight(b.NodeID, 7)) - require.EqualValues(t, 7, peerManager.GetHeight(b.NodeID)) - require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) - - // The heights should not be persisted. 
- peerManager, err = p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{}) - require.NoError(t, err) - - require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) - require.Zero(t, peerManager.GetHeight(a.NodeID)) - require.Zero(t, peerManager.GetHeight(b.NodeID)) -} diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index bd47373265..9618433f4d 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -145,7 +145,7 @@ func (r *Reactor) OnStop() {} // processPexCh implements a blocking event loop where we listen for p2p // Envelope messages from the pexCh. -func (r *Reactor) processPexCh(ctx context.Context, pexCh *p2p.Channel) { +func (r *Reactor) processPexCh(ctx context.Context, pexCh p2p.Channel) { incoming := make(chan *p2p.Envelope) go func() { defer close(incoming) @@ -192,8 +192,7 @@ func (r *Reactor) processPexCh(ctx context.Context, pexCh *p2p.Channel) { // A request from another peer, or a response to one of our requests. dur, err := r.handlePexMessage(ctx, envelope, pexCh) if err != nil { - r.logger.Error("failed to process message", - "ch_id", envelope.ChannelID, "envelope", envelope, "err", err) + r.logger.Error("failed to process message", "ch_id", envelope.ChannelID, "envelope", envelope, "err", err) if serr := pexCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, @@ -225,7 +224,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerU // handlePexMessage handles envelopes sent from peers on the PexChannel. // If an update was received, a new polling interval is returned; otherwise the // duration is 0. 
-func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope, pexCh *p2p.Channel) (time.Duration, error) { +func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope, pexCh p2p.Channel) (time.Duration, error) { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -308,7 +307,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // that peer a request for more peer addresses. The chosen peer is moved into // the requestsSent bucket so that we will not attempt to contact them again // until they've replied or updated. -func (r *Reactor) sendRequestForPeers(ctx context.Context, pexCh *p2p.Channel) error { +func (r *Reactor) sendRequestForPeers(ctx context.Context, pexCh p2p.Channel) error { r.mtx.Lock() defer r.mtx.Unlock() if len(r.availablePeers) == 0 { @@ -359,7 +358,8 @@ func (r *Reactor) calculateNextRequestTime(added int) time.Duration { // If the peer store is nearly full, wait the maximum interval. 
if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 { r.logger.Debug("Peer manager is nearly full", - "sleep_period", fullCapacityInterval, "ratio", ratio) + "sleep_period", fullCapacityInterval, + "ratio", ratio) return fullCapacityInterval } diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index 840562ab43..9257a317b4 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -275,7 +275,7 @@ type singleTestReactor struct { pexInCh chan p2p.Envelope pexOutCh chan p2p.Envelope pexErrCh chan p2p.PeerError - pexCh *p2p.Channel + pexCh p2p.Channel peerCh chan p2p.PeerUpdate manager *p2p.PeerManager } @@ -287,9 +287,11 @@ func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor { pexInCh := make(chan p2p.Envelope, chBuf) pexOutCh := make(chan p2p.Envelope, chBuf) pexErrCh := make(chan p2p.PeerError, chBuf) + + chDesc := pex.ChannelDescriptor() pexCh := p2p.NewChannel( - p2p.ChannelID(pex.PexChannel), - new(p2pproto.PexMessage), + chDesc.ID, + chDesc.Name, pexInCh, pexOutCh, pexErrCh, @@ -300,7 +302,7 @@ func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor { peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(context.Context, *p2p.ChannelDescriptor) (p2p.Channel, error) { return pexCh, nil } @@ -325,7 +327,7 @@ type reactorTestSuite struct { logger log.Logger reactors map[types.NodeID]*pex.Reactor - pexChannels map[types.NodeID]*p2p.Channel + pexChannels map[types.NodeID]p2p.Channel peerChans map[types.NodeID]chan p2p.PeerUpdate peerUpdates map[types.NodeID]*p2p.PeerUpdates @@ -368,7 +370,7 @@ func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorT logger: log.NewNopLogger().With("testCase", t.Name()), network: p2ptest.MakeNetwork(ctx, t, networkOpts), reactors: 
make(map[types.NodeID]*pex.Reactor, realNodes), - pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes), + pexChannels: make(map[types.NodeID]p2p.Channel, opts.TotalNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes), total: opts.TotalNodes, @@ -389,7 +391,7 @@ func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorT rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], chBuf) rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) - chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(context.Context, *p2p.ChannelDescriptor) (p2p.Channel, error) { return rts.pexChannels[nodeID], nil } @@ -450,7 +452,7 @@ func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize) r.network.Nodes[nodeID].PeerManager.Register(ctx, r.peerUpdates[nodeID]) - chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(context.Context, *p2p.ChannelDescriptor) (p2p.Channel, error) { return r.pexChannels[nodeID], nil } diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index 21c950dfb0..f53c988a6f 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -31,8 +31,16 @@ func (pq priorityQueue) get(i int) *pqEnvelope { return pq[i] } func (pq priorityQueue) Len() int { return len(pq) } func (pq priorityQueue) Less(i, j int) bool { - // if both elements have the same priority, prioritize based on most recent + // if both elements have the same priority, prioritize based + // on most recent and largest if pq[i].priority == pq[j].priority { + diff := pq[i].timestamp.Sub(pq[j].timestamp) + if diff < 0 { + diff *= -1 + } + if diff < 10*time.Millisecond { + return pq[i].size > pq[j].size + } return 
pq[i].timestamp.After(pq[j].timestamp) } @@ -70,6 +78,7 @@ var _ queue = (*pqScheduler)(nil) type pqScheduler struct { logger log.Logger metrics *Metrics + lc *metricsLabelCache size uint sizes map[uint]uint // cumulative priority sizes pq *priorityQueue @@ -88,6 +97,7 @@ type pqScheduler struct { func newPQScheduler( logger log.Logger, m *Metrics, + lc *metricsLabelCache, chDescs []*ChannelDescriptor, enqueueBuf, dequeueBuf, capacity uint, ) *pqScheduler { @@ -117,6 +127,7 @@ func newPQScheduler( return &pqScheduler{ logger: logger.With("router", "scheduler"), metrics: m, + lc: lc, chDescs: chDescsCopy, capacity: capacity, chPriorities: chPriorities, @@ -197,13 +208,11 @@ func (s *pqScheduler) process(ctx context.Context) { } else { pqEnvTmpChIDStr := strconv.Itoa(int(pqEnvTmp.envelope.ChannelID)) s.metrics.PeerQueueDroppedMsgs.With("ch_id", pqEnvTmpChIDStr).Add(1) - s.logger.Debug( - "dropped envelope", + s.logger.Debug("dropped envelope", "ch_id", pqEnvTmpChIDStr, "priority", pqEnvTmp.priority, "msg_size", pqEnvTmp.size, - "capacity", s.capacity, - ) + "capacity", s.capacity) s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnvTmp.envelope.To)).Add(float64(-pqEnvTmp.size)) @@ -227,13 +236,11 @@ func (s *pqScheduler) process(ctx context.Context) { // There is not sufficient capacity to drop lower priority Envelopes, // so we drop the incoming Envelope. 
s.metrics.PeerQueueDroppedMsgs.With("ch_id", chIDStr).Add(1) - s.logger.Debug( - "dropped envelope", + s.logger.Debug("dropped envelope", "ch_id", chIDStr, "priority", pqEnv.priority, "msg_size", pqEnv.size, - "capacity", s.capacity, - ) + "capacity", s.capacity) } } @@ -251,7 +258,7 @@ func (s *pqScheduler) process(ctx context.Context) { s.metrics.PeerSendBytesTotal.With( "chID", chIDStr, "peer_id", string(pqEnv.envelope.To), - "message_type", s.metrics.ValueToMetricLabel(pqEnv.envelope.Message)).Add(float64(pqEnv.size)) + "message_type", s.lc.ValueToMetricLabel(pqEnv.envelope.Message)).Add(float64(pqEnv.size)) s.metrics.PeerPendingSendBytes.With( "peer_id", string(pqEnv.envelope.To)).Add(float64(-pqEnv.size)) select { @@ -269,12 +276,10 @@ func (s *pqScheduler) process(ctx context.Context) { } func (s *pqScheduler) push(pqEnv *pqEnvelope) { - chIDStr := strconv.Itoa(int(pqEnv.envelope.ChannelID)) - // enqueue the incoming Envelope heap.Push(s.pq, pqEnv) s.size += pqEnv.size - s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Add(float64(pqEnv.size)) + s.metrics.PeerQueueMsgSize.With("ch_id", strconv.Itoa(int(pqEnv.envelope.ChannelID))).Add(float64(pqEnv.size)) // Update the cumulative sizes by adding the Envelope's size to every // priority less than or equal to it. 
diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go index 22ecbcecb5..d1057ac7e2 100644 --- a/internal/p2p/pqueue_test.go +++ b/internal/p2p/pqueue_test.go @@ -17,7 +17,7 @@ func TestCloseWhileDequeueFull(t *testing.T) { chDescs := []*ChannelDescriptor{ {ID: 0x01, Priority: 1}, } - pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), chDescs, uint(enqueueLength), 1, 120) + pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), newMetricsLabelCache(), chDescs, uint(enqueueLength), 1, 120) for i := 0; i < enqueueLength; i++ { pqueue.enqueue() <- Envelope{ diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 4a5461fc12..6683e5219e 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "math/rand" "net" "runtime" "sync" @@ -14,6 +13,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/tendermint/tendermint/crypto" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" @@ -62,12 +62,6 @@ type RouterOptions struct { // return an error to reject the peer. FilterPeerByID func(context.Context, types.NodeID) error - // DialSleep controls the amount of time that the router - // sleeps between dialing peers. If not set, a default value - // is used that sleeps for a (random) amount of time up to 3 - // seconds between submitting each peer to be dialed. - DialSleep func(context.Context) - // NumConcrruentDials controls how many parallel go routines // are used to dial peers. This defaults to the value of // runtime.NumCPU. @@ -75,8 +69,9 @@ type RouterOptions struct { } const ( - queueTypeFifo = "fifo" - queueTypePriority = "priority" + queueTypeFifo = "fifo" + queueTypePriority = "priority" + queueTypeSimplePriority = "simple-priority" ) // Validate validates router options. 
@@ -84,7 +79,7 @@ func (o *RouterOptions) Validate() error { switch o.QueueType { case "": o.QueueType = queueTypeFifo - case queueTypeFifo, queueTypePriority: + case queueTypeFifo, queueTypePriority, queueTypeSimplePriority: // pass default: return fmt.Errorf("queue type %q is not supported", o.QueueType) @@ -148,7 +143,9 @@ type Router struct { *service.BaseService logger log.Logger - metrics *Metrics + metrics *Metrics + lc *metricsLabelCache + options RouterOptions privKey crypto.PrivKey peerManager *PeerManager @@ -193,6 +190,7 @@ func NewRouter( router := &Router{ logger: logger, metrics: metrics, + lc: newMetricsLabelCache(), privKey: privKey, nodeInfoProducer: nodeInfoProducer, connTracker: newConnTracker( @@ -226,11 +224,14 @@ func (r *Router) createQueueFactory(ctx context.Context) (func(int) queue, error size++ } - q := newPQScheduler(r.logger, r.metrics, r.chDescs, uint(size)/2, uint(size)/2, defaultCapacity) + q := newPQScheduler(r.logger, r.metrics, r.lc, r.chDescs, uint(size)/2, uint(size)/2, defaultCapacity) q.start(ctx) return q }, nil + case queueTypeSimplePriority: + return func(size int) queue { return newSimplePriorityQueue(ctx, size, r.chDescs) }, nil + default: return nil, fmt.Errorf("cannot construct queue of type %q", r.options.QueueType) } @@ -239,7 +240,7 @@ func (r *Router) createQueueFactory(ctx context.Context) (func(int) queue, error // ChannelCreator allows routers to construct their own channels, // either by receiving a reference to Router.OpenChannel or using some // kind shim for testing purposes. -type ChannelCreator func(context.Context, *ChannelDescriptor) (*Channel, error) +type ChannelCreator func(context.Context, *ChannelDescriptor) (Channel, error) // OpenChannel opens a new channel for the given message type. The caller must // close the channel when done, before stopping the Router. 
messageType is the @@ -247,7 +248,7 @@ type ChannelCreator func(context.Context, *ChannelDescriptor) (*Channel, error) // implement Wrapper to automatically (un)wrap multiple message types in a // wrapper message. The caller may provide a size to make the channel buffered, // which internally makes the inbound, outbound, and error channel buffered. -func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*Channel, error) { +func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (Channel, error) { r.channelMtx.Lock() defer r.channelMtx.Unlock() @@ -262,11 +263,10 @@ func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*C queue := r.queueFactory(chDesc.RecvBufferCapacity) outCh := make(chan Envelope, chDesc.RecvBufferCapacity) errCh := make(chan PeerError, chDesc.RecvBufferCapacity) - channel := NewChannel(id, messageType, queue.dequeue(), outCh, errCh) - channel.name = chDesc.Name + channel := NewChannel(chDesc.ID, chDesc.Name, queue.dequeue(), outCh, errCh) var wrapper Wrapper - if w, ok := messageType.(Wrapper); ok { + if w, ok := chDesc.MessageType.(Wrapper); ok { wrapper = w } @@ -287,7 +287,7 @@ func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*C queue.close() }() - r.routeChannel(ctx, id, outCh, errCh, wrapper) + r.routeChannel(ctx, chDesc.ID, outCh, errCh, wrapper) }() return channel, nil @@ -307,11 +307,10 @@ func (r *Router) routeChannel( ) { for { select { - case envelope, ok := <-outCh: - if !ok { - return + case envelope := <-outCh: + if envelope.IsZero() { + continue } - // Mark the envelope with the channel ID to allow sendPeer() to pass // it on to Transport.SendMessage(). 
envelope.ChannelID = chID @@ -388,23 +387,37 @@ func (r *Router) routeChannel( } } - case peerError, ok := <-errCh: - if !ok { - return - } + case peerError := <-errCh: + maxPeerCapacity := r.peerManager.HasMaxPeerCapacity() + r.logger.Error("peer error", + "peer", peerError.NodeID, + "err", peerError.Err, + "disconnecting", peerError.Fatal || maxPeerCapacity, + ) - r.logger.Error("peer error, evicting", "peer", peerError.NodeID, "err", peerError.Err) + if peerError.Fatal || maxPeerCapacity { + // if the error is fatal or all peer + // slots are in use, we can error + // (disconnect) from the peer. + r.peerManager.Errored(peerError.NodeID, peerError.Err) + } else { + // this just decrements the peer + // score. + r.peerManager.processPeerEvent(ctx, PeerUpdate{ + NodeID: peerError.NodeID, + Status: PeerStatusBad, + }) + } - r.peerManager.Errored(peerError.NodeID, peerError.Err) case <-ctx.Done(): return } } } -func (r *Router) numConccurentDials() int { +func (r *Router) numConcurrentDials() int { if r.options.NumConcurrentDials == nil { - return runtime.NumCPU() + return runtime.NumCPU() * 32 } return r.options.NumConcurrentDials() @@ -426,43 +439,22 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error { return r.options.FilterPeerByID(ctx, id) } -func (r *Router) dialSleep(ctx context.Context) { - if r.options.DialSleep == nil { - const ( - maxDialerInterval = 3000 - minDialerInterval = 250 - ) - - // nolint:gosec // G404: Use of weak random number generator - dur := time.Duration(rand.Int63n(maxDialerInterval-minDialerInterval+1) + minDialerInterval) - - timer := time.NewTimer(dur * time.Millisecond) - defer timer.Stop() - - select { - case <-ctx.Done(): - case <-timer.C: - } - - return - } - - r.options.DialSleep(ctx) -} - // acceptPeers accepts inbound connections from peers on the given transport, // and spawns goroutines that route messages to/from them. 
func (r *Router) acceptPeers(ctx context.Context, transport Transport) { for { conn, err := transport.Accept(ctx) - switch err { - case nil: - case io.EOF: - r.logger.Debug("stopping accept routine", "transport", transport) + switch { + case errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded): + r.logger.Debug("stopping accept routine", "transport", transport, "err", "context canceled") return - default: - r.logger.Error("failed to accept connection", "transport", transport, "err", err) + case errors.Is(err, io.EOF): + r.logger.Debug("stopping accept routine", "transport", transport, "err", "EOF") return + case err != nil: + // in this case we got an error from the net.Listener. + r.logger.Error("failed to accept connection", "transport", transport, "err", err) + continue } incomingIP := conn.RemoteEndpoint().IP @@ -470,11 +462,11 @@ func (r *Router) acceptPeers(ctx context.Context, transport Transport) { closeErr := conn.Close() r.logger.Debug("rate limiting incoming peer", "err", err, - "ip", incomingIP.String(), + "ip", tmstrings.LazyStringer(incomingIP), "close_err", closeErr, ) - return + continue } // Spawn a goroutine for the handshake, to avoid head-of-line blocking. @@ -544,7 +536,7 @@ func (r *Router) dialPeers(ctx context.Context) { // able to add peers at a reasonable pace, though the number // is somewhat arbitrary. The action is further throttled by a // sleep after sending to the addresses channel. - for i := 0; i < r.numConccurentDials(); i++ { + for i := 0; i < r.numConcurrentDials(); i++ { wg.Add(1) go func() { defer wg.Done() @@ -566,19 +558,13 @@ LOOP: switch { case errors.Is(err, context.Canceled): break LOOP - case err != nil: - r.logger.Error("failed to find next peer to dial", "err", err) - break LOOP + case address == NodeAddress{}: + continue LOOP } select { case addresses <- address: - // this jitters the frequency that we call - // DialNext and prevents us from attempting to - // create connections too quickly. 
- - r.dialSleep(ctx) - continue + continue LOOP case <-ctx.Done(): close(addresses) break LOOP @@ -594,7 +580,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { case errors.Is(err, context.Canceled): return case err != nil: - r.logger.Error("failed to dial peer", "peer", address, "err", err) + r.logger.Debug("failed to dial peer", "peer", address, "err", err) if err = r.peerManager.DialFailed(ctx, address); err != nil { r.logger.Error("failed to report dial failure", "peer", address, "err", err) } @@ -617,8 +603,8 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { proTxHashSetter := SetProTxHashToPeerInfo(peerInfo.ProTxHash) if err := r.runWithPeerMutex(func() error { return r.peerManager.Dialed(address, proTxHashSetter) }); err != nil { - r.logger.Error("failed to dial peer", - "op", "outgoing/dialing", "peer", address.NodeID, "err", err) + r.logger.Error("failed to dial peer", "op", "outgoing/dialing", "peer", address.NodeID, "err", err) + r.peerManager.dialWaker.Wake() conn.Close() return } @@ -676,12 +662,13 @@ func (r *Router) dialPeer(ctx context.Context, address NodeAddress) (Connection, // Internet can't and needs a different public address. 
conn, err := r.transport.Dial(dialCtx, endpoint) if err != nil { - r.logger.Error("failed to dial endpoint", "peer", address.NodeID, "endpoint", endpoint, "err", err) + r.logger.Debug("failed to dial endpoint", "peer", address.NodeID, "endpoint", endpoint, "err", err) } else { r.logger.Debug("dialed peer", "peer", address.NodeID, "endpoint", endpoint) return conn, nil } } + return nil, errors.New("all endpoints failed") } @@ -693,20 +680,15 @@ func (r *Router) handshakePeer( expectID types.NodeID, ) (types.NodeInfo, error) { - if r.options.HandshakeTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, r.options.HandshakeTimeout) - defer cancel() - } - nodeInfo := r.nodeInfoProducer() - peerInfo, peerKey, err := conn.Handshake(ctx, *nodeInfo, r.privKey) + peerInfo, peerKey, err := conn.Handshake(ctx, r.options.HandshakeTimeout, *nodeInfo, r.privKey) if err != nil { return peerInfo, err } if err = peerInfo.Validate(); err != nil { return peerInfo, fmt.Errorf("invalid handshake NodeInfo: %w", err) } + if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID { return peerInfo, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)", peerInfo.NodeID, types.NodeIDFromPubKey(peerKey)) @@ -715,7 +697,12 @@ func (r *Router) handshakePeer( return peerInfo, fmt.Errorf("expected to connect with peer %q, got %q", expectID, peerInfo.NodeID) } + if err := nodeInfo.CompatibleWith(peerInfo); err != nil { + if err := r.peerManager.Inactivate(peerInfo.NodeID); err != nil { + return peerInfo, fmt.Errorf("problem inactivating peer %q: %w", peerInfo.ID(), err) + } + return peerInfo, ErrRejected{ err: err, id: peerInfo.ID(), @@ -735,7 +722,7 @@ func (r *Router) runWithPeerMutex(fn func() error) error { // channels. It will close the given connection and send queue when done, or if // they are closed elsewhere it will cause this method to shut down and return. 
func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connection, channels ChannelIDSet) { - r.metrics.Peers.Add(1) + r.metrics.PeersConnected.Add(1) r.peerManager.Ready(ctx, peerID, channels) sendQueue := r.getOrMakeQueue(peerID, channels) @@ -748,7 +735,7 @@ func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connec sendQueue.close() r.peerManager.Disconnected(ctx, peerID) - r.metrics.Peers.Add(-1) + r.metrics.PeersConnected.Add(-1) }() r.logger.Info("peer connected", "peer", peerID, "endpoint", conn) @@ -841,7 +828,7 @@ func (r *Router) receivePeer(ctx context.Context, peerID types.NodeID, conn Conn r.metrics.PeerReceiveBytesTotal.With( "chID", fmt.Sprint(chID), "peer_id", string(peerID), - "message_type", r.metrics.ValueToMetricLabel(msg)).Add(float64(proto.Size(msg))) + "message_type", r.lc.ValueToMetricLabel(msg)).Add(float64(proto.Size(msg))) r.metrics.RouterChannelQueueSend.Observe(time.Since(start).Seconds()) // r.logger.Debug("received message", "peer", peerID, "msg", msg) @@ -907,6 +894,8 @@ func (r *Router) evictPeers(ctx context.Context) { queue, ok := r.peerQueues[peerID] r.peerMtx.RUnlock() + r.metrics.PeersEvicted.Add(1) + if ok { queue.close() } diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 663e6b81c9..748731f32d 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -26,7 +26,7 @@ import ( "github.com/tendermint/tendermint/types" ) -func echoReactor(ctx context.Context, channel *p2p.Channel) { +func echoReactor(ctx context.Context, channel p2p.Channel) { iter := channel.Receive(ctx) for iter.Next(ctx) { envelope := iter.Envelope() @@ -41,6 +41,10 @@ func echoReactor(ctx context.Context, channel *p2p.Channel) { } func TestRouter_Network(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -162,6 +166,10 @@ func TestRouter_Channel_Basic(t *testing.T) 
{ // Channel tests are hairy to mock, so we use an in-memory network instead. func TestRouter_Channel_SendReceive(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -224,6 +232,10 @@ func TestRouter_Channel_SendReceive(t *testing.T) { } func TestRouter_Channel_Broadcast(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) ctx, cancel := context.WithCancel(context.Background()) @@ -255,6 +267,10 @@ func TestRouter_Channel_Broadcast(t *testing.T) { } func TestRouter_Channel_Wrapper(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) ctx, cancel := context.WithCancel(context.Background()) @@ -385,7 +401,7 @@ func TestRouter_AcceptPeers(t *testing.T) { connCtx, connCancel := context.WithCancel(context.Background()) mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). Return(tc.peerInfo, tc.peerKey, nil) mockConnection.On("Close").Run(func(_ mock.Arguments) { connCancel() }).Return(nil).Maybe() mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) @@ -442,78 +458,51 @@ func TestRouter_AcceptPeers(t *testing.T) { } } -func TestRouter_AcceptPeers_Error(t *testing.T) { - t.Cleanup(leaktest.Check(t)) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Set up a mock transport that returns an error, which should prevent - // the router from calling Accept again. 
- mockTransport := &mocks.Transport{} - mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Accept", mock.Anything).Once().Return(nil, errors.New("boom")) - mockTransport.On("Close").Return(nil) - mockTransport.On("Listen", mock.Anything).Return(nil) - - // Set up and start the router. - peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) - require.NoError(t, err) - - router, err := p2p.NewRouter( - log.NewNopLogger(), - p2p.NopMetrics(), - selfKey, - peerManager, - func() *types.NodeInfo { return &selfInfo }, - mockTransport, - nil, - p2p.RouterOptions{}, - ) - require.NoError(t, err) - - require.NoError(t, router.Start(ctx)) - time.Sleep(time.Second) - router.Stop() - - mockTransport.AssertExpectations(t) -} +func TestRouter_AcceptPeers_Errors(t *testing.T) { + if testing.Short() { + // Each subtest takes more than one second due to the time.Sleep call, + // so just skip from the parent test in short mode. + t.Skip("skipping test in short mode") + } -func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) { - t.Cleanup(leaktest.Check(t)) + for _, err := range []error{io.EOF, context.Canceled, context.DeadlineExceeded} { + t.Run(err.Error(), func(t *testing.T) { + t.Cleanup(leaktest.Check(t)) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // Set up a mock transport that returns io.EOF once, which should prevent - // the router from calling Accept again. - mockTransport := &mocks.Transport{} - mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) - mockTransport.On("Close").Return(nil) - mockTransport.On("Listen", mock.Anything).Return(nil) + // Set up a mock transport that returns io.EOF once, which should prevent + // the router from calling Accept again. 
+ mockTransport := &mocks.Transport{} + mockTransport.On("String").Maybe().Return("mock") + mockTransport.On("Accept", mock.Anything).Once().Return(nil, err) + mockTransport.On("Close").Return(nil) + mockTransport.On("Listen", mock.Anything).Return(nil) - // Set up and start the router. - peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) - require.NoError(t, err) + // Set up and start the router. + peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) + require.NoError(t, err) - router, err := p2p.NewRouter( - log.NewNopLogger(), - p2p.NopMetrics(), - selfKey, - peerManager, - func() *types.NodeInfo { return &selfInfo }, - mockTransport, - nil, - p2p.RouterOptions{}, - ) - require.NoError(t, err) + router, err := p2p.NewRouter( + log.NewNopLogger(), + p2p.NopMetrics(), + selfKey, + peerManager, + func() *types.NodeInfo { return &selfInfo }, + mockTransport, + nil, + p2p.RouterOptions{}, + ) + require.NoError(t, err) - require.NoError(t, router.Start(ctx)) - time.Sleep(time.Second) - router.Stop() + require.NoError(t, router.Start(ctx)) + time.Sleep(time.Second) + router.Stop() - mockTransport.AssertExpectations(t) + mockTransport.AssertExpectations(t) + }) + } } func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { @@ -530,7 +519,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). 
WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF) mockConnection.On("Close").Return(nil) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) @@ -618,7 +607,7 @@ func TestRouter_DialPeers(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") if tc.dialErr == nil { - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). Return(tc.peerInfo, tc.peerKey, nil) mockConnection.On("Close").Run(func(_ mock.Arguments) { connCancel() }).Return(nil).Maybe() } @@ -704,7 +693,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF) mockConnection.On("Close").Return(nil) @@ -745,7 +734,6 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { mockTransport, nil, p2p.RouterOptions{ - DialSleep: func(_ context.Context) {}, NumConcurrentDials: func() int { ncpu := runtime.NumCPU() if ncpu <= 3 { @@ -787,7 +775,7 @@ func TestRouter_EvictPeers(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). 
Return(peerInfo, peerKey.PubKey(), nil) mockConnection.On("ReceiveMessage", mock.Anything).WaitUntil(closeCh).Return(chID, nil, io.EOF) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) @@ -842,6 +830,10 @@ func TestRouter_EvictPeers(t *testing.T) { } func TestRouter_ChannelCompatability(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -856,7 +848,7 @@ func TestRouter_ChannelCompatability(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). Return(incompatiblePeer, peerKey.PubKey(), nil) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) mockConnection.On("Close").Return(nil) @@ -907,7 +899,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). 
Return(peer, peerKey.PubKey(), nil) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) mockConnection.On("Close").Return(nil) diff --git a/internal/p2p/rqueue.go b/internal/p2p/rqueue.go new file mode 100644 index 0000000000..8d6406864a --- /dev/null +++ b/internal/p2p/rqueue.go @@ -0,0 +1,112 @@ +package p2p + +import ( + "container/heap" + "context" + "sort" + "time" + + "github.com/gogo/protobuf/proto" +) + +type simpleQueue struct { + input chan Envelope + output chan Envelope + closeFn func() + closeCh <-chan struct{} + + maxSize int + chDescs []*ChannelDescriptor +} + +func newSimplePriorityQueue(ctx context.Context, size int, chDescs []*ChannelDescriptor) *simpleQueue { + if size%2 != 0 { + size++ + } + + ctx, cancel := context.WithCancel(ctx) + q := &simpleQueue{ + input: make(chan Envelope, size*2), + output: make(chan Envelope, size/2), + maxSize: size * size, + closeCh: ctx.Done(), + closeFn: cancel, + } + + go q.run(ctx) + return q +} + +func (q *simpleQueue) enqueue() chan<- Envelope { return q.input } +func (q *simpleQueue) dequeue() <-chan Envelope { return q.output } +func (q *simpleQueue) close() { q.closeFn() } +func (q *simpleQueue) closed() <-chan struct{} { return q.closeCh } + +func (q *simpleQueue) run(ctx context.Context) { + defer q.closeFn() + + var chPriorities = make(map[ChannelID]uint, len(q.chDescs)) + for _, chDesc := range q.chDescs { + chID := chDesc.ID + chPriorities[chID] = uint(chDesc.Priority) + } + + pq := make(priorityQueue, 0, q.maxSize) + heap.Init(&pq) + ticker := time.NewTicker(10 * time.Millisecond) + // must have a buffer of exactly one because both sides of + // this channel are used in this loop, and simply signals adds + // to the heap + signal := make(chan struct{}, 1) + for { + select { + case <-ctx.Done(): + return + case <-q.closeCh: + return + case e := <-q.input: + // enqueue the incoming Envelope + heap.Push(&pq, &pqEnvelope{ + envelope: e, + size: uint(proto.Size(e.Message)), + priority: 
chPriorities[e.ChannelID], + timestamp: time.Now().UTC(), + }) + + select { + case signal <- struct{}{}: + default: + if len(pq) > q.maxSize { + sort.Sort(pq) + pq = pq[:q.maxSize] + } + } + + case <-ticker.C: + if len(pq) > q.maxSize { + sort.Sort(pq) + pq = pq[:q.maxSize] + } + if len(pq) > 0 { + select { + case signal <- struct{}{}: + default: + } + } + case <-signal: + SEND: + for len(pq) > 0 { + select { + case <-ctx.Done(): + return + case <-q.closeCh: + return + case q.output <- heap.Pop(&pq).(*pqEnvelope).envelope: + continue SEND + default: + break SEND + } + } + } + } +} diff --git a/internal/p2p/rqueue_test.go b/internal/p2p/rqueue_test.go new file mode 100644 index 0000000000..43c4066e57 --- /dev/null +++ b/internal/p2p/rqueue_test.go @@ -0,0 +1,47 @@ +package p2p + +import ( + "context" + "testing" + "time" +) + +func TestSimpleQueue(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // set up a small queue with very small buffers so we can + // watch it shed load, then send a bunch of messages to the + // queue, most of which we'll watch it drop. + sq := newSimplePriorityQueue(ctx, 1, nil) + for i := 0; i < 100; i++ { + sq.enqueue() <- Envelope{From: "merlin"} + } + + seen := 0 + +RETRY: + for seen <= 2 { + select { + case e := <-sq.dequeue(): + if e.From != "merlin" { + continue + } + seen++ + case <-time.After(10 * time.Millisecond): + break RETRY + } + } + // if we don't see any messages, then it's just broken. + if seen == 0 { + t.Errorf("seen %d messages, should have seen more than one", seen) + } + // ensure that load shedding happens: there can be at most 3 + // messages that we get out of this, one that was buffered + // plus 2 that were under the cap, everything else gets + // dropped. 
+ if seen > 3 { + t.Errorf("saw %d messages, should have seen 5 or fewer", seen) + } + +} diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index 1b48d8b0fd..da67cacaf7 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net" + "time" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/types" @@ -81,7 +82,7 @@ type Connection interface { // FIXME: The handshake should really be the Router's responsibility, but // that requires the connection interface to be byte-oriented rather than // message-oriented (see comment above). - Handshake(context.Context, types.NodeInfo, crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) + Handshake(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) // ReceiveMessage returns the next message received on the connection, // blocking until one is available. Returns io.EOF if closed. diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index 0520f04db6..270f006ac6 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -9,6 +9,7 @@ import ( "net" "strconv" "sync" + "time" "golang.org/x/net/netutil" @@ -280,6 +281,7 @@ func newMConnConnection( // Handshake implements Connection. func (c *mConnConnection) Handshake( ctx context.Context, + timeout time.Duration, nodeInfo types.NodeInfo, privKey crypto.PrivKey, ) (types.NodeInfo, crypto.PubKey, error) { @@ -289,6 +291,12 @@ func (c *mConnConnection) Handshake( peerKey crypto.PubKey errCh = make(chan error, 1) ) + handshakeCtx := ctx + if timeout > 0 { + var cancel context.CancelFunc + handshakeCtx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } // To handle context cancellation, we need to do the handshake in a // goroutine and abort the blocking network calls by closing the connection // when the context is canceled. 
@@ -301,25 +309,29 @@ func (c *mConnConnection) Handshake( } }() var err error - mconn, peerInfo, peerKey, err = c.handshake(ctx, nodeInfo, privKey) + mconn, peerInfo, peerKey, err = c.handshake(handshakeCtx, nodeInfo, privKey) select { case errCh <- err: - case <-ctx.Done(): + case <-handshakeCtx.Done(): } }() select { - case <-ctx.Done(): + case <-handshakeCtx.Done(): _ = c.Close() - return types.NodeInfo{}, nil, ctx.Err() + return types.NodeInfo{}, nil, handshakeCtx.Err() case err := <-errCh: if err != nil { return types.NodeInfo{}, nil, err } c.mconn = mconn + // Start must not use the handshakeCtx. The handshakeCtx may have a + // timeout set that is intended to terminate only the handshake procedure. + // The context passed to Start controls the entire lifecycle of the + // mconn. if err = c.mconn.Start(ctx); err != nil { return types.NodeInfo{}, nil, err } diff --git a/internal/p2p/transport_mconn_test.go b/internal/p2p/transport_mconn_test.go index c478dbe1d2..6fafd01aeb 100644 --- a/internal/p2p/transport_mconn_test.go +++ b/internal/p2p/transport_mconn_test.go @@ -59,6 +59,10 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) { } func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go index f02e828d6f..8260e6a81c 100644 --- a/internal/p2p/transport_memory.go +++ b/internal/p2p/transport_memory.go @@ -7,6 +7,7 @@ import ( "io" "net" "sync" + "time" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/log" @@ -273,9 +274,16 @@ func (c *MemoryConnection) RemoteEndpoint() Endpoint { // Handshake implements Connection. 
func (c *MemoryConnection) Handshake( ctx context.Context, + timeout time.Duration, nodeInfo types.NodeInfo, privKey crypto.PrivKey, ) (types.NodeInfo, crypto.PubKey, error) { + if timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + select { case c.sendCh <- memoryMessage{nodeInfo: &nodeInfo, pubKey: privKey.PubKey()}: c.logger.Debug("sent handshake", "nodeInfo", nodeInfo) diff --git a/internal/p2p/transport_test.go b/internal/p2p/transport_test.go index b4edf9bc95..d58c23955b 100644 --- a/internal/p2p/transport_test.go +++ b/internal/p2p/transport_test.go @@ -296,7 +296,7 @@ func TestConnection_Handshake(t *testing.T) { errCh := make(chan error, 1) go func() { // Must use assert due to goroutine. - peerInfo, peerKey, err := ba.Handshake(ctx, bInfo, bKey) + peerInfo, peerKey, err := ba.Handshake(ctx, 0, bInfo, bKey) if err == nil { assert.Equal(t, aInfo, peerInfo) assert.Equal(t, aKey.PubKey(), peerKey) @@ -307,7 +307,7 @@ func TestConnection_Handshake(t *testing.T) { } }() - peerInfo, peerKey, err := ab.Handshake(ctx, aInfo, aKey) + peerInfo, peerKey, err := ab.Handshake(ctx, 0, aInfo, aKey) require.NoError(t, err) require.Equal(t, bInfo, peerInfo) require.Equal(t, bKey.PubKey(), peerKey) @@ -328,7 +328,7 @@ func TestConnection_HandshakeCancel(t *testing.T) { ab, ba := dialAccept(ctx, t, a, b) timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute) cancel() - _, _, err := ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) + _, _, err := ab.Handshake(timeoutCtx, 0, types.NodeInfo{}, ed25519.GenPrivKey()) require.Error(t, err) require.Equal(t, context.Canceled, err) _ = ab.Close() @@ -338,7 +338,7 @@ func TestConnection_HandshakeCancel(t *testing.T) { ab, ba = dialAccept(ctx, t, a, b) timeoutCtx, cancel = context.WithTimeout(ctx, 200*time.Millisecond) defer cancel() - _, _, err = ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) + _, _, err = 
ab.Handshake(timeoutCtx, 0, types.NodeInfo{}, ed25519.GenPrivKey()) require.Error(t, err) require.Equal(t, context.DeadlineExceeded, err) _ = ab.Close() @@ -642,13 +642,13 @@ func dialAcceptHandshake(ctx context.Context, t *testing.T, a, b p2p.Transport) go func() { privKey := ed25519.GenPrivKey() nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())} - _, _, err := ba.Handshake(ctx, nodeInfo, privKey) + _, _, err := ba.Handshake(ctx, 0, nodeInfo, privKey) errCh <- err }() privKey := ed25519.GenPrivKey() nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())} - _, _, err := ab.Handshake(ctx, nodeInfo, privKey) + _, _, err := ab.Handshake(ctx, 0, nodeInfo, privKey) require.NoError(t, err) timer := time.NewTimer(2 * time.Second) diff --git a/internal/proxy/client_test.go b/internal/proxy/client_test.go index 09ac3f2c87..41a34bde7d 100644 --- a/internal/proxy/client_test.go +++ b/internal/proxy/client_test.go @@ -58,7 +58,7 @@ func (app *appConnTest) Info(ctx context.Context, req *types.RequestInfo) (*type var SOCKET = "socket" func TestEcho(t *testing.T) { - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) + sockPath := fmt.Sprintf("unix://%s/echo_%v.sock", t.TempDir(), tmrand.Str(6)) logger := log.NewNopLogger() client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) if err != nil { @@ -98,7 +98,7 @@ func TestEcho(t *testing.T) { func BenchmarkEcho(b *testing.B) { b.StopTimer() // Initialize - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) + sockPath := fmt.Sprintf("unix://%s/echo_%v.sock", b.TempDir(), tmrand.Str(6)) logger := log.NewNopLogger() client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) if err != nil { @@ -146,7 +146,7 @@ func TestInfo(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) + sockPath := fmt.Sprintf("unix://%s/echo_%v.sock", 
t.TempDir(), tmrand.Str(6)) logger := log.NewNopLogger() client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) if err != nil { diff --git a/internal/proxy/metrics.gen.go b/internal/proxy/metrics.gen.go new file mode 100644 index 0000000000..ea483f83db --- /dev/null +++ b/internal/proxy/metrics.gen.go @@ -0,0 +1,32 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package proxy + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + MethodTiming: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "method_timing", + Help: "Timing for each ABCI method.", + + Buckets: []float64{.0001, .0004, .002, .009, .02, .1, .65, 2, 6, 25}, + }, append(labels, "method", "type")).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + MethodTiming: discard.NewHistogram(), + } +} diff --git a/internal/proxy/metrics.go b/internal/proxy/metrics.go index 99bd7d7b04..b95687a03b 100644 --- a/internal/proxy/metrics.go +++ b/internal/proxy/metrics.go @@ -2,9 +2,6 @@ package proxy import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -13,35 +10,10 @@ const ( MetricsSubsystem = "abci_connection" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains the prometheus metrics exposed by the proxy package. type Metrics struct { - MethodTiming metrics.Histogram -} - -// PrometheusMetrics constructs a Metrics instance that collects metrics samples. 
-// The resulting metrics will be prefixed with namespace and labeled with the -// defaultLabelsAndValues. defaultLabelsAndValues must be a list of string pairs -// where the first of each pair is the label and the second is the value. -func PrometheusMetrics(namespace string, defaultLabelsAndValues ...string) *Metrics { - defaultLabels := []string{} - for i := 0; i < len(defaultLabelsAndValues); i += 2 { - defaultLabels = append(defaultLabels, defaultLabelsAndValues[i]) - } - return &Metrics{ - MethodTiming: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "method_timing", - Help: "ABCI Method Timing", - Buckets: []float64{.0001, .0004, .002, .009, .02, .1, .65, 2, 6, 25}, - }, append(defaultLabels, []string{"method", "type"}...)).With(defaultLabelsAndValues...), - } -} - -// NopMetrics constructs a Metrics instance that discards all samples and is suitable -// for testing. -func NopMetrics() *Metrics { - return &Metrics{ - MethodTiming: discard.NewHistogram(), - } + // Timing for each ABCI method. 
+ MethodTiming metrics.Histogram `metrics_bucketsizes:".0001,.0004,.002,.009,.02,.1,.65,2,6,25" metrics_labels:"method, type"` } diff --git a/internal/rpc/core/blocks.go b/internal/rpc/core/blocks.go index 1b3c212167..8ee334c7ac 100644 --- a/internal/rpc/core/blocks.go +++ b/internal/rpc/core/blocks.go @@ -188,23 +188,23 @@ func (env *Environment) BlockResults(ctx context.Context, req *coretypes.Request return nil, err } - results, err := env.StateStore.LoadABCIResponses(height) + results, err := env.StateStore.LoadFinalizeBlockResponses(height) if err != nil { return nil, err } var totalGasUsed int64 - for _, res := range results.FinalizeBlock.GetTxResults() { + for _, res := range results.GetTxResults() { totalGasUsed += res.GetGasUsed() } return &coretypes.ResultBlockResults{ Height: height, - TxsResults: results.FinalizeBlock.TxResults, + TxsResults: results.TxResults, TotalGasUsed: totalGasUsed, - FinalizeBlockEvents: results.FinalizeBlock.Events, - ValidatorSetUpdate: results.FinalizeBlock.ValidatorSetUpdate, - ConsensusParamUpdates: consensusParamsPtrFromProtoPtr(results.FinalizeBlock.ConsensusParamUpdates), + FinalizeBlockEvents: results.Events, + ValidatorSetUpdate: results.ValidatorSetUpdate, + ConsensusParamUpdates: consensusParamsPtrFromProtoPtr(results.ConsensusParamUpdates), }, nil } diff --git a/internal/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go index 478ae08a66..e7716224c3 100644 --- a/internal/rpc/core/blocks_test.go +++ b/internal/rpc/core/blocks_test.go @@ -13,7 +13,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/mocks" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/rpc/coretypes" ) @@ -70,19 +69,17 @@ func TestBlockchainInfo(t *testing.T) { } func TestBlockResults(t *testing.T) { - results := &tmstate.ABCIResponses{ - FinalizeBlock: 
&abci.ResponseFinalizeBlock{ - TxResults: []*abci.ExecTxResult{ - {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10}, - {Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5}, - {Code: 1, Log: "not ok", GasUsed: 0}, - }, + results := &abci.ResponseFinalizeBlock{ + TxResults: []*abci.ExecTxResult{ + {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10}, + {Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5}, + {Code: 1, Log: "not ok", GasUsed: 0}, }, } env := &Environment{} env.StateStore = sm.NewStore(dbm.NewMemDB()) - err := env.StateStore.SaveABCIResponses(100, results) + err := env.StateStore.SaveFinalizeBlockResponses(100, results) require.NoError(t, err) mockstore := &mocks.BlockStore{} mockstore.On("Height").Return(int64(100)) @@ -99,11 +96,11 @@ func TestBlockResults(t *testing.T) { {101, true, nil}, {100, false, &coretypes.ResultBlockResults{ Height: 100, - TxsResults: results.FinalizeBlock.TxResults, + TxsResults: results.TxResults, TotalGasUsed: 15, - FinalizeBlockEvents: results.FinalizeBlock.Events, - ValidatorSetUpdate: results.FinalizeBlock.ValidatorSetUpdate, - ConsensusParamUpdates: consensusParamsPtrFromProtoPtr(results.FinalizeBlock.ConsensusParamUpdates), + FinalizeBlockEvents: results.Events, + ValidatorSetUpdate: results.ValidatorSetUpdate, + ConsensusParamUpdates: consensusParamsPtrFromProtoPtr(results.ConsensusParamUpdates), }}, } diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go index 7b7dc3087b..75ff14ed90 100644 --- a/internal/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -18,6 +18,7 @@ import ( "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/eventlog" + "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" tmpubsub "github.com/tendermint/tendermint/internal/pubsub" @@ -26,7 +27,6 @@ import ( 
"github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/statesync" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/strings" "github.com/tendermint/tendermint/rpc/coretypes" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" "github.com/tendermint/tendermint/types" @@ -236,7 +236,8 @@ func (env *Environment) StartService(ctx context.Context, conf *config.Config) ( // If necessary adjust global WriteTimeout to ensure it's greater than // TimeoutBroadcastTxCommit. // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { + // Note we don't need to adjust anything if the timeout is already unlimited. + if cfg.WriteTimeout > 0 && cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { cfg.WriteTimeout = conf.RPC.TimeoutBroadcastTxCommit + 1*time.Second } diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index 5464818ed2..309412baa1 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -19,20 +19,24 @@ import ( // BroadcastTxAsync returns right away, with no response. Does not wait for // CheckTx nor DeliverTx results. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async +// More: +// https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async +// Deprecated and should be removed in 0.37 func (env *Environment) BroadcastTxAsync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { - err := env.Mempool.CheckTx(ctx, req.Tx, nil, mempool.TxInfo{}) - if err != nil { - return nil, err - } + go func() { _ = env.Mempool.CheckTx(ctx, req.Tx, nil, mempool.TxInfo{}) }() return &coretypes.ResultBroadcastTx{Hash: req.Tx.Hash()}, nil } -// BroadcastTxSync returns with the response from CheckTx. 
Does not wait for +// Deprecated and should be remove in 0.37 +func (env *Environment) BroadcastTxSync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { + return env.BroadcastTx(ctx, req) +} + +// BroadcastTx returns with the response from CheckTx. Does not wait for // DeliverTx result. // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync -func (env *Environment) BroadcastTxSync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { +func (env *Environment) BroadcastTx(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { resCh := make(chan *abci.ResponseCheckTx, 1) err := env.Mempool.CheckTx( ctx, @@ -54,13 +58,10 @@ func (env *Environment) BroadcastTxSync(ctx context.Context, req *coretypes.Requ return nil, fmt.Errorf("broadcast confirmation not received: %w", ctx.Err()) case r := <-resCh: return &coretypes.ResultBroadcastTx{ - Code: r.Code, - Data: r.Data, - Log: r.Log, - Codespace: r.Codespace, - MempoolError: r.MempoolError, - Info: r.Info, - Hash: req.Tx.Hash(), + Code: r.Code, + Data: r.Data, + Codespace: r.Codespace, + Hash: req.Tx.Hash(), }, nil } } @@ -92,7 +93,7 @@ func (env *Environment) BroadcastTxCommit(ctx context.Context, req *coretypes.Re return &coretypes.ResultBroadcastTxCommit{ CheckTx: *r, Hash: req.Tx.Hash(), - }, fmt.Errorf("transaction encountered error (%s)", r.MempoolError) + }, fmt.Errorf("wrong ABCI CodeType, got (%d) instead of OK", r.Code) } if !indexer.KVSinkEnabled(env.EventSinks) { diff --git a/internal/rpc/core/routes.go b/internal/rpc/core/routes.go index 4bc1ca4140..107b0e226d 100644 --- a/internal/rpc/core/routes.go +++ b/internal/rpc/core/routes.go @@ -28,7 +28,7 @@ func NewRoutesMap(svc RPCService, opts *RouteOptions) RoutesMap { out := RoutesMap{ // Event subscription. Note that subscribe, unsubscribe, and // unsubscribe_all are only available via the websocket endpoint. 
- "events": rpc.NewRPCFunc(svc.Events), + "events": rpc.NewRPCFunc(svc.Events).Timeout(0), "subscribe": rpc.NewWSRPCFunc(svc.Subscribe), "unsubscribe": rpc.NewWSRPCFunc(svc.Unsubscribe), "unsubscribe_all": rpc.NewWSRPCFunc(svc.UnsubscribeAll), @@ -59,8 +59,11 @@ func NewRoutesMap(svc RPCService, opts *RouteOptions) RoutesMap { "num_unconfirmed_txs": rpc.NewRPCFunc(svc.NumUnconfirmedTxs), // tx broadcast API + "broadcast_tx": rpc.NewRPCFunc(svc.BroadcastTx), + // TODO remove after 0.36 + // deprecated broadcast tx methods: "broadcast_tx_commit": rpc.NewRPCFunc(svc.BroadcastTxCommit), - "broadcast_tx_sync": rpc.NewRPCFunc(svc.BroadcastTxSync), + "broadcast_tx_sync": rpc.NewRPCFunc(svc.BroadcastTx), "broadcast_tx_async": rpc.NewRPCFunc(svc.BroadcastTxAsync), // abci API @@ -87,6 +90,7 @@ type RPCService interface { BlockSearch(ctx context.Context, req *coretypes.RequestBlockSearch) (*coretypes.ResultBlockSearch, error) BlockchainInfo(ctx context.Context, req *coretypes.RequestBlockchainInfo) (*coretypes.ResultBlockchainInfo, error) BroadcastEvidence(ctx context.Context, req *coretypes.RequestBroadcastEvidence) (*coretypes.ResultBroadcastEvidence, error) + BroadcastTx(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) BroadcastTxAsync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) BroadcastTxCommit(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTxCommit, error) BroadcastTxSync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) diff --git a/internal/state/errors.go b/internal/state/errors.go index e8ad776f46..516b20e5f5 100644 --- a/internal/state/errors.go +++ b/internal/state/errors.go @@ -46,7 +46,7 @@ type ( Height int64 } - ErrNoABCIResponsesForHeight struct { + ErrNoFinalizeBlockResponsesForHeight struct { Height int64 } ) @@ -102,6 +102,6 @@ func (e ErrNoConsensusParamsForHeight) Error() string { 
return fmt.Sprintf("could not find consensus params for height #%d", e.Height) } -func (e ErrNoABCIResponsesForHeight) Error() string { - return fmt.Sprintf("could not find results for height #%d", e.Height) +func (e ErrNoFinalizeBlockResponsesForHeight) Error() string { + return fmt.Sprintf("could not find FinalizeBlock responses for height #%d", e.Height) } diff --git a/internal/state/execution.go b/internal/state/execution.go index 47d3e61c1f..4ed1ed20a5 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -16,7 +16,6 @@ import ( "github.com/tendermint/tendermint/internal/mempool" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmtypes "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -128,13 +127,13 @@ func (blockExec *BlockExecutor) CreateProposalBlock( rpp, err := blockExec.appClient.PrepareProposal( ctx, &abci.RequestPrepareProposal{ - MaxTxBytes: maxDataBytes, - Txs: block.Txs.ToSliceOfBytes(), - LocalLastCommit: abci.ExtendedCommitInfo(localLastCommit), - ByzantineValidators: block.Evidence.ToABCI(), - Height: block.Height, - Time: block.Time, - NextValidatorsHash: block.NextValidatorsHash, + MaxTxBytes: maxDataBytes, + Txs: block.Txs.ToSliceOfBytes(), + LocalLastCommit: abci.ExtendedCommitInfo(localLastCommit), + Misbehavior: block.Evidence.ToABCI(), + Height: block.Height, + Time: block.Time, + NextValidatorsHash: block.NextValidatorsHash, // Dash's fields CoreChainLockedHeight: block.CoreChainLockedHeight, @@ -184,14 +183,14 @@ func (blockExec *BlockExecutor) ProcessProposal( state State, ) (bool, error) { resp, err := blockExec.appClient.ProcessProposal(ctx, &abci.RequestProcessProposal{ - Hash: block.Header.Hash(), - Height: block.Header.Height, - Time: block.Header.Time, - Txs: block.Data.Txs.ToSliceOfBytes(), - ProposedLastCommit: 
buildLastCommitInfo(block, blockExec.store, state.InitialHeight), - ByzantineValidators: block.Evidence.ToABCI(), - ProposerProTxHash: block.ProposerProTxHash, - NextValidatorsHash: block.NextValidatorsHash, + Hash: block.Header.Hash(), + Height: block.Header.Height, + Time: block.Header.Time, + Txs: block.Data.Txs.ToSliceOfBytes(), + ProposedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), + Misbehavior: block.Evidence.ToABCI(), + ProposerProTxHash: block.ProposerProTxHash, + NextValidatorsHash: block.NextValidatorsHash, }) if err != nil { return false, ErrInvalidBlock(err) @@ -199,6 +198,13 @@ func (blockExec *BlockExecutor) ProcessProposal( if resp.IsStatusUnknown() { panic(fmt.Sprintf("ProcessProposal responded with status %s", resp.Status.String())) } + // we force the abci app to return only 32 byte app hashes (set to 20 temporarily) + if resp.AppHash != nil && len(resp.AppHash) != blockExec.appHashSize { + blockExec.logger.Error( + "Client returned invalid app hash size", "bytesLength", len(resp.AppHash), + ) + return false, errors.New("invalid App Hash size") + } return resp.IsAccepted(), nil } @@ -267,16 +273,16 @@ func (blockExec *BlockExecutor) ApplyBlock( startTime := time.Now().UnixNano() txs := block.Txs.ToSliceOfBytes() version := block.Header.Version.ToProto() - finalizeBlockResponse, err := blockExec.appClient.FinalizeBlock( + fBlockRes, err := blockExec.appClient.FinalizeBlock( ctx, &abci.RequestFinalizeBlock{ - Hash: block.Hash(), - Height: block.Header.Height, - Time: block.Header.Time, - Txs: txs, - DecidedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), - ByzantineValidators: block.Evidence.ToABCI(), - NextValidatorsHash: block.NextValidatorsHash, + Hash: block.Hash(), + Height: block.Header.Height, + Time: block.Header.Time, + Txs: txs, + DecidedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), + Misbehavior: block.Evidence.ToABCI(), + NextValidatorsHash: 
block.NextValidatorsHash, // Dash's fields ProposerProTxHash: block.ProposerProTxHash, @@ -291,33 +297,52 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, ErrProxyAppConn(err) } - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: finalizeBlockResponse, + numValUpdates := 0 + if fBlockRes.ValidatorSetUpdate != nil { + numValUpdates = len(fBlockRes.ValidatorSetUpdate.ValidatorUpdates) } + blockExec.logger.Info( + "finalized block", + "height", block.Height, + "num_txs_res", len(fBlockRes.TxResults), + "num_val_updates", numValUpdates, + "block_app_hash", fmt.Sprintf("%X", fBlockRes.AppHash), + ) + // Save the results before we commit. - if err := blockExec.store.SaveABCIResponses(block.Height, abciResponses); err != nil { + err = blockExec.store.SaveFinalizeBlockResponses(block.Height, fBlockRes) + if err != nil && !errors.Is(err, ErrNoFinalizeBlockResponsesForHeight{block.Height}) { + // It is correct to have an empty ResponseFinalizeBlock for ApplyBlock, + // but not for saving it to the state store return state, err } // validate the validator updates and convert to tendermint types - err = validateValidatorSetUpdate(finalizeBlockResponse.ValidatorSetUpdate, state.ConsensusParams.Validator) + err = validateValidatorSetUpdate(fBlockRes.ValidatorSetUpdate, state.ConsensusParams.Validator) if err != nil { return state, fmt.Errorf("error in validator updates: %w", err) } nextCoreChainLock, err := types.CoreChainLockFromProto( - finalizeBlockResponse.NextCoreChainLockUpdate, + fBlockRes.NextCoreChainLockUpdate, ) if err != nil { return state, fmt.Errorf("error in chain lock from proto: %v", err) } // The quorum type should not even matter here - validators, thresholdPublicKey, quorumHash, err := types.PB2TM.ValidatorUpdatesFromValidatorSet(finalizeBlockResponse.ValidatorSetUpdate) + validators, thresholdPublicKey, quorumHash, err := types.PB2TM.ValidatorUpdatesFromValidatorSet(fBlockRes.ValidatorSetUpdate) if err != nil { return state, 
fmt.Errorf("error in chain lock from proto: %v", err) } + if len(validators) > 0 { + blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validators)) + blockExec.metrics.ValidatorSetUpdates.Add(1) + } + if fBlockRes.ConsensusParamUpdates != nil { + blockExec.metrics.ConsensusParamUpdates.Add(1) + } if len(validators) > 0 { blockExec.logger.Debug( @@ -330,18 +355,18 @@ func (blockExec *BlockExecutor) ApplyBlock( } // Update the state with the block and responses. - rs, err := abci.MarshalTxResults(finalizeBlockResponse.TxResults) + rs, err := abci.MarshalTxResults(fBlockRes.TxResults) if err != nil { return state, fmt.Errorf("marshaling TxResults: %w", err) } h := merkle.HashFromByteSlices(rs) - state, err = state.Update(proTxHash, blockID, &block.Header, h, finalizeBlockResponse.ConsensusParamUpdates, validators, thresholdPublicKey, quorumHash) + state, err = state.Update(proTxHash, blockID, &block.Header, h, fBlockRes.ConsensusParamUpdates, validators, thresholdPublicKey, quorumHash) if err != nil { return state, fmt.Errorf("commit failed for application: %w", err) } // Lock mempool, commit app state, update mempoool. - appHash, retainHeight, err := blockExec.Commit(ctx, state, block, finalizeBlockResponse.TxResults) + retainHeight, err := blockExec.Commit(ctx, state, block, fBlockRes.TxResults) if err != nil { return state, fmt.Errorf("commit failed for application: %w", err) } @@ -353,7 +378,7 @@ func (blockExec *BlockExecutor) ApplyBlock( blockExec.NextCoreChainLock = nextCoreChainLock // Update the app hash and save the state. - state.AppHash = appHash + state.AppHash = fBlockRes.AppHash if err := blockExec.store.Save(state); err != nil { return state, err } @@ -373,7 +398,7 @@ func (blockExec *BlockExecutor) ApplyBlock( // Events are fired after everything else. 
// NOTE: if we crash between Commit and Save, events wont be fired during replay - fireEvents(blockExec.logger, blockExec.eventBus, block, blockID, finalizeBlockResponse, state.NextValidators) + fireEvents(blockExec.logger, blockExec.eventBus, block, blockID, fBlockRes, state.NextValidators) return state, nil } @@ -406,7 +431,7 @@ func (blockExec *BlockExecutor) VerifyVoteExtension(ctx context.Context, vote *t } if !resp.IsOK() { - return types.ErrVoteInvalidExtension + return errors.New("invalid vote extension") } return nil @@ -423,7 +448,7 @@ func (blockExec *BlockExecutor) Commit( state State, block *types.Block, txResults []*abci.ExecTxResult, -) ([]byte, int64, error) { +) (int64, error) { blockExec.mempool.Lock() defer blockExec.mempool.Unlock() @@ -432,22 +457,14 @@ func (blockExec *BlockExecutor) Commit( err := blockExec.mempool.FlushAppConn(ctx) if err != nil { blockExec.logger.Error("client error during mempool.FlushAppConn", "err", err) - return nil, 0, err + return 0, err } // Commit block, get hash back res, err := blockExec.appClient.Commit(ctx) if err != nil { blockExec.logger.Error("client error during proxyAppConn.Commit", "err", err) - return nil, 0, err - } - - // we force the abci app to return only 32 byte app hashes (set to 20 temporarily) - if res.Data != nil && len(res.Data) != blockExec.appHashSize { - blockExec.logger.Error( - "Client returned invalid app hash size", "bytesLength", len(res.Data), - ) - return nil, 0, errors.New("invalid App Hash size") + return 0, err } // ResponseCommit has no error code - just data @@ -456,7 +473,7 @@ func (blockExec *BlockExecutor) Commit( "height", block.Height, "core_height", block.CoreChainLockedHeight, "num_txs", len(block.Txs), - "app_hash", fmt.Sprintf("%X", res.Data), + "block_app_hash", fmt.Sprintf("%X", block.AppHash), ) // Update mempool. 
@@ -467,9 +484,10 @@ func (blockExec *BlockExecutor) Commit( txResults, TxPreCheckForState(state), TxPostCheckForState(state), + state.ConsensusParams.ABCI.RecheckTx, ) - return res.Data, res.RetainHeight, err + return res.RetainHeight, err } func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) abci.CommitInfo { @@ -574,13 +592,13 @@ func (state State) Update( // We need to generate LastStateID before changing the state lastStateID := state.StateID() - // Update the validator set with the latest abciResponses. + // Update the validator set with the latest responses to FinalizeBlock. lastHeightValsChanged := state.LastHeightValidatorsChanged if len(validatorUpdates) > 0 { if bytes.Equal(nValSet.QuorumHash, quorumHash) { err := nValSet.UpdateWithChangeSet(validatorUpdates, newThresholdPublicKey, quorumHash) if err != nil { - return state, fmt.Errorf("error changing validator set: %w", err) + return state, fmt.Errorf("changing validator set: %w", err) } // Change results from this height but only applies to the next next height. lastHeightValsChanged = header.Height + 1 + 1 @@ -595,7 +613,7 @@ func (state State) Update( // Update validator proposer priority and set state variables. nValSet.IncrementProposerPriority(1) - // Update the params with the latest abciResponses. + // Update the params with the latest responses to FinalizeBlock. 
nextParams := state.ConsensusParams lastHeightParamsChanged := state.LastHeightConsensusParamsChanged if consensusParamUpdates != nil { @@ -603,7 +621,7 @@ func (state State) Update( nextParams = state.ConsensusParams.UpdateConsensusParams(consensusParamUpdates) err := nextParams.ValidateConsensusParams() if err != nil { - return state, fmt.Errorf("error updating consensus params: %w", err) + return state, fmt.Errorf("updating consensus params: %w", err) } state.Version.Consensus.App = nextParams.Version.AppVersion @@ -734,12 +752,12 @@ func ExecCommitBlock( finalizeBlockResponse, err := appConn.FinalizeBlock( ctx, &abci.RequestFinalizeBlock{ - Hash: block.Hash(), - Height: block.Height, - Time: block.Time, - Txs: block.Txs.ToSliceOfBytes(), - DecidedLastCommit: buildLastCommitInfo(block, store, initialHeight), - ByzantineValidators: block.Evidence.ToABCI(), + Hash: block.Hash(), + Height: block.Height, + Time: block.Time, + Txs: block.Txs.ToSliceOfBytes(), + DecidedLastCommit: buildLastCommitInfo(block, store, initialHeight), + Misbehavior: block.Evidence.ToABCI(), // Dash's fields CoreChainLockedHeight: block.CoreChainLockedHeight, @@ -785,15 +803,15 @@ func ExecCommitBlock( fireEvents(be.logger, be.eventBus, block, blockID, finalizeBlockResponse, validatorSetUpdate) } - // Commit block, get hash back - res, err := appConn.Commit(ctx) + // Commit block + _, err = appConn.Commit(ctx) if err != nil { - logger.Error("client error during proxyAppConn.Commit", "err", res) + logger.Error("client error during proxyAppConn.Commit", "err", err) return nil, err } - // ResponseCommit has no error or log, just data - return res.Data, nil + // ResponseCommit has no error or log + return finalizeBlockResponse.AppHash, nil } func (blockExec *BlockExecutor) pruneBlocks(retainHeight int64) (uint64, error) { diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index b0cbf410ea..d799910747 100644 --- a/internal/state/execution_test.go +++ 
b/internal/state/execution_test.go @@ -69,6 +69,7 @@ func TestApplyBlock(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) blockExec := sm.NewBlockExecutor( stateStore, @@ -153,6 +154,7 @@ func TestFinalizeBlockByzantineValidators(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) eventBus := eventbus.NewDefault(logger) @@ -175,7 +177,7 @@ func TestFinalizeBlockByzantineValidators(t *testing.T) { require.NoError(t, err) // TODO check state and mempool - assert.Equal(t, abciMb, app.ByzantineValidators) + assert.Equal(t, abciMb, app.Misbehavior) } func TestProcessProposal(t *testing.T) { @@ -233,11 +235,11 @@ func TestProcessProposal(t *testing.T) { block1.Txs = txs expectedRpp := &abci.RequestProcessProposal{ - Txs: block1.Txs.ToSliceOfBytes(), - Hash: block1.Hash(), - Height: block1.Header.Height, - Time: block1.Header.Time, - ByzantineValidators: block1.Evidence.ToABCI(), + Txs: block1.Txs.ToSliceOfBytes(), + Hash: block1.Hash(), + Height: block1.Header.Height, + Time: block1.Header.Time, + Misbehavior: block1.Evidence.ToABCI(), ProposedLastCommit: abci.CommitInfo{ Round: 0, //QuorumHash: @@ -452,6 +454,7 @@ func TestFinalizeBlockValidatorUpdates(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{}) @@ -620,6 +623,7 @@ func TestEmptyPrepareProposal(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{}) @@ -744,9 +748,9 @@ func TestPrepareProposalRemoveTxs(t *testing.T) { eventBus, sm.NopMetrics(), ) - proposerProTxHash, _ := state.Validators.GetByIndex(0) + proTxHash, _ := state.Validators.GetByIndex(0) commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, types.StateID{}, state.Validators, 
privVals) - block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, proposerProTxHash, 0) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, proTxHash, 0) require.NoError(t, err) require.Len(t, block.Data.Txs.ToSliceOfBytes(), len(trs)-2) @@ -921,7 +925,6 @@ func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) { ) proposerProTxHash, _ := state.Validators.GetByIndex(0) commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, types.StateID{}, state.Validators, privVals) - block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, proposerProTxHash, 0) require.ErrorContains(t, err, "transaction data size exceeds maximum") require.Nil(t, block, "") diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index 302a96b918..c87c7b6adb 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -17,7 +17,6 @@ import ( sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" tmtime "github.com/tendermint/tendermint/libs/time" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -142,15 +141,13 @@ func makeHeaderPartsResponsesValPowerChange( t *testing.T, state sm.State, power int64, -) (types.Header, *types.CoreChainLock, types.BlockID, *tmstate.ABCIResponses) { +) (types.Header, *types.CoreChainLock, types.BlockID, *abci.ResponseFinalizeBlock) { t.Helper() block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit), nil, 0) require.NoError(t, err) + finalizeBlockResponses := &abci.ResponseFinalizeBlock{} - abciResponses := &tmstate.ABCIResponses{} - - abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{} // If the pubkey is new, remove the old and add the new. 
_, val := state.NextValidators.GetByIndex(0) if val.VotingPower != power { @@ -159,7 +156,7 @@ func makeHeaderPartsResponsesValPowerChange( thresholdPubKey, err := encoding.PubKeyToProto(state.NextValidators.ThresholdPublicKey) require.NoError(t, err) - abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{ + finalizeBlockResponses = &abci.ResponseFinalizeBlock{ ValidatorSetUpdate: &abci.ValidatorSetUpdate{ ValidatorUpdates: []abci.ValidatorUpdate{ {PubKey: &vPbPk, Power: power}, @@ -170,41 +167,35 @@ func makeHeaderPartsResponsesValPowerChange( } } - return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses + return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, finalizeBlockResponses } -func makeHeaderPartsResponsesValKeysRegenerate(t *testing.T, state sm.State, regenerate bool) (types.Header, *types.CoreChainLock, types.BlockID, *tmstate.ABCIResponses) { +func makeHeaderPartsResponsesValKeysRegenerate(t *testing.T, state sm.State, regenerate bool) (types.Header, *types.CoreChainLock, types.BlockID, *abci.ResponseFinalizeBlock) { block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit), nil, 0) if err != nil { t.Error(err) } - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorSetUpdate: nil}, - } + fbResp := &abci.ResponseFinalizeBlock{} if regenerate == true { proTxHashes := state.Validators.GetProTxHashes() valUpdates := types.ValidatorUpdatesRegenerateOnProTxHashes(proTxHashes) - abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{ - ValidatorSetUpdate: &valUpdates, - } + fbResp.ValidatorSetUpdate = &valUpdates } - return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses + return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, 
fbResp } func makeHeaderPartsResponsesParams( t *testing.T, state sm.State, params *types.ConsensusParams, -) (types.Header, *types.CoreChainLock, types.BlockID, *tmstate.ABCIResponses) { +) (types.Header, *types.CoreChainLock, types.BlockID, *abci.ResponseFinalizeBlock) { t.Helper() block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit), nil, 0) require.NoError(t, err) pbParams := params.ToProto() - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ConsensusParamUpdates: &pbParams}, - } - return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses + finalizeBlockResponses := &abci.ResponseFinalizeBlock{ConsensusParamUpdates: &pbParams} + return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, finalizeBlockResponses } func randomGenesisDoc() *types.GenesisDoc { @@ -268,8 +259,8 @@ func makeRandomStateFromConsensusParams( type testApp struct { abci.BaseApplication - ByzantineValidators []abci.Misbehavior - ValidatorSetUpdate *abci.ValidatorSetUpdate + Misbehavior []abci.Misbehavior + ValidatorSetUpdate *abci.ValidatorSetUpdate } var _ abci.Application = (*testApp)(nil) @@ -279,7 +270,7 @@ func (app *testApp) Info(_ context.Context, req *abci.RequestInfo) (*abci.Respon } func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - app.ByzantineValidators = req.ByzantineValidators + app.Misbehavior = req.Misbehavior resTxs := make([]*abci.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { diff --git a/internal/state/indexer/block/kv/kv.go b/internal/state/indexer/block/kv/kv.go index 5356b4c07b..1b9a3120b9 100644 --- a/internal/state/indexer/block/kv/kv.go +++ b/internal/state/indexer/block/kv/kv.go @@ -65,7 +65,7 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { } // 2. 
index FinalizeBlock events - if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, types.EventTypeFinalizeBlock, height); err != nil { + if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, "finalize_block", height); err != nil { return fmt.Errorf("failed to index FinalizeBlock events: %w", err) } diff --git a/internal/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go index e73e4a3ba2..d6db82806a 100644 --- a/internal/state/indexer/indexer_service.go +++ b/internal/state/indexer/indexer_service.go @@ -4,6 +4,7 @@ import ( "context" "time" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/libs/log" @@ -96,7 +97,14 @@ func (is *Service) publish(msg pubsub.Message) error { if curr.Size() != 0 { start := time.Now() - err := sink.IndexTxEvents(curr.Ops) + + var err error + curr.Ops, err = DeduplicateBatch(curr.Ops, sink) + if err != nil { + is.logger.Error("failed to deduplicate batch", "height", is.currentBlock.height, "error", err) + } + + err = sink.IndexTxEvents(curr.Ops) if err != nil { is.logger.Error("failed to index block txs", "height", is.currentBlock.height, "err", err) @@ -169,3 +177,45 @@ func IndexingEnabled(sinks []EventSink) bool { return false } + +// DeduplicateBatch consider the case of duplicate txs. +// if the current one under investigation is NOT OK, then we need to check +// whether there's a previously indexed tx. +// SKIP the current tx if the previously indexed record is found and successful. +func DeduplicateBatch(ops []*abci.TxResult, sink EventSink) ([]*abci.TxResult, error) { + result := make([]*abci.TxResult, 0, len(ops)) + + // keep track of successful txs in this block in order to suppress latter ones being indexed. 
+ var successfulTxsInThisBlock = make(map[string]struct{}) + + for _, txResult := range ops { + hash := types.Tx(txResult.Tx).Hash() + + if txResult.Result.IsOK() { + successfulTxsInThisBlock[string(hash)] = struct{}{} + } else { + // if it already appeared in current block and was successful, skip. + if _, found := successfulTxsInThisBlock[string(hash)]; found { + continue + } + + // check if this tx hash is already indexed + old, err := sink.GetTxByHash(hash) + + // if db op errored + // Not found is not an error + if err != nil { + return nil, err + } + + // if it's already indexed in an older block and was successful, skip. + if old != nil && old.Result.Code == abci.CodeTypeOK { + continue + } + } + + result = append(result, txResult) + } + + return result, nil +} diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index 6126ae2595..6dc1bdf504 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -109,6 +109,165 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { assert.Nil(t, teardown(t, pool)) } +func TestTxIndexDuplicatedTx(t *testing.T) { + var mockTx = types.Tx("MOCK_TX_HASH") + + testCases := []struct { + name string + tx1 abci.TxResult + tx2 abci.TxResult + expSkip bool // do we expect the second tx to be skipped by tx indexer + }{ + {"skip, previously successful", + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK, + }, + }, + abci.TxResult{ + Height: 2, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + true, + }, + {"not skip, previously unsuccessful", + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + abci.TxResult{ + Height: 2, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + false, + }, + {"not skip, both 
successful", + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK, + }, + }, + abci.TxResult{ + Height: 2, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK, + }, + }, + false, + }, + {"not skip, both unsuccessful", + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + abci.TxResult{ + Height: 2, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + false, + }, + {"skip, same block, previously successful", + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK, + }, + }, + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + true, + }, + {"not skip, same block, previously unsuccessful", + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK, + }, + }, + false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sink := kv.NewEventSink(dbm.NewMemDB()) + + if tc.tx1.Height != tc.tx2.Height { + // index the first tx + err := sink.IndexTxEvents([]*abci.TxResult{&tc.tx1}) + require.NoError(t, err) + + // check if the second one should be skipped. 
+ ops, err := indexer.DeduplicateBatch([]*abci.TxResult{&tc.tx2}, sink) + require.NoError(t, err) + + if tc.expSkip { + require.Empty(t, ops) + } else { + require.Equal(t, []*abci.TxResult{&tc.tx2}, ops) + } + } else { + // same block + ops := []*abci.TxResult{&tc.tx1, &tc.tx2} + ops, err := indexer.DeduplicateBatch(ops, sink) + require.NoError(t, err) + if tc.expSkip { + // the second one is skipped + require.Equal(t, []*abci.TxResult{&tc.tx1}, ops) + } else { + require.Equal(t, []*abci.TxResult{&tc.tx1, &tc.tx2}, ops) + } + } + }) + } +} + func readSchema() ([]*schema.Migration, error) { filename := "./sink/psql/schema.sql" contents, err := os.ReadFile(filename) diff --git a/internal/state/indexer/metrics.gen.go b/internal/state/indexer/metrics.gen.go new file mode 100644 index 0000000000..8b079d8d5c --- /dev/null +++ b/internal/state/indexer/metrics.gen.go @@ -0,0 +1,51 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package indexer + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + BlockEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_events_seconds", + Help: "Latency for indexing block events.", + }, labels).With(labelsAndValues...), + TxEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "tx_events_seconds", + Help: "Latency for indexing transaction events.", + }, labels).With(labelsAndValues...), + BlocksIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: 
"blocks_indexed", + Help: "Number of complete blocks indexed.", + }, labels).With(labelsAndValues...), + TransactionsIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "transactions_indexed", + Help: "Number of transactions indexed.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + BlockEventsSeconds: discard.NewHistogram(), + TxEventsSeconds: discard.NewHistogram(), + BlocksIndexed: discard.NewCounter(), + TransactionsIndexed: discard.NewCounter(), + } +} diff --git a/internal/state/indexer/metrics.go b/internal/state/indexer/metrics.go index aa64a4bb2b..93dd0dc9ec 100644 --- a/internal/state/indexer/metrics.go +++ b/internal/state/indexer/metrics.go @@ -2,12 +2,10 @@ package indexer import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - - prometheus "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) +//go:generate go run ../../../scripts/metricsgen -struct=Metrics + // MetricsSubsystem is a the subsystem label for the indexer package. const MetricsSubsystem = "indexer" @@ -25,49 +23,3 @@ type Metrics struct { // Number of transactions indexed. TransactionsIndexed metrics.Counter } - -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). 
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - BlockEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_events_seconds", - Help: "Latency for indexing block events.", - }, labels).With(labelsAndValues...), - TxEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "tx_events_seconds", - Help: "Latency for indexing transaction events.", - }, labels).With(labelsAndValues...), - BlocksIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "blocks_indexed", - Help: "Number of complete blocks indexed.", - }, labels).With(labelsAndValues...), - TransactionsIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "transactions_indexed", - Help: "Number of transactions indexed.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns an indexer metrics stub that discards all samples. 
-func NopMetrics() *Metrics { - return &Metrics{ - BlockEventsSeconds: discard.NewHistogram(), - TxEventsSeconds: discard.NewHistogram(), - BlocksIndexed: discard.NewCounter(), - TransactionsIndexed: discard.NewCounter(), - } -} diff --git a/internal/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go index decf551abd..0c7cf86df9 100644 --- a/internal/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -12,8 +12,6 @@ import ( tenderminttypes "github.com/tendermint/tendermint/types" - testing "testing" - types "github.com/tendermint/tendermint/abci/types" ) @@ -168,8 +166,13 @@ func (_m *EventSink) Type() indexer.EventSinkType { return r0 } -// NewEventSink creates a new instance of EventSink. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewEventSink(t testing.TB) *EventSink { +type mockConstructorTestingTNewEventSink interface { + mock.TestingT + Cleanup(func()) +} + +// NewEventSink creates a new instance of EventSink. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewEventSink(t mockConstructorTestingTNewEventSink) *EventSink { mock := &EventSink{} mock.Mock.Test(t) diff --git a/internal/state/indexer/sink/psql/psql.go b/internal/state/indexer/sink/psql/psql.go index c063832640..57f5e5c3d6 100644 --- a/internal/state/indexer/sink/psql/psql.go +++ b/internal/state/indexer/sink/psql/psql.go @@ -9,8 +9,7 @@ import ( "strings" "time" - "github.com/gogo/protobuf/proto" - + "github.com/gogo/protobuf/jsonpb" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" @@ -177,12 +176,16 @@ INSERT INTO `+tableBlocks+` (height, chain_id, created_at) }) } +var ( + jsonpbMarshaller = jsonpb.Marshaler{} +) + func (es *EventSink) IndexTxEvents(txrs []*abci.TxResult) error { ts := time.Now().UTC() for _, txr := range txrs { // Encode the result message in protobuf wire format for indexing. - resultData, err := proto.Marshal(txr) + resultData, err := jsonpbMarshaller.MarshalToString(txr) if err != nil { return fmt.Errorf("marshaling tx_result: %w", err) } diff --git a/internal/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go index 72d14b5d89..2625d72451 100644 --- a/internal/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -1,6 +1,7 @@ package psql import ( + "bytes" "context" "database/sql" "flag" @@ -12,7 +13,7 @@ import ( "time" "github.com/adlio/schema" - "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/jsonpb" "github.com/ory/dockertest" "github.com/ory/dockertest/docker" "github.com/stretchr/testify/assert" @@ -151,6 +152,8 @@ func TestType(t *testing.T) { assert.Equal(t, indexer.PSQL, psqlSink.Type()) } +var jsonpbUnmarshaller = jsonpb.Unmarshaler{} + func TestIndexing(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -278,13 +281,14 @@ func loadTxResult(hash []byte) (*abci.TxResult, error) { 
hashString := fmt.Sprintf("%X", hash) var resultData []byte if err := testDB().QueryRow(` -SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1; -`, hashString).Scan(&resultData); err != nil { + SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1; + `, hashString).Scan(&resultData); err != nil { return nil, fmt.Errorf("lookup transaction for hash %q failed: %v", hashString, err) } + reader := bytes.NewBuffer(resultData) txr := new(abci.TxResult) - if err := proto.Unmarshal(resultData, txr); err != nil { + if err := jsonpbUnmarshaller.Unmarshal(reader, txr); err != nil { return nil, fmt.Errorf("unmarshaling txr: %w", err) } diff --git a/internal/state/metrics.gen.go b/internal/state/metrics.gen.go new file mode 100644 index 0000000000..eb8ca9f780 --- /dev/null +++ b/internal/state/metrics.gen.go @@ -0,0 +1,46 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package state + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + BlockProcessingTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_processing_time", + Help: "Time between BeginBlock and EndBlock.", + + Buckets: stdprometheus.LinearBuckets(1, 10, 10), + }, labels).With(labelsAndValues...), + ConsensusParamUpdates: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "consensus_param_updates", + Help: "Number of consensus parameter updates returned by the application since process start.", + }, labels).With(labelsAndValues...), + ValidatorSetUpdates: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + 
Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_set_updates", + Help: "Number of validator set updates returned by the application since process start.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + BlockProcessingTime: discard.NewHistogram(), + ConsensusParamUpdates: discard.NewCounter(), + ValidatorSetUpdates: discard.NewCounter(), + } +} diff --git a/internal/state/metrics.go b/internal/state/metrics.go index bcd713f5ff..3663121a6c 100644 --- a/internal/state/metrics.go +++ b/internal/state/metrics.go @@ -2,9 +2,6 @@ package state import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -13,34 +10,20 @@ const ( MetricsSubsystem = "state" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. type Metrics struct { // Time between BeginBlock and EndBlock. - BlockProcessingTime metrics.Histogram -} + BlockProcessingTime metrics.Histogram `metrics_buckettype:"lin" metrics_bucketsizes:"1,10,10"` -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). 
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - BlockProcessingTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_processing_time", - Help: "Time between BeginBlock and EndBlock in ms.", - Buckets: stdprometheus.LinearBuckets(1, 10, 10), - }, labels).With(labelsAndValues...), - } -} + // ConsensusParamUpdates is the total number of times the application has + // udated the consensus params since process start. + //metrics:Number of consensus parameter updates returned by the application since process start. + ConsensusParamUpdates metrics.Counter -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - BlockProcessingTime: discard.NewHistogram(), - } + // ValidatorSetUpdates is the total number of times the application has + // udated the validator set since process start. + //metrics:Number of validator set updates returned by the application since process start. 
+ ValidatorSetUpdates metrics.Counter } diff --git a/internal/state/mocks/block_store.go b/internal/state/mocks/block_store.go index 09075f3b06..cf61a8587e 100644 --- a/internal/state/mocks/block_store.go +++ b/internal/state/mocks/block_store.go @@ -5,8 +5,6 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -169,6 +167,22 @@ func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { return r0 } +// LoadBlockSeenCommitAt provides a mock function with given fields: height +func (_m *BlockStore) LoadSeenCommitAt(height int64) *types.Commit { + ret := _m.Called(height) + + var r0 *types.Commit + if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { + r0 = rf(height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Commit) + } + } + + return r0 +} + // LoadSeenCommit provides a mock function with given fields: func (_m *BlockStore) LoadSeenCommit() *types.Commit { ret := _m.Called() @@ -225,8 +239,13 @@ func (_m *BlockStore) Size() int64 { return r0 } -// NewBlockStore creates a new instance of BlockStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockStore(t testing.TB) *BlockStore { +type mockConstructorTestingTNewBlockStore interface { + mock.TestingT + Cleanup(func()) +} + +// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore { mock := &BlockStore{} mock.Mock.Test(t) diff --git a/internal/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go index 49633269b1..aa7c7b3364 100644 --- a/internal/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -8,8 +8,6 @@ import ( mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -74,8 +72,13 @@ func (_m *EvidencePool) Update(_a0 context.Context, _a1 state.State, _a2 types.E _m.Called(_a0, _a1, _a2) } -// NewEvidencePool creates a new instance of EvidencePool. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewEvidencePool(t testing.TB) *EvidencePool { +type mockConstructorTestingTNewEvidencePool interface { + mock.TestingT + Cleanup(func()) +} + +// NewEvidencePool creates a new instance of EvidencePool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewEvidencePool(t mockConstructorTestingTNewEvidencePool) *EvidencePool { mock := &EvidencePool{} mock.Mock.Test(t) diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index 9b41f3c1bc..1200999ef1 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -4,10 +4,9 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/internal/state" - tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" + abcitypes "github.com/tendermint/tendermint/abci/types" - testing "testing" + state "github.com/tendermint/tendermint/internal/state" types "github.com/tendermint/tendermint/types" ) @@ -66,17 +65,15 @@ func (_m *Store) Load() (state.State, error) { return r0, r1 } -// LoadABCIResponses provides a mock function with given fields: _a0 -func (_m *Store) LoadABCIResponses(_a0 int64) (*tendermintstate.ABCIResponses, error) { +// LoadConsensusParams provides a mock function with given fields: _a0 +func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { ret := _m.Called(_a0) - var r0 *tendermintstate.ABCIResponses - if rf, ok := ret.Get(0).(func(int64) *tendermintstate.ABCIResponses); ok { + var r0 types.ConsensusParams + if rf, ok := ret.Get(0).(func(int64) types.ConsensusParams); ok { r0 = rf(_a0) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*tendermintstate.ABCIResponses) - } + r0 = ret.Get(0).(types.ConsensusParams) } var r1 error @@ -89,15 +86,17 @@ func (_m *Store) LoadABCIResponses(_a0 int64) (*tendermintstate.ABCIResponses, e return r0, r1 } -// LoadConsensusParams provides a mock function with given fields: _a0 -func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { +// LoadFinalizeBlockResponses provides a mock function with given fields: _a0 +func (_m *Store) LoadFinalizeBlockResponses(_a0 int64) (*abcitypes.ResponseFinalizeBlock, error) { ret := _m.Called(_a0) - var r0 types.ConsensusParams 
- if rf, ok := ret.Get(0).(func(int64) types.ConsensusParams); ok { + var r0 *abcitypes.ResponseFinalizeBlock + if rf, ok := ret.Get(0).(func(int64) *abcitypes.ResponseFinalizeBlock); ok { r0 = rf(_a0) } else { - r0 = ret.Get(0).(types.ConsensusParams) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcitypes.ResponseFinalizeBlock) + } } var r1 error @@ -161,12 +160,12 @@ func (_m *Store) Save(_a0 state.State) error { return r0 } -// SaveABCIResponses provides a mock function with given fields: _a0, _a1 -func (_m *Store) SaveABCIResponses(_a0 int64, _a1 *tendermintstate.ABCIResponses) error { +// SaveFinalizeBlockResponses provides a mock function with given fields: _a0, _a1 +func (_m *Store) SaveFinalizeBlockResponses(_a0 int64, _a1 *abcitypes.ResponseFinalizeBlock) error { ret := _m.Called(_a0, _a1) var r0 error - if rf, ok := ret.Get(0).(func(int64, *tendermintstate.ABCIResponses) error); ok { + if rf, ok := ret.Get(0).(func(int64, *abcitypes.ResponseFinalizeBlock) error); ok { r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) @@ -189,8 +188,13 @@ func (_m *Store) SaveValidatorSets(_a0 int64, _a1 int64, _a2 *types.ValidatorSet return r0 } -// NewStore creates a new instance of Store. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewStore(t testing.TB) *Store { +type mockConstructorTestingTNewStore interface { + mock.TestingT + Cleanup(func()) +} + +// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewStore(t mockConstructorTestingTNewStore) *Store { mock := &Store{} mock.Mock.Test(t) diff --git a/internal/state/rollback.go b/internal/state/rollback.go index 6fdc01e03f..bc8b428c55 100644 --- a/internal/state/rollback.go +++ b/internal/state/rollback.go @@ -38,6 +38,8 @@ func Rollback(bs BlockStore, ss Store) (int64, []byte, error) { // state store height is equal to blockstore height. 
We're good to proceed with rolling back state rollbackHeight := invalidState.LastBlockHeight - 1 rollbackBlock := bs.LoadBlockMeta(rollbackHeight) + commit := bs.LoadBlockCommit(rollbackHeight) + if rollbackBlock == nil { return -1, nil, fmt.Errorf("block at height %d not found", rollbackHeight) } @@ -85,6 +87,9 @@ func Rollback(bs BlockStore, ss Store) (int64, []byte, error) { LastBlockHeight: rollbackBlock.Header.Height, LastBlockID: rollbackBlock.BlockID, LastBlockTime: rollbackBlock.Header.Time, + LastStateID: commit.StateID, + + LastCoreChainLockedBlockHeight: rollbackBlock.Header.CoreChainLockedHeight, NextValidators: invalidState.Validators, Validators: invalidState.LastValidators, diff --git a/internal/state/rollback_test.go b/internal/state/rollback_test.go index ddd51b224e..2d3a4defb1 100644 --- a/internal/state/rollback_test.go +++ b/internal/state/rollback_test.go @@ -52,6 +52,7 @@ func TestRollback(t *testing.T) { LastResultsHash: initialState.LastResultsHash, }, } + commit := &types.Commit{} nextBlock := &types.BlockMeta{ BlockID: initialState.LastBlockID, Header: types.Header{ @@ -62,6 +63,7 @@ func TestRollback(t *testing.T) { }, } blockStore.On("LoadBlockMeta", height).Return(block) + blockStore.On("LoadBlockCommit", height).Return(commit) blockStore.On("LoadBlockMeta", nextHeight).Return(nextBlock) blockStore.On("Height").Return(nextHeight) @@ -94,6 +96,7 @@ func TestRollbackNoBlocks(t *testing.T) { blockStore := &mocks.BlockStore{} blockStore.On("Height").Return(height) blockStore.On("LoadBlockMeta", height).Return(nil) + blockStore.On("LoadBlockCommit", height-1).Return(&types.Commit{}) blockStore.On("LoadBlockMeta", height-1).Return(nil) _, _, err := state.Rollback(blockStore, stateStore) diff --git a/internal/state/services.go b/internal/state/services.go index 40365f2fbf..c8f4ffe5ee 100644 --- a/internal/state/services.go +++ b/internal/state/services.go @@ -37,6 +37,7 @@ type BlockStore interface { LoadBlockCommit(height int64) *types.Commit 
LoadSeenCommit() *types.Commit + LoadSeenCommitAt(height int64) *types.Commit } //----------------------------------------------------------------------------- diff --git a/internal/state/state_test.go b/internal/state/state_test.go index 198b551dcb..867bb41be0 100644 --- a/internal/state/state_test.go +++ b/internal/state/state_test.go @@ -23,7 +23,6 @@ import ( sm "github.com/tendermint/tendermint/internal/state" statefactory "github.com/tendermint/tendermint/internal/state/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -104,8 +103,8 @@ func TestStateSaveLoad(t *testing.T) { loadedState, state) } -// TestABCIResponsesSaveLoad tests saving and loading ABCIResponses. -func TestABCIResponsesSaveLoad1(t *testing.T) { +// TestFinalizeBlockResponsesSaveLoad1 tests saving and loading responses to FinalizeBlock. +func TestFinalizeBlockResponsesSaveLoad1(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) stateStore := sm.NewStore(stateDB) @@ -116,34 +115,33 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { block, err := statefactory.MakeBlock(state, 2, new(types.Commit), nil, 0) require.NoError(t, err) - abciResponses := new(tmstate.ABCIResponses) dtxs := make([]*abci.ExecTxResult, 2) - abciResponses.FinalizeBlock = new(abci.ResponseFinalizeBlock) - abciResponses.FinalizeBlock.TxResults = dtxs + finalizeBlockResponses := new(abci.ResponseFinalizeBlock) + finalizeBlockResponses.TxResults = dtxs - abciResponses.FinalizeBlock.TxResults[0] = &abci.ExecTxResult{Data: []byte("foo"), Events: nil} - abciResponses.FinalizeBlock.TxResults[1] = &abci.ExecTxResult{Data: []byte("bar"), Log: "ok", Events: nil} + finalizeBlockResponses.TxResults[0] = &abci.ExecTxResult{Data: []byte("foo"), Events: nil} + finalizeBlockResponses.TxResults[1] = &abci.ExecTxResult{Data: []byte("bar"), Log: "ok", Events: nil} pubKey := 
bls12381.GenPrivKey().PubKey() abciPubKey, err := cryptoenc.PubKeyToProto(pubKey) require.NoError(t, err) vu := types.TM2PB.NewValidatorUpdate(pubKey, 100, crypto.RandProTxHash(), types.RandValidatorAddress().String()) - abciResponses.FinalizeBlock.ValidatorSetUpdate = &abci.ValidatorSetUpdate{ + finalizeBlockResponses.ValidatorSetUpdate = &abci.ValidatorSetUpdate{ ValidatorUpdates: []abci.ValidatorUpdate{vu}, ThresholdPublicKey: abciPubKey, } - err = stateStore.SaveABCIResponses(block.Height, abciResponses) + err = stateStore.SaveFinalizeBlockResponses(block.Height, finalizeBlockResponses) require.NoError(t, err) - loadedABCIResponses, err := stateStore.LoadABCIResponses(block.Height) + loadedFinalizeBlockResponses, err := stateStore.LoadFinalizeBlockResponses(block.Height) require.NoError(t, err) - assert.Equal(t, abciResponses, loadedABCIResponses, - "ABCIResponses don't match:\ngot: %v\nexpected: %v\n", - loadedABCIResponses, abciResponses) + assert.Equal(t, finalizeBlockResponses, loadedFinalizeBlockResponses, + "FinalizeBlockResponses don't match:\ngot: %v\nexpected: %v\n", + loadedFinalizeBlockResponses, finalizeBlockResponses) } -// TestResultsSaveLoad tests saving and loading ABCI results. -func TestABCIResponsesSaveLoad2(t *testing.T) { +// TestFinalizeBlockResponsesSaveLoad2 tests saving and loading responses to FinalizeBlock. +func TestFinalizeBlockResponsesSaveLoad2(t *testing.T) { tearDown, stateDB, _ := setupTestCase(t) defer tearDown(t) @@ -199,32 +197,31 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { // Query all before, this should return error. for i := range cases { h := int64(i + 1) - res, err := stateStore.LoadABCIResponses(h) + res, err := stateStore.LoadFinalizeBlockResponses(h) assert.Error(t, err, "%d: %#v", i, res) } // Add all cases. 
for i, tc := range cases { h := int64(i + 1) // last block height, one below what we save - responses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - TxResults: tc.added, - }, + responses := &abci.ResponseFinalizeBlock{ + TxResults: tc.added, + AppHash: []byte("a_hash"), } - err := stateStore.SaveABCIResponses(h, responses) + err := stateStore.SaveFinalizeBlockResponses(h, responses) require.NoError(t, err) } - // Query all before, should return expected value. + // Query all after, should return expected value. for i, tc := range cases { h := int64(i + 1) - res, err := stateStore.LoadABCIResponses(h) + res, err := stateStore.LoadFinalizeBlockResponses(h) if assert.NoError(t, err, "%d", i) { t.Log(res) e, err := abci.MarshalTxResults(tc.expected) require.NoError(t, err) he := merkle.HashFromByteSlices(e) - rs, err := abci.MarshalTxResults(res.FinalizeBlock.TxResults) + rs, err := abci.MarshalTxResults(res.TxResults) hrs := merkle.HashFromByteSlices(rs) require.NoError(t, err) assert.Equal(t, he, hrs, "%d", i) @@ -288,14 +285,14 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { changeIndex++ } header, _, blockID, responses := makeHeaderPartsResponsesValPowerChange(t, state, types.DefaultDashVotingPower) - validatorUpdates, thresholdPubKey, quorumHash, err := types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.FinalizeBlock.ValidatorSetUpdate) + validatorUpdates, thresholdPubKey, quorumHash, err := types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.ValidatorSetUpdate) require.NoError(t, err) - rs, err := abci.MarshalTxResults(responses.FinalizeBlock.TxResults) + rs, err := abci.MarshalTxResults(responses.TxResults) require.NoError(t, err) // Any node pro tx hash should do firstNodeProTxHash, _ := state.Validators.GetByIndex(0) h := merkle.HashFromByteSlices(rs) - state, err = state.Update(firstNodeProTxHash, blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates, thresholdPubKey, quorumHash) + 
state, err = state.Update(firstNodeProTxHash, blockID, &header, h, responses.ConsensusParamUpdates, validatorUpdates, thresholdPubKey, quorumHash) require.NoError(t, err) validator := state.Validators.Validators[0] testCases[i-1] = validator.PubKey @@ -924,12 +921,12 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { // Save state etc. var validatorUpdates []*types.Validator validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err := - types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.FinalizeBlock.ValidatorSetUpdate) + types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.ValidatorSetUpdate) require.NoError(t, err) - rs, err := abci.MarshalTxResults(responses.FinalizeBlock.TxResults) + rs, err := abci.MarshalTxResults(responses.TxResults) require.NoError(t, err) h := merkle.HashFromByteSlices(rs) - state, err = state.Update(firstNodeProTxHash, blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates, + state, err = state.Update(firstNodeProTxHash, blockID, &header, h, responses.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) require.NoError(t, err) nextHeight := state.LastBlockHeight + 1 @@ -1020,16 +1017,16 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { changeIndex++ cp = params[changeIndex] } - header, _, blockID, responses := makeHeaderPartsResponsesParams(t, state, &cp) - validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err = types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.FinalizeBlock.ValidatorSetUpdate) + header, _, blockID, fpResp := makeHeaderPartsResponsesParams(t, state, &cp) + validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err = types.PB2TM.ValidatorUpdatesFromValidatorSet(fpResp.ValidatorSetUpdate) require.NoError(t, err) - rs, err := abci.MarshalTxResults(responses.FinalizeBlock.TxResults) + rs, err := abci.MarshalTxResults(fpResp.TxResults) require.NoError(t, err) h := merkle.HashFromByteSlices(rs) // Any node pro tx hash should do 
firstNodeProTxHash, _ := state.Validators.GetByIndex(0) - state, err = state.Update(firstNodeProTxHash, blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) + state, err = state.Update(firstNodeProTxHash, blockID, &header, h, fpResp.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) require.NoError(t, err) err = stateStore.Save(state) @@ -1113,20 +1110,18 @@ func TestState_StateID(t *testing.T) { func blockExecutorFunc(t *testing.T, firstProTxHash crypto.ProTxHash) func(prevState, state sm.State, vsu *abci.ValidatorSetUpdate) sm.State { return func(prevState, state sm.State, vsu *abci.ValidatorSetUpdate) sm.State { t.Helper() - resp := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorSetUpdate: vsu}, - } + fpResp := &abci.ResponseFinalizeBlock{ValidatorSetUpdate: vsu} validatorUpdates, thresholdPubKey, quorumHash, err := - types.PB2TM.ValidatorUpdatesFromValidatorSet(resp.FinalizeBlock.ValidatorSetUpdate) + types.PB2TM.ValidatorUpdatesFromValidatorSet(fpResp.ValidatorSetUpdate) require.NoError(t, err) block, err := statefactory.MakeBlock(prevState, prevState.LastBlockHeight+1, new(types.Commit), nil, 0) require.NoError(t, err) blockID, err := block.BlockID() require.NoError(t, err) - rs, err := abci.MarshalTxResults(resp.FinalizeBlock.TxResults) + rs, err := abci.MarshalTxResults(fpResp.TxResults) require.NoError(t, err) h := merkle.HashFromByteSlices(rs) - state, err = state.Update(firstProTxHash, blockID, &block.Header, h, resp.FinalizeBlock.ConsensusParamUpdates, + state, err = state.Update(firstProTxHash, blockID, &block.Header, h, fpResp.ConsensusParamUpdates, validatorUpdates, thresholdPubKey, quorumHash) require.NoError(t, err) return state diff --git a/internal/state/store.go b/internal/state/store.go index 6433a25ab8..f33ee6c5d1 100644 --- a/internal/state/store.go +++ b/internal/state/store.go @@ -26,12 +26,23 @@ const ( 
//------------------------------------------------------------------------ +// key prefixes +// NB: Before modifying these, cross-check them with those in +// * internal/store/store.go [0..4, 13] +// * internal/state/store.go [5..8, 14] +// * internal/evidence/pool.go [9..10] +// * light/store/db/db.go [11..12] +// TODO(thane): Move all these to their own package. +// TODO: what about these (they already collide): +// * scripts/scmigrate/migrate.go [3] +// * internal/p2p/peermanager.go [1] const ( // prefixes are unique across all tm db's - prefixValidators = int64(5) - prefixConsensusParams = int64(6) - prefixABCIResponses = int64(7) - prefixState = int64(8) + prefixValidators = int64(5) + prefixConsensusParams = int64(6) + prefixABCIResponses = int64(7) // deprecated in v0.36 + prefixState = int64(8) + prefixFinalizeBlockResponses = int64(14) ) func encodeKey(prefix int64, height int64) []byte { @@ -54,6 +65,10 @@ func abciResponsesKey(height int64) []byte { return encodeKey(prefixABCIResponses, height) } +func finalizeBlockResponsesKey(height int64) []byte { + return encodeKey(prefixFinalizeBlockResponses, height) +} + // stateKey should never change after being set in init() var stateKey []byte @@ -78,14 +93,14 @@ type Store interface { Load() (State, error) // LoadValidators loads the validator set at a given height LoadValidators(int64) (*types.ValidatorSet, error) - // LoadABCIResponses loads the abciResponse for a given height - LoadABCIResponses(int64) (*tmstate.ABCIResponses, error) + // LoadFinalizeBlockResponses loads the responses to FinalizeBlock for a given height + LoadFinalizeBlockResponses(int64) (*abci.ResponseFinalizeBlock, error) // LoadConsensusParams loads the consensus params for a given height LoadConsensusParams(int64) (types.ConsensusParams, error) // Save overwrites the previous state with the updated one Save(State) error - // SaveABCIResponses saves ABCIResponses for a given height - SaveABCIResponses(int64, *tmstate.ABCIResponses) 
error + // SaveFinalizeBlockResponses saves responses to FinalizeBlock for a given height + SaveFinalizeBlockResponses(int64, *abci.ResponseFinalizeBlock) error // SaveValidatorSet saves the validator set at a given height SaveValidatorSets(int64, int64, *types.ValidatorSet) error // Bootstrap is used for bootstrapping state when not starting from a initial height. @@ -134,7 +149,6 @@ func (store dbStore) loadState(key []byte) (state State, err error) { if err != nil { return state, err } - return *sm, nil } @@ -245,7 +259,7 @@ func (store dbStore) PruneStates(retainHeight int64) error { return err } - if err := store.pruneABCIResponses(retainHeight); err != nil { + if err := store.pruneFinalizeBlockResponses(retainHeight); err != nil { return err } @@ -336,10 +350,15 @@ func (store dbStore) pruneConsensusParams(retainHeight int64) error { ) } -// pruneABCIResponses calls a reverse iterator from base height to retain height batch deleting -// all abci responses in between -func (store dbStore) pruneABCIResponses(height int64) error { - return store.pruneRange(abciResponsesKey(1), abciResponsesKey(height)) +// pruneFinalizeBlockResponses calls a reverse iterator from base height to retain height +// batch deleting all responses to FinalizeBlock, and legacy ABCI responses, in between +func (store dbStore) pruneFinalizeBlockResponses(height int64) error { + err := store.pruneRange(finalizeBlockResponsesKey(1), finalizeBlockResponsesKey(height)) + if err == nil { + // Remove any stale legacy ABCI responses + err = store.pruneRange(abciResponsesKey(1), abciResponsesKey(height)) + } + return err } // pruneRange is a generic function for deleting a range of keys in reverse order. @@ -406,60 +425,63 @@ func (store dbStore) reverseBatchDelete(batch dbm.Batch, start, end []byte) ([]b //------------------------------------------------------------------------ -// LoadABCIResponses loads the ABCIResponses for the given height from the -// database. 
If not found, ErrNoABCIResponsesForHeight is returned. +// LoadFinalizeBlockResponses loads the responses to FinalizeBlock for the +// given height from the database. If not found, +// ErrNoFinalizeBlockResponsesForHeight is returned. // -// This is useful for recovering from crashes where we called app.Commit and -// before we called s.Save(). It can also be used to produce Merkle proofs of -// the result of txs. -func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, error) { - buf, err := store.db.Get(abciResponsesKey(height)) +// This is useful for recovering from crashes where we called app.Commit +// and before we called s.Save(). It can also be used to produce Merkle +// proofs of the result of txs. +func (store dbStore) LoadFinalizeBlockResponses(height int64) (*abci.ResponseFinalizeBlock, error) { + buf, err := store.db.Get(finalizeBlockResponsesKey(height)) if err != nil { return nil, err } if len(buf) == 0 { - - return nil, ErrNoABCIResponsesForHeight{height} + return nil, ErrNoFinalizeBlockResponsesForHeight{height} } - abciResponses := new(tmstate.ABCIResponses) - err = abciResponses.Unmarshal(buf) + finalizeBlockResponses := new(abci.ResponseFinalizeBlock) + err = finalizeBlockResponses.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } // TODO: ensure that buf is completely read. - return abciResponses, nil + return finalizeBlockResponses, nil } -// SaveABCIResponses persists the ABCIResponses to the database. +// SaveFinalizeBlockResponses persists to the database the responses to FinalizeBlock. // This is useful in case we crash after app.Commit and before s.Save(). // Responses are indexed by height so they can also be loaded later to produce // Merkle proofs. // // Exposed for testing. 
-func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error { - return store.saveABCIResponses(height, abciResponses) +func (store dbStore) SaveFinalizeBlockResponses(height int64, finalizeBlockResponses *abci.ResponseFinalizeBlock) error { + return store.saveFinalizeBlockResponses(height, finalizeBlockResponses) } -func (store dbStore) saveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error { +func (store dbStore) saveFinalizeBlockResponses(height int64, finalizeBlockResponses *abci.ResponseFinalizeBlock) error { var dtxs []*abci.ExecTxResult // strip nil values, - for _, tx := range abciResponses.FinalizeBlock.TxResults { + for _, tx := range finalizeBlockResponses.TxResults { if tx != nil { dtxs = append(dtxs, tx) } } - abciResponses.FinalizeBlock.TxResults = dtxs + finalizeBlockResponses.TxResults = dtxs - bz, err := abciResponses.Marshal() + bz, err := finalizeBlockResponses.Marshal() if err != nil { return err } + if len(bz) == 0 { + return ErrNoFinalizeBlockResponsesForHeight{height} + } - return store.db.SetSync(abciResponsesKey(height), bz) + return store.db.SetSync(finalizeBlockResponsesKey(height), bz) } // SaveValidatorSets is used to save the validator set over multiple heights. 
diff --git a/internal/state/store_test.go b/internal/state/store_test.go index 1e8682a4fb..e8448a68c6 100644 --- a/internal/state/store_test.go +++ b/internal/state/store_test.go @@ -14,7 +14,6 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" sm "github.com/tendermint/tendermint/internal/state" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -222,15 +221,14 @@ func TestPruneStates(t *testing.T) { err := stateStore.Save(state) require.NoError(t, err) - err = stateStore.SaveABCIResponses(h, &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - TxResults: []*abci.ExecTxResult{ - {Data: []byte{1}}, - {Data: []byte{2}}, - {Data: []byte{3}}, - }, + err = stateStore.SaveFinalizeBlockResponses(h, &abci.ResponseFinalizeBlock{ + TxResults: []*abci.ExecTxResult{ + {Data: []byte{1}}, + {Data: []byte{2}}, + {Data: []byte{3}}, }, - }) + }, + ) require.NoError(t, err) } @@ -251,9 +249,9 @@ func TestPruneStates(t *testing.T) { require.NoError(t, err, h) require.NotNil(t, params, h) - abci, err := stateStore.LoadABCIResponses(h) + finRes, err := stateStore.LoadFinalizeBlockResponses(h) require.NoError(t, err, h) - require.NotNil(t, abci, h) + require.NotNil(t, finRes, h) } emptyParams := types.ConsensusParams{} @@ -277,9 +275,9 @@ func TestPruneStates(t *testing.T) { require.Equal(t, emptyParams, params, h) } - abci, err := stateStore.LoadABCIResponses(h) + finRes, err := stateStore.LoadFinalizeBlockResponses(h) require.Error(t, err, h) - require.Nil(t, abci, h) + require.Nil(t, finRes, h) } }) } diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index 4355d92c2b..303a729dfb 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -59,6 +59,7 @@ func TestValidateBlockHeader(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) 
blockStore := store.NewBlockStore(dbm.NewMemDB()) @@ -191,6 +192,7 @@ func TestValidateBlockCommit(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) blockStore := store.NewBlockStore(dbm.NewMemDB()) @@ -381,6 +383,7 @@ func TestValidateBlockEvidence(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) state.ConsensusParams.Evidence.MaxBytes = 1000 @@ -475,6 +478,5 @@ func TestValidateBlockEvidence(t *testing.T) { evidence, 0, ) - } } diff --git a/internal/statesync/block_queue_test.go b/internal/statesync/block_queue_test.go index 2380036f63..927a32d087 100644 --- a/internal/statesync/block_queue_test.go +++ b/internal/statesync/block_queue_test.go @@ -126,6 +126,10 @@ func TestBlockQueueWithFailures(t *testing.T) { // Test that when all the blocks are retrieved that the queue still holds on to // it's workers and in the event of failure can still fetch the failed block func TestBlockQueueBlocks(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 2) @@ -176,6 +180,10 @@ loop: } func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1) diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go index 9cdb349784..e7ad731483 100644 --- a/internal/statesync/dispatcher.go +++ b/internal/statesync/dispatcher.go @@ -26,14 +26,14 @@ var ( // NOTE: It is not the responsibility of the dispatcher to verify the light blocks. 
type Dispatcher struct { // the channel with which to send light block requests on - requestCh *p2p.Channel + requestCh p2p.Channel mtx sync.Mutex // all pending calls that have been dispatched and are awaiting an answer calls map[types.NodeID]chan *types.LightBlock } -func NewDispatcher(requestChannel *p2p.Channel) *Dispatcher { +func NewDispatcher(requestChannel p2p.Channel) *Dispatcher { return &Dispatcher{ requestCh: requestChannel, calls: make(map[types.NodeID]chan *types.LightBlock), diff --git a/internal/statesync/dispatcher_test.go b/internal/statesync/dispatcher_test.go index 65c517be43..8f6783e67b 100644 --- a/internal/statesync/dispatcher_test.go +++ b/internal/statesync/dispatcher_test.go @@ -24,13 +24,13 @@ type channelInternal struct { Error chan p2p.PeerError } -func testChannel(size int) (*channelInternal, *p2p.Channel) { +func testChannel(size int) (*channelInternal, p2p.Channel) { in := &channelInternal{ In: make(chan p2p.Envelope, size), Out: make(chan p2p.Envelope, size), Error: make(chan p2p.PeerError, size), } - return in, p2p.NewChannel(0, nil, in.In, in.Out, in.Error) + return in, p2p.NewChannel(0, "test", in.In, in.Out, in.Error) } func TestDispatcherBasic(t *testing.T) { diff --git a/internal/statesync/metrics.gen.go b/internal/statesync/metrics.gen.go new file mode 100644 index 0000000000..b4d5caa12c --- /dev/null +++ b/internal/statesync/metrics.gen.go @@ -0,0 +1,72 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package statesync + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + TotalSnapshots: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "total_snapshots", + Help: "The total number of snapshots discovered.", + }, labels).With(labelsAndValues...), + ChunkProcessAvgTime: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "chunk_process_avg_time", + Help: "The average processing time per chunk.", + }, labels).With(labelsAndValues...), + SnapshotHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_height", + Help: "The height of the current snapshot the has been processed.", + }, labels).With(labelsAndValues...), + SnapshotChunk: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_chunk", + Help: "The current number of chunks that have been processed.", + }, labels).With(labelsAndValues...), + SnapshotChunkTotal: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_chunk_total", + Help: "The total number of chunks in the current snapshot.", + }, labels).With(labelsAndValues...), + BackFilledBlocks: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "back_filled_blocks", + Help: "The current number of blocks that have been back-filled.", + }, labels).With(labelsAndValues...), + BackFillBlocksTotal: 
prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "back_fill_blocks_total", + Help: "The total number of blocks that need to be back-filled.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + TotalSnapshots: discard.NewCounter(), + ChunkProcessAvgTime: discard.NewGauge(), + SnapshotHeight: discard.NewGauge(), + SnapshotChunk: discard.NewCounter(), + SnapshotChunkTotal: discard.NewGauge(), + BackFilledBlocks: discard.NewCounter(), + BackFillBlocksTotal: discard.NewGauge(), + } +} diff --git a/internal/statesync/metrics.go b/internal/statesync/metrics.go index fb134f5804..a8a3af9152 100644 --- a/internal/statesync/metrics.go +++ b/internal/statesync/metrics.go @@ -2,9 +2,6 @@ package statesync import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -12,80 +9,22 @@ const ( MetricsSubsystem = "statesync" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. type Metrics struct { - TotalSnapshots metrics.Counter + // The total number of snapshots discovered. + TotalSnapshots metrics.Counter + // The average processing time per chunk. ChunkProcessAvgTime metrics.Gauge - SnapshotHeight metrics.Gauge - SnapshotChunk metrics.Counter - SnapshotChunkTotal metrics.Gauge - BackFilledBlocks metrics.Counter + // The height of the current snapshot the has been processed. + SnapshotHeight metrics.Gauge + // The current number of chunks that have been processed. + SnapshotChunk metrics.Counter + // The total number of chunks in the current snapshot. + SnapshotChunkTotal metrics.Gauge + // The current number of blocks that have been back-filled. + BackFilledBlocks metrics.Counter + // The total number of blocks that need to be back-filled. 
BackFillBlocksTotal metrics.Gauge } - -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). -func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - TotalSnapshots: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "total_snapshots", - Help: "The total number of snapshots discovered.", - }, labels).With(labelsAndValues...), - ChunkProcessAvgTime: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "chunk_process_avg_time", - Help: "The average processing time per chunk.", - }, labels).With(labelsAndValues...), - SnapshotHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "snapshot_height", - Help: "The height of the current snapshot the has been processed.", - }, labels).With(labelsAndValues...), - SnapshotChunk: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "snapshot_chunk", - Help: "The current number of chunks that have been processed.", - }, labels).With(labelsAndValues...), - SnapshotChunkTotal: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "snapshot_chunks_total", - Help: "The total number of chunks in the current snapshot.", - }, labels).With(labelsAndValues...), - BackFilledBlocks: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "backfilled_blocks", - Help: "The current number of blocks that have been back-filled.", - }, labels).With(labelsAndValues...), - BackFillBlocksTotal: 
prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "backfilled_blocks_total", - Help: "The total number of blocks that need to be back-filled.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - TotalSnapshots: discard.NewCounter(), - ChunkProcessAvgTime: discard.NewGauge(), - SnapshotHeight: discard.NewGauge(), - SnapshotChunk: discard.NewCounter(), - SnapshotChunkTotal: discard.NewGauge(), - BackFilledBlocks: discard.NewCounter(), - BackFillBlocksTotal: discard.NewGauge(), - } -} diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index 582ebcd9c4..099588ed12 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -8,8 +8,6 @@ import ( mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -85,8 +83,13 @@ func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, return r0, r1 } -// NewStateProvider creates a new instance of StateProvider. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewStateProvider(t testing.TB) *StateProvider { +type mockConstructorTestingTNewStateProvider interface { + mock.TestingT + Cleanup(func()) +} + +// NewStateProvider creates a new instance of StateProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewStateProvider(t mockConstructorTestingTNewStateProvider) *StateProvider { mock := &StateProvider{} mock.Mock.Test(t) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index f87aed4d98..3b1c38817e 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -314,7 +314,7 @@ func (r *Reactor) OnStart(ctx context.Context) error { return nil } - go r.processChannels(ctx, map[p2p.ChannelID]*p2p.Channel{ + go r.processChannels(ctx, map[p2p.ChannelID]p2p.Channel{ SnapshotChannel: snapshotCh, ChunkChannel: chunkCh, LightBlockChannel: blockCh, @@ -635,7 +635,7 @@ func (r *Reactor) backfill( // handleSnapshotMessage handles envelopes sent from peers on the // SnapshotChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. -func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envelope, snapshotCh *p2p.Channel) error { +func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envelope, snapshotCh p2p.Channel) error { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -707,40 +707,34 @@ func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envel // handleChunkMessage handles envelopes sent from peers on the ChunkChannel. // It returns an error only if the Envelope.Message is unknown for this channel. // This should never be called outside of handleMessage. 
-func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope, chunkCh *p2p.Channel) error { +func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope, chunkCh p2p.Channel) error { switch msg := envelope.Message.(type) { case *ssproto.ChunkRequest: - r.logger.Debug( - "received chunk request", + r.logger.Debug("received chunk request", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, - "peer", envelope.From, - ) + "peer", envelope.From) resp, err := r.conn.LoadSnapshotChunk(ctx, &abci.RequestLoadSnapshotChunk{ Height: msg.Height, Format: msg.Format, Chunk: msg.Index, }) if err != nil { - r.logger.Error( - "failed to load chunk", + r.logger.Error("failed to load chunk", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, "err", err, - "peer", envelope.From, - ) + "peer", envelope.From) return nil } - r.logger.Debug( - "sending chunk", + r.logger.Debug("sending chunk", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, - "peer", envelope.From, - ) + "peer", envelope.From) if err := chunkCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.ChunkResponse{ @@ -763,13 +757,11 @@ func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope return nil } - r.logger.Debug( - "received chunk; adding to sync", + r.logger.Debug("received chunk; adding to sync", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, - "peer", envelope.From, - ) + "peer", envelope.From) _, err := r.syncer.AddChunk(&chunk{ Height: msg.Height, Format: msg.Format, @@ -778,14 +770,12 @@ func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope Sender: envelope.From, }) if err != nil { - r.logger.Error( - "failed to add chunk", + r.logger.Error("failed to add chunk", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, "err", err, - "peer", envelope.From, - ) + "peer", envelope.From) return nil } @@ -796,7 +786,7 @@ func (r *Reactor) 
handleChunkMessage(ctx context.Context, envelope *p2p.Envelope return nil } -func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Envelope, blockCh *p2p.Channel) error { +func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Envelope, blockCh p2p.Channel) error { switch msg := envelope.Message.(type) { case *ssproto.LightBlockRequest: r.logger.Info("received light block request", "height", msg.Height) @@ -853,7 +843,7 @@ func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Env return nil } -func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelope, paramsCh *p2p.Channel) error { +func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelope, paramsCh p2p.Channel) error { switch msg := envelope.Message.(type) { case *ssproto.ParamsRequest: r.logger.Debug("received consensus params request", "height", msg.Height) @@ -902,7 +892,7 @@ func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelop // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, chans map[p2p.ChannelID]*p2p.Channel) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, chans map[p2p.ChannelID]p2p.Channel) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -936,12 +926,12 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, cha // encountered during message execution will result in a PeerError being sent on // the respective channel. When the reactor is stopped, we will catch the signal // and close the p2p Channel gracefully. 
-func (r *Reactor) processChannels(ctx context.Context, chanTable map[p2p.ChannelID]*p2p.Channel) { - // make sure that the iterator gets cleaned up in case of error +func (r *Reactor) processChannels(ctx context.Context, chanTable map[p2p.ChannelID]p2p.Channel) { + // make sure tht the iterator gets cleaned up in case of error ctx, cancel := context.WithCancel(ctx) defer cancel() - chs := make([]*p2p.Channel, 0, len(chanTable)) + chs := make([]p2p.Channel, 0, len(chanTable)) for key := range chanTable { chs = append(chs, chanTable[key]) } diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index e64a72096c..80a7f0134c 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -49,22 +49,22 @@ type reactorTestSuite struct { conn *clientmocks.Client stateProvider *mocks.StateProvider - snapshotChannel *p2p.Channel + snapshotChannel p2p.Channel snapshotInCh chan p2p.Envelope snapshotOutCh chan p2p.Envelope snapshotPeerErrCh chan p2p.PeerError - chunkChannel *p2p.Channel + chunkChannel p2p.Channel chunkInCh chan p2p.Envelope chunkOutCh chan p2p.Envelope chunkPeerErrCh chan p2p.PeerError - blockChannel *p2p.Channel + blockChannel p2p.Channel blockInCh chan p2p.Envelope blockOutCh chan p2p.Envelope blockPeerErrCh chan p2p.PeerError - paramsChannel *p2p.Channel + paramsChannel p2p.Channel paramsInCh chan p2p.Envelope paramsOutCh chan p2p.Envelope paramsPeerErrCh chan p2p.PeerError @@ -114,7 +114,7 @@ func setup( rts.snapshotChannel = p2p.NewChannel( SnapshotChannel, - new(ssproto.Message), + "snapshot", rts.snapshotInCh, rts.snapshotOutCh, rts.snapshotPeerErrCh, @@ -122,7 +122,7 @@ func setup( rts.chunkChannel = p2p.NewChannel( ChunkChannel, - new(ssproto.Message), + "chunk", rts.chunkInCh, rts.chunkOutCh, rts.chunkPeerErrCh, @@ -130,7 +130,7 @@ func setup( rts.blockChannel = p2p.NewChannel( LightBlockChannel, - new(ssproto.Message), + "lightblock", rts.blockInCh, rts.blockOutCh, rts.blockPeerErrCh, @@ 
-138,7 +138,7 @@ func setup( rts.paramsChannel = p2p.NewChannel( ParamsChannel, - new(ssproto.Message), + "params", rts.paramsInCh, rts.paramsOutCh, rts.paramsPeerErrCh, @@ -152,7 +152,7 @@ func setup( rts.privVal = types.NewMockPV() rts.dashcoreClient = dashcore.NewMockClient(chainID, llmqType, rts.privVal, false) - chCreator := func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(ctx context.Context, desc *p2p.ChannelDescriptor) (p2p.Channel, error) { switch desc.ID { case SnapshotChannel: return rts.snapshotChannel, nil @@ -214,6 +214,10 @@ func setup( } func TestReactor_Sync(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() @@ -640,6 +644,10 @@ func TestReactor_StateProviderP2P(t *testing.T) { } func TestReactor_Backfill(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -648,6 +656,10 @@ func TestReactor_Backfill(t *testing.T) { for _, failureRate := range failureRates { failureRate := failureRate t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) { + if testing.Short() && failureRate > 0 { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index 6ca671ee7c..445be8487c 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -212,7 +212,7 @@ type stateProviderP2P struct { sync.Mutex // light.Client is not concurrency-safe lc *light.Client initialHeight int64 - paramsSendCh *p2p.Channel + paramsSendCh p2p.Channel paramsRecvCh chan types.ConsensusParams } @@ -224,7 +224,7 @@ func NewP2PStateProvider( initialHeight int64, trustHeight int64, providers []lightprovider.Provider, - paramsSendCh 
*p2p.Channel, + paramsSendCh p2p.Channel, logger log.Logger, dashCoreClient dashcore.Client, ) (StateProvider, error) { diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index 6745abf6be..059ec7f2f7 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -56,8 +56,8 @@ type syncer struct { stateProvider StateProvider conn abciclient.Client snapshots *snapshotPool - snapshotCh *p2p.Channel - chunkCh *p2p.Channel + snapshotCh p2p.Channel + chunkCh p2p.Channel tempDir string fetchers int32 retryTimeout time.Duration @@ -84,11 +84,9 @@ func (s *syncer) AddChunk(chunk *chunk) (bool, error) { return false, err } if added { - s.logger.Debug("Added chunk to queue", "height", chunk.Height, "format", chunk.Format, - "chunk", chunk.Index) + s.logger.Debug("Added chunk to queue", "height", chunk.Height, "format", chunk.Format, "chunk", chunk.Index) } else { - s.logger.Debug("Ignoring duplicate chunk in queue", "height", chunk.Height, "format", chunk.Format, - "chunk", chunk.Index) + s.logger.Debug("Ignoring duplicate chunk in queue", "height", chunk.Height, "format", chunk.Format, "chunk", chunk.Index) } return added, nil } @@ -137,12 +135,20 @@ func (s *syncer) SyncAny( discoveryTime = minimumDiscoveryTime } + timer := time.NewTimer(discoveryTime) + defer timer.Stop() + if discoveryTime > 0 { if err := requestSnapshots(); err != nil { return sm.State{}, nil, err } - s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) - time.Sleep(discoveryTime) + s.logger.Info("discovering snapshots", + "interval", discoveryTime) + select { + case <-ctx.Done(): + return sm.State{}, nil, ctx.Err() + case <-timer.C: + } } // The app may ask us to retry a snapshot restoration, in which case we need to reuse @@ -151,8 +157,11 @@ func (s *syncer) SyncAny( snapshot *snapshot chunks *chunkQueue err error + iters int ) + for { + iters++ // If not nil, we're going to retry restoration of the same snapshot. 
if snapshot == nil { snapshot = s.snapshots.Best() @@ -162,9 +171,16 @@ func (s *syncer) SyncAny( if discoveryTime == 0 { return sm.State{}, nil, errNoSnapshots } - s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) - time.Sleep(discoveryTime) - continue + s.logger.Info("discovering snapshots", + "iterations", iters, + "interval", discoveryTime) + timer.Reset(discoveryTime) + select { + case <-ctx.Done(): + return sm.State{}, nil, ctx.Err() + case <-timer.C: + continue + } } if chunks == nil { chunks, err = newChunkQueue(snapshot, s.tempDir) @@ -494,13 +510,11 @@ func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uin return nil } - s.logger.Debug( - "Requesting snapshot chunk", + s.logger.Debug("Requesting snapshot chunk", "height", snapshot.Height, "format", snapshot.Format, "chunk", chunk, - "peer", peer, - ) + "peer", peer) msg := p2p.Envelope{ To: peer, diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index 85abe23440..452f95ad7f 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -23,6 +23,10 @@ import ( ) func TestSyncer_SyncAny(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/internal/store/store.go b/internal/store/store.go index 39257c5194..74cbaef4b0 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -2,6 +2,7 @@ package store import ( "bytes" + "errors" "fmt" "strconv" @@ -267,23 +268,19 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { // and it comes from the block.LastCommit for `height+1`. // If no commit is found for the given height, it returns nil. 
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { - var pbc = new(tmproto.Commit) bz, err := bs.db.Get(blockCommitKey(height)) if err != nil { panic(err) } - if len(bz) == 0 { - return nil - } - err = proto.Unmarshal(bz, pbc) - if err != nil { - panic(fmt.Errorf("error reading block commit: %w", err)) - } - commit, err := types.CommitFromProto(pbc) + return mustDecodeCommit(bz) +} + +func (bs *BlockStore) LoadSeenCommitAt(height int64) *types.Commit { + bz, err := bs.db.Get(seenCommitAtKey(height)) if err != nil { - panic(fmt.Errorf("error reading block commit: %w", err)) + panic(err) } - return commit + return mustDecodeCommit(bz) } // LoadSeenCommit returns the last locally seen Commit before being @@ -306,7 +303,7 @@ func (bs *BlockStore) LoadSeenCommit() *types.Commit { commit, err := types.CommitFromProto(pbc) if err != nil { - panic(fmt.Errorf("error from proto commit: %w", err)) + panic(fmt.Errorf("converting seen commit: %w", err)) } return commit } @@ -458,17 +455,36 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s if block == nil { panic("BlockStore can only save a non-nil block") } - batch := bs.db.NewBatch() + if err := bs.saveBlockToBatch(batch, block, blockParts, seenCommit); err != nil { + panic(err) + } + + if err := batch.WriteSync(); err != nil { + panic(err) + } + + if err := batch.Close(); err != nil { + panic(err) + } +} + +func (bs *BlockStore) saveBlockToBatch(batch dbm.Batch, block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) error { + if block == nil { + panic("BlockStore can only save a non-nil block") + } height := block.Height hash := block.Hash() if g, w := height, bs.Height()+1; bs.Base() > 0 && g != w { - panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g)) + return fmt.Errorf("BlockStore can only save contiguous blocks. 
Wanted %v, got %v", w, g) } if !blockParts.IsComplete() { - panic("BlockStore can only save complete block part sets") + return errors.New("BlockStore can only save complete block part sets") + } + if height != seenCommit.Height { + return fmt.Errorf("BlockStore cannot save seen commit of a different height (block: %d, commit: %d)", height, seenCommit.Height) } // Save block parts. This must be done before the block meta, since callers @@ -483,38 +499,37 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s blockMeta := types.NewBlockMeta(block, blockParts) pbm := blockMeta.ToProto() if pbm == nil { - panic("nil blockmeta") + return errors.New("nil blockmeta") } metaBytes := mustEncode(pbm) if err := batch.Set(blockMetaKey(height), metaBytes); err != nil { - panic(err) + return err } if err := batch.Set(blockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil { - panic(err) + return err } pbc := block.LastCommit.ToProto() blockCommitBytes := mustEncode(pbc) if err := batch.Set(blockCommitKey(height-1), blockCommitBytes); err != nil { - panic(err) + return err } // Save seen commit (seen +2/3 precommits for block) pbsc := seenCommit.ToProto() seenCommitBytes := mustEncode(pbsc) if err := batch.Set(seenCommitKey(), seenCommitBytes); err != nil { - panic(err) + return err } - if err := batch.WriteSync(); err != nil { - panic(err) + // stores seen-commit at height, because tendermint does the same but only for extended commit + if err := batch.Set(seenCommitAtKey(height), seenCommitBytes); err != nil { + return err } - if err := batch.Close(); err != nil { - panic(err) - } + return nil } func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part, batch dbm.Batch) { @@ -587,13 +602,23 @@ func (bs *BlockStore) Close() error { //---------------------------------- KEY ENCODING ----------------------------------------- // key prefixes +// NB: Before modifying these, cross-check them with those in +// * 
internal/store/store.go [0..4, 13] +// * internal/state/store.go [5..8, 14] +// * internal/evidence/pool.go [9..10] +// * light/store/db/db.go [11..12] +// TODO(thane): Move all these to their own package. +// TODO: what about these (they already collide): +// * scripts/scmigrate/migrate.go [3] --> Looks OK, as it is also called "SeenCommit" +// * internal/p2p/peermanager.go [1] const ( // prefixes are unique across all tm db's - prefixBlockMeta = int64(0) - prefixBlockPart = int64(1) - prefixBlockCommit = int64(2) - prefixSeenCommit = int64(3) - prefixBlockHash = int64(4) + prefixBlockMeta = int64(0) + prefixBlockPart = int64(1) + prefixBlockCommit = int64(2) + prefixSeenCommit = int64(3) + prefixBlockHash = int64(4) + prefixSeenCommitAt = int64(13) ) func blockMetaKey(height int64) []byte { @@ -643,6 +668,14 @@ func seenCommitKey() []byte { return key } +func seenCommitAtKey(height int64) []byte { + key, err := orderedcode.Append(nil, prefixSeenCommitAt, height) + if err != nil { + panic(err) + } + return key +} + func blockHashKey(hash []byte) []byte { key, err := orderedcode.Append(nil, prefixBlockHash, string(hash)) if err != nil { @@ -661,3 +694,19 @@ func mustEncode(pb proto.Message) []byte { } return bz } + +func mustDecodeCommit(bz []byte) *types.Commit { + if len(bz) == 0 { + return nil + } + var pbc = new(tmproto.Commit) + err := proto.Unmarshal(bz, pbc) + if err != nil { + panic(fmt.Errorf("error reading block commit: %w", err)) + } + commit, err := types.CommitFromProto(pbc) + if err != nil { + panic(fmt.Errorf("converting commit to proto: %w", err)) + } + return commit +} diff --git a/internal/store/store_test.go b/internal/store/store_test.go index 0d3d70cbda..da7d5b6619 100644 --- a/internal/store/store_test.go +++ b/internal/store/store_test.go @@ -97,7 +97,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { require.NoError(t, err) part2 := validPartSet.GetPart(1) - seenCommit := makeTestCommit(state, 10, tmtime.Now()) + seenCommit := 
makeTestCommit(state, block.Header.Height, tmtime.Now()) bs.SaveBlock(block, validPartSet, seenCommit) require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed") require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed") @@ -156,9 +156,10 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { }, { - block: newBlock(header1, commitAtH10), - parts: incompletePartSet, - wantPanic: "only save complete block", // incomplete parts + block: newBlock(header1, commitAtH10), + parts: incompletePartSet, + wantPanic: "only save complete block", // incomplete parts + seenCommit: makeTestCommit(state, 10, tmtime.Now()), }, { @@ -187,7 +188,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { }, { - block: newBlock(header1, commitAtH10), + block: block, parts: validPartSet, seenCommit: seenCommit, @@ -196,7 +197,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { }, { - block: newBlock(header1, commitAtH10), + block: block, parts: validPartSet, seenCommit: seenCommit, @@ -348,7 +349,6 @@ func TestLoadBlockPart(t *testing.T) { require.NoError(t, err) partSet, err := block.MakePartSet(2) require.NoError(t, err) - require.NoError(t, err) part1 := partSet.GetPart(0) pb1, err := part1.ToProto() @@ -492,7 +492,7 @@ func TestBlockFetchAtHeight(t *testing.T) { partSet, err := block.MakePartSet(2) require.NoError(t, err) - seenCommit := makeTestCommit(state, 10, tmtime.Now()) + seenCommit := makeTestCommit(state, block.Header.Height, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") @@ -515,6 +515,7 @@ func TestBlockFetchAtHeight(t *testing.T) { func TestSeenAndCanonicalCommit(t *testing.T) { state, store := makeStateAndBlockStore(t, t.TempDir()) + loadCommit := func() (interface{}, error) { meta := store.LoadSeenCommit() return meta, nil @@ -545,6 +546,8 @@ func TestSeenAndCanonicalCommit(t *testing.T) { require.Nil(t, c5) c6 := 
store.LoadBlockCommit(h - 1) require.Equal(t, blockCommit.Hash(), c6.Hash()) + c7 := store.LoadSeenCommitAt(h) + require.Equal(t, seenCommit.Hash(), c7.Hash()) } } diff --git a/libs/log/default.go b/libs/log/default.go index e1c7af3d8c..7bd35de18b 100644 --- a/libs/log/default.go +++ b/libs/log/default.go @@ -59,9 +59,7 @@ func NewLogger(level string, logWriter io.Writer) (Logger, error) { // make the writer thread-safe logWriter = newSyncWriter(logWriter) - return &defaultLogger{ - Logger: zerolog.New(logWriter).Level(logLevel).With().Timestamp().Logger(), - }, nil + return &defaultLogger{Logger: zerolog.New(logWriter).Level(logLevel).With().Timestamp().Logger()}, nil } func (l defaultLogger) Info(msg string, keyVals ...interface{}) { @@ -77,9 +75,7 @@ func (l defaultLogger) Debug(msg string, keyVals ...interface{}) { } func (l defaultLogger) With(keyVals ...interface{}) Logger { - return &defaultLogger{ - Logger: l.Logger.With().Fields(getLogFields(keyVals...)).Logger(), - } + return &defaultLogger{Logger: l.Logger.With().Fields(getLogFields(keyVals...)).Logger()} } // OverrideWithNewLogger replaces an existing logger's internal with diff --git a/libs/strings/string.go b/libs/strings/string.go deleted file mode 100644 index 95ea03b5a6..0000000000 --- a/libs/strings/string.go +++ /dev/null @@ -1,62 +0,0 @@ -package strings - -import ( - "fmt" - "strings" -) - -// SplitAndTrimEmpty slices s into all subslices separated by sep and returns a -// slice of the string s with all leading and trailing Unicode code points -// contained in cutset removed. If sep is empty, SplitAndTrim splits after each -// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of -// -1. also filter out empty strings, only return non-empty strings. 
-func SplitAndTrimEmpty(s, sep, cutset string) []string { - if s == "" { - return []string{} - } - - spl := strings.Split(s, sep) - nonEmptyStrings := make([]string, 0, len(spl)) - - for i := 0; i < len(spl); i++ { - element := strings.Trim(spl[i], cutset) - if element != "" { - nonEmptyStrings = append(nonEmptyStrings, element) - } - } - - return nonEmptyStrings -} - -// ASCIITrim removes spaces from an a ASCII string, erroring if the -// sequence is not an ASCII string. -func ASCIITrim(s string) (string, error) { - if len(s) == 0 { - return "", nil - } - r := make([]byte, 0, len(s)) - for _, b := range []byte(s) { - switch { - case b == 32: - continue // skip space - case 32 < b && b <= 126: - r = append(r, b) - default: - return "", fmt.Errorf("non-ASCII (non-tab) char 0x%X", b) - } - } - return string(r), nil -} - -// StringSliceEqual checks if string slices a and b are equal -func StringSliceEqual(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i := 0; i < len(a); i++ { - if a[i] != b[i] { - return false - } - } - return true -} diff --git a/libs/time/mocks/source.go b/libs/time/mocks/source.go index 7878d86f51..4146993b31 100644 --- a/libs/time/mocks/source.go +++ b/libs/time/mocks/source.go @@ -3,11 +3,9 @@ package mocks import ( - testing "testing" + time "time" mock "github.com/stretchr/testify/mock" - - time "time" ) // Source is an autogenerated mock type for the Source type @@ -29,8 +27,13 @@ func (_m *Source) Now() time.Time { return r0 } -// NewSource creates a new instance of Source. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewSource(t testing.TB) *Source { +type mockConstructorTestingTNewSource interface { + mock.TestingT + Cleanup(func()) +} + +// NewSource creates a new instance of Source. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewSource(t mockConstructorTestingTNewSource) *Source { mock := &Source{} mock.Mock.Test(t) diff --git a/light/client.go b/light/client.go index b729c8ccf8..af006fb6f0 100644 --- a/light/client.go +++ b/light/client.go @@ -11,6 +11,7 @@ import ( "github.com/tendermint/tendermint/crypto" dashcore "github.com/tendermint/tendermint/dash/core" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light/provider" "github.com/tendermint/tendermint/light/store" @@ -443,7 +444,8 @@ func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now return fmt.Errorf("existing trusted header %X does not match newHeader %X", l.Hash(), newHeader.Hash()) } c.logger.Debug("header has already been verified", - "height", newHeader.Height, "hash", newHeader.Hash()) + "height", newHeader.Height, + "hash", tmstrings.LazyBlockHash(newHeader)) return nil } @@ -777,7 +779,12 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool) // process all the responses as they come in for i := 0; i < cap(witnessResponsesC); i++ { - response := <-witnessResponsesC + var response witnessResponse + select { + case response = <-witnessResponsesC: + case <-ctx.Done(): + return nil, ctx.Err() + } switch response.err { // success! We have found a new primary case nil: diff --git a/light/example_test.go b/light/example_test.go index cbfba354c0..9888b1106d 100644 --- a/light/example_test.go +++ b/light/example_test.go @@ -21,6 +21,10 @@ import ( // Manually getting light blocks and verifying them. 
func TestExampleClient(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() conf, err := rpctest.CreateConfig(t, "ExampleClient_VerifyLightBlockAtHeight") diff --git a/light/light_test.go b/light/light_test.go index c035d102c8..e1e94254db 100644 --- a/light/light_test.go +++ b/light/light_test.go @@ -26,6 +26,10 @@ import ( // Automatically getting new headers and verifying them. func TestClientIntegration_Update(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Parallel() ctx, cancel := context.WithCancel(context.Background()) @@ -88,6 +92,10 @@ func TestClientIntegration_Update(t *testing.T) { // Manually getting light blocks and verifying them. func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -165,6 +173,10 @@ func waitForBlock(ctx context.Context, p provider.Provider, height int64) (*type } func TestClientStatusRPC(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() conf, err := rpctest.CreateConfig(t, t.Name()) diff --git a/light/provider/http/http.go b/light/provider/http/http.go index 933e21485b..228e28c473 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -170,13 +170,13 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato const maxPages = 100 var ( - perPage = 100 - vals []*types.Validator - thresholdPublicKey crypto.PubKey - quorumType btcjson.LLMQType - quorumHash crypto.QuorumHash - page = 1 - total = -1 + perPage = 100 + vals []*types.Validator + thresholdPubKey crypto.PubKey + quorumType btcjson.LLMQType + quorumHash crypto.QuorumHash + page = 1 + total = -1 ) for len(vals) != total 
&& page <= maxPages { @@ -184,8 +184,8 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato // is negative we will keep repeating. attempt := uint16(0) for { - requestThresholdPublicKey := attempt == 0 - res, err := p.client.Validators(ctx, height, &page, &perPage, &requestThresholdPublicKey) + reqThresholdPubKey := attempt == 0 + res, err := p.client.Validators(ctx, height, &page, &perPage, &reqThresholdPubKey) switch e := err.(type) { case nil: // success!! Now we validate the response if len(res.Validators) == 0 { @@ -227,13 +227,12 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato // terminate the connection with the peer. return nil, provider.ErrUnreliableProvider{Reason: e} } - // update the total and increment the page index so we can fetch the // next page of validators if need be total = res.Total vals = append(vals, res.Validators...) - if requestThresholdPublicKey { - thresholdPublicKey = *res.ThresholdPublicKey + if reqThresholdPubKey { + thresholdPubKey = *res.ThresholdPublicKey quorumHash = *res.QuorumHash quorumType = res.QuorumType } @@ -241,7 +240,7 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato } } - valSet, err := types.ValidatorSetFromExistingValidators(vals, thresholdPublicKey, quorumType, quorumHash) + valSet, err := types.ValidatorSetFromExistingValidators(vals, thresholdPubKey, quorumType, quorumHash) if err != nil { return nil, provider.ErrBadLightBlock{Reason: err} } diff --git a/light/provider/http/http_test.go b/light/provider/http/http_test.go index fd9371c32a..3a92144fb4 100644 --- a/light/provider/http/http_test.go +++ b/light/provider/http/http_test.go @@ -35,6 +35,10 @@ func TestNewProvider(t *testing.T) { } func TestProvider(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() cfg, err := rpctest.CreateConfig(t, t.Name()) diff 
--git a/light/provider/mocks/provider.go b/light/provider/mocks/provider.go index e136046f9d..d77418303a 100644 --- a/light/provider/mocks/provider.go +++ b/light/provider/mocks/provider.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -68,8 +66,13 @@ func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 types.Evidence) erro return r0 } -// NewProvider creates a new instance of Provider. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewProvider(t testing.TB) *Provider { +type mockConstructorTestingTNewProvider interface { + mock.TestingT + Cleanup(func()) +} + +// NewProvider creates a new instance of Provider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewProvider(t mockConstructorTestingTNewProvider) *Provider { mock := &Provider{} mock.Mock.Test(t) diff --git a/light/proxy/routes.go b/light/proxy/routes.go index 2df52b921a..df8b5f9dba 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -52,6 +52,10 @@ func (p proxyService) BroadcastTxAsync(ctx context.Context, req *coretypes.Reque return p.Client.BroadcastTxAsync(ctx, req.Tx) } +func (p proxyService) BroadcastTx(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { + return p.Client.BroadcastTx(ctx, req.Tx) +} + func (p proxyService) BroadcastTxCommit(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTxCommit, error) { return p.Client.BroadcastTxCommit(ctx, req.Tx) } diff --git a/light/rpc/client.go b/light/rpc/client.go index 94856e462a..992a677fb4 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -233,6 +233,10 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.R return c.next.BroadcastTxSync(ctx, tx) } +func (c *Client) BroadcastTx(ctx 
context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + return c.next.BroadcastTx(ctx, tx) +} + func (c *Client) UnconfirmedTxs(ctx context.Context, page, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { return c.next.UnconfirmedTxs(ctx, page, perPage) } diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index ea6d6a2d44..439ce31ea7 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - time "time" types "github.com/tendermint/tendermint/types" @@ -118,8 +116,13 @@ func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int6 return r0, r1 } -// NewLightClient creates a new instance of LightClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewLightClient(t testing.TB) *LightClient { +type mockConstructorTestingTNewLightClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewLightClient creates a new instance of LightClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewLightClient(t mockConstructorTestingTNewLightClient) *LightClient { mock := &LightClient{} mock.Mock.Test(t) diff --git a/light/store/db/db.go b/light/store/db/db.go index c364e17092..17ee6766d9 100644 --- a/light/store/db/db.go +++ b/light/store/db/db.go @@ -13,6 +13,16 @@ import ( "github.com/tendermint/tendermint/types" ) +// key prefixes +// NB: Before modifying these, cross-check them with those in +// * internal/store/store.go [0..4, 13] +// * internal/state/store.go [5..8, 14] +// * internal/evidence/pool.go [9..10] +// * light/store/db/db.go [11..12] +// TODO(sergio): Move all these to their own package. 
+// TODO: what about these (they already collide): +// * scripts/scmigrate/migrate.go [3] +// * internal/p2p/peermanager.go [1] const ( prefixLightBlock = int64(11) prefixSize = int64(12) diff --git a/node/node.go b/node/node.go index b58fc506ae..45dc48e17c 100644 --- a/node/node.go +++ b/node/node.go @@ -224,7 +224,7 @@ func makeNode( weAreOnlyValidator := onlyValidatorIsUs(state, proTxHash) - peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID) + peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID, nodeMetrics.p2p) closers = append(closers, peerCloser) if err != nil { return nil, combineCloseError( @@ -306,7 +306,7 @@ func makeNode( node.evPool = evPool mpReactor, mp := createMempoolReactor(logger, cfg, proxyApp, stateStore, nodeMetrics.mempool, - peerManager.Subscribe, node.router.OpenChannel, peerManager.GetHeight) + peerManager.Subscribe, node.router.OpenChannel) node.rpcEnv.Mempool = mp node.services = append(node.services, mpReactor) @@ -780,7 +780,9 @@ func loadStateFromDBOrGenesisDocProvider(stateStore sm.Store, genDoc *types.Gene func getRouterConfig(conf *config.Config, appClient abciclient.Client) p2p.RouterOptions { opts := p2p.RouterOptions{ - QueueType: conf.P2P.QueueType, + QueueType: conf.P2P.QueueType, + HandshakeTimeout: conf.P2P.HandshakeTimeout, + DialTimeout: conf.P2P.DialTimeout, } if conf.FilterPeers && appClient != nil { diff --git a/node/node_test.go b/node/node_test.go index 74b6b717f2..5bbb5e08ab 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -114,6 +114,10 @@ func getTestNode(ctx context.Context, t *testing.T, conf *config.Config, logger } func TestNodeDelayedStart(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + cfg, err := config.ResetTestRoot(t.TempDir(), "node_delayed_start_test") require.NoError(t, err) @@ -209,6 +213,10 @@ func TestNodeSetPrivValTCP(t *testing.T) { // address without a protocol must result in error func 
TestPrivValidatorListenAddrNoProtocol(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -471,6 +479,10 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { } func TestMaxProposalBlockSize(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/node/seed.go b/node/seed.go index a0b71e411f..92d55230f2 100644 --- a/node/seed.go +++ b/node/seed.go @@ -67,7 +67,7 @@ func makeSeedNode( // Setup Transport and Switch. p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID) - peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID) + peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID, p2pMetrics) if err != nil { return nil, combineCloseError( fmt.Errorf("failed to create peer manager: %w", err), diff --git a/node/setup.go b/node/setup.go index b14fc2ea96..e53d824f5f 100644 --- a/node/setup.go +++ b/node/setup.go @@ -19,6 +19,7 @@ import ( "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/conn" @@ -30,7 +31,6 @@ import ( "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" - tmstrings "github.com/tendermint/tendermint/libs/strings" "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" "github.com/tendermint/tendermint/types" @@ -149,7 +149,6 @@ func createMempoolReactor( memplMetrics *mempool.Metrics, peerEvents 
p2p.PeerEventSubscriber, chCreator p2p.ChannelCreator, - peerHeight func(types.NodeID) int64, ) (service.Service, mempool.Mempool) { logger = logger.With("module", "mempool") @@ -168,7 +167,6 @@ func createMempoolReactor( mp, chCreator, peerEvents, - peerHeight, ) if cfg.Consensus.WaitForTxs() { @@ -206,6 +204,7 @@ func createPeerManager( cfg *config.Config, dbProvider config.DBProvider, nodeID types.NodeID, + metrics *p2p.Metrics, ) (*p2p.PeerManager, closer, error) { selfAddr, err := p2p.ParseNodeAddress(nodeID.AddressString(cfg.P2P.ExternalAddress)) @@ -227,16 +226,29 @@ func createPeerManager( maxConns = 64 } + var maxOutgoingConns uint16 + switch { + case cfg.P2P.MaxOutgoingConnections > 0: + maxOutgoingConns = cfg.P2P.MaxOutgoingConnections + default: + maxOutgoingConns = maxConns / 2 + } + + maxUpgradeConns := uint16(4) + options := p2p.PeerManagerOptions{ - SelfAddress: selfAddr, - MaxConnected: maxConns, - MaxConnectedUpgrade: 4, - MaxPeers: 1000, - MinRetryTime: 250 * time.Millisecond, - MaxRetryTime: 30 * time.Minute, - MaxRetryTimePersistent: 5 * time.Minute, - RetryTimeJitter: 5 * time.Second, - PrivatePeers: privatePeerIDs, + SelfAddress: selfAddr, + MaxConnected: maxConns, + MaxOutgoingConnections: maxOutgoingConns, + MaxConnectedUpgrade: maxUpgradeConns, + DisconnectCooldownPeriod: 2 * time.Second, + MaxPeers: maxUpgradeConns + 4*maxConns, + MinRetryTime: 250 * time.Millisecond, + MaxRetryTime: 30 * time.Minute, + MaxRetryTimePersistent: 5 * time.Minute, + RetryTimeJitter: 5 * time.Second, + PrivatePeers: privatePeerIDs, + Metrics: metrics, } peers := []p2p.NodeAddress{} diff --git a/privval/utils.go b/privval/utils.go index 1d6681b452..a2cbbf5014 100644 --- a/privval/utils.go +++ b/privval/utils.go @@ -27,13 +27,17 @@ func IsConnTimeout(err error) bool { // NewSignerListener creates a new SignerListenerEndpoint using the corresponding listen address func NewSignerListener(listenAddr string, logger log.Logger) (*SignerListenerEndpoint, error) { - 
var listener net.Listener - protocol, address := tmnet.ProtocolAndAddress(listenAddr) + if protocol != "unix" && protocol != "tcp" { //nolint:goconst + return nil, fmt.Errorf("unsupported address family %q, want unix or tcp", protocol) + } + ln, err := net.Listen(protocol, address) if err != nil { return nil, err } + + var listener net.Listener switch protocol { case "unix": listener = NewUnixListener(ln) @@ -41,13 +45,8 @@ func NewSignerListener(listenAddr string, logger log.Logger) (*SignerListenerEnd // TODO: persist this key so external signer can actually authenticate us listener = NewTCPListener(ln, ed25519.GenPrivKey()) default: - return nil, fmt.Errorf( - "wrong listen address: expected either 'tcp' or 'unix' protocols, got %s", - protocol, - ) + panic("invalid protocol: " + protocol) // semantically unreachable } - pve := NewSignerListenerEndpoint(logger.With("module", "privval"), listener) - - return pve, nil + return NewSignerListenerEndpoint(logger.With("module", "privval"), listener), nil } diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 2709b6c214..735dc1ecb9 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -5,7 +5,6 @@ option go_package = "github.com/tendermint/tendermint/abci/types"; import "tendermint/crypto/proof.proto"; import "tendermint/types/dash.proto"; -import "tendermint/types/types.proto"; import "tendermint/crypto/keys.proto"; import "tendermint/types/params.proto"; import "tendermint/version/types.proto"; @@ -26,10 +25,7 @@ message Request { RequestInfo info = 3; RequestInitChain init_chain = 4; RequestQuery query = 5; - RequestBeginBlock begin_block = 6 [deprecated = true]; RequestCheckTx check_tx = 7; - RequestDeliverTx deliver_tx = 8 [deprecated = true]; - RequestEndBlock end_block = 9 [deprecated = true]; RequestCommit commit = 10; RequestListSnapshots list_snapshots = 11; RequestOfferSnapshot offer_snapshot = 12; @@ -41,6 +37,7 @@ message Request { 
RequestVerifyVoteExtension verify_vote_extension = 18; RequestFinalizeBlock finalize_block = 19; } + reserved 6, 8, 9; // RequestBeginBlock, RequestDeliverTx, RequestEndBlock } message RequestEcho { @@ -74,13 +71,6 @@ message RequestQuery { bool prove = 4; } -message RequestBeginBlock { - bytes hash = 1; - tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; - CommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; - repeated Misbehavior byzantine_validators = 4 [(gogoproto.nullable) = false]; -} - enum CheckTxType { NEW = 0 [(gogoproto.enumvalue_customname) = "New"]; RECHECK = 1 [(gogoproto.enumvalue_customname) = "Recheck"]; @@ -91,14 +81,6 @@ message RequestCheckTx { CheckTxType type = 2; } -message RequestDeliverTx { - bytes tx = 1; -} - -message RequestEndBlock { - int64 height = 1; -} - message RequestCommit {} // lists available snapshots @@ -131,7 +113,7 @@ message RequestPrepareProposal { // sent to the app for possible modifications. repeated bytes txs = 2; ExtendedCommitInfo local_last_commit = 3 [(gogoproto.nullable) = false]; - repeated Misbehavior byzantine_validators = 4 [(gogoproto.nullable) = false]; + repeated Misbehavior misbehavior = 4 [(gogoproto.nullable) = false]; int64 height = 5; google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; bytes next_validators_hash = 7; @@ -146,7 +128,7 @@ message RequestPrepareProposal { message RequestProcessProposal { repeated bytes txs = 1; CommitInfo proposed_last_commit = 2 [(gogoproto.nullable) = false]; - repeated Misbehavior byzantine_validators = 3 [(gogoproto.nullable) = false]; + repeated Misbehavior misbehavior = 3 [(gogoproto.nullable) = false]; // hash is the merkle root hash of the fields of the proposed block. 
bytes hash = 4; int64 height = 5; @@ -174,8 +156,8 @@ message RequestVerifyVoteExtension { message RequestFinalizeBlock { repeated bytes txs = 1; CommitInfo decided_last_commit = 2 [(gogoproto.nullable) = false]; - repeated Misbehavior byzantine_validators = 3 [(gogoproto.nullable) = false]; - // hash is the merkle root hash of the fields of the proposed block. + repeated Misbehavior misbehavior = 3 [(gogoproto.nullable) = false]; + // hash is the merkle root hash of the fields of the decided block. bytes hash = 4; int64 height = 5; google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; @@ -199,10 +181,7 @@ message Response { ResponseInfo info = 4; ResponseInitChain init_chain = 5; ResponseQuery query = 6; - ResponseBeginBlock begin_block = 7 [deprecated = true]; ResponseCheckTx check_tx = 8; - ResponseDeliverTx deliver_tx = 9 [deprecated = true]; - ResponseEndBlock end_block = 10 [deprecated = true]; ResponseCommit commit = 11; ResponseListSnapshots list_snapshots = 12; ResponseOfferSnapshot offer_snapshot = 13; @@ -214,6 +193,7 @@ message Response { ResponseVerifyVoteExtension verify_vote_extension = 19; ResponseFinalizeBlock finalize_block = 20; } + reserved 7, 9, 10; // ResponseBeginBlock, ResponseDeliverTx, ResponseEndBlock } // nondeterministic @@ -262,26 +242,15 @@ message ResponseQuery { string codespace = 10; } -message ResponseBeginBlock { - repeated Event events = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; -} - message ResponseCheckTx { uint32 code = 1; bytes data = 2; - string log = 3; // nondeterministic - string info = 4; // nondeterministic int64 gas_wanted = 5; - int64 gas_used = 6; - repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; string codespace = 8; string sender = 9; int64 priority = 10; - // mempool_error is set by Tendermint. - - // ABCI applications creating a ResponseCheckTX should not set mempool_error. 
- string mempool_error = 11; + reserved 3, 4, 6, 7, 11; // see https://github.com/tendermint/tendermint/issues/8543 } message ResponseDeliverTx { @@ -296,17 +265,8 @@ message ResponseDeliverTx { string codespace = 8; } -message ResponseEndBlock { - tendermint.types.ConsensusParams consensus_param_updates = 2; - repeated Event events = 3 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; - tendermint.types.CoreChainLock next_core_chain_lock_update = 100; - ValidatorSetUpdate validator_set_update = 101 [(gogoproto.nullable) = true]; -} - message ResponseCommit { - // reserve 1 - bytes data = 2; + reserved 1, 2; int64 retain_height = 3; } @@ -393,7 +353,6 @@ message ResponseFinalizeBlock { repeated ExecTxResult tx_results = 2; tendermint.types.ConsensusParams consensus_param_updates = 4; bytes app_hash = 5; - int64 retain_height = 6; tendermint.types.CoreChainLock next_core_chain_lock_update = 100; ValidatorSetUpdate validator_set_update = 101 [(gogoproto.nullable) = true]; @@ -427,7 +386,7 @@ message ExtendedCommitInfo { } // Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// ResponseFinalizeBlock, ResponseDeliverTx, ExecTxResult // Later, transactions may be queried using these events. 
message Event { string type = 1; diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index c002003228..ae85bb2ed0 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -116,7 +116,8 @@ func (m *NoBlockResponse) GetHeight() int64 { // BlockResponse returns block to the requested type BlockResponse struct { - Block *types.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Block *types.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Commit *types.Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` } func (m *BlockResponse) Reset() { *m = BlockResponse{} } @@ -159,6 +160,13 @@ func (m *BlockResponse) GetBlock() *types.Block { return nil } +func (m *BlockResponse) GetCommit() *types.Commit { + if m != nil { + return m.Commit + } + return nil +} + // StatusRequest requests the status of a peer. type StatusRequest struct { } @@ -385,30 +393,32 @@ func init() { func init() { proto.RegisterFile("tendermint/blocksync/types.proto", fileDescriptor_19b397c236e0fa07) } var fileDescriptor_19b397c236e0fa07 = []byte{ - // 368 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x4d, 0x4f, 0xfa, 0x40, - 0x10, 0xc6, 0xdb, 0x7f, 0x81, 0x7f, 0x32, 0x50, 0x1a, 0x1b, 0xa3, 0xc4, 0x98, 0x86, 0xd4, 0x97, - 0xe8, 0xc1, 0x36, 0xc1, 0xa3, 0xc6, 0x03, 0x27, 0x4c, 0x7c, 0x49, 0x4a, 0xbc, 0x78, 0x21, 0x14, - 0x37, 0x40, 0x94, 0x2e, 0x32, 0xdb, 0x03, 0xdf, 0xc2, 0x2f, 0xe0, 0xf7, 0xf1, 0xc8, 0xd1, 0xa3, - 0x81, 0x2f, 0x62, 0x98, 0x2d, 0x65, 0x69, 0xb0, 0xb7, 0xdd, 0xe9, 0x33, 0xbf, 0x79, 0xfa, 0x64, - 0x16, 0xea, 0x82, 0x45, 0x2f, 0x6c, 0x32, 0x1a, 0x46, 0xc2, 0x0f, 0xdf, 0x78, 0xef, 0x15, 0xa7, - 0x51, 0xcf, 0x17, 0xd3, 0x31, 0x43, 0x6f, 0x3c, 0xe1, 0x82, 0xdb, 0xbb, 0x6b, 0x85, 0x97, 0x2a, - 0x0e, 0x0e, 0x95, 0x3e, 0x52, 0xcb, 0x6e, 0xd9, 0xe3, 0x9e, 0x42, 0xa5, 0xb9, 0xbc, 
0x06, 0xec, - 0x3d, 0x66, 0x28, 0xec, 0x3d, 0x28, 0x0d, 0xd8, 0xb0, 0x3f, 0x10, 0x35, 0xbd, 0xae, 0x9f, 0x19, - 0x41, 0x72, 0x73, 0xcf, 0xc1, 0x7a, 0xe0, 0x89, 0x12, 0xc7, 0x3c, 0x42, 0xf6, 0xa7, 0xf4, 0x06, - 0xcc, 0x4d, 0xe1, 0x05, 0x14, 0x69, 0x24, 0xe9, 0xca, 0x8d, 0x7d, 0x4f, 0xf1, 0x29, 0xfd, 0x4b, - 0xbd, 0x54, 0xb9, 0x16, 0x98, 0x6d, 0xd1, 0x15, 0x31, 0x26, 0x9e, 0xdc, 0x6b, 0xa8, 0xae, 0x0a, - 0xf9, 0xa3, 0x6d, 0x1b, 0x0a, 0x61, 0x17, 0x59, 0xed, 0x1f, 0x55, 0xe9, 0xec, 0x7e, 0x1a, 0xf0, - 0xff, 0x9e, 0x21, 0x76, 0xfb, 0xcc, 0xbe, 0x05, 0x93, 0x66, 0x74, 0x26, 0x12, 0x9d, 0x38, 0x72, - 0xbd, 0x6d, 0xc9, 0x79, 0x6a, 0x30, 0x2d, 0x2d, 0xa8, 0x84, 0x6a, 0x50, 0x6d, 0xd8, 0x89, 0x78, - 0x67, 0x45, 0x93, 0xbe, 0x68, 0x6e, 0xb9, 0x71, 0xb2, 0x1d, 0x97, 0xc9, 0xaf, 0xa5, 0x05, 0x56, - 0x94, 0x89, 0xf4, 0x0e, 0xaa, 0x19, 0xa2, 0x41, 0xc4, 0xa3, 0x5c, 0x83, 0x29, 0xcf, 0x0c, 0xb3, - 0x34, 0xa4, 0xdc, 0xd2, 0xdf, 0x2d, 0xe4, 0xd1, 0x36, 0x42, 0x5f, 0xd2, 0x50, 0x2d, 0xd8, 0x8f, - 0x60, 0xa5, 0xb4, 0xc4, 0x5c, 0x91, 0x70, 0xc7, 0xf9, 0xb8, 0xd4, 0x5d, 0x15, 0x37, 0x2a, 0xcd, - 0x22, 0x18, 0x18, 0x8f, 0x9a, 0x4f, 0x5f, 0x73, 0x47, 0x9f, 0xcd, 0x1d, 0xfd, 0x67, 0xee, 0xe8, - 0x1f, 0x0b, 0x47, 0x9b, 0x2d, 0x1c, 0xed, 0x7b, 0xe1, 0x68, 0xcf, 0x57, 0xfd, 0xa1, 0x18, 0xc4, - 0xa1, 0xd7, 0xe3, 0x23, 0x5f, 0x5d, 0xe2, 0xf5, 0x91, 0x76, 0xd8, 0xdf, 0xf6, 0x30, 0xc2, 0x12, - 0x7d, 0xbb, 0xfc, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x1c, 0xa3, 0x45, 0x37, 0x03, 0x00, 0x00, + // 391 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0xcd, 0x4e, 0xea, 0x40, + 0x14, 0xc7, 0xdb, 0x5b, 0xe0, 0x26, 0xe7, 0x52, 0x9a, 0x3b, 0xb9, 0xb9, 0x12, 0x63, 0x1a, 0x52, + 0x3f, 0xa2, 0x0b, 0x5b, 0xa3, 0x4b, 0x5d, 0xe1, 0x06, 0x13, 0x3f, 0x92, 0x12, 0x37, 0x6e, 0x08, + 0xad, 0x13, 0x68, 0xb4, 0x9d, 0xca, 0x4c, 0x17, 0xbc, 0x85, 0x2f, 0xe0, 0xfb, 0xb8, 0x64, 0xe9, + 0xd2, 0xc0, 0x8b, 0x18, 0x66, 0x86, 0x32, 0xd4, 0xda, 0xdd, 0x30, 0xf3, 
0x3b, 0x3f, 0xfe, 0xe7, + 0xcc, 0x14, 0x3a, 0x0c, 0x27, 0x8f, 0x78, 0x12, 0x47, 0x09, 0xf3, 0x82, 0x67, 0x12, 0x3e, 0xd1, + 0x69, 0x12, 0x7a, 0x6c, 0x9a, 0x62, 0xea, 0xa6, 0x13, 0xc2, 0x08, 0xfa, 0xb7, 0x26, 0xdc, 0x9c, + 0xd8, 0xde, 0x51, 0xea, 0x38, 0x2d, 0xaa, 0x45, 0x4d, 0xc9, 0xa9, 0x62, 0x74, 0x0e, 0xa0, 0xd9, + 0x5d, 0xc2, 0x3e, 0x7e, 0xc9, 0x30, 0x65, 0xe8, 0x3f, 0x34, 0xc6, 0x38, 0x1a, 0x8d, 0x59, 0x5b, + 0xef, 0xe8, 0x87, 0x86, 0x2f, 0x7f, 0x39, 0x47, 0x60, 0xdd, 0x12, 0x49, 0xd2, 0x94, 0x24, 0x14, + 0xff, 0x88, 0xa6, 0x60, 0x6e, 0x82, 0xc7, 0x50, 0xe7, 0x81, 0x38, 0xf7, 0xe7, 0x74, 0xcb, 0x55, + 0xba, 0x10, 0x59, 0x04, 0x2f, 0x28, 0x74, 0x02, 0x8d, 0x90, 0xc4, 0x71, 0xc4, 0xda, 0xbf, 0x38, + 0xdf, 0xfe, 0xce, 0x5f, 0xf2, 0x73, 0x5f, 0x72, 0x8e, 0x05, 0x66, 0x9f, 0x0d, 0x59, 0x46, 0x65, + 0x17, 0xce, 0x05, 0xb4, 0x56, 0x1b, 0xd5, 0x61, 0x11, 0x82, 0x5a, 0x30, 0xa4, 0x98, 0xff, 0x95, + 0xe1, 0xf3, 0xb5, 0xf3, 0x66, 0xc0, 0xef, 0x1b, 0x4c, 0xe9, 0x70, 0x84, 0xd1, 0x15, 0x98, 0x3c, + 0xd5, 0x60, 0x22, 0xd4, 0xb2, 0x07, 0xc7, 0x2d, 0xbb, 0x09, 0x57, 0x1d, 0x65, 0x4f, 0xf3, 0x9b, + 0x81, 0x3a, 0xda, 0x3e, 0xfc, 0x4d, 0xc8, 0x60, 0x65, 0x13, 0xb9, 0x64, 0x8b, 0xfb, 0xe5, 0xba, + 0xc2, 0xc4, 0x7b, 0x9a, 0x6f, 0x25, 0x85, 0x4b, 0xb8, 0x86, 0x56, 0xc1, 0x68, 0x70, 0xe3, 0x6e, + 0x65, 0xc0, 0xdc, 0x67, 0x06, 0x45, 0x1b, 0xe5, 0x73, 0xcb, 0xdb, 0xad, 0x55, 0xd9, 0x36, 0x86, + 0xbe, 0xb4, 0x51, 0x75, 0x03, 0xdd, 0x81, 0x95, 0xdb, 0x64, 0xb8, 0x3a, 0xd7, 0xed, 0x55, 0xeb, + 0xf2, 0x74, 0x2d, 0xba, 0xb1, 0xd3, 0xad, 0x83, 0x41, 0xb3, 0xb8, 0x7b, 0xff, 0x3e, 0xb7, 0xf5, + 0xd9, 0xdc, 0xd6, 0x3f, 0xe7, 0xb6, 0xfe, 0xba, 0xb0, 0xb5, 0xd9, 0xc2, 0xd6, 0x3e, 0x16, 0xb6, + 0xf6, 0x70, 0x3e, 0x8a, 0xd8, 0x38, 0x0b, 0xdc, 0x90, 0xc4, 0x9e, 0xfa, 0xec, 0xd7, 0x4b, 0xfe, + 0xea, 0xbd, 0xb2, 0x0f, 0x2d, 0x68, 0xf0, 0xb3, 0xb3, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x83, + 0x34, 0xe6, 0xd2, 0x87, 0x03, 0x00, 0x00, } func (m *BlockRequest) Marshal() (dAtA []byte, err error) { @@ 
-487,6 +497,18 @@ func (m *BlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.Block != nil { { size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) @@ -740,6 +762,10 @@ func (m *BlockResponse) Size() (n int) { l = m.Block.Size() n += 1 + l + sovTypes(uint64(l)) } + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -1049,6 +1075,42 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Commit == nil { + m.Commit = &types.Commit{} + } + if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/proto/tendermint/blocksync/types.proto b/proto/tendermint/blocksync/types.proto index 4febfd145c..af09429368 100644 --- a/proto/tendermint/blocksync/types.proto +++ b/proto/tendermint/blocksync/types.proto @@ -4,6 +4,7 @@ package tendermint.blocksync; option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync"; import "tendermint/types/block.proto"; +import "tendermint/types/types.proto"; // BlockRequest requests a block for a specific height message 
BlockRequest { @@ -18,7 +19,8 @@ message NoBlockResponse { // BlockResponse returns block to the requested message BlockResponse { - tendermint.types.Block block = 1; + tendermint.types.Block block = 1; + tendermint.types.Commit commit = 2; } // StatusRequest requests the status of a peer. diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go index 3b79eb0110..5bfab3fa6a 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -96,7 +96,8 @@ type NodeInfo struct { Channels []byte `protobuf:"bytes,6,opt,name=channels,proto3" json:"channels,omitempty"` Moniker string `protobuf:"bytes,7,opt,name=moniker,proto3" json:"moniker,omitempty"` Other NodeInfoOther `protobuf:"bytes,8,opt,name=other,proto3" json:"other"` - ProTxHash []byte `protobuf:"bytes,9,opt,name=pro_tx_hash,json=proTxHash,proto3" json:"pro_tx_hash,omitempty"` + // dash's fields + ProTxHash []byte `protobuf:"bytes,100,opt,name=pro_tx_hash,json=proTxHash,proto3" json:"pro_tx_hash,omitempty"` } func (m *NodeInfo) Reset() { *m = NodeInfo{} } @@ -251,7 +252,9 @@ type PeerInfo struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` AddressInfo []*PeerAddressInfo `protobuf:"bytes,2,rep,name=address_info,json=addressInfo,proto3" json:"address_info,omitempty"` LastConnected *time.Time `protobuf:"bytes,3,opt,name=last_connected,json=lastConnected,proto3,stdtime" json:"last_connected,omitempty"` - ProTxHash []byte `protobuf:"bytes,4,opt,name=pro_tx_hash,json=proTxHash,proto3" json:"pro_tx_hash,omitempty"` + Inactive bool `protobuf:"varint,4,opt,name=inactive,proto3" json:"inactive,omitempty"` + // dash's fields + ProTxHash []byte `protobuf:"bytes,100,opt,name=pro_tx_hash,json=proTxHash,proto3" json:"pro_tx_hash,omitempty"` } func (m *PeerInfo) Reset() { *m = PeerInfo{} } @@ -308,6 +311,13 @@ func (m *PeerInfo) GetLastConnected() *time.Time { return nil } +func (m *PeerInfo) GetInactive() bool { + if m != nil { + return m.Inactive + } + 
return false +} + func (m *PeerInfo) GetProTxHash() []byte { if m != nil { return m.ProTxHash @@ -394,47 +404,48 @@ func init() { func init() { proto.RegisterFile("tendermint/p2p/types.proto", fileDescriptor_c8a29e659aeca578) } var fileDescriptor_c8a29e659aeca578 = []byte{ - // 636 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcf, 0x6f, 0xd3, 0x30, - 0x18, 0x6d, 0xda, 0xae, 0x3f, 0xbe, 0xae, 0xeb, 0xb0, 0x26, 0x94, 0x55, 0xa2, 0x99, 0xba, 0xcb, - 0x4e, 0xa9, 0x54, 0xc4, 0x81, 0xe3, 0xb2, 0x09, 0xa8, 0x84, 0x58, 0x15, 0x26, 0x0e, 0x70, 0x88, - 0xd2, 0xd8, 0x6d, 0xad, 0xa5, 0xb6, 0xe5, 0xb8, 0x50, 0xfe, 0x8b, 0xfd, 0x59, 0xbb, 0xb1, 0x23, - 0xa7, 0x82, 0xb2, 0x03, 0x17, 0xfe, 0x08, 0x64, 0x27, 0xd9, 0xd6, 0x8a, 0xc3, 0x6e, 0x7e, 0xdf, - 0xe7, 0xf7, 0xfc, 0xbe, 0x67, 0xcb, 0xd0, 0x55, 0x84, 0x61, 0x22, 0x17, 0x94, 0xa9, 0x81, 0x18, - 0x8a, 0x81, 0xfa, 0x2e, 0x48, 0xe2, 0x0a, 0xc9, 0x15, 0x47, 0x7b, 0x0f, 0x3d, 0x57, 0x0c, 0x45, - 0xf7, 0x60, 0xc6, 0x67, 0xdc, 0xb4, 0x06, 0x7a, 0x95, 0xed, 0xea, 0x3a, 0x33, 0xce, 0x67, 0x31, - 0x19, 0x18, 0x34, 0x59, 0x4e, 0x07, 0x8a, 0x2e, 0x48, 0xa2, 0xc2, 0x85, 0xc8, 0x36, 0xf4, 0x2f, - 0xa1, 0x33, 0xd6, 0x8b, 0x88, 0xc7, 0x9f, 0x88, 0x4c, 0x28, 0x67, 0xe8, 0x10, 0x2a, 0x62, 0x28, - 0x6c, 0xeb, 0xc8, 0x3a, 0xa9, 0x7a, 0xf5, 0x74, 0xed, 0x54, 0xc6, 0xc3, 0xb1, 0xaf, 0x6b, 0xe8, - 0x00, 0x76, 0x26, 0x31, 0x8f, 0xae, 0xec, 0xb2, 0x6e, 0xfa, 0x19, 0x40, 0xfb, 0x50, 0x09, 0x85, - 0xb0, 0x2b, 0xa6, 0xa6, 0x97, 0xfd, 0x3f, 0x65, 0x68, 0x7c, 0xe0, 0x98, 0x8c, 0xd8, 0x94, 0xa3, - 0x31, 0xec, 0x8b, 0xfc, 0x88, 0xe0, 0x6b, 0x76, 0x86, 0x11, 0x6f, 0x0d, 0x1d, 0x77, 0x73, 0x08, - 0x77, 0xcb, 0x8a, 0x57, 0xbd, 0x59, 0x3b, 0x25, 0xbf, 0x23, 0xb6, 0x1c, 0x1e, 0x43, 0x9d, 0x71, - 0x4c, 0x02, 0x8a, 0x8d, 0x91, 0xa6, 0x07, 0xe9, 0xda, 0xa9, 0x99, 0x03, 0xcf, 0xfd, 0x9a, 0x6e, - 0x8d, 0x30, 0x72, 0xa0, 0x15, 0xd3, 0x44, 0x11, 0x16, 0x84, 0x18, 0x4b, 0xe3, 0xae, 0xe9, 0x43, - 0x56, 
0x3a, 0xc5, 0x58, 0x22, 0x1b, 0xea, 0x8c, 0xa8, 0x6f, 0x5c, 0x5e, 0xd9, 0x55, 0xd3, 0x2c, - 0xa0, 0xee, 0x14, 0x46, 0x77, 0xb2, 0x4e, 0x0e, 0x51, 0x17, 0x1a, 0xd1, 0x3c, 0x64, 0x8c, 0xc4, - 0x89, 0x5d, 0x3b, 0xb2, 0x4e, 0x76, 0xfd, 0x7b, 0xac, 0x59, 0x0b, 0xce, 0xe8, 0x15, 0x91, 0x76, - 0x3d, 0x63, 0xe5, 0x10, 0xbd, 0x86, 0x1d, 0xae, 0xe6, 0x44, 0xda, 0x0d, 0x33, 0xf6, 0x8b, 0xed, - 0xb1, 0x8b, 0xa8, 0x2e, 0xf4, 0xa6, 0x7c, 0xe8, 0x8c, 0x81, 0x7a, 0xd0, 0x12, 0x92, 0x07, 0x6a, - 0x15, 0xcc, 0xc3, 0x64, 0x6e, 0x37, 0xcd, 0x99, 0x4d, 0x21, 0xf9, 0xe5, 0xea, 0x5d, 0x98, 0xcc, - 0xfb, 0x5f, 0xa0, 0xbd, 0xc1, 0x46, 0x87, 0xd0, 0x50, 0xab, 0x80, 0x32, 0x4c, 0x56, 0x26, 0xe5, - 0xa6, 0x5f, 0x57, 0xab, 0x91, 0x86, 0x68, 0x00, 0x2d, 0x29, 0x22, 0x13, 0x07, 0x49, 0x92, 0x3c, - 0xba, 0xbd, 0x74, 0xed, 0x80, 0x3f, 0x3e, 0x3b, 0xcd, 0xaa, 0x3e, 0x48, 0x11, 0xe5, 0xeb, 0xfe, - 0x0f, 0x0b, 0x1a, 0x63, 0x42, 0xa4, 0xb9, 0xc6, 0xe7, 0x50, 0xa6, 0x38, 0x93, 0xf4, 0x6a, 0xe9, - 0xda, 0x29, 0x8f, 0xce, 0xfd, 0x32, 0xc5, 0xc8, 0x83, 0xdd, 0x5c, 0x31, 0xa0, 0x6c, 0xca, 0xed, - 0xf2, 0x51, 0xe5, 0xbf, 0x57, 0x4b, 0x88, 0xcc, 0x75, 0xb5, 0x9c, 0xdf, 0x0a, 0x1f, 0x00, 0x7a, - 0x0b, 0x7b, 0x71, 0x98, 0xa8, 0x20, 0xe2, 0x8c, 0x91, 0x48, 0x11, 0x6c, 0xae, 0xab, 0x35, 0xec, - 0xba, 0xd9, 0xfb, 0x75, 0x8b, 0xf7, 0xeb, 0x5e, 0x16, 0xef, 0xd7, 0xab, 0x5e, 0xff, 0x72, 0x2c, - 0xbf, 0xad, 0x79, 0x67, 0x05, 0x6d, 0x3b, 0xae, 0xea, 0x76, 0x5c, 0x7f, 0x2d, 0xe8, 0x6c, 0x39, - 0xd1, 0xf7, 0x56, 0x44, 0x92, 0x07, 0x96, 0x43, 0xf4, 0x1e, 0x9e, 0x19, 0x5b, 0x98, 0x86, 0x71, - 0x90, 0x2c, 0xa3, 0xa8, 0x88, 0xed, 0x29, 0xce, 0x3a, 0x9a, 0x7a, 0x4e, 0xc3, 0xf8, 0x63, 0x46, - 0xdc, 0x54, 0x9b, 0x86, 0x34, 0x5e, 0x4a, 0xf2, 0xe4, 0x39, 0xef, 0xd5, 0xde, 0x64, 0x44, 0x74, - 0x0c, 0xed, 0xc7, 0x42, 0x89, 0x99, 0xb5, 0xed, 0xef, 0xe2, 0x87, 0x3d, 0x89, 0x77, 0x71, 0x93, - 0xf6, 0xac, 0xdb, 0xb4, 0x67, 0xfd, 0x4e, 0x7b, 0xd6, 0xf5, 0x5d, 0xaf, 0x74, 0x7b, 0xd7, 0x2b, - 0xfd, 0xbc, 0xeb, 0x95, 0x3e, 0xbf, 0x9a, 
0x51, 0x35, 0x5f, 0x4e, 0xdc, 0x88, 0x2f, 0x06, 0x8f, - 0x7e, 0x99, 0xc7, 0x1f, 0x8e, 0xf9, 0x4b, 0x36, 0x7f, 0xa0, 0x49, 0xcd, 0x54, 0x5f, 0xfe, 0x0b, - 0x00, 0x00, 0xff, 0xff, 0xb0, 0x80, 0xe4, 0x4e, 0x9a, 0x04, 0x00, 0x00, + // 648 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xbd, 0x6e, 0xdb, 0x3a, + 0x18, 0xb5, 0x6c, 0xc7, 0x3f, 0x74, 0x1c, 0xe7, 0x12, 0xc1, 0x85, 0x62, 0xe0, 0x4a, 0x81, 0xb3, + 0x64, 0x92, 0x00, 0x5f, 0x74, 0xe8, 0x18, 0x25, 0x68, 0x6b, 0xa0, 0x68, 0x0c, 0x36, 0xe8, 0xd0, + 0x0e, 0x82, 0x2c, 0xd2, 0x36, 0x11, 0x99, 0x24, 0x28, 0x3a, 0x75, 0xdf, 0x22, 0x8f, 0x95, 0x31, + 0x63, 0x27, 0xb7, 0x50, 0x86, 0x0e, 0xed, 0x43, 0x14, 0xa4, 0xa4, 0x24, 0x36, 0x0a, 0x34, 0x1b, + 0xcf, 0xf7, 0xf1, 0x1c, 0x1e, 0x9e, 0x8f, 0x20, 0xe8, 0x2b, 0xc2, 0x30, 0x91, 0x0b, 0xca, 0x94, + 0x2f, 0x86, 0xc2, 0x57, 0x5f, 0x04, 0x49, 0x3d, 0x21, 0xb9, 0xe2, 0x70, 0xef, 0xb1, 0xe7, 0x89, + 0xa1, 0xe8, 0x1f, 0xcc, 0xf8, 0x8c, 0x9b, 0x96, 0xaf, 0x57, 0xf9, 0xae, 0xbe, 0x3b, 0xe3, 0x7c, + 0x96, 0x10, 0xdf, 0xa0, 0xc9, 0x72, 0xea, 0x2b, 0xba, 0x20, 0xa9, 0x8a, 0x16, 0x22, 0xdf, 0x30, + 0xb8, 0x04, 0xbd, 0xb1, 0x5e, 0xc4, 0x3c, 0xf9, 0x40, 0x64, 0x4a, 0x39, 0x83, 0x87, 0xa0, 0x26, + 0x86, 0xc2, 0xb6, 0x8e, 0xac, 0x93, 0x7a, 0xd0, 0xcc, 0xd6, 0x6e, 0x6d, 0x3c, 0x1c, 0x23, 0x5d, + 0x83, 0x07, 0x60, 0x67, 0x92, 0xf0, 0xf8, 0xca, 0xae, 0xea, 0x26, 0xca, 0x01, 0xdc, 0x07, 0xb5, + 0x48, 0x08, 0xbb, 0x66, 0x6a, 0x7a, 0x39, 0xf8, 0x51, 0x05, 0xad, 0x77, 0x1c, 0x93, 0x11, 0x9b, + 0x72, 0x38, 0x06, 0xfb, 0xa2, 0x38, 0x22, 0xbc, 0xce, 0xcf, 0x30, 0xe2, 0x9d, 0xa1, 0xeb, 0x6d, + 0x5e, 0xc2, 0xdb, 0xb2, 0x12, 0xd4, 0x6f, 0xd7, 0x6e, 0x05, 0xf5, 0xc4, 0x96, 0xc3, 0x63, 0xd0, + 0x64, 0x1c, 0x93, 0x90, 0x62, 0x63, 0xa4, 0x1d, 0x80, 0x6c, 0xed, 0x36, 0xcc, 0x81, 0xe7, 0xa8, + 0xa1, 0x5b, 0x23, 0x0c, 0x5d, 0xd0, 0x49, 0x68, 0xaa, 0x08, 0x0b, 0x23, 0x8c, 0xa5, 0x71, 0xd7, + 0x46, 0x20, 0x2f, 0x9d, 0x62, 0x2c, 0xa1, 0x0d, 0x9a, 
0x8c, 0xa8, 0xcf, 0x5c, 0x5e, 0xd9, 0x75, + 0xd3, 0x2c, 0xa1, 0xee, 0x94, 0x46, 0x77, 0xf2, 0x4e, 0x01, 0x61, 0x1f, 0xb4, 0xe2, 0x79, 0xc4, + 0x18, 0x49, 0x52, 0xbb, 0x71, 0x64, 0x9d, 0xec, 0xa2, 0x07, 0xac, 0x59, 0x0b, 0xce, 0xe8, 0x15, + 0x91, 0x76, 0x33, 0x67, 0x15, 0x10, 0xbe, 0x04, 0x3b, 0x5c, 0xcd, 0x89, 0xb4, 0x5b, 0xe6, 0xda, + 0xff, 0x6d, 0x5f, 0xbb, 0x8c, 0xea, 0x42, 0x6f, 0x2a, 0x2e, 0x9d, 0x33, 0xa0, 0x03, 0x3a, 0x42, + 0xf2, 0x50, 0xad, 0xc2, 0x79, 0x94, 0xce, 0x6d, 0x6c, 0xce, 0x6c, 0x0b, 0xc9, 0x2f, 0x57, 0x6f, + 0xa2, 0x74, 0x3e, 0xf8, 0x04, 0xba, 0x1b, 0x6c, 0x78, 0x08, 0x5a, 0x6a, 0x15, 0x52, 0x86, 0xc9, + 0xca, 0xa4, 0xdc, 0x46, 0x4d, 0xb5, 0x1a, 0x69, 0x08, 0x7d, 0xd0, 0x91, 0x22, 0x36, 0x71, 0x90, + 0x34, 0x2d, 0xa2, 0xdb, 0xcb, 0xd6, 0x2e, 0x40, 0xe3, 0xb3, 0xd3, 0xbc, 0x8a, 0x80, 0x14, 0x71, + 0xb1, 0x1e, 0xfc, 0xb4, 0x40, 0x6b, 0x4c, 0x88, 0x34, 0x63, 0xfc, 0x17, 0x54, 0x29, 0xce, 0x25, + 0x83, 0x46, 0xb6, 0x76, 0xab, 0xa3, 0x73, 0x54, 0xa5, 0x18, 0x06, 0x60, 0xb7, 0x50, 0x0c, 0x29, + 0x9b, 0x72, 0xbb, 0x7a, 0x54, 0xfb, 0xe3, 0x68, 0x09, 0x91, 0x85, 0xae, 0x96, 0x43, 0x9d, 0xe8, + 0x11, 0xc0, 0xd7, 0x60, 0x2f, 0x89, 0x52, 0x15, 0xc6, 0x9c, 0x31, 0x12, 0x2b, 0x82, 0xcd, 0xb8, + 0x3a, 0xc3, 0xbe, 0x97, 0xbf, 0x5f, 0xaf, 0x7c, 0xbf, 0xde, 0x65, 0xf9, 0x7e, 0x83, 0xfa, 0xcd, + 0x37, 0xd7, 0x42, 0x5d, 0xcd, 0x3b, 0x2b, 0x69, 0x7a, 0x3e, 0x94, 0x45, 0xb1, 0xa2, 0xd7, 0xc4, + 0x0c, 0xb5, 0x85, 0x1e, 0xf0, 0x5f, 0xa3, 0xfc, 0x65, 0x81, 0xde, 0x96, 0x4b, 0x3d, 0xd3, 0x32, + 0xae, 0x22, 0xcc, 0x02, 0xc2, 0xb7, 0xe0, 0x1f, 0x63, 0x19, 0xd3, 0x28, 0x09, 0xd3, 0x65, 0x1c, + 0x97, 0x91, 0x3e, 0xc7, 0x75, 0x4f, 0x53, 0xcf, 0x69, 0x94, 0xbc, 0xcf, 0x89, 0x9b, 0x6a, 0xd3, + 0x88, 0x26, 0x4b, 0x49, 0x9e, 0x9d, 0xc1, 0x83, 0xda, 0xab, 0x9c, 0x08, 0x8f, 0x41, 0xf7, 0xa9, + 0x50, 0x6a, 0xa2, 0xe8, 0xa2, 0x5d, 0xfc, 0xb8, 0x27, 0x0d, 0x2e, 0x6e, 0x33, 0xc7, 0xba, 0xcb, + 0x1c, 0xeb, 0x7b, 0xe6, 0x58, 0x37, 0xf7, 0x4e, 0xe5, 0xee, 0xde, 0xa9, 0x7c, 0xbd, 0x77, 
0x2a, + 0x1f, 0x5f, 0xcc, 0xa8, 0x9a, 0x2f, 0x27, 0x5e, 0xcc, 0x17, 0xfe, 0x93, 0x1f, 0xe8, 0xe9, 0x67, + 0x64, 0xfe, 0x99, 0xcd, 0xdf, 0x69, 0xd2, 0x30, 0xd5, 0xff, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, + 0xc4, 0x51, 0xcb, 0xf4, 0xb6, 0x04, 0x00, 0x00, } func (m *ProtocolVersion) Marshal() (dAtA []byte, err error) { @@ -500,7 +511,9 @@ func (m *NodeInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.ProTxHash) i = encodeVarintTypes(dAtA, i, uint64(len(m.ProTxHash))) i-- - dAtA[i] = 0x4a + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa2 } { size, err := m.Other.MarshalToSizedBuffer(dAtA[:i]) @@ -629,7 +642,19 @@ func (m *PeerInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.ProTxHash) i = encodeVarintTypes(dAtA, i, uint64(len(m.ProTxHash))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa2 + } + if m.Inactive { + i-- + if m.Inactive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } if m.LastConnected != nil { n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastConnected, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected):]) @@ -785,7 +810,7 @@ func (m *NodeInfo) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) l = len(m.ProTxHash) if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + n += 2 + l + sovTypes(uint64(l)) } return n } @@ -827,9 +852,12 @@ func (m *PeerInfo) Size() (n int) { l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected) n += 1 + l + sovTypes(uint64(l)) } + if m.Inactive { + n += 2 + } l = len(m.ProTxHash) if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + n += 2 + l + sovTypes(uint64(l)) } return n } @@ -1260,7 +1288,7 @@ func (m *NodeInfo) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 9: + case 100: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ProTxHash", wireType) } @@ -1561,6 +1589,26 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Inactive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Inactive = bool(v != 0) + case 100: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ProTxHash", wireType) } diff --git a/proto/tendermint/p2p/types.proto b/proto/tendermint/p2p/types.proto index 2afed8494f..5af5447ef9 100644 --- a/proto/tendermint/p2p/types.proto +++ b/proto/tendermint/p2p/types.proto @@ -21,7 +21,9 @@ message NodeInfo { bytes channels = 6; string moniker = 7; NodeInfoOther other = 8 [(gogoproto.nullable) = false]; - bytes pro_tx_hash = 9; + + // dash's fields + bytes pro_tx_hash = 100; } message NodeInfoOther { @@ -33,7 +35,10 @@ message PeerInfo { string id = 1 [(gogoproto.customname) = "ID"]; repeated PeerAddressInfo address_info = 2; google.protobuf.Timestamp last_connected = 3 [(gogoproto.stdtime) = true]; - bytes pro_tx_hash = 4; + bool inactive = 4; + + // dash's fields + bytes pro_tx_hash = 100; } message PeerAddressInfo { diff --git a/proto/tendermint/privval/service.proto b/proto/tendermint/privval/service.proto index c42eba64d8..4ceac5695a 100644 --- a/proto/tendermint/privval/service.proto +++ b/proto/tendermint/privval/service.proto @@ -1,6 +1,6 @@ syntax = "proto3"; package tendermint.privval; -option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; import "tendermint/privval/types.proto"; diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index f0acbcd30f..f2fb79cb17 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -9,8 +9,7 @@ import ( proto "github.com/gogo/protobuf/proto" _ "github.com/gogo/protobuf/types" 
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/tendermint/tendermint/abci/types" - types1 "github.com/tendermint/tendermint/proto/tendermint/types" + types "github.com/tendermint/tendermint/proto/tendermint/types" version "github.com/tendermint/tendermint/proto/tendermint/version" io "io" math "math" @@ -30,64 +29,17 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// ABCIResponses retains the responses -// of the various ABCI calls during block processing. -// It is persisted to disk for each height before calling Commit. -type ABCIResponses struct { - FinalizeBlock *types.ResponseFinalizeBlock `protobuf:"bytes,2,opt,name=finalize_block,json=finalizeBlock,proto3" json:"finalize_block,omitempty"` -} - -func (m *ABCIResponses) Reset() { *m = ABCIResponses{} } -func (m *ABCIResponses) String() string { return proto.CompactTextString(m) } -func (*ABCIResponses) ProtoMessage() {} -func (*ABCIResponses) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{0} -} -func (m *ABCIResponses) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ABCIResponses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ABCIResponses.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ABCIResponses) XXX_Merge(src proto.Message) { - xxx_messageInfo_ABCIResponses.Merge(m, src) -} -func (m *ABCIResponses) XXX_Size() int { - return m.Size() -} -func (m *ABCIResponses) XXX_DiscardUnknown() { - xxx_messageInfo_ABCIResponses.DiscardUnknown(m) -} - -var xxx_messageInfo_ABCIResponses proto.InternalMessageInfo - -func (m *ABCIResponses) GetFinalizeBlock() *types.ResponseFinalizeBlock { - if m != nil { - return m.FinalizeBlock - } - return nil -} - // 
ValidatorsInfo represents the latest validator set, or the last height it changed type ValidatorsInfo struct { - ValidatorSet *types1.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` - LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` + ValidatorSet *types.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` } func (m *ValidatorsInfo) Reset() { *m = ValidatorsInfo{} } func (m *ValidatorsInfo) String() string { return proto.CompactTextString(m) } func (*ValidatorsInfo) ProtoMessage() {} func (*ValidatorsInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{1} + return fileDescriptor_ccfacf933f22bf93, []int{0} } func (m *ValidatorsInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -116,7 +68,7 @@ func (m *ValidatorsInfo) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatorsInfo proto.InternalMessageInfo -func (m *ValidatorsInfo) GetValidatorSet() *types1.ValidatorSet { +func (m *ValidatorsInfo) GetValidatorSet() *types.ValidatorSet { if m != nil { return m.ValidatorSet } @@ -132,15 +84,15 @@ func (m *ValidatorsInfo) GetLastHeightChanged() int64 { // ConsensusParamsInfo represents the latest consensus params, or the last height it changed type ConsensusParamsInfo struct { - ConsensusParams types1.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` - LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` + ConsensusParams types.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" 
json:"consensus_params"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` } func (m *ConsensusParamsInfo) Reset() { *m = ConsensusParamsInfo{} } func (m *ConsensusParamsInfo) String() string { return proto.CompactTextString(m) } func (*ConsensusParamsInfo) ProtoMessage() {} func (*ConsensusParamsInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{2} + return fileDescriptor_ccfacf933f22bf93, []int{1} } func (m *ConsensusParamsInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -169,11 +121,11 @@ func (m *ConsensusParamsInfo) XXX_DiscardUnknown() { var xxx_messageInfo_ConsensusParamsInfo proto.InternalMessageInfo -func (m *ConsensusParamsInfo) GetConsensusParams() types1.ConsensusParams { +func (m *ConsensusParamsInfo) GetConsensusParams() types.ConsensusParams { if m != nil { return m.ConsensusParams } - return types1.ConsensusParams{} + return types.ConsensusParams{} } func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 { @@ -192,7 +144,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{3} + return fileDescriptor_ccfacf933f22bf93, []int{2} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -241,26 +193,26 @@ type State struct { ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` InitialHeight int64 `protobuf:"varint,14,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` // LastBlockHeight=0 at genesis (ie. 
block(H=0) does not exist) - LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockID types1.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` - LastStateID types1.StateID `protobuf:"bytes,102,opt,name=last_state_id,json=lastStateId,proto3" json:"last_state_id"` - LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` - LastCoreChainLockedBlockHeight uint32 `protobuf:"varint,100,opt,name=last_core_chain_locked_block_height,json=lastCoreChainLockedBlockHeight,proto3" json:"last_core_chain_locked_block_height,omitempty"` - NextCoreChainLock *types1.CoreChainLock `protobuf:"bytes,101,opt,name=next_core_chain_lock,json=nextCoreChainLock,proto3" json:"next_core_chain_lock,omitempty"` + LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockID types.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` + LastStateID types.StateID `protobuf:"bytes,102,opt,name=last_state_id,json=lastStateId,proto3" json:"last_state_id"` + LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` + LastCoreChainLockedBlockHeight uint32 `protobuf:"varint,100,opt,name=last_core_chain_locked_block_height,json=lastCoreChainLockedBlockHeight,proto3" json:"last_core_chain_locked_block_height,omitempty"` + NextCoreChainLock *types.CoreChainLock `protobuf:"bytes,101,opt,name=next_core_chain_lock,json=nextCoreChainLock,proto3" json:"next_core_chain_lock,omitempty"` // LastValidators is used to validate block.LastCommit. // Validators are persisted to the database separately every time they change, // so we can query for historical validator sets. 
// Note that if s.LastBlockHeight causes a valset change, // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 // Extra +1 due to nextValSet delay. - NextValidators *types1.ValidatorSet `protobuf:"bytes,6,opt,name=next_validators,json=nextValidators,proto3" json:"next_validators,omitempty"` - Validators *types1.ValidatorSet `protobuf:"bytes,7,opt,name=validators,proto3" json:"validators,omitempty"` - LastValidators *types1.ValidatorSet `protobuf:"bytes,8,opt,name=last_validators,json=lastValidators,proto3" json:"last_validators,omitempty"` - LastHeightValidatorsChanged int64 `protobuf:"varint,9,opt,name=last_height_validators_changed,json=lastHeightValidatorsChanged,proto3" json:"last_height_validators_changed,omitempty"` + NextValidators *types.ValidatorSet `protobuf:"bytes,6,opt,name=next_validators,json=nextValidators,proto3" json:"next_validators,omitempty"` + Validators *types.ValidatorSet `protobuf:"bytes,7,opt,name=validators,proto3" json:"validators,omitempty"` + LastValidators *types.ValidatorSet `protobuf:"bytes,8,opt,name=last_validators,json=lastValidators,proto3" json:"last_validators,omitempty"` + LastHeightValidatorsChanged int64 `protobuf:"varint,9,opt,name=last_height_validators_changed,json=lastHeightValidatorsChanged,proto3" json:"last_height_validators_changed,omitempty"` // Consensus parameters used for validating blocks. // Changes returned by EndBlock and updated after Commit. 
- ConsensusParams types1.ConsensusParams `protobuf:"bytes,10,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` - LastHeightConsensusParamsChanged int64 `protobuf:"varint,11,opt,name=last_height_consensus_params_changed,json=lastHeightConsensusParamsChanged,proto3" json:"last_height_consensus_params_changed,omitempty"` + ConsensusParams types.ConsensusParams `protobuf:"bytes,10,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightConsensusParamsChanged int64 `protobuf:"varint,11,opt,name=last_height_consensus_params_changed,json=lastHeightConsensusParamsChanged,proto3" json:"last_height_consensus_params_changed,omitempty"` // Merkle root of the results from executing prev block LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` // the latest AppHash we've received from calling abci.Commit() @@ -271,7 +223,7 @@ func (m *State) Reset() { *m = State{} } func (m *State) String() string { return proto.CompactTextString(m) } func (*State) ProtoMessage() {} func (*State) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{4} + return fileDescriptor_ccfacf933f22bf93, []int{3} } func (m *State) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -328,18 +280,18 @@ func (m *State) GetLastBlockHeight() int64 { return 0 } -func (m *State) GetLastBlockID() types1.BlockID { +func (m *State) GetLastBlockID() types.BlockID { if m != nil { return m.LastBlockID } - return types1.BlockID{} + return types.BlockID{} } -func (m *State) GetLastStateID() types1.StateID { +func (m *State) GetLastStateID() types.StateID { if m != nil { return m.LastStateID } - return types1.StateID{} + return types.StateID{} } func (m *State) GetLastBlockTime() time.Time { @@ -356,28 +308,28 @@ func (m *State) GetLastCoreChainLockedBlockHeight() uint32 { return 0 } -func (m *State) GetNextCoreChainLock() *types1.CoreChainLock 
{ +func (m *State) GetNextCoreChainLock() *types.CoreChainLock { if m != nil { return m.NextCoreChainLock } return nil } -func (m *State) GetNextValidators() *types1.ValidatorSet { +func (m *State) GetNextValidators() *types.ValidatorSet { if m != nil { return m.NextValidators } return nil } -func (m *State) GetValidators() *types1.ValidatorSet { +func (m *State) GetValidators() *types.ValidatorSet { if m != nil { return m.Validators } return nil } -func (m *State) GetLastValidators() *types1.ValidatorSet { +func (m *State) GetLastValidators() *types.ValidatorSet { if m != nil { return m.LastValidators } @@ -391,11 +343,11 @@ func (m *State) GetLastHeightValidatorsChanged() int64 { return 0 } -func (m *State) GetConsensusParams() types1.ConsensusParams { +func (m *State) GetConsensusParams() types.ConsensusParams { if m != nil { return m.ConsensusParams } - return types1.ConsensusParams{} + return types.ConsensusParams{} } func (m *State) GetLastHeightConsensusParamsChanged() int64 { @@ -420,7 +372,6 @@ func (m *State) GetAppHash() []byte { } func init() { - proto.RegisterType((*ABCIResponses)(nil), "tendermint.state.ABCIResponses") proto.RegisterType((*ValidatorsInfo)(nil), "tendermint.state.ValidatorsInfo") proto.RegisterType((*ConsensusParamsInfo)(nil), "tendermint.state.ConsensusParamsInfo") proto.RegisterType((*Version)(nil), "tendermint.state.Version") @@ -430,92 +381,54 @@ func init() { func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } var fileDescriptor_ccfacf933f22bf93 = []byte{ - // 798 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4b, 0x6f, 0xd3, 0x4c, - 0x14, 0x8d, 0xbf, 0x3e, 0x92, 0x4c, 0xea, 0xa4, 0x75, 0xbb, 0x70, 0xd3, 0xaf, 0x4e, 0x08, 0x50, - 0x55, 0x2c, 0x1c, 0x09, 0x16, 0x88, 0x0d, 0x52, 0x93, 0x0a, 0x1a, 0x51, 0x10, 0xb8, 0xa8, 0x0b, - 0x16, 0x58, 0x13, 0x7b, 0x12, 0x5b, 0x24, 0x1e, 0xcb, 0x33, 0x29, 0x8f, 0x3d, 0xfb, 0x6e, 
0xf9, - 0x47, 0x5d, 0x76, 0x89, 0x58, 0x14, 0x94, 0xfe, 0x11, 0x34, 0x0f, 0xdb, 0x93, 0xa4, 0x48, 0x45, - 0xec, 0x3c, 0xf7, 0x9c, 0x7b, 0xee, 0x99, 0x3b, 0x73, 0xc7, 0xe0, 0x7f, 0x8a, 0x22, 0x1f, 0x25, - 0xe3, 0x30, 0xa2, 0x6d, 0x42, 0x21, 0x45, 0x6d, 0xfa, 0x39, 0x46, 0xc4, 0x8e, 0x13, 0x4c, 0xb1, - 0xb1, 0x9e, 0xa3, 0x36, 0x47, 0xeb, 0x5b, 0x43, 0x3c, 0xc4, 0x1c, 0x6c, 0xb3, 0x2f, 0xc1, 0xab, - 0xef, 0x28, 0x2a, 0xb0, 0xef, 0x85, 0xaa, 0x48, 0x5d, 0x2d, 0xc1, 0xe3, 0x33, 0xe8, 0xce, 0x02, - 0xea, 0x43, 0x12, 0x48, 0xb0, 0xb9, 0x00, 0x9e, 0xc1, 0x51, 0xe8, 0x43, 0x8a, 0x13, 0xc9, 0xd8, - 0x5d, 0x60, 0xc4, 0x30, 0x81, 0xe3, 0x54, 0xdd, 0x52, 0xe0, 0x33, 0x94, 0x90, 0x10, 0x47, 0x33, - 0xd5, 0x1b, 0x43, 0x8c, 0x87, 0x23, 0xd4, 0xe6, 0xab, 0xfe, 0x64, 0xd0, 0xa6, 0xe1, 0x18, 0x11, - 0x0a, 0xc7, 0xb1, 0x20, 0xb4, 0xde, 0x03, 0xfd, 0xa0, 0xd3, 0xed, 0x39, 0x88, 0xc4, 0x38, 0x22, - 0x88, 0x18, 0x2f, 0x41, 0x75, 0x10, 0x46, 0x70, 0x14, 0x7e, 0x41, 0x6e, 0x7f, 0x84, 0xbd, 0x0f, - 0xe6, 0x7f, 0x4d, 0x6d, 0xbf, 0xf2, 0x70, 0xcf, 0x56, 0x7a, 0xc5, 0x7a, 0x60, 0xa7, 0x39, 0xcf, - 0x24, 0xbd, 0xc3, 0xd8, 0x8e, 0x3e, 0x50, 0x97, 0xad, 0xaf, 0x1a, 0xa8, 0x9e, 0xa6, 0x7b, 0x22, - 0xbd, 0x68, 0x80, 0x8d, 0x2e, 0xd0, 0xb3, 0x5d, 0xba, 0x04, 0x51, 0x53, 0xe3, 0x05, 0x2c, 0xb5, - 0x80, 0xd8, 0x43, 0x96, 0x78, 0x82, 0xa8, 0xb3, 0x76, 0xa6, 0xac, 0x0c, 0x1b, 0x6c, 0x8e, 0x20, - 0xa1, 0x6e, 0x80, 0xc2, 0x61, 0x40, 0x5d, 0x2f, 0x80, 0xd1, 0x10, 0xf9, 0xdc, 0xeb, 0x92, 0xb3, - 0xc1, 0xa0, 0x23, 0x8e, 0x74, 0x05, 0xd0, 0xfa, 0xa6, 0x81, 0xcd, 0x2e, 0x73, 0x1b, 0x91, 0x09, - 0x79, 0xcd, 0x5b, 0xc8, 0xcd, 0x38, 0x60, 0xdd, 0x4b, 0xc3, 0xae, 0x68, 0xad, 0xf4, 0x73, 0x67, - 0xd1, 0xcf, 0x9c, 0x40, 0x67, 0xf9, 0xe2, 0xaa, 0x51, 0x70, 0x6a, 0xde, 0x6c, 0xf8, 0xaf, 0xbd, - 0x05, 0xa0, 0x78, 0x2a, 0xce, 0xce, 0x38, 0x00, 0xe5, 0x4c, 0x4d, 0xfa, 0xd8, 0x55, 0x7d, 0xc8, - 0x33, 0xce, 0x9d, 0x48, 0x0f, 0x79, 0x96, 0x51, 0x07, 0x25, 0x82, 0x07, 0xf4, 0x23, 0x4c, 0x10, - 0x2f, 0x59, 0x76, 0xb2, 0x75, 
0xeb, 0x47, 0x09, 0xac, 0x9c, 0xb0, 0x7b, 0x6e, 0x3c, 0x01, 0x45, - 0xa9, 0x25, 0xcb, 0x6c, 0xdb, 0xf3, 0xb3, 0x60, 0x4b, 0x53, 0xb2, 0x44, 0xca, 0x37, 0xf6, 0x40, - 0xc9, 0x0b, 0x60, 0x18, 0xb9, 0xa1, 0xd8, 0x53, 0xb9, 0x53, 0x99, 0x5e, 0x35, 0x8a, 0x5d, 0x16, - 0xeb, 0x1d, 0x3a, 0x45, 0x0e, 0xf6, 0x7c, 0xe3, 0x3e, 0xa8, 0x86, 0x51, 0x48, 0x43, 0x38, 0x92, - 0x9d, 0x30, 0xab, 0xbc, 0x03, 0xba, 0x8c, 0x8a, 0x26, 0x18, 0x0f, 0x00, 0x6f, 0x89, 0xb8, 0x6c, - 0x29, 0x73, 0x89, 0x33, 0x6b, 0x0c, 0xe0, 0xf7, 0x48, 0x72, 0x1d, 0xa0, 0x2b, 0xdc, 0xd0, 0x37, - 0x97, 0x17, 0xbd, 0x8b, 0xa3, 0xe2, 0x59, 0xbd, 0xc3, 0xce, 0x26, 0xf3, 0x3e, 0xbd, 0x6a, 0x54, - 0x8e, 0x53, 0xa9, 0xde, 0xa1, 0x53, 0xc9, 0x74, 0x7b, 0x7e, 0xa6, 0xc9, 0xf7, 0xcc, 0x34, 0x07, - 0x7f, 0xd2, 0xe4, 0x9d, 0x9b, 0xd7, 0x94, 0x41, 0xa1, 0x29, 0x16, 0xbe, 0x71, 0x0c, 0x6a, 0x8a, - 0x4f, 0x36, 0x73, 0xe6, 0x0a, 0x57, 0xad, 0xdb, 0x62, 0x20, 0xed, 0x74, 0x20, 0xed, 0xb7, 0xe9, - 0x40, 0x76, 0x4a, 0x4c, 0xf6, 0xfc, 0x67, 0x43, 0x73, 0xf4, 0xcc, 0x1f, 0x43, 0x8d, 0x17, 0xe0, - 0x2e, 0x57, 0xf3, 0x70, 0x82, 0x5c, 0xd1, 0x7a, 0x86, 0x21, 0x7f, 0xb6, 0x67, 0x7e, 0x53, 0xdb, - 0xd7, 0x1d, 0x8b, 0x51, 0xbb, 0x38, 0x41, 0xfc, 0x3c, 0x8e, 0x39, 0x4f, 0x6d, 0xe1, 0x29, 0xd8, - 0x8a, 0xd0, 0xa7, 0x05, 0x31, 0x13, 0x71, 0x7f, 0x8d, 0x9b, 0x2e, 0xbd, 0xa2, 0xc5, 0xef, 0x82, - 0xe6, 0x6c, 0x30, 0x89, 0x19, 0xc0, 0x78, 0x0e, 0x6a, 0x5c, 0x37, 0x9b, 0x52, 0x62, 0xae, 0xde, - 0x6a, 0xae, 0xab, 0x2c, 0x2d, 0x7f, 0x22, 0x8c, 0xa7, 0x00, 0x28, 0x1a, 0xc5, 0x5b, 0x69, 0x28, - 0x19, 0xcc, 0x08, 0xef, 0x96, 0x22, 0x52, 0xba, 0x9d, 0x11, 0x96, 0xa6, 0x18, 0xe9, 0x02, 0x4b, - 0x1d, 0xe3, 0x5c, 0x2f, 0x9b, 0xe8, 0x32, 0xbf, 0xa5, 0x3b, 0xf9, 0x44, 0xe7, 0xd9, 0x72, 0xb6, - 0x6f, 0x7c, 0x5f, 0xc0, 0x3f, 0xbe, 0x2f, 0xaf, 0xc0, 0xbd, 0x99, 0xf7, 0x65, 0x4e, 0x3f, 0xb3, - 0x57, 0xe1, 0xf6, 0x9a, 0xca, 0x83, 0x33, 0x2b, 0x94, 0x7a, 0x4c, 0x27, 0x30, 0x41, 0x64, 0x32, - 0xa2, 0xc4, 0x0d, 0x20, 0x09, 0xcc, 0xb5, 0xa6, 0xb6, 0xbf, 0x26, 
0x26, 0xd0, 0x11, 0xf1, 0x23, - 0x48, 0x02, 0x63, 0x1b, 0x94, 0x60, 0x1c, 0x0b, 0x8a, 0xce, 0x29, 0x45, 0x18, 0xc7, 0x0c, 0xea, - 0xbc, 0xb9, 0x98, 0x5a, 0xda, 0xe5, 0xd4, 0xd2, 0x7e, 0x4d, 0x2d, 0xed, 0xfc, 0xda, 0x2a, 0x5c, - 0x5e, 0x5b, 0x85, 0xef, 0xd7, 0x56, 0xe1, 0xdd, 0xe3, 0x61, 0x48, 0x83, 0x49, 0xdf, 0xf6, 0xf0, - 0xb8, 0xad, 0xfe, 0xcf, 0xf2, 0x4f, 0xf1, 0xc7, 0x9d, 0xff, 0x57, 0xf7, 0x57, 0x79, 0xfc, 0xd1, - 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0x01, 0x5a, 0x6e, 0xc6, 0x07, 0x00, 0x00, -} - -func (m *ABCIResponses) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ABCIResponses) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.FinalizeBlock != nil { - { - size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil + // 749 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0xcd, 0x6e, 0xd3, 0x4a, + 0x14, 0xc7, 0xe3, 0xdb, 0x8f, 0x24, 0x93, 0x26, 0x69, 0xa7, 0x5d, 0xb8, 0xe9, 0xad, 0x93, 0xdb, + 0x7b, 0x2f, 0x8a, 0x58, 0x38, 0x12, 0x2c, 0x10, 0x1b, 0x24, 0x92, 0x4a, 0x34, 0xa2, 0x42, 0xe0, + 0xa2, 0x2e, 0xd8, 0x58, 0x13, 0x7b, 0x62, 0x5b, 0x24, 0x1e, 0xcb, 0x33, 0x29, 0xf0, 0x00, 0xec, + 0xbb, 0xe5, 0x8d, 0xba, 0xec, 0x12, 0xb1, 0x28, 0x28, 0x7d, 0x11, 0x34, 0x1f, 0xb6, 0x27, 0x49, + 0x91, 0x8a, 0xd8, 0xd9, 0xe7, 0x7f, 0xce, 0x6f, 0xfe, 0x73, 0x3c, 0x67, 0x0c, 0xfe, 0x66, 0x38, + 0xf6, 0x71, 0x3a, 0x8d, 0x62, 0xd6, 0xa3, 0x0c, 0x31, 0xdc, 0x63, 0x9f, 0x12, 0x4c, 0xed, 0x24, + 0x25, 0x8c, 0xc0, 
0xed, 0x42, 0xb5, 0x85, 0xda, 0xda, 0x0b, 0x48, 0x40, 0x84, 0xd8, 0xe3, 0x4f, + 0x32, 0xaf, 0xa5, 0x53, 0x44, 0xbd, 0x4e, 0x69, 0x1d, 0xac, 0xa8, 0x3e, 0xa2, 0xa1, 0x12, 0x3b, + 0x2b, 0xe2, 0x05, 0x9a, 0x44, 0x3e, 0x62, 0x24, 0x55, 0x19, 0x87, 0x2b, 0x19, 0x09, 0x4a, 0xd1, + 0x34, 0xa3, 0x5b, 0x9a, 0x7c, 0x81, 0x53, 0x1a, 0x91, 0x78, 0x61, 0xf5, 0x76, 0x40, 0x48, 0x30, + 0xc1, 0x3d, 0xf1, 0x36, 0x9a, 0x8d, 0x7b, 0x2c, 0x9a, 0x62, 0xca, 0xd0, 0x34, 0x91, 0x09, 0x47, + 0x9f, 0x0d, 0xd0, 0x38, 0xcf, 0xd6, 0xa4, 0xc3, 0x78, 0x4c, 0xe0, 0x00, 0xd4, 0x73, 0x17, 0x2e, + 0xc5, 0xcc, 0x34, 0x3a, 0x46, 0xb7, 0xf6, 0xc8, 0xb2, 0xb5, 0x7e, 0xc8, 0x35, 0xf2, 0xc2, 0x33, + 0xcc, 0x9c, 0xad, 0x0b, 0xed, 0x0d, 0xda, 0x60, 0x77, 0x82, 0x28, 0x73, 0x43, 0x1c, 0x05, 0x21, + 0x73, 0xbd, 0x10, 0xc5, 0x01, 0xf6, 0xcd, 0xbf, 0x3a, 0x46, 0x77, 0xcd, 0xd9, 0xe1, 0xd2, 0x89, + 0x50, 0x06, 0x52, 0x38, 0xfa, 0x62, 0x80, 0xdd, 0x01, 0x89, 0x29, 0x8e, 0xe9, 0x8c, 0xbe, 0x16, + 0x5b, 0x14, 0x66, 0x1c, 0xb0, 0xed, 0x65, 0x61, 0x57, 0x6e, 0x5d, 0xf9, 0xf9, 0x67, 0xd5, 0xcf, + 0x12, 0xa0, 0xbf, 0x7e, 0x75, 0xd3, 0x2e, 0x39, 0x4d, 0x6f, 0x31, 0xfc, 0xdb, 0xde, 0x42, 0x50, + 0x3e, 0x97, 0xbd, 0x85, 0xcf, 0x41, 0x35, 0xa7, 0x29, 0x1f, 0x87, 0xba, 0x0f, 0xf5, 0x0d, 0x0a, + 0x27, 0xca, 0x43, 0x51, 0x05, 0x5b, 0xa0, 0x42, 0xc9, 0x98, 0x7d, 0x40, 0x29, 0x16, 0x4b, 0x56, + 0x9d, 0xfc, 0xfd, 0xe8, 0x5b, 0x05, 0x6c, 0x9c, 0xf1, 0xa3, 0x06, 0x9f, 0x82, 0xb2, 0x62, 0xa9, + 0x65, 0xf6, 0xed, 0xe5, 0xe3, 0x68, 0x2b, 0x53, 0x6a, 0x89, 0x2c, 0x1f, 0x3e, 0x00, 0x15, 0x2f, + 0x44, 0x51, 0xec, 0x46, 0x72, 0x4f, 0xd5, 0x7e, 0x6d, 0x7e, 0xd3, 0x2e, 0x0f, 0x78, 0x6c, 0x78, + 0xec, 0x94, 0x85, 0x38, 0xf4, 0xe1, 0xff, 0xa0, 0x11, 0xc5, 0x11, 0x8b, 0xd0, 0x44, 0x75, 0xc2, + 0x6c, 0x88, 0x0e, 0xd4, 0x55, 0x54, 0x36, 0x01, 0x3e, 0x04, 0xa2, 0x25, 0xee, 0x68, 0x42, 0xbc, + 0xf7, 0x59, 0xe6, 0x9a, 0xc8, 0x6c, 0x72, 0xa1, 0xcf, 0xe3, 0x2a, 0xd7, 0x01, 0x75, 0x2d, 0x37, + 0xf2, 0xcd, 0xf5, 0x55, 0xef, 0xf2, 0x53, 0x89, 0xaa, 
0xe1, 0x71, 0x7f, 0x97, 0x7b, 0x9f, 0xdf, + 0xb4, 0x6b, 0xa7, 0x19, 0x6a, 0x78, 0xec, 0xd4, 0x72, 0xee, 0xd0, 0xcf, 0x99, 0x62, 0xcf, 0x9c, + 0x39, 0xfe, 0x15, 0x53, 0x74, 0x6e, 0x99, 0xa9, 0x82, 0x92, 0x29, 0x5f, 0x7c, 0x78, 0x0a, 0x9a, + 0x9a, 0x4f, 0x3e, 0x13, 0xe6, 0x86, 0xa0, 0xb6, 0x6c, 0x39, 0x30, 0x76, 0x36, 0x30, 0xf6, 0xdb, + 0x6c, 0x60, 0xfa, 0x15, 0x8e, 0xbd, 0xfc, 0xde, 0x36, 0x9c, 0x7a, 0xee, 0x8f, 0xab, 0xf0, 0x25, + 0xf8, 0x57, 0xd0, 0x3c, 0x92, 0x62, 0x57, 0xb6, 0x9e, 0x6b, 0xd8, 0x5f, 0xec, 0x99, 0xdf, 0x31, + 0xba, 0x75, 0xc7, 0xe2, 0xa9, 0x03, 0x92, 0x62, 0xf1, 0x3d, 0x4e, 0x45, 0x9e, 0xde, 0xc2, 0x73, + 0xb0, 0x17, 0xe3, 0x8f, 0x2b, 0x30, 0x13, 0x0b, 0x7f, 0xed, 0xbb, 0x0e, 0xbd, 0xc6, 0x12, 0x67, + 0xc1, 0x70, 0x76, 0x38, 0x62, 0x41, 0x80, 0x2f, 0x40, 0x53, 0x70, 0xf3, 0x29, 0xa5, 0xe6, 0xe6, + 0xbd, 0xe6, 0xba, 0xc1, 0xcb, 0x8a, 0x2b, 0x02, 0x3e, 0x03, 0x40, 0x63, 0x94, 0xef, 0xc5, 0xd0, + 0x2a, 0xb8, 0x11, 0xd1, 0x2d, 0x0d, 0x52, 0xb9, 0x9f, 0x11, 0x5e, 0xa6, 0x19, 0x19, 0x00, 0x4b, + 0x1f, 0xe3, 0x82, 0x97, 0x4f, 0x74, 0x55, 0x9c, 0xd2, 0x83, 0x62, 0xa2, 0x8b, 0x6a, 0x35, 0xdb, + 0x77, 0xde, 0x2f, 0xe0, 0x0f, 0xef, 0x97, 0x57, 0xe0, 0xbf, 0x85, 0xfb, 0x65, 0x89, 0x9f, 0xdb, + 0xab, 0x09, 0x7b, 0x1d, 0xed, 0xc2, 0x59, 0x04, 0x65, 0x1e, 0xb3, 0x09, 0x4c, 0x31, 0x9d, 0x4d, + 0x18, 0x75, 0x43, 0x44, 0x43, 0x73, 0xab, 0x63, 0x74, 0xb7, 0xe4, 0x04, 0x3a, 0x32, 0x7e, 0x82, + 0x68, 0x08, 0xf7, 0x41, 0x05, 0x25, 0x89, 0x4c, 0xa9, 0x8b, 0x94, 0x32, 0x4a, 0x12, 0x2e, 0xf5, + 0xdf, 0x5c, 0xcd, 0x2d, 0xe3, 0x7a, 0x6e, 0x19, 0x3f, 0xe6, 0x96, 0x71, 0x79, 0x6b, 0x95, 0xae, + 0x6f, 0xad, 0xd2, 0xd7, 0x5b, 0xab, 0xf4, 0xee, 0x49, 0x10, 0xb1, 0x70, 0x36, 0xb2, 0x3d, 0x32, + 0xed, 0xe9, 0xff, 0x9b, 0xe2, 0x51, 0xfe, 0xf4, 0x96, 0x7f, 0x97, 0xa3, 0x4d, 0x11, 0x7f, 0xfc, + 0x33, 0x00, 0x00, 0xff, 0xff, 0x69, 0xc8, 0x1f, 0xf3, 0x49, 0x07, 0x00, 0x00, } func (m *ValidatorsInfo) Marshal() (dAtA []byte, err error) { @@ -764,12 +677,12 @@ func (m *State) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) - if err11 != nil { - return 0, err11 + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) + if err10 != nil { + return 0, err10 } - i -= n11 - i = encodeVarintTypes(dAtA, i, uint64(n11)) + i -= n10 + i = encodeVarintTypes(dAtA, i, uint64(n10)) i-- dAtA[i] = 0x2a { @@ -818,19 +731,6 @@ func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *ABCIResponses) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FinalizeBlock != nil { - l = m.FinalizeBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - func (m *ValidatorsInfo) Size() (n int) { if m == nil { return 0 @@ -944,92 +844,6 @@ func sovTypes(x uint64) (n int) { func sozTypes(x uint64) (n int) { return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *ABCIResponses) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ABCIResponses: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ABCIResponses: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FinalizeBlock == nil { - m.FinalizeBlock = &types.ResponseFinalizeBlock{} - } - if err := m.FinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1089,7 +903,7 @@ func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ValidatorSet == nil { - m.ValidatorSet = &types1.ValidatorSet{} + m.ValidatorSet = &types.ValidatorSet{} } if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1561,7 +1375,7 @@ func (m *State) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.NextValidators == nil { - m.NextValidators = &types1.ValidatorSet{} + m.NextValidators = &types.ValidatorSet{} } if err := m.NextValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1597,7 +1411,7 @@ func (m *State) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Validators == nil { - m.Validators = &types1.ValidatorSet{} + m.Validators = &types.ValidatorSet{} } if err := m.Validators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1633,7 +1447,7 @@ func (m *State) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if 
m.LastValidators == nil { - m.LastValidators = &types1.ValidatorSet{} + m.LastValidators = &types.ValidatorSet{} } if err := m.LastValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1846,7 +1660,7 @@ func (m *State) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.NextCoreChainLock == nil { - m.NextCoreChainLock = &types1.CoreChainLock{} + m.NextCoreChainLock = &types.CoreChainLock{} } if err := m.NextCoreChainLock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/proto/tendermint/state/types.proto b/proto/tendermint/state/types.proto index 1285dfe1dd..14971b3a0b 100644 --- a/proto/tendermint/state/types.proto +++ b/proto/tendermint/state/types.proto @@ -4,7 +4,6 @@ package tendermint.state; option go_package = "github.com/tendermint/tendermint/proto/tendermint/state"; import "gogoproto/gogo.proto"; -import "tendermint/abci/types.proto"; import "tendermint/types/types.proto"; import "tendermint/types/dash.proto"; import "tendermint/types/validator.proto"; @@ -12,13 +11,6 @@ import "tendermint/types/params.proto"; import "tendermint/version/types.proto"; import "google/protobuf/timestamp.proto"; -// ABCIResponses retains the responses -// of the various ABCI calls during block processing. -// It is persisted to disk for each height before calling Commit. 
-message ABCIResponses { - tendermint.abci.ResponseFinalizeBlock finalize_block = 2; -} - // ValidatorsInfo represents the latest validator set, or the last height it changed message ValidatorsInfo { tendermint.types.ValidatorSet validator_set = 1; diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index 41d417b915..5a2b3824f0 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -36,6 +36,7 @@ type ConsensusParams struct { Version *VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` Synchrony *SynchronyParams `protobuf:"bytes,5,opt,name=synchrony,proto3" json:"synchrony,omitempty"` Timeout *TimeoutParams `protobuf:"bytes,6,opt,name=timeout,proto3" json:"timeout,omitempty"` + Abci *ABCIParams `protobuf:"bytes,7,opt,name=abci,proto3" json:"abci,omitempty"` } func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } @@ -113,6 +114,13 @@ func (m *ConsensusParams) GetTimeout() *TimeoutParams { return nil } +func (m *ConsensusParams) GetAbci() *ABCIParams { + if m != nil { + return m.Abci + } + return nil +} + // BlockParams contains limits on the block size. type BlockParams struct { // Max block size, in bytes. @@ -566,6 +574,53 @@ func (m *TimeoutParams) GetBypassCommitTimeout() bool { return false } +// ABCIParams configure functionality specific to the Application Blockchain Interface. +type ABCIParams struct { + // Indicates if CheckTx should be called on all the transactions + // remaining in the mempool after a block is executed. 
+ RecheckTx bool `protobuf:"varint,2,opt,name=recheck_tx,json=recheckTx,proto3" json:"recheck_tx,omitempty"` +} + +func (m *ABCIParams) Reset() { *m = ABCIParams{} } +func (m *ABCIParams) String() string { return proto.CompactTextString(m) } +func (*ABCIParams) ProtoMessage() {} +func (*ABCIParams) Descriptor() ([]byte, []int) { + return fileDescriptor_e12598271a686f57, []int{8} +} +func (m *ABCIParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ABCIParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ABCIParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ABCIParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABCIParams.Merge(m, src) +} +func (m *ABCIParams) XXX_Size() int { + return m.Size() +} +func (m *ABCIParams) XXX_DiscardUnknown() { + xxx_messageInfo_ABCIParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ABCIParams proto.InternalMessageInfo + +func (m *ABCIParams) GetRecheckTx() bool { + if m != nil { + return m.RecheckTx + } + return false +} + func init() { proto.RegisterType((*ConsensusParams)(nil), "tendermint.types.ConsensusParams") proto.RegisterType((*BlockParams)(nil), "tendermint.types.BlockParams") @@ -575,55 +630,59 @@ func init() { proto.RegisterType((*HashedParams)(nil), "tendermint.types.HashedParams") proto.RegisterType((*SynchronyParams)(nil), "tendermint.types.SynchronyParams") proto.RegisterType((*TimeoutParams)(nil), "tendermint.types.TimeoutParams") + proto.RegisterType((*ABCIParams)(nil), "tendermint.types.ABCIParams") } func init() { proto.RegisterFile("tendermint/types/params.proto", fileDescriptor_e12598271a686f57) } var fileDescriptor_e12598271a686f57 = []byte{ - // 680 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xcf, 0x6e, 0xd3, 0x4a, - 0x14, 
0xc6, 0xe3, 0x26, 0x4d, 0x93, 0x93, 0xa6, 0xa9, 0xe6, 0xde, 0xab, 0xeb, 0xdb, 0xab, 0x3a, - 0xc5, 0x0b, 0x54, 0x09, 0xc9, 0x41, 0xad, 0x50, 0x85, 0xc4, 0x1f, 0x91, 0x06, 0x81, 0x84, 0x8a, - 0x90, 0x29, 0x2c, 0xba, 0xb1, 0xc6, 0xc9, 0xe0, 0x5a, 0x8d, 0x3d, 0x96, 0xc7, 0x8e, 0xe2, 0xb7, - 0x60, 0x85, 0x78, 0x04, 0x78, 0x93, 0x2e, 0xbb, 0x64, 0x05, 0x28, 0x7d, 0x03, 0xd6, 0x2c, 0xd0, - 0xfc, 0x6b, 0x9a, 0x94, 0xd2, 0xac, 0xe2, 0xcc, 0xf9, 0x7e, 0xfe, 0x3c, 0xdf, 0x39, 0x33, 0xb0, - 0x99, 0x91, 0x78, 0x40, 0xd2, 0x28, 0x8c, 0xb3, 0x4e, 0x56, 0x24, 0x84, 0x75, 0x12, 0x9c, 0xe2, - 0x88, 0x39, 0x49, 0x4a, 0x33, 0x8a, 0xd6, 0xa7, 0x65, 0x47, 0x94, 0x37, 0xfe, 0x0e, 0x68, 0x40, - 0x45, 0xb1, 0xc3, 0x9f, 0xa4, 0x6e, 0xc3, 0x0a, 0x28, 0x0d, 0x86, 0xa4, 0x23, 0xfe, 0xf9, 0xf9, - 0xbb, 0xce, 0x20, 0x4f, 0x71, 0x16, 0xd2, 0x58, 0xd6, 0xed, 0x9f, 0x4b, 0xd0, 0xda, 0xa7, 0x31, - 0x23, 0x31, 0xcb, 0xd9, 0x2b, 0xe1, 0x80, 0x76, 0x61, 0xd9, 0x1f, 0xd2, 0xfe, 0x89, 0x69, 0x6c, - 0x19, 0xdb, 0x8d, 0x9d, 0x4d, 0x67, 0xde, 0xcb, 0xe9, 0xf2, 0xb2, 0x54, 0xbb, 0x52, 0x8b, 0x1e, - 0x40, 0x8d, 0x8c, 0xc2, 0x01, 0x89, 0xfb, 0xc4, 0x5c, 0x12, 0xdc, 0xd6, 0x55, 0xee, 0xa9, 0x52, - 0x28, 0xf4, 0x82, 0x40, 0x8f, 0xa1, 0x3e, 0xc2, 0xc3, 0x70, 0x80, 0x33, 0x9a, 0x9a, 0x65, 0x81, - 0xdf, 0xba, 0x8a, 0xbf, 0xd5, 0x12, 0xc5, 0x4f, 0x19, 0x74, 0x1f, 0x56, 0x46, 0x24, 0x65, 0x21, - 0x8d, 0xcd, 0x8a, 0xc0, 0xdb, 0xbf, 0xc1, 0xa5, 0x40, 0xc1, 0x5a, 0xcf, 0xbd, 0x59, 0x11, 0xf7, - 0x8f, 0x53, 0x1a, 0x17, 0xe6, 0xf2, 0x75, 0xde, 0xaf, 0xb5, 0x44, 0x7b, 0x5f, 0x30, 0xdc, 0x3b, - 0x0b, 0x23, 0x42, 0xf3, 0xcc, 0xac, 0x5e, 0xe7, 0x7d, 0x28, 0x05, 0xda, 0x5b, 0xe9, 0xed, 0x7d, - 0x68, 0x5c, 0xca, 0x12, 0xfd, 0x0f, 0xf5, 0x08, 0x8f, 0x3d, 0xbf, 0xc8, 0x08, 0x13, 0xe9, 0x97, - 0xdd, 0x5a, 0x84, 0xc7, 0x5d, 0xfe, 0x1f, 0xfd, 0x0b, 0x2b, 0xbc, 0x18, 0x60, 0x26, 0x02, 0x2e, - 0xbb, 0xd5, 0x08, 0x8f, 0x9f, 0x61, 0x66, 0x7f, 0x36, 0x60, 0x6d, 0x36, 0x59, 0x74, 0x07, 0x10, - 0xd7, 0xe2, 0x80, 0x78, 0x71, 0x1e, 0x79, 
0xa2, 0x45, 0xfa, 0x8d, 0xad, 0x08, 0x8f, 0x9f, 0x04, - 0xe4, 0x65, 0x1e, 0x09, 0x6b, 0x86, 0x0e, 0x60, 0x5d, 0x8b, 0xf5, 0x74, 0xa8, 0x16, 0xfe, 0xe7, - 0xc8, 0xf1, 0x71, 0xf4, 0xf8, 0x38, 0x3d, 0x25, 0xe8, 0xd6, 0x4e, 0xbf, 0xb6, 0x4b, 0x1f, 0xbf, - 0xb5, 0x0d, 0x77, 0x4d, 0xbe, 0x4f, 0x57, 0x66, 0x37, 0x51, 0x9e, 0xdd, 0x84, 0x7d, 0x0f, 0x5a, - 0x73, 0x5d, 0x44, 0x36, 0x34, 0x93, 0xdc, 0xf7, 0x4e, 0x48, 0xe1, 0x89, 0xac, 0x4c, 0x63, 0xab, - 0xbc, 0x5d, 0x77, 0x1b, 0x49, 0xee, 0xbf, 0x20, 0xc5, 0x21, 0x5f, 0xb2, 0xef, 0x42, 0x73, 0xa6, - 0x7b, 0xa8, 0x0d, 0x0d, 0x9c, 0x24, 0x9e, 0xee, 0x39, 0xdf, 0x59, 0xc5, 0x05, 0x9c, 0x24, 0x4a, - 0x66, 0x1f, 0xc1, 0xea, 0x73, 0xcc, 0x8e, 0xc9, 0x40, 0x01, 0xb7, 0xa1, 0x25, 0x52, 0xf0, 0xe6, - 0x03, 0x6e, 0x8a, 0xe5, 0x03, 0x9d, 0xb2, 0x0d, 0xcd, 0xa9, 0x6e, 0x9a, 0x75, 0x43, 0xab, 0x78, - 0xe0, 0x1f, 0x0c, 0x68, 0xcd, 0xcd, 0x03, 0xea, 0x41, 0x33, 0x22, 0x8c, 0x89, 0x10, 0xc9, 0x10, - 0x17, 0xea, 0xf0, 0xfc, 0x21, 0xc1, 0x8a, 0x48, 0x6f, 0x55, 0x51, 0x3d, 0x0e, 0xa1, 0x87, 0x50, - 0x4f, 0x52, 0xd2, 0x0f, 0xd9, 0x42, 0x3d, 0x90, 0x6f, 0x98, 0x12, 0xf6, 0x8f, 0x25, 0x68, 0xce, - 0x4c, 0x1a, 0x9f, 0xcd, 0x24, 0xa5, 0x09, 0x65, 0x64, 0xd1, 0x0f, 0xd2, 0x7a, 0xbe, 0x23, 0xf5, - 0xc8, 0x77, 0x94, 0xe1, 0x45, 0xbf, 0x67, 0x55, 0x51, 0x3d, 0x0e, 0xa1, 0x5d, 0xa8, 0x8c, 0x68, - 0x46, 0xd4, 0xa1, 0xbe, 0x11, 0x16, 0x62, 0xf4, 0x08, 0x80, 0xff, 0x2a, 0xdf, 0xca, 0x82, 0x39, - 0x70, 0x44, 0x9a, 0xee, 0x41, 0xb5, 0x4f, 0xa3, 0x28, 0xcc, 0xd4, 0x79, 0xbe, 0x91, 0x55, 0x72, - 0xb4, 0x03, 0xff, 0xf8, 0x45, 0x82, 0x19, 0xf3, 0xe4, 0x82, 0x77, 0xf9, 0x60, 0xd7, 0xdc, 0xbf, - 0x64, 0x71, 0x5f, 0xd4, 0x54, 0xd0, 0xdd, 0x37, 0x9f, 0x26, 0x96, 0x71, 0x3a, 0xb1, 0x8c, 0xb3, - 0x89, 0x65, 0x7c, 0x9f, 0x58, 0xc6, 0xfb, 0x73, 0xab, 0x74, 0x76, 0x6e, 0x95, 0xbe, 0x9c, 0x5b, - 0xa5, 0xa3, 0xbd, 0x20, 0xcc, 0x8e, 0x73, 0xdf, 0xe9, 0xd3, 0xa8, 0x73, 0xf9, 0x4a, 0x9f, 0x3e, - 0xca, 0x3b, 0x7b, 0xfe, 0xba, 0xf7, 0xab, 0x62, 0x7d, 0xf7, 0x57, 0x00, 0x00, 
0x00, 0xff, 0xff, - 0xfc, 0x06, 0xae, 0x9f, 0x09, 0x06, 0x00, 0x00, + // 723 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xdf, 0x6e, 0xd3, 0x30, + 0x14, 0xc6, 0x9b, 0xb5, 0xeb, 0xda, 0xd3, 0x75, 0x9d, 0x0c, 0x88, 0x30, 0x58, 0x3a, 0x72, 0x81, + 0x26, 0x4d, 0x4a, 0xa7, 0x4d, 0x68, 0x42, 0xe2, 0x8f, 0xd6, 0x16, 0x01, 0x42, 0x43, 0x28, 0x0c, + 0x2e, 0x76, 0x13, 0x39, 0xa9, 0x49, 0xa3, 0x35, 0x71, 0x14, 0x27, 0x55, 0xf3, 0x16, 0x5c, 0x21, + 0x1e, 0x01, 0x6e, 0x78, 0x8e, 0x5d, 0xee, 0x92, 0x2b, 0x40, 0xdd, 0x1b, 0xf0, 0x04, 0xc8, 0x8e, + 0xb3, 0xae, 0x1d, 0x63, 0xbd, 0x8a, 0xe3, 0xf3, 0xfd, 0x7c, 0xe2, 0xef, 0x9c, 0xd8, 0xb0, 0x1e, + 0x93, 0xa0, 0x47, 0x22, 0xdf, 0x0b, 0xe2, 0x56, 0x9c, 0x86, 0x84, 0xb5, 0x42, 0x1c, 0x61, 0x9f, + 0x19, 0x61, 0x44, 0x63, 0x8a, 0x56, 0x27, 0x61, 0x43, 0x84, 0xd7, 0x6e, 0xba, 0xd4, 0xa5, 0x22, + 0xd8, 0xe2, 0xa3, 0x4c, 0xb7, 0xa6, 0xb9, 0x94, 0xba, 0x03, 0xd2, 0x12, 0x6f, 0x76, 0xf2, 0xb1, + 0xd5, 0x4b, 0x22, 0x1c, 0x7b, 0x34, 0xc8, 0xe2, 0xfa, 0xf7, 0x22, 0x34, 0x3a, 0x34, 0x60, 0x24, + 0x60, 0x09, 0x7b, 0x2b, 0x32, 0xa0, 0x5d, 0x58, 0xb4, 0x07, 0xd4, 0x39, 0x56, 0x95, 0x0d, 0x65, + 0xb3, 0xb6, 0xb3, 0x6e, 0xcc, 0xe6, 0x32, 0xda, 0x3c, 0x9c, 0xa9, 0xcd, 0x4c, 0x8b, 0x1e, 0x43, + 0x85, 0x0c, 0xbd, 0x1e, 0x09, 0x1c, 0xa2, 0x2e, 0x08, 0x6e, 0xe3, 0x32, 0xf7, 0x5c, 0x2a, 0x24, + 0x7a, 0x4e, 0xa0, 0x67, 0x50, 0x1d, 0xe2, 0x81, 0xd7, 0xc3, 0x31, 0x8d, 0xd4, 0xa2, 0xc0, 0xef, + 0x5f, 0xc6, 0x3f, 0xe4, 0x12, 0xc9, 0x4f, 0x18, 0xf4, 0x08, 0x96, 0x86, 0x24, 0x62, 0x1e, 0x0d, + 0xd4, 0x92, 0xc0, 0x9b, 0xff, 0xc0, 0x33, 0x81, 0x84, 0x73, 0x3d, 0xcf, 0xcd, 0xd2, 0xc0, 0xe9, + 0x47, 0x34, 0x48, 0xd5, 0xc5, 0xab, 0x72, 0xbf, 0xcb, 0x25, 0x79, 0xee, 0x73, 0x86, 0xe7, 0x8e, + 0x3d, 0x9f, 0xd0, 0x24, 0x56, 0xcb, 0x57, 0xe5, 0x3e, 0xcc, 0x04, 0x79, 0x6e, 0xa9, 0x47, 0xdb, + 0x50, 0xc2, 0xb6, 0xe3, 0xa9, 0x4b, 0x82, 0xbb, 0x77, 0x99, 0xdb, 0x6f, 0x77, 0x5e, 0x49, 0x48, + 0x28, 0xf5, 0x0e, 
0xd4, 0x2e, 0xb8, 0x8f, 0xee, 0x42, 0xd5, 0xc7, 0x23, 0xcb, 0x4e, 0x63, 0xc2, + 0x44, 0xbd, 0x8a, 0x66, 0xc5, 0xc7, 0xa3, 0x36, 0x7f, 0x47, 0xb7, 0x61, 0x89, 0x07, 0x5d, 0xcc, + 0x44, 0x49, 0x8a, 0x66, 0xd9, 0xc7, 0xa3, 0x17, 0x98, 0xe9, 0xdf, 0x14, 0x58, 0x99, 0xae, 0x05, + 0xda, 0x02, 0xc4, 0xb5, 0xd8, 0x25, 0x56, 0x90, 0xf8, 0x96, 0x28, 0x6a, 0xbe, 0x62, 0xc3, 0xc7, + 0xa3, 0x7d, 0x97, 0xbc, 0x49, 0x7c, 0x91, 0x9a, 0xa1, 0x03, 0x58, 0xcd, 0xc5, 0x79, 0x3f, 0xc9, + 0xa2, 0xdf, 0x31, 0xb2, 0x86, 0x33, 0xf2, 0x86, 0x33, 0xba, 0x52, 0xd0, 0xae, 0x9c, 0xfc, 0x6c, + 0x16, 0xbe, 0xfc, 0x6a, 0x2a, 0xe6, 0x4a, 0xb6, 0x5e, 0x1e, 0x99, 0xde, 0x44, 0x71, 0x7a, 0x13, + 0xfa, 0x43, 0x68, 0xcc, 0xd4, 0x1d, 0xe9, 0x50, 0x0f, 0x13, 0xdb, 0x3a, 0x26, 0xa9, 0x25, 0x5c, + 0x52, 0x95, 0x8d, 0xe2, 0x66, 0xd5, 0xac, 0x85, 0x89, 0xfd, 0x9a, 0xa4, 0x87, 0x7c, 0x4a, 0xdf, + 0x86, 0xfa, 0x54, 0xbd, 0x51, 0x13, 0x6a, 0x38, 0x0c, 0xad, 0xbc, 0x4b, 0xf8, 0xce, 0x4a, 0x26, + 0xe0, 0x30, 0x94, 0x32, 0xfd, 0x08, 0x96, 0x5f, 0x62, 0xd6, 0x27, 0x3d, 0x09, 0x3c, 0x80, 0x86, + 0x70, 0xc1, 0x9a, 0x35, 0xb8, 0x2e, 0xa6, 0x0f, 0x72, 0x97, 0x75, 0xa8, 0x4f, 0x74, 0x13, 0xaf, + 0x6b, 0xb9, 0x8a, 0x1b, 0xfe, 0x59, 0x81, 0xc6, 0x4c, 0x07, 0xa1, 0x2e, 0xd4, 0x7d, 0xc2, 0x98, + 0x30, 0x91, 0x0c, 0x70, 0x2a, 0x7f, 0xb7, 0xff, 0x38, 0x58, 0x12, 0xee, 0x2d, 0x4b, 0xaa, 0xcb, + 0x21, 0xf4, 0x04, 0xaa, 0x61, 0x44, 0x1c, 0x8f, 0xcd, 0x55, 0x83, 0x6c, 0x85, 0x09, 0xa1, 0xff, + 0x59, 0x80, 0xfa, 0x54, 0x6f, 0xf2, 0x6e, 0x0e, 0x23, 0x1a, 0x52, 0x46, 0xe6, 0xfd, 0xa0, 0x5c, + 0xcf, 0x77, 0x24, 0x87, 0x7c, 0x47, 0x31, 0x9e, 0xf7, 0x7b, 0x96, 0x25, 0xd5, 0xe5, 0x10, 0xda, + 0x85, 0xd2, 0x90, 0xc6, 0x44, 0x1e, 0x03, 0xd7, 0xc2, 0x42, 0x8c, 0x9e, 0x02, 0xf0, 0xa7, 0xcc, + 0x5b, 0x9a, 0xd3, 0x07, 0x8e, 0x64, 0x49, 0xf7, 0xa0, 0xec, 0x50, 0xdf, 0xf7, 0x62, 0x79, 0x02, + 0x5c, 0xcb, 0x4a, 0x39, 0xda, 0x81, 0x5b, 0x76, 0x1a, 0x62, 0xc6, 0xac, 0x6c, 0xc2, 0xba, 0x78, + 0x14, 0x54, 0xcc, 0x1b, 0x59, 0xb0, 0x23, 0x62, 0xd2, 
0x68, 0x7d, 0x0b, 0x60, 0xf2, 0x5f, 0xa3, + 0x75, 0x80, 0x88, 0x38, 0x7d, 0xe2, 0x1c, 0x5b, 0xf1, 0x48, 0x58, 0x56, 0x31, 0xab, 0x72, 0xe6, + 0x70, 0xd4, 0x7e, 0xff, 0x75, 0xac, 0x29, 0x27, 0x63, 0x4d, 0x39, 0x1d, 0x6b, 0xca, 0xef, 0xb1, + 0xa6, 0x7c, 0x3a, 0xd3, 0x0a, 0xa7, 0x67, 0x5a, 0xe1, 0xc7, 0x99, 0x56, 0x38, 0xda, 0x73, 0xbd, + 0xb8, 0x9f, 0xd8, 0x86, 0x43, 0xfd, 0xd6, 0xc5, 0x1b, 0x63, 0x32, 0xcc, 0xae, 0x84, 0xd9, 0xdb, + 0xc4, 0x2e, 0x8b, 0xf9, 0xdd, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0x18, 0xb7, 0x2e, 0x68, + 0x06, 0x00, 0x00, } func (this *ConsensusParams) Equal(that interface{}) bool { @@ -663,6 +722,9 @@ func (this *ConsensusParams) Equal(that interface{}) bool { if !this.Timeout.Equal(that1.Timeout) { return false } + if !this.Abci.Equal(that1.Abci) { + return false + } return true } func (this *BlockParams) Equal(that interface{}) bool { @@ -910,6 +972,30 @@ func (this *TimeoutParams) Equal(that interface{}) bool { } return true } +func (this *ABCIParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ABCIParams) + if !ok { + that2, ok := that.(ABCIParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.RecheckTx != that1.RecheckTx { + return false + } + return true +} func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -930,6 +1016,18 @@ func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Abci != nil { + { + size, err := m.Abci.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.Timeout != nil { { size, err := m.Timeout.MarshalToSizedBuffer(dAtA[:i]) @@ -1063,12 +1161,12 @@ func (m *EvidenceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x18 
} - n7, err7 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) - if err7 != nil { - return 0, err7 + n8, err8 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) + if err8 != nil { + return 0, err8 } - i -= n7 - i = encodeVarintParams(dAtA, i, uint64(n7)) + i -= n8 + i = encodeVarintParams(dAtA, i, uint64(n8)) i-- dAtA[i] = 0x12 if m.MaxAgeNumBlocks != 0 { @@ -1193,22 +1291,22 @@ func (m *SynchronyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if m.Precision != nil { - n8, err8 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Precision, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision):]) - if err8 != nil { - return 0, err8 + n9, err9 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Precision, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision):]) + if err9 != nil { + return 0, err9 } - i -= n8 - i = encodeVarintParams(dAtA, i, uint64(n8)) + i -= n9 + i = encodeVarintParams(dAtA, i, uint64(n9)) i-- dAtA[i] = 0x12 } if m.MessageDelay != nil { - n9, err9 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.MessageDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay):]) - if err9 != nil { - return 0, err9 + n10, err10 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.MessageDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay):]) + if err10 != nil { + return 0, err10 } - i -= n9 - i = encodeVarintParams(dAtA, i, uint64(n9)) + i -= n10 + i = encodeVarintParams(dAtA, i, uint64(n10)) i-- dAtA[i] = 0xa } @@ -1246,58 +1344,91 @@ func (m *TimeoutParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x30 } if m.Commit != nil { - n10, err10 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Commit, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Commit):]) - if err10 != nil { - return 0, err10 - } - i -= n10 - i = encodeVarintParams(dAtA, i, uint64(n10)) - i-- - dAtA[i] = 0x2a - } - if m.VoteDelta != nil { - n11, err11 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.VoteDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.VoteDelta):]) + n11, err11 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Commit, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Commit):]) if err11 != nil { return 0, err11 } i -= n11 i = encodeVarintParams(dAtA, i, uint64(n11)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x2a } - if m.Vote != nil { - n12, err12 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Vote, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Vote):]) + if m.VoteDelta != nil { + n12, err12 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.VoteDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.VoteDelta):]) if err12 != nil { return 0, err12 } i -= n12 i = encodeVarintParams(dAtA, i, uint64(n12)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 } - if m.ProposeDelta != nil { - n13, err13 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.ProposeDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.ProposeDelta):]) + if m.Vote != nil { + n13, err13 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Vote, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Vote):]) if err13 != nil { return 0, err13 } i -= n13 i = encodeVarintParams(dAtA, i, uint64(n13)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - if m.Propose != nil { - n14, err14 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Propose, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Propose):]) + if m.ProposeDelta != nil { + n14, err14 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.ProposeDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.ProposeDelta):]) if err14 != 
nil { return 0, err14 } i -= n14 i = encodeVarintParams(dAtA, i, uint64(n14)) i-- + dAtA[i] = 0x12 + } + if m.Propose != nil { + n15, err15 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Propose, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Propose):]) + if err15 != nil { + return 0, err15 + } + i -= n15 + i = encodeVarintParams(dAtA, i, uint64(n15)) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } +func (m *ABCIParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ABCIParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ABCIParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RecheckTx { + i-- + if m.RecheckTx { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + func encodeVarintParams(dAtA []byte, offset int, v uint64) int { offset -= sovParams(v) base := offset @@ -1339,6 +1470,10 @@ func (m *ConsensusParams) Size() (n int) { l = m.Timeout.Size() n += 1 + l + sovParams(uint64(l)) } + if m.Abci != nil { + l = m.Abci.Size() + n += 1 + l + sovParams(uint64(l)) + } return n } @@ -1465,6 +1600,18 @@ func (m *TimeoutParams) Size() (n int) { return n } +func (m *ABCIParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RecheckTx { + n += 2 + } + return n +} + func sovParams(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1716,6 +1863,42 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Abci", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Abci == nil { + m.Abci = &ABCIParams{} + } + if err := m.Abci.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) @@ -2557,6 +2740,76 @@ func (m *TimeoutParams) Unmarshal(dAtA []byte) error { } return nil } +func (m *ABCIParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ABCIParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ABCIParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RecheckTx", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RecheckTx = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} func skipParams(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto index 466ba464fe..6b30b415ee 100644 --- a/proto/tendermint/types/params.proto +++ b/proto/tendermint/types/params.proto @@ -17,6 +17,7 @@ message ConsensusParams { VersionParams version = 4; SynchronyParams synchrony = 5; TimeoutParams timeout = 6; + ABCIParams abci = 7; } // BlockParams contains limits on the block size. @@ -127,3 +128,10 @@ message TimeoutParams { // for the full commit timeout. bool bypass_commit_timeout = 6; } + +// ABCIParams configure functionality specific to the Application Blockchain Interface. +message ABCIParams { + // Indicates if CheckTx should be called on all the transactions + // remaining in the mempool after a block is executed. + bool recheck_tx = 2; +} diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index d919e0d74c..054d8c9eb4 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -18,6 +18,10 @@ import ( ) func TestHTTPSimple(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -68,6 +72,10 @@ func TestHTTPSimple(t *testing.T) { } func TestHTTPBatching(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index a66becbd58..eb13e8d77e 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -15,6 +15,10 @@ import ( ) func TestWaitForHeight(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index c42ce6b5b8..4bc29b63f3 100644 --- 
a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -243,6 +243,10 @@ func (c *baseRPCClient) BroadcastTxSync(ctx context.Context, tx types.Tx) (*core return c.broadcastTX(ctx, "broadcast_tx_sync", tx) } +func (c *baseRPCClient) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + return c.broadcastTX(ctx, "broadcast_tx_sync", tx) +} + func (c *baseRPCClient) broadcastTX(ctx context.Context, route string, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { result := new(coretypes.ResultBroadcastTx) if err := c.caller.Call(ctx, route, &coretypes.RequestBroadcastTx{Tx: tx}, result); err != nil { diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 61405ab14f..169671fd61 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -62,6 +62,8 @@ type ABCIClient interface { opts ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) // Writing to abci app + BroadcastTx(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error) + // These methods are deprecated: BroadcastTxCommit(context.Context, types.Tx) (*coretypes.ResultBroadcastTxCommit, error) BroadcastTxAsync(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error) BroadcastTxSync(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error) diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index b95a77aeda..6ed7cff684 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -87,6 +87,10 @@ func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes. 
return c.env.BroadcastTxCommit(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) } +func (c *Local) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + return c.env.BroadcastTx(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) +} + func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) } diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index a6aebdb14b..142d64d19b 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -88,13 +88,16 @@ func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes. return &coretypes.ResultBroadcastTx{ Code: c.Code, Data: c.Data, - Log: c.Log, Codespace: c.Codespace, Hash: tx.Hash(), }, nil } func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + return a.BroadcastTx(ctx, tx) +} + +func (a ABCIApp) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { c, err := a.App.CheckTx(ctx, &abci.RequestCheckTx{Tx: tx}) if err != nil { return nil, err @@ -107,7 +110,6 @@ func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.R return &coretypes.ResultBroadcastTx{ Code: c.Code, Data: c.Data, - Log: c.Log, Codespace: c.Codespace, Hash: tx.Hash(), }, nil @@ -160,6 +162,14 @@ func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes return res.(*coretypes.ResultBroadcastTx), nil } +func (m ABCIMock) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + res, err := m.Broadcast.GetResponse(tx) + if err != nil { + return nil, err + } + return res.(*coretypes.ResultBroadcastTx), nil +} + func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { @@ -254,3 +264,14 @@ func (r *ABCIRecorder) 
BroadcastTxSync(ctx context.Context, tx types.Tx) (*coret }) return res, err } + +func (r *ABCIRecorder) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + res, err := r.Client.BroadcastTx(ctx, tx) + r.addCall(Call{ + Name: "broadcast_tx", + Args: tx, + Response: res, + Error: err, + }) + return res, err +} diff --git a/rpc/client/mocks/abci_client.go b/rpc/client/mocks/abci_client.go index 07683bc261..76587a0775 100644 --- a/rpc/client/mocks/abci_client.go +++ b/rpc/client/mocks/abci_client.go @@ -12,8 +12,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -91,6 +89,29 @@ func (_m *ABCIClient) ABCIQueryWithOptions(ctx context.Context, path string, dat return r0, r1 } +// BroadcastTx provides a mock function with given fields: _a0, _a1 +func (_m *ABCIClient) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // BroadcastTxAsync provides a mock function with given fields: _a0, _a1 func (_m *ABCIClient) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) @@ -160,8 +181,13 @@ func (_m *ABCIClient) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coret return r0, r1 } -// NewABCIClient creates a new instance of ABCIClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewABCIClient(t testing.TB) *ABCIClient { +type mockConstructorTestingTNewABCIClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewABCIClient creates a new instance of ABCIClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewABCIClient(t mockConstructorTestingTNewABCIClient) *ABCIClient { mock := &ABCIClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index e16a9a8e82..7c485c425d 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -12,8 +12,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -229,6 +227,29 @@ func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*c return r0, r1 } +// BroadcastTx provides a mock function with given fields: _a0, _a1 +func (_m *Client) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // BroadcastTxAsync provides a mock function with given fields: _a0, _a1 func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) @@ -798,8 +819,13 @@ func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perP return r0, r1 } -// NewClient creates a new instance of Client. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewClient(t testing.TB) *Client { +type mockConstructorTestingTNewClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewClient(t mockConstructorTestingTNewClient) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/events_client.go b/rpc/client/mocks/events_client.go index eba096284c..19b882552f 100644 --- a/rpc/client/mocks/events_client.go +++ b/rpc/client/mocks/events_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - - testing "testing" ) // EventsClient is an autogenerated mock type for the EventsClient type @@ -39,8 +37,13 @@ func (_m *EventsClient) Events(ctx context.Context, req *coretypes.RequestEvents return r0, r1 } -// NewEventsClient creates a new instance of EventsClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewEventsClient(t testing.TB) *EventsClient { +type mockConstructorTestingTNewEventsClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewEventsClient creates a new instance of EventsClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewEventsClient(t mockConstructorTestingTNewEventsClient) *EventsClient { mock := &EventsClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/evidence_client.go b/rpc/client/mocks/evidence_client.go index 7824a2ae4e..3b8d3cf2ed 100644 --- a/rpc/client/mocks/evidence_client.go +++ b/rpc/client/mocks/evidence_client.go @@ -8,8 +8,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -41,8 +39,13 @@ func (_m *EvidenceClient) BroadcastEvidence(_a0 context.Context, _a1 types.Evide return r0, r1 } -// NewEvidenceClient creates a new instance of EvidenceClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewEvidenceClient(t testing.TB) *EvidenceClient { +type mockConstructorTestingTNewEvidenceClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewEvidenceClient creates a new instance of EvidenceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewEvidenceClient(t mockConstructorTestingTNewEvidenceClient) *EvidenceClient { mock := &EvidenceClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/history_client.go b/rpc/client/mocks/history_client.go index ecd0190504..ae28fe69b2 100644 --- a/rpc/client/mocks/history_client.go +++ b/rpc/client/mocks/history_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - - testing "testing" ) // HistoryClient is an autogenerated mock type for the HistoryClient type @@ -85,8 +83,13 @@ func (_m *HistoryClient) GenesisChunked(_a0 context.Context, _a1 uint) (*coretyp return r0, r1 } -// NewHistoryClient creates a new instance of HistoryClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewHistoryClient(t testing.TB) *HistoryClient { +type mockConstructorTestingTNewHistoryClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewHistoryClient creates a new instance of HistoryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewHistoryClient(t mockConstructorTestingTNewHistoryClient) *HistoryClient { mock := &HistoryClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/mempool_client.go b/rpc/client/mocks/mempool_client.go index 0dfea703fe..714c66c6d4 100644 --- a/rpc/client/mocks/mempool_client.go +++ b/rpc/client/mocks/mempool_client.go @@ -8,8 +8,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -101,8 +99,13 @@ func (_m *MempoolClient) UnconfirmedTxs(ctx context.Context, page *int, perPage return r0, r1 } -// NewMempoolClient creates a new instance of MempoolClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewMempoolClient(t testing.TB) *MempoolClient { +type mockConstructorTestingTNewMempoolClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewMempoolClient creates a new instance of MempoolClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewMempoolClient(t mockConstructorTestingTNewMempoolClient) *MempoolClient { mock := &MempoolClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/network_client.go b/rpc/client/mocks/network_client.go index 73bb11d612..d0f7eaa559 100644 --- a/rpc/client/mocks/network_client.go +++ b/rpc/client/mocks/network_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - - testing "testing" ) // NetworkClient is an autogenerated mock type for the NetworkClient type @@ -131,8 +129,13 @@ func (_m *NetworkClient) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, return r0, r1 } -// NewNetworkClient creates a new instance of NetworkClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewNetworkClient(t testing.TB) *NetworkClient { +type mockConstructorTestingTNewNetworkClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewNetworkClient creates a new instance of NetworkClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewNetworkClient(t mockConstructorTestingTNewNetworkClient) *NetworkClient { mock := &NetworkClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/remote_client.go b/rpc/client/mocks/remote_client.go index b4271dceff..029c712034 100644 --- a/rpc/client/mocks/remote_client.go +++ b/rpc/client/mocks/remote_client.go @@ -12,8 +12,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -229,6 +227,29 @@ func (_m *RemoteClient) BroadcastEvidence(_a0 context.Context, _a1 types.Evidenc return r0, r1 } +// BroadcastTx provides a mock function with given fields: _a0, _a1 +func (_m *RemoteClient) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // BroadcastTxAsync provides a mock function with given fields: _a0, _a1 func (_m *RemoteClient) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) @@ -812,8 +833,13 @@ func (_m *RemoteClient) Validators(ctx context.Context, height *int64, page *int return r0, r1 } -// NewRemoteClient creates a new instance of RemoteClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewRemoteClient(t testing.TB) *RemoteClient { +type mockConstructorTestingTNewRemoteClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewRemoteClient creates a new instance of RemoteClient. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewRemoteClient(t mockConstructorTestingTNewRemoteClient) *RemoteClient { mock := &RemoteClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/sign_client.go b/rpc/client/mocks/sign_client.go index 6c1e674476..008176295a 100644 --- a/rpc/client/mocks/sign_client.go +++ b/rpc/client/mocks/sign_client.go @@ -10,8 +10,6 @@ import ( coretypes "github.com/tendermint/tendermint/rpc/coretypes" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // SignClient is an autogenerated mock type for the SignClient type @@ -249,8 +247,13 @@ func (_m *SignClient) Validators(ctx context.Context, height *int64, page *int, return r0, r1 } -// NewSignClient creates a new instance of SignClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewSignClient(t testing.TB) *SignClient { +type mockConstructorTestingTNewSignClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewSignClient creates a new instance of SignClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewSignClient(t mockConstructorTestingTNewSignClient) *SignClient { mock := &SignClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/status_client.go b/rpc/client/mocks/status_client.go index eee3a471f6..bf22efa207 100644 --- a/rpc/client/mocks/status_client.go +++ b/rpc/client/mocks/status_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - - testing "testing" ) // StatusClient is an autogenerated mock type for the StatusClient type @@ -39,8 +37,13 @@ func (_m *StatusClient) Status(_a0 context.Context) (*coretypes.ResultStatus, er return r0, r1 } -// NewStatusClient creates a new instance of StatusClient. 
It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewStatusClient(t testing.TB) *StatusClient { +type mockConstructorTestingTNewStatusClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewStatusClient creates a new instance of StatusClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewStatusClient(t mockConstructorTestingTNewStatusClient) *StatusClient { mock := &StatusClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/subscription_client.go b/rpc/client/mocks/subscription_client.go index 4a520063d5..c476a4fc7a 100644 --- a/rpc/client/mocks/subscription_client.go +++ b/rpc/client/mocks/subscription_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - - testing "testing" ) // SubscriptionClient is an autogenerated mock type for the SubscriptionClient type @@ -74,8 +72,13 @@ func (_m *SubscriptionClient) UnsubscribeAll(ctx context.Context, subscriber str return r0 } -// NewSubscriptionClient creates a new instance of SubscriptionClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewSubscriptionClient(t testing.TB) *SubscriptionClient { +type mockConstructorTestingTNewSubscriptionClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewSubscriptionClient creates a new instance of SubscriptionClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewSubscriptionClient(t mockConstructorTestingTNewSubscriptionClient) *SubscriptionClient { mock := &SubscriptionClient{} mock.Mock.Test(t) diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index d1003791d7..ac8d3c00d1 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -16,6 +16,7 @@ import ( "github.com/dashevo/dashd-go/btcjson" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/privval" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" @@ -24,7 +25,6 @@ import ( "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpclocal "github.com/tendermint/tendermint/rpc/client/local" @@ -132,6 +132,10 @@ func TestClientOperations(t *testing.T) { }) t.Run("Batching", func(t *testing.T) { t.Run("JSONRPCCalls", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + logger := log.NewTestingLogger(t) c := getHTTPClient(t, logger, conf) testBatchedJSONRPCCalls(ctx, t, c) @@ -170,6 +174,10 @@ func TestClientOperations(t *testing.T) { require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") }) t.Run("ConcurrentJSONRPC", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + logger := log.NewTestingLogger(t) var wg sync.WaitGroup @@ -292,6 +300,10 @@ func TestClientMethodCalls(t *testing.T) { "first: %+v, doc: %s", first, string(doc)) }) t.Run("ABCIQuery", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // write something k, v, tx := MakeTxKV() status, err := c.Status(ctx) @@ -310,6 +322,10 @@ func TestClientMethodCalls(t *testing.T) { } }) 
t.Run("AppCalls", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // get an offset of height to avoid racing and guessing s, err := c.Status(ctx) require.NoError(t, err) @@ -410,6 +426,10 @@ func TestClientMethodCalls(t *testing.T) { // XXX Test proof }) t.Run("BlockchainInfo", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -440,6 +460,10 @@ func TestClientMethodCalls(t *testing.T) { assert.Contains(t, err.Error(), "can't be greater than max") }) t.Run("BroadcastTxCommit", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(ctx, tx) require.NoError(t, err, "%d: %+v", i, err) @@ -482,6 +506,10 @@ func TestClientMethodCalls(t *testing.T) { // TODO: more checks... }) t.Run("Block", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + const subscriber = "TestBlockEvents" eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(types.EventNewBlockValue).String()) @@ -516,6 +544,10 @@ func TestClientMethodCalls(t *testing.T) { }) t.Run("Evidence", func(t *testing.T) { t.Run("BroadcastDuplicateVote", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -718,6 +750,10 @@ func TestClientMethodCallsAdvanced(t *testing.T) { } }) t.Run("TxSearchWithTimeout", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + logger := log.NewTestingLogger(t) timeoutClient := getHTTPClientWithTimeout(t, logger, conf, 10*time.Second) diff --git a/rpc/coretypes/responses.go b/rpc/coretypes/responses.go index 5a3b865a0d..28ecd79405 100644 --- a/rpc/coretypes/responses.go +++ b/rpc/coretypes/responses.go @@ -70,7 +70,6 @@ type ResultBlockResults struct { 
TxsResults []*abci.ExecTxResult `json:"txs_results"` TotalGasUsed int64 `json:"total_gas_used,string"` FinalizeBlockEvents []abci.Event `json:"finalize_block_events"` - ValidatorUpdates []abci.ValidatorUpdate `json:"validator_updates"` ValidatorSetUpdate *abci.ValidatorSetUpdate `json:"validator_set_updates"` ConsensusParamUpdates *types.ConsensusParams `json:"consensus_param_updates"` } @@ -263,14 +262,12 @@ type ResultConsensusState struct { // CheckTx result type ResultBroadcastTx struct { - Code uint32 `json:"code"` - Data bytes.HexBytes `json:"data"` - Log string `json:"log"` - Codespace string `json:"codespace"` - MempoolError string `json:"mempool_error"` - Info string `json:"info"` - - Hash bytes.HexBytes `json:"hash"` + Code uint32 `json:"code"` + Data bytes.HexBytes `json:"data"` + Codespace string `json:"codespace"` + Hash bytes.HexBytes `json:"hash"` + + Info string `json:"info"` } // CheckTx and DeliverTx results diff --git a/rpc/coretypes/responses_test.go b/rpc/coretypes/responses_test.go index d4ced795a4..1b9be7e0bf 100644 --- a/rpc/coretypes/responses_test.go +++ b/rpc/coretypes/responses_test.go @@ -1,10 +1,19 @@ package coretypes import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" "testing" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto" + pbcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" "github.com/tendermint/tendermint/types" ) @@ -33,3 +42,71 @@ func TestStatusIndexer(t *testing.T) { assert.Equal(t, tc.expected, status.TxIndexEnabled()) } } + +// A regression test for https://github.com/tendermint/tendermint/issues/8583. 
+func TestResultBlockResults_regression8583(t *testing.T) { + const keyData = "0123456789abcdef0123456789abcdef0123456789abcdef" // 48 bytes + quorumHash := crypto.RandQuorumHash() + quorumHashB64 := base64.StdEncoding.EncodeToString(quorumHash.Bytes()) + + rsp := &ResultBlockResults{ + ValidatorSetUpdate: &abci.ValidatorSetUpdate{ + ValidatorUpdates: []abci.ValidatorUpdate{ + { + PubKey: &pbcrypto.PublicKey{ + Sum: &pbcrypto.PublicKey_Bls12381{Bls12381: []byte(keyData)}, + }, + Power: 400, + }, + }, + ThresholdPublicKey: pbcrypto.PublicKey{ + Sum: &pbcrypto.PublicKey_Bls12381{Bls12381: []byte(keyData)}, + }, + QuorumHash: quorumHash, + }, + } + + // Use compact here so the test data remain legible. The output from the + // marshaler will have whitespace folded out so we need to do that too for + // the comparison to be valid. + var buf bytes.Buffer + require.NoError(t, json.Compact(&buf, []byte(fmt.Sprintf(`{ + "height": "0", + "txs_results": null, + "total_gas_used": "0", + "finalize_block_events": null, + "validator_set_updates": { + "validator_updates": [ + { + "pub_key": { + "type": "tendermint/PubKeyBLS12381", + "value": "MDEyMzQ1Njc4OWFiY2RlZjAxMjM0NTY3ODlhYmNkZWYwMTIzNDU2Nzg5YWJjZGVm" + }, + "power": "400" + } + ], + "threshold_public_key": { + "type": "tendermint/PubKeyBLS12381", + "value": "MDEyMzQ1Njc4OWFiY2RlZjAxMjM0NTY3ODlhYmNkZWYwMTIzNDU2Nzg5YWJjZGVm" + }, + "quorum_hash": "%s" + }, + "consensus_param_updates": null +}`, quorumHashB64)))) + + bits, err := json.Marshal(rsp) + if err != nil { + t.Fatalf("Encoding block result: %v", err) + } + if diff := cmp.Diff(buf.String(), string(bits)); diff != "" { + t.Errorf("Marshaled result (-want, +got):\n%s", diff) + } + + back := new(ResultBlockResults) + if err := json.Unmarshal(bits, back); err != nil { + t.Fatalf("Unmarshaling: %v", err) + } + if diff := cmp.Diff(rsp, back); diff != "" { + t.Errorf("Unmarshaled result (-want, +got):\n%s", diff) + } +} diff --git a/rpc/jsonrpc/client/ws_client_test.go 
b/rpc/jsonrpc/client/ws_client_test.go index 5bbb5fc25a..0434f6461c 100644 --- a/rpc/jsonrpc/client/ws_client_test.go +++ b/rpc/jsonrpc/client/ws_client_test.go @@ -65,6 +65,10 @@ func (h *myTestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func TestWSClientReconnectsAfterReadFailure(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) // start server @@ -97,6 +101,10 @@ func TestWSClientReconnectsAfterReadFailure(t *testing.T) { } func TestWSClientReconnectsAfterWriteFailure(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) // start server @@ -127,6 +135,10 @@ func TestWSClientReconnectsAfterWriteFailure(t *testing.T) { } func TestWSClientReconnectFailure(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) // start server diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index 236db9b320..0586e3019f 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -340,6 +340,10 @@ func TestRPC(t *testing.T) { } }) t.Run("WSClientPingPong", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // TestWSClientPingPong checks that a client & server exchange pings // & pongs so connection stays alive. 
t.Cleanup(leaktest.CheckTimeout(t, 4*time.Second)) diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index 2eeded2d72..4f9e28faa8 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -81,9 +81,15 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han } } +func ensureBodyClose(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + next(w, r) + } +} + func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - // Since the pattern "/" matches all paths not matched by other registered patterns, // we check whether the path is indeed "/", otherwise return a 404 error if r.URL.Path != "/" { http.NotFound(w, r) diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index 77c74ffbcf..dd4a9d8e23 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -223,7 +223,7 @@ func TestRPCNotificationInBatch(t *testing.T) { func TestUnknownRPCPath(t *testing.T) { mux := testMux() - req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", nil) + req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", strings.NewReader("")) rec := httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 0b715835d0..fffc002f39 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -20,16 +20,27 @@ import ( // Config is a RPC server configuration. type Config struct { - // see netutil.LimitListener + // The maximum number of connections that will be accepted by the listener. 
+ // See https://godoc.org/golang.org/x/net/netutil#LimitListener MaxOpenConnections int - // mirrors http.Server#ReadTimeout + + // Used to set the HTTP server's per-request read timeout. + // See https://godoc.org/net/http#Server.ReadTimeout ReadTimeout time.Duration - // mirrors http.Server#WriteTimeout + + // Used to set the HTTP server's per-request write timeout. Note that this + // affects ALL methods on the server, so it should not be set too low. This + // should be used as a safety valve, not a resource-control timeout. + // + // See https://godoc.org/net/http#Server.WriteTimeout WriteTimeout time.Duration - // MaxBodyBytes controls the maximum number of bytes the - // server will read parsing the request body. + + // Controls the maximum number of bytes the server will read parsing the + // request body. MaxBodyBytes int64 - // mirrors http.Server#MaxHeaderBytes + + // Controls the maximum size of a request header. + // See https://godoc.org/net/http#Server.MaxHeaderBytes MaxHeaderBytes int } @@ -38,16 +49,16 @@ func DefaultConfig() *Config { return &Config{ MaxOpenConnections: 0, // unlimited ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxBodyBytes: int64(1000000), // 1MB - MaxHeaderBytes: 1 << 20, // same as the net/http default + WriteTimeout: 0, // no default timeout + MaxBodyBytes: 1000000, // 1MB + MaxHeaderBytes: 1 << 20, // same as the net/http default } } // Serve creates a http.Server and calls Serve with the given listener. It // wraps handler to recover panics and limit the request body size. 
func Serve(ctx context.Context, listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error { - logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listener.Addr())) + logger.Info("Starting RPC HTTP server on", "addr", listener.Addr()) h := recoverAndLogHandler(MaxBytesHandler(handler, config.MaxBodyBytes), logger) s := &http.Server{ Handler: h, diff --git a/rpc/jsonrpc/server/rpc_func.go b/rpc/jsonrpc/server/rpc_func.go index 8eba287283..947f8be5b7 100644 --- a/rpc/jsonrpc/server/rpc_func.go +++ b/rpc/jsonrpc/server/rpc_func.go @@ -9,11 +9,16 @@ import ( "net/http" "reflect" "strings" + "time" "github.com/tendermint/tendermint/libs/log" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) +// DefaultRPCTimeout is the default context timeout for calls to any RPC method +// that does not override it with a more specific timeout. +const DefaultRPCTimeout = 60 * time.Second + // RegisterRPCFuncs adds a route to mux for each non-websocket function in the // funcMap, and also a root JSON-RPC POST handler. func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger log.Logger) { @@ -21,22 +26,23 @@ func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger lo if fn.ws { continue // skip websocket endpoints, not usable via GET calls } - mux.HandleFunc("/"+name, makeHTTPHandler(fn, logger)) + mux.HandleFunc("/"+name, ensureBodyClose(makeHTTPHandler(fn, logger))) } // Endpoints for POST. - mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger))) + mux.HandleFunc("/", ensureBodyClose(handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger)))) } // Function introspection // RPCFunc contains the introspected type information for a function. 
type RPCFunc struct { - f reflect.Value // underlying rpc function - param reflect.Type // the parameter struct, or nil - result reflect.Type // the non-error result type, or nil - args []argInfo // names and type information (for URL decoding) - ws bool // websocket only + f reflect.Value // underlying rpc function + param reflect.Type // the parameter struct, or nil + result reflect.Type // the non-error result type, or nil + args []argInfo // names and type information (for URL decoding) + timeout time.Duration // default request timeout, 0 means none + ws bool // websocket only } // argInfo records the name of a field, along with a bit to tell whether the @@ -52,6 +58,12 @@ type argInfo struct { // with the resulting argument value. It reports an error if parameter parsing // fails, otherwise it returns the result from the wrapped function. func (rf *RPCFunc) Call(ctx context.Context, params json.RawMessage) (interface{}, error) { + // If ctx has its own deadline we will respect it; otherwise use rf.timeout. + if _, ok := ctx.Deadline(); !ok && rf.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, rf.timeout) + defer cancel() + } args, err := rf.parseParams(ctx, params) if err != nil { return nil, err @@ -74,6 +86,11 @@ func (rf *RPCFunc) Call(ctx context.Context, params json.RawMessage) (interface{ return returns[0].Interface(), nil } +// Timeout updates rf to include a default timeout for calls to rf. This +// timeout is used if one is not already provided on the request context. +// Setting d == 0 means there will be no timeout. Returns rf to allow chaining. +func (rf *RPCFunc) Timeout(d time.Duration) *RPCFunc { rf.timeout = d; return rf } + // parseParams parses the parameters of a JSON-RPC request and returns the // corresponding argument values. On success, the first argument value will be // the value of ctx. 
@@ -129,7 +146,9 @@ func (rf *RPCFunc) adjustParams(data []byte) (json.RawMessage, error) { // func(context.Context, *T) (R, error) // // for an arbitrary struct type T and type R. NewRPCFunc will panic if f does -// not have one of these forms. +// not have one of these forms. A newly-constructed RPCFunc has a default +// timeout of DefaultRPCTimeout; use the Timeout method to adjust this as +// needed. func NewRPCFunc(f interface{}) *RPCFunc { rf, err := newRPCFunc(f) if err != nil { @@ -215,10 +234,11 @@ func newRPCFunc(f interface{}) (*RPCFunc, error) { } return &RPCFunc{ - f: fv, - param: ptype, - result: rtype, - args: args, + f: fv, + param: ptype, + result: rtype, + args: args, + timeout: DefaultRPCTimeout, // until overridden }, nil } diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index f9e70dfca5..bedb64d886 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -82,9 +82,50 @@ paths: /broadcast_tx_sync: get: summary: Returns with the response from CheckTx. Does not wait for DeliverTx result. + deprecated: true tags: - Tx operationId: broadcast_tx_sync + description: | + This method is deprecated in Tendermint v0.36, and will be + removed in v0.37. Use `broadcast_tx`, which has similar + semantics. + + This method blocks until CheckTx returns and reports its result, but + does not wait for the transaction to be included in a block. To know + when the transaction is included in a block, check for the transaction + event using JSON-RPC. See + https://docs.tendermint.com/master/app-dev/subscribing-to-events-via-websocket.html + + See https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting + for formatting/encoding rules. 
+ parameters: + - in: query + name: tx + required: true + schema: + type: string + example: "456" + description: The transaction + responses: + "200": + description: Empty + content: + application/json: + schema: + $ref: "#/components/schemas/BroadcastTxResponse" + "500": + description: Error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + /broadcast_tx: + get: + summary: Returns with the response from CheckTx. Does not wait for DeliverTx result. + tags: + - Tx + operationId: broadcast_tx description: | This method blocks until CheckTx returns and reports its result, but does not wait for the transaction to be included in a block. To know @@ -118,10 +159,14 @@ paths: /broadcast_tx_async: get: summary: Returns right away, with no response. Does not wait for CheckTx nor DeliverTx results. + deprecated: true tags: - Tx operationId: broadcast_tx_async description: | + This method is deprecated in Tendermint v0.36, and will be + removed in v0.37. Use `broadcast_tx`. + This method submits the transaction and returns immediately without waiting for the transaction to be checked (CheckTx) or committed. Too know when the transaction is included in a block, you can check for the @@ -154,6 +199,7 @@ paths: /broadcast_tx_commit: get: summary: Returns with the responses from CheckTx and DeliverTx. + deprecated: true tags: - Tx operationId: broadcast_tx_commit @@ -165,7 +211,7 @@ paths: succeed and report the failing (non-zero) ABCI result code. WARNING: Use this only for testing and development. For production use, - call broadcast_tx_sync or broadcast_tx_async. + call broadcast_tx. To know when a transaction is included in a block, check for the transaction event using JSON-RPC. See @@ -260,10 +306,9 @@ paths: operationId: events description: | Fetch a batch of events posted by the consensus node and matching a - specified query. + specified query string. 
- The query grammar is defined in - https://godoc.org/github.com/tendermint/tendermint/internal/pubsub/query/syntax. + The query grammar is defined in [pubsub/query/syntax](https://godoc.org/github.com/tendermint/tendermint/internal/pubsub/query/syntax). An empty query matches all events; otherwise a query comprises one or more terms comparing event metadata to target values. For example, to select new block events: @@ -275,13 +320,13 @@ paths: tm.event = 'Tx' AND tx.hash = 'EA7B33F' - The comparison operators include "=", "<", "<=", ">", ">=", and - "CONTAINS". Operands may be strings (in single quotes), numbers, dates, - or timestamps. In addition, the "EXISTS" operator allows you to check + The comparison operators include `=`, `<`, `<=`, `>`, `>=`, and + `CONTAINS`. Operands may be strings (in single quotes), numbers, dates, + or timestamps. In addition, the `EXISTS` operator allows you to check for the presence of an attribute regardless of its value. - Tendermint defines a tm.event attribute for all events. Transactions - are also assigned tx.hash and tx.height attributes. Other attributes + Tendermint defines a `tm.event` attribute for all events. Transactions + are also assigned `tx.hash` and `tx.height` attributes. Other attributes are provided by the application as ABCI Event records. The name of the event in the query is formed by combining the type and attribute key with a period. For example, given: @@ -295,16 +340,16 @@ paths: }, }} - the query may refer to the names "reward.address", "reward.amount", and - "reward.balance", as in: + the query may refer to the names`"reward.address`,`"reward.amount`, and + `reward.balance`, as in: reward.address EXISTS AND reward.balance > 45 The node maintains a log of all events within an operator-defined time window. The /events method returns the most recent items from the log that match the query. Each item returned includes a cursor that marks - its location in the log. 
Cursors can be passed via the "before" and - "after" parameters to fetch events earlier in the log. + its location in the log. Cursors can be passed via the `before` and + `after` parameters to fetch events earlier in the log. parameters: - in: query name: filter @@ -1508,7 +1553,7 @@ components: description: Event filter query type: object properties: - filter: + query: type: string example: "tm.event = 'Tx'" EventsResponse: diff --git a/scripts/confix/condiff/condiff.go b/scripts/condiff/condiff.go similarity index 100% rename from scripts/confix/condiff/condiff.go rename to scripts/condiff/condiff.go diff --git a/scripts/confix/confix.go b/scripts/confix/confix.go index 6677f0b49a..29c5bb7531 100644 --- a/scripts/confix/confix.go +++ b/scripts/confix/confix.go @@ -1,23 +1,17 @@ -// Program confix applies fixes to a Tendermint TOML configuration file to -// update a file created with an older version of Tendermint to a compatible -// format for a newer version. +// Program confix applies changes to a Tendermint TOML configuration file, to +// update configurations created with an older version of Tendermint to a +// compatible format for a newer version. 
package main import ( - "bytes" "context" - "errors" "flag" "fmt" "log" "os" "path/filepath" - "github.com/creachadair/atomicfile" - "github.com/creachadair/tomledit" - "github.com/creachadair/tomledit/transform" - "github.com/spf13/viper" - "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/libs/confix" ) func init() { @@ -41,6 +35,7 @@ Options: var ( configPath = flag.String("config", "", "Config file path (required)") outPath = flag.String("out", "", "Output file path (default stdout)") + doVerbose = flag.Bool("v", false, "Log changes to stderr") ) func main() { @@ -49,115 +44,11 @@ func main() { log.Fatal("You must specify a non-empty -config path") } - doc, err := LoadConfig(*configPath) - if err != nil { - log.Fatalf("Loading config: %v", err) + ctx := context.Background() + if *doVerbose { + ctx = confix.WithLogWriter(ctx, os.Stderr) } - - ctx := transform.WithLogWriter(context.Background(), os.Stderr) - if err := ApplyFixes(ctx, doc); err != nil { - log.Fatalf("Updating %q: %v", *configPath, err) - } - - var buf bytes.Buffer - if err := tomledit.Format(&buf, doc); err != nil { - log.Fatalf("Formatting config: %v", err) - } - - // Verify that Tendermint can parse the results after our edits. - if err := CheckValid(buf.Bytes()); err != nil { - log.Fatalf("Updated config is invalid: %v", err) - } - - if *outPath == "" { - os.Stdout.Write(buf.Bytes()) - } else if err := atomicfile.WriteData(*outPath, buf.Bytes(), 0600); err != nil { - log.Fatalf("Writing output: %v", err) - } -} - -// ApplyFixes transforms doc and reports whether it succeeded. -func ApplyFixes(ctx context.Context, doc *tomledit.Document) error { - // Check what version of Tendermint might have created this config file, as - // a safety check for the updates we are about to make. 
- tmVersion := GuessConfigVersion(doc) - if tmVersion == vUnknown { - return errors.New("cannot tell what Tendermint version created this config") - } else if tmVersion < v34 || tmVersion > v36 { - // TODO(creachadair): Add in rewrites for older versions. This will - // require some digging to discover what the changes were. The upgrade - // instructions do not give specifics. - return fmt.Errorf("unable to update version %s config", tmVersion) - } - return plan.Apply(ctx, doc) -} - -// LoadConfig loads and parses the TOML document from path. -func LoadConfig(path string) (*tomledit.Document, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - return tomledit.Parse(f) -} - -const ( - vUnknown = "" - v32 = "v0.32" - v33 = "v0.33" - v34 = "v0.34" - v35 = "v0.35" - v36 = "v0.36" -) - -// GuessConfigVersion attempts to figure out which version of Tendermint -// created the specified config document. It returns "" if the creating version -// cannot be determined, otherwise a string of the form "vX.YY". 
-func GuessConfigVersion(doc *tomledit.Document) string { - hasDisableWS := doc.First("rpc", "experimental-disable-websocket") != nil - hasUseLegacy := doc.First("p2p", "use-legacy") != nil // v0.35 only - if hasDisableWS && !hasUseLegacy { - return v36 - } - - hasBlockSync := transform.FindTable(doc, "blocksync") != nil // add: v0.35 - hasStateSync := transform.FindTable(doc, "statesync") != nil // add: v0.34 - if hasBlockSync && hasStateSync { - return v35 - } else if hasStateSync { - return v34 - } - - hasIndexKeys := doc.First("tx_index", "index_keys") != nil // add: v0.33 - hasIndexTags := doc.First("tx_index", "index_tags") != nil // rem: v0.33 - if hasIndexKeys && !hasIndexTags { - return v33 - } - - hasFastSync := transform.FindTable(doc, "fastsync") != nil // add: v0.32 - if hasIndexTags && hasFastSync { - return v32 + if err := confix.Upgrade(ctx, *configPath, *outPath); err != nil { + log.Fatalf("Upgrading config: %v", err) } - - // Something older, probably. - return vUnknown -} - -// CheckValid checks whether the specified config appears to be a valid -// Tendermint config file. This emulates how the node loads the config. 
-func CheckValid(data []byte) error { - v := viper.New() - v.SetConfigType("toml") - - if err := v.ReadConfig(bytes.NewReader(data)); err != nil { - return fmt.Errorf("reading config: %w", err) - } - - var cfg config.Config - if err := v.Unmarshal(&cfg); err != nil { - return fmt.Errorf("decoding config: %w", err) - } - - return cfg.ValidateBasic() } diff --git a/scripts/keymigrate/migrate.go b/scripts/keymigrate/migrate.go index ca2c528e2f..2c5873427e 100644 --- a/scripts/keymigrate/migrate.go +++ b/scripts/keymigrate/migrate.go @@ -26,7 +26,7 @@ type ( migrateFunc func(keyID) (keyID, error) ) -func getAllLegacyKeys(db dbm.DB) ([]keyID, error) { +func getAllLegacyKeys(db dbm.DB, storeName string) ([]keyID, error) { var out []keyID iter, err := db.Iterator(nil, nil) @@ -37,9 +37,16 @@ func getAllLegacyKeys(db dbm.DB) ([]keyID, error) { for ; iter.Valid(); iter.Next() { k := iter.Key() - // make sure it's a key with a legacy format, and skip - // all other keys, to make it safe to resume the migration. - if !checkKeyType(k).isLegacy() { + // make sure it's a key that we'd expect to see in + // this database, with a legacy format, and skip all + // other keys, to make it safe to resume the + // migration. 
+ kt, err := checkKeyType(k, storeName) + if err != nil { + return nil, err + } + + if !kt.isLegacy() { continue } @@ -86,240 +93,407 @@ const ( var prefixes = []struct { prefix []byte ktype keyType + check func(keyID) bool }{ - {[]byte("consensusParamsKey:"), consensusParamsKey}, - {[]byte("abciResponsesKey:"), abciResponsesKey}, - {[]byte("validatorsKey:"), validatorsKey}, - {[]byte("stateKey"), stateStoreKey}, - {[]byte("H:"), blockMetaKey}, - {[]byte("P:"), blockPartKey}, - {[]byte("C:"), commitKey}, - {[]byte("SC:"), seenCommitKey}, - {[]byte("BH:"), blockHashKey}, - {[]byte("size"), lightSizeKey}, - {[]byte("lb/"), lightBlockKey}, - {[]byte("\x00"), evidenceCommittedKey}, - {[]byte("\x01"), evidencePendingKey}, + {[]byte(legacyConsensusParamsPrefix), consensusParamsKey, nil}, + {[]byte(legacyAbciResponsePrefix), abciResponsesKey, nil}, + {[]byte(legacyValidatorPrefix), validatorsKey, nil}, + {[]byte(legacyStateKeyPrefix), stateStoreKey, nil}, + {[]byte(legacyBlockMetaPrefix), blockMetaKey, nil}, + {[]byte(legacyBlockPartPrefix), blockPartKey, nil}, + {[]byte(legacyCommitPrefix), commitKey, nil}, + {[]byte(legacySeenCommitPrefix), seenCommitKey, nil}, + {[]byte(legacyBlockHashPrefix), blockHashKey, nil}, + {[]byte(legacyLightSizePrefix), lightSizeKey, nil}, + {[]byte(legacyLightBlockPrefix), lightBlockKey, nil}, + {[]byte(legacyEvidenceComittedPrefix), evidenceCommittedKey, checkEvidenceKey}, + {[]byte(legacyEvidencePendingPrefix), evidencePendingKey, checkEvidenceKey}, } -// checkKeyType classifies a candidate key based on its structure. -func checkKeyType(key keyID) keyType { - for _, p := range prefixes { - if bytes.HasPrefix(key, p.prefix) { - return p.ktype - } - } - - // A legacy event key has the form: - // - // / / / - // - // Transaction hashes are stored as a raw binary hash with no prefix. 
- // - // Because a hash can contain any byte, it is possible (though unlikely) - // that a hash could have the correct form for an event key, in which case - // we would translate it incorrectly. To reduce the likelihood of an - // incorrect interpretation, we parse candidate event keys and check for - // some structural properties before making a decision. - // - // Note, though, that nothing prevents event names or values from containing - // additional "/" separators, so the parse has to be forgiving. - parts := bytes.Split(key, []byte("/")) - if len(parts) >= 4 { - // Special case for tx.height. - if len(parts) == 4 && bytes.Equal(parts[0], []byte("tx.height")) { - return txHeightKey - } - - // The name cannot be empty, but we don't know where the name ends and - // the value begins, so insist that there be something. - var n int - for _, part := range parts[:len(parts)-2] { - n += len(part) - } - // Check whether the last two fields could be .../height/index. - if n > 0 && isDecimal(parts[len(parts)-1]) && isDecimal(parts[len(parts)-2]) { - return abciEventKey - } - } - - // If we get here, it's not an event key. Treat it as a hash if it is the - // right length. Note that it IS possible this could collide with the - // translation of some other key (though not a hash, since encoded hashes - // will be longer). The chance of that is small, but there is nothing we can - // do to detect it. 
- if len(key) == 32 { - return txHashKey - } - return nonLegacyKey -} +const ( + legacyConsensusParamsPrefix = "consensusParamsKey:" + legacyAbciResponsePrefix = "abciResponsesKey:" + legacyValidatorPrefix = "validatorsKey:" + legacyStateKeyPrefix = "stateKey" + legacyBlockMetaPrefix = "H:" + legacyBlockPartPrefix = "P:" + legacyCommitPrefix = "C:" + legacySeenCommitPrefix = "SC:" + legacyBlockHashPrefix = "BH:" + legacyLightSizePrefix = "size" + legacyLightBlockPrefix = "lb/" + legacyEvidenceComittedPrefix = "\x00" + legacyEvidencePendingPrefix = "\x01" +) -// isDecimal reports whether buf is a non-empty sequence of Unicode decimal -// digits. -func isDecimal(buf []byte) bool { - for _, c := range buf { - if c < '0' || c > '9' { - return false - } - } - return len(buf) != 0 +type migrationDefinition struct { + name string + storeName string + prefix []byte + ktype keyType + check func(keyID) bool + transform migrateFunc } -func migrateKey(key keyID) (keyID, error) { - switch checkKeyType(key) { - case blockMetaKey: - val, err := strconv.Atoi(string(key[2:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(0), int64(val)) - case blockPartKey: - parts := bytes.Split(key[2:], []byte(":")) - if len(parts) != 2 { - return nil, fmt.Errorf("block parts key has %d rather than 2 components", - len(parts)) - } - valOne, err := strconv.Atoi(string(parts[0])) - if err != nil { - return nil, err - } - - valTwo, err := strconv.Atoi(string(parts[1])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(1), int64(valOne), int64(valTwo)) - case commitKey: - val, err := strconv.Atoi(string(key[2:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(2), int64(val)) - case seenCommitKey: - val, err := strconv.Atoi(string(key[3:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(3), int64(val)) - case blockHashKey: - hash := string(key[3:]) - if len(hash)%2 == 1 
{ - hash = "0" + hash - } - val, err := hex.DecodeString(hash) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(4), string(val)) - case validatorsKey: - val, err := strconv.Atoi(string(key[14:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(5), int64(val)) - case consensusParamsKey: - val, err := strconv.Atoi(string(key[19:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(6), int64(val)) - case abciResponsesKey: - val, err := strconv.Atoi(string(key[17:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(7), int64(val)) - case stateStoreKey: - return orderedcode.Append(nil, int64(8)) - case evidenceCommittedKey: - return convertEvidence(key, 9) - case evidencePendingKey: - return convertEvidence(key, 10) - case lightBlockKey: - if len(key) < 24 { - return nil, fmt.Errorf("light block evidence %q in invalid format", string(key)) - } - - val, err := strconv.Atoi(string(key[len(key)-20:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(11), int64(val)) - case lightSizeKey: - return orderedcode.Append(nil, int64(12)) - case txHeightKey: - parts := bytes.Split(key, []byte("/")) - if len(parts) != 4 { - return nil, fmt.Errorf("key has %d parts rather than 4", len(parts)) - } - parts = parts[1:] // drop prefix +var migrations = []migrationDefinition{ + { + name: "consensus-params", + storeName: "state", + prefix: []byte(legacyConsensusParamsPrefix), + ktype: consensusParamsKey, + transform: func(key keyID) (keyID, error) { + val, err := strconv.Atoi(string(key[19:])) + if err != nil { + return nil, err + } - elems := make([]interface{}, 0, len(parts)+1) - elems = append(elems, "tx.height") + return orderedcode.Append(nil, int64(6), int64(val)) + }, + }, + { + name: "abci-responses", + storeName: "state", + prefix: []byte(legacyAbciResponsePrefix), + ktype: abciResponsesKey, + transform: func(key keyID) 
(keyID, error) { + val, err := strconv.Atoi(string(key[17:])) + if err != nil { + return nil, err + } - for idx, pt := range parts { - val, err := strconv.Atoi(string(pt)) + return orderedcode.Append(nil, int64(7), int64(val)) + }, + }, + { + name: "validators", + storeName: "state", + prefix: []byte(legacyValidatorPrefix), + ktype: validatorsKey, + transform: func(key keyID) (keyID, error) { + val, err := strconv.Atoi(string(key[14:])) if err != nil { return nil, err } - if idx == 0 { - elems = append(elems, fmt.Sprintf("%d", val)) - } else { - elems = append(elems, int64(val)) + + return orderedcode.Append(nil, int64(5), int64(val)) + }, + }, + { + name: "tendermint-state", + storeName: "state", + prefix: []byte(legacyStateKeyPrefix), + ktype: stateStoreKey, + transform: func(key keyID) (keyID, error) { + return orderedcode.Append(nil, int64(8)) + }, + }, + { + name: "block-meta", + storeName: "blockstore", + prefix: []byte(legacyBlockMetaPrefix), + ktype: blockMetaKey, + transform: func(key keyID) (keyID, error) { + val, err := strconv.Atoi(string(key[2:])) + if err != nil { + return nil, err } - } - return orderedcode.Append(nil, elems...) 
- case abciEventKey: - parts := bytes.Split(key, []byte("/")) + return orderedcode.Append(nil, int64(0), int64(val)) + }, + }, + { + name: "block-part", + storeName: "blockstore", + prefix: []byte(legacyBlockPartPrefix), + ktype: blockPartKey, + transform: func(key keyID) (keyID, error) { + parts := bytes.Split(key[2:], []byte(":")) + if len(parts) != 2 { + return nil, fmt.Errorf("block parts key has %d rather than 2 components", + len(parts)) + } + valOne, err := strconv.Atoi(string(parts[0])) + if err != nil { + return nil, err + } - elems := make([]interface{}, 0, 4) - if len(parts) == 4 { - elems = append(elems, string(parts[0]), string(parts[1])) + valTwo, err := strconv.Atoi(string(parts[1])) + if err != nil { + return nil, err + } - val, err := strconv.Atoi(string(parts[2])) + return orderedcode.Append(nil, int64(1), int64(valOne), int64(valTwo)) + }, + }, + { + name: "commit", + storeName: "blockstore", + prefix: []byte(legacyCommitPrefix), + ktype: commitKey, + transform: func(key keyID) (keyID, error) { + val, err := strconv.Atoi(string(key[2:])) if err != nil { return nil, err } - elems = append(elems, int64(val)) - val2, err := strconv.Atoi(string(parts[3])) + return orderedcode.Append(nil, int64(2), int64(val)) + }, + }, + { + name: "seen-commit", + storeName: "blockstore", + prefix: []byte(legacySeenCommitPrefix), + ktype: seenCommitKey, + transform: func(key keyID) (keyID, error) { + val, err := strconv.Atoi(string(key[3:])) if err != nil { return nil, err } - elems = append(elems, int64(val2)) - } else { - elems = append(elems, string(parts[0])) - parts = parts[1:] - val, err := strconv.Atoi(string(parts[len(parts)-1])) + return orderedcode.Append(nil, int64(3), int64(val)) + }, + }, + { + name: "block-hash", + storeName: "blockstore", + prefix: []byte(legacyBlockHashPrefix), + ktype: blockHashKey, + transform: func(key keyID) (keyID, error) { + hash := string(key[3:]) + if len(hash)%2 == 1 { + hash = "0" + hash + } + val, err := 
hex.DecodeString(hash) if err != nil { return nil, err } - val2, err := strconv.Atoi(string(parts[len(parts)-2])) + return orderedcode.Append(nil, int64(4), string(val)) + }, + }, + { + name: "light-size", + storeName: "light", + prefix: []byte(legacyLightSizePrefix), + ktype: lightSizeKey, + transform: func(key keyID) (keyID, error) { + return orderedcode.Append(nil, int64(12)) + }, + }, + { + name: "light-block", + storeName: "light", + prefix: []byte(legacyLightBlockPrefix), + ktype: lightBlockKey, + transform: func(key keyID) (keyID, error) { + if len(key) < 24 { + return nil, fmt.Errorf("light block evidence %q in invalid format", string(key)) + } + + val, err := strconv.Atoi(string(key[len(key)-20:])) if err != nil { return nil, err } - appKey := bytes.Join(parts[:len(parts)-3], []byte("/")) - elems = append(elems, string(appKey), int64(val), int64(val2)) + return orderedcode.Append(nil, int64(11), int64(val)) + }, + }, + { + name: "evidence-pending", + storeName: "evidence", + prefix: []byte(legacyEvidencePendingPrefix), + ktype: evidencePendingKey, + transform: func(key keyID) (keyID, error) { + return convertEvidence(key, 10) + }, + }, + { + name: "evidence-committed", + storeName: "evidence", + prefix: []byte(legacyEvidenceComittedPrefix), + ktype: evidenceCommittedKey, + transform: func(key keyID) (keyID, error) { + return convertEvidence(key, 9) + }, + }, + { + name: "event-tx", + storeName: "tx_index", + prefix: nil, + ktype: txHeightKey, + transform: func(key keyID) (keyID, error) { + parts := bytes.Split(key, []byte("/")) + if len(parts) != 4 { + return nil, fmt.Errorf("key has %d parts rather than 4", len(parts)) + } + parts = parts[1:] // drop prefix + + elems := make([]interface{}, 0, len(parts)+1) + elems = append(elems, "tx.height") + + for idx, pt := range parts { + val, err := strconv.Atoi(string(pt)) + if err != nil { + return nil, err + } + if idx == 0 { + elems = append(elems, fmt.Sprintf("%d", val)) + } else { + elems = append(elems, 
int64(val)) + } + } + + return orderedcode.Append(nil, elems...) + }, + }, + { + name: "event-abci", + storeName: "tx_index", + prefix: nil, + ktype: abciEventKey, + transform: func(key keyID) (keyID, error) { + parts := bytes.Split(key, []byte("/")) + + elems := make([]interface{}, 0, 4) + if len(parts) == 4 { + elems = append(elems, string(parts[0]), string(parts[1])) + + val, err := strconv.Atoi(string(parts[2])) + if err != nil { + return nil, err + } + elems = append(elems, int64(val)) + + val2, err := strconv.Atoi(string(parts[3])) + if err != nil { + return nil, err + } + elems = append(elems, int64(val2)) + } else { + elems = append(elems, string(parts[0])) + parts = parts[1:] + + val, err := strconv.Atoi(string(parts[len(parts)-1])) + if err != nil { + return nil, err + } + + val2, err := strconv.Atoi(string(parts[len(parts)-2])) + if err != nil { + return nil, err + } + + appKey := bytes.Join(parts[:len(parts)-3], []byte("/")) + elems = append(elems, string(appKey), int64(val), int64(val2)) + } + + return orderedcode.Append(nil, elems...) + }, + }, + { + name: "event-tx-hash", + storeName: "tx_index", + prefix: nil, + ktype: txHashKey, + transform: func(key keyID) (keyID, error) { + return orderedcode.Append(nil, "tx.hash", string(key)) + }, + }, +} + +// checkKeyType classifies a candidate key based on its structure. +func checkKeyType(key keyID, storeName string) (keyType, error) { + var migrations []migrationDefinition + for _, m := range migrations { + if m.storeName != storeName { + continue + } + if m.prefix == nil && storeName == "tx_index" { + // A legacy event key has the form: + // + // / / / + // + // Transaction hashes are stored as a raw binary hash with no prefix. + // + // Note, though, that nothing prevents event names or values from containing + // additional "/" separators, so the parse has to be forgiving. + parts := bytes.Split(key, []byte("/")) + if len(parts) >= 4 { + // Special case for tx.height. 
+ if len(parts) == 4 && bytes.Equal(parts[0], []byte("tx.height")) { + return txHeightKey, nil + } + + // The name cannot be empty, but we don't know where the name ends and + // the value begins, so insist that there be something. + var n int + for _, part := range parts[:len(parts)-2] { + n += len(part) + } + // Check whether the last two fields could be .../height/index. + if n > 0 && isDecimal(parts[len(parts)-1]) && isDecimal(parts[len(parts)-2]) { + return abciEventKey, nil + } + } + + // If we get here, it's not an event key. Treat it as a hash if it is the + // right length. Note that it IS possible this could collide with the + // translation of some other key (though not a hash, since encoded hashes + // will be longer). The chance of that is small, but there is nothing we can + // do to detect it. + if len(key) == 32 { + return txHashKey, nil + } + } else if bytes.HasPrefix(key, m.prefix) { + if m.check == nil || m.check(key) { + return m.ktype, nil + } + // we have an expected legacy prefix but that + // didn't pass the check. This probably means + // the evidence data is currupt (based on the + // defined migrations) best to error here. + return -1, fmt.Errorf("in store %q, key %q exists but is not a valid key of type %q", storeName, key, m.ktype) } - return orderedcode.Append(nil, elems...) - case txHashKey: - return orderedcode.Append(nil, "tx.hash", string(key)) - default: - return nil, fmt.Errorf("key %q is in the wrong format", string(key)) + // if we get here, the key in question is either + // migrated or of a different type. We can't break + // here because there are more than one key type in a + // specific database, so we have to keep iterating. } + // if we've looked at every migration and not identified a key + // type, then the key has been migrated *or* we (possibly, but + // very unlikely have data that is in the wrong place or the + // sign of corruption.) 
In either case we should not attempt + // more migrations at this point + + return nonLegacyKey, nil +} + +// isDecimal reports whether buf is a non-empty sequence of Unicode decimal +// digits. +func isDecimal(buf []byte) bool { + for _, c := range buf { + if c < '0' || c > '9' { + return false + } + } + return len(buf) != 0 +} + +func migrateKey(key keyID, storeName string) (keyID, error) { + kt, err := checkKeyType(key, storeName) + if err != nil { + return nil, err + } + for _, migration := range migrations { + if migration.storeName != storeName { + continue + } + if kt == migration.ktype { + return migration.transform(key) + } + } + + return nil, fmt.Errorf("key %q is in the wrong format", string(key)) } func convertEvidence(key keyID, newPrefix int64) ([]byte, error) { @@ -342,7 +516,58 @@ func convertEvidence(key keyID, newPrefix int64) ([]byte, error) { return orderedcode.Append(nil, newPrefix, binary.BigEndian.Uint64(hb), string(evidenceHash)) } -func replaceKey(db dbm.DB, key keyID, gooseFn migrateFunc) error { +// checkEvidenceKey reports whether a candidate key with one of the legacy +// evidence prefixes has the correct structure for a legacy evidence key. +// +// This check is needed because transaction hashes are stored without a prefix, +// so checking the one-byte prefix alone is not enough to distinguish them. +// Legacy evidence keys are suffixed with a string of the format: +// +// "%0.16X/%X" +// +// where the first element is the height and the second is the hash. 
Thus, we +// check +func checkEvidenceKey(key keyID) bool { + parts := bytes.SplitN(key[1:], []byte("/"), 2) + if len(parts) != 2 || len(parts[0]) != 16 || !isHex(parts[0]) || !isHex(parts[1]) { + return false + } + return true +} + +func isHex(data []byte) bool { + for _, b := range data { + if ('0' <= b && b <= '9') || ('a' <= b && b <= 'f') || ('A' <= b && b <= 'F') { + continue + } + return false + } + return len(data) != 0 +} + +func getMigrationFunc(storeName string, key keyID) (*migrationDefinition, error) { + for idx := range migrations { + migration := migrations[idx] + + if migration.storeName == storeName { + if migration.prefix == nil { + return &migration, nil + } + if bytes.HasPrefix(migration.prefix, key) { + return &migration, nil + } + } + } + return nil, fmt.Errorf("no migration defined for data store %q and key %q", storeName, key) +} + +func replaceKey(db dbm.DB, storeName string, key keyID) error { + migration, err := getMigrationFunc(storeName, key) + if err != nil { + return err + } + gooseFn := migration.transform + exists, err := db.Has(key) if err != nil { return err @@ -401,8 +626,8 @@ func replaceKey(db dbm.DB, key keyID, gooseFn migrateFunc) error { // The context allows for a safe termination of the operation // (e.g connected to a singal handler,) to abort the operation // in-between migration operations. 
-func Migrate(ctx context.Context, db dbm.DB) error { - keys, err := getAllLegacyKeys(db) +func Migrate(ctx context.Context, storeName string, db dbm.DB) error { + keys, err := getAllLegacyKeys(db, storeName) if err != nil { return err } @@ -419,7 +644,7 @@ func Migrate(ctx context.Context, db dbm.DB) error { if err := ctx.Err(); err != nil { return err } - return replaceKey(db, key, migrateKey) + return replaceKey(db, storeName, key) }) } if g.Wait() != nil { diff --git a/scripts/keymigrate/migrate_test.go b/scripts/keymigrate/migrate_test.go index b2727a5df3..b2e7a4ab8e 100644 --- a/scripts/keymigrate/migrate_test.go +++ b/scripts/keymigrate/migrate_test.go @@ -1,16 +1,12 @@ package keymigrate import ( - "bytes" - "context" - "errors" "fmt" - "math" + "strings" "testing" "github.com/google/orderedcode" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" ) func makeKey(t *testing.T, elems ...interface{}) []byte { @@ -21,6 +17,7 @@ func makeKey(t *testing.T, elems ...interface{}) []byte { } func getLegacyPrefixKeys(val int) map[string][]byte { + vstr := fmt.Sprintf("%02x", byte(val)) return map[string][]byte{ "Height": []byte(fmt.Sprintf("H:%d", val)), "BlockPart": []byte(fmt.Sprintf("P:%d:%d", val, val)), @@ -40,14 +37,19 @@ func getLegacyPrefixKeys(val int) map[string][]byte { "UserKey1": []byte(fmt.Sprintf("foo/bar/baz/%d/%d", val, val)), "TxHeight": []byte(fmt.Sprintf("tx.height/%s/%d/%d", fmt.Sprint(val), val, val)), "TxHash": append( - bytes.Repeat([]byte{fmt.Sprint(val)[0]}, 16), - bytes.Repeat([]byte{fmt.Sprint(val)[len([]byte(fmt.Sprint(val)))-1]}, 16)..., + []byte(strings.Repeat(vstr[:1], 16)), + []byte(strings.Repeat(vstr[1:], 16))..., ), + + // Transaction hashes that could be mistaken for evidence keys. 
+ "TxHashMimic0": append([]byte{0}, []byte(strings.Repeat(vstr, 16)[:31])...), + "TxHashMimic1": append([]byte{1}, []byte(strings.Repeat(vstr, 16)[:31])...), } } func getNewPrefixKeys(t *testing.T, val int) map[string][]byte { t.Helper() + vstr := fmt.Sprintf("%02x", byte(val)) return map[string][]byte{ "Height": makeKey(t, int64(0), int64(val)), "BlockPart": makeKey(t, int64(1), int64(val), int64(val)), @@ -66,34 +68,12 @@ func getNewPrefixKeys(t *testing.T, val int) map[string][]byte { "UserKey0": makeKey(t, "foo", "bar", int64(val), int64(val)), "UserKey1": makeKey(t, "foo", "bar/baz", int64(val), int64(val)), "TxHeight": makeKey(t, "tx.height", fmt.Sprint(val), int64(val), int64(val+2), int64(val+val)), - "TxHash": makeKey(t, "tx.hash", string(bytes.Repeat([]byte{[]byte(fmt.Sprint(val))[0]}, 32))), + "TxHash": makeKey(t, "tx.hash", strings.Repeat(vstr, 16)), + "TxHashMimic0": makeKey(t, "tx.hash", "\x00"+strings.Repeat(vstr, 16)[:31]), + "TxHashMimic1": makeKey(t, "tx.hash", "\x01"+strings.Repeat(vstr, 16)[:31]), } } -func getLegacyDatabase(t *testing.T) (int, dbm.DB) { - db := dbm.NewMemDB() - batch := db.NewBatch() - ct := 0 - - generated := []map[string][]byte{ - getLegacyPrefixKeys(8), - getLegacyPrefixKeys(9001), - getLegacyPrefixKeys(math.MaxInt32 << 1), - getLegacyPrefixKeys(math.MaxInt64 - 8), - } - - // populate database - for _, km := range generated { - for _, key := range km { - ct++ - require.NoError(t, batch.Set(key, []byte(fmt.Sprintf(`{"value": %d}`, ct)))) - } - } - require.NoError(t, batch.WriteSync()) - require.NoError(t, batch.Close()) - return ct - (2 * len(generated)) + 2, db -} - func TestMigration(t *testing.T) { t.Run("Idempotency", func(t *testing.T) { // we want to make sure that the key space for new and @@ -105,37 +85,12 @@ func TestMigration(t *testing.T) { require.Equal(t, len(legacyPrefixes), len(newPrefixes)) - t.Run("Legacy", func(t *testing.T) { - for kind, le := range legacyPrefixes { - require.True(t, 
checkKeyType(le).isLegacy(), kind) - } - }) - t.Run("New", func(t *testing.T) { - for kind, ne := range newPrefixes { - require.False(t, checkKeyType(ne).isLegacy(), kind) - } - }) - t.Run("Conversion", func(t *testing.T) { - for kind, le := range legacyPrefixes { - nk, err := migrateKey(le) - require.NoError(t, err, kind) - require.False(t, checkKeyType(nk).isLegacy(), kind) - } - }) t.Run("Hashes", func(t *testing.T) { t.Run("NewKeysAreNotHashes", func(t *testing.T) { for _, key := range getNewPrefixKeys(t, 9001) { require.True(t, len(key) != 32) } }) - t.Run("ContrivedLegacyKeyDetection", func(t *testing.T) { - // length 32: should appear to be a hash - require.Equal(t, txHashKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"))) - - // length ≠ 32: should not appear to be a hash - require.Equal(t, nonLegacyKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx--"))) - require.Equal(t, nonLegacyKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"))) - }) }) }) t.Run("Migrations", func(t *testing.T) { @@ -163,72 +118,101 @@ func TestMigration(t *testing.T) { "UserKey3": []byte("foo/bar/baz/1.2/4"), } for kind, key := range table { - out, err := migrateKey(key) + out, err := migrateKey(key, "") + // TODO probably these error at the + // moment because of store missmatches require.Error(t, err, kind) require.Nil(t, out, kind) } }) - t.Run("Replacement", func(t *testing.T) { - t.Run("MissingKey", func(t *testing.T) { - db := dbm.NewMemDB() - require.NoError(t, replaceKey(db, keyID("hi"), nil)) - }) - t.Run("ReplacementFails", func(t *testing.T) { - db := dbm.NewMemDB() - key := keyID("hi") - require.NoError(t, db.Set(key, []byte("world"))) - require.Error(t, replaceKey(db, key, func(k keyID) (keyID, error) { - return nil, errors.New("hi") - })) - }) - t.Run("KeyDisappears", func(t *testing.T) { - db := dbm.NewMemDB() - key := keyID("hi") - require.NoError(t, db.Set(key, []byte("world"))) - require.Error(t, replaceKey(db, key, func(k keyID) (keyID, 
error) { - require.NoError(t, db.Delete(key)) - return keyID("wat"), nil - })) - - exists, err := db.Has(key) - require.NoError(t, err) - require.False(t, exists) - - exists, err = db.Has(keyID("wat")) - require.NoError(t, err) - require.False(t, exists) - }) - }) }) - t.Run("Integration", func(t *testing.T) { - t.Run("KeyDiscovery", func(t *testing.T) { - size, db := getLegacyDatabase(t) - keys, err := getAllLegacyKeys(db) - require.NoError(t, err) - require.Equal(t, size, len(keys)) - legacyKeys := 0 - for _, k := range keys { - if checkKeyType(k).isLegacy() { - legacyKeys++ - } +} + +func TestGlobalDataStructuresForRefactor(t *testing.T) { + defer func() { + if t.Failed() { + t.Log("number of migrations:", len(migrations)) + } + }() + + const unPrefixedLegacyKeys = 3 + + t.Run("MigrationsAreDefined", func(t *testing.T) { + if len(prefixes)+unPrefixedLegacyKeys != len(migrations) { + t.Fatal("migrationse are not correctly defined", + "prefixes", len(prefixes), + "migrations", len(migrations)) + } + }) + t.Run("AllMigrationsHavePrefixDefined", func(t *testing.T) { + for _, m := range migrations { + if m.prefix == nil && m.storeName != "tx_index" { + t.Errorf("migration named %q for store %q does not have a prefix defined", m.name, m.storeName) + } + } + }) + t.Run("Deduplication", func(t *testing.T) { + t.Run("Prefixes", func(t *testing.T) { + set := map[string]struct{}{} + for _, prefix := range prefixes { + set[string(prefix.prefix)] = struct{}{} + } + if len(set) != len(prefixes) { + t.Fatal("duplicate prefix definition", + "set", len(set), + "values", set) + } + }) + t.Run("MigrationName", func(t *testing.T) { + set := map[string]struct{}{} + for _, migration := range migrations { + set[migration.name] = struct{}{} + } + if len(set) != len(migrations) { + t.Fatal("duplicate migration name defined", + "set", len(set), + "values", set) } - require.Equal(t, size, legacyKeys) }) - t.Run("KeyIdempotency", func(t *testing.T) { - for _, key := range 
getNewPrefixKeys(t, 84) { - require.False(t, checkKeyType(key).isLegacy()) + t.Run("MigrationPrefix", func(t *testing.T) { + set := map[string]struct{}{} + for _, migration := range migrations { + set[string(migration.prefix)] = struct{}{} + } + // three keys don't have prefixes in the + // legacy system; this is fine but it means + // the set will have 1 less than expected + // (well 2 less, but the empty key takes one + // of the slots): + expectedDupl := unPrefixedLegacyKeys - 1 + + if len(set) != len(migrations)-expectedDupl { + t.Fatal("duplicate migration prefix defined", + "set", len(set), + "expected", len(migrations)-expectedDupl, + "values", set) } }) - t.Run("Migrate", func(t *testing.T) { - _, db := getLegacyDatabase(t) - - ctx := context.Background() - err := Migrate(ctx, db) - require.NoError(t, err) - keys, err := getAllLegacyKeys(db) - require.NoError(t, err) - require.Equal(t, 0, len(keys)) - + t.Run("MigrationStoreName", func(t *testing.T) { + set := map[string]struct{}{} + for _, migration := range migrations { + set[migration.storeName] = struct{}{} + } + if len(set) != 5 { + t.Fatal("duplicate migration store name defined", + "set", len(set), + "values", set) + } + if _, ok := set[""]; ok { + t.Fatal("empty store name defined") + } }) }) + t.Run("NilPrefix", func(t *testing.T) { + _, err := getMigrationFunc("tx_index", []byte("fooo")) + if err != nil { + t.Fatal("should find an index for tx", err) + } + }) + } diff --git a/scripts/metricsgen/metricsdiff/metricsdiff.go b/scripts/metricsgen/metricsdiff/metricsdiff.go new file mode 100644 index 0000000000..5ed72ff97c --- /dev/null +++ b/scripts/metricsgen/metricsdiff/metricsdiff.go @@ -0,0 +1,197 @@ +// metricsdiff is a tool for generating a diff between two different files containing +// prometheus metrics. metricsdiff outputs which metrics have been added, removed, +// or have different sets of labels between the two files. 
+package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + "path/filepath" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +func init() { + flag.Usage = func() { + fmt.Fprintf(os.Stderr, `Usage: %[1]s + +Generate the diff between the two files of Prometheus metrics. +The input should have the format output by a Prometheus HTTP endpoint. +The tool indicates which metrics have been added, removed, or use different +label sets from path1 to path2. + +`, filepath.Base(os.Args[0])) + flag.PrintDefaults() + } +} + +// Diff contains the set of metrics that were modified between two files +// containing prometheus metrics output. +type Diff struct { + Adds []string + Removes []string + + Changes []LabelDiff +} + +// LabelDiff describes the label changes between two versions of the same metric. +type LabelDiff struct { + Metric string + Adds []string + Removes []string +} + +type parsedMetric struct { + name string + labels []string +} + +type metricsList []parsedMetric + +func main() { + flag.Parse() + if flag.NArg() != 2 { + log.Fatalf("Usage is '%s ', got %d arguments", + filepath.Base(os.Args[0]), flag.NArg()) + } + fa, err := os.Open(flag.Arg(0)) + if err != nil { + log.Fatalf("Open: %v", err) + } + defer fa.Close() + fb, err := os.Open(flag.Arg(1)) + if err != nil { + log.Fatalf("Open: %v", err) + } + defer fb.Close() + md, err := DiffFromReaders(fa, fb) + if err != nil { + log.Fatalf("Generating diff: %v", err) + } + fmt.Print(md) +} + +// DiffFromReaders parses the metrics present in the readers a and b and +// determines which metrics were added and removed in b. 
+func DiffFromReaders(a, b io.Reader) (Diff, error) { + var parser expfmt.TextParser + amf, err := parser.TextToMetricFamilies(a) + if err != nil { + return Diff{}, err + } + bmf, err := parser.TextToMetricFamilies(b) + if err != nil { + return Diff{}, err + } + + md := Diff{} + aList := toList(amf) + bList := toList(bmf) + + i, j := 0, 0 + for i < len(aList) || j < len(bList) { + for j < len(bList) && (i >= len(aList) || bList[j].name < aList[i].name) { + md.Adds = append(md.Adds, bList[j].name) + j++ + } + for i < len(aList) && j < len(bList) && aList[i].name == bList[j].name { + adds, removes := listDiff(aList[i].labels, bList[j].labels) + if len(adds) > 0 || len(removes) > 0 { + md.Changes = append(md.Changes, LabelDiff{ + Metric: aList[i].name, + Adds: adds, + Removes: removes, + }) + } + i++ + j++ + } + for i < len(aList) && (j >= len(bList) || aList[i].name < bList[j].name) { + md.Removes = append(md.Removes, aList[i].name) + i++ + } + } + return md, nil +} + +func toList(l map[string]*dto.MetricFamily) metricsList { + r := make([]parsedMetric, len(l)) + var idx int + for name, family := range l { + r[idx] = parsedMetric{ + name: name, + labels: labelsToStringList(family.Metric[0].Label), + } + idx++ + } + sort.Sort(metricsList(r)) + return r +} + +func labelsToStringList(ls []*dto.LabelPair) []string { + r := make([]string, len(ls)) + for i, l := range ls { + r[i] = l.GetName() + } + return sort.StringSlice(r) +} + +func listDiff(a, b []string) ([]string, []string) { + adds, removes := []string{}, []string{} + i, j := 0, 0 + for i < len(a) || j < len(b) { + for j < len(b) && (i >= len(a) || b[j] < a[i]) { + adds = append(adds, b[j]) + j++ + } + for i < len(a) && j < len(b) && a[i] == b[j] { + i++ + j++ + } + for i < len(a) && (j >= len(b) || a[i] < b[j]) { + removes = append(removes, a[i]) + i++ + } + } + return adds, removes +} + +func (m metricsList) Len() int { return len(m) } +func (m metricsList) Less(i, j int) bool { return m[i].name < m[j].name } 
+func (m metricsList) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (m Diff) String() string { + var s strings.Builder + if len(m.Adds) > 0 || len(m.Removes) > 0 { + fmt.Fprintln(&s, "Metric changes:") + } + if len(m.Adds) > 0 { + for _, add := range m.Adds { + fmt.Fprintf(&s, "+++ %s\n", add) + } + } + if len(m.Removes) > 0 { + for _, rem := range m.Removes { + fmt.Fprintf(&s, "--- %s\n", rem) + } + } + if len(m.Changes) > 0 { + fmt.Fprintln(&s, "Label changes:") + for _, ld := range m.Changes { + fmt.Fprintf(&s, "Metric: %s\n", ld.Metric) + for _, add := range ld.Adds { + fmt.Fprintf(&s, "+++ %s\n", add) + } + for _, rem := range ld.Removes { + fmt.Fprintf(&s, "--- %s\n", rem) + } + } + } + return s.String() +} diff --git a/scripts/metricsgen/metricsdiff/metricsdiff_test.go b/scripts/metricsgen/metricsdiff/metricsdiff_test.go new file mode 100644 index 0000000000..ec27ef1e9b --- /dev/null +++ b/scripts/metricsgen/metricsdiff/metricsdiff_test.go @@ -0,0 +1,62 @@ +package main_test + +import ( + "bytes" + "io" + "testing" + + "github.com/stretchr/testify/require" + metricsdiff "github.com/tendermint/tendermint/scripts/metricsgen/metricsdiff" +) + +func TestDiff(t *testing.T) { + for _, tc := range []struct { + name string + aContents string + bContents string + + want string + }{ + { + name: "labels", + aContents: ` + metric_one{label_one="content", label_two="content"} 0 + `, + bContents: ` + metric_one{label_three="content", label_four="content"} 0 + `, + want: `Label changes: +Metric: metric_one ++++ label_three ++++ label_four +--- label_one +--- label_two +`, + }, + { + name: "metrics", + aContents: ` + metric_one{label_one="content"} 0 + `, + bContents: ` + metric_two{label_two="content"} 0 + `, + want: `Metric changes: ++++ metric_two +--- metric_one +`, + }, + } { + t.Run(tc.name, func(t *testing.T) { + bufA := bytes.NewBuffer([]byte{}) + bufB := bytes.NewBuffer([]byte{}) + _, err := io.WriteString(bufA, tc.aContents) + require.NoError(t, err) + _, err 
= io.WriteString(bufB, tc.bContents) + require.NoError(t, err) + md, err := metricsdiff.DiffFromReaders(bufA, bufB) + require.NoError(t, err) + require.Equal(t, tc.want, md.String()) + }) + } +} diff --git a/scripts/metricsgen/metricsgen.go b/scripts/metricsgen/metricsgen.go new file mode 100644 index 0000000000..0f564e66ae --- /dev/null +++ b/scripts/metricsgen/metricsgen.go @@ -0,0 +1,347 @@ +// metricsgen is a code generation tool for creating constructors for Tendermint +// metrics types. +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "go/types" + "io" + "io/fs" + "log" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + "text/template" +) + +func init() { + flag.Usage = func() { + fmt.Fprintf(os.Stderr, `Usage: %[1]s -struct + +Generate constructors for the metrics type specified by -struct contained in +the current directory. The tool creates a new file in the current directory +containing the generated code. + +Options: +`, filepath.Base(os.Args[0])) + flag.PrintDefaults() + } +} + +const metricsPackageName = "github.com/go-kit/kit/metrics" + +const ( + metricNameTag = "metrics_name" + labelsTag = "metrics_labels" + bucketTypeTag = "metrics_buckettype" + bucketSizeTag = "metrics_bucketsizes" +) + +var ( + dir = flag.String("dir", ".", "Path to the directory containing the target package") + strct = flag.String("struct", "Metrics", "Struct to parse for metrics") +) + +var bucketType = map[string]string{ + "exprange": "stdprometheus.ExponentialBucketsRange", + "exp": "stdprometheus.ExponentialBuckets", + "lin": "stdprometheus.LinearBuckets", +} + +var tmpl = template.Must(template.New("tmpl").Parse(`// Code generated by metricsgen. DO NOT EDIT. 
+ +package {{ .Package }} + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + {{ range $metric := .ParsedMetrics }} + {{- $metric.FieldName }}: prometheus.New{{ $metric.TypeName }}From(stdprometheus.{{$metric.TypeName }}Opts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "{{$metric.MetricName }}", + Help: "{{ $metric.Description }}", + {{ if ne $metric.HistogramOptions.BucketType "" }} + Buckets: {{ $metric.HistogramOptions.BucketType }}({{ $metric.HistogramOptions.BucketSizes }}), + {{ else if ne $metric.HistogramOptions.BucketSizes "" }} + Buckets: []float64{ {{ $metric.HistogramOptions.BucketSizes }} }, + {{ end }} + {{- if eq (len $metric.Labels) 0 }} + }, labels).With(labelsAndValues...), + {{ else }} + }, append(labels, {{$metric.Labels}})).With(labelsAndValues...), + {{ end }} + {{- end }} + } +} + + +func NopMetrics() *Metrics { + return &Metrics{ + {{- range $metric := .ParsedMetrics }} + {{ $metric.FieldName }}: discard.New{{ $metric.TypeName }}(), + {{- end }} + } +} +`)) + +// ParsedMetricField is the data parsed for a single field of a metric struct. +type ParsedMetricField struct { + TypeName string + FieldName string + MetricName string + Description string + Labels string + + HistogramOptions HistogramOpts +} + +type HistogramOpts struct { + BucketType string + BucketSizes string +} + +// TemplateData is all of the data required for rendering a metric file template. 
+type TemplateData struct {
+	Package       string
+	ParsedMetrics []ParsedMetricField
+}
+
+func main() {
+	flag.Parse()
+	if *strct == "" {
+		log.Fatal("You must specify a non-empty -struct")
+	}
+	td, err := ParseMetricsDir(".", *strct)
+	if err != nil {
+		log.Fatalf("Parsing file: %v", err)
+	}
+	out := filepath.Join(*dir, "metrics.gen.go")
+	f, err := os.Create(out)
+	if err != nil {
+		log.Fatalf("Opening file: %v", err)
+	}
+	err = GenerateMetricsFile(f, td)
+	if err != nil {
+		log.Fatalf("Generating code: %v", err)
+	}
+}
+func ignoreTestFiles(f fs.FileInfo) bool {
+	return !strings.Contains(f.Name(), "_test.go")
+}
+
+// ParseMetricsDir parses the dir and scans for a struct matching structName,
+// ignoring all test files. ParseMetricsDir iterates the fields of the metrics
+// struct and builds a TemplateData using the data obtained from the abstract syntax tree.
+func ParseMetricsDir(dir string, structName string) (TemplateData, error) {
+	fs := token.NewFileSet()
+	d, err := parser.ParseDir(fs, dir, ignoreTestFiles, parser.ParseComments)
+	if err != nil {
+		return TemplateData{}, err
+	}
+	if len(d) > 1 {
+		return TemplateData{}, fmt.Errorf("multiple packages found in %s", dir)
+	}
+	if len(d) == 0 {
+		return TemplateData{}, fmt.Errorf("no go packages found in %s", dir)
+	}
+
+	// Grab the package name.
+	var pkgName string
+	var pkg *ast.Package
+	for pkgName, pkg = range d {
+	}
+	td := TemplateData{
+		Package: pkgName,
+	}
+	// Grab the metrics struct
+	m, mPkgName, err := findMetricsStruct(pkg.Files, structName)
+	if err != nil {
+		return TemplateData{}, err
+	}
+	for _, f := range m.Fields.List {
+		if !isMetric(f.Type, mPkgName) {
+			continue
+		}
+		pmf := parseMetricField(f)
+		td.ParsedMetrics = append(td.ParsedMetrics, pmf)
+	}
+
+	return td, err
+}
+
+// GenerateMetricsFile executes the metrics file template, writing the result
+// into the io.Writer.
+func GenerateMetricsFile(w io.Writer, td TemplateData) error { + b := []byte{} + buf := bytes.NewBuffer(b) + err := tmpl.Execute(buf, td) + if err != nil { + return err + } + b, err = format.Source(buf.Bytes()) + if err != nil { + return err + } + _, err = io.Copy(w, bytes.NewBuffer(b)) + if err != nil { + return err + } + return nil +} + +func findMetricsStruct(files map[string]*ast.File, structName string) (*ast.StructType, string, error) { + var ( + st *ast.StructType + ) + for _, file := range files { + mPkgName, err := extractMetricsPackageName(file.Imports) + if err != nil { + return nil, "", fmt.Errorf("unable to determine metrics package name: %v", err) + } + if !ast.FilterFile(file, func(name string) bool { + return name == structName + }) { + continue + } + ast.Inspect(file, func(n ast.Node) bool { + switch f := n.(type) { + case *ast.TypeSpec: + if f.Name.Name == structName { + var ok bool + st, ok = f.Type.(*ast.StructType) + if !ok { + err = fmt.Errorf("found identifier for %q of wrong type", structName) + } + } + return false + default: + return true + } + }) + if err != nil { + return nil, "", err + } + if st != nil { + return st, mPkgName, nil + } + } + return nil, "", fmt.Errorf("target struct %q not found in dir", structName) +} + +func parseMetricField(f *ast.Field) ParsedMetricField { + pmf := ParsedMetricField{ + Description: extractHelpMessage(f.Doc), + MetricName: extractFieldName(f.Names[0].String(), f.Tag), + FieldName: f.Names[0].String(), + TypeName: extractTypeName(f.Type), + Labels: extractLabels(f.Tag), + } + if pmf.TypeName == "Histogram" { + pmf.HistogramOptions = extractHistogramOptions(f.Tag) + } + return pmf +} + +func extractTypeName(e ast.Expr) string { + return strings.TrimPrefix(path.Ext(types.ExprString(e)), ".") +} + +func extractHelpMessage(cg *ast.CommentGroup) string { + if cg == nil { + return "" + } + var help []string //nolint: prealloc + for _, c := range cg.List { + mt := strings.TrimPrefix(c.Text, "//metrics:") + 
if mt != c.Text { + return strings.TrimSpace(mt) + } + help = append(help, strings.TrimSpace(strings.TrimPrefix(c.Text, "//"))) + } + return strings.Join(help, " ") +} + +func isMetric(e ast.Expr, mPkgName string) bool { + return strings.Contains(types.ExprString(e), fmt.Sprintf("%s.", mPkgName)) +} + +func extractLabels(bl *ast.BasicLit) string { + if bl != nil { + t := reflect.StructTag(strings.Trim(bl.Value, "`")) + if v := t.Get(labelsTag); v != "" { + var res []string + for _, s := range strings.Split(v, ",") { + res = append(res, strconv.Quote(strings.TrimSpace(s))) + } + return strings.Join(res, ",") + } + } + return "" +} + +func extractFieldName(name string, tag *ast.BasicLit) string { + if tag != nil { + t := reflect.StructTag(strings.Trim(tag.Value, "`")) + if v := t.Get(metricNameTag); v != "" { + return v + } + } + return toSnakeCase(name) +} + +func extractHistogramOptions(tag *ast.BasicLit) HistogramOpts { + h := HistogramOpts{} + if tag != nil { + t := reflect.StructTag(strings.Trim(tag.Value, "`")) + if v := t.Get(bucketTypeTag); v != "" { + h.BucketType = bucketType[v] + } + if v := t.Get(bucketSizeTag); v != "" { + h.BucketSizes = v + } + } + return h +} + +func extractMetricsPackageName(imports []*ast.ImportSpec) (string, error) { + for _, i := range imports { + u, err := strconv.Unquote(i.Path.Value) + if err != nil { + return "", err + } + if u == metricsPackageName { + if i.Name != nil { + return i.Name.Name, nil + } + return path.Base(u), nil + } + } + return "", nil +} + +var capitalChange = regexp.MustCompile("([a-z0-9])([A-Z])") + +func toSnakeCase(str string) string { + snake := capitalChange.ReplaceAllString(str, "${1}_${2}") + return strings.ToLower(snake) +} diff --git a/scripts/metricsgen/metricsgen_test.go b/scripts/metricsgen/metricsgen_test.go new file mode 100644 index 0000000000..a925b591d1 --- /dev/null +++ b/scripts/metricsgen/metricsgen_test.go @@ -0,0 +1,259 @@ +package main_test + +import ( + "bytes" + "fmt" + "go/parser" + 
"go/token" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + metricsgen "github.com/tendermint/tendermint/scripts/metricsgen" +) + +const testDataDir = "./testdata" + +func TestSimpleTemplate(t *testing.T) { + m := metricsgen.ParsedMetricField{ + TypeName: "Histogram", + FieldName: "MyMetric", + MetricName: "request_count", + Description: "how many requests were made since the start of the process", + Labels: "first, second, third", + } + td := metricsgen.TemplateData{ + Package: "mypack", + ParsedMetrics: []metricsgen.ParsedMetricField{m}, + } + b := bytes.NewBuffer([]byte{}) + err := metricsgen.GenerateMetricsFile(b, td) + if err != nil { + t.Fatalf("unable to parse template %v", err) + } +} + +func TestFromData(t *testing.T) { + infos, err := ioutil.ReadDir(testDataDir) + if err != nil { + t.Fatalf("unable to open file %v", err) + } + for _, dir := range infos { + t.Run(dir.Name(), func(t *testing.T) { + if !dir.IsDir() { + t.Fatalf("expected file %s to be directory", dir.Name()) + } + dirName := path.Join(testDataDir, dir.Name()) + pt, err := metricsgen.ParseMetricsDir(dirName, "Metrics") + if err != nil { + t.Fatalf("unable to parse from dir %q: %v", dir, err) + } + outFile := path.Join(dirName, "out.go") + if err != nil { + t.Fatalf("unable to open file %s: %v", outFile, err) + } + of, err := os.Create(outFile) + if err != nil { + t.Fatalf("unable to open file %s: %v", outFile, err) + } + defer os.Remove(outFile) + if err := metricsgen.GenerateMetricsFile(of, pt); err != nil { + t.Fatalf("unable to generate metrics file %s: %v", outFile, err) + } + if _, err := parser.ParseFile(token.NewFileSet(), outFile, nil, parser.AllErrors); err != nil { + t.Fatalf("unable to parse generated file %s: %v", outFile, err) + } + bNew, err := ioutil.ReadFile(outFile) + if err != nil { + t.Fatalf("unable to read generated file %s: %v", outFile, err) + } + goldenFile := path.Join(dirName, "metrics.gen.go") + bOld, err 
:= ioutil.ReadFile(goldenFile) + if err != nil { + t.Fatalf("unable to read file %s: %v", goldenFile, err) + } + if !bytes.Equal(bNew, bOld) { + t.Fatalf("newly generated code in file %s does not match golden file %s\n"+ + "if the output of the metricsgen tool is expected to change run the following make target: \n"+ + "\tmake metrics", outFile, goldenFile) + } + }) + } +} + +func TestParseMetricsStruct(t *testing.T) { + const pkgName = "mypkg" + metricsTests := []struct { + name string + shouldError bool + metricsStruct string + expected metricsgen.TemplateData + }{ + { + name: "basic", + metricsStruct: `type Metrics struct { + myGauge metrics.Gauge + }`, + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Gauge", + FieldName: "myGauge", + MetricName: "my_gauge", + }, + }, + }, + }, + { + name: "histogram", + metricsStruct: "type Metrics struct {\n" + + "myHistogram metrics.Histogram `metrics_buckettype:\"exp\" metrics_bucketsizes:\"1, 100, .8\"`\n" + + "}", + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Histogram", + FieldName: "myHistogram", + MetricName: "my_histogram", + + HistogramOptions: metricsgen.HistogramOpts{ + BucketType: "stdprometheus.ExponentialBuckets", + BucketSizes: "1, 100, .8", + }, + }, + }, + }, + }, + { + name: "labeled name", + metricsStruct: "type Metrics struct {\n" + + "myCounter metrics.Counter `metrics_name:\"new_name\"`\n" + + "}", + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Counter", + FieldName: "myCounter", + MetricName: "new_name", + }, + }, + }, + }, + { + name: "metric labels", + metricsStruct: "type Metrics struct {\n" + + "myCounter metrics.Counter `metrics_labels:\"label1,label2\"`\n" + + "}", + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + 
TypeName: "Counter", + FieldName: "myCounter", + MetricName: "my_counter", + Labels: "\"label1\",\"label2\"", + }, + }, + }, + }, + { + name: "ignore non-metric field", + metricsStruct: `type Metrics struct { + myCounter metrics.Counter + nonMetric string + }`, + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Counter", + FieldName: "myCounter", + MetricName: "my_counter", + }, + }, + }, + }, + } + for _, testCase := range metricsTests { + t.Run(testCase.name, func(t *testing.T) { + dir, err := os.MkdirTemp(os.TempDir(), "metricsdir") + if err != nil { + t.Fatalf("unable to create directory: %v", err) + } + defer os.Remove(dir) + f, err := os.Create(filepath.Join(dir, "metrics.go")) + if err != nil { + t.Fatalf("unable to open file: %v", err) + } + pkgLine := fmt.Sprintf("package %s\n", pkgName) + importClause := ` + import( + "github.com/go-kit/kit/metrics" + ) + ` + + _, err = io.WriteString(f, pkgLine) + require.NoError(t, err) + _, err = io.WriteString(f, importClause) + require.NoError(t, err) + _, err = io.WriteString(f, testCase.metricsStruct) + require.NoError(t, err) + + td, err := metricsgen.ParseMetricsDir(dir, "Metrics") + if testCase.shouldError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, testCase.expected, td) + } + }) + } +} + +func TestParseAliasedMetric(t *testing.T) { + aliasedData := ` + package mypkg + + import( + mymetrics "github.com/go-kit/kit/metrics" + ) + type Metrics struct { + m mymetrics.Gauge + } + ` + dir, err := os.MkdirTemp(os.TempDir(), "metricsdir") + if err != nil { + t.Fatalf("unable to create directory: %v", err) + } + defer os.Remove(dir) + f, err := os.Create(filepath.Join(dir, "metrics.go")) + if err != nil { + t.Fatalf("unable to open file: %v", err) + } + _, err = io.WriteString(f, aliasedData) + if err != nil { + t.Fatalf("unable to write to file: %v", err) + } + td, err := metricsgen.ParseMetricsDir(dir, 
"Metrics") + require.NoError(t, err) + + expected := + metricsgen.TemplateData{ + Package: "mypkg", + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Gauge", + FieldName: "m", + MetricName: "m", + }, + }, + } + require.Equal(t, expected, td) +} diff --git a/scripts/metricsgen/testdata/basic/metrics.gen.go b/scripts/metricsgen/testdata/basic/metrics.gen.go new file mode 100644 index 0000000000..d541cb2dbb --- /dev/null +++ b/scripts/metricsgen/testdata/basic/metrics.gen.go @@ -0,0 +1,30 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package basic + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "height", + Help: "simple metric that tracks the height of the chain.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + Height: discard.NewGauge(), + } +} diff --git a/scripts/metricsgen/testdata/basic/metrics.go b/scripts/metricsgen/testdata/basic/metrics.go new file mode 100644 index 0000000000..1a361f90f6 --- /dev/null +++ b/scripts/metricsgen/testdata/basic/metrics.go @@ -0,0 +1,11 @@ +package basic + +import "github.com/go-kit/kit/metrics" + +//go:generate go run ../../../../scripts/metricsgen -struct=Metrics + +// Metrics contains metrics exposed by this package. +type Metrics struct { + // simple metric that tracks the height of the chain. 
+ Height metrics.Gauge +} diff --git a/scripts/metricsgen/testdata/commented/metrics.gen.go b/scripts/metricsgen/testdata/commented/metrics.gen.go new file mode 100644 index 0000000000..c1346da384 --- /dev/null +++ b/scripts/metricsgen/testdata/commented/metrics.gen.go @@ -0,0 +1,30 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package commented + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Field: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "field", + Help: "Height of the chain. We expect multi-line comments to parse correctly.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + Field: discard.NewGauge(), + } +} diff --git a/scripts/metricsgen/testdata/commented/metrics.go b/scripts/metricsgen/testdata/commented/metrics.go new file mode 100644 index 0000000000..174f1e2333 --- /dev/null +++ b/scripts/metricsgen/testdata/commented/metrics.go @@ -0,0 +1,11 @@ +package commented + +import "github.com/go-kit/kit/metrics" + +//go:generate go run ../../../../scripts/metricsgen -struct=Metrics + +type Metrics struct { + // Height of the chain. + // We expect multi-line comments to parse correctly. + Field metrics.Gauge +} diff --git a/scripts/metricsgen/testdata/tags/metrics.gen.go b/scripts/metricsgen/testdata/tags/metrics.gen.go new file mode 100644 index 0000000000..43779c7a16 --- /dev/null +++ b/scripts/metricsgen/testdata/tags/metrics.gen.go @@ -0,0 +1,55 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package tags + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + WithLabels: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "with_labels", + Help: "", + }, append(labels, "step", "time")).With(labelsAndValues...), + WithExpBuckets: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "with_exp_buckets", + Help: "", + + Buckets: stdprometheus.ExponentialBuckets(.1, 100, 8), + }, labels).With(labelsAndValues...), + WithBuckets: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "with_buckets", + Help: "", + + Buckets: []float64{1, 2, 3, 4, 5}, + }, labels).With(labelsAndValues...), + Named: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "metric_with_name", + Help: "", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + WithLabels: discard.NewCounter(), + WithExpBuckets: discard.NewHistogram(), + WithBuckets: discard.NewHistogram(), + Named: discard.NewCounter(), + } +} diff --git a/scripts/metricsgen/testdata/tags/metrics.go b/scripts/metricsgen/testdata/tags/metrics.go new file mode 100644 index 0000000000..8562dcf437 --- /dev/null +++ b/scripts/metricsgen/testdata/tags/metrics.go @@ -0,0 +1,12 @@ +package tags + +import "github.com/go-kit/kit/metrics" + +//go:generate go run ../../../../scripts/metricsgen -struct=Metrics + +type Metrics struct { + WithLabels metrics.Counter `metrics_labels:"step,time"` 
+ WithExpBuckets metrics.Histogram `metrics_buckettype:"exp" metrics_bucketsizes:".1,100,8"` + WithBuckets metrics.Histogram `metrics_bucketsizes:"1, 2, 3, 4, 5"` + Named metrics.Counter `metrics_name:"metric_with_name"` +} diff --git a/scripts/mockery_generate.sh b/scripts/mockery_generate.sh index 382c277bbe..2d6f40e638 100755 --- a/scripts/mockery_generate.sh +++ b/scripts/mockery_generate.sh @@ -1,3 +1,15 @@ #!/bin/sh +# +# Invoke Mockery v2 to update generated mocks for the given type. +# +# This script runs a locally-installed "mockery" if available, otherwise it +# runs the published Docker container. This legerdemain is so that the CI build +# and a local build can work off the same script. +# +if ! which mockery ; then + mockery() { + docker run --rm -v "$PWD":/w --workdir=/w vektra/mockery:v2.12.3 + } +fi -go run github.com/vektra/mockery/v2 --disable-version-string --case underscore --name $* +mockery --disable-version-string --case underscore --name "$@" diff --git a/scripts/proto-gen.sh b/scripts/proto-gen.sh new file mode 100755 index 0000000000..06fa07dd95 --- /dev/null +++ b/scripts/proto-gen.sh @@ -0,0 +1,19 @@ +#!/bin/sh +# +# Update the generated code for protocol buffers in the Tendermint repository. +# This must be run from inside a Tendermint working directory. +# +set -euo pipefail + +# Work from the root of the repository. +cd "$(git rev-parse --show-toplevel)" + +# Run inside Docker to install the correct versions of the required tools +# without polluting the local system. 
+docker run --rm -i -v "$PWD":/w --workdir=/w golang:1.18-alpine sh <<"EOF" +apk add git make + +go install github.com/bufbuild/buf/cmd/buf +go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest +make proto-gen +EOF diff --git a/spec/abci++/README.md b/spec/abci++/README.md index 38feba9d7e..2cd2bb4834 100644 --- a/spec/abci++/README.md +++ b/spec/abci++/README.md @@ -25,19 +25,15 @@ This allows Tendermint to run with applications written in many programming lang This specification is split as follows: -- [Overview and basic concepts](./abci++_basic_concepts_002_draft.md) - interface's overview and concepts needed to understand other parts of this specification. -- [Methods](./abci++_methods_002_draft.md) - complete details on all ABCI++ methods +- [Overview and basic concepts](./abci++_basic_concepts.md) - interface's overview and concepts + needed to understand other parts of this specification. +- [Methods](./abci++_methods.md) - complete details on all ABCI++ methods and message types. -- [Requirements for the Application](./abci++_app_requirements_002_draft.md) - formal requirements - on the Application's logic to ensure liveness of Tendermint. These requirements define what - Tendermint expects from the Application. -- [Tendermint's expected behavior](./abci++_tmint_expected_behavior_002_draft.md) - specification of +- [Requirements for the Application](./abci++_app_requirements.md) - formal requirements + on the Application's logic to ensure Tendermint properties such as liveness. These requirements define what + Tendermint expects from the Application; second part on managing ABCI application state and related topics. +- [Tendermint's expected behavior](./abci++_tmint_expected_behavior.md) - specification of how the different ABCI++ methods may be called by Tendermint. This explains what the Application is to expect from Tendermint. 
- ->**TODO** Re-read these and remove redundant info - -- [Applications](../abci/apps.md) - how to manage ABCI application state and other - details about building ABCI applications - [Client and Server](../abci/client-server.md) - for those looking to implement their own ABCI application servers diff --git a/spec/abci++/abci++_app_requirements.md b/spec/abci++/abci++_app_requirements.md new file mode 100644 index 0000000000..cd4d877c4a --- /dev/null +++ b/spec/abci++/abci++_app_requirements.md @@ -0,0 +1,1042 @@ +--- +order: 3 +title: Requirements for the Application +--- + +# Requirements for the Application + +## Formal Requirements + +This section specifies what Tendermint expects from the Application. It is structured as a set +of formal requirements that can be used for testing and verification of the Application's logic. + +Let *p* and *q* be two correct processes. +Let *rp* (resp. *rq*) be a round of height *h* where *p* (resp. *q*) is the +proposer. +Let *sp,h-1* be *p*'s Application's state committed for height *h-1*. +Let *vp* (resp. *vq*) be the block that *p*'s (resp. *q*'s) Tendermint passes +on to the Application +via `RequestPrepareProposal` as proposer of round *rp* (resp *rq*), height *h*, +also known as the raw proposal. +Let *up* (resp. *uq*) the possibly modified block *p*'s (resp. *q*'s) Application +returns via `ResponsePrepareProposal` to Tendermint, also known as the prepared proposal. + +Process *p*'s prepared proposal can differ in two different rounds where *p* is the proposer. + +* Requirement 1 [`PrepareProposal`, header-changes]: When the blockchain is in same-block execution mode, + *p*'s Application provides values for the following parameters in `ResponsePrepareProposal`: + `AppHash`, `TxResults`, `ConsensusParams`, `ValidatorUpdates`. Provided values for + `ConsensusParams` and `ValidatorUpdates` MAY be empty to denote that the Application + wishes to keep the current values. 
+ +Parameters `AppHash`, `TxResults`, `ConsensusParams`, and `ValidatorUpdates` are used by Tendermint to +compute various hashes in the block header that will finally be part of the proposal. + +* Requirement 2 [`PrepareProposal`, no-header-changes]: When the blockchain is in next-block execution + mode, *p*'s Application does not provide values for the following parameters in `ResponsePrepareProposal`: + `AppHash`, `TxResults`, `ConsensusParams`, `ValidatorUpdates`. + +In practical terms, Requirements 1 and 2 imply that Tendermint will (a) panic if the Application is in +same-block execution mode and *does not* provide values for +`AppHash`, `TxResults`, `ConsensusParams`, and `ValidatorUpdates`, or +(b) log an error if the Application is in next-block execution mode and *does* provide values for +`AppHash`, `TxResults`, `ConsensusParams`, or `ValidatorUpdates` (the values provided will be ignored). + +* Requirement 3 [`PrepareProposal`, timeliness]: If *p*'s Application fully executes prepared blocks in + `PrepareProposal` and the network is in a synchronous period while processes *p* and *q* are in *rp*, + then the value of *TimeoutPropose* at *q* must be such that *q*'s propose timer does not time out + (which would result in *q* prevoting `nil` in *rp*). + +Full execution of blocks at `PrepareProposal` time stands on Tendermint's critical path. Thus, +Requirement 3 ensures the Application will set a value for `TimeoutPropose` such that the time it takes +to fully execute blocks in `PrepareProposal` does not interfere with Tendermint's propose timer. +Note that violation of Requirement 3 may just lead to further rounds, but will not compromise liveness. + +* Requirement 4 [`PrepareProposal`, tx-size]: When *p*'s Application calls `ResponsePrepareProposal`, the + total size in bytes of the transactions returned does not exceed `RequestPrepareProposal.max_tx_bytes`. + +Busy blockchains might seek to maximize the amount of transactions included in each block. 
Under those conditions, +Tendermint might choose to increase the transactions passed to the Application via `RequestPrepareProposal.txs` +beyond the `RequestPrepareProposal.max_tx_bytes` limit. The idea is that, if the Application drops some of +those transactions, it can still return a transaction list whose byte size is as close to +`RequestPrepareProposal.max_tx_bytes` as possible. Thus, Requirement 4 ensures that the size in bytes of the +transaction list returned by the application will never cause the resulting block to go beyond its byte size +limit. + +* Requirement 5 [`PrepareProposal`, `ProcessProposal`, coherence]: For any two correct processes *p* and *q*, + if *q*'s Tendermint calls `RequestProcessProposal` on *up*, + *q*'s Application returns Accept in `ResponseProcessProposal`. + +Requirement 5 makes sure that blocks proposed by correct processes *always* pass the correct receiving process's +`ProcessProposal` check. +On the other hand, if there is a deterministic bug in `PrepareProposal` or `ProcessProposal` (or in both), +strictly speaking, this makes all processes that hit the bug byzantine. This is a problem in practice, +as very often validators are running the Application from the same codebase, so potentially *all* would +likely hit the bug at the same time. This would result in most (or all) processes prevoting `nil`, with the +serious consequences on Tendermint's liveness that this entails. Due to its criticality, Requirement 5 is a +target for extensive testing and automated verification. + +* Requirement 6 [`ProcessProposal`, determinism-1]: `ProcessProposal` is a (deterministic) function of the current + state and the block that is about to be applied. In other words, for any correct process *p*, and any arbitrary block *u*, + if *p*'s Tendermint calls `RequestProcessProposal` on *u* at height *h*, + then *p*'s Application's acceptance or rejection **exclusively** depends on *u* and *sp,h-1*. 
+ +* Requirement 7 [`ProcessProposal`, determinism-2]: For any two correct processes *p* and *q*, and any arbitrary + block *u*, + if *p*'s (resp. *q*'s) Tendermint calls `RequestProcessProposal` on *u* at height *h*, + then *p*'s Application accepts *u* if and only if *q*'s Application accepts *u*. + Note that this requirement follows from Requirement 6 and the Agreement property of consensus. + +Requirements 6 and 7 ensure that all correct processes will react in the same way to a proposed block, even +if the proposer is Byzantine. However, `ProcessProposal` may contain a bug that renders the +acceptance or rejection of the block non-deterministic, and therefore prevents processes hitting +the bug from fulfilling Requirements 6 or 7 (effectively making those processes Byzantine). +In such a scenario, Tendermint's liveness cannot be guaranteed. +Again, this is a problem in practice if most validators are running the same software, as they are likely +to hit the bug at the same point. There is currently no clear solution to help with this situation, so +the Application designers/implementors must proceed very carefully with the logic/implementation +of `ProcessProposal`. As a general rule `ProcessProposal` SHOULD always accept the block. + +According to the Tendermint algorithm, a correct process can broadcast at most one precommit +message in round *r*, height *h*. +Since, as stated in the [Methods](./abci++_methods.md#extendvote) section, `ResponseExtendVote` +is only called when Tendermint +is about to broadcast a non-`nil` precommit message, a correct process can only produce one vote extension +in round *r*, height *h*. +Let *erp* be the vote extension that the Application of a correct process *p* returns via +`ResponseExtendVote` in round *r*, height *h*. +Let *wrp* be the proposed block that *p*'s Tendermint passes to the Application via `RequestExtendVote` +in round *r*, height *h*. 
+ +* Requirement 8 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two different correct + processes *p* and *q*, if *q* receives *erp* from *p* in height *h*, *q*'s + Application returns Accept in `ResponseVerifyVoteExtension`. + +Requirement 8 constrains the creation and handling of vote extensions in a similar way as Requirement 5 +constrains the creation and handling of proposed blocks. +Requirement 8 ensures that extensions created by correct processes *always* pass the `VerifyVoteExtension` +checks performed by correct processes receiving those extensions. +However, if there is a (deterministic) bug in `ExtendVote` or `VerifyVoteExtension` (or in both), +we will face the same liveness issues as described for Requirement 5, as Precommit messages with invalid vote +extensions will be discarded. + +* Requirement 9 [`VerifyVoteExtension`, determinism-1]: `VerifyVoteExtension` is a (deterministic) function of + the current state, the vote extension received, and the prepared proposal that the extension refers to. + In other words, for any correct process *p*, and any arbitrary vote extension *e*, and any arbitrary + block *w*, if *p*'s (resp. *q*'s) Tendermint calls `RequestVerifyVoteExtension` on *e* and *w* at height *h*, + then *p*'s Application's acceptance or rejection **exclusively** depends on *e*, *w* and *sp,h-1*. + +* Requirement 10 [`VerifyVoteExtension`, determinism-2]: For any two correct processes *p* and *q*, + and any arbitrary vote extension *e*, and any arbitrary block *w*, + if *p*'s (resp. *q*'s) Tendermint calls `RequestVerifyVoteExtension` on *e* and *w* at height *h*, + then *p*'s Application accepts *e* if and only if *q*'s Application accepts *e*. + Note that this requirement follows from Requirement 9 and the Agreement property of consensus. + +Requirements 9 and 10 ensure that the validation of vote extensions will be deterministic at all +correct processes. 
+Requirements 9 and 10 protect against arbitrary vote extension data from Byzantine processes, +in a similar way as Requirements 6 and 7 protect against arbitrary proposed blocks. +Requirements 9 and 10 can be violated by a bug inducing non-determinism in +`VerifyVoteExtension`. In this case liveness can be compromised. +Extra care should be put in the implementation of `ExtendVote` and `VerifyVoteExtension`. +As a general rule, `VerifyVoteExtension` SHOULD always accept the vote extension. + +* Requirement 11 [*all*, no-side-effects]: *p*'s calls to `RequestPrepareProposal`, + `RequestProcessProposal`, `RequestExtendVote`, and `RequestVerifyVoteExtension` at height *h* do + not modify *sp,h-1*. + +* Requirement 12 [`ExtendVote`, `FinalizeBlock`, non-dependency]: for any correct process *p*, +and any vote extension *e* that *p* received at height *h*, the computation of +*sp,h* does not depend on *e*. + +The call to correct process *p*'s `RequestFinalizeBlock` at height *h*, with block *vp,h* +passed as parameter, creates state *sp,h*. +Additionally, + +* in next-block execution mode, *p*'s `FinalizeBlock` creates a set of transaction results *Tp,h*, +* in same-block execution mode, *p*'s `PrepareProposal` creates a set of transaction results *Tp,h* + if *p* was the proposer of *vp,h*. If *p* was not the proposer of *vp,h*, + `ProcessProposal` creates *Tp,h*. `FinalizeBlock` MAY re-create *Tp,h* if it was + removed from memory during the execution of height *h*. + +* Requirement 13 [`FinalizeBlock`, determinism-1]: For any correct process *p*, + *sp,h* exclusively depends on *sp,h-1* and *vp,h*. + +* Requirement 14 [`FinalizeBlock`, determinism-2]: For any correct process *p*, + the contents of *Tp,h* exclusively depend on *sp,h-1* and *vp,h*. + +Note that Requirements 13 and 14, combined with Agreement property of consensus ensure +state machine replication, i.e., the Application state evolves consistently at all correct processes. 
+ +Finally, notice that neither `PrepareProposal` nor `ExtendVote` have determinism-related +requirements associated. +Indeed, `PrepareProposal` is not required to be deterministic: + +* *up* may depend on *vp* and *sp,h-1*, but may also depend on other values or operations. +* *vp = vq ⇏ up = uq*. + +Likewise, `ExtendVote` can also be non-deterministic: + +* *erp* may depend on *wrp* and *sp,h-1*, + but may also depend on other values or operations. +* *wrp = wrq ⇏ + erp = erq* + +## Managing the Application state and related topics + +### Connection State + +Tendermint maintains four concurrent ABCI++ connections, namely +[Consensus Connection](#consensus-connection), +[Mempool Connection](#mempool-connection), +[Info/Query Connection](#infoquery-connection), and +[Snapshot Connection](#snapshot-connection). +It is common for an application to maintain a distinct copy of +the state for each connection, which are synchronized upon `Commit` calls. + +#### Concurrency + +In principle, each of the four ABCI++ connections operates concurrently with one +another. This means applications need to ensure access to state is +thread safe. Up to v0.35.x, both the +[default in-process ABCI client](https://github.com/tendermint/tendermint/blob/v0.35.x/abci/client/local_client.go#L18) +and the +[default Go ABCI server](https://github.com/tendermint/tendermint/blob/v0.35.x/abci/server/socket_server.go#L32) +used a global lock to guard the handling of events across all connections, so they were not +concurrent at all. This meant whether your app was compiled in-process with +Tendermint using the `NewLocalClient`, or run out-of-process using the `SocketServer`, +ABCI messages from all connections were received in sequence, one at a +time. +This is no longer the case starting from v0.36.0: the global locks have been removed and it is +up to the Application to synchronize access to its state when handling +ABCI++ methods on all connections. 
+Nevertheless, as all ABCI calls are now synchronous, ABCI messages using the same connection are +still received in sequence. + +#### FinalizeBlock + +When the consensus algorithm decides on a block, Tendermint uses `FinalizeBlock` to send the +decided block's data to the Application, which uses it to transition its state. + +The Application must remember the latest height from which it +has run a successful `Commit` so that it can tell Tendermint where to +pick up from when it recovers from a crash. See information on the Handshake +[here](#crash-recovery). + +#### Commit + +The Application should persist its state during `Commit`, before returning from it. + +Before invoking `Commit`, Tendermint locks the mempool and flushes the mempool connection. This ensures that +no new messages +will be received on the mempool connection during this processing step, providing an opportunity to safely +update all four +connection states to the latest committed state at the same time. + +When `Commit` returns, Tendermint unlocks the mempool. + +WARNING: if the ABCI app logic processing the `Commit` message sends a +`/broadcast_tx_sync` or `/broadcast_tx` and waits for the response +before proceeding, it will deadlock. Executing `broadcast_tx` calls +involves acquiring the mempool lock that Tendermint holds during the `Commit` call. +Synchronous mempool-related calls must be avoided as part of the sequential logic of the +`Commit` function. + +#### Candidate States + +Tendermint calls `PrepareProposal` when it is about to send a proposed block to the network. +Likewise, Tendermint calls `ProcessProposal` upon reception of a proposed block from the +network. In both cases, the proposed block's data +is disclosed to the Application, in the same conditions as is done in `FinalizeBlock`. 
+The block data disclosed to the Application by these three methods are the following:
+
+* the transaction list
+* the `LastCommit` referring to the previous block
+* the block header's hash (except in `PrepareProposal`, where it is not known yet)
+* list of validators that misbehaved
+* the block's timestamp
+* `NextValidatorsHash`
+* Proposer address
+
+The Application may decide to *immediately* execute the given block (i.e., upon `PrepareProposal`
+or `ProcessProposal`). There are two main reasons why the Application may want to do this:
+
+* *Avoiding invalid transactions in blocks*.
+  In order to be sure that the block does not contain *any* invalid transaction, there may be
+  no way other than fully executing the transactions in the block as though it was the *decided*
+  block.
+* *Quick `FinalizeBlock` execution*.
+  Upon reception of the decided block via `FinalizeBlock`, if that same block was executed
+  upon `PrepareProposal` or `ProcessProposal` and the resulting state was kept in memory, the
+  Application can simply apply that state (faster) to the main state, rather than reexecuting
+  the decided block (slower).
+
+`PrepareProposal`/`ProcessProposal` can be called many times for a given height. Moreover,
+it is not possible to accurately predict which of the blocks proposed in a height will be decided,
+being delivered to the Application in that height's `FinalizeBlock`.
+Therefore, the state resulting from executing a proposed block, denoted a *candidate state*, should
+be kept in memory as a possible final state for that height. When `FinalizeBlock` is called, the Application should
+check if the decided block corresponds to one of its candidate states; if so, it will apply it as
+its *ExecuteTxState* (see [Consensus Connection](#consensus-connection) below),
+which will be persisted during the upcoming `Commit` call.
+
+Under adverse conditions (e.g., network instability), Tendermint might take many rounds. 
+In this case, potentially many proposed blocks will be disclosed to the Application for a given height.
+By the nature of Tendermint's consensus algorithm, the number of proposed blocks received by the Application
+for a particular height cannot be bounded, so Application developers must act with care and use mechanisms
+to bound memory usage. As a general rule, the Application should be ready to discard candidate states
+before `FinalizeBlock`, even if one of them might end up corresponding to the
+decided block and thus have to be reexecuted upon `FinalizeBlock`.
+
+### States and ABCI++ Connections
+
+#### Consensus Connection
+
+The Consensus Connection should maintain an *ExecuteTxState* — the working state
+for block execution. It should be updated by the call to `FinalizeBlock`
+during block execution and committed to disk as the "latest
+committed state" during `Commit`. Execution of a proposed block (via `PrepareProposal`/`ProcessProposal`)
+**must not** update the *ExecuteTxState*, but rather be kept as a separate candidate state until `FinalizeBlock`
+confirms which of the candidate states (if any) can be used to update *ExecuteTxState*.
+
+#### Mempool Connection
+
+The mempool Connection maintains *CheckTxState*. Tendermint sequentially processes an incoming
+transaction (via RPC from client or P2P from the gossip layer) against *CheckTxState*.
+If the processing does not return any error, the transaction is accepted into the mempool
+and Tendermint starts gossipping it.
+*CheckTxState* should be reset to the latest committed state
+at the end of every `Commit`.
+
+During the execution of a consensus instance, the *CheckTxState* may be updated concurrently with the
+*ExecuteTxState*, as messages may be sent concurrently on the Consensus and Mempool connections.
+At the end of the consensus instance, as described above, Tendermint locks the mempool and flushes
+the mempool connection before calling `Commit`. 
This ensures that all pending `CheckTx` calls are
+responded to and no new ones can begin.
+
+After the `Commit` call returns, while still holding the mempool lock, `CheckTx` is run again on all
+transactions that remain in the node's local mempool after filtering those included in the block.
+Parameter `Type` in `RequestCheckTx`
+indicates whether an incoming transaction is new (`CheckTxType_New`), or a
+recheck (`CheckTxType_Recheck`).
+
+Finally, after re-checking transactions in the mempool, Tendermint will unlock
+the mempool connection. New transactions are once again able to be processed through `CheckTx`.
+
+Note that `CheckTx` is just a weak filter to keep invalid transactions out of the mempool and,
+ultimately, out of the blockchain.
+Since the transaction cannot be guaranteed to be checked against the exact same state as it
+will be executed as part of a (potential) decided block, `CheckTx` shouldn't check *everything*
+that affects the transaction's validity, in particular those checks whose validity may depend on
+transaction ordering. `CheckTx` is weak because a Byzantine node need not care about `CheckTx`;
+it can propose a block full of invalid transactions if it wants. The mechanism ABCI++ has
+in place for dealing with such behavior is `ProcessProposal`.
+
+##### Replay Protection
+
+It is possible for old transactions to be sent again to the Application. This is typically
+undesirable for all transactions, except for a generally small subset of them which are idempotent.
+
+The mempool has a mechanism to prevent duplicated transactions from being processed.
+This mechanism is nevertheless best-effort (currently based on the indexer)
+and does not provide any guarantee of non-duplication.
+It is thus up to the Application to implement an application-specific
+replay protection mechanism with strong guarantees as part of the logic in `CheckTx`.
+
+#### Info/Query Connection
+
+The Info (or Query) Connection should maintain a `QueryState`. 
This connection has two
+purposes: 1) having the application answer the queries Tendermint receives from users
+(see section [Query](#query)),
+and 2) synchronizing Tendermint and the Application at start up time (see
+[Crash Recovery](#crash-recovery))
+or after state sync (see [State Sync](#state-sync)).
+
+`QueryState` is a read-only copy of *ExecuteTxState* as it was after the last
+`Commit`, i.e.
+after the full block has been processed and the state committed to disk.
+
+#### Snapshot Connection
+
+The Snapshot Connection is used to serve state sync snapshots for other nodes
+and/or restore state sync snapshots to a local node being bootstrapped.
+Snapshot management is optional: an Application may choose not to implement it.
+
+For more information, see Section [State Sync](#state-sync).
+
+### Transaction Results
+
+The Application is expected to return a list of
+[`ExecTxResult`](./abci%2B%2B_methods.md#exectxresult) in
+[`ResponseFinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock). The list of transaction
+results must respect the same order as the list of transactions delivered via
+[`RequestFinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock).
+This section discusses the fields inside this structure, along with the fields in
+[`ResponseCheckTx`](./abci%2B%2B_methods.md#checktx),
+whose semantics are similar.
+
+The `Info` and `Log` fields are
+non-deterministic values for debugging/convenience purposes. Tendermint logs them but they
+are otherwise ignored.
+
+#### Gas
+
+Ethereum introduced the notion of *gas* as an abstract representation of the
+cost of the resources consumed by nodes when processing a transaction. Every operation in the
+Ethereum Virtual Machine uses some amount of gas.
+Gas has a market-variable price based on which miners can accept or reject to execute a
+particular operation.
+
+Users propose a maximum amount of gas for their transaction; if the transaction uses less, they get
+the difference credited back. 
Tendermint adopts a similar abstraction,
+though uses it only optionally and weakly, allowing applications to define
+their own sense of the cost of execution.
+
+In Tendermint, the [ConsensusParams.Block.MaxGas](#consensus-parameters) limits the amount of
+total gas that can be used by all transactions in a block.
+The default value is `-1`, which means the block gas limit is not enforced, or that the concept of
+gas is meaningless.
+
+Responses contain a `GasWanted` and `GasUsed` field. The former is the maximum
+amount of gas the sender of a transaction is willing to use, and the latter is how much it actually
+used. Applications should enforce that `GasUsed <= GasWanted` — i.e. transaction execution
+or validation should fail before it can use more resources than it requested.
+
+When `MaxGas > -1`, Tendermint enforces the following rules:
+
+* `GasWanted <= MaxGas` for every transaction in the mempool
+* `(sum of GasWanted in a block) <= MaxGas` when proposing a block
+
+If `MaxGas == -1`, no rules about gas are enforced.
+
+In v0.35.x and earlier versions, Tendermint does not enforce anything about Gas in consensus,
+only in the mempool.
+This means it does not guarantee that committed blocks satisfy these rules.
+It is the application's responsibility to return non-zero response codes when gas limits are exceeded
+when executing the transactions of a block.
+Since the introduction of `PrepareProposal` and `ProcessProposal` in v0.36.x, it is now possible
+for the Application to enforce that all blocks proposed (and voted for) in consensus — and thus all
+blocks decided — respect the `MaxGas` limits described above. 
+
+Since the Application should enforce that `GasUsed <= GasWanted` when executing a transaction, and
+it can use `PrepareProposal` and `ProcessProposal` to enforce that `(sum of GasWanted in a block) <= MaxGas`
+in all proposed or prevoted blocks,
+we have:
+
+* `(sum of GasUsed in a block) <= MaxGas` for every block
+
+The `GasUsed` field is ignored by Tendermint.
+
+#### Specifics of `ResponseCheckTx`
+
+If `Code != 0`, the transaction will be rejected from the mempool and hence
+not broadcasted to other peers and not included in a proposal block.
+
+`Data` contains the result of the `CheckTx` transaction execution, if any. It does not need to be
+deterministic since, given a transaction, nodes' Applications
+might have different *CheckTxState* values when they receive it and check its validity
+via `CheckTx`.
+Tendermint ignores this value in `ResponseCheckTx`.
+
+From v0.35.x on, there is a `Priority` field in `ResponseCheckTx` that can be
+used to explicitly prioritize transactions in the mempool for inclusion in a block
+proposal.
+
+#### Specifics of `ExecTxResult`
+
+`FinalizeBlock` is the workhorse of the blockchain. Tendermint delivers the decided block,
+including the list of all its transactions synchronously to the Application.
+The block delivered (and thus the transaction order) is the same at all correct nodes as guaranteed
+by the Agreement property of Tendermint consensus.
+
+In same block execution mode, field `LastResultsHash` in the block header refers to the results
+of all transactions stored in that block. Therefore,
+`PrepareProposal` must return `ExecTxResult` so that it can
+be used to build the block to be proposed in the current height.
+
+The `Data` field in `ExecTxResult` contains an array of bytes with the transaction result.
+It must be deterministic (i.e., the same value must be returned at all nodes), but it can contain arbitrary
+data. Likewise, the value of `Code` must be deterministic. 
+
+If `Code != 0`, the transaction will be marked invalid,
+though it is still included in the block. Invalid transactions are not indexed, as they are
+considered analogous to those that failed `CheckTx`.
+
+Both the `Code` and `Data` are included in a structure that is hashed into the
+`LastResultsHash` of the block header in the next height (next block execution mode), or the
+header of the block to propose in the current height (same block execution mode, `ExecTxResult` as
+part of `PrepareProposal`).
+
+`Events` include any events for the execution, which Tendermint will use to index
+the transaction by. This allows transactions to be queried according to what
+events took place during their execution.
+
+### Updating the Validator Set
+
+The application may set the validator set during
+[`InitChain`](./abci%2B%2B_methods.md#initchain), and may update it during
+[`FinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock)
+(next block execution mode) or
+[`PrepareProposal`](./abci%2B%2B_methods.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods.md#processproposal)
+(same block execution mode). In all cases, a structure of type
+[`ValidatorUpdate`](./abci%2B%2B_methods.md#validatorupdate) is returned.
+
+The `InitChain` method, used to initialize the Application, can return a list of validators.
+If the list is empty, Tendermint will use the validators loaded from the genesis
+file.
+If the list returned by `InitChain` is not empty, Tendermint will use its contents as the validator set.
+This way the application can set the initial validator set for the
+blockchain.
+
+Applications must ensure that a single set of validator updates does not contain duplicates, i.e.
+a given public key can only appear once within a given update. If an update includes
+duplicates, the block execution will fail irrecoverably. 
+
+Structure `ValidatorUpdate` contains a public key, which is used to identify the validator:
+The public key currently supports three types:
+
+* `ed25519`
+* `secp256k1`
+* `sr25519`
+
+Structure `ValidatorUpdate` also contains an `int64` field denoting the validator's new power.
+Applications must ensure that
+`ValidatorUpdate` structures abide by the following rules:
+
+* power must be non-negative
+* if power is set to 0, the validator must be in the validator set; it will be removed from the set
+* if power is greater than 0:
+    * if the validator is not in the validator set, it will be added to the
+      set with the given power
+    * if the validator is in the validator set, its power will be adjusted to the given power
+* the total power of the new validator set must not exceed `MaxTotalVotingPower`, where
+  `MaxTotalVotingPower = MaxInt64 / 8`
+
+Note the updates returned after processing the block at height `H` will only take effect
+at block `H+2` (see Section [Methods](./abci%2B%2B_methods.md)).
+
+### Consensus Parameters
+
+`ConsensusParams` are global parameters that apply to all validators in a blockchain.
+They enforce certain limits in the blockchain, like the maximum size
+of blocks, amount of gas used in a block, and the maximum acceptable age of
+evidence. They can be set in
+[`InitChain`](./abci%2B%2B_methods.md#initchain), and updated in
+[`FinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock)
+(next block execution mode) or
+[`PrepareProposal`](./abci%2B%2B_methods.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods.md#processproposal)
+(same block execution mode).
+These parameters are deterministically set and/or updated by the Application, so
+all full nodes have the same value at a given height.
+
+#### List of Parameters
+
+These are the current consensus parameters (as of v0.36.x):
+
+1. [BlockParams.MaxBytes](#blockparamsmaxbytes)
+2. [BlockParams.MaxGas](#blockparamsmaxgas)
+3. 
[EvidenceParams.MaxAgeDuration](#evidenceparamsmaxageduration) +4. [EvidenceParams.MaxAgeNumBlocks](#evidenceparamsmaxagenumblocks) +5. [EvidenceParams.MaxBytes](#evidenceparamsmaxbytes) +6. [SynchronyParams.MessageDelay](#synchronyparamsmessagedelay) +7. [SynchronyParams.Precision](#synchronyparamsprecision) +8. [TimeoutParams.Propose](#timeoutparamspropose) +9. [TimeoutParams.ProposeDelta](#timeoutparamsproposedelta) +10. [TimeoutParams.Vote](#timeoutparamsvote) +11. [TimeoutParams.VoteDelta](#timeoutparamsvotedelta) +12. [TimeoutParams.Commit](#timeoutparamscommit) +13. [TimeoutParams.BypassCommitTimeout](#timeoutparamsbypasscommittimeout) + +##### BlockParams.MaxBytes + +The maximum size of a complete Protobuf encoded block. +This is enforced by Tendermint consensus. + +This implies a maximum transaction size that is this `MaxBytes`, less the expected size of +the header, the validator set, and any included evidence in the block. + +Must have `0 < MaxBytes < 100 MB`. + +##### BlockParams.MaxGas + +The maximum of the sum of `GasWanted` that will be allowed in a proposed block. +This is *not* enforced by Tendermint consensus. +It is left to the Application to enforce (ie. if transactions are included past the +limit, they should return non-zero codes). It is used by Tendermint to limit the +transactions included in a proposed block. + +Must have `MaxGas >= -1`. +If `MaxGas == -1`, no limit is enforced. + +##### EvidenceParams.MaxAgeDuration + +This is the maximum age of evidence in time units. +This is enforced by Tendermint consensus. + +If a block includes evidence older than this (AND the evidence was created more +than `MaxAgeNumBlocks` ago), the block will be rejected (validators won't vote +for it). + +Must have `MaxAgeDuration > 0`. + +##### EvidenceParams.MaxAgeNumBlocks + +This is the maximum age of evidence in blocks. +This is enforced by Tendermint consensus. 
+ +If a block includes evidence older than this (AND the evidence was created more +than `MaxAgeDuration` ago), the block will be rejected (validators won't vote +for it). + +Must have `MaxAgeNumBlocks > 0`. + +##### EvidenceParams.MaxBytes + +This is the maximum size of total evidence in bytes that can be committed to a +single block. It should fall comfortably under the max block bytes. + +Its value must not exceed the size of +a block minus its overhead ( ~ `BlockParams.MaxBytes`). + +Must have `MaxBytes > 0`. + +##### SynchronyParams.MessageDelay + +This sets a bound on how long a proposal message may take to reach all +validators on a network and still be considered valid. + +This parameter is part of the +[proposer-based timestamps](../consensus/proposer-based-timestamp) +(PBTS) algorithm. + +##### SynchronyParams.Precision + +This sets a bound on how skewed a proposer's clock may be from any validator +on the network while still producing valid proposals. + +This parameter is part of the +[proposer-based timestamps](../consensus/proposer-based-timestamp) +(PBTS) algorithm. + +##### TimeoutParams.Propose + +Timeout in ms of the propose step of the Tendermint consensus algorithm. +This value is the initial timeout at every height (round 0). + +The value in subsequent rounds is modified by parameter `ProposeDelta`. +When a new height is started, the `Propose` timeout value is reset to this +parameter. + +If a node waiting for a proposal message does not receive one matching its +current height and round before this timeout, the node will issue a +`nil` prevote for the round and advance to the next step. + +##### TimeoutParams.ProposeDelta + +Increment in ms to be added to the `Propose` timeout every time the Tendermint +consensus algorithm advances one round in a given height. + +When a new height is started, the `Propose` timeout value is reset. 
+
+##### TimeoutParams.Vote
+
+Timeout in ms of the prevote and precommit steps of the Tendermint consensus
+algorithm.
+This value is the initial timeout at every height (round 0).
+
+The value in subsequent rounds is modified by parameter `VoteDelta`.
+When a new height is started, the `Vote` timeout value is reset to this
+parameter.
+
+The `Vote` timeout does not begin until a quorum of votes has been received.
+Once a quorum of votes has been seen and this timeout elapses, Tendermint will
+proceed to the next step of the consensus algorithm. If Tendermint receives
+all of the remaining votes before the end of the timeout, it will proceed
+to the next step immediately.
+
+##### TimeoutParams.VoteDelta
+
+Increment in ms to be added to the `Vote` timeout every time the Tendermint
+consensus algorithm advances one round in a given height.
+
+When a new height is started, the `Vote` timeout value is reset.
+
+##### TimeoutParams.Commit
+
+This configures how long Tendermint will wait after receiving a quorum of
+precommits before beginning consensus for the next height. This can be
+used to allow slow precommits to arrive for inclusion in the next height
+before progressing.
+
+##### TimeoutParams.BypassCommitTimeout
+
+This configures the node to proceed immediately to the next height once the
+node has received all precommits for a block, forgoing the remaining commit timeout.
+Setting this parameter to `false` (the default) causes Tendermint to wait
+for the full commit timeout configured in `TimeoutParams.Commit`.
+
+##### ABCIParams.VoteExtensionsEnableHeight
+
+This parameter is either 0 or a positive height at which vote extensions
+become mandatory. If the value is zero (which is the default), vote
+extensions are not required. Otherwise, at all heights greater than the
+configured height `H` vote extensions must be present (even if empty). 
+
+When the configured height `H` is reached, `PrepareProposal` will not
+include vote extensions yet, but `ExtendVote` and `VerifyVoteExtension` will
+be called. Then, when reaching height `H+1`, `PrepareProposal` will
+include the vote extensions from height `H`. For all heights after `H`:
+
+* vote extensions cannot be disabled,
+* they are mandatory: all precommit messages sent MUST have an extension
+  attached. Nevertheless, the application MAY provide 0-length
+  extensions.
+
+Must always be set to a future height. Once set to a value different from
+0, its value must not be changed.
+
+#### Updating Consensus Parameters
+
+The application may set the `ConsensusParams` during
+[`InitChain`](./abci%2B%2B_methods.md#initchain),
+and update them during
+[`FinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock)
+(next block execution mode) or
+[`PrepareProposal`](./abci%2B%2B_methods.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods.md#processproposal)
+(same block execution mode).
+If the `ConsensusParams` is empty, it will be ignored. Each field
+that is not empty will be applied in full. For instance, if updating the
+`Block.MaxBytes`, applications must also set the other `Block` fields (like
+`Block.MaxGas`), even if they are unchanged, as they will otherwise cause the
+value to be updated to the default.
+
+##### `InitChain`
+
+`ResponseInitChain` includes a `ConsensusParams` parameter.
+If `ConsensusParams` is `nil`, Tendermint will use the params loaded in the genesis
+file. If `ConsensusParams` is not `nil`, Tendermint will use it.
+This way the application can determine the initial consensus parameters for the
+blockchain.
+
+##### `FinalizeBlock`, `PrepareProposal`/`ProcessProposal`
+
+In next block execution mode, `ResponseFinalizeBlock` accepts a `ConsensusParams` parameter.
+If `ConsensusParams` is `nil`, Tendermint will do nothing.
+If `ConsensusParams` is not `nil`, Tendermint will use it. 
+
+This way the application can update the consensus parameters over time.
+
+Likewise, in same block execution mode, `PrepareProposal` and `ProcessProposal` include
+a `ConsensusParams` parameter. `PrepareProposal` may return a `ConsensusParams` to update
+the consensus parameters in the block that is about to be proposed. If it returns `nil`,
+the consensus parameters will not be updated. `ProcessProposal` also accepts a
+`ConsensusParams` parameter, which Tendermint will use to calculate the corresponding
+hashes and sanity-check them against those of the block that triggered `ProcessProposal`
+in the first place.
+
+Note the updates returned in block `H` will take effect right away for block
+`H+1` (both in next block and same block execution mode).
+
+### `Query`
+
+`Query` is a generic method with lots of flexibility to enable diverse sets
+of queries on application state. Tendermint makes use of `Query` to filter new peers
+based on ID and IP, and exposes `Query` to the user over RPC.
+
+Note that calls to `Query` are not replicated across nodes, but rather query the
+local node's state - hence they may return stale reads. For reads that require
+consensus, use a transaction.
+
+The most important use of `Query` is to return Merkle proofs of the application state at some height
+that can be used for efficient application-specific light-clients.
+
+Note Tendermint has technically no requirements from the `Query`
+message for normal operation - that is, the ABCI app developer need not implement
+Query functionality if they do not wish to.
+
+#### Query Proofs
+
+The Tendermint block header includes a number of hashes, each providing an
+anchor for some type of proof about the blockchain. The `ValidatorsHash` enables
+quick verification of the validator set, the `DataHash` gives quick
+verification of the transactions included in the block. 
+
+The `AppHash` is unique in that it is application specific, and allows for
+application-specific Merkle proofs about the state of the application.
+While some applications keep all relevant state in the transactions themselves
+(like Bitcoin and its UTXOs), others maintain a separated state that is
+computed deterministically *from* transactions, but is not contained directly in
+the transactions themselves (like Ethereum contracts and accounts).
+For such applications, the `AppHash` provides a much more efficient way to verify light-client proofs.
+
+ABCI applications can take advantage of more efficient light-client proofs for
+their state as follows:
+
+* in next block execution mode, return the Merkle root of the deterministic application state in
+  `ResponseCommit.Data`. This Merkle root will be included as the `AppHash` in the next block.
+* in same block execution mode, return the Merkle root of the deterministic application state
+  in `ResponsePrepareProposal.AppHash`. This Merkle root will be included as the `AppHash` in
+  the block that is about to be proposed.
+* return efficient Merkle proofs about that application state in `ResponseQuery.Proof`
+  that can be verified using the `AppHash` of the corresponding block.
+
+For instance, this allows an application's light-client to verify proofs of
+absence in the application state, something which is much less efficient to do using the block hash.
+
+Some applications (eg. Ethereum, Cosmos-SDK) have multiple "levels" of Merkle trees,
+where the leaves of one tree are the root hashes of others. To support this, and
+the general variability in Merkle proofs, the `ResponseQuery.Proof` has some minimal structure:
+
+```protobuf
+message ProofOps {
+  repeated ProofOp ops = 1;
+}
+
+message ProofOp {
+  string type = 1;
+  bytes key = 2;
+  bytes data = 3;
+}
+```
+
+Each `ProofOp` contains a proof for a single key in a single Merkle tree, of the specified `type`. 
+This allows ABCI to support many different kinds of Merkle trees, encoding
+formats, and proofs (eg. of presence and absence) just by varying the `type`.
+The `data` contains the actual encoded proof, encoded according to the `type`.
+When verifying the full proof, the root hash for one ProofOp is the value being
+verified for the next ProofOp in the list. The root hash of the final ProofOp in
+the list should match the `AppHash` being verified against.
+
+#### Peer Filtering
+
+When Tendermint connects to a peer, it sends two queries to the ABCI application
+using the following paths, with no additional data:
+
+* `/p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denote the IP address and
+  the port of the connection
+* `p2p/filter/id/<ID>`, where `<ID>` is the peer node ID (ie. the
+  pubkey.Address() for the peer's PubKey)
+
+If either of these queries return a non-zero ABCI code, Tendermint will refuse
+to connect to the peer.
+
+#### Paths
+
+Queries are directed at paths, and may optionally include additional data.
+
+The expectation is for there to be some number of high level paths
+differentiating concerns, like `/p2p`, `/store`, and `/app`. Currently,
+Tendermint only uses `/p2p`, for filtering peers. For more advanced use, see the
+implementation of
+[Query in the Cosmos-SDK](https://github.com/cosmos/cosmos-sdk/blob/v0.23.1/baseapp/baseapp.go#L333).
+
+### Crash Recovery
+
+On startup, Tendermint calls the `Info` method on the Info Connection to get the latest
+committed state of the app. The app MUST return information consistent with the
+last block it successfully completed Commit for.
+
+If the app successfully committed block H, then `last_block_height = H` and `last_block_app_hash = <hash returned by Commit for block H>`. If the app
+failed during the Commit of block H, then `last_block_height = H-1` and
+`last_block_app_hash = <hash returned by Commit for block H-1, which is the hash in the header of block H>`.
+
+We now distinguish three heights, and describe how Tendermint syncs itself with
+the app.
+
+```md
+storeBlockHeight = height of the last block Tendermint saw a commit for
+stateBlockHeight = height of the last block for which Tendermint completed all
+    block processing and saved all ABCI results to disk
+appBlockHeight = height of the last block for which ABCI app successfully
+    completed Commit
+
+```
+
+Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight`.
+Note also Tendermint never calls Commit on an ABCI app twice for the same height.
+
+The procedure is as follows.
+
+First, some simple start conditions:
+
+If `appBlockHeight == 0`, then call InitChain.
+
+If `storeBlockHeight == 0`, we're done.
+
+Now, some sanity checks:
+
+If `storeBlockHeight < appBlockHeight`, error
+If `storeBlockHeight < stateBlockHeight`, panic
+If `storeBlockHeight > stateBlockHeight+1`, panic
+
+Now, the meat:
+
+If `storeBlockHeight == stateBlockHeight && appBlockHeight < storeBlockHeight`,
+replay all blocks in full from `appBlockHeight` to `storeBlockHeight`.
+This happens if we completed processing the block, but the app forgot its height.
+
+If `storeBlockHeight == stateBlockHeight && appBlockHeight == storeBlockHeight`, we're done.
+This happens if we crashed at an opportune spot.
+
+If `storeBlockHeight == stateBlockHeight+1`
+This happens if we started processing the block but didn't finish.
+
+If `appBlockHeight < stateBlockHeight`
+    replay all blocks in full from `appBlockHeight` to `storeBlockHeight-1`,
+    and replay the block at `storeBlockHeight` using the WAL.
+This happens if the app forgot the last block it committed.
+
+If `appBlockHeight == stateBlockHeight`,
+    replay the last block (storeBlockHeight) in full.
+This happens if we crashed before the app finished Commit.
+
+If `appBlockHeight == storeBlockHeight`
+    update the state using the saved ABCI responses but don't run the block against the real app.
+This happens if we crashed after the app finished Commit but before Tendermint saved the state.
+ +### State Sync + +A new node joining the network can simply join consensus at the genesis height and replay all +historical blocks until it is caught up. However, for large chains this can take a significant +amount of time, often on the order of days or weeks. + +State sync is an alternative mechanism for bootstrapping a new node, where it fetches a snapshot +of the state machine at a given height and restores it. Depending on the application, this can +be several orders of magnitude faster than replaying blocks. + +Note that state sync does not currently backfill historical blocks, so the node will have a +truncated block history - users are advised to consider the broader network implications of this in +terms of block availability and auditability. This functionality may be added in the future. + +For details on the specific ABCI calls and types, see the +[methods](abci%2B%2B_methods.md) section. + +#### Taking Snapshots + +Applications that want to support state syncing must take state snapshots at regular intervals. How +this is accomplished is entirely up to the application. A snapshot consists of some metadata and +a set of binary chunks in an arbitrary format: + +* `Height (uint64)`: The height at which the snapshot is taken. It must be taken after the given + height has been committed, and must not contain data from any later heights. + +* `Format (uint32)`: An arbitrary snapshot format identifier. This can be used to version snapshot + formats, e.g. to switch from Protobuf to MessagePack for serialization. The application can use + this when restoring to choose whether to accept or reject a snapshot. + +* `Chunks (uint32)`: The number of chunks in the snapshot. Each chunk contains arbitrary binary + data, and should be less than 16 MB; 10 MB is a good starting point. + +* `Hash ([]byte)`: An arbitrary hash of the snapshot. This is used to check whether a snapshot is + the same across nodes when downloading chunks. 
+ +* `Metadata ([]byte)`: Arbitrary snapshot metadata, e.g. chunk hashes for verification or any other + necessary info. + +For a snapshot to be considered the same across nodes, all of these fields must be identical. When +sent across the network, snapshot metadata messages are limited to 4 MB. + +When a new node is running state sync and discovering snapshots, Tendermint will query an existing +application via the ABCI `ListSnapshots` method to discover available snapshots, and load binary +snapshot chunks via `LoadSnapshotChunk`. The application is free to choose how to implement this +and which formats to use, but must provide the following guarantees: + +* **Consistent:** A snapshot must be taken at a single isolated height, unaffected by + concurrent writes. This can be accomplished by using a data store that supports ACID + transactions with snapshot isolation. + +* **Asynchronous:** Taking a snapshot can be time-consuming, so it must not halt chain progress, + for example by running in a separate thread. + +* **Deterministic:** A snapshot taken at the same height in the same format must be identical + (at the byte level) across nodes, including all metadata. This ensures good availability of + chunks, and that they fit together across nodes. + +A very basic approach might be to use a datastore with MVCC transactions (such as RocksDB), +start a transaction immediately after block commit, and spawn a new thread which is passed the +transaction handle. This thread can then export all data items, serialize them using e.g. +Protobuf, hash the byte stream, split it into chunks, and store the chunks in the file system +along with some metadata - all while the blockchain is applying new blocks in parallel. + +A more advanced approach might include incremental verification of individual chunks against the +chain app hash, parallel or batched exports, compression, and so on. 
+ +Old snapshots should be removed after some time - generally only the last two snapshots are needed +(to prevent the last one from being removed while a node is restoring it). + +#### Bootstrapping a Node + +An empty node can be state synced by setting the configuration option `statesync.enabled = +true`. The node also needs the chain genesis file for basic chain info, and configuration for +light client verification of the restored snapshot: a set of Tendermint RPC servers, and a +trusted header hash and corresponding height from a trusted source, via the `statesync` +configuration section. + +Once started, the node will connect to the P2P network and begin discovering snapshots. These +will be offered to the local application via the `OfferSnapshot` ABCI method. Once a snapshot +is accepted Tendermint will fetch and apply the snapshot chunks. After all chunks have been +successfully applied, Tendermint verifies the app's `AppHash` against the chain using the light +client, then switches the node to normal consensus operation. + +##### Snapshot Discovery + +When the empty node joins the P2P network, it asks all peers to report snapshots via the +`ListSnapshots` ABCI call (limited to 10 per node). After some time, the node picks the most +suitable snapshot (generally prioritized by height, format, and number of peers), and offers it +to the application via `OfferSnapshot`. The application can choose a number of responses, +including accepting or rejecting it, rejecting the offered format, rejecting the peer who sent +it, and so on. Tendermint will keep discovering and offering snapshots until one is accepted or +the application aborts. + +##### Snapshot Restoration + +Once a snapshot has been accepted via `OfferSnapshot`, Tendermint begins downloading chunks from +any peers that have the same snapshot (i.e. that have identical metadata fields). 
Chunks are +spooled in a temporary directory, and then given to the application in sequential order via +`ApplySnapshotChunk` until all chunks have been accepted. + +The method for restoring snapshot chunks is entirely up to the application. + +During restoration, the application can respond to `ApplySnapshotChunk` with instructions for how +to continue. This will typically be to accept the chunk and await the next one, but it can also +ask for chunks to be refetched (either the current one or any number of previous ones), P2P peers +to be banned, snapshots to be rejected or retried, and a number of other responses - see the ABCI +reference for details. + +If Tendermint fails to fetch a chunk after some time, it will reject the snapshot and try a +different one via `OfferSnapshot` - the application can choose whether it wants to support +restarting restoration, or simply abort with an error. + +##### Snapshot Verification + +Once all chunks have been accepted, Tendermint issues an `Info` ABCI call to retrieve the +`LastBlockAppHash`. This is compared with the trusted app hash from the chain, retrieved and +verified using the light client. Tendermint also checks that `LastBlockHeight` corresponds to the +height of the snapshot. + +This verification ensures that an application is valid before joining the network. However, the +snapshot restoration may take a long time to complete, so applications may want to employ additional +verification during the restore to detect failures early. This might e.g. include incremental +verification of each chunk against the app hash (using bundled Merkle proofs), checksums to +protect against data corruption by the disk or network, and so on. However, it is important to +note that the only trusted information available is the app hash, and all other snapshot metadata +can be spoofed by adversaries. 
+
+Apps may also want to consider state sync denial-of-service vectors, where adversaries provide
+invalid or harmful snapshots to prevent nodes from joining the network. The application can
+counteract this by asking Tendermint to ban peers. As a last resort, node operators can use
+P2P configuration options to whitelist a set of trusted peers that can provide valid snapshots.
+
+##### Transition to Consensus
+
+Once the snapshots have all been restored, Tendermint gathers additional information necessary for
+bootstrapping the node (e.g. chain ID, consensus parameters, validator sets, and block headers)
+from the genesis file and light client RPC servers. It also calls `Info` to verify the following:
+
+* that the app hash from the snapshot it has delivered to the Application matches the app hash
+  stored in the next height's block (in next block execution), or the current height's block
+  (same block execution)
+* that the version that the Application returns in `ResponseInfo` matches the version in the
+  current height's block header
+
+Once the state machine has been restored and Tendermint has gathered this additional
+information, it transitions to block sync (if enabled) to fetch any remaining blocks up to the chain
+head, and then transitions to regular consensus operation. At this point the node operates like
+any other node, apart from having a truncated block history at the height of the restored snapshot.
diff --git a/spec/abci++/abci++_basic_concepts.md b/spec/abci++/abci++_basic_concepts.md
new file mode 100644
index 0000000000..b02798b57f
--- /dev/null
+++ b/spec/abci++/abci++_basic_concepts.md
@@ -0,0 +1,468 @@
+---
+order: 1
+title: Overview and basic concepts
+---
+
+## Outline
+
+- [ABCI++ vs.
ABCI](#abci-vs-abci)
+- [Method overview](#method-overview)
+  - [Consensus/block execution methods](#consensusblock-execution-methods)
+  - [Mempool methods](#mempool-methods)
+  - [Info methods](#info-methods)
+  - [State-sync methods](#state-sync-methods)
+- [Next-block execution vs. same-block execution](#next-block-execution-vs-same-block-execution)
+- [Tendermint proposal timeout](#tendermint-proposal-timeout)
+- [Deterministic State-Machine Replication](#deterministic-state-machine-replication)
+- [Events](#events)
+- [Evidence](#evidence)
+- [Errors](#errors)
+
+# Overview and basic concepts
+
+## ABCI++ vs. ABCI
+
+[↑ Back to Outline](#outline)
+
+The Application's main role is to execute blocks decided (a.k.a. finalized) by consensus. The
+decided blocks are the main consensus's output to the (replicated) Application. With ABCI, the
+application only interacts with consensus at *decision* time. This restricted mode of interaction
+prevents numerous features for the Application, including many scalability improvements that are
+now better understood than when ABCI was first written. For example, many ideas proposed to improve
+scalability can be boiled down to "make the block proposers do work, so the network does not have
+to". This includes optimizations such as transaction level signature aggregation, state transition
+proofs, etc. Furthermore, many new security properties cannot be achieved in the current paradigm,
+as the Application cannot require validators to do more than executing the transactions contained in
+finalized blocks. This includes features such as threshold cryptography, and guaranteed IBC
+connection attempts.
+
+ABCI++ addresses these limitations by allowing the application to intervene at three key places of
+consensus execution: (a) at the moment a new proposal is to be created, (b) at the moment a
+proposal is to be validated, and (c) at the moment a (precommit) vote is sent/received.
The new +interface allows block proposers to perform application-dependent work in a block through the +`PrepareProposal` method (a); validators to perform application-dependent work and checks in a +proposed block through the `ProcessProposal` method (b); and applications to require their validators +do more than just validate blocks through the `ExtendVote` and `VerifyVoteExtension` methods (c). +Furthermore, ABCI++ coalesces {`BeginBlock`, [`DeliverTx`], `EndBlock`} into `FinalizeBlock`, as a +simplified, efficient way to deliver a decided block to the Application. + +## Method overview + +[↑ Back to Outline](#outline) + +Methods can be classified into four categories: *consensus*, *mempool*, *info*, and *state-sync*. + +### Consensus/block execution methods + +The first time a new blockchain is started, Tendermint calls `InitChain`. From then on, method +`FinalizeBlock` is executed upon the decision of each block, resulting in an updated Application +state. During the execution of an instance of consensus, which decides the block for a given +height, and before method `FinalizeBlock` is called, methods `PrepareProposal`, `ProcessProposal`, +`ExtendVote`, and `VerifyVoteExtension` may be called several times. See +[Tendermint's expected behavior](abci++_tmint_expected_behavior.md) for details on the possible +call sequences of these methods. + +- [**InitChain:**](./abci++_methods.md#initchain) This method initializes the blockchain. + Tendermint calls it once upon genesis. + +- [**PrepareProposal:**](./abci++_methods.md#prepareproposal) It allows the block + proposer to perform application-dependent work in a block before proposing it. + This enables, for instance, batch optimizations to a block, which has been empirically + demonstrated to be a key component for improved performance. Method `PrepareProposal` is called + every time Tendermint is about to broadcast a Proposal message, but no previous proposal has + been locked at the Tendermint level. 
Tendermint gathers outstanding transactions from the + mempool, generates a block header, and uses them to create a block to propose. Then, it calls + `RequestPrepareProposal` with the newly created proposal, called *raw proposal*. The Application + can make changes to the raw proposal, such as modifying transactions, and returns the + (potentially) modified proposal, called *prepared proposal* in the `ResponsePrepareProposal` + call. The logic modifying the raw proposal can be non-deterministic. + +- [**ProcessProposal:**](./abci++_methods.md#processproposal) It allows a validator to + perform application-dependent work in a proposed block. This enables features such as immediate + block execution, and allows the Application to reject invalid blocks. + Tendermint calls it when it receives a proposal and the Tendermint algorithm has not locked on a + value. The Application cannot modify the proposal at this point but can reject it if it is + invalid. If that is the case, Tendermint will prevote `nil` on the proposal, which has + strong liveness implications for Tendermint. As a general rule, the Application + SHOULD accept a prepared proposal passed via `ProcessProposal`, even if a part of + the proposal is invalid (e.g., an invalid transaction); the Application can + ignore the invalid part of the prepared proposal at block execution time. + +- [**ExtendVote:**](./abci++_methods.md#extendvote) It allows applications to force their + validators to do more than just validate within consensus. `ExtendVote` allows applications to + include non-deterministic data, opaque to Tendermint, to precommit messages (the final round of + voting). The data, called *vote extension*, will be broadcast and received together with the + vote it is extending, and will be made available to the Application in the next height, + in the rounds where the local process is the proposer. + Tendermint calls `ExtendVote` when it is about to send a non-`nil` precommit message. 
+  If the Application does not have vote extension information to provide at that time, it returns
+  a 0-length byte array as its vote extension.
+
+- [**VerifyVoteExtension:**](./abci++_methods.md#verifyvoteextension) It allows
+  validators to validate the vote extension data attached to a precommit message. If the validation
+  fails, the whole precommit message will be deemed invalid and ignored by Tendermint.
+  This has a negative impact on Tendermint's liveness, i.e., if vote extensions repeatedly cannot be
+  verified by correct validators, Tendermint may not be able to finalize a block even if sufficiently
+  many (+2/3) validators send precommit votes for that block. Thus, `VerifyVoteExtension`
+  should be used with special care.
+  As a general rule, an Application that detects an invalid vote extension SHOULD
+  accept it in `ResponseVerifyVoteExtension` and ignore it in its own logic. Tendermint calls it when
+  a process receives a precommit message with a (possibly empty) vote extension.
+
+- [**FinalizeBlock:**](./abci++_methods.md#finalizeblock) It delivers a decided block to the
+  Application. The Application must execute the transactions in the block deterministically and
+  update its state accordingly. Cryptographic commitments to the block and transaction results,
+  returned via the corresponding parameters in `ResponseFinalizeBlock`, are included in the header
+  of the next block. Tendermint calls it when a new block is decided.
+
+- [**Commit:**](./abci++_methods.md#commit) Instructs the Application to persist its
+  state. It is a fundamental part of Tendermint's crash-recovery mechanism that ensures the
+  synchronization between Tendermint and the Application upon recovery. Tendermint calls it just after
+  having persisted the data returned by `ResponseFinalizeBlock`. The Application can now discard
+  any state or data except the one resulting from executing the transactions in the decided block.
+
+### Mempool methods
+
+- [**CheckTx:**](./abci++_methods.md#checktx) This method allows the Application to validate
+  transactions. Validation can be stateless (e.g., checking signatures) or stateful
+  (e.g., account balances). The type of validation performed is up to the application. If a
+  transaction passes the validation, then Tendermint adds it to the mempool; otherwise the
+  transaction is discarded.
+  Tendermint calls it when it receives a new transaction either coming from an external
+  user (e.g., a client) or another node. Furthermore, Tendermint can be configured to call
+  re-`CheckTx` on all outstanding transactions in the mempool after calling `Commit` for a block.
+
+### Info methods
+
+- [**Info:**](./abci++_methods.md#info) Used to sync Tendermint with the Application during a
+  handshake that happens upon recovery, or on startup when state-sync is used.
+
+- [**Query:**](./abci++_methods.md#query) This method can be used to query the Application for
+  information about the application state.
+
+### State-sync methods
+
+State sync allows new nodes to rapidly bootstrap by discovering, fetching, and applying
+state machine (application) snapshots instead of replaying historical blocks. For more details, see the
+[state sync documentation](../p2p/messages/state-sync.md).
+
+New nodes discover and request snapshots from other nodes in the P2P network.
+A Tendermint node that receives a request for snapshots from a peer will call
+`ListSnapshots` on its Application. The Application returns the list of locally available
+snapshots.
+Note that the list does not contain the actual snapshots but metadata about them: height at which
+the snapshot was taken, application-specific verification data and more (see
+[snapshot data type](./abci++_methods.md#snapshot) for more details). After receiving a
+list of available snapshots from a peer, the new node can offer any of the snapshots in the list to
+its local Application via the `OfferSnapshot` method.
The Application can check at this point the +validity of the snapshot metadata. + +Snapshots may be quite large and are thus broken into smaller "chunks" that can be +assembled into the whole snapshot. Once the Application accepts a snapshot and +begins restoring it, Tendermint will fetch snapshot "chunks" from existing nodes. +The node providing "chunks" will fetch them from its local Application using +the `LoadSnapshotChunk` method. + +As the new node receives "chunks" it will apply them sequentially to the local +application with `ApplySnapshotChunk`. When all chunks have been applied, the +Application's `AppHash` is retrieved via an `Info` query. +To ensure that the sync proceeded correctly, Tendermint compares the local Application's `AppHash` +to the `AppHash` stored on the blockchain (verified via +[light client verification](../light-client/verification/README.md)). + +In summary: + +- [**ListSnapshots:**](./abci++_methods.md#listsnapshots) Used by nodes to discover available + snapshots on peers. + +- [**OfferSnapshot:**](./abci++_methods.md#offersnapshot) When a node receives a snapshot from a + peer, Tendermint uses this method to offer the snapshot to the Application. + +- [**LoadSnapshotChunk:**](./abci++_methods.md#loadsnapshotchunk) Used by Tendermint to retrieve + snapshot chunks from the Application to send to peers. + +- [**ApplySnapshotChunk:**](./abci++_methods.md#applysnapshotchunk) Used by Tendermint to hand + snapshot chunks to the Application. + +### Other methods + +Additionally, there is a [**Flush**](./abci++_methods.md#flush) method that is called on every connection, +and an [**Echo**](./abci++_methods.md#echo) method that is used for debugging. + +More details on managing state across connections can be found in the section on +[Managing Application State](./abci%2B%2B_app_requirements.md#managing-the-application-state-and-related-topics). + +## Next-block execution vs. 
same-block execution
+
+[↑ Back to Outline](#outline)
+
+In the original ABCI protocol, the only moment when the Application had access to a
+block was after it was decided. This led to a block execution model, called *next-block
+execution*, where some fields hashed in a block header refer to the execution of the
+previous block, namely:
+
+- the Merkle root of the Application's state
+- the transaction results
+- the consensus parameter updates
+- the validator updates
+
+With ABCI++, an Application may be configured to keep using the next-block execution model, by
+executing the decided block in `FinalizeBlock`. However, the new methods introduced —
+`PrepareProposal` and `ProcessProposal` — disclose the entire proposed block to the
+Application, allowing for its immediate execution. An Application implementing immediate execution
+may additionally wish to store certain data resulting from the block's execution in the same block
+that has just been executed. This brings about a new execution model, called
+*same-block execution*. An Application implementing this execution model, upon receiving a raw
+proposal via `RequestPrepareProposal` and potentially modifying its transaction list, fully
+executes the resulting prepared proposal as though it was the decided block (immediate execution),
+and the results of the block execution are used as follows:
+
+- The block execution may generate a set of events. The Application should store these events and
+  return them back to Tendermint during the `FinalizeBlock` call if the block is finally decided.
+- The Merkle root resulting from executing the prepared proposal is provided in
+  `ResponsePrepareProposal` and thus refers to the **current block**. Tendermint
+  will use it in the prepared proposal's header.
+- Likewise, the transaction results from executing the prepared proposal are
+  provided in `ResponsePrepareProposal` and refer to the transactions in the
+  **current block**.
Tendermint will use them to calculate the results hash + in the prepared proposal's header. +- The consensus parameter updates and validator updates are also provided in + `ResponsePrepareProposal` and reflect the result of the prepared proposal's + execution. They come into force in height H+1 (as opposed to the H+2 rule + in next-block execution model). + +If the Application is configured to keep the next-block execution model, it will not +provide any data in `ResponsePrepareProposal`, other than a potentially modified +transaction list. The Application may nevertheless choose to perform immediate execution even in +next-block execution mode, however same-block execution mode *requires* immediate execution. + +The long term plan is for the execution model to be set in a new boolean parameter *same_block* in +`ConsensusParams`. Once this parameter is introduced, it **must not** be changed once the +blockchain has started, unless the Application developers *really* know what they are doing. +However, modifying `ConsensusParams` structure cannot be done lightly if we are to +preserve blockchain compatibility. Therefore we need an interim solution until +soft upgrades are specified and implemented in Tendermint. This somewhat *unsafe* +solution consists in Tendermint assuming same-block execution if the Application +fills the above mentioned fields in `ResponsePrepareProposal`. + +## Tendermint proposal timeout + +Immediate execution requires the Application to fully execute the prepared block +before returning from `PrepareProposal`, this means that Tendermint cannot make progress +during the block execution. +This stands on Tendermint's critical path: if the Application takes a long time +executing the block, the default value of *TimeoutPropose* might not be sufficient +to accommodate the long block execution time and non-proposer nodes might time +out and prevote `nil`. The proposal, in this case, will probably be rejected and a new round will be necessary. 
+ +The Application is the best suited to provide a value for *TimeoutPropose* so +that the block execution time upon `PrepareProposal` fits well in the propose +timeout interval. Thus, the Application can adapt the value of *TimeoutPropose* at every height via +`TimeoutParams.Propose`, contained in `ConsensusParams`. + +## Deterministic State-Machine Replication + +[↑ Back to Outline](#outline) + +ABCI++ applications must implement deterministic finite-state machines to be +securely replicated by the Tendermint consensus engine. This means block execution +must be strictly deterministic: given the same +ordered set of transactions, all nodes will compute identical responses, for all +successive `FinalizeBlock` calls. This is critical because the +responses are included in the header of the next block, either via a Merkle root +or directly, so all nodes must agree on exactly what they are. + +For this reason, it is recommended that application state is not exposed to any +external user or process except via the ABCI connections to a consensus engine +like Tendermint Core. The Application must only change its state based on input +from block execution (`FinalizeBlock` calls), and not through +any other kind of request. This is the only way to ensure all nodes see the same +transactions and compute the same results. + +Some Applications may choose to implement immediate execution, which entails executing the blocks +that are about to be proposed (via `PrepareProposal`), and those that the Application is asked to +validate (via `ProcessProposal`). However, the state changes caused by processing those +proposed blocks must never replace the previous state until `FinalizeBlock` confirms +the block decided. + +Additionally, vote extensions or the validation thereof (via `ExtendVote` or +`VerifyVoteExtension`) must *never* have side effects on the current state. +They can only be used when their data is provided in a `RequestPrepareProposal` call. 
+ +If there is some non-determinism in the state machine, consensus will eventually +fail as nodes disagree over the correct values for the block header. The +non-determinism must be fixed and the nodes restarted. + +Sources of non-determinism in applications may include: + +- Hardware failures + - Cosmic rays, overheating, etc. +- Node-dependent state + - Random numbers + - Time +- Underspecification + - Library version changes + - Race conditions + - Floating point numbers + - JSON or protobuf serialization + - Iterating through hash-tables/maps/dictionaries +- External Sources + - Filesystem + - Network calls (eg. some external REST API service) + +See [#56](https://github.com/tendermint/abci/issues/56) for the original discussion. + +Note that some methods (`Query, FinalizeBlock`) return non-deterministic data in the form +of `Info` and `Log` fields. The `Log` is intended for the literal output from the Application's +logger, while the `Info` is any additional info that should be returned. These are the only fields +that are not included in block header computations, so we don't need agreement +on them. All other fields in the `Response*` must be strictly deterministic. + +## Events + +[↑ Back to Outline](#outline) + +Method `FinalizeBlock` includes an `events` field at the top level in its +`Response*`, and one `events` field per transaction included in the block. +Applications may respond to this ABCI++ method with an event list for each executed +transaction, and a general event list for the block itself. +Events allow applications to associate metadata with transactions and blocks. +Events returned via `FinalizeBlock` do not impact Tendermint consensus in any way +and instead exist to power subscriptions and queries of Tendermint state. + +An `Event` contains a `type` and a list of `EventAttributes`, which are key-value +string pairs denoting metadata about what happened during the method's (or transaction's) +execution. 
`Event` values can be used to index transactions and blocks according to what +happened during their execution. + +Each event has a `type` which is meant to categorize the event for a particular +`Response*` or `Tx`. A `Response*` or `Tx` may contain multiple events with duplicate +`type` values, where each distinct entry is meant to categorize attributes for a +particular event. Every key and value in an event's attributes must be UTF-8 +encoded strings along with the event type itself. + +```protobuf +message Event { + string type = 1; + repeated EventAttribute attributes = 2; +} +``` + +The attributes of an `Event` consist of a `key`, a `value`, and an `index` flag. The +index flag notifies the Tendermint indexer to index the attribute. The value of +the `index` flag is non-deterministic and may vary across different nodes in the network. + +```protobuf +message EventAttribute { + bytes key = 1; + bytes value = 2; + bool index = 3; // nondeterministic +} +``` + +Example: + +```go + abci.ResponseFinalizeBlock{ + // ... 
+ Events: []abci.Event{ + { + Type: "validator.provisions", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("balance"), Value: []byte("..."), Index: true}, + }, + }, + { + Type: "validator.provisions", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: false}, + abci.EventAttribute{Key: []byte("balance"), Value: []byte("..."), Index: false}, + }, + }, + { + Type: "validator.slashed", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: false}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("reason"), Value: []byte("..."), Index: true}, + }, + }, + // ... + }, +} +``` + +## Evidence + +[↑ Back to Outline](#outline) + +Tendermint's security model relies on the use of evidences of misbehavior. An evidence is an +irrefutable proof of malicious behavior by a network participant. It is the responsibility of +Tendermint to detect such malicious behavior. When malicious behavior is detected, Tendermint +will gossip evidences of misbehavior to other nodes and commit the evidences to +the chain once they are verified by a subset of validators. These evidences will then be +passed on to the Application through ABCI++. It is the responsibility of the +Application to handle evidence of misbehavior and exercise punishment. + +There are two forms of evidence: Duplicate Vote and Light Client Attack. More +information can be found in either [data structures](../core/data_structures.md) +or [accountability](../light-client/accountability/). 
+ +EvidenceType has the following protobuf format: + +```protobuf +enum EvidenceType { + UNKNOWN = 0; + DUPLICATE_VOTE = 1; + LIGHT_CLIENT_ATTACK = 2; +} +``` + +## Errors + +[↑ Back to Outline](#outline) + +The `Query`, and `CheckTx` methods include a `Code` field in their `Response*`. +Field `Code` is meant to contain an application-specific response code. +A response code of `0` indicates no error. Any other response code +indicates to Tendermint that an error occurred. + +These methods also return a `Codespace` string to Tendermint. This field is +used to disambiguate `Code` values returned by different domains of the +Application. The `Codespace` is a namespace for the `Code`. + +Methods `Echo`, `Info`, and `InitChain` do not return errors. +An error in any of these methods represents a critical issue that Tendermint +has no reasonable way to handle. If there is an error in one +of these methods, the Application must crash to ensure that the error is safely +handled by an operator. + +Method `FinalizeBlock` is a special case. It contains a number of +`Code` and `Codespace` fields as part of type `ExecTxResult`. Each of +these codes reports errors related to the transaction it is attached to. +However, `FinalizeBlock` does not return errors at the top level, so the +same considerations on critical issues made for `Echo`, `Info`, and +`InitChain` also apply here. + +The handling of non-zero response codes by Tendermint is described below. + +### `CheckTx` + +When Tendermint receives a `ResponseCheckTx` with a non-zero `Code`, the associated +transaction will not be added to Tendermint's mempool or it will be removed if +it is already included. + +### `ExecTxResult` (as part of `FinalizeBlock`) + +The `ExecTxResult` type delivers transaction results from the Application to Tendermint. When +Tendermint receives a `ResponseFinalizeBlock` containing an `ExecTxResult` with a non-zero `Code`, +the response code is logged. Past `Code` values can be queried by clients. 
As the transaction was +part of a decided block, the `Code` does not influence Tendermint consensus. + +### `Query` + +When Tendermint receives a `ResponseQuery` with a non-zero `Code`, this code is +returned directly to the client that initiated the query. diff --git a/spec/abci++/abci++_client_server.md b/spec/abci++/abci++_client_server.md new file mode 100644 index 0000000000..652652dc9f --- /dev/null +++ b/spec/abci++/abci++_client_server.md @@ -0,0 +1,102 @@ +--- +order: 5 +title: Client and Server +--- + +# Client and Server + +This section is for those looking to implement their own ABCI Server, perhaps in +a new programming language. + +You are expected to have read all previous sections of ABCI++ specification, namely +[Basic Concepts](./abci%2B%2B_basic_concepts.md), +[Methods](./abci%2B%2B_methods.md), +[Application Requirements](./abci%2B%2B_app_requirements.md), and +[Expected Behavior](./abci%2B%2B_tmint_expected_behavior.md). + +## Message Protocol and Synchrony + +The message protocol consists of pairs of requests and responses defined in the +[protobuf file](../../proto/tendermint/abci/types.proto). + +Some messages have no fields, while others may include byte-arrays, strings, integers, +or custom protobuf types. + +For more details on protobuf, see the [documentation](https://developers.google.com/protocol-buffers/docs/overview). + +As of v0.36 requests are synchronous. For each of ABCI++'s four connections (see +[Connections](./abci%2B%2B_app_requirements.md)), when Tendermint issues a request to the +Application, it will wait for the response before continuing execution. As a side effect, +requests and responses are ordered for each connection, but not necessarily across connections. + +## Server Implementations + +To use ABCI in your programming language of choice, there must be an ABCI +server in that language. 
Tendermint supports four implementations of the ABCI server:

- in Tendermint's repository:
  - In-process
  - ABCI-socket
  - GRPC
- [tendermint-rs](https://github.com/informalsystems/tendermint-rs)
- [tower-abci](https://github.com/penumbra-zone/tower-abci)

The implementations in Tendermint's repository can be tested using `abci-cli` by setting
the `--abci` flag appropriately.

See examples, in various stages of maintenance, in
[Go](https://github.com/tendermint/tendermint/tree/master/abci/server),
[JavaScript](https://github.com/tendermint/js-abci),
[C++](https://github.com/mdyring/cpp-tmsp), and
[Java](https://github.com/jTendermint/jabci).

### In Process

The simplest implementation uses function calls in Golang.
This means ABCI applications written in Golang can be linked with Tendermint Core and run as a single binary.

### GRPC

If you are not using Golang,
but [GRPC](https://grpc.io/) is available in your language, this is the easiest approach,
though it will have significant performance overhead.

Please check GRPC's documentation to learn how to set up the Application as an
ABCI GRPC server.

### Socket

Tendermint's socket-based ABCI interface is an asynchronous,
raw socket server which provides ordered message passing over unix or tcp.
Messages are serialized using Protobuf3 and length-prefixed with a [signed Varint](https://developers.google.com/protocol-buffers/docs/encoding?csw=1#signed-integers).

If GRPC is not available in your language, if your application requires higher
performance, or if you otherwise enjoy programming, you may implement your own
ABCI server using Tendermint's socket-based ABCI interface.
The first step is to auto-generate the relevant data
types and codec in your language using `protoc`.
In addition to being proto3 encoded, messages coming over
the socket are length-prefixed. proto3 doesn't have an
official length-prefix standard, so we use our own. 
The first byte in +the prefix represents the length of the Big Endian encoded length. The +remaining bytes in the prefix are the Big Endian encoded length. + +For example, if the proto3 encoded ABCI message is `0xDEADBEEF` (4 +bytes long), the length-prefixed message is `0x0104DEADBEEF` (`01` byte for encoding the length `04` of the message). If the proto3 +encoded ABCI message is 65535 bytes long, the length-prefixed message +would start with 0x02FFFF. + +Note that this length-prefixing scheme does not apply for GRPC. + +Note that your ABCI server must be able to support multiple connections, as +Tendermint uses four connections. + +## Client + +There are currently two use-cases for an ABCI client. One is testing +tools that allow ABCI requests to be sent to the actual application via +command line. An example of this is `abci-cli`, which accepts CLI commands +to send corresponding ABCI requests. +The other is a consensus engine, such as Tendermint Core, +which makes ABCI requests to the application as prescribed by the consensus +algorithm used. diff --git a/spec/abci++/abci++_methods_002_draft.md b/spec/abci++/abci++_methods.md similarity index 99% rename from spec/abci++/abci++_methods_002_draft.md rename to spec/abci++/abci++_methods.md index b00f0f31d3..19ff930b74 100644 --- a/spec/abci++/abci++_methods_002_draft.md +++ b/spec/abci++/abci++_methods.md @@ -74,7 +74,7 @@ title: Methods * **Response**: | Name | Type | Description | Field Number | - |------------------|----------------------------------------------|-------------------------------------------------|--------------| + |------------------|----------------------------------------------|-------------------------------------------------|--------------| | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters (optional) | 1 | | validator_set | [ValidatorSetUpdate](#validatorsetupdate) | Initial validator set (optional). 
| 2 | | app_hash | bytes | Initial application hash. | 3 | diff --git a/spec/abci++/abci++_tmint_expected_behavior_002_draft.md b/spec/abci++/abci++_tmint_expected_behavior.md similarity index 66% rename from spec/abci++/abci++_tmint_expected_behavior_002_draft.md rename to spec/abci++/abci++_tmint_expected_behavior.md index 7786894505..8df5e6c844 100644 --- a/spec/abci++/abci++_tmint_expected_behavior_002_draft.md +++ b/spec/abci++/abci++_tmint_expected_behavior.md @@ -11,16 +11,20 @@ This section describes what the Application can expect from Tendermint. The Tendermint consensus algorithm is designed to protect safety under any network conditions, as long as less than 1/3 of validators' voting power is byzantine. Most of the time, though, the network will behave -synchronously and there will be no byzantine process. In these frequent, benign conditions: +synchronously, no process will fall behind, and there will be no byzantine process. The following describes +what will happen during a block height _h_ in these frequent, benign conditions: -* Tendermint will decide in round 0; +* Tendermint will decide in round 0, for height _h_; * `PrepareProposal` will be called exactly once at the proposer process of round 0, height _h_; -* `ProcessProposal` will be called exactly once at all processes except the proposer of round 0, and +* `ProcessProposal` will be called exactly once at all processes, and will return _accept_ in its `Response*`; -* `ExtendVote` will be called exactly once at all processes -* `VerifyVoteExtension` will be called _n-1_ times at each validator process, where _n_ is the number of validators; and -* `FinalizeBlock` will be finally called at all processes at the end of height _h_, conveying the same prepared - block that all calls to `PrepareProposal` and `ProcessProposal` had previously reported for height _h_. 
+* `ExtendVote` will be called exactly once at all processes; +* `VerifyVoteExtension` will be called exactly _n-1_ times at each validator process, where _n_ is + the number of validators, and will always return _accept_ in its `Response*`; +* `FinalizeBlock` will be called exactly once at all processes, conveying the same prepared + block that all calls to `PrepareProposal` and `ProcessProposal` had previously reported for + height _h_; and +* `Commit` will finally be called exactly once at all processes at the end of height _h_. However, the Application logic must be ready to cope with any possible run of Tendermint for a given height, including bad periods (byzantine proposers, network being asynchronous). @@ -28,7 +32,7 @@ In these cases, the sequence of calls to ABCI++ methods may not be so straighfor the Application should still be able to handle them, e.g., without crashing. The purpose of this section is to define what these sequences look like an a precise way. -As mentioned in the [Basic Concepts](abci++_basic_concepts_002_draft.md) section, Tendermint +As mentioned in the [Basic Concepts](./abci%2B%2B_basic_concepts.md) section, Tendermint acts as a client of ABCI++ and the Application acts as a server. Thus, it is up to Tendermint to determine when and in which order the different ABCI++ methods will be called. A well-written Application design should consider _any_ of these possible sequences. 
@@ -46,18 +50,15 @@ state-sync = *state-sync-attempt success-sync info state-sync-attempt = offer-snapshot *apply-chunk success-sync = offer-snapshot 1*apply-chunk -recovery = info *consensus-replay consensus-exec -consensus-replay = decide +recovery = info consensus-exec consensus-exec = (inf)consensus-height -consensus-height = *consensus-round decide +consensus-height = *consensus-round decide commit consensus-round = proposer / non-proposer -proposer = prepare-proposal extend-proposer -extend-proposer = *got-vote [extend-vote] *got-vote - -non-proposer = *got-vote [extend-non-proposer] *got-vote -extend-non-proposer = process-proposal *got-vote [extend-vote] +proposer = *got-vote prepare-proposal *got-vote process-proposal [extend] +extend = *got-vote extend-vote *got-vote +non-proposer = *got-vote [process-proposal] [extend] init-chain = %s"" offer-snapshot = %s"" @@ -68,12 +69,10 @@ process-proposal = %s"" extend-vote = %s"" got-vote = %s"" decide = %s"" +commit = %s"" ``` ->**TODO** Still hesitating... introduce _n_ as total number of validators, so that we can bound the occurrences of ->`got-vote` in a round. - -We have kept some of the ABCI++ methods out of the grammar, in order to keep it as clear and concise as possible. +We have kept some ABCI methods out of the grammar, in order to keep it as clear and concise as possible. A common reason for keeping all these methods out is that they all can be called at any point in a sequence defined by the grammar above. Other reasons depend on the method in question: @@ -115,7 +114,7 @@ Let us now examine the grammar line by line, providing further details. * In _state-sync_ mode, Tendermint makes one or more attempts at synchronizing the Application's state. At the beginning of each attempt, it offers the Application a snapshot found at another process. 
- If the Application accepts the snapshop, at sequence of calls to `ApplySnapshotChunk` method follow + If the Application accepts the snapshot, a sequence of calls to the `ApplySnapshotChunk` method follows to provide the Application with all the snapshots needed, in order to reconstruct the state locally. A successful attempt must provide at least one chunk via `ApplySnapshotChunk`. At the end of a successful attempt, Tendermint calls `Info` to make sure the recontructed state's @@ -128,12 +127,10 @@ Let us now examine the grammar line by line, providing further details. * In recovery mode, Tendermint first calls `Info` to know from which height it needs to replay decisions - to the Application. To replay a decision, Tendermint simply calls `FinalizeBlock` with the decided - block at that height. After this, Tendermint enters nomal consensus execution. + to the Application. After this, Tendermint enters normal consensus execution. >```abnf ->recovery = info *consensus-replay consensus-exec ->consensus-replay = decide +>recovery = info consensus-exec >``` * The non-terminal `consensus-exec` is a key point in this grammar. It is an infinite sequence of >```abnf >consensus-exec = (inf)consensus-height >``` -* A consensus height consists of zero or more rounds before deciding via a call to `FinalizeBlock`. - In each round, the sequence of method calls depends on whether the local process is the proposer or not. +* A consensus height consists of zero or more rounds before deciding and executing via a call to + `FinalizeBlock`, followed by a call to `Commit`. In each round, the sequence of method calls + depends on whether the local process is the proposer or not. Note that, if a height contains zero + rounds, this means the process is replaying an already decided value (catch-up mode). 
>```abnf ->consensus-height = *consensus-round decide +>consensus-height = *consensus-round decide commit >consensus-round = proposer / non-proposer >``` -* If the local process is the proposer of the current round, Tendermint starts by calling `PrepareProposal`. - No calls to methods related to vote extensions (`ExtendVote`, `VerifyVoteExtension`) can be called - in the present round before `PrepareProposal`. Once `PrepareProposal` is called, calls to - `ExtendVote` and `VerifyVoteExtension` can come in any order, although the former will be called - at most once in this round. +* For every round, if the local process is the proposer of the current round, Tendermint starts by + calling `PrepareProposal`, followed by `ProcessProposal`. Then, optionally, the Application is + asked to extend its vote for that round. Calls to `VerifyVoteExtension` can come at any time: the + local process may be slightly late in the current round, or votes may come from a future round + of this height. >```abnf ->proposer = prepare-proposal extend-proposer ->extend-proposer = *got-vote [extend-vote] *got-vote +>proposer = *got-vote prepare-proposal *got-vote process-proposal [extend] +>extend = *got-vote extend-vote *got-vote >``` -* If the local process is not the proposer of the current round, Tendermint will call `ProcessProposal` - at most once. At most one call to `ExtendVote` can occur only after `ProcessProposal` is called. - A number of calls to `VerifyVoteExtension` can occur in any order with respect to `ProcessProposal` - and `ExtendVote` throughout the round. +* Also for every round, if the local process is _not_ the proposer of the current round, Tendermint + will call `ProcessProposal` at most once. At most one call to `ExtendVote` may occur only after + `ProcessProposal` is called. A number of calls to `VerifyVoteExtension` can occur in any order + with respect to `ProcessProposal` and `ExtendVote` throughout the round. 
The reasons are the same + as above, namely, the process running slightly late in the current round, or votes from future + rounds of this height received. >```abnf ->non-proposer = *got-vote [extend-non-proposer] *got-vote ->extend-non-proposer = process-proposal *got-vote [extend-vote] +>non-proposer = *got-vote [process-proposal] [extend] >``` * Finally, the grammar describes all its terminal symbols, which denote the different ABCI++ method calls that @@ -187,6 +187,7 @@ Let us now examine the grammar line by line, providing further details. >extend-vote = %s"" >got-vote = %s"" >decide = %s"" +>commit = %s"" >``` ## Adapting existing Applications that use ABCI @@ -202,17 +203,21 @@ to undergo any changes in their implementation. As for the new methods: -* `PrepareProposal` must create a list of [TxRecord](./abci++_methods_002_draft.md#txrecord) each containing a - transaction passed in `RequestPrepareProposal.txs`, in the same other. The field `action` must be set to `UNMODIFIED` - for all [TxRecord](./abci++_methods_002_draft.md#txrecord) elements in the list. +* `PrepareProposal` must create a list of [TxRecord](./abci++_methods.md#txrecord) each containing + a transaction passed in `RequestPrepareProposal.txs`, in the same order. The field `action` must + be set to `UNMODIFIED` for all [TxRecord](./abci++_methods.md#txrecord) elements in the list. The Application must check whether the size of all transactions exceeds the byte limit - (`RequestPrepareProposal.max_tx_bytes`). If so, the Application must remove transactions at the end of the list - until the total byte size is at or below the limit. + (`RequestPrepareProposal.max_tx_bytes`). If so, the Application must remove transactions at the + end of the list until the total byte size is at or below the limit. * `ProcessProposal` must set `ResponseProcessProposal.accept` to _true_ and return. * `ExtendVote` is to set `ResponseExtendVote.extension` to an empty byte array and return. 
-* `VerifyVoteExtension` must set `ResponseVerifyVoteExtension.accept` to _true_ if the extension is an empty byte array - and _false_ otherwise, then return. -* `FinalizeBlock` is to coalesce the implementation of methods `BeginBlock`, `DeliverTx`, `EndBlock`, and `Commit`. - Legacy applications looking to reuse old code that implemented `DeliverTx` should wrap the legacy - `DeliverTx` logic in a loop that executes one transaction iteration per +* `VerifyVoteExtension` must set `ResponseVerifyVoteExtension.accept` to _true_ if the extension is + an empty byte array and _false_ otherwise, then return. +* `FinalizeBlock` is to coalesce the implementation of methods `BeginBlock`, `DeliverTx`, and + `EndBlock`. Legacy applications looking to reuse old code that implemented `DeliverTx` should + wrap the legacy `DeliverTx` logic in a loop that executes one transaction iteration per transaction in `RequestFinalizeBlock.tx`. + +Finally, `Commit`, which is kept in ABCI++, no longer returns the `AppHash`. It is now up to +`FinalizeBlock` to do so. Thus, a slight refactoring of the old `Commit` implementation will be +needed to move the return of `AppHash` to `FinalizeBlock`. diff --git a/spec/abci++/v0.md b/spec/abci++/v0.md deleted file mode 100644 index 163b3f7cbe..0000000000 --- a/spec/abci++/v0.md +++ /dev/null @@ -1,156 +0,0 @@ -# Tendermint v0 Markdown pseudocode - -This translates the latex code for Tendermint consensus from the Tendermint paper into markdown. 
- -### Initialization - -```go -h_p ← 0 -round_p ← 0 -step_p is one of {propose, prevote, precommit} -decision_p ← Vector() -lockedRound_p ← -1 -lockedValue_p ← nil -validValue_p ← nil -validRound_p ← -1 -``` - -### StartRound(round) - -```go -function startRound(round) { - round_p ← round - step_p ← propose - if proposer(h_p, round_p) = p { - if validValue_p != nil { - proposal ← validValue_p - } else { - proposal ← getValue() - } - broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ - } else { - schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) - } -} -``` - -### ReceiveProposal - -In the case where the local node is not locked on any round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, −1) from proposer(h_p, round_p) while step_p = propose do { - if valid(v) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -In the case where the node is locked on a round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { - if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -### Prevote timeout - -Upon receiving 2f + 1 prevotes, setup a timeout. 
- -```go -upon 2f + 1 ⟨PREVOTE, h_p, vr, *⟩ with step_p = prevote for the first time, do { - schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) -} -``` - -with OnTimeoutPrevote defined as: - -```go -function OnTimeoutPrevote(height, round) { - if (height = h_p && round = round_p && step_p = prevote) { - broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩ - step_p ← precommit - } -} -``` - -### Receiving enough prevotes to precommit - -The following code is ran upon receiving 2f + 1 prevotes for the same block - -```go -upon ⟨PROPOSAL, h_p, round_p, v, *⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, round_p, id(v)⟩ - while valid(v) ∧ step_p >= prevote for the first time do { - if (step_p = prevote) { - lockedValue_p ← v - lockedRound_p ← round_p - broadcast ⟨PRECOMMIT, h_p, round_p, id(v)⟩ - step_p ← precommit - } - validValue_p ← v - validRound_p ← round_p -} -``` - -And upon receiving 2f + 1 prevotes for nil: - -```go -upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ - while step_p = prevote do { - broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩ - step_p ← precommit -} -``` - -### Precommit timeout - -Upon receiving 2f + 1 precommits, setup a timeout. 
- -```go -upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { - schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) -} -``` - -with OnTimeoutPrecommit defined as: - -```go -function OnTimeoutPrecommit(height, round) { - if (height = h_p && round = round_p) { - StartRound(round_p + 1) - } -} -``` - -### Upon Receiving 2f + 1 precommits - -The following code is ran upon receiving 2f + 1 precommits for the same block - -```go -upon ⟨PROPOSAL, h_p, r, v, *⟩ - from proposer(h_p, r) - AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ - while decision_p[h_p] = nil do { - if (valid(v)) { - decision_p[h_p] ← v - h_p ← h_p + 1 - reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values - StartRound(0) - } -} -``` - -If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. \ No newline at end of file diff --git a/spec/abci++/v1.md b/spec/abci++/v1.md deleted file mode 100644 index 96dc8e674a..0000000000 --- a/spec/abci++/v1.md +++ /dev/null @@ -1,162 +0,0 @@ -# Tendermint v1 Markdown pseudocode - -This adds hooks for the existing ABCI to the prior pseudocode - -### Initialization - -```go -h_p ← 0 -round_p ← 0 -step_p is one of {propose, prevote, precommit} -decision_p ← Vector() -lockedValue_p ← nil -validValue_p ← nil -validRound_p ← -1 -``` - -### StartRound(round) - -```go -function startRound(round) { - round_p ← round - step_p ← propose - if proposer(h_p, round_p) = p { - if validValue_p != nil { - proposal ← validValue_p - } else { - txdata ← mempool.GetBlock() - // getBlockProposal fills in header - proposal ← getBlockProposal(txdata) - } - broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ - } else { - schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) - } -} -``` - -### ReceiveProposal - -In the case where the local node is not locked on any round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, 
round_p, v, −1) from proposer(h_p, round_p) while step_p = propose do { - if valid(v) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -In the case where the node is locked on a round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { - if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -### Prevote timeout - -Upon receiving 2f + 1 prevotes, setup a timeout. - -```go -upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ - with step_p = prevote for the first time, do { - schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) -} -``` - -with OnTimeoutPrevote defined as: - -```go -function OnTimeoutPrevote(height, round) { - if (height = h_p && round = round_p && step_p = prevote) { - broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩ - step_p ← precommit - } -} -``` - -### Receiving enough prevotes to precommit - -The following code is ran upon receiving 2f + 1 prevotes for the same block - -```go -upon ⟨PROPOSAL, h_p, round_p, v, *⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while valid(v) ∧ step_p >= prevote for the first time do { - if (step_p = prevote) { - lockedValue_p ← v - lockedRound_p ← round_p - broadcast ⟨PRECOMMIT, h_p, round_p, id(v)⟩ - step_p ← precommit - } - validValue_p ← v - validRound_p ← round_p -} -``` - -And upon receiving 2f + 1 prevotes for nil: - -```go -upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ - while step_p = prevote do { - broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩ - step_p ← precommit -} -``` - -### Precommit timeout - -Upon receiving 2f + 1 precommits, setup a timeout. 
- -```go -upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { - schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) -} -``` - -with OnTimeoutPrecommit defined as: - -```go -function OnTimeoutPrecommit(height, round) { - if (height = h_p && round = round_p) { - StartRound(round_p + 1) - } -} -``` - -### Upon Receiving 2f + 1 precommits - -The following code is ran upon receiving 2f + 1 precommits for the same block - -```go -upon ⟨PROPOSAL, h_p, r, v, *⟩ - from proposer(h_p, r) - AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ - while decision_p[h_p] = nil do { - if (valid(v)) { - decision_p[h_p] ← v - h_p ← h_p + 1 - reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values - ABCI.BeginBlock(v.header) - ABCI.DeliverTxs(v.data) - ABCI.EndBlock() - StartRound(0) - } -} -``` - -If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. \ No newline at end of file diff --git a/spec/abci++/v2.md b/spec/abci++/v2.md deleted file mode 100644 index 1abd8ec670..0000000000 --- a/spec/abci++/v2.md +++ /dev/null @@ -1,180 +0,0 @@ -# Tendermint v2 Markdown pseudocode - -This adds a single-threaded implementation of ABCI++, -with no optimization for splitting out verifying the header and verifying the proposal. 
- -### Initialization - -```go -h_p ← 0 -round_p ← 0 -step_p is one of {propose, prevote, precommit} -decision_p ← Vector() -lockedValue_p ← nil -validValue_p ← nil -validRound_p ← -1 -``` - -### StartRound(round) - -```go -function startRound(round) { - round_p ← round - step_p ← propose - if proposer(h_p, round_p) = p { - if validValue_p != nil { - proposal ← validValue_p - } else { - txdata ← mempool.GetBlock() - // getUnpreparedBlockProposal takes tx data, and fills in the unprepared header data - unpreparedProposal ← getUnpreparedBlockProposal(txdata) - // ABCI++: the proposer may reorder/update transactions in `unpreparedProposal` - proposal ← ABCI.PrepareProposal(unpreparedProposal) - } - broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ - } else { - schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) - } -} -``` - -### ReceiveProposal - -In the case where the local node is not locked on any round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, −1) from proposer(h_p, round_p) while step_p = propose do { - if valid(v) ∧ ABCI.ProcessProposal(h_p, v).accept ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - // Include any slashing evidence that may be sent in the process proposal response - for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - } - step_p ← prevote -} -``` - -In the case where the node is locked on a round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { - if valid(v) ∧ ABCI.ProcessProposal(h_p, v).accept ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - // Include any slashing evidence that may be sent in the process 
proposal response - for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - } - step_p ← prevote -} -``` - -### Prevote timeout - -Upon receiving 2f + 1 prevotes, setup a timeout. - -```go -upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ - with step_p = prevote for the first time, do { - schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) -} -``` - -with OnTimeoutPrevote defined as: - -```go -function OnTimeoutPrevote(height, round) { - if (height = h_p && round = round_p && step_p = prevote) { - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit - } -} -``` - -### Receiving enough prevotes to precommit - -The following code is ran upon receiving 2f + 1 prevotes for the same block - -```go -upon ⟨PROPOSAL, h_p, round_p, v, *⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while valid(v) ∧ step_p >= prevote for the first time do { - if (step_p = prevote) { - lockedValue_p ← v - lockedRound_p ← round_p - precommit_extension ← ABCI.ExtendVote(h_p, round_p, id(v)) - broadcast ⟨PRECOMMIT, h_p, round_p, id(v), precommit_extension⟩ - step_p ← precommit - } - validValue_p ← v - validRound_p ← round_p -} -``` - -And upon receiving 2f + 1 prevotes for nil: - -```go -upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ - while step_p = prevote do { - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit -} -``` - -### Upon receiving a precommit - -Upon receiving a precommit `precommit`, we ensure that `ABCI.VerifyVoteExtension(precommit.precommit_extension) = true` -before accepting the precommit. This is akin to how we check the signature on precommits normally, hence its not wrapped -in the syntax of methods from the paper. - -### Precommit timeout - -Upon receiving 2f + 1 precommits, setup a timeout. 
- -```go -upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { - schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) -} -``` - -with OnTimeoutPrecommit defined as: - -```go -function OnTimeoutPrecommit(height, round) { - if (height = h_p && round = round_p) { - StartRound(round_p + 1) - } -} -``` - -### Upon Receiving 2f + 1 precommits - -The following code is ran upon receiving 2f + 1 precommits for the same block - -```go -upon ⟨PROPOSAL, h_p, r, v, *⟩ - from proposer(h_p, r) - AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ - while decision_p[h_p] = nil do { - if (valid(v)) { - decision_p[h_p] ← v - h_p ← h_p + 1 - reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values - ABCI.FinalizeBlock(id(v)) - StartRound(0) - } -} -``` - -If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. diff --git a/spec/abci++/v3.md b/spec/abci++/v3.md deleted file mode 100644 index ed4c720b4e..0000000000 --- a/spec/abci++/v3.md +++ /dev/null @@ -1,201 +0,0 @@ -# Tendermint v3 Markdown pseudocode - -This is a single-threaded implementation of ABCI++, -with an optimization for the ProcessProposal phase. -Namely, processing of the header and the block data is separated into two different functions. 
- -### Initialization - -```go -h_p ← 0 -round_p ← 0 -step_p is one of {propose, prevote, precommit} -decision_p ← Vector() -lockedValue_p ← nil -validValue_p ← nil -validRound_p ← -1 -``` - -### StartRound(round) - -```go -function startRound(round) { - round_p ← round - step_p ← propose - if proposer(h_p, round_p) = p { - if validValue_p != nil { - proposal ← validValue_p - } else { - txdata ← mempool.GetBlock() - // getUnpreparedBlockProposal fills in header - unpreparedProposal ← getUnpreparedBlockProposal(txdata) - proposal ← ABCI.PrepareProposal(unpreparedProposal) - } - broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ - } else { - schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) - } -} -``` - -### ReceiveProposal - -In the case where the local node is not locked on any round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v_header, −1) from proposer(h_p, round_p) while step_p = propose do { - prevote_nil ← false - // valid is Tendermints validation, ABCI.VerifyHeader is the applications - if valid(v_header) ∧ ABCI.VerifyHeader(h_p, v_header) ∧ (lockedRound_p = −1 ∨ lockedValue_p = id(v_header)) { - wait to receive proposal v corresponding to v_header - // We split up the app's header verification from the remainder of its processing of the proposal - if ABCI.ProcessProposal(h_p, v).accept { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - prevote_nil ← true - // Include any slashing evidence that may be sent in the process proposal response - for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - } - } else { - prevote_nil ← true - } - if prevote_nil { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -In the case where the node is locked on a round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v_header, vr⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v_header)⟩ - while step_p = propose 
∧ (vr ≥ 0 ∧ vr < round_p) do { - prevote_nil ← false - if valid(v) ∧ ABCI.VerifyHeader(h_p, v.header) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { - wait to receive proposal v corresponding to v_header - // We split up the app's header verification from the remainder of its processing of the proposal - if ABCI.ProcessProposal(h_p, v).accept { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - prevote_nil ← true - // Include any slashing evidence that may be sent in the process proposal response - for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - } - } else { - prevote_nil ← true - } - if prevote_nil { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -### Prevote timeout - -Upon receiving 2f + 1 prevotes, setup a timeout. - -```go -upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ - with step_p = prevote for the first time, do { - schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) -} -``` - -with OnTimeoutPrevote defined as: - -```go -function OnTimeoutPrevote(height, round) { - if (height = h_p && round = round_p && step_p = prevote) { - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit - } -} -``` - -### Receiving enough prevotes to precommit - -The following code is ran upon receiving 2f + 1 prevotes for the same block - -```go -upon ⟨PROPOSAL, h_p, round_p, v, *⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while valid(v) ∧ step_p >= prevote for the first time do { - if (step_p = prevote) { - lockedValue_p ← v - lockedRound_p ← round_p - precommit_extension ← ABCI.ExtendVote(h_p, round_p, id(v)) - broadcast ⟨PRECOMMIT, h_p, round_p, id(v), precommit_extension⟩ - step_p ← precommit - } - validValue_p ← v - validRound_p ← round_p -} -``` - -And upon receiving 2f + 1 prevotes for nil: - -```go -upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ - while step_p = 
prevote do { - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit -} -``` - -### Upon receiving a precommit - -Upon receiving a precommit `precommit`, we ensure that `ABCI.VerifyVoteExtension(precommit.precommit_extension) = true` -before accepting the precommit. This is akin to how we check the signature on precommits normally, hence its not wrapped -in the syntax of methods from the paper. - -### Precommit timeout - -Upon receiving 2f + 1 precommits, setup a timeout. - -```go -upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { - schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) -} -``` - -with OnTimeoutPrecommit defined as: - -```go -function OnTimeoutPrecommit(height, round) { - if (height = h_p && round = round_p) { - StartRound(round_p + 1) - } -} -``` - -### Upon Receiving 2f + 1 precommits - -The following code is ran upon receiving 2f + 1 precommits for the same block - -```go -upon ⟨PROPOSAL, h_p, r, v, *⟩ - from proposer(h_p, r) - AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ - while decision_p[h_p] = nil do { - if (valid(v)) { - decision_p[h_p] ← v - h_p ← h_p + 1 - reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values - ABCI.FinalizeBlock(id(v)) - StartRound(0) - } -} -``` - -If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. \ No newline at end of file diff --git a/spec/abci++/v4.md b/spec/abci++/v4.md deleted file mode 100644 index d211fd87fc..0000000000 --- a/spec/abci++/v4.md +++ /dev/null @@ -1,199 +0,0 @@ -# Tendermint v4 Markdown pseudocode - -This is a multi-threaded implementation of ABCI++, -where ProcessProposal starts when the proposal is received, but ends before precommitting. 
- -### Initialization - -```go -h_p ← 0 -round_p ← 0 -step_p is one of {propose, prevote, precommit} -decision_p ← Vector() -lockedValue_p ← nil -validValue_p ← nil -validRound_p ← -1 -``` - -### StartRound(round) - -```go -function startRound(round) { - round_p ← round - step_p ← propose - if proposer(h_p, round_p) = p { - if validValue_p != nil { - proposal ← validValue_p - } else { - txdata ← mempool.GetBlock() - // getUnpreparedBlockProposal fills in header - unpreparedProposal ← getUnpreparedBlockProposal(txdata) - proposal ← ABCI.PrepareProposal(unpreparedProposal) - } - broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ - } else { - schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) - } -} -``` - -### ReceiveProposal - -In the case where the local node is not locked on any round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, −1) from proposer(h_p, round_p) while step_p = propose do { - if valid(v) ∧ ABCI.VerifyHeader(h_p, v.header) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { - // We fork process proposal into a parallel process - Fork ABCI.ProcessProposal(h_p, v) - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -In the case where the node is locked on a round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { - if valid(v) ∧ ABCI.VerifyHeader(h_p, v.header) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { - // We fork process proposal into a parallel process - Fork ABCI.ProcessProposal(h_p, v) - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -### Prevote timeout - -Upon receiving 2f + 1 prevotes, setup a timeout. 
- -```go -upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ - with step_p = prevote for the first time, do { - schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) -} -``` - -with OnTimeoutPrevote defined as: - -```go -def OnTimeoutPrevote(height, round) { - if (height = h_p && round = round_p && step_p = prevote) { - // Join the ProcessProposal, and output any evidence in case it has some. - processProposalOutput ← Join ABCI.ProcessProposal(h_p, v) - for evidence in processProposalOutput.evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit - } -} -``` - -### Receiving enough prevotes to precommit - -The following code is ran upon receiving 2f + 1 prevotes for the same block - -```go -upon ⟨PROPOSAL, h_p, round_p, v, *⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ -while valid(v) ∧ step_p >= prevote for the first time do { - if (step_p = prevote) { - lockedValue_p ← v - lockedRound_p ← round_p - processProposalOutput ← Join ABCI.ProcessProposal(h_p, v) - // If the proposal is valid precommit as before. - // If it was invalid, precommit nil. - // Note that ABCI.ProcessProposal(h_p, v).accept is deterministic for all honest nodes. - precommit_value ← nil - if processProposalOutput.accept { - precommit_value ← id(v) - } - precommit_extension ← ABCI.ExtendVote(h_p, round_p, precommit_value) - broadcast ⟨PRECOMMIT, h_p, round_p, precommit_value, precommit_extension⟩ - for evidence in processProposalOutput.evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - - step_p ← precommit - } - validValue_p ← v - validRound_p ← round_p -} -``` - -And upon receiving 2f + 1 prevotes for nil: - -```go -upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ - while step_p = prevote do { - // Join ABCI.ProcessProposal, and broadcast any evidence if it exists. 
- processProposalOutput ← Join ABCI.ProcessProposal(h_p, v) - for evidence in processProposalOutput.evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit -} -``` - -### Upon receiving a precommit - -Upon receiving a precommit `precommit`, we ensure that `ABCI.VerifyVoteExtension(precommit.precommit_extension) = true` -before accepting the precommit. This is akin to how we check the signature on precommits normally, hence its not wrapped -in the syntax of methods from the paper. - -### Precommit timeout - -Upon receiving 2f + 1 precommits, setup a timeout. - -```go -upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { - schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) -} -``` - -with OnTimeoutPrecommit defined as: - -```go -def OnTimeoutPrecommit(height, round) { - if (height = h_p && round = round_p) { - StartRound(round_p + 1) - } -} -``` - -### Upon Receiving 2f + 1 precommits - -The following code is ran upon receiving 2f + 1 precommits for the same block - -```go -upon ⟨PROPOSAL, h_p, r, v, *⟩ - from proposer(h_p, r) - AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ - while decision_p[h_p] = nil do { - if (valid(v)) { - decision_p[h_p] ← v - h_p ← h_p + 1 - reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values - ABCI.FinalizeBlock(id(v)) - StartRound(0) - } -} -``` - -If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. \ No newline at end of file diff --git a/spec/abci/abci.md b/spec/abci/abci.md index 5d9d59b711..84e566d507 100644 --- a/spec/abci/abci.md +++ b/spec/abci/abci.md @@ -40,13 +40,13 @@ tendermint should not continue. In the Go implementation these methods take a context and may return an error. 
The context exists so that applications can terminate gracefully during shutdown, and the error return value makes it -possible for applications to singal transient errors to Tendermint. +possible for applications to signal transient errors to Tendermint. ### CheckTx The `CheckTx` ABCI method controls what transactions are considered for inclusion in a block. When Tendermint receives a `ResponseCheckTx` with a non-zero `Code`, the associated -transaction will be not be added to Tendermint's mempool or it will be removed if +transaction will not be added to Tendermint's mempool or it will be removed if it is already included. ### DeliverTx diff --git a/spec/abci/apps.md b/spec/abci/apps.md index d6ec198323..0439f2c850 100644 --- a/spec/abci/apps.md +++ b/spec/abci/apps.md @@ -13,7 +13,7 @@ Here we cover the following components of ABCI applications: and the differences between `CheckTx` and `DeliverTx`. - [Transaction Results](#transaction-results) - rules around transaction results and validity -- [Validator Set Updates](#validator-updates) - how validator sets are +- [Validator Set Updates](#updating-the-validator-set) - how validator sets are changed during `InitChain` and `EndBlock` - [Query](#query) - standards for using the `Query` method and proofs about the application state @@ -204,9 +204,6 @@ not broadcasted to other peers and not included in a proposal block. `Data` contains the result of the CheckTx transaction execution, if any. It is semantically meaningless to Tendermint. -`Events` include any events for the execution, though since the transaction has not -been committed yet, they are effectively ignored by Tendermint. - ### DeliverTx DeliverTx is the workhorse of the blockchain. Tendermint sends the @@ -315,6 +312,18 @@ txs included in a proposed block. Must have `MaxGas >= -1`. If `MaxGas == -1`, no limit is enforced. 
+### BlockParams.RecheckTx + +This indicates whether all nodes in the network should perform a `CheckTx` on all +transactions remaining in the mempool directly *after* the execution of every block, +i.e. whenever a new application state is created. This is often useful for garbage +collection. + +The change will come into effect immediately after `FinalizeBlock` has been +called. + +This was previously a local mempool config parameter. + ### EvidenceParams.MaxAgeDuration This is the maximum age of evidence in time units. @@ -355,7 +364,7 @@ are expected to have clocks that differ by at most `Precision`. ### SynchronyParams.MessageDelay -`SynchronyParams.MessageDelay` is a parameter of the Proposer-Based Timestamps +`SynchronyParams.MessageDelay` is a parameter of the Proposer-Based Timestamps algorithm that configures the acceptable upper-bound for transmitting a `Proposal` message from the proposer to all of the validators on the network. diff --git a/spec/core/data_structures.md b/spec/core/data_structures.md index 1e84297f0f..3c955ff101 100644 --- a/spec/core/data_structures.md +++ b/spec/core/data_structures.md @@ -54,7 +54,7 @@ and a list of evidence of malfeasance (ie. signing conflicting votes). |--------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| | Header | [Header](#header) | Header corresponding to the block. This field contains information used throughout consensus and other areas of the protocol. To find out what it contains, visit [header] (#header) | Must adhere to the validation rules of [header](#header) | | Data | [Data](#data) | Data contains a list of transactions. The contents of the transaction is unknown to Tendermint. | This field can be empty or populated, but no validation is performed. 
Applications can perform validation on individual transactions prior to block creation using [checkTx](../abci/abci.md#checktx). -| Evidence | [EvidenceList](#evidence_list) | Evidence contains a list of infractions committed by validators. | Can be empty, but when populated the validations rules from [evidenceList](#evidence_list) apply | +| Evidence | [EvidenceList](#evidencelist) | Evidence contains a list of infractions committed by validators. | Can be empty, but when populated the validations rules from [evidenceList](#evidencelist) apply | | LastCommit | [Commit](#commit) | `LastCommit` includes one vote for every validator. All votes must either be for the previous block, nil or absent. If a vote is for the previous block it must have a valid signature from the corresponding validator. The sum of the voting power of the validators that voted must be greater than 2/3 of the total voting power of the complete validator set. The number of votes in a commit is limited to 10000 (see `types.MaxVotesCount`). | Must be empty for the initial height and must adhere to the validation rules of [commit](#commit). | ## Execution @@ -157,7 +157,7 @@ The `BlockID` contains two distinct Merkle roots of the block. The `BlockID` inc | Name | Type | Description | Validation | |---------------|---------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------| | Hash | slice of bytes (`[]byte`) | MerkleRoot of all the fields in the header (ie. `MerkleRoot(header)`. | hash must be of length 32 | -| PartSetHeader | [PartSetHeader](#PartSetHeader) | Used for secure gossiping of the block during consensus, is the MerkleRoot of the complete serialized block cut into parts (ie. `MerkleRoot(MakeParts(block))`). 
| Must adhere to the validation rules of [PartSetHeader](#PartSetHeader) | +| PartSetHeader | [PartSetHeader](#partsetheader) | Used for secure gossiping of the block during consensus, is the MerkleRoot of the complete serialized block cut into parts (ie. `MerkleRoot(MakeParts(block))`). | Must adhere to the validation rules of [PartSetHeader](#partsetheader) | See [MerkleRoot](./encoding.md#MerkleRoot) for details. @@ -243,7 +243,7 @@ The vote extension is not part of the [`CanonicalVote`](#canonicalvote). | Height | uint64 | Height for which this vote was created. | Must be > 0 | | Round | int32 | Round that the commit corresponds to. | Must be > 0 | | BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) | -| Timestamp | [Time](#Time) | The time at which a validator signed. | [Time](#time) | +| Timestamp | [Time](#time) | The time at which a validator signed. | [Time](#time) | | ValidatorAddress | slice of bytes (`[]byte`) | Address of the validator | Length must be equal to 20 | | ValidatorIndex | int32 | Index at a specific block height that corresponds to the Index of the validator in the set. | must be > 0 | | Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated bock. | Length of signature must be > 0 and < 64 | @@ -300,7 +300,7 @@ is locked in POLRound. The message is signed by the validator private key. | Round | int32 | Round that the commit corresponds to. | Must be > 0 | | POLRound | int64 | Proof of lock | Must be > 0 | | BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) | -| Timestamp | [Time](#Time) | Timestamp represents the time at which a validator signed. | [Time](#time) | +| Timestamp | [Time](#time) | Timestamp represents the time at which a validator signed. 
| [Time](#time) | | Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated bock. | Length of signature must be > 0 and < 64 | ## SignedMsgType @@ -351,7 +351,7 @@ in the same round of the same height. Votes are lexicographically sorted on `Blo | VoteB | [Vote](#vote) | The second vote submitted by a validator when they equivocated | VoteB must adhere to [Vote](#vote) validation rules | | TotalVotingPower | int64 | The total power of the validator set at the height of equivocation | Must be equal to nodes own copy of the data | | ValidatorPower | int64 | Power of the equivocating validator at the height | Must be equal to the nodes own copy of the data | -| Timestamp | [Time](#Time) | Time of the block where the equivocation occurred | Must be equal to the nodes own copy of the data | +| Timestamp | [Time](#time) | Time of the block where the equivocation occurred | Must be equal to the nodes own copy of the data | ### LightClientAttackEvidence @@ -360,13 +360,13 @@ a light client such that a full node can verify, propose and commit the evidence punishment of the malicious validators. There are three forms of attacks: Lunatic, Equivocation and Amnesia. These attacks are exhaustive. 
You can find a more detailed overview of this [here](../light-client/accountability#the_misbehavior_of_faulty_validators) -| Name | Type | Description | Validation | -|----------------------|------------------------------------|----------------------------------------------------------------------|------------------------------------------------------------------| -| ConflictingBlock | [LightBlock](#LightBlock) | Read Below | Must adhere to the validation rules of [lightBlock](#lightblock) | -| CommonHeight | int64 | Read Below | must be > 0 | -| Byzantine Validators | Array of [Validators](#Validators) | validators that acted maliciously | Read Below | -| TotalVotingPower | int64 | The total power of the validator set at the height of the infraction | Must be equal to the nodes own copy of the data | -| Timestamp | [Time](#Time) | Time of the block where the infraction occurred | Must be equal to the nodes own copy of the data | +| Name | Type | Description | Validation | +|----------------------|----------------------------------|----------------------------------------------------------------------|------------------------------------------------------------------| +| ConflictingBlock | [LightBlock](#lightblock) | Read Below | Must adhere to the validation rules of [lightBlock](#lightblock) | +| CommonHeight | int64 | Read Below | must be > 0 | +| Byzantine Validators | Array of [Validator](#validator) | validators that acted maliciously | Read Below | +| TotalVotingPower | int64 | The total power of the validator set at the height of the infraction | Must be equal to the nodes own copy of the data | +| Timestamp | [Time](#time) | Time of the block where the infraction occurred | Must be equal to the nodes own copy of the data | ## LightBlock @@ -385,7 +385,7 @@ The SignedhHeader is the [header](#header) accompanied by the commit to prove it | Name | Type | Description | Validation | 
|--------|-------------------|-------------------|-----------------------------------------------------------------------------------| -| Header | [Header](#Header) | [Header](#header) | Header cannot be nil and must adhere to the [Header](#Header) validation criteria | +| Header | [Header](#header) | [Header](#header) | Header cannot be nil and must adhere to the [Header](#header) validation criteria | | Commit | [Commit](#commit) | [Commit](#commit) | Commit cannot be nil and must adhere to the [Commit](#commit) criteria | ## ValidatorSet @@ -434,6 +434,7 @@ func SumTruncated(bz []byte) []byte { |--------------|-------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| | max_bytes | int64 | Max size of a block, in bytes. | 1 | | max_gas | int64 | Max sum of `GasWanted` in a proposed block. NOTE: blocks that violate this may be committed if there are Byzantine proposers. It's the application's responsibility to handle this when processing a block! 
| 2 | +| recheck_tx | bool | Indicated whether to run `CheckTx` on all remaining transactions *after* every execution of a block | 3 | ### EvidenceParams diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index c950e47763..a3aaa1c02b 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -14,6 +14,7 @@ import ( "strconv" "strings" "sync" + "time" "github.com/gogo/protobuf/proto" @@ -24,6 +25,7 @@ import ( "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/libs/log" types1 "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -89,6 +91,14 @@ type Config struct { // height <-> pubkey <-> voting power ValidatorUpdates map[string]map[string]string `toml:"validator_update"` + // Add artificial delays to each of the main ABCI calls to mimic computation time + // of the application + PrepareProposalDelayMS uint64 `toml:"prepare_proposal_delay_ms"` + ProcessProposalDelayMS uint64 `toml:"process_proposal_delay_ms"` + CheckTxDelayMS uint64 `toml:"check_tx_delay_ms"` + VoteExtensionDelayMS uint64 `toml:"vote_extension_delay_ms"` + FinalizeBlockDelayMS uint64 `toml:"finalize_block_delay_ms"` + // dash parameters ThesholdPublicKeyUpdate map[string]string `toml:"threshold_public_key_update"` QuorumHashUpdate map[string]string `toml:"quorum_hash_update"` @@ -184,9 +194,13 @@ func (app *Application) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*a if err != nil { return &abci.ResponseCheckTx{ Code: code.CodeTypeEncodingError, - Log: err.Error(), }, nil } + + if app.cfg.CheckTxDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.CheckTxDelayMS) * time.Millisecond) + } + return &abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}, nil } @@ -210,11 +224,17 @@ func (app *Application) FinalizeBlock(_ context.Context, req *abci.RequestFinali var err error resp := 
abci.ResponseFinalizeBlock{ TxResults: txs, + AppHash: app.state.Finalize(), } resp.ValidatorSetUpdate, err = app.validatorSetUpdates(uint64(req.Height)) if err != nil { panic(err) } + + if app.cfg.FinalizeBlockDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.FinalizeBlockDelayMS) * time.Millisecond) + } + resp.NextCoreChainLockUpdate, err = app.chainLockUpdate(uint64(req.Height)) if err != nil { panic(err) @@ -243,7 +263,7 @@ func (app *Application) Commit(_ context.Context) (*abci.ResponseCommit, error) app.mu.Lock() defer app.mu.Unlock() - height, hash, err := app.state.Commit() + height, err := app.state.Commit() if err != nil { panic(err) } @@ -263,7 +283,6 @@ func (app *Application) Commit(_ context.Context) (*abci.ResponseCommit, error) retainHeight = int64(height - app.cfg.RetainBlocks + 1) } return &abci.ResponseCommit{ - Data: hash, RetainHeight: retainHeight, }, nil } @@ -352,6 +371,9 @@ func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestA // total number of transaction bytes to exceed `req.MaxTxBytes`, we will not // append our special vote extension transaction. func (app *Application) PrepareProposal(_ context.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { + app.mu.Lock() + defer app.mu.Unlock() + extCount := len(req.LocalLastCommit.ThresholdVoteExtensions) // We only generate our special transaction if we have vote extensions if extCount > 0 { @@ -407,12 +429,20 @@ func (app *Application) PrepareProposal(_ context.Context, req *abci.RequestPrep Tx: tx, }) } + + if app.cfg.PrepareProposalDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.PrepareProposalDelayMS) * time.Millisecond) + } + return &abci.ResponsePrepareProposal{TxRecords: trs}, nil } // ProcessProposal implements part of the Application interface. // It accepts any proposal that does not contain a malformed transaction. 
func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { + app.mu.Lock() + defer app.mu.Unlock() + for _, tx := range req.Txs { k, v, err := parseTx(tx) if err != nil { @@ -428,6 +458,11 @@ func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProc } } } + + if app.cfg.ProcessProposalDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.ProcessProposalDelayMS) * time.Millisecond) + } + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil } @@ -439,6 +474,9 @@ func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProc // key/value store ("extensionSum") with the sum of all of the numbers collected // from the vote extensions. func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) { + app.mu.Lock() + defer app.mu.Unlock() + // We ignore any requests for vote extensions that don't match our expected // next height. currentHeight := app.state.Height @@ -459,7 +497,15 @@ func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVot // nolint:gosec // G404: Use of weak random number generator num := rand.Int63n(voteExtensionMaxVal) extLen := binary.PutVarint(ext, num) - app.logger.Info("generated vote extension", "num", num, "ext", fmt.Sprintf("%x", ext[:extLen]), "state.Height", app.state.Height) + + if app.cfg.VoteExtensionDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.VoteExtensionDelayMS) * time.Millisecond) + } + + app.logger.Info("generated vote extension", + "num", num, + "ext", tmstrings.LazySprintf("%x", ext[:extLen]), + "state.Height", app.state.Height) return &abci.ResponseExtendVote{ VoteExtensions: []*abci.ExtendVoteExtension{ { @@ -478,6 +524,9 @@ func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVot // without doing anything about them. 
In this case, it just makes sure that the // vote extension is a well-formed integer value. func (app *Application) VerifyVoteExtension(_ context.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) { + app.mu.Lock() + defer app.mu.Unlock() + // We allow vote extensions to be optional if len(req.VoteExtensions) == 0 { return &abci.ResponseVerifyVoteExtension{ @@ -507,6 +556,10 @@ func (app *Application) VerifyVoteExtension(_ context.Context, req *abci.Request nums = append(nums, num) } + if app.cfg.VoteExtensionDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.VoteExtensionDelayMS) * time.Millisecond) + } + app.logger.Info("verified vote extension value", "req", req, "nums", nums) return &abci.ResponseVerifyVoteExtension{ Status: abci.ResponseVerifyVoteExtension_ACCEPT, diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index 3e79e7a295..95da077b7b 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ -138,8 +138,8 @@ func (s *State) Set(key, value string) { } } -// Commit commits the current state. -func (s *State) Commit() (uint64, []byte, error) { +// Finalize is called after applying a block, updating the height and returning the new app_hash +func (s *State) Finalize() []byte { s.Lock() defer s.Unlock() switch { @@ -151,13 +151,20 @@ func (s *State) Commit() (uint64, []byte, error) { s.Height = 1 } s.Hash = hashItems(s.Values, s.Height) + return s.Hash +} + +// Commit commits the current state. 
+func (s *State) Commit() (uint64, error) { + s.Lock() + defer s.Unlock() if s.persistInterval > 0 && s.Height%s.persistInterval == 0 { err := s.save() if err != nil { - return 0, nil, err + return 0, err } } - return s.Height, s.Hash, nil + return s.Height, nil } func (s *State) Rollback() error { diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index f634fc5f6d..92011394c4 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -15,9 +15,7 @@ var ( // testnetCombinations defines global testnet options, where we generate a // separate testnet for each combination (Cartesian product) of options. testnetCombinations = map[string][]interface{}{ - "topology": {"single", "quad", "large"}, - "queueType": {"priority"}, // "fifo" - "initialHeight": {0, 1000}, + "topology": {"single", "quad", "large"}, "initialState": { map[string]string{}, map[string]string{"initial01": "a", "initial02": "b", "initial03": "c"}, @@ -26,6 +24,8 @@ var ( // Tenderdash-specific "initialCoreChainLockedHeight": {0}, "initAppCoreChainLockedHeight": {0}, + "abci": {"builtin", "outofprocess"}, + "txSize": {1024, 2048, 4096, 8192}, } // The following specify randomly chosen values for testnet nodes. 
@@ -36,11 +36,10 @@ var ( "rocksdb": 10, "cleveldb": 5, } - nodeABCIProtocols = weightedChoice{ - "builtin": 50, - "tcp": 20, - "grpc": 20, - "unix": 10, + ABCIProtocols = weightedChoice{ + "tcp": 20, + "grpc": 20, + "unix": 10, } nodePrivvalProtocols = weightedChoice{ "file": 50, @@ -66,10 +65,16 @@ var ( "kill": 0.1, "restart": 0.1, } - evidence = uniformChoice{0, 1, 10} - txSize = uniformChoice{1024, 4096} // either 1kb or 4kb - ipv6 = uniformChoice{false, true} - keyType = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1} + + // the following specify random chosen values for the entire testnet + initialHeight = uniformChoice{0, 1000} + evidence = uniformChoice{0, 1, 10} + ipv6 = uniformChoice{false, true} + keyType = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1} + abciDelays = uniformChoice{"none", "small", "large"} + + voteExtensionEnableHeightOffset = uniformChoice{int64(0), int64(10), int64(100)} + voteExtensionEnabled = uniformChoice{true, false} ) // Generate generates random testnets using the given RNG. 
@@ -108,16 +113,14 @@ type Options struct { func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) { manifest := e2e.Manifest{ IPv6: ipv6.Choose(r).(bool), - ABCIProtocol: nodeABCIProtocols.Choose(r), - InitialHeight: int64(opt["initialHeight"].(int)), InitialState: opt["initialState"].(map[string]string), Validators: map[string]int64{}, ValidatorUpdates: map[string]map[string]int64{}, Nodes: map[string]*e2e.ManifestNode{}, KeyType: keyType.Choose(r).(string), Evidence: evidence.Choose(r).(int), - QueueType: opt["queueType"].(string), - TxSize: txSize.Choose(r).(int), + QueueType: "simple-priority", + TxSize: opt["txSize"].(int), // Tenderdash specific settings GenesisCoreChainLockedHeight: uint32(opt["initialCoreChainLockedHeight"].(int)), @@ -125,6 +128,33 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er ChainLockUpdates: map[string]int64{}, } + manifest.InitialHeight = int64(initialHeight.Choose(r).(int)) + + if voteExtensionEnabled.Choose(r).(bool) { + manifest.VoteExtensionsEnableHeight = manifest.InitialHeight + voteExtensionEnableHeightOffset.Choose(r).(int64) + } + + if opt["abci"] == "builtin" { + manifest.ABCIProtocol = string(e2e.ProtocolBuiltin) + } else { + manifest.ABCIProtocol = ABCIProtocols.Choose(r) + } + + switch abciDelays.Choose(r).(string) { + case "none": + case "small": + manifest.PrepareProposalDelayMS = 100 + manifest.ProcessProposalDelayMS = 100 + manifest.VoteExtensionDelayMS = 20 + manifest.FinalizeBlockDelayMS = 200 + case "large": + manifest.PrepareProposalDelayMS = 200 + manifest.ProcessProposalDelayMS = 200 + manifest.CheckTxDelayMS = 20 + manifest.VoteExtensionDelayMS = 100 + manifest.FinalizeBlockDelayMS = 500 + } + topology, ok := topologies[opt["topology"].(string)] if !ok { return manifest, fmt.Errorf("unknown topology %q", opt["topology"]) diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index 210a5e12d1..a74fb0637e 100644 --- 
a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -1,4 +1,3 @@ -//nolint: gosec package main import ( @@ -87,6 +86,8 @@ func (cli *CLI) generate() error { return err } + // nolint: gosec + // G404: Use of weak random number generator (math/rand instead of crypto/rand) manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), cli.opts) if err != nil { return err diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index 38ada84b2b..0f8352b7cc 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -3,9 +3,10 @@ evidence = 5 initial_height = 1000 -initial_state = { initial01 = "a", initial02 = "b", initial03 = "c" } +initial_state = {initial01 = "a", initial02 = "b", initial03 = "c"} +queue_type = "simple-priority" +abci_protocol = "builtin" initial_core_chain_locked_height = 3400 -queue_type = "priority" [chainlock_updates] 1000 = 3450 @@ -40,7 +41,6 @@ block_sync = "v0" perturb = ["disconnect"] [node.validator02] -abci_protocol = "tcp" database = "cleveldb" persist_interval = 0 perturb = ["restart"] @@ -51,7 +51,6 @@ block_sync = "v0" [node.validator03] database = "badgerdb" seeds = ["seed01"] -abci_protocol = "grpc" persist_interval = 3 perturb = ["kill"] privval_protocol = "grpc" @@ -59,7 +58,6 @@ block_sync = "v0" retain_blocks = 10 [node.validator04] -abci_protocol = "builtin" snapshot_interval = 5 database = "rocksdb" persistent_peers = ["validator01"] @@ -72,7 +70,6 @@ block_sync = "v0" database = "badgerdb" state_sync = "p2p" start_at = 1005 # Becomes part of the validator set at 1010 -abci_protocol = "builtin" perturb = ["pause", "disconnect", "restart"] [node.full01] diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index 96cdc414c6..160719dda9 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -87,14 +87,6 @@ func run(ctx context.Context, configFile string) error { return err } - // Start remote signer (must start before node if running builtin). 
- if cfg.PrivValServer != "" { - err := startRemoteSigner(ctx, cfg, logger) - if err != nil { - return err - } - } - // Start app server. err = startAppServer(ctx, cfg, logger) if err != nil { @@ -112,18 +104,24 @@ func run(ctx context.Context, configFile string) error { } func startAppServer(ctx context.Context, cfg *Config, logger log.Logger) error { + // Start remote signer (must start before node if running builtin). + if cfg.PrivValServer != "" { + err := startRemoteSigner(ctx, cfg, logger) + if err != nil { + return err + } + } + if cfg.Mode == string(e2e.ModeLight) { + return startLightNode(ctx, logger, cfg) + } switch cfg.Protocol { case "socket", "grpc": return startApp(ctx, logger, cfg) case "builtin": - switch cfg.Mode { - case string(e2e.ModeLight): - return startLightNode(ctx, logger, cfg) - case string(e2e.ModeSeed): + if cfg.Mode == string(e2e.ModeSeed) { return startSeedNode(ctx) - default: - return startNode(ctx, cfg) } + return startNode(ctx, cfg) } return fmt.Errorf("invalid protocol %q", cfg.Protocol) } @@ -263,7 +261,8 @@ func startLightNode(ctx context.Context, logger log.Logger, cfg *Config) error { // If necessary adjust global WriteTimeout to ensure it's greater than // TimeoutBroadcastTxCommit. // See https://github.com/tendermint/tendermint/issues/3435 - if rpccfg.WriteTimeout <= tmcfg.RPC.TimeoutBroadcastTxCommit { + // Note we don't need to adjust anything if the timeout is already unlimited. + if rpccfg.WriteTimeout > 0 && rpccfg.WriteTimeout <= tmcfg.RPC.TimeoutBroadcastTxCommit { rpccfg.WriteTimeout = tmcfg.RPC.TimeoutBroadcastTxCommit + 1*time.Second } diff --git a/test/e2e/pkg/exec/exec.go b/test/e2e/pkg/exec/exec.go new file mode 100644 index 0000000000..9dcd793844 --- /dev/null +++ b/test/e2e/pkg/exec/exec.go @@ -0,0 +1,34 @@ +package exec + +import ( + "context" + "fmt" + "os" + osexec "os/exec" +) + +// Command executes a shell command. 
+func Command(ctx context.Context, args ...string) error { + // nolint: gosec + // G204: Subprocess launched with a potential tainted input or cmd arguments + cmd := osexec.CommandContext(ctx, args[0], args[1:]...) + out, err := cmd.CombinedOutput() + switch err := err.(type) { + case nil: + return nil + case *osexec.ExitError: + return fmt.Errorf("failed to run %q:\n%v", args, string(out)) + default: + return err + } +} + +// CommandVerbose executes a shell command while displaying its output. +func CommandVerbose(ctx context.Context, args ...string) error { + // nolint: gosec + // G204: Subprocess launched with a potential tainted input or cmd arguments + cmd := osexec.CommandContext(ctx, args[0], args[1:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} diff --git a/test/e2e/pkg/infra/docker/compose.go b/test/e2e/pkg/infra/docker/compose.go new file mode 100644 index 0000000000..8291d73961 --- /dev/null +++ b/test/e2e/pkg/infra/docker/compose.go @@ -0,0 +1,97 @@ +package docker + +import ( + "bytes" + "os" + "text/template" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +// makeDockerCompose generates a Docker Compose config for a testnet. +func makeDockerCompose(testnet *e2e.Testnet) ([]byte, error) { + // Must use version 2 Docker Compose format, to support IPv6. 
+ tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{ + "addUint32": func(x, y uint32) uint32 { + return x + y + }, + "isBuiltin": func(protocol e2e.Protocol, mode e2e.Mode) bool { + return mode == e2e.ModeLight || protocol == e2e.ProtocolBuiltin + }, + "debugPort": func(index int) int { + return 40000 + index + 1 + }, + }).Parse(`version: '2.4' + +networks: + {{ .Name }}: + labels: + e2e: true + driver: bridge +{{- if .IPv6 }} + enable_ipv6: true +{{- end }} + ipam: + driver: default + config: + - subnet: {{ .IP }} + +services: +{{- range $index, $node := .Nodes }} + {{ .Name }}: + labels: + e2e: true + container_name: {{ .Name }} + image: tenderdash/e2e-node +{{- if isBuiltin $.ABCIProtocol .Mode }} + entrypoint: /usr/bin/entrypoint-builtin +{{- else if .LogLevel }} + command: start --log-level {{ .LogLevel }} +{{- end }} + init: true +{{- if $.Debug }} + environment: + - DEBUG=1 + - DEBUG_PORT={{ debugPort $index }} +{{- end }} + ports: + - 26656 + - {{ if .ProxyPort }}{{ addUint32 .ProxyPort 1000 }}:{{ end }}26660 + - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657 + - 6060 +{{- if $.Debug }} + - {{ debugPort $index }}:{{ debugPort $index }} + security_opt: + - "seccomp:unconfined" + cap_add: + - SYS_PTRACE +{{- end }} + volumes: + - ./{{ .Name }}:/tenderdash +{{- if ne $.PreCompiledAppPath "" }} + - {{ $.PreCompiledAppPath }}:/usr/bin/app +{{- end }} + networks: + {{ $.Name }}: + ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }} + +{{end}}`) + if err != nil { + return nil, err + } + data := &struct { + *e2e.Testnet + PreCompiledAppPath string + Debug bool + }{ + Testnet: testnet, + PreCompiledAppPath: os.Getenv("PRE_COMPILED_APP_PATH"), + Debug: os.Getenv("DEBUG") != "", + } + var buf bytes.Buffer + err = tmpl.Execute(&buf, data) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/test/e2e/pkg/infra/docker/exec.go b/test/e2e/pkg/infra/docker/exec.go new file mode 100644 index 0000000000..de0033e325 --- 
/dev/null +++ b/test/e2e/pkg/infra/docker/exec.go @@ -0,0 +1,27 @@ +package docker + +import ( + "context" + "path/filepath" + + "github.com/tendermint/tendermint/test/e2e/pkg/exec" +) + +// execCompose runs a Docker Compose command for a testnet. +func execCompose(ctx context.Context, dir string, args ...string) error { + return exec.Command(ctx, append( + []string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")}, + args...)...) +} + +// execComposeVerbose runs a Docker Compose command for a testnet and displays its output. +func execComposeVerbose(ctx context.Context, dir string, args ...string) error { + return exec.CommandVerbose(ctx, append( + []string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")}, + args...)...) +} + +// execDocker runs a Docker command. +func execDocker(ctx context.Context, args ...string) error { + return exec.Command(ctx, append([]string{"docker"}, args...)...) +} diff --git a/test/e2e/pkg/infra/docker/infra.go b/test/e2e/pkg/infra/docker/infra.go new file mode 100644 index 0000000000..382576c91d --- /dev/null +++ b/test/e2e/pkg/infra/docker/infra.go @@ -0,0 +1,140 @@ +package docker + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/tendermint/tendermint/libs/log" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/exec" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" +) + +// testnetInfra provides an API for provisioning and manipulating +// infrastructure for a Docker-based testnet. +type testnetInfra struct { + logger log.Logger + testnet *e2e.Testnet +} + +var _ infra.TestnetInfra = &testnetInfra{} + +// NewTestnetInfra constructs an infrastructure provider that allows for Docker-based +// testnet infrastructure. 
+func NewTestnetInfra(logger log.Logger, testnet *e2e.Testnet) infra.TestnetInfra { + return &testnetInfra{ + logger: logger, + testnet: testnet, + } +} + +func (ti *testnetInfra) Setup(ctx context.Context) error { + compose, err := makeDockerCompose(ti.testnet) + if err != nil { + return err + } + // nolint: gosec + // G306: Expect WriteFile permissions to be 0600 or less + err = os.WriteFile(filepath.Join(ti.testnet.Dir, "docker-compose.yml"), compose, 0644) + if err != nil { + return err + } + return nil +} + +func (ti *testnetInfra) StartNode(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "up", "-d", node.Name) +} + +func (ti *testnetInfra) DisconnectNode(ctx context.Context, node *e2e.Node) error { + return execDocker(ctx, "network", "disconnect", ti.testnet.Name+"_"+ti.testnet.Name, node.Name) +} + +func (ti *testnetInfra) ConnectNode(ctx context.Context, node *e2e.Node) error { + return execDocker(ctx, "network", "connect", ti.testnet.Name+"_"+ti.testnet.Name, node.Name) +} + +func (ti *testnetInfra) KillNodeProcess(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "kill", "-s", "SIGKILL", node.Name) +} + +func (ti *testnetInfra) StartNodeProcess(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "start", node.Name) +} + +func (ti *testnetInfra) PauseNodeProcess(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "pause", node.Name) +} + +func (ti *testnetInfra) UnpauseNodeProcess(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "unpause", node.Name) +} + +func (ti *testnetInfra) TerminateNodeProcess(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "kill", "-s", "SIGTERM", node.Name) +} + +func (ti *testnetInfra) Stop(ctx context.Context) error { + return execCompose(ctx, ti.testnet.Dir, "down") +} + +func (ti *testnetInfra) Pause(ctx 
context.Context) error { + return execCompose(ctx, ti.testnet.Dir, "pause") +} + +func (ti *testnetInfra) Unpause(ctx context.Context) error { + return execCompose(ctx, ti.testnet.Dir, "unpause") +} + +func (ti *testnetInfra) ShowLogs(ctx context.Context) error { + return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--no-color") +} + +func (ti *testnetInfra) ShowNodeLogs(ctx context.Context, node *e2e.Node) error { + return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--no-color", node.Name) +} + +func (ti *testnetInfra) TailLogs(ctx context.Context) error { + return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--follow") +} + +func (ti *testnetInfra) TailNodeLogs(ctx context.Context, node *e2e.Node) error { + return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--follow", node.Name) +} + +func (ti *testnetInfra) Cleanup(ctx context.Context) error { + ti.logger.Info("Removing Docker containers and networks") + + // GNU xargs requires the -r flag to not run when input is empty, macOS + // does this by default. Ugly, but works. + xargsR := `$(if [[ $OSTYPE == "linux-gnu"* ]]; then echo -n "-r"; fi)` + + err := exec.Command(ctx, "bash", "-c", fmt.Sprintf( + "docker container ls -qa --filter label=e2e | xargs %v docker container rm -f", xargsR)) + if err != nil { + return err + } + + err = exec.Command(ctx, "bash", "-c", fmt.Sprintf( + "docker network ls -q --filter label=e2e | xargs %v docker network rm", xargsR)) + if err != nil { + return err + } + + // On Linux, some local files in the volume will be owned by root since Tendermint + // runs as root inside the container, so we need to clean them up from within a + // container running as root too. 
+ absDir, err := filepath.Abs(ti.testnet.Dir) + if err != nil { + return err + } + err = execDocker(ctx, "run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir), + "tenderdash/e2e-node", "sh", "-c", "rm -rf /network/*/") + if err != nil { + return err + } + + return nil +} diff --git a/test/e2e/pkg/infra/infra.go b/test/e2e/pkg/infra/infra.go new file mode 100644 index 0000000000..2fa7c5ad97 --- /dev/null +++ b/test/e2e/pkg/infra/infra.go @@ -0,0 +1,84 @@ +package infra + +import ( + "context" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +// TestnetInfra provides an API for manipulating the infrastructure of a +// specific testnet. +type TestnetInfra interface { + // + // Overarching testnet infrastructure management. + // + + // Setup generates any necessary configuration for the infrastructure + // provider during testnet setup. + Setup(ctx context.Context) error + + // Stop will stop all running processes throughout the testnet without + // destroying any infrastructure. + Stop(ctx context.Context) error + + // Pause will pause all processes in the testnet. + Pause(ctx context.Context) error + + // Unpause will resume a paused testnet. + Unpause(ctx context.Context) error + + // ShowLogs prints all logs for the whole testnet to stdout. + ShowLogs(ctx context.Context) error + + // TailLogs tails the logs for all nodes in the testnet, if this is + // supported by the infrastructure provider. + TailLogs(ctx context.Context) error + + // Cleanup stops and destroys all running testnet infrastructure and + // deletes any generated files. + Cleanup(ctx context.Context) error + + // + // Node management, including node infrastructure. + // + + // StartNode provisions infrastructure for the given node and starts it. 
+ StartNode(ctx context.Context, node *e2e.Node) error + + // DisconnectNode modifies the specified node's network configuration such + // that it becomes bidirectionally disconnected from the network (it cannot + // see other nodes, and other nodes cannot see it). + DisconnectNode(ctx context.Context, node *e2e.Node) error + + // ConnectNode modifies the specified node's network configuration such + // that it can become bidirectionally connected. + ConnectNode(ctx context.Context, node *e2e.Node) error + + // ShowNodeLogs prints all logs for the node with the give ID to stdout. + ShowNodeLogs(ctx context.Context, node *e2e.Node) error + + // TailNodeLogs tails the logs for a single node, if this is supported by + // the infrastructure provider. + TailNodeLogs(ctx context.Context, node *e2e.Node) error + + // + // Node process management. + // + + // KillNodeProcess sends SIGKILL to a node's process. + KillNodeProcess(ctx context.Context, node *e2e.Node) error + + // StartNodeProcess will start a stopped node's process. Assumes that the + // node's infrastructure has previously been provisioned using + // ProvisionNode. + StartNodeProcess(ctx context.Context, node *e2e.Node) error + + // PauseNodeProcess sends a signal to the node's process to pause it. + PauseNodeProcess(ctx context.Context, node *e2e.Node) error + + // UnpauseNodeProcess resumes a paused node's process. + UnpauseNodeProcess(ctx context.Context, node *e2e.Node) error + + // TerminateNodeProcess sends SIGTERM to a node's process. + TerminateNodeProcess(ctx context.Context, node *e2e.Node) error +} diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index c44108edc3..e407b05dac 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -86,13 +86,26 @@ type Manifest struct { QueueType string `toml:"queue_type"` // Number of bytes per tx. 
Default is 1kb (1024) - TxSize int + TxSize int `toml:"tx_size"` + + // VoteExtensionsEnableHeight configures the first height during which + // the chain will use and require vote extension data to be present + // in precommit messages. + VoteExtensionsEnableHeight int64 `toml:"vote_extensions_enable_height"` // ABCIProtocol specifies the protocol used to communicate with the ABCI // application: "unix", "tcp", "grpc", or "builtin". Defaults to builtin. // builtin will build a complete Tendermint node into the application and // launch it instead of launching a separate Tendermint process. ABCIProtocol string `toml:"abci_protocol"` + + // Add artificial delays to each of the main ABCI calls to mimic computation time + // of the application + PrepareProposalDelayMS uint64 `toml:"prepare_proposal_delay_ms"` + ProcessProposalDelayMS uint64 `toml:"process_proposal_delay_ms"` + CheckTxDelayMS uint64 `toml:"check_tx_delay_ms"` + VoteExtensionDelayMS uint64 `toml:"vote_extension_delay_ms"` + FinalizeBlockDelayMS uint64 `toml:"finalize_block_delay_ms"` } // ManifestNode represents a node in a testnet manifest. 
diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index e873e50ffa..61cc466165 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -1,4 +1,3 @@ -//nolint: gosec package e2e import ( @@ -72,20 +71,25 @@ type ValidatorConfig struct { type ValidatorsMap map[*Node]ValidatorConfig type Testnet struct { - Name string - File string - Dir string - IP *net.IPNet - InitialHeight int64 - InitialState map[string]string - Validators ValidatorsMap - ValidatorUpdates map[int64]ValidatorsMap - Nodes []*Node - KeyType string - Evidence int - LogLevel string - TxSize int - ABCIProtocol string + Name string + File string + Dir string + IP *net.IPNet + InitialHeight int64 + InitialState map[string]string + Validators ValidatorsMap + ValidatorUpdates map[int64]ValidatorsMap + Nodes []*Node + KeyType string + Evidence int + LogLevel string + TxSize int + ABCIProtocol Protocol + PrepareProposalDelayMS int + ProcessProposalDelayMS int + CheckTxDelayMS int + VoteExtensionDelayMS int + FinalizeBlockDelayMS int // Tenderdash-specific fields GenesisCoreHeight uint32 // InitialCoreHeight is a core height put into genesis file @@ -113,7 +117,6 @@ type Node struct { Mempool string StateSync string Database string - ABCIProtocol Protocol PrivvalProtocol Protocol PersistInterval uint64 SnapshotInterval uint64 @@ -194,7 +197,12 @@ func LoadTestnet(file string) (*Testnet, error) { KeyType: bls12381.KeyType, LogLevel: manifest.LogLevel, TxSize: manifest.TxSize, - ABCIProtocol: manifest.ABCIProtocol, + ABCIProtocol: Protocol(manifest.ABCIProtocol), + PrepareProposalDelayMS: int(manifest.PrepareProposalDelayMS), + ProcessProposalDelayMS: int(manifest.ProcessProposalDelayMS), + CheckTxDelayMS: int(manifest.CheckTxDelayMS), + VoteExtensionDelayMS: int(manifest.VoteExtensionDelayMS), + FinalizeBlockDelayMS: int(manifest.FinalizeBlockDelayMS), ThresholdPublicKey: ld.ThresholdPubKey, ThresholdPublicKeyUpdates: map[int64]crypto.PubKey{}, QuorumType: 
btcjson.LLMQType(quorumType), @@ -211,7 +219,7 @@ func LoadTestnet(file string) (*Testnet, error) { testnet.InitialHeight = manifest.InitialHeight } if testnet.ABCIProtocol == "" { - testnet.ABCIProtocol = string(ProtocolBuiltin) + testnet.ABCIProtocol = ProtocolBuiltin } if manifest.GenesisCoreChainLockedHeight > 0 { testnet.GenesisCoreHeight = manifest.GenesisCoreChainLockedHeight @@ -239,7 +247,6 @@ func LoadTestnet(file string) (*Testnet, error) { ProxyPort: proxyPortGen.Next(), Mode: ModeValidator, Database: "goleveldb", - ABCIProtocol: Protocol(testnet.ABCIProtocol), PrivvalProtocol: ProtocolFile, StartAt: nodeManifest.StartAt, Mempool: nodeManifest.Mempool, @@ -257,9 +264,6 @@ func LoadTestnet(file string) (*Testnet, error) { if nodeManifest.Mode != "" { node.Mode = Mode(nodeManifest.Mode) } - if node.Mode == ModeLight { - node.ABCIProtocol = ProtocolBuiltin - } if nodeManifest.Database != "" { node.Database = nodeManifest.Database } @@ -440,6 +444,12 @@ func (t Testnet) Validate() error { default: return errors.New("unsupported KeyType") } + switch t.ABCIProtocol { + case ProtocolBuiltin, ProtocolUNIX, ProtocolTCP, ProtocolGRPC: + default: + return fmt.Errorf("invalid ABCI protocol setting %q", t.ABCIProtocol) + } + for _, node := range t.Nodes { if err := node.Validate(t); err != nil { return fmt.Errorf("invalid node %q: %w", node.Name, err) @@ -488,7 +498,7 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("invalid mempool version %q", n.Mempool) } switch n.QueueType { - case "", "priority", "fifo": + case "", "priority", "fifo", "simple-priority": default: return fmt.Errorf("unsupported p2p queue type: %s", n.QueueType) } @@ -497,14 +507,6 @@ func (n Node) Validate(testnet Testnet) error { default: return fmt.Errorf("invalid database setting %q", n.Database) } - switch n.ABCIProtocol { - case ProtocolBuiltin, ProtocolUNIX, ProtocolTCP, ProtocolGRPC: - default: - return fmt.Errorf("invalid ABCI protocol setting %q", n.ABCIProtocol) - } 
- if n.Mode == ModeLight && n.ABCIProtocol != ProtocolBuiltin { - return errors.New("light client must use builtin protocol") - } switch n.PrivvalProtocol { case ProtocolFile, ProtocolUNIX, ProtocolTCP, ProtocolGRPC, ProtocolDashCore: default: @@ -642,6 +644,8 @@ type keyGenerator struct { } func newKeyGenerator(seed int64) *keyGenerator { + // nolint: gosec + // G404: Use of weak random number generator (math/rand instead of crypto/rand) return &keyGenerator{ random: rand.New(rand.NewSource(seed)), } diff --git a/test/e2e/runner/cleanup.go b/test/e2e/runner/cleanup.go index 1c23e3fef6..5332af29cb 100644 --- a/test/e2e/runner/cleanup.go +++ b/test/e2e/runner/cleanup.go @@ -1,70 +1,31 @@ package main import ( + "context" "errors" - "fmt" "os" - "path/filepath" "github.com/tendermint/tendermint/libs/log" - e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" ) -// Cleanup removes the Docker Compose containers and testnet directory. -func Cleanup(logger log.Logger, testnet *e2e.Testnet) error { - err := cleanupDocker(logger) - if err != nil { - return err +// Cleanup destroys all infrastructure and removes all generated testnet files. +func Cleanup(ctx context.Context, logger log.Logger, testnetDir string, ti infra.TestnetInfra) error { + if testnetDir == "" { + return errors.New("no testnet directory set") } - return cleanupDir(logger, testnet.Dir) -} - -// cleanupDocker removes all E2E resources (with label e2e=True), regardless -// of testnet. -func cleanupDocker(logger log.Logger) error { - logger.Info("Removing Docker containers and networks") - - // GNU xargs requires the -r flag to not run when input is empty, macOS - // does this by default. Ugly, but works. 
- xargsR := `$(if [[ $OSTYPE == "linux-gnu"* ]]; then echo -n "-r"; fi)` - err := exec("bash", "-c", fmt.Sprintf( - "docker container ls -qa --filter label=e2e | xargs %v docker container rm -f", xargsR)) - if err != nil { + if err := ti.Cleanup(ctx); err != nil { return err } - return exec("bash", "-c", fmt.Sprintf( - "docker network ls -q --filter label=e2e | xargs %v docker network rm", xargsR)) -} - -// cleanupDir cleans up a testnet directory -func cleanupDir(logger log.Logger, dir string) error { - if dir == "" { - return errors.New("no directory set") - } - - _, err := os.Stat(dir) + _, err := os.Stat(testnetDir) if os.IsNotExist(err) { return nil } else if err != nil { return err } - logger.Info(fmt.Sprintf("Removing testnet directory %q", dir)) - - // On Linux, some local files in the volume will be owned by root since Tenderdash - // runs as root inside the container, so we need to clean them up from within a - // container running as root too. - absDir, err := filepath.Abs(dir) - if err != nil { - return err - } - err = execDocker("run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir), - "tenderdash/e2e-node", "sh", "-c", "rm -rf /network/*/") - if err != nil { - return err - } - - return os.RemoveAll(dir) + logger.Info("Removing testnet", "directory", testnetDir) + return os.RemoveAll(testnetDir) } diff --git a/test/e2e/runner/exec.go b/test/e2e/runner/exec.go deleted file mode 100644 index f2bc5163c0..0000000000 --- a/test/e2e/runner/exec.go +++ /dev/null @@ -1,50 +0,0 @@ -//nolint: gosec -package main - -import ( - "fmt" - "os" - osexec "os/exec" - "path/filepath" -) - -// execute executes a shell command. -func exec(args ...string) error { - cmd := osexec.Command(args[0], args[1:]...) 
- out, err := cmd.CombinedOutput() - switch err := err.(type) { - case nil: - return nil - case *osexec.ExitError: - return fmt.Errorf("failed to run %q:\n%v", args, string(out)) - default: - return err - } -} - -// execVerbose executes a shell command while displaying its output. -func execVerbose(args ...string) error { - cmd := osexec.Command(args[0], args[1:]...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - return cmd.Run() -} - -// execCompose runs a Docker Compose command for a testnet. -func execCompose(dir string, args ...string) error { - return exec(append( - []string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")}, - args...)...) -} - -// execComposeVerbose runs a Docker Compose command for a testnet and displays its output. -func execComposeVerbose(dir string, args ...string) error { - return execVerbose(append( - []string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")}, - args...)...) -} - -// execDocker runs a Docker command. -func execDocker(args ...string) error { - return exec(append([]string{"docker"}, args...)...) 
-} diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index c4a73d33f9..9a24f11417 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -13,6 +13,8 @@ import ( "github.com/tendermint/tendermint/libs/log" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" + "github.com/tendermint/tendermint/test/e2e/pkg/infra/docker" ) const randomSeed = 2308084734268 @@ -33,6 +35,7 @@ func main() { type CLI struct { root *cobra.Command testnet *e2e.Testnet + infra infra.TestnetInfra preserve bool } @@ -53,12 +56,23 @@ func NewCLI(logger log.Logger) *CLI { if err != nil { return err } + providerID, err := cmd.Flags().GetString("provider") + if err != nil { + return err + } + switch providerID { + case "docker": + cli.infra = docker.NewTestnetInfra(logger, testnet) + logger.Info("Using Docker-based infrastructure provider") + default: + return fmt.Errorf("unrecognized infrastructure provider ID: %s", providerID) + } cli.testnet = testnet return nil }, RunE: func(cmd *cobra.Command, args []string) (err error) { - if err = Cleanup(logger, cli.testnet); err != nil { + if err = Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil { return err } defer func() { @@ -67,11 +81,11 @@ func NewCLI(logger log.Logger) *CLI { } else if err != nil { logger.Info("Preserving testnet that encountered error", "err", err) - } else if err := Cleanup(logger, cli.testnet); err != nil { + } else if err := Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil { logger.Error("error cleaning up testnet contents", "err", err) } }() - if err = Setup(logger, cli.testnet); err != nil { + if err = Setup(cmd.Context(), logger, cli.testnet, cli.infra); err != nil { return err } @@ -87,7 +101,7 @@ func NewCLI(logger log.Logger) *CLI { chLoadResult <- Load(lctx, logger, r, cli.testnet) }() startAt := time.Now() - if err = Start(ctx, logger, cli.testnet); err != nil { + if err = Start(ctx, logger, 
cli.testnet, cli.infra); err != nil { return err } @@ -96,7 +110,7 @@ func NewCLI(logger log.Logger) *CLI { } if cli.testnet.HasPerturbations() { - if err = Perturb(ctx, logger, cli.testnet); err != nil { + if err = Perturb(ctx, logger, cli.testnet, cli.infra); err != nil { return err } if err = Wait(ctx, logger, cli.testnet, 5); err != nil { // allow some txs to go through @@ -134,7 +148,7 @@ func NewCLI(logger log.Logger) *CLI { if err = Wait(ctx, logger, cli.testnet, 5); err != nil { // wait for network to settle before tests return err } - if err := Test(cli.testnet); err != nil { + if err := Test(ctx, cli.testnet); err != nil { return err } return nil @@ -144,6 +158,8 @@ func NewCLI(logger log.Logger) *CLI { cli.root.PersistentFlags().StringP("file", "f", "", "Testnet TOML manifest") _ = cli.root.MarkPersistentFlagRequired("file") + cli.root.PersistentFlags().String("provider", "docker", "Which infrastructure provider to use") + cli.root.Flags().BoolVarP(&cli.preserve, "preserve", "p", false, "Preserves the running of the test net after tests are completed") @@ -156,7 +172,7 @@ func NewCLI(logger log.Logger) *CLI { Use: "setup", Short: "Generates the testnet directory and configuration", RunE: func(cmd *cobra.Command, args []string) error { - return Setup(logger, cli.testnet) + return Setup(cmd.Context(), logger, cli.testnet, cli.infra) }, }) @@ -166,12 +182,12 @@ func NewCLI(logger log.Logger) *CLI { RunE: func(cmd *cobra.Command, args []string) error { _, err := os.Stat(cli.testnet.Dir) if os.IsNotExist(err) { - err = Setup(logger, cli.testnet) + err = Setup(cmd.Context(), logger, cli.testnet, cli.infra) } if err != nil { return err } - return Start(cmd.Context(), logger, cli.testnet) + return Start(cmd.Context(), logger, cli.testnet, cli.infra) }, }) @@ -179,7 +195,7 @@ func NewCLI(logger log.Logger) *CLI { Use: "perturb", Short: "Perturbs the Docker testnet, e.g. 
by restarting or disconnecting nodes", RunE: func(cmd *cobra.Command, args []string) error { - return Perturb(cmd.Context(), logger, cli.testnet) + return Perturb(cmd.Context(), logger, cli.testnet, cli.infra) }, }) @@ -196,7 +212,7 @@ func NewCLI(logger log.Logger) *CLI { Short: "Stops the Docker testnet", RunE: func(cmd *cobra.Command, args []string) error { logger.Info("Stopping testnet") - return execCompose(cli.testnet.Dir, "down") + return cli.infra.Stop(cmd.Context()) }, }) @@ -205,7 +221,7 @@ func NewCLI(logger log.Logger) *CLI { Short: "Pauses the Docker testnet", RunE: func(cmd *cobra.Command, args []string) error { logger.Info("Pausing testnet") - return execCompose(cli.testnet.Dir, "pause") + return cli.infra.Pause(cmd.Context()) }, }) @@ -214,7 +230,7 @@ func NewCLI(logger log.Logger) *CLI { Short: "Resumes the Docker testnet", RunE: func(cmd *cobra.Command, args []string) error { logger.Info("Resuming testnet") - return execCompose(cli.testnet.Dir, "unpause") + return cli.infra.Unpause(cmd.Context()) }, }) @@ -259,7 +275,7 @@ func NewCLI(logger log.Logger) *CLI { Use: "test", Short: "Runs test cases against a running testnet", RunE: func(cmd *cobra.Command, args []string) error { - return Test(cli.testnet) + return Test(cmd.Context(), cli.testnet) }, }) @@ -267,17 +283,24 @@ func NewCLI(logger log.Logger) *CLI { Use: "cleanup", Short: "Removes the testnet directory", RunE: func(cmd *cobra.Command, args []string) error { - return Cleanup(logger, cli.testnet) + return Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra) }, }) cli.root.AddCommand(&cobra.Command{ Use: "logs [node]", - Short: "Shows the testnet or a specefic node's logs", + Short: "Shows the testnet or a specific node's logs", Example: "runner logs validator03", Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - return execComposeVerbose(cli.testnet.Dir, append([]string{"logs", "--no-color"}, args...)...) 
+ if len(args) > 0 { + node := cli.testnet.LookupNode(args[0]) + if node == nil { + return fmt.Errorf("no such node: %s", args[0]) + } + return cli.infra.ShowNodeLogs(cmd.Context(), node) + } + return cli.infra.ShowLogs(cmd.Context()) }, }) @@ -287,9 +310,13 @@ func NewCLI(logger log.Logger) *CLI { Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 1 { - return execComposeVerbose(cli.testnet.Dir, "logs", "--follow", args[0]) + node := cli.testnet.LookupNode(args[0]) + if node == nil { + return fmt.Errorf("no such node: %s", args[0]) + } + return cli.infra.TailNodeLogs(cmd.Context(), node) } - return execComposeVerbose(cli.testnet.Dir, "logs", "--follow") + return cli.infra.TailLogs(cmd.Context()) }, }) @@ -302,20 +329,20 @@ func NewCLI(logger log.Logger) *CLI { Min Block Interval Max Block Interval over a 100 block sampling period. - + Does not run any perbutations. `, RunE: func(cmd *cobra.Command, args []string) error { - if err := Cleanup(logger, cli.testnet); err != nil { + if err := Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil { return err } defer func() { - if err := Cleanup(logger, cli.testnet); err != nil { + if err := Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil { logger.Error("error cleaning up testnet contents", "err", err) } }() - if err := Setup(logger, cli.testnet); err != nil { + if err := Setup(cmd.Context(), logger, cli.testnet, cli.infra); err != nil { return err } @@ -331,7 +358,7 @@ Does not run any perbutations. 
chLoadResult <- Load(lctx, logger, r, cli.testnet) }() - if err := Start(ctx, logger, cli.testnet); err != nil { + if err := Start(ctx, logger, cli.testnet, cli.infra); err != nil { return err } diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go index acabf7f342..76a209ea24 100644 --- a/test/e2e/runner/perturb.go +++ b/test/e2e/runner/perturb.go @@ -8,10 +8,11 @@ import ( "github.com/tendermint/tendermint/libs/log" rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" ) // Perturbs a running testnet. -func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { +func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, ti infra.TestnetInfra) error { timer := time.NewTimer(0) // first tick fires immediately; reset below defer timer.Stop() @@ -21,7 +22,7 @@ func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error case <-ctx.Done(): return ctx.Err() case <-timer.C: - _, err := PerturbNode(ctx, logger, node, perturbation) + _, err := PerturbNode(ctx, logger, node, perturbation, ti) if err != nil { return err } @@ -36,46 +37,45 @@ func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error // PerturbNode perturbs a node with a given perturbation, returning its status // after recovering. 
-func PerturbNode(ctx context.Context, logger log.Logger, node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.ResultStatus, error) { - testnet := node.Testnet +func PerturbNode(ctx context.Context, logger log.Logger, node *e2e.Node, perturbation e2e.Perturbation, ti infra.TestnetInfra) (*rpctypes.ResultStatus, error) { switch perturbation { case e2e.PerturbationDisconnect: logger.Info(fmt.Sprintf("Disconnecting node %v...", node.Name)) - if err := execDocker("network", "disconnect", testnet.Name+"_"+testnet.Name, node.Name); err != nil { + if err := ti.DisconnectNode(ctx, node); err != nil { return nil, err } time.Sleep(10 * time.Second) - if err := execDocker("network", "connect", testnet.Name+"_"+testnet.Name, node.Name); err != nil { + if err := ti.ConnectNode(ctx, node); err != nil { return nil, err } case e2e.PerturbationKill: logger.Info(fmt.Sprintf("Killing node %v...", node.Name)) - if err := execCompose(testnet.Dir, "kill", "-s", "SIGKILL", node.Name); err != nil { + if err := ti.KillNodeProcess(ctx, node); err != nil { return nil, err } time.Sleep(10 * time.Second) - if err := execCompose(testnet.Dir, "start", node.Name); err != nil { + if err := ti.StartNodeProcess(ctx, node); err != nil { return nil, err } case e2e.PerturbationPause: logger.Info(fmt.Sprintf("Pausing node %v...", node.Name)) - if err := execCompose(testnet.Dir, "pause", node.Name); err != nil { + if err := ti.PauseNodeProcess(ctx, node); err != nil { return nil, err } time.Sleep(10 * time.Second) - if err := execCompose(testnet.Dir, "unpause", node.Name); err != nil { + if err := ti.UnpauseNodeProcess(ctx, node); err != nil { return nil, err } case e2e.PerturbationRestart: logger.Info(fmt.Sprintf("Restarting node %v...", node.Name)) - if err := execCompose(testnet.Dir, "kill", "-s", "SIGTERM", node.Name); err != nil { + if err := ti.TerminateNodeProcess(ctx, node); err != nil { return nil, err } time.Sleep(10 * time.Second) - if err := execCompose(testnet.Dir, "start", node.Name); 
err != nil { + if err := ti.StartNodeProcess(ctx, node); err != nil { return nil, err } diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index b7a69168f7..f3538cc057 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -27,7 +27,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty clients = map[string]*rpchttp.HTTP{} lastHeight int64 lastIncrease = time.Now() - nodesAtHeight = map[string]struct{}{} + nodesAtHeight = map[string]int64{} numRunningNodes int ) if height == 0 { @@ -89,7 +89,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty // add this node to the set of target // height nodes - nodesAtHeight[node.Name] = struct{}{} + nodesAtHeight[node.Name] = result.SyncInfo.LatestBlockHeight // if not all of the nodes that we // have clients for have reached the diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 241908b8af..aa1f2e4eb5 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -1,8 +1,8 @@ -// nolint: gosec package main import ( "bytes" + "context" "encoding/base64" "encoding/hex" "encoding/json" @@ -14,7 +14,6 @@ import ( "sort" "strconv" "strings" - "text/template" "time" "github.com/BurntSushi/toml" @@ -27,6 +26,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" "github.com/tendermint/tendermint/types" ) @@ -47,7 +47,7 @@ const ( ) // Setup sets up the testnet configuration. 
-func Setup(logger log.Logger, testnet *e2e.Testnet) error { +func Setup(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, ti infra.TestnetInfra) error { logger.Info(fmt.Sprintf("Generating testnet files in %q", testnet.Dir)) err := os.MkdirAll(testnet.Dir, os.ModePerm) @@ -55,15 +55,6 @@ func Setup(logger log.Logger, testnet *e2e.Testnet) error { return err } - compose, err := MakeDockerCompose(testnet) - if err != nil { - return err - } - err = os.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644) - if err != nil { - return err - } - genesisNodes, err := initGenesisForEveryNode(testnet) if err != nil { return err @@ -106,6 +97,8 @@ func Setup(logger log.Logger, testnet *e2e.Testnet) error { if err != nil { return err } + // nolint: gosec + // G306: Expect WriteFile permissions to be 0600 or less err = os.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) if err != nil { return err @@ -157,92 +150,11 @@ func Setup(logger log.Logger, testnet *e2e.Testnet) error { } } - return nil -} - -// MakeDockerCompose generates a Docker Compose config for a testnet. -func MakeDockerCompose(testnet *e2e.Testnet) ([]byte, error) { - // Must use version 2 Docker Compose format, to support IPv6. 
- tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{ - "debugPort": func(index int) int { - return 40000 + index + 1 - }, - "addUint32": func(x, y uint32) uint32 { - return x + y - }, - }).Parse(`version: '2.4' - -networks: - {{ .Name }}: - labels: - e2e: true - driver: bridge -{{- if .IPv6 }} - enable_ipv6: true -{{- end }} - ipam: - driver: default - config: - - subnet: {{ .IP }} - -services: -{{- range $index, $node := .Nodes }} - {{ .Name }}: - labels: - e2e: true - container_name: {{ .Name }} - image: tenderdash/e2e-node -{{- if eq .ABCIProtocol "builtin" }} - entrypoint: /usr/bin/entrypoint-builtin -{{- else if .LogLevel }} - command: start --log-level {{ .LogLevel }} -{{- end }} - init: true -{{- if $.Debug }} - environment: - - DEBUG=1 - - DEBUG_PORT={{ debugPort $index }} -{{- end }} - ports: - - 26656 - - {{ if .ProxyPort }}{{ addUint32 .ProxyPort 1000 }}:{{ end }}26660 - - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657 - - 6060 -{{- if $.Debug }} - - {{ debugPort $index }}:{{ debugPort $index }} - security_opt: - - "seccomp:unconfined" - cap_add: - - SYS_PTRACE -{{- end }} - volumes: - - ./{{ .Name }}:/tenderdash -{{- if ne $.PreCompiledAppPath "" }} - - {{ $.PreCompiledAppPath }}:/usr/bin/app -{{- end }} - networks: - {{ $.Name }}: - ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }} - -{{end}}`) - if err != nil { - return nil, err - } - var buf bytes.Buffer - data := &struct { - *e2e.Testnet - PreCompiledAppPath string - Debug bool - }{ - Testnet: testnet, - PreCompiledAppPath: os.Getenv("PRE_COMPILED_APP_PATH"), - Debug: os.Getenv("DEBUG") != "", - } - err = tmpl.Execute(&buf, data) - if err != nil { - return nil, err + if err := ti.Setup(ctx); err != nil { + return err } - return buf.Bytes(), nil + + return nil } // MakeGenesis generates a genesis document. 
@@ -315,7 +227,7 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.Mode = string(node.Mode) } - switch node.ABCIProtocol { + switch node.Testnet.ABCIProtocol { case e2e.ProtocolUNIX: cfg.ProxyApp = AppAddressUNIX case e2e.ProtocolTCP: @@ -327,7 +239,7 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.ProxyApp = "" cfg.ABCI = "" default: - return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.ABCIProtocol) + return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.Testnet.ABCIProtocol) } // Tenderdash errors if it does not have a privval key set up, regardless of whether @@ -387,14 +299,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { } } - cfg.P2P.Seeds = "" //nolint: staticcheck - for _, seed := range node.Seeds { - if len(cfg.P2P.Seeds) > 0 { //nolint: staticcheck - cfg.P2P.Seeds += "," //nolint: staticcheck - } - cfg.P2P.Seeds += seed.AddressP2P(true) //nolint: staticcheck - } - cfg.P2P.PersistentPeers = "" for _, peer := range node.PersistentPeers { if len(cfg.P2P.PersistentPeers) > 0 { @@ -411,20 +315,26 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { // MakeAppConfig generates an ABCI application config for a node. 
func MakeAppConfig(node *e2e.Node) ([]byte, error) { cfg := map[string]interface{}{ - "chain_id": node.Testnet.Name, - "dir": "data/app", - "listen": AppAddressUNIX, - "mode": node.Mode, - "proxy_port": node.ProxyPort, - "protocol": "socket", - "persist_interval": node.PersistInterval, - "snapshot_interval": node.SnapshotInterval, - "retain_blocks": node.RetainBlocks, - "key_type": bls12381.KeyType, - "privval_server_type": "dashcore", - "privval_server": PrivvalAddressDashCore, - } - switch node.ABCIProtocol { + "chain_id": node.Testnet.Name, + "dir": "data/app", + "listen": AppAddressUNIX, + "mode": node.Mode, + "proxy_port": node.ProxyPort, + "protocol": "socket", + "persist_interval": node.PersistInterval, + "snapshot_interval": node.SnapshotInterval, + "retain_blocks": node.RetainBlocks, + "key_type": bls12381.KeyType, + "privval_server_type": "dashcore", + "privval_server": PrivvalAddressDashCore, + "prepare_proposal_delay_ms": node.Testnet.PrepareProposalDelayMS, + "process_proposal_delay_ms": node.Testnet.ProcessProposalDelayMS, + "check_tx_delay_ms": node.Testnet.CheckTxDelayMS, + "vote_extension_delay_ms": node.Testnet.VoteExtensionDelayMS, + "finalize_block_delay_ms": node.Testnet.FinalizeBlockDelayMS, + } + + switch node.Testnet.ABCIProtocol { case e2e.ProtocolUNIX: cfg["listen"] = AppAddressUNIX case e2e.ProtocolTCP: @@ -436,7 +346,7 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { delete(cfg, "listen") cfg["protocol"] = "builtin" default: - return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.ABCIProtocol) + return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.Testnet.ABCIProtocol) } if node.Mode == e2e.ModeValidator { switch node.PrivvalProtocol { @@ -531,6 +441,8 @@ func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error { } bz = regexp.MustCompile(`(?m)^trust-height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-height = %v`, height))) bz = regexp.MustCompile(`(?m)^trust-hash 
=.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-hash = "%X"`, hash))) + // nolint: gosec + // G306: Expect WriteFile permissions to be 0600 or less return os.WriteFile(cfgPath, bz, 0644) } diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go index be9661df3a..5d5c2e7a96 100644 --- a/test/e2e/runner/start.go +++ b/test/e2e/runner/start.go @@ -8,9 +8,10 @@ import ( "github.com/tendermint/tendermint/libs/log" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" ) -func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { +func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, ti infra.TestnetInfra) error { if len(testnet.Nodes) == 0 { return fmt.Errorf("no nodes in testnet") } @@ -44,7 +45,7 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { for len(nodeQueue) > 0 && nodeQueue[0].StartAt == 0 { node := nodeQueue[0] nodeQueue = nodeQueue[1:] - if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil { + if err := ti.StartNode(ctx, node); err != nil { return err } @@ -58,7 +59,7 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { return err } node.HasStarted = true - logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v", node.Name, node.ProxyPort)) + logger.Info(fmt.Sprintf("Node %v up on http://%v:%v", node.Name, node.IP, node.ProxyPort)) } networkHeight := testnet.InitialHeight @@ -106,7 +107,7 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { } } - if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil { + if err := ti.StartNode(ctx, node); err != nil { return err } @@ -128,8 +129,8 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { } else { lastNodeHeight = status.SyncInfo.LatestBlockHeight } - logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v at height %v", - node.Name, node.ProxyPort, 
lastNodeHeight)) + logger.Info(fmt.Sprintf("Node %v up on http://%v:%v at height %v", + node.Name, node.IP, node.ProxyPort, lastNodeHeight)) } return nil diff --git a/test/e2e/runner/test.go b/test/e2e/runner/test.go index 2237588a11..7766d6c8d9 100644 --- a/test/e2e/runner/test.go +++ b/test/e2e/runner/test.go @@ -1,17 +1,19 @@ package main import ( + "context" "os" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/exec" ) // Test runs test cases under tests/ -func Test(testnet *e2e.Testnet) error { +func Test(ctx context.Context, testnet *e2e.Testnet) error { err := os.Setenv("E2E_MANIFEST", testnet.File) if err != nil { return err } - return execVerbose("./build/tests", "-test.count=1", "-test.v") + return exec.CommandVerbose(ctx, "./build/tests", "-test.count=1") }
other, regardless of discovery method. func TestNet_Peers(t *testing.T) { - // FIXME Skip test since nodes aren't always able to fully mesh - t.SkipNow() - testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) netInfo, err := client.NetInfo(ctx) require.NoError(t, err) - require.Equal(t, len(node.Testnet.Nodes)-1, netInfo.NPeers, - "node is not fully meshed with peers") - + // FIXME: https://github.com/tendermint/tendermint/issues/8848 + // We should be able to assert that we can discover all peers in a network + expectedPeers := len(node.Testnet.Nodes) + peers := make(map[string]*e2e.Node, 0) seen := map[string]bool{} for _, n := range node.Testnet.Nodes { - seen[n.Name] = (n.Name == node.Name) // we've clearly seen ourself + // we never save light client addresses as they use RPC or ourselves + if n.Mode == e2e.ModeLight || n.Name == node.Name { + expectedPeers-- + continue + } + peers[string(types.NodeIDFromPubKey(n.NodeKey.PubKey()))] = n + seen[n.Name] = false } + + require.GreaterOrEqual(t, netInfo.NPeers, expectedPeers-1, + "node is not fully meshed with peers") + for _, peerInfo := range netInfo.Peers { - id := peerInfo.ID - peer := node.Testnet.LookupNode(string(id)) - require.NotNil(t, peer, "unknown node %v", id) + id := string(peerInfo.ID) + peer, ok := peers[id] + require.True(t, ok, "unknown node %v", id) require.Contains(t, peerInfo.URL, peer.IP.String(), "unexpected IP address for peer %v", id) - seen[string(id)] = true + seen[peer.Name] = true } - for name := range seen { - require.True(t, seen[name], "node %v not peered with %v", node.Name, name) - } + // FIXME: https://github.com/tendermint/tendermint/issues/8848 + // We should be able to assert that we can discover all peers in a network + // for name := range seen { + // require.True(t, seen[name], "node %v not peered with %v", node.Name, name) + // } }) } diff --git a/test/fuzz/README.md b/test/fuzz/README.md index 
11ec9d5216..68077ad235 100644 --- a/test/fuzz/README.md +++ b/test/fuzz/README.md @@ -1,6 +1,7 @@ # fuzz -Fuzzing for various packages in Tendermint using [go-fuzz](https://github.com/dvyukov/go-fuzz) library. +Fuzzing for various packages in Tendermint using the fuzzing infrastructure included in +Go 1.18. Inputs: diff --git a/test/fuzz/oss-fuzz-build.sh b/test/fuzz/oss-fuzz-build.sh index 836253d4d1..528290c187 100755 --- a/test/fuzz/oss-fuzz-build.sh +++ b/test/fuzz/oss-fuzz-build.sh @@ -1,5 +1,6 @@ #!/bin/bash - +# This script is invoked by OSS-Fuzz to run fuzz tests against Tendermint core. +# See https://github.com/google/oss-fuzz/blob/master/projects/tendermint/build.sh set -euo pipefail export FUZZ_ROOT="github.com/tendermint/tendermint" @@ -8,12 +9,12 @@ build_go_fuzzer() { local function="$1" local fuzzer="$2" - gotip run github.com/orijtech/otils/corpus2ossfuzz@latest -o "$OUT"/"$fuzzer"_seed_corpus.zip -corpus test/fuzz/tests/testdata/fuzz/"$function" + go run github.com/orijtech/otils/corpus2ossfuzz@latest -o "$OUT"/"$fuzzer"_seed_corpus.zip -corpus test/fuzz/tests/testdata/fuzz/"$function" compile_native_go_fuzzer "$FUZZ_ROOT"/test/fuzz/tests "$function" "$fuzzer" } -gotip get github.com/AdamKorcz/go-118-fuzz-build/utils -gotip get github.com/prometheus/common/expfmt@v0.32.1 +go get github.com/AdamKorcz/go-118-fuzz-build/utils +go get github.com/prometheus/common/expfmt@v0.32.1 build_go_fuzzer FuzzP2PSecretConnection fuzz_p2p_secretconnection diff --git a/tools/tools.go b/tools/tools.go index 9fc291d99a..52a676b009 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -8,6 +8,7 @@ package tools import ( + _ "github.com/bufbuild/buf/cmd/buf" _ "github.com/golangci/golangci-lint/cmd/golangci-lint" _ "github.com/vektra/mockery/v2" ) diff --git a/types/block.go b/types/block.go index 22ec743cd8..f887ebcccd 100644 --- a/types/block.go +++ b/types/block.go @@ -930,8 +930,6 @@ func CommitFromProto(cp *tmproto.Commit) (*Commit, error) { return commit, 
commit.ValidateBasic() } -//----------------------------------------------------------------------------- - // Data contains the set of transactions included in the block type Data struct { @@ -1101,3 +1099,9 @@ func BlockIDFromProto(bID *tmproto.BlockID) (*BlockID, error) { return blockID, blockID.ValidateBasic() } + +// ProtoBlockIDIsNil is similar to the IsNil function on BlockID, but for the +// Protobuf representation. +func ProtoBlockIDIsNil(bID *tmproto.BlockID) bool { + return len(bID.Hash) == 0 && ProtoPartSetHeaderIsZero(&bID.PartSetHeader) +} diff --git a/types/events.go b/types/events.go index 63d0c80911..4dfa6d5af1 100644 --- a/types/events.go +++ b/types/events.go @@ -133,7 +133,10 @@ type EventDataNewBlock struct { func (EventDataNewBlock) TypeTag() string { return "tendermint/event/NewBlock" } // ABCIEvents implements the eventlog.ABCIEventer interface. -func (e EventDataNewBlock) ABCIEvents() []abci.Event { return e.ResultFinalizeBlock.Events } +func (e EventDataNewBlock) ABCIEvents() []abci.Event { + base := []abci.Event{eventWithAttr(BlockHeightKey, fmt.Sprint(e.Block.Header.Height))} + return append(base, e.ResultFinalizeBlock.Events...) +} type EventDataNewBlockHeader struct { Header Header `json:"header"` @@ -146,7 +149,10 @@ type EventDataNewBlockHeader struct { func (EventDataNewBlockHeader) TypeTag() string { return "tendermint/event/NewBlockHeader" } // ABCIEvents implements the eventlog.ABCIEventer interface. -func (e EventDataNewBlockHeader) ABCIEvents() []abci.Event { return e.ResultFinalizeBlock.Events } +func (e EventDataNewBlockHeader) ABCIEvents() []abci.Event { + base := []abci.Event{eventWithAttr(BlockHeightKey, fmt.Sprint(e.Header.Height))} + return append(base, e.ResultFinalizeBlock.Events...) 
+} type EventDataNewEvidence struct { Evidence Evidence `json:"evidence"` @@ -273,18 +279,17 @@ func (EventDataEvidenceValidated) TypeTag() string { return "tendermint/event/Ev const ( // EventTypeKey is a reserved composite key for event name. EventTypeKey = "tm.event" + // TxHashKey is a reserved key, used to specify transaction's hash. // see EventBus#PublishEventTx TxHashKey = "tx.hash" + // TxHeightKey is a reserved key, used to specify transaction block's height. // see EventBus#PublishEventTx TxHeightKey = "tx.height" // BlockHeightKey is a reserved key used for indexing FinalizeBlock events. BlockHeightKey = "block.height" - - // EventTypeFinalizeBlock is a reserved key used for indexing FinalizeBlock events. - EventTypeFinalizeBlock = "finalize_block" ) var ( diff --git a/types/node_info.go b/types/node_info.go index 77e595b15d..be6e3bebaa 100644 --- a/types/node_info.go +++ b/types/node_info.go @@ -9,8 +9,8 @@ import ( "github.com/tendermint/tendermint/crypto" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" tmbytes "github.com/tendermint/tendermint/libs/bytes" - tmstrings "github.com/tendermint/tendermint/libs/strings" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) diff --git a/types/params.go b/types/params.go index 2f54e57da1..19a1e72299 100644 --- a/types/params.go +++ b/types/params.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/crypto/bls12381" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" - tmstrings "github.com/tendermint/tendermint/libs/strings" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -39,6 +39,7 @@ type ConsensusParams struct { Version VersionParams `json:"version"` Synchrony SynchronyParams `json:"synchrony"` Timeout TimeoutParams `json:"timeout"` + ABCI ABCIParams `json:"abci"` } // HashedParams is a subset of ConsensusParams. 
@@ -92,6 +93,12 @@ type TimeoutParams struct { BypassCommitTimeout bool `json:"bypass_commit_timeout"` } +// ABCIParams configure ABCI functionality specific to the Application Blockchain +// Interface. +type ABCIParams struct { + RecheckTx bool `json:"recheck_tx"` +} + // DefaultConsensusParams returns a default ConsensusParams. func DefaultConsensusParams() *ConsensusParams { return &ConsensusParams{ @@ -101,6 +108,7 @@ func DefaultConsensusParams() *ConsensusParams { Version: DefaultVersionParams(), Synchrony: DefaultSynchronyParams(), Timeout: DefaultTimeoutParams(), + ABCI: DefaultABCIParams(), } } @@ -172,6 +180,13 @@ func DefaultTimeoutParams() TimeoutParams { } } +func DefaultABCIParams() ABCIParams { + return ABCIParams{ + // When true, run CheckTx on each transaction in the mempool after each height. + RecheckTx: true, + } +} + // TimeoutParamsOrDefaults returns the SynchronyParams, filling in any zero values // with the Tendermint defined default values. func (t TimeoutParams) TimeoutParamsOrDefaults() TimeoutParams { @@ -323,6 +338,7 @@ func (params ConsensusParams) ValidateConsensusParams() error { // Only the Block.MaxBytes and Block.MaxGas are included in the hash. // This allows the ConsensusParams to evolve more without breaking the block // protocol. No need for a Merkle tree here, just a small struct to hash. 
+// TODO: We should hash the other parameters as well func (params ConsensusParams) HashConsensusParams() []byte { hp := tmproto.HashedParams{ BlockMaxBytes: params.Block.MaxBytes, @@ -345,6 +361,7 @@ func (params *ConsensusParams) Equals(params2 *ConsensusParams) bool { params.Version == params2.Version && params.Synchrony == params2.Synchrony && params.Timeout == params2.Timeout && + params.ABCI == params2.ABCI && tmstrings.StringSliceEqual(params.Validator.PubKeyTypes, params2.Validator.PubKeyTypes) } @@ -401,6 +418,9 @@ func (params ConsensusParams) UpdateConsensusParams(params2 *tmproto.ConsensusPa } res.Timeout.BypassCommitTimeout = params2.Timeout.GetBypassCommitTimeout() } + if params2.Abci != nil { + res.ABCI.RecheckTx = params2.Abci.GetRecheckTx() + } return res } @@ -433,6 +453,9 @@ func (params *ConsensusParams) ToProto() tmproto.ConsensusParams { Commit: ¶ms.Timeout.Commit, BypassCommitTimeout: params.Timeout.BypassCommitTimeout, }, + Abci: &tmproto.ABCIParams{ + RecheckTx: params.ABCI.RecheckTx, + }, } } @@ -480,5 +503,8 @@ func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams } c.Timeout.BypassCommitTimeout = pbParams.Timeout.BypassCommitTimeout } + if pbParams.Abci != nil { + c.ABCI.RecheckTx = pbParams.Abci.GetRecheckTx() + } return c } diff --git a/types/params_test.go b/types/params_test.go index 48d8ab3041..37a334bbb6 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -7,7 +7,6 @@ import ( "time" "github.com/stretchr/testify/assert" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -175,6 +174,7 @@ func TestConsensusParamsValidation(t *testing.T) { type makeParamsArgs struct { blockBytes int64 blockGas int64 + recheck bool evidenceAge int64 maxEvidenceBytes int64 pubkeyTypes []string @@ -233,6 +233,9 @@ func makeParams(args makeParamsArgs) ConsensusParams { Commit: *args.commit, BypassCommitTimeout: args.bypassCommitTimeout, }, + ABCI: ABCIParams{ + RecheckTx: args.recheck, + }, } } 
@@ -265,19 +268,19 @@ func TestConsensusParamsHash(t *testing.T) { func TestConsensusParamsUpdate(t *testing.T) { testCases := []struct { - intialParams ConsensusParams + initialParams ConsensusParams updates *tmproto.ConsensusParams updatedParams ConsensusParams }{ // empty updates { - intialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), updates: &tmproto.ConsensusParams{}, updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), }, { // update synchrony params - intialParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: time.Second, messageDelay: 3 * time.Second}), + initialParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: time.Second, messageDelay: 3 * time.Second}), updates: &tmproto.ConsensusParams{ Synchrony: &tmproto.SynchronyParams{ Precision: durationPtr(time.Second * 2), @@ -288,7 +291,7 @@ func TestConsensusParamsUpdate(t *testing.T) { }, { // update timeout params - intialParams: makeParams(makeParamsArgs{ + initialParams: makeParams(makeParamsArgs{ propose: durationPtr(3 * time.Second), proposeDelta: durationPtr(500 * time.Millisecond), vote: durationPtr(time.Second), @@ -317,7 +320,7 @@ func TestConsensusParamsUpdate(t *testing.T) { }, // fine updates { - intialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), updates: &tmproto.ConsensusParams{ Block: &tmproto.BlockParams{ MaxBytes: 100, @@ -339,7 +342,7 @@ func TestConsensusParamsUpdate(t *testing.T) { }), }, { - intialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), updates: &tmproto.ConsensusParams{ Block: &tmproto.BlockParams{ MaxBytes: 100, @@ -364,7 +367,7 @@ func TestConsensusParamsUpdate(t 
*testing.T) { } for _, tc := range testCases { - assert.Equal(t, tc.updatedParams, tc.intialParams.UpdateConsensusParams(tc.updates)) + assert.Equal(t, tc.updatedParams, tc.initialParams.UpdateConsensusParams(tc.updates)) } } @@ -391,6 +394,14 @@ func TestProto(t *testing.T) { makeParams(makeParamsArgs{blockBytes: 4, blockGas: 6, evidenceAge: 5, maxEvidenceBytes: 1}), makeParams(makeParamsArgs{precision: time.Second, messageDelay: time.Minute}), makeParams(makeParamsArgs{precision: time.Nanosecond, messageDelay: time.Millisecond}), + makeParams(makeParamsArgs{ + propose: durationPtr(2 * time.Second), + proposeDelta: durationPtr(400 * time.Millisecond), + vote: durationPtr(5 * time.Second), + voteDelta: durationPtr(400 * time.Millisecond), + commit: durationPtr(time.Minute), + bypassCommitTimeout: true, + }), } for i := range params { diff --git a/types/part_set.go b/types/part_set.go index 9bf36279f7..d9341b61ff 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -145,6 +145,12 @@ func PartSetHeaderFromProto(ppsh *tmproto.PartSetHeader) (*PartSetHeader, error) return psh, psh.ValidateBasic() } +// ProtoPartSetHeaderIsZero is similar to the IsZero function for +// PartSetHeader, but for the Protobuf representation. 
+func ProtoPartSetHeaderIsZero(ppsh *tmproto.PartSetHeader) bool { + return ppsh.Total == 0 && len(ppsh.Hash) == 0 +} + //------------------------------------- type PartSet struct { diff --git a/types/part_set_test.go b/types/part_set_test.go index af65ca8db0..760abe9224 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -16,6 +16,10 @@ const ( ) func TestBasicPartSet(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // Construct random data of size partSize * 100 nParts := 100 data := tmrand.Bytes(testPartSize * nParts) @@ -64,6 +68,10 @@ func TestBasicPartSet(t *testing.T) { } func TestWrongProof(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // Construct random data of size partSize * 100 data := tmrand.Bytes(testPartSize * 100) partSet := NewPartSetFromData(data, testPartSize) @@ -89,6 +97,10 @@ func TestWrongProof(t *testing.T) { } func TestPartSetHeaderValidateBasic(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + testCases := []struct { testName string malleatePartSetHeader func(*PartSetHeader) @@ -110,6 +122,10 @@ func TestPartSetHeaderValidateBasic(t *testing.T) { } func TestPartValidateBasic(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + testCases := []struct { testName string malleatePart func(*Part) diff --git a/types/quorum.go b/types/quorum.go index 56b20350b7..777fe618d8 100644 --- a/types/quorum.go +++ b/types/quorum.go @@ -110,6 +110,7 @@ func MakeThresholdVoteExtensions(extensions []VoteExtension, thresholdSigs [][]b // QuorumSingsVerifier ... 
type QuorumSingsVerifier struct { QuorumSignData + shouldVerifyBlock bool shouldVerifyState bool shouldVerifyVoteExtensions bool } @@ -121,6 +122,13 @@ func WithVerifyExtensions(shouldVerify bool) func(*QuorumSingsVerifier) { } } +// WithVerifyBlock sets a flag that tells QuorumSingsVerifier to verify block signature or not +func WithVerifyBlock(shouldVerify bool) func(*QuorumSingsVerifier) { + return func(verifier *QuorumSingsVerifier) { + verifier.shouldVerifyBlock = shouldVerify + } +} + // WithVerifyState sets a flag that tells QuorumSingsVerifier to verify stateID signature or not func WithVerifyState(shouldVerify bool) func(*QuorumSingsVerifier) { return func(verifier *QuorumSingsVerifier) { @@ -137,11 +145,12 @@ func WithVerifyReachedQuorum(quorumReached bool) func(*QuorumSingsVerifier) { } } -// NewQuorumSingsVerifier creates and returns an instance of QuorumSingsVerifier that is used for verification +// NewQuorumSignsVerifier creates and returns an instance of QuorumSingsVerifier that is used for verification // quorum signatures -func NewQuorumSingsVerifier(quorumData QuorumSignData, opts ...func(*QuorumSingsVerifier)) *QuorumSingsVerifier { +func NewQuorumSignsVerifier(quorumData QuorumSignData, opts ...func(*QuorumSingsVerifier)) *QuorumSingsVerifier { verifier := &QuorumSingsVerifier{ QuorumSignData: quorumData, + shouldVerifyBlock: true, shouldVerifyState: true, shouldVerifyVoteExtensions: true, } @@ -165,6 +174,9 @@ func (q *QuorumSingsVerifier) Verify(pubKey crypto.PubKey, signs QuorumSigns) er } func (q *QuorumSingsVerifier) verifyBlock(pubKey crypto.PubKey, signs QuorumSigns) error { + if !q.shouldVerifyBlock { + return nil + } if !pubKey.VerifySignatureDigest(q.Block.ID, signs.BlockSign) { return fmt.Errorf( "threshold block signature is invalid: (%X) signID=%X: %w", diff --git a/types/quorum_sign_data.go b/types/quorum_sign_data.go index 1b3a57e865..d224ba9724 100644 --- a/types/quorum_sign_data.go +++ b/types/quorum_sign_data.go @@ -24,7 +24,7 
@@ type QuorumSignData struct { // Verify verifies a quorum signatures: block, state and vote-extensions func (q QuorumSignData) Verify(pubKey crypto.PubKey, signs QuorumSigns) error { - return NewQuorumSingsVerifier(q).Verify(pubKey, signs) + return NewQuorumSignsVerifier(q).Verify(pubKey, signs) } // SignItem represents quorum sign data, like a request id, message bytes, sha256 hash of message and signID diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 49a58ac726..5f753af62d 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -1154,6 +1154,10 @@ func applyChangesToValSet(t *testing.T, expErr error, valSet *ValidatorSet, vals } func TestValSetUpdatePriorityOrderTests(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + const nMaxElections int32 = 5000 testCases := []testVSetCfg{ @@ -1418,7 +1422,6 @@ func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) { // create a commit with n validators commit, err := makeCommit(ctx, blockID, stateID, h, 0, voteSet, vals) require.NoError(b, err) - for i := 0; i < b.N/n; i++ { err = valSet.VerifyCommit(chainID, blockID, stateID, h, commit) assert.NoError(b, err) diff --git a/types/vote.go b/types/vote.go index e649b79136..c4c45181c3 100644 --- a/types/vote.go +++ b/types/vote.go @@ -18,7 +18,8 @@ import ( ) const ( - nilVoteStr string = "nil-Vote" + absentVoteStr string = "Vote{absent}" + nilVoteStr string = "nil-Vote" // MaxVoteBytes is a maximum vote size (including amino overhead). MaxVoteBytesBLS12381 int64 = 241 MaxVoteBytesEd25519 int64 = 209 @@ -175,7 +176,7 @@ func (vote *Vote) Copy() *Vote { // 10. 
timestamp func (vote *Vote) String() string { if vote == nil { - return nilVoteStr + return absentVoteStr } var typeString string @@ -188,21 +189,25 @@ func (vote *Vote) String() string { panic("Unknown vote type") } - return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X %X %X}", + blockHashString := nilVoteStr + if len(vote.BlockID.Hash) > 0 { + blockHashString = fmt.Sprintf("%X", tmbytes.Fingerprint(vote.BlockID.Hash)) + } + + return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%s) %X %X %X}", vote.ValidatorIndex, tmbytes.Fingerprint(vote.ValidatorProTxHash), vote.Height, vote.Round, - vote.Type, typeString, - tmbytes.Fingerprint(vote.BlockID.Hash), + blockHashString, tmbytes.Fingerprint(vote.BlockSignature), tmbytes.Fingerprint(vote.StateSignature), vote.VoteExtensions.Fingerprint(), ) } -// VerifyWithExtension performs the same verification as Verify, but +// VerifyVoteAndExtension performs the same verification as Verify, but // additionally checks whether the vote extension signature corresponds to the // given chain ID and public key. We only verify vote extension signatures for // precommits. @@ -259,12 +264,30 @@ func (vote *Vote) verifyBasic(proTxHash ProTxHash, pubKey crypto.PubKey) error { return nil } +// VerifyExtensionSign checks whether the vote extension signature corresponds to the +// given chain ID and public key. 
+func (vote *Vote) VerifyExtensionSign(chainID string, pubKey crypto.PubKey, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash) error { + if vote.Type != tmproto.PrecommitType || vote.BlockID.IsNil() { + return nil + } + quorumSignData, err := MakeQuorumSigns(chainID, quorumType, quorumHash, vote.ToProto(), StateID{}) + if err != nil { + return err + } + verifier := NewQuorumSignsVerifier( + quorumSignData, + WithVerifyBlock(false), + WithVerifyState(false), + ) + return verifier.Verify(pubKey, vote.makeQuorumSigns()) +} + func (vote *Vote) verifySign( pubKey crypto.PubKey, quorumSignData QuorumSignData, opts ...func(verifier *QuorumSingsVerifier), ) error { - verifier := NewQuorumSingsVerifier( + verifier := NewQuorumSignsVerifier( quorumSignData, append(opts, WithVerifyState(vote.BlockID.Hash != nil))..., ) @@ -334,12 +357,19 @@ func (vote *Vote) ValidateBasic() error { } // We should only ever see vote extensions in precommits. - if vote.Type != tmproto.PrecommitType { + if vote.Type != tmproto.PrecommitType || (vote.Type == tmproto.PrecommitType && vote.BlockID.IsNil()) { if !vote.VoteExtensions.IsEmpty() { return errors.New("unexpected vote extensions") } } + if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() { + err := vote.VoteExtensions.Validate() + if err != nil { + return err + } + } + return nil } diff --git a/types/vote_set.go b/types/vote_set.go index bd7e2a63e0..fd6372bfb5 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -55,8 +55,8 @@ const ( type VoteSet struct { chainID string height int64 - stateID StateID // ID of state for which this voting is executed round int32 + stateID StateID // ID of state for which this voting is executed signedMsgType tmproto.SignedMsgType valSet *ValidatorSet @@ -74,7 +74,8 @@ type VoteSet struct { thresholdVoteExtSigs []ThresholdExtensionSign // If a 2/3 majority is seen, recover the vote extension sigs } -// NewVoteSet constructs a new VoteSet struct used to accumulate votes for given 
height/round. +// NewVoteSet instantiates all fields of a new vote set. This constructor requires +// that no vote extension data be present on the votes that are added to the set. func NewVoteSet(chainID string, height int64, round int32, signedMsgType tmproto.SignedMsgType, valSet *ValidatorSet, stateID StateID) *VoteSet { if height == 0 { @@ -250,13 +251,6 @@ func (voteSet *VoteSet) getVote(valIndex int32, blockKey string) (vote *Vote, ok return nil, false } -func (voteSet *VoteSet) GetVotes() []*Vote { - if voteSet == nil { - return nil - } - return voteSet.votes -} - // Assumes signature is valid. // If conflicting vote exists, returns it. func (voteSet *VoteSet) addVerifiedVote( @@ -361,7 +355,7 @@ func (voteSet *VoteSet) recoverThresholdSignsAndVerify(blockVotes *blockVotes, q if err != nil { return err } - verifier := NewQuorumSingsVerifier( + verifier := NewQuorumSignsVerifier( quorumDataSigns, WithVerifyReachedQuorum(voteSet.IsQuorumReached()), ) @@ -578,7 +572,7 @@ func (voteSet *VoteSet) StringIndented(indent string) string { voteStrings := make([]string, len(voteSet.votes)) for i, vote := range voteSet.votes { if vote == nil { - voteStrings[i] = nilVoteStr + voteStrings[i] = absentVoteStr } else { voteStrings[i] = vote.String() } @@ -643,7 +637,7 @@ func (voteSet *VoteSet) voteStrings() []string { voteStrings := make([]string, len(voteSet.votes)) for i, vote := range voteSet.votes { if vote == nil { - voteStrings[i] = nilVoteStr + voteStrings[i] = absentVoteStr } else { voteStrings[i] = vote.String() } @@ -697,8 +691,8 @@ func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { //-------------------------------------------------------------------------------- // Commit -// MakeCommit constructs a Commit from the VoteSet. It only includes precommits -// for the block, which has 2/3+ majority, and nil. +// MakeCommit constructs a Commit from the VoteSet. It only includes +// precommits for the block, which has 2/3+ majority, and nil. 
// // Panics if the vote type is not PrecommitType or if there's no +2/3 votes for // a single block. diff --git a/types/vote_test.go b/types/vote_test.go index c5d91cbef5..a51b1f04ee 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -2,6 +2,7 @@ package types import ( "context" + "fmt" "testing" "github.com/dashevo/dashd-go/btcjson" @@ -16,6 +17,18 @@ import ( tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) +const ( + //nolint: lll + preCommitTestStr = `Vote{56789:959A8F5EF2BE 12345/02/Precommit(8B01023386C3) 000000000000 000000000000 000000000000}` + //nolint: lll + preVoteTestStr = `Vote{56789:959A8F5EF2BE 12345/02/Prevote(8B01023386C3) 000000000000 000000000000 000000000000}` +) + +var ( + // nolint: lll + nilVoteTestStr = fmt.Sprintf(`Vote{56789:959A8F5EF2BE 12345/02/Precommit(%s) 000000000000 000000000000 000000000000}`, nilVoteStr) +) + func examplePrevote(t *testing.T) *Vote { t.Helper() return exampleVote(t, byte(tmproto.PrevoteType)) @@ -249,31 +262,24 @@ func TestVoteExtension(t *testing.T) { { name: "all fields present", extensions: VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []VoteExtension{{Extension: []byte("extension")}}, + tmproto.VoteExtensionType_THRESHOLD_RECOVER: []VoteExtension{{Extension: []byte("extension")}}, }, includeSignature: true, expectError: false, }, - // TODO(thane): Re-enable once - // https://github.com/tendermint/tendermint/issues/8272 is resolved - //{ - // name: "no extension signature", - // extension: []byte("extension"), - // includeSignature: false, - // expectError: true, - //}, + { + name: "no extension signature", + extensions: VoteExtensions{ + tmproto.VoteExtensionType_THRESHOLD_RECOVER: []VoteExtension{{Extension: []byte("extension")}}, + }, + includeSignature: false, + expectError: true, + }, { name: "empty extension", includeSignature: true, expectError: false, }, - // TODO: Re-enable once - // https://github.com/tendermint/tendermint/issues/8272 is resolved. 
- //{ - // name: "no extension and no signature", - // includeSignature: false, - // expectError: true, - //}, } logger := log.NewTestingLogger(t) @@ -300,7 +306,6 @@ func TestVoteExtension(t *testing.T) { BlockID: blockID, VoteExtensions: tc.extensions, } - v := vote.ToProto() err = privVal.SignVote(ctx, "test_chain_id", btcjson.LLMQType_5_60, quorumHash, v, stateID, logger) require.NoError(t, err) @@ -377,16 +382,35 @@ func TestVoteVerify(t *testing.T) { } func TestVoteString(t *testing.T) { - str := examplePrecommit(t).String() - expected := `Vote{56789:959A8F5EF2BE 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 000000000000 000000000000}` - if str != expected { - t.Errorf("got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str) + testcases := map[string]struct { + vote *Vote + expectedResult string + }{ + "pre-commit": { + vote: examplePrecommit(t), + expectedResult: preCommitTestStr, + }, + "pre-vote": { + vote: examplePrevote(t), + expectedResult: preVoteTestStr, + }, + "absent vote": { + expectedResult: absentVoteStr, + }, + "nil vote": { + vote: func() *Vote { + v := examplePrecommit(t) + v.BlockID.Hash = nil + return v + }(), + expectedResult: nilVoteTestStr, + }, } - str2 := examplePrevote(t).String() - expected = `Vote{56789:959A8F5EF2BE 12345/02/SIGNED_MSG_TYPE_PREVOTE(Prevote) 8B01023386C3 000000000000 000000000000 000000000000}` - if str2 != expected { - t.Errorf("got unexpected string for Vote. 
Expected:\n%v\nGot:\n%v", expected, str2) + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + require.Equal(t, tc.expectedResult, tc.vote.String()) + }) } } @@ -436,7 +460,6 @@ func TestValidVotes(t *testing.T) { signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, tc.vote, stateID, nil) tc.malleateVote(tc.vote) require.NoError(t, tc.vote.ValidateBasic(), "ValidateBasic for %s", tc.name) - require.NoError(t, tc.vote.ValidateWithExtension(), "ValidateWithExtension for %s", tc.name) } } @@ -465,13 +488,11 @@ func TestInvalidVotes(t *testing.T) { signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, prevote, stateID, nil) tc.malleateVote(prevote) require.Error(t, prevote.ValidateBasic(), "ValidateBasic for %s in invalid prevote", tc.name) - require.Error(t, prevote.ValidateWithExtension(), "ValidateWithExtension for %s in invalid prevote", tc.name) precommit := examplePrecommit(t) signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, precommit, stateID, nil) tc.malleateVote(precommit) require.Error(t, precommit.ValidateBasic(), "ValidateBasic for %s in invalid precommit", tc.name) - require.Error(t, precommit.ValidateWithExtension(), "ValidateWithExtension for %s in invalid precommit", tc.name) } } @@ -506,7 +527,6 @@ func TestInvalidPrevotes(t *testing.T) { signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, prevote, stateID, nil) tc.malleateVote(prevote) require.Error(t, prevote.ValidateBasic(), "ValidateBasic for %s", tc.name) - require.Error(t, prevote.ValidateWithExtension(), "ValidateWithExtension for %s", tc.name) } } @@ -524,29 +544,32 @@ func TestInvalidPrecommitExtensions(t *testing.T) { { "vote extension present without signature", func(v *Vote) { v.VoteExtensions = VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: {{Extension: []byte("extension")}}, + tmproto.VoteExtensionType_THRESHOLD_RECOVER: {{Extension: []byte("extension")}}, } - }}, + }, + }, // TODO(thane): Re-enable once 
https://github.com/tendermint/tendermint/issues/8272 is resolved //{"missing vote extension signature", func(v *Vote) { v.ExtensionSignature = nil }}, { "oversized vote extension signature", func(v *Vote) { v.VoteExtensions = VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []VoteExtension{{Signature: make([]byte, SignatureSize+1)}}, + tmproto.VoteExtensionType_THRESHOLD_RECOVER: []VoteExtension{{Signature: make([]byte, SignatureSize+1)}}, } - }}, + }, + }, } - for _, tc := range testCases { - precommit := examplePrecommit(t) - v := precommit.ToProto() - stateID := RandStateID().WithHeight(v.Height - 1) - signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, precommit, stateID, nil) - tc.malleateVote(precommit) - // We don't expect an error from ValidateBasic, because it doesn't - // handle vote extensions. - require.NoError(t, precommit.ValidateBasic(), "ValidateBasic for %s", tc.name) - require.Error(t, precommit.ValidateWithExtension(), "ValidateWithExtension for %s", tc.name) + for i, tc := range testCases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + precommit := examplePrecommit(t) + v := precommit.ToProto() + stateID := RandStateID().WithHeight(v.Height - 1) + signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, precommit, stateID, nil) + tc.malleateVote(precommit) + // ValidateBasic ensures that vote extensions, if present, are well formed + require.Error(t, precommit.ValidateBasic(), "ValidateBasic for %s", tc.name) + require.Error(t, precommit.ValidateWithExtension(), "ValidateWithExtension for %s", tc.name) + }) } }