From 37287ead94aa010f2497a5df414c64b85e4861ce Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 2 May 2022 10:52:57 -0400 Subject: [PATCH 001/203] p2p: remove message type from channel implementation (#8452) --- internal/p2p/channel.go | 20 ++++++-------------- internal/p2p/pex/reactor_test.go | 1 - internal/p2p/router.go | 2 +- internal/statesync/dispatcher_test.go | 2 +- internal/statesync/reactor_test.go | 4 ---- 5 files changed, 8 insertions(+), 21 deletions(-) diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go index 8e6774612e..d3d7d104ff 100644 --- a/internal/p2p/channel.go +++ b/internal/p2p/channel.go @@ -59,25 +59,17 @@ type Channel struct { outCh chan<- Envelope // outbound messages (reactors to peers) errCh chan<- PeerError // peer error reporting - messageType proto.Message // the channel's message type, used for unmarshaling - name string + name string } // NewChannel creates a new channel. It is primarily for internal and test // use, reactors should use Router.OpenChannel(). 
-func NewChannel( - id ChannelID, - messageType proto.Message, - inCh <-chan Envelope, - outCh chan<- Envelope, - errCh chan<- PeerError, -) *Channel { +func NewChannel(id ChannelID, inCh <-chan Envelope, outCh chan<- Envelope, errCh chan<- PeerError) *Channel { return &Channel{ - ID: id, - messageType: messageType, - inCh: inCh, - outCh: outCh, - errCh: errCh, + ID: id, + inCh: inCh, + outCh: outCh, + errCh: errCh, } } diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index f2132fbba1..ec2f03d838 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -289,7 +289,6 @@ func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor { pexErrCh := make(chan p2p.PeerError, chBuf) pexCh := p2p.NewChannel( p2p.ChannelID(pex.PexChannel), - new(p2pproto.PexMessage), pexInCh, pexOutCh, pexErrCh, diff --git a/internal/p2p/router.go b/internal/p2p/router.go index df096dbb61..459be79756 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -262,7 +262,7 @@ func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*C queue := r.queueFactory(chDesc.RecvBufferCapacity) outCh := make(chan Envelope, chDesc.RecvBufferCapacity) errCh := make(chan PeerError, chDesc.RecvBufferCapacity) - channel := NewChannel(id, messageType, queue.dequeue(), outCh, errCh) + channel := NewChannel(id, queue.dequeue(), outCh, errCh) channel.name = chDesc.Name var wrapper Wrapper diff --git a/internal/statesync/dispatcher_test.go b/internal/statesync/dispatcher_test.go index 65c517be43..8ec074bd19 100644 --- a/internal/statesync/dispatcher_test.go +++ b/internal/statesync/dispatcher_test.go @@ -30,7 +30,7 @@ func testChannel(size int) (*channelInternal, *p2p.Channel) { Out: make(chan p2p.Envelope, size), Error: make(chan p2p.PeerError, size), } - return in, p2p.NewChannel(0, nil, in.In, in.Out, in.Error) + return in, p2p.NewChannel(0, in.In, in.Out, in.Error) } func TestDispatcherBasic(t *testing.T) { 
diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index c6b2c2d2b7..55a9fcf8cd 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -102,7 +102,6 @@ func setup( rts.snapshotChannel = p2p.NewChannel( SnapshotChannel, - new(ssproto.Message), rts.snapshotInCh, rts.snapshotOutCh, rts.snapshotPeerErrCh, @@ -110,7 +109,6 @@ func setup( rts.chunkChannel = p2p.NewChannel( ChunkChannel, - new(ssproto.Message), rts.chunkInCh, rts.chunkOutCh, rts.chunkPeerErrCh, @@ -118,7 +116,6 @@ func setup( rts.blockChannel = p2p.NewChannel( LightBlockChannel, - new(ssproto.Message), rts.blockInCh, rts.blockOutCh, rts.blockPeerErrCh, @@ -126,7 +123,6 @@ func setup( rts.paramsChannel = p2p.NewChannel( ParamsChannel, - new(ssproto.Message), rts.paramsInCh, rts.paramsOutCh, rts.paramsPeerErrCh, From 9a028b7d8ac3906616f6f777addf89158227d833 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 May 2022 07:51:33 -0400 Subject: [PATCH 002/203] build(deps): Bump github.com/creachadair/atomicfile from 0.2.5 to 0.2.6 (#8460) Bumps [github.com/creachadair/atomicfile](https://github.com/creachadair/atomicfile) from 0.2.5 to 0.2.6. - [Release notes](https://github.com/creachadair/atomicfile/releases) - [Commits](https://github.com/creachadair/atomicfile/compare/v0.2.5...v0.2.6) --- updated-dependencies: - dependency-name: github.com/creachadair/atomicfile dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ff5b4e11f9..09ae2dad9f 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( ) require ( - github.com/creachadair/atomicfile v0.2.5 + github.com/creachadair/atomicfile v0.2.6 github.com/creachadair/taskgroup v0.3.2 github.com/golangci/golangci-lint v1.45.2 github.com/google/go-cmp v0.5.8 diff --git a/go.sum b/go.sum index a7b2d7bd88..9997d4c22f 100644 --- a/go.sum +++ b/go.sum @@ -227,8 +227,8 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creachadair/atomicfile v0.2.5 h1:wkOlpsjyJOvJ3Hd8juHKdirJnCSIPacvtY21/3nYjAo= -github.com/creachadair/atomicfile v0.2.5/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc= +github.com/creachadair/atomicfile v0.2.6 h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg= +github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= github.com/creachadair/tomledit v0.0.19 h1:zbpfUtYFYFdpRjwJY9HJlto1iZ4M5YwYB6qqc37F6UM= From c8e336f2e9a1b7a3161d2f35d31f138d4b8e13ea Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Wed, 4 May 2022 09:21:32 -0400 Subject: [PATCH 003/203] docs: minor fixups to pbts overview (#8454) --- .../tendermint-core/consensus/proposer-based-timestamps.md | 7 
++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/tendermint-core/consensus/proposer-based-timestamps.md b/docs/tendermint-core/consensus/proposer-based-timestamps.md index 7f98f10d6b..17036a9f2e 100644 --- a/docs/tendermint-core/consensus/proposer-based-timestamps.md +++ b/docs/tendermint-core/consensus/proposer-based-timestamps.md @@ -13,14 +13,15 @@ order: 3 The PBTS algorithm defines a way for a Tendermint blockchain to create block timestamps that are within a reasonable bound of the clocks of the validators on the network. This replaces the original BFTTime algorithm for timestamp -assignment that relied on the timestamps included in precommit messages. +assignment that computed a timestamp using the timestamps included in precommit +messages. ## Algorithm Parameters The functionality of the PBTS algorithm is governed by two parameters within Tendermint. These two parameters are [consensus parameters](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#L291), -meaning they are configured by the ABCI application and are expected to be the +meaning they are configured by the ABCI application and are therefore the same same across all nodes on the network. ### `Precision` @@ -51,7 +52,7 @@ useful for the protocols and applications built on top of Tendermint. The following protocols and application features require a reliable source of time: * Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification. -* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification). 
+* Tendermint Evidence expiration is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification). * Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime). * IBC packets can use either a [timestamp or a height to timeout packet From dd4fee88ef6e4e0e5cf8eab0bf5a3e078df355c9 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 4 May 2022 11:08:26 -0700 Subject: [PATCH 004/203] keymigrate: improve filtering for legacy transaction hashes (#8466) This is a follow-up to #8352. The check for legacy evidence keys is only based on the prefix of the key. Hashes, which are unprefixed, could easily have this form and be misdiagnosed. Because the conversion for evidence checks the key structure, this should not cause corruption. The probability that a hash is a syntactically valid evidence key is negligible. The tool will report an error rather than storing bad data. But this does mean that such transaction hashes could cause the migration to stop and report an error before it is complete. To ensure we convert all the data, refine the legacy key check to filter these keys more precisely. Update the test cases to exercise this condition. * Update upgrading instructions. --- UPGRADING.md | 21 ++++++----- scripts/keymigrate/migrate.go | 60 +++++++++++++++++++++++------- scripts/keymigrate/migrate_test.go | 16 ++++++-- 3 files changed, 70 insertions(+), 27 deletions(-) diff --git a/UPGRADING.md b/UPGRADING.md index 28e44e58c0..93cd6c20fe 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -212,22 +212,25 @@ and one function have moved to the Tendermint `crypto` package: The format of all tendermint on-disk database keys changes in 0.35. Upgrading nodes must either re-sync all data or run a migration -script provided in this release. 
The script located in -`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` -provides the function `Migrate(context.Context, db.DB)` which you can -operationalize as makes sense for your deployment. +script provided in this release. + +The script located in +`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` provides the +function `Migrate(context.Context, db.DB)` which you can operationalize as +makes sense for your deployment. For ease of use the `tendermint` command includes a CLI version of the migration script, which you can invoke, as in: tendermint key-migrate -This reads the configuration file as normal and allows the -`--db-backend` and `--db-dir` flags to change database operations as -needed. +This reads the configuration file as normal and allows the `--db-backend` and +`--db-dir` flags to override the database location as needed. -The migration operation is idempotent and can be run more than once, -if needed. +The migration operation is intended to be idempotent, and should be safe to +rerun on the same database multiple times. As a safety measure, however, we +recommend that operators test out the migration on a copy of the database +first, if it is practical to do so, before applying it to the production data. 
### CLI Changes diff --git a/scripts/keymigrate/migrate.go b/scripts/keymigrate/migrate.go index ca2c528e2f..a0b43aef6f 100644 --- a/scripts/keymigrate/migrate.go +++ b/scripts/keymigrate/migrate.go @@ -86,27 +86,30 @@ const ( var prefixes = []struct { prefix []byte ktype keyType + check func(keyID) bool }{ - {[]byte("consensusParamsKey:"), consensusParamsKey}, - {[]byte("abciResponsesKey:"), abciResponsesKey}, - {[]byte("validatorsKey:"), validatorsKey}, - {[]byte("stateKey"), stateStoreKey}, - {[]byte("H:"), blockMetaKey}, - {[]byte("P:"), blockPartKey}, - {[]byte("C:"), commitKey}, - {[]byte("SC:"), seenCommitKey}, - {[]byte("BH:"), blockHashKey}, - {[]byte("size"), lightSizeKey}, - {[]byte("lb/"), lightBlockKey}, - {[]byte("\x00"), evidenceCommittedKey}, - {[]byte("\x01"), evidencePendingKey}, + {[]byte("consensusParamsKey:"), consensusParamsKey, nil}, + {[]byte("abciResponsesKey:"), abciResponsesKey, nil}, + {[]byte("validatorsKey:"), validatorsKey, nil}, + {[]byte("stateKey"), stateStoreKey, nil}, + {[]byte("H:"), blockMetaKey, nil}, + {[]byte("P:"), blockPartKey, nil}, + {[]byte("C:"), commitKey, nil}, + {[]byte("SC:"), seenCommitKey, nil}, + {[]byte("BH:"), blockHashKey, nil}, + {[]byte("size"), lightSizeKey, nil}, + {[]byte("lb/"), lightBlockKey, nil}, + {[]byte("\x00"), evidenceCommittedKey, checkEvidenceKey}, + {[]byte("\x01"), evidencePendingKey, checkEvidenceKey}, } // checkKeyType classifies a candidate key based on its structure. func checkKeyType(key keyID) keyType { for _, p := range prefixes { if bytes.HasPrefix(key, p.prefix) { - return p.ktype + if p.check == nil || p.check(key) { + return p.ktype + } } } @@ -342,6 +345,35 @@ func convertEvidence(key keyID, newPrefix int64) ([]byte, error) { return orderedcode.Append(nil, newPrefix, binary.BigEndian.Uint64(hb), string(evidenceHash)) } +// checkEvidenceKey reports whether a candidate key with one of the legacy +// evidence prefixes has the correct structure for a legacy evidence key. 
+// +// This check is needed because transaction hashes are stored without a prefix, +// so checking the one-byte prefix alone is not enough to distinguish them. +// Legacy evidence keys are suffixed with a string of the format: +// +// "%0.16X/%X" +// +// where the first element is the height and the second is the hash. Thus, we +// check +func checkEvidenceKey(key keyID) bool { + parts := bytes.SplitN(key[1:], []byte("/"), 2) + if len(parts) != 2 || len(parts[0]) != 16 || !isHex(parts[0]) || !isHex(parts[1]) { + return false + } + return true +} + +func isHex(data []byte) bool { + for _, b := range data { + if ('0' <= b && b <= '9') || ('a' <= b && b <= 'f') || ('A' <= b && b <= 'F') { + continue + } + return false + } + return len(data) != 0 +} + func replaceKey(db dbm.DB, key keyID, gooseFn migrateFunc) error { exists, err := db.Has(key) if err != nil { diff --git a/scripts/keymigrate/migrate_test.go b/scripts/keymigrate/migrate_test.go index b2727a5df3..f7322b352f 100644 --- a/scripts/keymigrate/migrate_test.go +++ b/scripts/keymigrate/migrate_test.go @@ -1,11 +1,11 @@ package keymigrate import ( - "bytes" "context" "errors" "fmt" "math" + "strings" "testing" "github.com/google/orderedcode" @@ -21,6 +21,7 @@ func makeKey(t *testing.T, elems ...interface{}) []byte { } func getLegacyPrefixKeys(val int) map[string][]byte { + vstr := fmt.Sprintf("%02x", byte(val)) return map[string][]byte{ "Height": []byte(fmt.Sprintf("H:%d", val)), "BlockPart": []byte(fmt.Sprintf("P:%d:%d", val, val)), @@ -40,14 +41,19 @@ func getLegacyPrefixKeys(val int) map[string][]byte { "UserKey1": []byte(fmt.Sprintf("foo/bar/baz/%d/%d", val, val)), "TxHeight": []byte(fmt.Sprintf("tx.height/%s/%d/%d", fmt.Sprint(val), val, val)), "TxHash": append( - bytes.Repeat([]byte{fmt.Sprint(val)[0]}, 16), - bytes.Repeat([]byte{fmt.Sprint(val)[len([]byte(fmt.Sprint(val)))-1]}, 16)..., + []byte(strings.Repeat(vstr[:1], 16)), + []byte(strings.Repeat(vstr[1:], 16))..., ), + + // Transaction hashes that 
could be mistaken for evidence keys. + "TxHashMimic0": append([]byte{0}, []byte(strings.Repeat(vstr, 16)[:31])...), + "TxHashMimic1": append([]byte{1}, []byte(strings.Repeat(vstr, 16)[:31])...), } } func getNewPrefixKeys(t *testing.T, val int) map[string][]byte { t.Helper() + vstr := fmt.Sprintf("%02x", byte(val)) return map[string][]byte{ "Height": makeKey(t, int64(0), int64(val)), "BlockPart": makeKey(t, int64(1), int64(val), int64(val)), @@ -66,7 +72,9 @@ func getNewPrefixKeys(t *testing.T, val int) map[string][]byte { "UserKey0": makeKey(t, "foo", "bar", int64(val), int64(val)), "UserKey1": makeKey(t, "foo", "bar/baz", int64(val), int64(val)), "TxHeight": makeKey(t, "tx.height", fmt.Sprint(val), int64(val), int64(val+2), int64(val+val)), - "TxHash": makeKey(t, "tx.hash", string(bytes.Repeat([]byte{[]byte(fmt.Sprint(val))[0]}, 32))), + "TxHash": makeKey(t, "tx.hash", strings.Repeat(vstr, 16)), + "TxHashMimic0": makeKey(t, "tx.hash", "\x00"+strings.Repeat(vstr, 16)[:31]), + "TxHashMimic1": makeKey(t, "tx.hash", "\x01"+strings.Repeat(vstr, 16)[:31]), } } From e980e1468ddb65d5b51b3f8b206b26bc8f889424 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Thu, 5 May 2022 12:08:22 -0400 Subject: [PATCH 005/203] RFC-018: initial research of BLS signature aggregation (#8358) This provides an initial document for understanding the landscape of implementing a BLS signature aggregation scheme into Tendermint. --- docs/rfc/README.md | 1 + docs/rfc/rfc-018-bls-agg-exploration.md | 555 ++++++++++++++++++++++++ 2 files changed, 556 insertions(+) create mode 100644 docs/rfc/rfc-018-bls-agg-exploration.md diff --git a/docs/rfc/README.md b/docs/rfc/README.md index f2ad6ad697..6b03cf21ab 100644 --- a/docs/rfc/README.md +++ b/docs/rfc/README.md @@ -53,6 +53,7 @@ sections. 
- [RFC-013: ABCI++](./rfc-013-abci++.md) - [RFC-014: Semantic Versioning](./rfc-014-semantic-versioning.md) - [RFC-015: ABCI++ Tx Mutation](./rfc-015-abci++-tx-mutation.md) +- [RFC-018: BLS Signature Aggregation Exploration](./rfc-018-bls-agg-exploration.md) - [RFC-019: Configuration File Versioning](./rfc-019-config-version.md) diff --git a/docs/rfc/rfc-018-bls-agg-exploration.md b/docs/rfc/rfc-018-bls-agg-exploration.md new file mode 100644 index 0000000000..70ca171a09 --- /dev/null +++ b/docs/rfc/rfc-018-bls-agg-exploration.md @@ -0,0 +1,555 @@ +# RFC 018: BLS Signature Aggregation Exploration + +## Changelog + +- 01-April-2022: Initial draft (@williambanfield). +- 15-April-2022: Draft complete (@williambanfield). + +## Abstract + +## Background + +### Glossary + +The terms that are attached to these types of cryptographic signing systems +become confusing quickly. Different sources appear to use slightly different +meanings of each term and this can certainly add to the confusion. Below is +a brief glossary that may be helpful in understanding the discussion that follows. + +* **Short Signature**: A signature that does not vary in length with the +number of signers. +* **Multi-Signature**: A signature generated over a single message +where, given the message and signature, a verifier is able to determine that +all parties signed the message. May be short or may vary with the number of signers. +* **Aggregated Signature**: A _short_ signature generated over messages with +possibly different content where, given the messages and signature, a verifier +should be able to determine that all the parties signed the designated messages. +* **Threshold Signature**: A _short_ signature generated from multiple signers +where, given a message and the signature, a verifier is able to determine that +a large enough share of the parties signed the message. The identities of the +parties that contributed to the signature are not revealed. 
+* **BLS Signature**: An elliptic-curve pairing-based signature system that +has some nice properties for short multi-signatures. May stand for +*Boneh-Lynn-Schacham* or *Barreto-Lynn-Scott* depending on the context. A +BLS signature is type of signature scheme that is distinct from other forms +of elliptic-curve signatures such as ECDSA and EdDSA. +* **Interactive**: Cryptographic scheme where parties need to perform one or +more request-response cycles to produce the cryptographic material. For +example, an interactive signature scheme may require the signer and the +verifier to cooperate to create and/or verify the signature, rather than a +signature being created ahead of time. +* **Non-interactive**: Cryptographic scheme where parties do not need to +perform any request-response cycles to produce the cryptographic material. + +### Brief notes on pairing-based elliptic-curve cryptography + +Pairing-based elliptic-curve cryptography is quite complex and relies on several +types of high-level math. Cryptography, in general, relies on being able to find +problems with an asymmetry between the difficulty of calculating the solution +and verifying that a given solution is correct. + +Pairing-based cryptography works by operating on mathematical functions that +satisfy the property of **bilinear mapping**. This property is satisfied for +functions `e` with values `P`, `Q`, `R` and `S` where `e(P, Q + R) = e(P, Q) * e(P, R)` +and `e(P + S, Q) = e(P, Q) * e(S, Q)`. The most familiar example of this is +exponentiation. Written in common notation, `g^P*(Q+R) = g^(P*Q) * g^(P*R)` for +some value `g`. + +Pairing-based elliptic-curve cryptography creates a bilinear mapping using +elliptic curves over a finite field. With some original curve, you can define two groups, +`G1` and `G2` which are points of the original curve _modulo_ different values. +Finally, you define a third group `Gt`, where points from `G1` and `G2` satisfy +the property of bilinearity with `Gt`. 
In this scheme, the function `e` takes +as inputs points in `G1` and `G2` and outputs values in `Gt`. Succintly, given +some point `P` in `G1` and some point `Q` in `G1`, `e(P, Q) = C` where `C` is in `Gt`. +You can efficiently compute the mapping of points in `G1` and `G2` into `Gt`, +but you cannot efficiently determine what points were summed and paired to +produce the value in `Gt`. + +Functions are then defined to map digital signatures, messages, and keys into +and out of points of `G1` or `G2` and signature verification is the process +of calculating if a set of values representing a message, public key, and digital +signature produce the same value in `Gt` through `e`. + +Signatures can be created as either points in `G1` with public keys being +created as points in `G2` or vice versa. For the case of BLS12-381, the popular +curve used, points in `G1` are represented with 48 bytes and points in `G2` are +represented with 96 bytes. It is up to the implementer of the cryptosystem to +decide which should be larger, the public keys or the signatures. + +BLS signatures rely on pairing-based elliptic-curve cryptography to produce +various types of signatures. For a more in-depth but still high level discussion +pairing-based elliptic-curve cryptography, see Vitalik Buterin's post on +[Exploring Elliptic Curve Pairings][vitalik-pairing-post]. For much more in +depth discussion, see the specific paper on BLS12-381, [Short signatures from + the Weil Pairing][bls-weil-pairing] and +[Compact Multi-Signatures for Smaller Blockchains][multi-signatures-smaller-blockchains]. + +### Adoption + +BLS signatures have already gained traction within several popular projects. + +* Algorand is working on an implementation. +* [Zcash][zcash-adoption] has adopted BLS12-381 into the protocol. +* [Ethereum 2.0][eth-2-adoption] has adopted BLS12-381 into the protocol. +* [Chia Network][chia-adoption] has adopted BLS for signing blocks. 
+* [Ostracon][line-ostracon-pr], a fork of Tendermint has adopted BLS for signing blocks. + +### What systems may be affected by adding aggregated signatures? + +#### Gossip + +Gossip could be updated to aggregate vote signatures during a consensus round. +This appears to be of frankly little utility. Creating an aggregated signature +incurs overhead, so frequently re-aggregating may incur a significant +overhead. How costly this is is still subject to further investigation and +performance testing. + +Even if vote signatures were aggregated before gossip, each validator would still +need to receive and verify vote extension data from each (individual) peer validator in +order for consensus to proceed. That displaces any advantage gained by aggregating signatures across the vote message in the presence of vote extensions. + +#### Block Creation + +When creating a block, the proposer may create a small set of short +multi-signatures and attach these to the block instead of including one +signature per validator. + +#### Block Verification + +Currently, we verify each validator signature using the public key associated +with that validator. With signature aggregation, verification of blocks would +not verify many signatures individually, but would instead check the (single) +multi-signature using the public keys stored by the validator. This would also +require a mechanism for indicating which validators are included in the +aggregated signature. + +#### IBC Relaying + +IBC would no longer need to transmit a large set of signatures when +updating state. These state updates do not happen for every IBC packet, only +when changing an IBC light client's view of the counterparty chain's state. +General [IBC packets][ibc-packet] only contain enough information to correctly +route the data to the counterparty chain. + +IBC does persist commit signatures to the chain in these `MsgUpdateClient` +message when updating state. 
This message would no longer need the full set +of unique signatures and would instead only need one signature for all of the +data in the header. + +Adding BLS signatures would create a new signature type that must be +understood by the IBC module and by the relayers. For some operations, such +as state updates, the set of data written into the chain and received by the +IBC module could be slightly smaller. + +## Discussion + +### What are the proposed benefits to aggregated signatures? + +#### Reduce Block Size + +At the moment, a commit contains a 64-byte (512-bit) signature for each validator +that voted for the block. For the Cosmos Hub, which has 175 validators in the +active set, this amounts to about 11 KiB per block. That gives an upper bound of +around 113 GiB over the lifetime of the chain's 10.12M blocks. (Note, the Hub has +increased the number of validators in the active set over time so the total +signature size over the history of the chain is likely somewhat less than that). + +Signature aggregation would only produce two signatures for the entire block. +One for the yeas and one for the nays. Each BLS aggregated signature is 48 +bytes, per the [IETF standard of BLS signatures][bls-ietf-ecdsa-compare]. +Over the lifetime of the same Cosmos Hub chain, that would amount to about 1 +GB, a savings of 112 GB. While that is a large factor of reduction it's worth +bearing in mind that, at [GCP's cost][gcp-storage-pricing] of $.026 USD per GB, +that is a total savings of around $2.50 per month. + +#### Reduce Signature Creation and Verification Time + +From the [IETF draft standard on BLS Signatures][bls-ietf], BLS signatures can be +created in 370 microseconds and verified in 2700 microseconds. Our current +[Ed25519 implementation][voi-ed25519-perf] was benchmarked locally to take +13.9 microseconds to produce a signature and 2.03 milliseconds to batch verify +128 signatures, which is slightly fewer than the 175 in the Hub. 
blst, a popular +implementation of BLS signature aggregation was benchmarked to perform verification +on 100 signatures in 1.5 milliseconds [when run locally][blst-verify-bench] +on an 8 thread machine and pre-aggregated public keys. It is worth noting that +the `ed25519` library verification time grew steadily with the number of signatures, +whereas the bls library verification time remains constant. This is because the +number of operations used to verify a signature does not grow at all with the +number of signatures included in the aggregate signature (as long as the signers +signed over the same message data as is the case in Tendermint). + +It is worth noting that this would also represent a _degredation_ in signature +verification time for chains with small validator sets. When batch verifying +only 32 signatures, our ed25519 library takes .57 milliseconds, whereas BLS +would still require the same 1.5 milliseconds. + +For massive validator sets, blst dominates, taking the same 1.5 milliseconds to +check an aggregated signature from 1024 validators versus our ed25519 library's +13.066 milliseconds to batch verify a set of that size. + +#### Reduce Light-Client Verification Time + +The light client aims to be a faster and lighter-weight way to verify that a +block was voted on by a Tendermint network. The light client fetches +Tendermint block headers and commit signatures, performing public key +verification to ensure that the associated validator set signed the block. +Reducing the size of the commit signature would allow the light client to fetch +block data more quickly. + +Additionally, the faster signature verification times of BLS signatures mean +that light client verification would proceed more quickly. + +However, verification of an aggregated signature is all-or-nothing. The verifier +cannot check that some singular signer had a signature included in the block. 
+Instead, the verifier must use all public keys to check if some signature +was included. This does mean that any light client implementation must always +be able to fetch all public keys for any height instead of potentially being +able to check if some singular validator's key signed the block. + +#### Reduce Gossip Bandwidth + +##### Vote Gossip + +It is possible to aggregate subsets of signatures during voting, so that the +network need not gossip all *n* validator signatures to all *n* validators. +Theoretically, subsets of the signatures could be aggregated during consensus +and vote messages could carry those aggregated signatures. Implementing this +would certainly increase the complexity of the gossip layer but could possibly +reduce the total number of signatures required to be verified by each validator. + +##### Block Gossip + +A reduction in the block size as a result of signature aggregation would +naturally lead to a reduction in the bandwidth required to gossip a block. +Each validator would only send and receive the smaller aggregated signatures +instead of the full list of multi-signatures as we have them now. + +### What are the drawbacks to aggregated signatures? + +#### Heterogeneous key types cannot be aggregated + +Aggregation requires a specific signature algorithm, and our legacy signing schemes +cannot be aggregated. In practice, this means that aggregated signatures could +be created for a subset of validators using BLS signatures, and validators +with other key types (such as Ed25519) would still have to be be separately +propagated in blocks and votes. + +#### Many HSMs do not support aggregated signatures + +**Hardware Signing Modules** (HSM) are a popular way to manage private keys. +They provide additional security for key management and should be used when +possible for storing highly sensitive private key material. + +Below is a list of popular HSMs along with their support for BLS signatures. 
+ +* YubiKey + * [No support][yubi-key-bls-support] +* Amazon Cloud HSM + * [No support][cloud-hsm-support] +* Ledger + * [Lists support for the BLS12-381 curve][ledger-bls-announce] + +I cannot find support listed for Google Cloud, although perhaps it exists. + +## Feasibility of implementation + +This section outlines the various hurdles that would exist to implementing BLS +signature aggregation into Tendermint. It aims to demonstrate that we _could_ +implement BLS signatures but that it would incur risk and require breaking changes for a +reasonably unclear benefit. + +### Can aggregated signatures be added as soft-upgrades? + +In my estimation, yes. With the implementation of proposer-based timestamps, +all validators now produce signatures on only one of two messages: + +1. A [CanonicalVote][canonical-vote-proto] where the BlockID is the hash of the block or +2. A `CanonicalVote` where the `BlockID` is nil. + +The block structure can be updated to perform hashing and validation in a new +way as a soft upgrade. This would look like adding a new section to the [Block.Commit][commit-proto] structure +alongside the current `Commit.Signatures` field. This new field, tentatively named +`AggregatedSignature` would contain the following structure: + +```proto +message AggregatedSignature { + // yeas is a BitArray representing which validators in the active validator + // set issued a 'yea' vote for the block. + tendermint.libs.bits.BitArray yeas = 1; + + // absent is a BitArray representing which validators in the active + // validator set did not issue votes for the block. + tendermint.libs.bits.BitArray absent = 2; + + // yea_signature is an aggregated signature produced from all of the vote + // signatures for the block. + repeated bytes yea_signature = 3; + + // nay_signature is an aggregated signature produced from all of the vote + // signatures from votes for 'nil' for this block. 
+ // nay_signature should be made from all of the validators that were both not + // in the 'yeas' BitArray and not in the 'absent' BitArray. + repeated bytes nay_signature = 4; +} +``` + +Adding this new field as a soft upgrade would mean hashing this data structure +into the blockID along with the old `Commit.Signatures` when both are present +as well as ensuring that the voting power represented in the new +`AggregatedSignature` and `Signatures` field was enough to commit the block +during block validation. One can certainly imagine other possible schemes for +implementing this but the above should serve as a simple enough proof of concept. + +### Implementing vote-time and commit-time signature aggregation separately + +Implementing aggregated BLS signatures as part of the block structure can easily be +achieved without implementing any 'vote-time' signature aggregation. +The block proposer would gather all of the votes, complete with signatures, +as it does now, and produce a set of aggregate signatures from all of the +individual vote signatures. + +Implementing 'vote-time' signature aggregation cannot be achieved without +also implementing commit-time signature aggregation. This is because such +signatures cannot be dis-aggregated into their constituent pieces. Therefore, +in order to implement 'vote-time' signature aggregation, we would need to +either first implement 'commit-time' signature aggregation, or implement both +'vote-time' signature aggregation while also updating the block creation and +verification protocols to allow for aggregated signatures. + +### Updating IBC clients + +In order for IBC clients to function, they must be able to perform light-client +verification of blocks on counterparty chains. Because BLS signatures are not +currently part of light-clients, chains that transmit messages over IBC +cannot update to using BLS signatures without their counterparties first +being upgraded to parse and verify BLS. 
If chains upgrade without their
+counterparties first updating, they will lose the ability to interoperate with
+non-updated chains.
+
+### New attack surfaces
+
+BLS signatures and signature aggregation come with a new set of attack surfaces.
+Additionally, it's not clear that all possible major attacks are currently known
+on the BLS aggregation schemes since new ones have been discovered since the IETF
+draft standard was written. The known attacks are manageable and are listed below.
+Our implementation would need to protect against these but this does not appear
+to present a significant hurdle to implementation.
+
+#### Rogue key attack prevention
+
+Generating an aggregated signature requires guarding against what is called
+a [rogue key attack][bls-ietf-terms]. A rogue key attack is one in which a
+malicious actor can craft an _aggregate_ key that can produce signatures that
+appear to include a signature from a private key that the malicious actor
+does not actually know. In Tendermint terms, this would look like a Validator
+producing a vote signed by both itself and some other validator where the other
+validator did not actually produce the vote itself.
+
+The main mechanisms for preventing this require that each entity prove that it
+can sign data with just their private key. The options involve either
+ensuring that each entity sign a _different_ message when producing every
+signature _or_ producing a [proof of possession][bls-ietf-pop] (PoP) when announcing
+their key to the network.
+
+A PoP is a message that demonstrates ownership of a private
+key. A simple scheme for PoP is one where the entity announcing
+its new public key to the network includes a digital signature over the bytes
+of the public key generated using the associated private key. Everyone receiving
+the public key and associated proof-of-possession can easily verify the
+signature and be sure the entity owns the private key.
+
+This PoP scheme suits the Tendermint use case quite well since
+validator keys change infrequently so the associated PoPs would not be onerous
+to produce, verify, and store. Using this scheme allows signature verification
+to proceed more quickly, since all signatures are over identical data and
+can therefore be checked using an aggregated public key instead of one at a
+time, public key by public key.
+
+#### Summing Zero Attacks
+
+[Summing zero attacks][summing-zero-paper] are attacks that rely on using the '0' point of an
+elliptic curve. For BLS signatures, if 0 is chosen as the private
+key, then the 0 point will also always be the public key and all signatures
+produced by the key will also be the 0 point. This is easy enough to
+detect when verifying each signature individually.
+
+However, because BLS signature aggregation creates an aggregated signature and
+an aggregated public key, a set of colluding signers can create a pair or set
+of signatures that are non-zero but which aggregate ("sum") to 0. The signatures that sum to zero along with the
+summed public key of the colluding signers will verify any message. This would
+allow the colluding signers to sign any block or message with the same signature.
+This would be reasonably easy to detect and create evidence for because, in
+all other cases, the same signature should not verify more than one message. It's
+not exactly clear how such an attack would advantage the colluding validators
+because the normal mechanisms of evidence gathering would still detect the
+double signing, regardless of the signatures on both blocks being identical.
+
+### Backwards Compatibility
+
+Backwards compatibility is an important consideration for signature verification.
+Specifically, it is important to consider whether chains using current versions
+of IBC would be able to interact with chains adopting BLS.
+ +Because the `Block` shared by IBC and Tendermint is produced and parsed using +protobuf, new structures can be added to the Block without breaking the +ability of legacy users to parse the new structure. Breaking changes between +current users of IBC and new Tendermint blocks only occur if data that is +relied upon by the current users is no longer included in the current fields. + +For the case of BLS aggregated signatures, a new `AggregatedSignature` field +can therefore be added to the `Commit` field without breaking current users. +Current users will be broken when counterparty chains upgrade to the new version +and _begin using_ BLS signatures. Once counterparty chains begin using BLS +signatures, the BlockID hashes will include hashes of the `AggregatedSignature` +data structure that the legacy users will not be able to compute. Additionally, +the legacy software will not be able to parse and verify the signatures to +ensure that a supermajority of validators from the counterparty chain signed +the block. + +### Library Support + +Libraries for BLS signature creation are limited in number, although active +development appears to be ongoing. Cryptographic algorithms are difficult to +implement correctly and correctness issues are extremely serious and dangerous. +No further exploration of BLS should be undertaken without strong assurance of +a well-tested library with continuing support for creating and verifying BLS +signatures. + +At the moment, there is one candidate, `blst`, that appears to be the most +mature and well vetted. While this library is undergoing continuing auditing +and is supported by funds from the Ethereum foundation, adopting a new cryptographic +library presents some serious risks. Namely, if the support for the library were +to be discontinued, Tendermint may become saddled with the requirement of supporting +a very complex piece of software or force a massive ecosystem-wide migration away +from BLS signatures. 
+
+This is one of the more serious reasons to avoid adopting BLS signatures at this
+time. There is no gold standard library. Some projects look promising, but no
+project has been formally verified with a long term promise of being supported
+well into the future.
+
+#### Go Standard Library
+
+The Go Standard library has no implementation of BLS signatures.
+
+#### BLST
+
+[blst][blst], or 'blast', is an implementation of BLS signatures written in C
+that provides bindings into Go as part of the repository. This library is
+actively undergoing formal verification by Galois and previously received an
+initial audit by NCC group, a firm I'd never heard of.
+
+`blst` is [targeted for use in prysm][prysm-blst], the golang implementation of Ethereum 2.0.
+
+#### Gnark-Crypto
+
+[Gnark-Crypto][gnark] is a Go-native implementation of elliptic-curve pairing-based
+cryptography. It is not audited and is documented as 'as-is', although
+development appears to be active so formal verification may be forthcoming.
+
+#### CIRCL
+
+[CIRCL][circl] is a Go-native implementation of several cryptographic primitives,
+bls12-381 among them. The library is written and maintained by Cloudflare and
+appears to receive frequent contributions. However, it lists itself as experimental
+and urges users to take caution before using it in production.
+
+### Added complexity to light client verification
+
+Implementing BLS signature aggregation in Tendermint would pose issues for the
+light client. The light client currently validates a subset of the signatures
+on a block when performing the verification algorithm. This is no longer possible
+with an aggregated signature. Aggregated signature verification is all-or-nothing.
+The light client could no longer check that a subset of validators from some
+set of validators is represented in the signature.
Instead, it would need to create +a new aggregated key with all the stated signers for each height it verified where +the validator set changed. + +This means that the speed advantages gained by using BLS cannot be fully realized +by the light client since the client needs to perform the expensive operation +of re-aggregating the public key. Aggregation is _not_ constant time in the +number of keys and instead grows linearly. When [benchmarked locally][blst-verify-bench-agg], +blst public key aggregation of 128 keys took 2.43 milliseconds. This, along with +the 1.5 milliseconds to verify a signature would raise light client signature +verification time to 3.9 milliseconds, a time above the previously mentioned +batch verification time using our ed25519 library of 2.0 milliseconds. + +Schemes to cache aggregated subsets of keys could certainly cut this time down at the +cost of adding complexity to the light client. + +### Added complexity to evidence handling + +Implementing BLS signature aggregation in Tendermint would add complexity to +the evidence handling within Tendermint. Currently, the light client can submit +evidence of a fork attempt to the chain. This evidence consists of the set of +validators that double-signed, including their public keys, with the conflicting +block. + +We can quickly check that the listed validators double signed by verifying +that each of their signatures are in the submitted conflicting block. A BLS +signature scheme would change this by requiring the light client to submit +the public keys of all of the validators that signed the conflicting block so +that the aggregated signature may be checked against the full signature set. +Again, aggregated signature verification is all-or-nothing, so without all of +the public keys, we cannot verify the signature at all. These keys would be +retrievable. 
Any party that wanted to create a fork would want to convince a +network that its fork is legitimate, so it would need to gossip the public keys. +This does not hamper the feasibility of implementing BLS signature aggregation +into Tendermint, but does represent yet another piece of added complexity to +the associated protocols. + +## Open Questions + +* *Q*: Can you aggregate Ed25519 signatures in Tendermint? + * There is a suggested scheme in github issue [7892][suggested-ed25519-agg], +but additional rigor would be required to fully verify its correctness. + +## Current Consideration + +Adopting a signature aggregation scheme presents some serious risks and costs +to the Tendermint project. It requires multiple backwards-incompatible changes +to the code, namely a change in the structure of the block and a new backwards-incompatible +signature and key type. It risks adding a new signature type for which new attack +types are still being discovered _and_ for which no industry standard, battle-tested +library yet exists. + +The gains boasted by this new signing scheme are modest: Verification time is +marginally faster and block sizes shrink by a few kilobytes. These are relatively +minor gains in exchange for the complexity of the change and the listed risks of the technology. +We should take a wait-and-see approach to BLS signature aggregation, monitoring +the up-and-coming projects and consider implementing it as the libraries and +standards develop. 
+ +### References + +[line-ostracon-repo]: https://github.com/line/ostracon +[line-ostracon-pr]: https://github.com/line/ostracon/pull/117 +[mit-BLS-lecture]: https://youtu.be/BFwc2XA8rSk?t=2521 +[gcp-storage-pricing]: https://cloud.google.com/storage/pricing#north-america_2 +[yubi-key-bls-support]: https://github.com/Yubico/yubihsm-shell/issues/66 +[cloud-hsm-support]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/pkcs11-key-types.html +[bls-ietf]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04 +[bls-ietf-terms]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-1.3 +[bls-ietf-pop]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-3.3 +[multi-signatures-smaller-blockchains]: https://eprint.iacr.org/2018/483.pdf +[ibc-tendermint]: https://github.com/cosmos/ibc/tree/master/spec/client/ics-007-tendermint-client +[zcash-adoption]: https://github.com/zcash/zcash/issues/2502 +[chia-adoption]: https://github.com/Chia-Network/chia-blockchain#chia-blockchain +[bls-ietf-ecdsa-compare]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-1.1 +[voi-ed25519-perf]: https://github.com/williambanfield/curve25519-voi/blob/benchmark/primitives/ed25519/PERFORMANCE.txt#L79 +[blst-verify-bench]: https://github.com/williambanfield/blst/blame/bench/bindings/go/PERFORMANCE.md#L9 +[blst-verify-bench-agg]: https://github.com/williambanfield/blst/blame/bench/bindings/go/PERFORMANCE.md#L23 +[vitalik-pairing-post]: https://medium.com/@VitalikButerin/exploring-elliptic-curve-pairings-c73c1864e627 +[ledger-bls-announce]: https://www.ledger.com/first-ever-firmware-update-coming-to-the-ledger-nano-x +[commit-proto]: https://github.com/tendermint/tendermint/blob/be7cb50bb3432ee652f88a443e8ee7b8ef7122bc/proto/tendermint/types/types.proto#L121 +[canonical-vote-proto]: https://github.com/tendermint/tendermint/blob/be7cb50bb3432ee652f88a443e8ee7b8ef7122bc/spec/core/encoding.md#L283 
+[blst]: https://github.com/supranational/blst +[prysm-blst]: https://github.com/prysmaticlabs/prysm/blob/develop/go.mod#L75 +[gnark]: https://github.com/ConsenSys/gnark-crypto/ +[eth-2-adoption]: https://notes.ethereum.org/@GW1ZUbNKR5iRjjKYx6_dJQ/Skxf3tNcg_ +[bls-weil-pairing]: https://www.iacr.org/archive/asiacrypt2001/22480516.pdf +[summing-zero-paper]: https://eprint.iacr.org/2021/323.pdf +[circl]: https://github.com/cloudflare/circl +[light-client-evidence]: https://github.com/tendermint/tendermint/blob/a6fd1fe20116d4b1f7e819cded81cece8e5c1ac7/types/evidence.go#L245 +[suggested-ed25519-agg]: https://github.com/tendermint/tendermint/issues/7892 From ce40697ea610901e9492b310d553cc936f915728 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 May 2022 10:25:34 +0000 Subject: [PATCH 006/203] build(deps): Bump github.com/vektra/mockery/v2 from 2.12.1 to 2.12.2 (#8474) Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.12.1 to 2.12.2.
Release notes

Sourced from github.com/vektra/mockery/v2's releases.

v2.12.2

Changelog

  • ea4c438 Add deprecation notice to logs
  • 735bc0c Add go-get deprecation note
  • bea853e Add missing mock
  • 989253d Fix *unsafe.Pointer
  • 9228ad4 Merge pull request #457 from LandonTClipp/readme_deprecation
  • 1d92e73 Merge pull request #460 from grongor/fix-unsafe-pointer
  • 2fcd83d Merge pull request #462 from LandonTClipp/deprecation
  • 9f67b8a More explicit deprecation for go-get
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/vektra/mockery/v2&package-manager=go_modules&previous-version=2.12.1&new-version=2.12.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 09ae2dad9f..69054d8bcc 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/creachadair/taskgroup v0.3.2 github.com/golangci/golangci-lint v1.45.2 github.com/google/go-cmp v0.5.8 - github.com/vektra/mockery/v2 v2.12.1 + github.com/vektra/mockery/v2 v2.12.2 gotest.tools v2.2.0+incompatible ) diff --git a/go.sum b/go.sum index 9997d4c22f..22ebb51ba5 100644 --- a/go.sum +++ b/go.sum @@ -1059,8 +1059,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/vektra/mockery/v2 v2.12.1 h1:BAJk2fGjVg/P9Fi+BxZD1/ZeKTOclpeAb/SKCc12zXc= -github.com/vektra/mockery/v2 v2.12.1/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U= +github.com/vektra/mockery/v2 v2.12.2 h1:JbRx9F+XcCJiDTyCm3V5lXYwl56m5ZouV6I9eZa1Dj0= +github.com/vektra/mockery/v2 v2.12.2/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= From ef44460c411b9dcb4e91e5cf5195b23bb63041ff Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 6 May 2022 06:44:09 -0700 Subject: [PATCH 007/203] Convert explicit zero comparison to a method. (#8475) Fixes #8472. I didn't see any other obvious cases of us doing this (although we do return zeroes in other places alongside errors, which is fine). 
--- light/verifier.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/light/verifier.go b/light/verifier.go index f6156c5de4..6bf0e787e6 100644 --- a/light/verifier.go +++ b/light/verifier.go @@ -279,8 +279,7 @@ func checkRequiredHeaderFields(h *types.SignedHeader) error { return errors.New("height in trusted header must be set (non zero") } - zeroTime := time.Time{} - if h.Time == zeroTime { + if h.Time.IsZero() { return errors.New("time in trusted header must be set") } From 97f2944db0f9a6ee1538eee0a9fd3a40463b6914 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 6 May 2022 07:35:35 -0700 Subject: [PATCH 008/203] Reintegrate docs deployment into the main TM repo (#8468) Per https://github.com/tendermint/docs/issues/20, it is no longer necessary to build the static documentation out of a separate repository. This change: - Adds an actions workflow to build and deploy the docs to GitHub Pages. - Updates some build settings in a compatible manner. This change does not affect the existing site deployment. To complete this change, we will need to update the custom domain pointer and disable the corresponding workflow in the tendermint/docs repository. Those changes can and must be done after this is merged. In the future should probably also move the build rule out of the Makefile and into the workflow directly. That will also make it easier to manage caching of build artifacts. For now, however, I've left it as-is, so that we do not break the active workflow on tendermint/docs, which depends on it. 
--- .github/workflows/docs-deployment.yml | 62 +++++++++++++++++++++++++++ Makefile | 3 +- docs/package.json | 2 +- 3 files changed, 65 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/docs-deployment.yml diff --git a/.github/workflows/docs-deployment.yml b/.github/workflows/docs-deployment.yml new file mode 100644 index 0000000000..082484dd58 --- /dev/null +++ b/.github/workflows/docs-deployment.yml @@ -0,0 +1,62 @@ +# Build and deploy the docs.tendermint.com website content. +# The static content is published to GitHub Pages. +# +# For documentation build info, see docs/DOCS_README.md. +name: Build static documentation site +on: + workflow_dispatch: # allow manual updates + push: + branches: + - master + paths: + - docs/** + - spec/** + +jobs: + # This is split into two jobs so that the build, which runs npm, does not + # have write access to anything. The deploy requires write access to publish + # to the branch used by GitHub Pages, however, so we can't just make the + # whole workflow read-only. + build: + name: VuePress build + runs-on: ubuntu-latest + container: + image: alpine:latest + permissions: + contents: read + steps: + - name: Install generator dependencies + run: | + apk add --no-cache make bash git npm + - uses: actions/checkout@v3 + with: + # We need to fetch full history so the backport branches for previous + # versions will be available for the build. 
+ fetch-depth: 0 + - name: Build documentation + run: | + git config --global --add safe.directory "$PWD" + make build-docs + - uses: actions/upload-artifact@v3 + with: + name: build-output + path: ~/output/ + + deploy: + name: Deploy to GitHub Pages + runs-on: ubuntu-latest + needs: build + permissions: + contents: write + steps: + - uses: actions/checkout@v3 + - uses: actions/download-artifact@v3 + with: + name: build-output + path: ~/output + - name: Deploy to GitHub Pages + uses: JamesIves/github-pages-deploy-action@v4 + with: + branch: 'docs-tendermint-com' + folder: ~/output + single-commit: true diff --git a/Makefile b/Makefile index 703220953f..a0b970f762 100644 --- a/Makefile +++ b/Makefile @@ -226,7 +226,8 @@ DESTINATION = ./index.html.md build-docs: @cd docs && \ while read -r branch path_prefix; do \ - (git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \ + ( git checkout $${branch} && npm ci --quiet && \ + VUEPRESS_BASE="/$${path_prefix}/" npm run build --quiet ) ; \ mkdir -p ~/output/$${path_prefix} ; \ cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \ cp ~/output/$${path_prefix}/index.html ~/output ; \ diff --git a/docs/package.json b/docs/package.json index 4b42527c7c..3200a5222e 100644 --- a/docs/package.json +++ b/docs/package.json @@ -15,7 +15,7 @@ "serve": "trap 'exit 0' SIGINT; vuepress dev --no-cache", "postserve": "./post.sh", "prebuild": "./pre.sh", - "build": "trap 'exit 0' SIGINT; vuepress build --no-cache", + "build": "trap 'exit 0' SIGINT; vuepress build --no-cache --silent", "postbuild": "./post.sh" }, "author": "", From 694ab2c6d1f9abc4e1db2e293b40cf0000e82753 Mon Sep 17 00:00:00 2001 From: elias-orijtech <103319121+elias-orijtech@users.noreply.github.com> Date: Fri, 6 May 2022 20:02:19 +0200 Subject: [PATCH 009/203] test/fuzz: replace outdated reference to go-fuzz in README (#8477) --- test/fuzz/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/fuzz/README.md 
b/test/fuzz/README.md index 11ec9d5216..68077ad235 100644 --- a/test/fuzz/README.md +++ b/test/fuzz/README.md @@ -1,6 +1,7 @@ # fuzz -Fuzzing for various packages in Tendermint using [go-fuzz](https://github.com/dvyukov/go-fuzz) library. +Fuzzing for various packages in Tendermint using the fuzzing infrastructure included in +Go 1.18. Inputs: From cec0a97987860bd753ae1b2d92963cb96afe6d7c Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Mon, 9 May 2022 02:15:08 +0200 Subject: [PATCH 010/203] RFC017: ABCI++ Vote Extension Propagation (#8317) * 1st version * Addressed (some of) @williambanfield's comments * Update docs/rfc/rfc-017-abci++-vote-extension-propag.md Co-authored-by: Daniel * Update docs/rfc/rfc-017-abci++-vote-extension-propag.md Co-authored-by: Daniel * Update docs/rfc/rfc-017-abci++-vote-extension-propag.md Co-authored-by: Daniel * Update docs/rfc/rfc-017-abci++-vote-extension-propag.md Co-authored-by: Sam Kleinman * Update docs/rfc/README.md Co-authored-by: Sam Kleinman * Addressed some comments * Addressed more comments. Improved description of Solution 3 * Work on 'definitions' section * Update docs/rfc/rfc-017-abci++-vote-extension-propag.md Co-authored-by: Callum Waters * bottom * Addressed Josef's valset-change comment. Other minor edits * Improved wording of 'disjoint valsets' case * Addressed TODOs: major revamp of various sections. First complete version. 
* Fixed minor wording problem * removed blank line * Update docs/rfc/rfc-017-abci++-vote-extension-propag.md Co-authored-by: Thane Thomson * Update docs/rfc/rfc-017-abci++-vote-extension-propag.md Co-authored-by: Thane Thomson * Addressed some of Thane's comments * Update docs/rfc/rfc-017-abci++-vote-extension-propag.md Co-authored-by: Thane Thomson * Update docs/rfc/rfc-017-abci++-vote-extension-propag.md Co-authored-by: Thane Thomson * Addressed outstanding comments * Addressed @williambanfield's 'catch-up message' comment * Removed TODO after confirming statesync is only run on nodes starting from scratch * Removed TODO (after checking with Jasmina) * Removed addressed TODO * Addressed Josef's feedback on case (h) * Typo * Update docs/rfc/rfc-017-abci++-vote-extension-propag.md Co-authored-by: Josef Widder <44643235+josef-widder@users.noreply.github.com> * Added log line Co-authored-by: Daniel Co-authored-by: Sam Kleinman Co-authored-by: Callum Waters Co-authored-by: Thane Thomson Co-authored-by: Josef Widder <44643235+josef-widder@users.noreply.github.com> --- docs/rfc/README.md | 3 + .../rfc-017-abci++-vote-extension-propag.md | 571 ++++++++++++++++++ 2 files changed, 574 insertions(+) create mode 100644 docs/rfc/rfc-017-abci++-vote-extension-propag.md diff --git a/docs/rfc/README.md b/docs/rfc/README.md index 6b03cf21ab..a83d802b8e 100644 --- a/docs/rfc/README.md +++ b/docs/rfc/README.md @@ -56,4 +56,7 @@ sections. 
- [RFC-018: BLS Signature Aggregation Exploration](./rfc-018-bls-agg-exploration.md) - [RFC-019: Configuration File Versioning](./rfc-019-config-version.md) + +- [RFC-017: ABCI++ Vote Extension Propagation](./rfc-017-abci++-vote-extension-propag.md) + diff --git a/docs/rfc/rfc-017-abci++-vote-extension-propag.md b/docs/rfc/rfc-017-abci++-vote-extension-propag.md new file mode 100644 index 0000000000..15d08f7bad --- /dev/null +++ b/docs/rfc/rfc-017-abci++-vote-extension-propag.md @@ -0,0 +1,571 @@ +# RFC 017: ABCI++ Vote Extension Propagation + +## Changelog + +- 11-Apr-2022: Initial draft (@sergio-mena). +- 15-Apr-2022: Addressed initial comments. First complete version (@sergio-mena). +- 09-May-2022: Addressed all outstanding comments. + +## Abstract + +According to the +[ABCI++ specification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md) +(as of 11-Apr-2022), a validator MUST provide a signed vote extension for each non-`nil` precommit vote +of height *h* that it uses to propose a block in height *h+1*. When a validator is up to +date, this is easy to do, but when a validator needs to catch up this is far from trivial as this data +cannot be retrieved from the blockchain. + +This RFC presents and compares the different options to address this problem, which have been proposed +in several discussions by the Tendermint Core team. + +## Document Structure + +The RFC is structured as follows. In the [Background](#background) section, +subsections [Problem Description](#problem-description) and [Cases to Address](#cases-to-address) +explain the problem at hand from a high level perspective, i.e., abstracting away from the current +Tendermint implementation. In contrast, subsection +[Current Catch-up Mechanisms](#current-catch-up-mechanisms) delves into the details of the current +Tendermint code. 
+
+In the [Discussion](#discussion) section, subsection [Solutions Proposed](#solutions-proposed) is also
+worded abstracting away from implementation details, whilst subsections
+[Feasibility of the Proposed Solutions](#feasibility-of-the-proposed-solutions) and
+[Current Limitations and Possible Implementations](#current-limitations-and-possible-implementations)
+analyze the viability of one of the proposed solutions in the context of Tendermint's architecture
+based on reactors. Finally, [Formalization Work](#formalization-work) briefly discusses the work
+still needed to demonstrate the correctness of the chosen solution.
+
+The high level subsections are aimed at readers who are familiar with consensus algorithms, in
+particular with the one described in the Tendermint white paper, but who are not necessarily
+acquainted with the details of the Tendermint codebase. The other subsections, which go into
+implementation details, are best understood by engineers with deep knowledge of the implementation of
+Tendermint's blocksync and consensus reactors.
+
+## Background
+
+### Basic Definitions
+
+This document assumes that all validators have equal voting power for the sake of simplicity. This is done
+without loss of generality.
+
+There are two types of votes in Tendermint: *prevotes* and *precommits*. Votes can be `nil` or refer to
+a proposed block. This RFC focuses on precommits,
+also known as *precommit votes*. In this document we sometimes call them simply *votes*.
+
+Validators send precommit votes to their peer nodes in *precommit messages*. According to the
+[ABCI++ specification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md),
+a precommit message MUST also contain a *vote extension*.
+This mandatory vote extension can be empty, but MUST be signed with the same key as the precommit
+vote (i.e., the sending validator's).
+Nevertheless, the vote extension is signed independently from the vote, so a vote can be separated from +its extension. +The reason for vote extensions to be mandatory in precommit messages is that, otherwise, a (malicious) +node can omit a vote extension while still providing/forwarding/sending the corresponding precommit vote. + +The validator set at height *h* is denoted *valseth*. A *commit* for height *h* consists of more +than *2nh/3* precommit votes voting for a block *b*, where *nh* denotes the size of +*valseth*. A commit does not contain `nil` precommit votes, and all votes in it refer to the +same block. An *extended commit* is a *commit* where every precommit vote has its respective vote extension +attached. + +### Problem Description + +In the version of [ABCI](https://github.com/tendermint/spec/blob/4fb99af/spec/abci/README.md) present up to +Tendermint v0.35, for any height *h*, a validator *v* MUST have the decided block *b* and a commit for +height *h* in order to decide at height *h*. Then, *v* just needs a commit for height *h* to propose at +height *h+1*, in the rounds of *h+1* where *v* is a proposer. + +In [ABCI++](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md), +the information that a validator *v* MUST have to be able to decide in *h* does not change with +respect to pre-existing ABCI: the decided block *b* and a commit for *h*. +In contrast, for proposing in *h+1*, a commit for *h* is not enough: *v* MUST now have an extended +commit. + +When a validator takes an active part in consensus at height *h*, it has all the data it needs in memory, +in its consensus state, to decide on *h* and propose in *h+1*. Things are not so easy in the cases when +*v* cannot take part in consensus because it is late (e.g., it falls behind, it crashes +and recovers, or it just starts after the others). 
If *v* does not take part, it cannot actively +gather precommit messages (which include vote extensions) in order to decide. +Before ABCI++, this was not a problem: full nodes are supposed to persist past blocks in the block store, +so other nodes would realise that *v* is late and send it the missing decided block at height *h* and +the corresponding commit (kept in block *h+1*) so that *v* can catch up. +However, we cannot apply this catch-up technique for ABCI++, as the vote extensions, which are part +of the needed *extended commit* are not part of the blockchain. + +### Cases to Address + +Before we tackle the description of the possible cases we need to address, let us describe the following +incremental improvement to the ABCI++ logic. Upon decision, a full node persists (e.g., in the block +store) the extended commit that allowed the node to decide. For the moment, let us assume the node only +needs to keep its *most recent* extended commit, and MAY remove any older extended commits from persistent +storage. +This improvement is so obvious that all solutions described in the [Discussion](#discussion) section use +it as a building block. Moreover, it completely addresses by itself some of the cases described in this +subsection. + +We now describe the cases (i.e. possible *runs* of the system) that have been raised in different +discussions and need to be addressed. They are (roughly) ordered from easiest to hardest to deal with. + +- **(a)** *Happy path: all validators advance together, no crash*. + + This case is included for completeness. All validators have taken part in height *h*. + Even if some of them did not manage to send a precommit message for the decided block, they all + receive enough precommit messages to be able to decide. 
As vote extensions are mandatory in
+  precommit messages, every validator *v* trivially has all the information, namely the decided block
+  and the extended commit, needed to propose in height *h+1* for the rounds in which *v* is the
+  proposer.
+
+  No problem to solve here.
+
+- **(b)** *All validators advance together, then all crash at the same height*.
+
+  This case has been raised in some discussions, the main concern being whether the vote extensions
+  for the previous height would be lost across the network. With the improvement described above,
+  namely persisting the latest extended commit at decision time, this case is solved.
+  When a crashed validator recovers, it recovers the last extended commit from persistent storage
+  and handshakes with the Application.
+  If need be, it also reconstructs messages for the unfinished height
+  (including all precommits received) from the WAL.
+  Then, the validator can resume where it was at the time of the crash. Thus, as extensions are
+  persisted, either in the WAL (in the form of received precommit messages), or in the latest
+  extended commit, the only way that vote extensions needed to start the next height could be lost
+  forever would be if all validators crashed and never recovered (e.g. disk corruption).
+  Since a *correct* node MUST eventually recover, this violates Tendermint's assumption of more than
+  *2nh/3* correct validators for every height *h*.
+
+  No problem to solve here.
+
+- **(c)** *Lagging majority*.
+
+  Let us assume the validator set does not change between *h* and *h+1*.
+  A lagging majority is not possible by the nature of the Tendermint algorithm, which requires more
+  than *2nh/3* precommit votes for some round of height *h* in order to make progress.
+  So, only up to *nh/3* validators can lag behind.
+
+  On the other hand, for the case where there are changes to the validator set between *h* and
+  *h+1*, please see case (d) below, where the extreme case is discussed.
+
+- **(d)** *Validator set changes completely between* h *and* h+1.
+
+  If sets *valseth* and *valseth+1* are disjoint,
+  more than *2nh/3* of validators in height *h* should
+  have actively participated in consensus in *h*. So, as of height *h*, only a minority of validators
+  in *h* can be lagging behind, although they could all lag behind from *h+1* on, as they are no
+  longer validators, only full nodes. This situation falls under the assumptions of case (h) below.
+
+  As for validators in *valseth+1*, as they were not validators as of height *h*, they
+  could all be lagging behind by that time. However, by the time *h* finishes and *h+1* begins, the
+  chain will halt until more than *2nh+1/3* of them have caught up and started consensus
+  at height *h+1*. If set *valseth+1* does not change in *h+2* and subsequent
+  heights, only up to *nh+1/3* validators will be able to lag behind. Thus, we have
+  converted this case into case (h) below.
+
+- **(e)** *Enough validators crash to block the rest*.
+
+  In this case, blockchain progress halts, i.e. surviving full nodes keep increasing rounds
+  indefinitely, until some of the crashed validators are able to recover.
+  Those validators that recover first will handshake with the Application and recover at the height
+  they crashed, which is still the same height the nodes that did not crash are stuck in, so they don't need
+  to catch up.
+  Further, they had persisted the extended commit for the previous height. Nothing to solve.
+
+  For those validators recovering later, we are in case (h) below.
+
+- **(f)** *Some validators crash, but not enough to block progress*.
+
+  When the correct processes that crashed recover, they handshake with the Application and resume at
+  the height they were at when they crashed. As the blockchain did not stop making progress, the
+  recovered processes are likely to have fallen behind with respect to the progressing majority.
+
+  At this point, the recovered processes are in case (h) below.
+ +- **(g)** *A new full node starts*. + + The reasoning here also applies to the case when more than one full node are starting. + When the full node starts from scratch, it has no state (its current height is 0). Ignoring + statesync for the time being, the node just needs to catch up by applying past blocks one by one + (after verifying them). + + Thus, the node is in case (h) below. + +- **(h)** *Advancing majority, lagging minority* + + In this case, some nodes are late. More precisely, at the present time, a set of full nodes, + denoted *Lhp*, are falling behind + (e.g., temporary disconnection or network partition, memory thrashing, crashes, new nodes) + an arbitrary + number of heights: + between *hs* and *hp*, where *hs < hp*, and + *hp* is the highest height + any correct full node has reached so far. + + The correct full nodes that reached *hp* were able to decide for *hp-1*. + Therefore, less than *nhp-1/3* validators of *hp-1* can be part + of *Lhp*, since enough up-to-date validators needed to actively participate + in consensus for *hp-1*. + + Since, at the present time, + no node in *Lhp* took part in any consensus between + *hs* and *hp-1*, + the reasoning above can be extended to validator set changes between *hs* and + *hp-1*. This results in the following restriction on the full nodes that can be part of *Lhp*. + + - ∀ *h*, where *hs ≤ h < hp*, + | *valseth* ∩ *Lhp* | *< nh/3* + + If this property does not hold for a particular height *h*, where + *hs ≤ h < hp*, Tendermint could not have progressed beyond *h* and + therefore no full node could have reached *hp* (a contradiction). + + These lagging nodes in *Lhp* need to catch up. They have to obtain the + information needed to make + progress from other nodes. For each height *h* between *hs* and *hp-2*, + this includes the decided block for *h*, and the + precommit votes also for *deciding h* (which can be extracted from the block at height *h+1*). 
+
+At a given height *hc* (where possibly *hc << hp*),
+a full node in *Lhp* will consider itself *caught up*, based on the
+(maybe out of date) information it is getting from its peers. Then, the node needs to be ready to
+propose at height *hc+1*, which requires having received the vote extensions for
+*hc*.
+As the vote extensions are *not* stored in the blocks, and it is difficult to have strong
+guarantees on *when* a late node considers itself caught up, providing the late node with the right
+vote extensions for the right height poses a problem.
+
+At this point, we have described and compared all cases raised in discussions leading up to this
+RFC. The list above aims at being exhaustive. The analysis of each case included above makes all of
+them converge into case (h).
+
+### Current Catch-up Mechanisms
+
+We now briefly describe the current catch-up mechanisms in the reactors concerned in Tendermint.
+
+#### Statesync
+
+Full nodes optionally run statesync just after starting, when they start from scratch.
+If statesync succeeds, an Application snapshot is installed, and Tendermint jumps from height 0 directly
+to the height the Application snapshot represents, without applying the block of any previous height.
+Some light blocks are received and stored in the block store for running light-client verification of
+all the skipped blocks. Light blocks are incomplete blocks, typically containing the header and the
+canonical commit but, e.g., no transactions. They are stored in the block store as "signed headers".
+
+The statesync reactor is not really relevant for solving the problem discussed in this RFC. We will
+nevertheless mention it when needed; in particular, to understand some corner cases.
+ +#### Blocksync + +The blocksync reactor kicks in after start up or recovery (and, optionally, after statesync is done) +and sends the following messages to its peers: + +- `StatusRequest` to query the height its peers are currently at, and +- `BlockRequest`, asking for blocks of heights the local node is missing. + +Using `BlockResponse` messages received from peers, the blocksync reactor validates each received +block using the block of the following height, saves the block in the block store, and sends the +block to the Application for execution. + +If blocksync has validated and applied the block for the height *previous* to the highest seen in +a `StatusResponse` message, or if no progress has been made after a timeout, the node considers +itself as caught up and switches to the consensus reactor. + +#### Consensus Reactor + +The consensus reactor runs the full Tendermint algorithm. For a validator this means it has to +propose blocks, and send/receive prevote/precommit messages, as mandated by Tendermint, before it can +decide and move on to the next height. + +If a full node that is running the consensus reactor falls behind at height *h*, when a peer node +realises this it will retrieve the canonical commit of *h+1* from the block store, and *convert* +it into a set of precommit votes and will send those to the late node. + +## Discussion + +### Solutions Proposed + +These are the solutions proposed in discussions leading up to this RFC. + +- **Solution 0.** *Vote extensions are made **best effort** in the specification*. + + This is the simplest solution, considered as a way to provide vote extensions in a simple enough + way so that it can be part of v0.36. + It consists in changing the specification so as to not *require* that precommit votes used upon + `PrepareProposal` contain their corresponding vote extensions. In other words, we render vote + extensions optional. 
+
+  There are strong implications stemming from such a relaxation of the original specification.
+
+  - As a vote extension is signed *separately* from the vote it is extending, an intermediate node
+    can now remove (i.e., censor) vote extensions from precommit messages at will.
+  - Further, there is no point anymore in the spec requiring the Application to accept a vote extension
+    passed via `VerifyVoteExtension` to consider a precommit message valid in its entirety. Remember
+    this behavior of `VerifyVoteExtension` is adding a constraint to Tendermint's conditions for
+    liveness.
+    In this situation, it is better and simpler to just drop the vote extension rejected by the
+    Application via `VerifyVoteExtension`, but still consider the precommit vote itself valid as long
+    as its signature verifies.
+
+- **Solution 1.** *Include vote extensions in the blockchain*.
+
+  Another obvious solution, which has somehow been considered in the past, is to include the vote
+  extensions and their signatures in the blockchain.
+  The blockchain would thus include the extended commit, rather than a regular commit, as the structure
+  to be canonicalized in the next block.
+  With this solution, the current mechanisms implemented both in the blocksync and consensus reactors
+  would still be correct, as all the information a node needs to catch up, and to start proposing when
+  it considers itself as caught-up, can now be recovered from past blocks saved in the block store.
+
+  This solution has two main drawbacks.
+
+  - As the block format must change, upgrading a chain requires a hard fork. Furthermore,
+    all existing light client implementations will stop working until they are upgraded to deal with
+    the new format (e.g., how certain hashes are calculated and/or how certain signatures are checked).
+    For instance, let us consider IBC, which relies on light clients. An IBC connection between
+    two chains will be broken if only one chain upgrades.
+ - The extra information (i.e., the vote extensions) that is now kept in the blockchain is not really + needed *at every height* for a late node to catch up. + - This information is only needed to be able to *propose* at the height the validator considers + itself as caught-up. If a validator is indeed late for height *h*, it is useless (although + correct) for it to call `PrepareProposal`, or `ExtendVote`, since the block is already decided. + - Moreover, some use cases require pretty sizeable vote extensions, which would result in an + important waste of space in the blockchain. + +- **Solution 2.** *Skip* propose *step in Tendermint algorithm*. + + This solution consists in modifying the Tendermint algorithm to skip the *send proposal* step in + heights where the node does not have the required vote extensions to populate the call to + `PrepareProposal`. The main idea behind this is that it should only happen when the validator is late + and, therefore, up-to-date validators have already proposed (and decided) for that height. + A small variation of this solution is, rather than skipping the *send proposal* step, the validator + sends a special *empty* or *bottom* (⊥) proposal to signal other nodes that it is not ready to propose + at (any round of) the current height. + + The appeal of this solution is its simplicity. A possible implementation does not need to extend + the data structures, or change the current catch-up mechanisms implemented in the blocksync or + in the consensus reactor. When we lack the needed information (vote extensions), we simply rely + on another correct validator to propose a valid block in other rounds of the current height. + + However, this solution can be attacked by a byzantine node in the network in the following way. 
+
+  Let us consider the following scenario:
+
+  - all validators in *valseth* send out precommit messages, with vote extensions,
+    for height *h*, round 0, roughly at the same time,
+  - all those precommit messages contain non-`nil` precommit votes, which vote for block *b*
+  - all those precommit messages sent in height *h*, round 0, and all messages sent in
+    height *h*, round *r > 0* get delayed indefinitely, so,
+  - all validators in *valseth* keep waiting for enough precommit
+    messages for height *h*, round 0, needed for deciding in height *h*
+  - an intermediate (malicious) full node *m* manages to receive block *b*, and gather more than
+    *2nh/3* precommit messages for height *h*, round 0,
+  - one way or another, the solution should have either (a) a mechanism for a full node to *tell*
+    another full node it is late, or (b) a mechanism for a full node to conclude it is late based
+    on other full nodes' messages; any of these mechanisms should, at the very least,
+    require the late node receiving the decided block and a commit (not necessarily an extended
+    commit) for *h*,
+  - node *m* uses the gathered precommit messages to build a commit for height *h*, round 0,
+  - in order to convince full nodes that they are late, node *m* either (a) *tells* them they
+    are late, or (b) shows them it (i.e. *m*) is ahead, by sending them block *b*, along with the
+    commit for height *h*, round 0,
+  - all full nodes conclude they are late from *m*'s behavior, and use block *b* and the commit for
+    height *h*, round 0, to decide on height *h*, and proceed to height *h+1*.
+
+  At this point, *all* full nodes, including all validators in *valseth+1*, have advanced
+  to height *h+1* believing they are late, and so, expecting the *hypothetical* leading majority of
+  validators in *valseth+1* to propose for *h+1*. As a result, the blockchain
+  grinds to a halt.
+ A (rather complex) ad-hoc mechanism would need to be carried out by node operators to roll + back all validators to the precommit step of height *h*, round *r*, so that they can regenerate + vote extensions (remember vote extensions are non-deterministic) and continue execution. + +- **Solution 3.** *Require extended commits to be available at switching time*. + + This one is more involved than all previous solutions, and builds on an idea present in Solution 2: + vote extensions are actually not needed for Tendermint to make progress as long as the + validator is *certain* it is late. + + We define two modes. The first is denoted *catch-up mode*, and Tendermint only calls + `FinalizeBlock` for each height when in this mode. The second is denoted *consensus mode*, in + which the validator considers itself up to date and fully participates in consensus and calls + `PrepareProposal`/`ProcessProposal`, `ExtendVote`, and `VerifyVoteExtension`, before calling + `FinalizeBlock`. + + The catch-up mode does not need vote extension information to make progress, as all it needs is the + decided block at each height to call `FinalizeBlock` and keep the state-machine replication making + progress. The consensus mode, on the other hand, does need vote extension information when + starting every height. + + Validators are in consensus mode by default. When a validator in consensus mode falls behind + for whatever reason, e.g. 
cases (b), (d), (e), (f), (g), or (h) above, we introduce the following
+  key safety property:
+
+  - for every height *hp*, a full node *f* in *hp* refuses to switch to catch-up
+    mode **until** there exists a height *h'* such that:
+    - *f* has received and (light-client) verified the blocks of
+      all heights *h*, where *hp ≤ h ≤ h'*
+    - it has received an extended commit for *h'* and has verified:
+      - the precommit vote signatures in the extended commit
+      - the vote extension signatures in the extended commit: each is signed with the same
+        key as the precommit vote it extends
+
+  If the condition above holds for *hp*, namely receiving a valid sequence of blocks in
+  *f*'s future, and an extended commit corresponding to the last block in the sequence, then
+  node *f*:
+
+  - switches to catch-up mode,
+  - applies all blocks between *hp* and *h'* (calling `FinalizeBlock` only), and
+  - switches back to consensus mode using the extended commit for *h'* to propose in the rounds of
+    *h' + 1* where it is the proposer.
+
+  This mechanism, together with the invariant it uses, ensures that the node cannot be attacked by
+  being fed a block without extensions to make it believe it is late, in a similar way as explained
+  for Solution 2.
+
+### Feasibility of the Proposed Solutions
+
+Solution 0, besides the drawbacks described in the previous section, provides guarantees that are
+weaker than the rest. The Application does not have the assurance that more than *2nh/3* vote
+extensions will *always* be available when calling `PrepareProposal` at height *h+1*.
+This level of guarantees is probably not strong enough for vote extensions to be useful for some
+important use cases that motivated them in the first place, e.g., encrypted mempool transactions.
+ +Solution 1, while being simple in that the changes needed in the current Tendermint codebase would +be rather small, is changing the block format, and would therefore require all blockchains using +Tendermint v0.35 or earlier to hard-fork when upgrading to v0.36. + +Since Solution 2 can be attacked, one might prefer Solution 3, even if it is more involved +to implement. Further, we must elaborate on how we can turn Solution 3, described in abstract +terms in the previous section, into a concrete implementation compatible with the current +Tendermint codebase. + +### Current Limitations and Possible Implementations + +The main limitations affecting the current version of Tendermint are the following. + +- The current version of the blocksync reactor does not use the full + [light client verification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/light-client/README.md) + algorithm to validate blocks coming from other peers. +- The code being structured into the blocksync and consensus reactors, only switching from the + blocksync reactor to the consensus reactor is supported; switching in the opposite direction is + not supported. Alternatively, the consensus reactor could have a mechanism allowing a late node + to catch up by skipping calls to `PrepareProposal`/`ProcessProposal`, and + `ExtendVote`/`VerifyVoteExtension` and only calling `FinalizeBlock` for each height. + Such a mechanism does not exist at the time of writing this RFC. + +The blocksync reactor featuring light client verification is being actively worked on (tentatively +for v0.37). So it is best if this RFC does not try to delve into that problem, but just makes sure +its outcomes are compatible with that effort. + +In subsection [Cases to Address](#cases-to-address), we concluded that we can focus on +solving case (h) in theoretical terms. 
+However, as the current Tendermint version does not yet support switching back to blocksync once a +node has switched to consensus, we need to split case (h) into two cases. When a full node needs to +catch up... + +- **(h.1)** ... it has not switched yet from the blocksync reactor to the consensus reactor, or + +- **(h.2)** ... it has already switched to the consensus reactor. + +This is important in order to discuss the different possible implementations. + +#### Base Implementation: Persist and Propagate Extended Commit History + +In order to circumvent the fact that we cannot switch from the consensus reactor back to blocksync, +rather than just keeping the few most recent extended commits, nodes will need to keep +and gossip a backlog of extended commits so that the consensus reactor can still propose and decide +in out-of-date heights (even if those proposals will be useless). + +The base implementation - for which an experimental patch exists - consists in the conservative +approach of persisting in the block store *all* extended commits for which we have also stored +the full block. Currently, when statesync is run at startup, it saves light blocks. +This base implementation does not seek +to receive or persist extended commits for those light blocks as they would not be of any use. + +Then, we modify the blocksync reactor so that peers *always* send requested full blocks together +with the corresponding extended commit in the `BlockResponse` messages. This guarantees that the +block store being reconstructed by blocksync has the same information as that of peers that are +up to date (at least starting from the latest snapshot applied by statesync before starting blocksync). 
+
+Thus, blocksync has all the data it requires to switch to the consensus reactor, as long as one of
+the following exit conditions is met:
+
+- The node is still at height 0 (where no commit or extended commit is needed)
+- The node has processed at least 1 block in blocksync
+
+The second condition is needed in case the node has installed an Application snapshot during statesync.
+If that is the case, at the time blocksync starts, the block store only has the data statesync has saved:
+light blocks, and no extended commits.
+Hence we need to blocksync at least one block from another node, which will be sent with its corresponding extended commit, before we can switch to consensus.
+
+As a side note, a chain might be started at a height *hi > 0*, all other heights
+*h < hi* being non-existent. In this case, the chain is still considered to be at height 0 before
+block *hi* is applied, so the first condition above allows the node to switch to consensus even
+if blocksync has not processed any block (which is always the case if all nodes are starting from scratch).
+
+When a validator falls behind while having already switched to the consensus reactor, a peer node can
+simply retrieve the extended commit for the required height from the block store and reconstruct a set of
+precommit votes together with their extensions and send them in the form of precommit messages to the
+validator falling behind, regardless of whether the peer node holds the extended commit because it
+actually participated in that consensus and thus received the precommit messages, or it received the extended commit via a `BlockResponse` message while running blocksync.
+
+This solution requires a few changes to the consensus reactor:
+
+- upon saving the block for a given height in the block store at decision time, save the
+  corresponding extended commit as well
+- in the catch-up mechanism, when a node realizes that another peer is more than 2 heights
+  behind, it uses the extended commit (rather than the canonical commit as done previously) to
+  reconstruct the precommit votes with their corresponding extensions
+
+The changes to the blocksync reactor are more substantial:
+
+- the `BlockResponse` message is extended to include the extended commit of the same height as
+  the block included in the response (just as they are stored in the block store)
+- structure `bpRequester` is likewise extended to hold the received extended commits coming in
+  `BlockResponse` messages
+- method `PeekTwoBlocks` is modified to also return the extended commit corresponding to the first block
+- when successfully verifying a received block, the reactor saves its corresponding extended commit in
+  the block store
+
+The two main drawbacks of this base implementation are:
+
+- the increased size taken by the block store, in particular with big extensions
+- the increased bandwidth taken by the new format of `BlockResponse`
+
+#### Possible Optimization: Pruning the Extended Commit History
+
+If we cannot switch from the consensus reactor back to the blocksync reactor we cannot prune the extended commit backlog in the block store without sacrificing the implementation's correctness. The asynchronous
+nature of our distributed system model allows a process to fall behind an arbitrary number of
+heights, and thus all extended commits need to be kept *just in case* a node that far behind had
+previously switched to the consensus reactor.
+
+However, there is a possibility to optimize the base implementation. Every time we enter a new height,
+we could prune from the block store all extended commits that are more than *d* heights in the past.
+
+Then, we need to handle two new situations, roughly equivalent to cases (h.1) and (h.2) described above.
+
+- (h.1) A node starts from scratch or recovers after a crash. In this case, we need to modify the
+  blocksync reactor's base implementation.
+  - when receiving a `BlockResponse` message, it MUST accept that the extended commit may be set to `nil`,
+  - when sending a `BlockResponse` message, if the block store contains the extended commit for that
+    height, it MUST set it in the message, otherwise it sets it to `nil`,
+  - the exit conditions used for the base implementation are no longer valid; the only reliable exit
+    condition now consists in making sure that the last block processed by blocksync was received with
+    the corresponding extended commit, and not `nil`; this extended commit will allow the node to switch from
+    the blocksync reactor to the consensus reactor and immediately act as a proposer if required.
+- (h.2) A node already running the consensus reactor falls behind beyond *d* heights. In principle,
+  the node will be stuck forever as no other node can provide the vote extensions it needs to make
+  progress (they all have pruned the corresponding extended commit).
+  However, we can manually have the node crash and recover as a workaround. This effectively converts
+  this case into (h.1).
+
+### Formalization Work
+
+A formalization work to show or prove the correctness of the different use cases and solutions
+presented here (and any other that may be found) needs to be carried out.
+A question that needs a precise answer is how many extended commits (one?, two?) a node needs
+to keep in persistent memory when implementing Solution 3 described above without Tendermint's
+current limitations.
+Another important invariant we need to prove formally is that the set of vote extensions
+required to make progress will always be held somewhere in the network.
+ +## References + +- [ABCI++ specification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/abci%2B%2B/README.md) +- [ABCI as of v0.35](https://github.com/tendermint/spec/blob/4fb99af/spec/abci/README.md) +- [Vote extensions issue](https://github.com/tendermint/tendermint/issues/8174) +- [Light client verification](https://github.com/tendermint/tendermint/blob/4743a7ad0/spec/light-client/README.md) From 494c5cddbe6cc3364708e4e5e6559da1beefdec5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 May 2022 14:31:59 +0000 Subject: [PATCH 011/203] build(deps): Bump docker/setup-buildx-action from 1.7.0 to 2.0.0 (#8483) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 1.7.0 to 2.0.0.
Release notes

Sourced from docker/setup-buildx-action's releases.

v2.0.0

  • Node 16 as default runtime by @​crazy-max (#131)
    • This requires a minimum Actions Runner version of v2.285.0, which is by default available in GHES 3.4 or later.

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v1.7.0...v2.0.0

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/setup-buildx-action&package-manager=github_actions&previous-version=1.7.0&new-version=2.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 524df1ef82..1c16133e0a 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -39,7 +39,7 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v1.7.0 + uses: docker/setup-buildx-action@v2.0.0 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} From 083716b22a8d02b332950655634dd5bc9a7afd71 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 May 2022 14:33:45 +0000 Subject: [PATCH 012/203] build(deps): Bump docker/build-push-action from 2.10.0 to 3.0.0 (#8482) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 2.10.0 to 3.0.0.
Release notes

Sourced from docker/build-push-action's releases.

v3.0.0

  • Node 16 as default runtime by @​crazy-max (#564)
    • This requires a minimum Actions Runner version of v2.285.0, which is by default available in GHES 3.4 or later.
  • Standalone mode support by @​crazy-max (#601 #609)
  • chore: update dev dependencies and workflow by @​crazy-max (#571)
  • Bump @​actions/exec from 1.1.0 to 1.1.1 (#573)
  • Bump @​actions/github from 5.0.0 to 5.0.1 (#582)
  • Bump minimist from 1.2.5 to 1.2.6 (#584)
  • Bump semver from 7.3.5 to 7.3.7 (#595)
  • Bump csv-parse from 4.16.3 to 5.0.4 (#533)

Full Changelog: https://github.com/docker/build-push-action/compare/v2.10.0...v3.0.0

Commits
  • e551b19 Merge pull request #564 from crazy-max/node-16
  • 3554377 Merge pull request #609 from crazy-max/ci-fix-test
  • a62bc1b ci: fix standalone test
  • c208583 Merge pull request #601 from crazy-max/standalone-mode
  • fcd9124 Merge pull request #607 from docker/dependabot/github_actions/docker/metadata...
  • 0ebe720 Bump docker/metadata-action from 3 to 4
  • 38b4580 Standalone mode support
  • ba31738 Merge pull request #533 from docker/dependabot/npm_and_yarn/csv-parse-5.0.4
  • 43721d2 Update generated content
  • 5ea21bf Fix csv-parse implementation since major update
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/build-push-action&package-manager=github_actions&previous-version=2.10.0&new-version=3.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1c16133e0a..7547e8591f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -49,7 +49,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v2.10.0 + uses: docker/build-push-action@v3.0.0 with: context: . file: ./DOCKER/Dockerfile From b52b8f2740bf075e2a917784227a0c4ff4b9f470 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 May 2022 14:35:43 +0000 Subject: [PATCH 013/203] build(deps): Bump docker/login-action from 1.14.1 to 2.0.0 (#8481) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/login-action](https://github.com/docker/login-action) from 1.14.1 to 2.0.0.
Release notes

Sourced from docker/login-action's releases.

v2.0.0

  • Node 16 as default runtime by @​crazy-max (#161)
    • This requires a minimum Actions Runner version of v2.285.0, which is by default available in GHES 3.4 or later.
  • chore: update dev dependencies and workflow by @​crazy-max (#170)
  • Bump @​actions/exec from 1.1.0 to 1.1.1 (#167)
  • Bump @​actions/io from 1.1.1 to 1.1.2 (#168)
  • Bump minimist from 1.2.5 to 1.2.6 (#176)
  • Bump https-proxy-agent from 5.0.0 to 5.0.1 (#182)

Full Changelog: https://github.com/docker/login-action/compare/v1.14.1...v2.0.0

Commits
  • 49ed152 Merge pull request #161 from crazy-max/node16-runtime
  • b61a9ce Node 16 as default runtime
  • 3a136a8 Merge pull request #182 from docker/dependabot/npm_and_yarn/https-proxy-agent...
  • b312880 Update generated content
  • 795794e Bump https-proxy-agent from 5.0.0 to 5.0.1
  • 1edf618 Merge pull request #179 from docker/dependabot/github_actions/codecov/codecov...
  • 8e66ad4 Bump codecov/codecov-action from 2 to 3
  • 7c79b59 Merge pull request #176 from docker/dependabot/npm_and_yarn/minimist-1.2.6
  • 24a38e0 Bump minimist from 1.2.5 to 1.2.6
  • 70e1ff8 Merge pull request #170 from crazy-max/eslint
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/login-action&package-manager=github_actions&previous-version=1.14.1&new-version=2.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 7547e8591f..0a006f9b91 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -43,7 +43,7 @@ jobs: - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v1.14.1 + uses: docker/login-action@v2.0.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} From 4b36feaa2b1e6f2794becfa6023f74007c2b4abd Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Mon, 9 May 2022 18:52:10 -0400 Subject: [PATCH 014/203] scripts/metricsgen: add the initial version of metricsgen (#8479) This pull requests adds a new tool, metricsgen, for generating Tendermint metrics constructors from `Metrics` struct definitions. This tool aims to reduce the amount of boilerplate required to add additional metrics to Tendermint. Its working is fairly simple, it parses the go ast, extracts field information, and uses this field information to execute a go template. This pull request also adds a proof-of-concept of the tool's output and working by using it to generate the [indexer metrics](https://github.com/tendermint/tendermint/pull/8479/files#diff-4b0c597b6fa05332a2f9a8e0ce079e360602942fae99dc5485f1edfe71c0a29e) using `//go:generate` directives and a simple `make` target. The next steps for this tool are documented in https://github.com/tendermint/tendermint/issues/8485 and https://github.com/tendermint/tendermint/issues/8486, which detail using the tool to generate the `metrics.md` documentation file and using the tool to migrate away from `go-kit`. 
--- Makefile | 15 + internal/state/indexer/metrics.gen.go | 51 +++ internal/state/indexer/metrics.go | 52 +-- scripts/metricsgen/metricsgen.go | 334 ++++++++++++++++++ scripts/metricsgen/metricsgen_test.go | 259 ++++++++++++++ .../metricsgen/testdata/basic/metrics.gen.go | 30 ++ scripts/metricsgen/testdata/basic/metrics.go | 11 + .../testdata/commented/metrics.gen.go | 30 ++ .../metricsgen/testdata/commented/metrics.go | 11 + .../metricsgen/testdata/tags/metrics.gen.go | 54 +++ scripts/metricsgen/testdata/tags/metrics.go | 12 + 11 files changed, 809 insertions(+), 50 deletions(-) create mode 100644 internal/state/indexer/metrics.gen.go create mode 100644 scripts/metricsgen/metricsgen.go create mode 100644 scripts/metricsgen/metricsgen_test.go create mode 100644 scripts/metricsgen/testdata/basic/metrics.gen.go create mode 100644 scripts/metricsgen/testdata/basic/metrics.go create mode 100644 scripts/metricsgen/testdata/commented/metrics.gen.go create mode 100644 scripts/metricsgen/testdata/commented/metrics.go create mode 100644 scripts/metricsgen/testdata/tags/metrics.gen.go create mode 100644 scripts/metricsgen/testdata/tags/metrics.go diff --git a/Makefile b/Makefile index a0b970f762..cd9380768d 100644 --- a/Makefile +++ b/Makefile @@ -251,6 +251,21 @@ mockery: go generate -run="./scripts/mockery_generate.sh" ./... .PHONY: mockery +############################################################################### +### Metrics ### +############################################################################### + +metrics: testdata-metrics + go generate -run="scripts/metricsgen" ./... +.PHONY: metrics + + # By convention, the go tool ignores subdirectories of directories named + # 'testdata'. This command invokes the generate command on the folder directly + # to avoid this. 
+testdata-metrics: + ls ./scripts/metricsgen/testdata | xargs -I{} go generate -run="scripts/metricsgen" ./scripts/metricsgen/testdata/{} +.PHONY: testdata-metrics + ############################################################################### ### Local testnet using docker ### ############################################################################### diff --git a/internal/state/indexer/metrics.gen.go b/internal/state/indexer/metrics.gen.go new file mode 100644 index 0000000000..8b079d8d5c --- /dev/null +++ b/internal/state/indexer/metrics.gen.go @@ -0,0 +1,51 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package indexer + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + BlockEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_events_seconds", + Help: "Latency for indexing block events.", + }, labels).With(labelsAndValues...), + TxEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "tx_events_seconds", + Help: "Latency for indexing transaction events.", + }, labels).With(labelsAndValues...), + BlocksIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "blocks_indexed", + Help: "Number of complete blocks indexed.", + }, labels).With(labelsAndValues...), + TransactionsIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "transactions_indexed", + Help: "Number of transactions indexed.", + }, 
labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + BlockEventsSeconds: discard.NewHistogram(), + TxEventsSeconds: discard.NewHistogram(), + BlocksIndexed: discard.NewCounter(), + TransactionsIndexed: discard.NewCounter(), + } +} diff --git a/internal/state/indexer/metrics.go b/internal/state/indexer/metrics.go index aa64a4bb2b..0b92b879e3 100644 --- a/internal/state/indexer/metrics.go +++ b/internal/state/indexer/metrics.go @@ -2,12 +2,10 @@ package indexer import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - - prometheus "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) +//go:generate go run github.com/tendermint/tendermint/scripts/metricsgen -struct=Metrics + // MetricsSubsystem is a the subsystem label for the indexer package. const MetricsSubsystem = "indexer" @@ -25,49 +23,3 @@ type Metrics struct { // Number of transactions indexed. TransactionsIndexed metrics.Counter } - -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). 
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - BlockEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_events_seconds", - Help: "Latency for indexing block events.", - }, labels).With(labelsAndValues...), - TxEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "tx_events_seconds", - Help: "Latency for indexing transaction events.", - }, labels).With(labelsAndValues...), - BlocksIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "blocks_indexed", - Help: "Number of complete blocks indexed.", - }, labels).With(labelsAndValues...), - TransactionsIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "transactions_indexed", - Help: "Number of transactions indexed.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns an indexer metrics stub that discards all samples. -func NopMetrics() *Metrics { - return &Metrics{ - BlockEventsSeconds: discard.NewHistogram(), - TxEventsSeconds: discard.NewHistogram(), - BlocksIndexed: discard.NewCounter(), - TransactionsIndexed: discard.NewCounter(), - } -} diff --git a/scripts/metricsgen/metricsgen.go b/scripts/metricsgen/metricsgen.go new file mode 100644 index 0000000000..70cb36a773 --- /dev/null +++ b/scripts/metricsgen/metricsgen.go @@ -0,0 +1,334 @@ +// metricsgen is a code generation tool for creating constructors for Tendermint +// metrics types. 
+package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "go/types" + "io" + "io/fs" + "log" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + "text/template" +) + +func init() { + flag.Usage = func() { + fmt.Fprintf(os.Stderr, `Usage: %[1]s -struct + +Generate constructors for the metrics type specified by -struct contained in +the current directory. The tool creates a new file in the current directory +containing the generated code. + +Options: +`, filepath.Base(os.Args[0])) + flag.PrintDefaults() + } +} + +const metricsPackageName = "github.com/go-kit/kit/metrics" + +const ( + metricNameTag = "metrics_name" + labelsTag = "metrics_labels" + bucketTypeTag = "metrics_buckettype" + bucketSizeTag = "metrics_bucketsizes" +) + +var ( + dir = flag.String("dir", ".", "Path to the directory containing the target package") + strct = flag.String("struct", "Metrics", "Struct to parse for metrics") +) + +var bucketType = map[string]string{ + "exprange": "stdprometheus.ExponentialBucketsRange", + "exp": "stdprometheus.ExponentialBuckets", + "lin": "stdprometheus.LinearBuckets", +} + +var tmpl = template.Must(template.New("tmpl").Parse(`// Code generated by metricsgen. DO NOT EDIT. 
+ +package {{ .Package }} + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + {{ range $metric := .ParsedMetrics }} + {{- $metric.FieldName }}: prometheus.New{{ $metric.TypeName }}From(stdprometheus.{{$metric.TypeName }}Opts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "{{$metric.MetricName }}", + Help: "{{ $metric.Description }}", + {{ if ne $metric.HistogramOptions.BucketType "" }} + Buckets: {{ $metric.HistogramOptions.BucketType }}({{ $metric.HistogramOptions.BucketSizes }}), + {{ else if ne $metric.HistogramOptions.BucketSizes "" }} + Buckets: []float64{ {{ $metric.HistogramOptions.BucketSizes }} }, + {{ end }} + {{- if eq (len $metric.Labels) 0 }} + }, labels).With(labelsAndValues...), + {{ else }} + }, append(labels, {{$metric.Labels | printf "%q" }})).With(labelsAndValues...), + {{- end }} + {{- end }} + } +} + + +func NopMetrics() *Metrics { + return &Metrics{ + {{- range $metric := .ParsedMetrics }} + {{ $metric.FieldName }}: discard.New{{ $metric.TypeName }}(), + {{- end }} + } +} +`)) + +// ParsedMetricField is the data parsed for a single field of a metric struct. +type ParsedMetricField struct { + TypeName string + FieldName string + MetricName string + Description string + Labels string + + HistogramOptions HistogramOpts +} + +type HistogramOpts struct { + BucketType string + BucketSizes string +} + +// TemplateData is all of the data required for rendering a metric file template. 
+type TemplateData struct { + Package string + ParsedMetrics []ParsedMetricField +} + +func main() { + flag.Parse() + if *strct == "" { + log.Fatal("You must specify a non-empty -struct") + } + td, err := ParseMetricsDir(".", *strct) + if err != nil { + log.Fatalf("Parsing file: %v", err) + } + out := filepath.Join(*dir, "metrics.gen.go") + f, err := os.Create(out) + if err != nil { + log.Fatalf("Opening file: %v", err) + } + err = GenerateMetricsFile(f, td) + if err != nil { + log.Fatalf("Generating code: %v", err) + } +} +func ignoreTestFiles(f fs.FileInfo) bool { + return !strings.Contains(f.Name(), "_test.go") +} + +// ParseMetricsDir parses the dir and scans for a struct matching structName, +// ignoring all test files. ParseMetricsDir iterates the fields of the metrics +// struct and builds a TemplateData using the data obtained from the abstract syntax tree. +func ParseMetricsDir(dir string, structName string) (TemplateData, error) { + fs := token.NewFileSet() + d, err := parser.ParseDir(fs, dir, ignoreTestFiles, parser.ParseComments) + if err != nil { + return TemplateData{}, err + } + if len(d) > 1 { + return TemplateData{}, fmt.Errorf("multiple packages found in %s", dir) + } + if len(d) == 0 { + return TemplateData{}, fmt.Errorf("no go pacakges found in %s", dir) + } + + // Grab the package name. + var pkgName string + var pkg *ast.Package + for pkgName, pkg = range d { + } + td := TemplateData{ + Package: pkgName, + } + // Grab the metrics struct + m, mPkgName, err := findMetricsStruct(pkg.Files, structName) + if err != nil { + return TemplateData{}, err + } + for _, f := range m.Fields.List { + if !isMetric(f.Type, mPkgName) { + continue + } + pmf := parseMetricField(f) + td.ParsedMetrics = append(td.ParsedMetrics, pmf) + } + + return td, err +} + +// GenerateMetricsFile executes the metrics file template, writing the result +// into the io.Writer. 
+func GenerateMetricsFile(w io.Writer, td TemplateData) error { + b := []byte{} + buf := bytes.NewBuffer(b) + err := tmpl.Execute(buf, td) + if err != nil { + return err + } + b, err = format.Source(buf.Bytes()) + if err != nil { + return err + } + _, err = io.Copy(w, bytes.NewBuffer(b)) + if err != nil { + return err + } + return nil +} + +func findMetricsStruct(files map[string]*ast.File, structName string) (*ast.StructType, string, error) { + var ( + st *ast.StructType + ) + for _, file := range files { + mPkgName, err := extractMetricsPackageName(file.Imports) + if err != nil { + return nil, "", fmt.Errorf("unable to determine metrics package name: %v", err) + } + if !ast.FilterFile(file, func(name string) bool { + return name == structName + }) { + continue + } + ast.Inspect(file, func(n ast.Node) bool { + switch f := n.(type) { + case *ast.TypeSpec: + if f.Name.Name == structName { + var ok bool + st, ok = f.Type.(*ast.StructType) + if !ok { + err = fmt.Errorf("found identifier for %q of wrong type", structName) + } + } + return false + default: + return true + } + }) + if err != nil { + return nil, "", err + } + if st != nil { + return st, mPkgName, nil + } + } + return nil, "", fmt.Errorf("target struct %q not found in dir", structName) +} + +func parseMetricField(f *ast.Field) ParsedMetricField { + var comment string + if f.Doc != nil { + for _, c := range f.Doc.List { + comment += strings.TrimPrefix(c.Text, "// ") + } + } + pmf := ParsedMetricField{ + Description: comment, + MetricName: extractFieldName(f.Names[0].String(), f.Tag), + FieldName: f.Names[0].String(), + TypeName: extractTypeName(f.Type), + Labels: extractLabels(f.Tag), + } + if pmf.TypeName == "Histogram" { + pmf.HistogramOptions = extractHistogramOptions(f.Tag) + } + return pmf +} + +func extractTypeName(e ast.Expr) string { + return strings.TrimPrefix(path.Ext(types.ExprString(e)), ".") +} + +func isMetric(e ast.Expr, mPkgName string) bool { + return strings.Contains(types.ExprString(e), 
fmt.Sprintf("%s.", mPkgName)) +} + +func extractLabels(bl *ast.BasicLit) string { + if bl != nil { + t := reflect.StructTag(strings.Trim(bl.Value, "`")) + if v := t.Get(labelsTag); v != "" { + return v + } + } + return "" +} + +func extractFieldName(name string, tag *ast.BasicLit) string { + if tag != nil { + t := reflect.StructTag(strings.Trim(tag.Value, "`")) + if v := t.Get(metricNameTag); v != "" { + return v + } + } + return toSnakeCase(name) +} + +func extractHistogramOptions(tag *ast.BasicLit) HistogramOpts { + h := HistogramOpts{} + if tag != nil { + t := reflect.StructTag(strings.Trim(tag.Value, "`")) + if v := t.Get(bucketTypeTag); v != "" { + h.BucketType = bucketType[v] + } + if v := t.Get(bucketSizeTag); v != "" { + h.BucketSizes = v + } + } + return h +} + +func extractMetricsPackageName(imports []*ast.ImportSpec) (string, error) { + for _, i := range imports { + u, err := strconv.Unquote(i.Path.Value) + if err != nil { + return "", err + } + if u == metricsPackageName { + if i.Name != nil { + return i.Name.Name, nil + } + return path.Base(u), nil + } + } + return "", nil +} + +var capitalChange = regexp.MustCompile("([a-z0-9])([A-Z])") + +func toSnakeCase(str string) string { + snake := capitalChange.ReplaceAllString(str, "${1}_${2}") + return strings.ToLower(snake) +} diff --git a/scripts/metricsgen/metricsgen_test.go b/scripts/metricsgen/metricsgen_test.go new file mode 100644 index 0000000000..83251e651b --- /dev/null +++ b/scripts/metricsgen/metricsgen_test.go @@ -0,0 +1,259 @@ +package main_test + +import ( + "bytes" + "fmt" + "go/parser" + "go/token" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + metricsgen "github.com/tendermint/tendermint/scripts/metricsgen" +) + +const testDataDir = "./testdata" + +func TestSimpleTemplate(t *testing.T) { + m := metricsgen.ParsedMetricField{ + TypeName: "Histogram", + FieldName: "MyMetric", + MetricName: "request_count", + Description: "how many 
requests were made since the start of the process", + Labels: "first, second, third", + } + td := metricsgen.TemplateData{ + Package: "mypack", + ParsedMetrics: []metricsgen.ParsedMetricField{m}, + } + b := bytes.NewBuffer([]byte{}) + err := metricsgen.GenerateMetricsFile(b, td) + if err != nil { + t.Fatalf("unable to parse template %v", err) + } +} + +func TestFromData(t *testing.T) { + infos, err := ioutil.ReadDir(testDataDir) + if err != nil { + t.Fatalf("unable to open file %v", err) + } + for _, dir := range infos { + t.Run(dir.Name(), func(t *testing.T) { + if !dir.IsDir() { + t.Fatalf("expected file %s to be directory", dir.Name()) + } + dirName := path.Join(testDataDir, dir.Name()) + pt, err := metricsgen.ParseMetricsDir(dirName, "Metrics") + if err != nil { + t.Fatalf("unable to parse from dir %q: %v", dir, err) + } + outFile := path.Join(dirName, "out.go") + if err != nil { + t.Fatalf("unable to open file %s: %v", outFile, err) + } + of, err := os.Create(outFile) + if err != nil { + t.Fatalf("unable to open file %s: %v", outFile, err) + } + defer os.Remove(outFile) + if err := metricsgen.GenerateMetricsFile(of, pt); err != nil { + t.Fatalf("unable to generate metrics file %s: %v", outFile, err) + } + if _, err := parser.ParseFile(token.NewFileSet(), outFile, nil, parser.AllErrors); err != nil { + t.Fatalf("unable to parse generated file %s: %v", outFile, err) + } + bNew, err := ioutil.ReadFile(outFile) + if err != nil { + t.Fatalf("unable to read generated file %s: %v", outFile, err) + } + goldenFile := path.Join(dirName, "metrics.gen.go") + bOld, err := ioutil.ReadFile(goldenFile) + if err != nil { + t.Fatalf("unable to read file %s: %v", goldenFile, err) + } + if !bytes.Equal(bNew, bOld) { + t.Fatalf("newly generated code in file %s does not match golden file %s\n"+ + "if the output of the metricsgen tool is expected to change run the following make target: \n"+ + "\tmake metrics", outFile, goldenFile) + } + }) + } +} + +func TestParseMetricsStruct(t 
*testing.T) { + const pkgName = "mypkg" + metricsTests := []struct { + name string + shouldError bool + metricsStruct string + expected metricsgen.TemplateData + }{ + { + name: "basic", + metricsStruct: `type Metrics struct { + myGauge metrics.Gauge + }`, + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Gauge", + FieldName: "myGauge", + MetricName: "my_gauge", + }, + }, + }, + }, + { + name: "histogram", + metricsStruct: "type Metrics struct {\n" + + "myHistogram metrics.Histogram `metrics_buckettype:\"exp\" metrics_bucketsizes:\"1, 100, .8\"`\n" + + "}", + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Histogram", + FieldName: "myHistogram", + MetricName: "my_histogram", + + HistogramOptions: metricsgen.HistogramOpts{ + BucketType: "stdprometheus.ExponentialBuckets", + BucketSizes: "1, 100, .8", + }, + }, + }, + }, + }, + { + name: "labeled name", + metricsStruct: "type Metrics struct {\n" + + "myCounter metrics.Counter `metrics_name:\"new_name\"`\n" + + "}", + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Counter", + FieldName: "myCounter", + MetricName: "new_name", + }, + }, + }, + }, + { + name: "metric labels", + metricsStruct: "type Metrics struct {\n" + + "myCounter metrics.Counter `metrics_labels:\"label1, label2\"`\n" + + "}", + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Counter", + FieldName: "myCounter", + MetricName: "my_counter", + Labels: "label1, label2", + }, + }, + }, + }, + { + name: "ignore non-metric field", + metricsStruct: `type Metrics struct { + myCounter metrics.Counter + nonMetric string + }`, + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Counter", + FieldName: 
"myCounter", + MetricName: "my_counter", + }, + }, + }, + }, + } + for _, testCase := range metricsTests { + t.Run(testCase.name, func(t *testing.T) { + dir, err := os.MkdirTemp(os.TempDir(), "metricsdir") + if err != nil { + t.Fatalf("unable to create directory: %v", err) + } + defer os.Remove(dir) + f, err := os.Create(filepath.Join(dir, "metrics.go")) + if err != nil { + t.Fatalf("unable to open file: %v", err) + } + pkgLine := fmt.Sprintf("package %s\n", pkgName) + importClause := ` + import( + "github.com/go-kit/kit/metrics" + ) + ` + + _, err = io.WriteString(f, pkgLine) + require.NoError(t, err) + _, err = io.WriteString(f, importClause) + require.NoError(t, err) + _, err = io.WriteString(f, testCase.metricsStruct) + require.NoError(t, err) + + td, err := metricsgen.ParseMetricsDir(dir, "Metrics") + if testCase.shouldError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, testCase.expected, td) + } + }) + } +} + +func TestParseAliasedMetric(t *testing.T) { + aliasedData := ` + package mypkg + + import( + mymetrics "github.com/go-kit/kit/metrics" + ) + type Metrics struct { + m mymetrics.Gauge + } + ` + dir, err := os.MkdirTemp(os.TempDir(), "metricsdir") + if err != nil { + t.Fatalf("unable to create directory: %v", err) + } + defer os.Remove(dir) + f, err := os.Create(filepath.Join(dir, "metrics.go")) + if err != nil { + t.Fatalf("unable to open file: %v", err) + } + _, err = io.WriteString(f, aliasedData) + if err != nil { + t.Fatalf("unable to write to file: %v", err) + } + td, err := metricsgen.ParseMetricsDir(dir, "Metrics") + require.NoError(t, err) + + expected := + metricsgen.TemplateData{ + Package: "mypkg", + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Gauge", + FieldName: "m", + MetricName: "m", + }, + }, + } + require.Equal(t, expected, td) +} diff --git a/scripts/metricsgen/testdata/basic/metrics.gen.go b/scripts/metricsgen/testdata/basic/metrics.gen.go new file mode 100644 index 
0000000000..d541cb2dbb --- /dev/null +++ b/scripts/metricsgen/testdata/basic/metrics.gen.go @@ -0,0 +1,30 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package basic + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "height", + Help: "simple metric that tracks the height of the chain.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + Height: discard.NewGauge(), + } +} diff --git a/scripts/metricsgen/testdata/basic/metrics.go b/scripts/metricsgen/testdata/basic/metrics.go new file mode 100644 index 0000000000..1a361f90f6 --- /dev/null +++ b/scripts/metricsgen/testdata/basic/metrics.go @@ -0,0 +1,11 @@ +package basic + +import "github.com/go-kit/kit/metrics" + +//go:generate go run ../../../../scripts/metricsgen -struct=Metrics + +// Metrics contains metrics exposed by this package. +type Metrics struct { + // simple metric that tracks the height of the chain. + Height metrics.Gauge +} diff --git a/scripts/metricsgen/testdata/commented/metrics.gen.go b/scripts/metricsgen/testdata/commented/metrics.gen.go new file mode 100644 index 0000000000..038da3d463 --- /dev/null +++ b/scripts/metricsgen/testdata/commented/metrics.gen.go @@ -0,0 +1,30 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package commented + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Field: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "field", + Help: "Height of the chain.We expect multi-line comments to parse correctly.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + Field: discard.NewGauge(), + } +} diff --git a/scripts/metricsgen/testdata/commented/metrics.go b/scripts/metricsgen/testdata/commented/metrics.go new file mode 100644 index 0000000000..174f1e2333 --- /dev/null +++ b/scripts/metricsgen/testdata/commented/metrics.go @@ -0,0 +1,11 @@ +package commented + +import "github.com/go-kit/kit/metrics" + +//go:generate go run ../../../../scripts/metricsgen -struct=Metrics + +type Metrics struct { + // Height of the chain. + // We expect multi-line comments to parse correctly. + Field metrics.Gauge +} diff --git a/scripts/metricsgen/testdata/tags/metrics.gen.go b/scripts/metricsgen/testdata/tags/metrics.gen.go new file mode 100644 index 0000000000..7ac292d3c4 --- /dev/null +++ b/scripts/metricsgen/testdata/tags/metrics.gen.go @@ -0,0 +1,54 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package tags + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + WithLabels: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "with_labels", + Help: "", + }, append(labels, "step,time")).With(labelsAndValues...), WithExpBuckets: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "with_exp_buckets", + Help: "", + + Buckets: stdprometheus.ExponentialBuckets(.1, 100, 8), + }, labels).With(labelsAndValues...), + WithBuckets: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "with_buckets", + Help: "", + + Buckets: []float64{1, 2, 3, 4, 5}, + }, labels).With(labelsAndValues...), + Named: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "metric_with_name", + Help: "", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + WithLabels: discard.NewCounter(), + WithExpBuckets: discard.NewHistogram(), + WithBuckets: discard.NewHistogram(), + Named: discard.NewCounter(), + } +} diff --git a/scripts/metricsgen/testdata/tags/metrics.go b/scripts/metricsgen/testdata/tags/metrics.go new file mode 100644 index 0000000000..8562dcf437 --- /dev/null +++ b/scripts/metricsgen/testdata/tags/metrics.go @@ -0,0 +1,12 @@ +package tags + +import "github.com/go-kit/kit/metrics" + +//go:generate go run ../../../../scripts/metricsgen -struct=Metrics + +type Metrics struct { + WithLabels metrics.Counter `metrics_labels:"step,time"` + 
WithExpBuckets metrics.Histogram `metrics_buckettype:"exp" metrics_bucketsizes:".1,100,8"` + WithBuckets metrics.Histogram `metrics_bucketsizes:"1, 2, 3, 4, 5"` + Named metrics.Counter `metrics_name:"metric_with_name"` +} From 412a77915ddb4c7eeef8e252827d16ef31ea1242 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 May 2022 08:25:15 -0400 Subject: [PATCH 015/203] build(deps): Bump github.com/golangci/golangci-lint (#8490) --- go.mod | 64 +++++++++++++----------- go.sum | 150 +++++++++++++++++++++++++++++++-------------------------- 2 files changed, 119 insertions(+), 95 deletions(-) diff --git a/go.mod b/go.mod index 69054d8bcc..6922847a33 100644 --- a/go.mod +++ b/go.mod @@ -40,18 +40,26 @@ require ( require ( github.com/creachadair/atomicfile v0.2.6 github.com/creachadair/taskgroup v0.3.2 - github.com/golangci/golangci-lint v1.45.2 + github.com/golangci/golangci-lint v1.46.0 github.com/google/go-cmp v0.5.8 github.com/vektra/mockery/v2 v2.12.2 gotest.tools v2.2.0+incompatible ) -require github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect +require ( + github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect + github.com/firefart/nonamedreturns v1.0.1 // indirect + github.com/lufeee/execinquery v1.0.0 // indirect + github.com/pelletier/go-toml/v2 v2.0.0 // indirect + github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect + golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect +) require ( 4d63.com/gochecknoglobals v0.1.0 // indirect - github.com/Antonboom/errname v0.1.5 // indirect - github.com/Antonboom/nilnil v0.1.0 // indirect + github.com/Antonboom/errname v0.1.6 // indirect + github.com/Antonboom/nilnil v0.1.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/DataDog/zstd v1.4.1 // indirect github.com/Djarvur/go-err113 
v0.0.0-20210108212216-aea10b59be24 // indirect @@ -64,16 +72,16 @@ require ( github.com/ashanbrown/makezero v1.1.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bkielbasa/cyclop v1.2.0 // indirect - github.com/blizzy78/varnamelen v0.6.1 // indirect + github.com/blizzy78/varnamelen v0.8.0 // indirect github.com/bombsimon/wsl/v3 v3.3.0 // indirect - github.com/breml/bidichk v0.2.2 // indirect - github.com/breml/errchkjson v0.2.3 // indirect + github.com/breml/bidichk v0.2.3 // indirect + github.com/breml/errchkjson v0.3.0 // indirect github.com/butuzov/ireturn v0.1.1 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/charithe/durationcheck v0.0.9 // indirect - github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect + github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect github.com/containerd/continuity v0.2.1 // indirect github.com/creachadair/tomledit v0.0.19 github.com/daixiang0/gci v0.3.3 // indirect @@ -92,9 +100,9 @@ require ( github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/fatih/color v1.13.0 // indirect github.com/fatih/structtag v1.2.0 // indirect - github.com/fsnotify/fsnotify v1.5.1 // indirect - github.com/fzipp/gocyclo v0.4.0 // indirect - github.com/go-critic/go-critic v0.6.2 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fzipp/gocyclo v0.5.1 // indirect + github.com/go-critic/go-critic v0.6.3 // indirect github.com/go-toolsmith/astcast v1.0.0 // indirect github.com/go-toolsmith/astcopy v1.0.0 // indirect github.com/go-toolsmith/astequal v1.0.1 // indirect @@ -108,7 +116,7 @@ require ( github.com/golang/snappy v0.0.3 // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect - github.com/golangci/go-misc 
v0.0.0-20180628070357-927a3d87b613 // indirect + github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect @@ -134,10 +142,10 @@ require ( github.com/julz/importas v0.1.0 // indirect github.com/kisielk/errcheck v1.6.0 // indirect github.com/kisielk/gotool v1.0.0 // indirect - github.com/kulti/thelper v0.5.1 // indirect + github.com/kulti/thelper v0.6.2 // indirect github.com/kunwardeep/paralleltest v1.0.3 // indirect github.com/kyoh86/exportloopref v0.1.8 // indirect - github.com/ldez/gomoddirectives v0.2.2 // indirect + github.com/ldez/gomoddirectives v0.2.3 // indirect github.com/ldez/tagliatelle v0.3.1 // indirect github.com/leonklingele/grouper v1.1.0 // indirect github.com/magiconair/properties v1.8.6 // indirect @@ -148,19 +156,19 @@ require ( github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/mbilski/exhaustivestruct v1.2.0 // indirect - github.com/mgechev/revive v1.1.4 // indirect + github.com/mgechev/revive v1.2.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moricho/tparallel v0.2.1 // indirect github.com/nakabonne/nestif v0.3.1 // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect github.com/nishanths/exhaustive v0.7.11 // indirect - github.com/nishanths/predeclared v0.2.1 // indirect + github.com/nishanths/predeclared v0.2.2 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runc v1.0.3 // indirect - 
github.com/pelletier/go-toml v1.9.4 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -168,17 +176,17 @@ require ( github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect - github.com/quasilyte/go-ruleguard v0.3.15 // indirect - github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 // indirect + github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a // indirect + github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect github.com/ryancurrah/gomodguard v1.2.3 // indirect github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect - github.com/securego/gosec/v2 v2.10.0 // indirect + github.com/securego/gosec/v2 v2.11.0 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/sivchari/containedctx v1.0.2 // indirect - github.com/sivchari/tenv v1.4.7 // indirect + github.com/sivchari/tenv v1.5.0 // indirect github.com/sonatard/noctx v0.0.1 // indirect github.com/sourcegraph/go-diff v0.6.1 // indirect github.com/spf13/afero v1.8.2 // indirect @@ -194,28 +202,28 @@ require ( github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tetafro/godot v1.4.11 // indirect github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect - github.com/tomarrell/wrapcheck/v2 v2.5.0 // indirect + github.com/tomarrell/wrapcheck/v2 v2.6.1 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect github.com/ultraware/funlen v0.0.3 // indirect github.com/ultraware/whitespace v0.0.5 // indirect 
github.com/uudashr/gocognit v1.0.5 // indirect github.com/yagipy/maintidx v1.0.0 // indirect - github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 // indirect + github.com/yeya24/promlinter v0.2.0 // indirect gitlab.com/bosi/decorder v0.2.1 // indirect go.etcd.io/bbolt v1.3.6 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect + golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.10 // indirect + golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - honnef.co/go/tools v0.2.2 // indirect - mvdan.cc/gofumpt v0.3.0 // indirect + honnef.co/go/tools v0.3.1 // indirect + mvdan.cc/gofumpt v0.3.1 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect diff --git a/go.sum b/go.sum index 22ebb51ba5..96f0e92e24 100644 --- a/go.sum +++ b/go.sum @@ -46,7 +46,6 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= cloud.google.com/go/firestore v1.6.1/go.mod 
h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= @@ -62,17 +61,17 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Antonboom/errname v0.1.5 h1:IM+A/gz0pDhKmlt5KSNTVAvfLMb+65RxavBXpRtCUEg= -github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo= -github.com/Antonboom/nilnil v0.1.0 h1:DLDavmg0a6G/F4Lt9t7Enrbgb3Oph6LnDE6YVsmTt74= -github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo= +github.com/Antonboom/errname v0.1.6 h1:LzIJZlyLOCSu51o3/t2n9Ck7PcoP9wdbrdaW6J8fX24= +github.com/Antonboom/errname v0.1.6/go.mod h1:7lz79JAnuoMNDAWE9MeeIr1/c/VpSUWatBv2FH9NYpI= +github.com/Antonboom/nilnil v0.1.1 h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q= +github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZR+xdJEaI= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
-github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -83,6 +82,8 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 h1:LAPPhJ4KR5Z8aKVZF5S48csJkxL5RMKmE/98fMs1u5M= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0/go.mod h1:LGOGuvEgCfCQsy3JF2tRmpGDpzA53iZfyGEWSPwQ6/4= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= @@ -148,14 +149,14 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= -github.com/blizzy78/varnamelen v0.6.1 h1:kttPCLzXFa+0nt++Cw9fb7GrSSM4KkyIAoX/vXsbuqA= -github.com/blizzy78/varnamelen v0.6.1/go.mod h1:zy2Eic4qWqjrxa60jG34cfL0VXcSwzUrIx68eJPb4Q8= 
+github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/breml/bidichk v0.2.2 h1:w7QXnpH0eCBJm55zGCTJveZEkQBt6Fs5zThIdA6qQ9Y= -github.com/breml/bidichk v0.2.2/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso= -github.com/breml/errchkjson v0.2.3 h1:97eGTmR/w0paL2SwfRPI1jaAZHaH/fXnxWTw2eEIqE0= -github.com/breml/errchkjson v0.2.3/go.mod h1:jZEATw/jF69cL1iy7//Yih8yp/mXp2CBoBr9GJwCAsY= +github.com/breml/bidichk v0.2.3 h1:qe6ggxpTfA8E75hdjWPZ581sY3a2lnl0IRxLQFelECI= +github.com/breml/bidichk v0.2.3/go.mod h1:8u2C6DnAy0g2cEq+k/A2+tr9O1s+vHGxWn0LTc70T2A= +github.com/breml/errchkjson v0.3.0 h1:YdDqhfqMT+I1vIxPSas44P+9Z9HzJwCeAzjB8PxP1xw= +github.com/breml/errchkjson v0.3.0/go.mod h1:9Cogkyv9gcT8HREpzi3TiqBxCqDzo8awa92zSDFcofU= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= @@ -187,8 +188,8 @@ github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cb github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk= github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI= -github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= +github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 
h1:tFXjAxje9thrTF4h57Ckik+scJjTWdwAtZqZPtOT48M= +github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4/go.mod h1:W8EnPSQ8Nv4fUjc/v1/8tHFqhuOJXnRub0dTfuAQktU= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -299,6 +300,8 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/firefart/nonamedreturns v1.0.1 h1:fSvcq6ZpK/uBAgJEGMvzErlzyM4NELLqqdTofVjVNag= +github.com/firefart/nonamedreturns v1.0.1/go.mod h1:D3dpIBojGGNh5UfElmwPu73SwDCm+VKhHYqwlNOk2uQ= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -309,14 +312,15 @@ github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnX github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod 
h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= -github.com/fzipp/gocyclo v0.4.0 h1:IykTnjwh2YLyYkGa0y92iTTEQcnyAz0r9zOo15EbJ7k= -github.com/fzipp/gocyclo v0.4.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/fzipp/gocyclo v0.5.1 h1:L66amyuYogbxl0j2U+vGqJXusPF2IkduvXLnYD5TFgw= +github.com/fzipp/gocyclo v0.5.1/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-critic/go-critic v0.6.2 h1:L5SDut1N4ZfsWZY0sH4DCrsHLHnhuuWak2wa165t9gs= -github.com/go-critic/go-critic v0.6.2/go.mod h1:td1s27kfmLpe5G/DPjlnFI7o1UCzePptwU7Az0V5iCM= +github.com/go-critic/go-critic v0.6.3 h1:abibh5XYBTASawfTQ0rA7dVtQT+6KzpGqb/J+DxRDaw= +github.com/go-critic/go-critic v0.6.3/go.mod h1:c6b3ZP1MQ7o6lPR7Rv3lEf7pYQUmAcx8ABHgdZCQt/k= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -418,12 +422,12 @@ github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5 github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/go-misc 
v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= +github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.45.2 h1:9I3PzkvscJkFAQpTQi5Ga0V4qWdJERajX1UZ7QqkW+I= -github.com/golangci/golangci-lint v1.45.2/go.mod h1:f20dpzMmUTRp+oYnX0OGjV1Au3Jm2JeI9yLqHq1/xsI= +github.com/golangci/golangci-lint v1.46.0 h1:uz9AtEcIP63FH+FIyuAXcQGVQO4vCUavEsMTJpPeD4s= +github.com/golangci/golangci-lint v1.46.0/go.mod h1:IJpcNOUfx/XLRwE95FHQ6QtbhYwwqcm0H5QkwUfF4ZE= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= @@ -655,15 +659,15 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.5.1 h1:Uf4CUekH0OvzQTFPrWkstJvXgm6pnNEtQu3HiqEkpB0= -github.com/kulti/thelper v0.5.1/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= +github.com/kulti/thelper v0.6.2 h1:K4xulKkwOCnT1CDms6Ex3uG1dvSMUUQe9zxgYQgbRXs= +github.com/kulti/thelper v0.6.2/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= github.com/kunwardeep/paralleltest v1.0.3 h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI= github.com/kunwardeep/paralleltest v1.0.3/go.mod 
h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= -github.com/ldez/gomoddirectives v0.2.2 h1:p9/sXuNFArS2RLc+UpYZSI4KQwGMEDWC/LbtF5OPFVg= -github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= +github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.3.1 h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKiM= github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= github.com/leonklingele/grouper v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg= @@ -677,7 +681,8 @@ github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ= github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lufeee/execinquery v1.0.0 h1:1XUTuLIVPDlFvUU3LXmmZwHDsolsxXnY67lzhpeqe0I= +github.com/lufeee/execinquery v1.0.0/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -718,8 +723,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5 github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.1.4 h1:sZOjY6GU35Kr9jKa/wsKSHgrFz8eASIB5i3tqWZMp0A= -github.com/mgechev/revive v1.1.4/go.mod h1:ZZq2bmyssGh8MSPz3VVziqRNIMYTJXzP8MUKG90vZ9A= +github.com/mgechev/revive v1.2.1 h1:GjFml7ZsoR0IrQ2E2YIvWFNS5GPDV7xNwvA5GM1HZC4= +github.com/mgechev/revive v1.2.1/go.mod h1:+Ro3wqY4vakcYNtkBWdZC7dBg1xSB6sp054wWwmeFm0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= @@ -738,8 +743,9 @@ github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eI github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= @@ -777,8 
+783,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nishanths/exhaustive v0.7.11 h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA= github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.2.1 h1:1TXtjmy4f3YCFjTxRd8zcFHOmoUir+gp0ESzjFzG2sw= -github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= +github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -832,10 +838,12 @@ github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 
v2.0.0 h1:P7Bq0SaI8nsexyay5UAyDo+ICWy5MQPgEZ5+l8JQTKo= +github.com/pelletier/go-toml/v2 v2.0.0/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= @@ -892,19 +900,23 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.15 h1:iWYzp1z72IlXTioET0+XI6SjQdPfMGfuAiZiKznOt7g= -github.com/quasilyte/go-ruleguard v0.3.15/go.mod h1:NhuWhnlVEM1gT1A4VJHYfy9MuYSxxwHgxWoPsn9llB4= +github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a h1:sWFavxtIctGrVs5SYZ5Ml1CvrDAs8Kf5kx2PI3C41dA= +github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a/go.mod h1:VMX+OnnSw4LicdiEGtRSD/1X8kW7GuEscjYNr4cOIT4= github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.12-0.20220101150716-969a394a9451/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.12/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.17/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.16/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.19/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= 
github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 h1:P4QPNn+TK49zJjXKERt/vyPbv/mCHB/zQ4flDYOMN+M= -github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM= +github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 h1:PDWGei+Rf2bBiuZIbZmM20J2ftEy9IeUCHA8HbQqed8= +github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/go-dbus v0.0.0-20121104212943-b7232d34b1d5/go.mod h1:+u151txRmLpwxBmpYn9z3d1sdJdjRPQpsXuYeY9jNls= +github.com/remyoudompheng/go-liblzma v0.0.0-20190506200333-81bf2d431b96/go.mod h1:90HvCY7+oHHUKkbeMCiHt1WuFR2/hPJ9QrljDG+v6ls= +github.com/remyoudompheng/go-misc v0.0.0-20190427085024-2d6ac652a50e/go.mod h1:80FQABjoFzZ2M5uEa6FUaJYEmqU2UOKojlFVak1UAwI= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
@@ -925,7 +937,6 @@ github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoL github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA= @@ -933,12 +944,12 @@ github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3 github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/securego/gosec/v2 v2.10.0 h1:l6BET4EzWtyUXCpY2v7N92v0DDCas0L7ngg3bpqbr8g= -github.com/securego/gosec/v2 v2.10.0/go.mod h1:PVq8Ewh/nCN8l/kKC6zrGXSr7m2NmEK6ITIAWMtIaA0= +github.com/securego/gosec/v2 v2.11.0 h1:+PDkpzR41OI2jrw1q6AdXZCbsNGNGT7pQjal0H0cArI= +github.com/securego/gosec/v2 v2.11.0/go.mod h1:SX8bptShuG8reGC0XS09+a4H2BoWSJi+fscA+Pulbpo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= +github.com/shirou/gopsutil/v3 
v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -950,8 +961,8 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI= github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= -github.com/sivchari/tenv v1.4.7 h1:FdTpgRlTue5eb5nXIYgS/lyVXSjugU8UUVDwhP1NLU8= -github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= +github.com/sivchari/tenv v1.5.0 h1:wxW0mFpKI6DIb3s6m1jCDYvkWXCskrimXMuGd0K/kSQ= +github.com/sivchari/tenv v1.5.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY= @@ -989,13 +1000,14 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= github.com/spf13/viper 
v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44= github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= +github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -1033,13 +1045,13 @@ github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= 
github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.5.0 h1:g27SGGHNoQdvHz4KZA9o4v09RcWzylR+b1yueE5ECiw= -github.com/tomarrell/wrapcheck/v2 v2.5.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY= +github.com/tomarrell/wrapcheck/v2 v2.6.1 h1:Cf4a/iwuMp9s7kKrh74GTgijRVim0wEpKjgAsT7Wctw= +github.com/tomarrell/wrapcheck/v2 v2.6.1/go.mod h1:Eo+Opt6pyMW1b6cNllOcDSSoHO0aTJ+iF6BfCUbHltA= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s= github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= @@ -1069,8 +1081,8 @@ github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1z github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= -github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 h1:YAaOqqMTstELMMGblt6yJ/fcOt4owSYuw3IttMnKfAM= -github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= +github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= +github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= @@ -1152,7 +1164,7 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1168,7 +1180,10 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 h1:FR+oGxGfbQu1d+jglI3rCkjAjUnhRSZcUxr+DqlDLNo= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM= +golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image 
v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1385,11 +1400,9 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1400,14 +1413,17 @@ golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220403020550-483a9cbc67c0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -1435,6 +1451,7 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools 
v0.0.0-20190228203856-589c23e65e65/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1508,7 +1525,6 @@ golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4X golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1528,8 +1544,9 @@ golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a h1:ofrrl6c6NG5/IOSx/R1cyiQxxjqlur0h/TvbUhkH0II= +golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod 
h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1733,7 +1750,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= @@ -1765,10 +1781,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk= -honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4= -mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo= +honnef.co/go/tools v0.3.1 h1:1kJlrWJLkaGXgcaeosRXViwviqjI7nkBvU2+sZW0AYc= +honnef.co/go/tools v0.3.1/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70= +mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= +mvdan.cc/gofumpt v0.3.1/go.mod 
h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= From 9dae97d845f1dffb134762e9b39e55d8009fe677 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Tue, 10 May 2022 15:42:22 +0200 Subject: [PATCH 016/203] RFC 016: Node Architecture (#8285) --- docs/rfc/README.md | 5 +- docs/rfc/images/node-dependency-tree.svg | 3 + .../rfc-009-consensus-parameter-upgrades.md | 6 +- docs/rfc/rfc-016-node-architecture.md | 83 +++++++++++++++++++ 4 files changed, 91 insertions(+), 6 deletions(-) create mode 100644 docs/rfc/images/node-dependency-tree.svg create mode 100644 docs/rfc/rfc-016-node-architecture.md diff --git a/docs/rfc/README.md b/docs/rfc/README.md index a83d802b8e..d944e72e78 100644 --- a/docs/rfc/README.md +++ b/docs/rfc/README.md @@ -53,10 +53,9 @@ sections. - [RFC-013: ABCI++](./rfc-013-abci++.md) - [RFC-014: Semantic Versioning](./rfc-014-semantic-versioning.md) - [RFC-015: ABCI++ Tx Mutation](./rfc-015-abci++-tx-mutation.md) +- [RFC-016: Node Architecture](./rfc-016-node-architecture.md) +- [RFC-017: ABCI++ Vote Extension Propagation](./rfc-017-abci++-vote-extension-propag.md) - [RFC-018: BLS Signature Aggregation Exploration](./rfc-018-bls-agg-exploration.md) - [RFC-019: Configuration File Versioning](./rfc-019-config-version.md) - -- [RFC-017: ABCI++ Vote Extension Propagation](./rfc-017-abci++-vote-extension-propag.md) - diff --git a/docs/rfc/images/node-dependency-tree.svg b/docs/rfc/images/node-dependency-tree.svg new file mode 100644 index 0000000000..6d95e0e155 --- /dev/null +++ b/docs/rfc/images/node-dependency-tree.svg @@ -0,0 +1,3 @@ + + +
Node
Node
Statesync
Statesync
Blocksync
Blocksync
Consensus
Consensus
Mempool
Mempool
Evidence
Evidence
Block Executor
Block Executor
Blockchain
Blockchain
Evidence
Evidence
PEX
PEX
Peer Store
Peer Store
Peer Networking
Peer Networking
RPC External
RPC External
ABCI Layer
ABCI Layer
Events System
Events System
RPC Internal
RPC Internal
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/rfc/rfc-009-consensus-parameter-upgrades.md b/docs/rfc/rfc-009-consensus-parameter-upgrades.md index 60be878df1..d5077840db 100644 --- a/docs/rfc/rfc-009-consensus-parameter-upgrades.md +++ b/docs/rfc/rfc-009-consensus-parameter-upgrades.md @@ -31,12 +31,12 @@ not reference the new parameters. Any nodes joining the network with the newer version of Tendermint will have the new consensus parameters. Tendermint will need to handle this case so that new versions of Tendermint with new consensus parameters can still validate old blocks correctly without having to do anything overly complex -or hacky. +or hacky. ### Allowing Developer-Defined Values and the `EndBlock` Problem When new consensus parameters are added, application developers may wish to set -values for them so that the developer-defined values may be used as soon as the +values for them so that the developer-defined values may be used as soon as the software upgrades. We do not currently have a clean mechanism for handling this. Consensus parameter updates are communicated from the application to Tendermint @@ -51,7 +51,7 @@ can take effect is height `H+1`. As of now, height `H` must run with the default ### Hash Compatibility -This section discusses possible solutions to the problem of maintaining backwards-compatibility +This section discusses possible solutions to the problem of maintaining backwards-compatibility of hashed parameters while adding new parameters. 
#### Never Hash Defaults diff --git a/docs/rfc/rfc-016-node-architecture.md b/docs/rfc/rfc-016-node-architecture.md new file mode 100644 index 0000000000..29098d2973 --- /dev/null +++ b/docs/rfc/rfc-016-node-architecture.md @@ -0,0 +1,83 @@ +# RFC 016: Node Architecture + +## Changelog + +- April 8, 2022: Initial draft (@cmwaters) +- April 15, 2022: Incorporation of feedback + +## Abstract + +The `node` package is the entry point into the Tendermint codebase, used both by the command line and programmatically to create the nodes that make up a network. The package has suffered the most from the evolution of the codebase, becoming bloated as developers clipped on their bits of code here and there to get whatever feature they wanted working. + +The decisions made at the node level have the biggest impact to simplifying the protocols within them, unlocking better internal designs and making Tendermint more intuitive to use and easier to understand from the outside. Work, in minor increments, has already begun on this section of the codebase. This document exists to spark forth the necessary discourse in a few related areas that will help the team to converge on the long term makeup of the node. + +## Discussion + +The following is a list of points of discussion around the architecture of the node: + +### Dependency Tree + +The node object is currently stuffed with every component that possibly exists within Tendermint. In the constructor, all objects are built and interlaid with one another in some awkward dance. My guiding principle is that the node should only be made up of the components that it wants to have direct control of throughout its life. The node is a service which currently has the purpose of starting other services up in a particular order and stopping them all when commanded to do so. However, there are many services which are not direct dependents i.e. the mempool and evidence services should only be working when the consensus service is running. 
I propose to form more of a hierarchical structure of dependents which forces us to be clear about the relations that one component has to the other. More concretely, I propose the following dependency tree: + +![node dependency tree](./images/node-dependency-tree.svg) + +Many of the further discussion topics circle back to this representation of the node. + +It's also important to distinguish two dimensions which may require different characteristics of the architecture. There is the starting and stopping of services and their general lifecycle management. What is the correct order of operations to starting a node for example. Then there is the question of the needs of the service during actual operation. Then there is the question of what resources each service needs access to during its operation. Some need to publish events, others need access to data stores, and so forth. + +An alternative model and one that perhaps better suits the latter of these dimensions is the notion of an internal message passing system. Either the events bus or p2p layer could serve as a viable transport. This would essentially allow all services to communicate with any other service and could perhaps provide a solution to the coordination problem (presented below) without a centralized coordinator. The other main advantage is that such a system would be more robust to disruptions and changes to the code which may make a hierarchical structure quickly outdated and suboptimal. The addition of message routing is an added complexity to implement, will increase the degree of asynchronicity in the system and may make it harder to debug problems that are across multiple services. + +### Coordination of State Advancing Mechanisms + +Advancement of state in Tendermint is simply defined in heights: If the node is at height n, how does it get to height n + 1 and so on. Based on this definition we have three components that help a node to advance in height: consensus, statesync and blocksync. 
The way these components behave currently is very tightly coupled to one another with references passed back and forth. My guiding principle is that each of these should be able to operate completely independently of each other, e.g. a node should be able to run solely blocksync indefinitely. There have been several ideas suggested towards improving this flow. I've been leaning strongly towards a centralized system, whereby an orchestrator (in this case the node) decides what services to start and stop. +In a decentralized message passing system, individual services make their decision based upon a "global" shared state i.e. if my height is less than 10 below the average peer height, I as consensus, should stop (knowing that blocksync has the same condition for starting). As the example illustrates, each mechanism will still need to be aware of the presence of other mechanisms. + +Both centralized and decentralized systems rely on the communication of the node's current height and a judgement on the height of the head of the chain. The latter, working out the head of the chain, is quite a difficult challenge as there is nothing preventing the node from acting maliciously and providing a different height. Currently both blocksync, consensus (and to a certain degree statesync), have parallel systems where peers communicate their height. This could be streamlined with the consensus (or even the p2p layer), broadcasting peer heights and either the node or the other state advancing mechanisms acting accordingly. + +Currently, when a node starts, it turns on every service that it is attached to. This means that while a node is syncing up by requesting blocks, it is also receiving transactions and votes, as well as snapshot and block requests. This is a needless use of bandwidth. 
An implementation of an orchestrator, regardless of whether the system is hierarchical or not, should look to be able to open and close channels dynamically and effectively broadcast which services it is running. Integrating this with service discovery may also lead to a better service to peers. + +The orchestrator allows for some degree of variability in how a node is constructed. Does it just run blocksync, shadowing the head of the chain and be highly available for querying. Does it rely on state sync at all? An important question that arises from this dynamism is we ideally want to encourage nodes to provide as much of their resources as possible so that there is a healthy amount of providers to consumers. Do we make all services compulsory or allow for them to be disabled? Arguably it's possible that a user forks the codebase and rips out the blocksync code because they want to reduce bandwidth so this is more a question of how easy do we want to make this for users. + +### Block Executor + +The block executor is an important component that is currently used by both consensus and blocksync to execute transactions and update application state. Principally, I think it should be the only component that can write (and possibly even read) the block and state stores, and we should clean up other direct dependencies on the storage engine if we can. This would mean: + +- The reactors Consensus, BlockSync and StateSync should all import the executor for advancing state ie. `ApplyBlock` and `BootstrapState`. +- Pruning should also be a concern of the block executor as well as `FinalizeBlock` and `Commit`. This can simplify consensus to focus just on the consensus part. + +### The Interprocess communication systems: RPC, P2P, ABCI, and Events + +The schematic supplied above shows the relations between the different services, the node, the block executor, and the storage layer. 
Represented as colored dots are the components responsible for different roles of interprocess communication (IPC). These components permeate throughout the code base, seeping into most services. What can provide powerful functionality on one hand can also become a twisted vine, creating messy corner cases and convoluting the protocols themselves. A lot of the thinking around +how we want our IPC systems to function has been summarised in this [RFC](./rfc-002-ipc-ecosystem.md). In this section, I'd like to focus the reader on the relation between the IPC and the node structure. An issue that has frequently risen is that the RPC has control of the components where it strikes me as being more logical for the component to dictate the information that is emitted/available and the knobs it wishes to expose. The RPC is also inextricably tied to the node instance and has situations where it is passed pointers directly to the storage engine and other components. + +I am currently convinced of the approach that the p2p layer takes and would like to see other IPC components follow suit. This would mean that the RPC and events system would be constructed in the node yet would pass the adequate methods to register endpoints and topics to the sub components. For example, + +```go +// Methods from the RPC and event bus that would be passed into the constructor of components like "consensus" +// NOTE: This is a hypothetical construction to convey the idea. An actual implementation may differ. +func RegisterRoute(path string, handler func(http.ResponseWriter, *http.Request)) + +func RegisterTopic(name string) EventPublisher + +type EventPublisher func (context.Context, types.EventData, []abci.Event) +``` + +This would give the components control to the information they want to expose and keep all relevant logic within that package. It accommodates more to a dynamic system where services can switch on and off. 
Each component would also receive access to the logger and metrics system for introspection and debuggability. + +#### IPC Rubric + +I'd like to aim to reach a state where we as a team have either an implicit or explicit rubric which can determine, in the event of some new need to communicate information, what tool it should use for doing this. In the case of inter node communication, this is obviously the p2p stack (with perhaps the exception of the light client). Metrics and logging also have clear usage patterns. RPC and the events system are less clear. The RPC is used for debugging data and fine tuned operator control as it is for general public querying and transaction submission. The RPC is also known to have been plumbed back into the application for historical queries. The events system, similarly, is used for consuming transaction events as it is for the testing of consensus state transitions. + +Principally, I think we should look to change our language away from what the actual transport is and more towards what it's being used for and to whom. We call it a peer to peer layer and not the underlying tcp connection. In the same way, we should look to split RPC into an operator interface (RPC Internal), a public interface (RPC External) and a bidirectional ABCI. + +### Separation of consumers and suppliers + +When a service such as blocksync is turned on, it automatically begins requesting blocks to verify and apply them as it also tries to serve them to other peers catching up. We should look to distinguish these two aspects: supplying of information and consuming of information in many of these components. More concretely, I'd suggest: + +- The blocksync and statesync service, i.e. supplying information for those trying to catch up should only start running once a node has caught up i.e. 
after running the blocksync and/or state sync *processes* +- The blocksync and state sync processes have defined termination clauses that inform the orchestrator when they are done and where they finished. + - One way of achieving this would be that every process both passes and returns the `State` object + - In some cases, a node may specify that it wants to run blocksync indefinitely. +- The mempool should also indicate whether it wants to receive transactions or to send them only (one-directional mempool) +- Similarly, the light client itself only requests information whereas the light client service (currently part of state sync) can do both. +- This distinction needs to be communicated in the p2p layer handshake itself but should also be changeable over the lifespan of the connection. From a4c3b5cab4185305cca4da6bccc920a015ffc792 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Tue, 10 May 2022 17:34:53 +0200 Subject: [PATCH 017/203] validate block before we persist it (#8493) --- internal/blocksync/reactor.go | 58 +++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 26 deletions(-) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 0bf0561d3c..bf9845370e 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -517,8 +517,16 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh // NOTE: We can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. 
- if err = state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit); err != nil { - err = fmt.Errorf("invalid last commit: %w", err) + err = state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit) + + if err == nil { + // validate the block before we persist it + err = r.blockExec.ValidateBlock(ctx, state, first) + } + + // If either of the checks failed we log the error and request for a new block + // at that height + if err != nil { r.logger.Error( err.Error(), "last_commit", second.LastCommit, @@ -545,37 +553,35 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh return } } - } else { - r.pool.PopRequest() + return + } - // TODO: batch saves so we do not persist to disk every block - r.store.SaveBlock(first, firstParts, second.LastCommit) + r.pool.PopRequest() - var err error + // TODO: batch saves so we do not persist to disk every block + r.store.SaveBlock(first, firstParts, second.LastCommit) - // TODO: Same thing for app - but we would need a way to get the hash - // without persisting the state. - state, err = r.blockExec.ApplyBlock(ctx, state, firstID, first) - if err != nil { - // TODO: This is bad, are we zombie? - panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) - } + // TODO: Same thing for app - but we would need a way to get the hash + // without persisting the state. 
+ state, err = r.blockExec.ApplyBlock(ctx, state, firstID, first) + if err != nil { + panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) + } - r.metrics.RecordConsMetrics(first) + r.metrics.RecordConsMetrics(first) - blocksSynced++ + blocksSynced++ - if blocksSynced%100 == 0 { - lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) - r.logger.Info( - "block sync rate", - "height", r.pool.height, - "max_peer_height", r.pool.MaxPeerHeight(), - "blocks/s", lastRate, - ) + if blocksSynced%100 == 0 { + lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) + r.logger.Info( + "block sync rate", + "height", r.pool.height, + "max_peer_height", r.pool.MaxPeerHeight(), + "blocks/s", lastRate, + ) - lastHundred = time.Now() - } + lastHundred = time.Now() } } } From c052181e328a12a612df77b78a1d5427ff5c4fa5 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Tue, 10 May 2022 12:48:13 -0400 Subject: [PATCH 018/203] consensus: add additional metrics for abci++ data (#8480) This pull request adds an additional set of metrics targeted at providing more visibility into `abci++`. 
The following set of metrics are added and exposed through the `metrics` endpoint: ``` tendermint_consensus_proposal_receive_count{chain_id="test-chain-IrF74Y",status="accepted"} 34 tendermint_consensus_proposal_create_count{chain_id="test-chain-IrF74Y"} 34 tendermint_consensus_vote_extension_receive_count{chain_id="test-chain-IrF74Y",status="accepted"} 34 tendermint_consensus_round_voting_power_percent{chain_id="test-chain-IrF74Y",vote_type="precommit"} 1 tendermint_consensus_round_voting_power_percent{chain_id="test-chain-IrF74Y",vote_type="prevote"} 1 tendermint_state_consensus_param_updates{chain_id="test-chain-IrF74Y"} 0 tendermint_state_validator_set_updates{chain_id="test-chain-IrF74Y"} 0 tendermint_consensus_late_votes{chain_id="test-chain-IrF74Y",vote_type="precommit"} 16 ``` This pull request also updates the `metrics.md` file to include some metrics that were previously missed. My hope is to generate the `metrics.md` file with a future version of the tool being architected in #8479 --- docs/nodes/metrics.md | 81 +++++++++++++++----------- internal/consensus/metrics.go | 105 ++++++++++++++++++++++++++++++++++ internal/consensus/state.go | 15 ++++- internal/state/execution.go | 4 ++ internal/state/metrics.go | 27 ++++++++- 5 files changed, 196 insertions(+), 36 deletions(-) diff --git a/docs/nodes/metrics.md b/docs/nodes/metrics.md index 1b2e9f0070..7b0622519b 100644 --- a/docs/nodes/metrics.md +++ b/docs/nodes/metrics.md @@ -18,40 +18,53 @@ Listen address can be changed in the config file (see The following metrics are available: -| **Name** | **Type** | **Tags** | **Description** | -| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- | -| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods | -| consensus_height | Gauge | | Height of the chain | -| consensus_validators | Gauge | | Number of validators | -| 
consensus_validators_power | Gauge | | Total voting power of all validators | -| consensus_validator_power | Gauge | | Voting power of the node if in the validator set | -| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator | -| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | -| consensus_missing_validators | Gauge | | Number of validators who did not sign | -| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators | -| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign | -| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators | -| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | -| consensus_rounds | Gauge | | Number of rounds | -| consensus_num_txs | Gauge | | Number of transactions | -| consensus_total_txs | Gauge | | Total number of transactions committed | -| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer | -| consensus_latest_block_height | gauge | | /status sync_info number | -| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) | -| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) | -| consensus_block_size_bytes | Gauge | | Block size in bytes | -| evidence_pool_num_evidence | Gauge | | Number of evidence in the evidence pool -| p2p_peers | Gauge | | Number of peers node's connected to | -| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer | -| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer | -| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer | -| p2p_num_txs | gauge | peer_id | 
number of transactions submitted by each peer_id | -| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer | -| mempool_size | Gauge | | Number of uncommitted transactions | -| mempool_tx_size_bytes | histogram | | transaction sizes in bytes | -| mempool_failed_txs | counter | | number of failed transactions | -| mempool_recheck_times | counter | | number of transactions rechecked in the mempool | -| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms | +| **Name** | **Type** | **Tags** | **Description** | +|-----------------------------------------|-----------|-----------------|--------------------------------------------------------------------------------------------------------------------------------------------| +| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods | +| consensus_height | Gauge | | Height of the chain | +| consensus_validators | Gauge | | Number of validators | +| consensus_validators_power | Gauge | | Total voting power of all validators | +| consensus_validator_power | Gauge | | Voting power of the node if in the validator set | +| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator | +| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | +| consensus_missing_validators | Gauge | | Number of validators who did not sign | +| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators | +| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign | +| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators | +| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | +| consensus_rounds | Gauge | | Number of rounds | +| consensus_num_txs | Gauge | 
| Number of transactions | +| consensus_total_txs | Gauge | | Total number of transactions committed | +| consensus_block_parts | Counter | peer_id | number of blockparts transmitted by peer | +| consensus_latest_block_height | gauge | | /status sync_info number | +| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) | +| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) | +| consensus_block_size_bytes | Gauge | | Block size in bytes | +| consensus_step_duration | Histogram | step | Histogram of durations for each step in the consensus protocol | +| consensus_block_gossip_receive_latency | Histogram | | Histogram of time taken to receive a block in seconds, measure between when a new block is first discovered to when the block is completed | +| consensus_block_gossip_parts_received | Counter | matches_current | Number of block parts received by the node | +| consensus_quorum_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum | +| consensus_full_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted | +| consensus_proposal_timestamp_difference | Histogram | | Difference between the timestamp in the proposal message and the local time of the validator at the time it received the message | +| consensus_vote_extension_receive_count | Counter | status | Number of vote extensions received | +| consensus_proposal_receive_count | Counter | status | Total number of proposals received by the node since process start | +| consensus_proposal_create_count | Counter | | Total number of proposals created by the node since process start | +| consensus_round_voting_power_percent | Gauge | vote_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round | +| 
consensus_late_votes | Counter | vote_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. | +| evidence_pool_num_evidence | Gauge | | Number of evidence in the evidence pool | +| p2p_peers | Gauge | | Number of peers node's connected to | +| p2p_peer_receive_bytes_total | Counter | peer_id, chID | number of bytes per channel received from a given peer | +| p2p_peer_send_bytes_total | Counter | peer_id, chID | number of bytes per channel sent to a given peer | +| p2p_peer_pending_send_bytes | Gauge | peer_id | number of pending bytes to be sent to a given peer | +| p2p_num_txs | Gauge | peer_id | number of transactions submitted by each peer_id | +| p2p_pending_send_bytes | Gauge | peer_id | amount of data pending to be sent to peer | +| mempool_size | Gauge | | Number of uncommitted transactions | +| mempool_tx_size_bytes | Histogram | | transaction sizes in bytes | +| mempool_failed_txs | Counter | | number of failed transactions | +| mempool_recheck_times | Counter | | number of transactions rechecked in the mempool | +| state_block_processing_time | Histogram | | time between BeginBlock and EndBlock in ms | +| state_consensus_param_updates | Counter | | number of consensus parameter updates returned by the application since process start | +| state_validator_set_updates | Counter | | number of validator set updates returned by the application since process start | ## Useful queries diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go index ed31ec636e..e5c0162f40 100644 --- a/internal/consensus/metrics.go +++ b/internal/consensus/metrics.go @@ -8,6 +8,7 @@ import ( "github.com/go-kit/kit/metrics/discard" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" prometheus "github.com/go-kit/kit/metrics/prometheus" @@ -103,6 +104,33 @@ 
type Metrics struct { // the proposal message and the local time of the validator at the time // that the validator received the message. ProposalTimestampDifference metrics.Histogram + + // VoteExtensionReceiveCount is the number of vote extensions received by this + // node. The metric is annotated by the status of the vote extension from the + // application, either 'accepted' or 'rejected'. + VoteExtensionReceiveCount metrics.Counter + + // ProposalReceiveCount is the total number of proposals received by this node + // since process start. + // The metric is annotated by the status of the proposal from the application, + // either 'accepted' or 'rejected'. + ProposalReceiveCount metrics.Counter + + // ProposalCreationCount is the total number of proposals created by this node + // since process start. + // The metric is annotated by the status of the proposal from the application, + // either 'accepted' or 'rejected'. + ProposalCreateCount metrics.Counter + + // RoundVotingPowerPercent is the percentage of the total voting power received + // with a round. The value begins at 0 for each round and approaches 1.0 as + // additional voting power is observed. The metric is labeled by vote type. + RoundVotingPowerPercent metrics.Gauge + + // LateVotes stores the number of votes that were received by this node that + // correspond to earlier heights and rounds than this node is currently + // in. + LateVotes metrics.Counter } // PrometheusMetrics returns Metrics build using Prometheus client library. 
@@ -280,6 +308,43 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { "Only calculated when a new block is proposed.", Buckets: []float64{-10, -.5, -.025, 0, .1, .5, 1, 1.5, 2, 10}, }, append(labels, "is_timely")).With(labelsAndValues...), + VoteExtensionReceiveCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "vote_extension_receive_count", + Help: "Number of vote extensions received by the node since process start, labeled by " + + "the application's response to VerifyVoteExtension, either accept or reject.", + }, append(labels, "status")).With(labelsAndValues...), + + ProposalReceiveCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "proposal_receive_count", + Help: "Number of vote proposals received by the node since process start, labeled by " + + "the application's response to ProcessProposal, either accept or reject.", + }, append(labels, "status")).With(labelsAndValues...), + + ProposalCreateCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "proposal_create_count", + Help: "Number of proposals created by the node since process start.", + }, labels).With(labelsAndValues...), + + RoundVotingPowerPercent: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "round_voting_power_percent", + Help: "Percentage of the total voting power received with a round. 
" + + "The value begins at 0 for each round and approaches 1.0 as additional " + + "voting power is observed.", + }, append(labels, "vote_type")).With(labelsAndValues...), + LateVotes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "late_votes", + Help: "Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in.", + }, append(labels, "vote_type")).With(labelsAndValues...), } } @@ -317,6 +382,11 @@ func NopMetrics() *Metrics { QuorumPrevoteDelay: discard.NewGauge(), FullPrevoteDelay: discard.NewGauge(), ProposalTimestampDifference: discard.NewHistogram(), + VoteExtensionReceiveCount: discard.NewCounter(), + ProposalReceiveCount: discard.NewCounter(), + ProposalCreateCount: discard.NewCounter(), + RoundVotingPowerPercent: discard.NewGauge(), + LateVotes: discard.NewCounter(), } } @@ -336,10 +406,45 @@ func (m *Metrics) MarkBlockGossipComplete() { m.BlockGossipReceiveLatency.Observe(time.Since(m.blockGossipStart).Seconds()) } +func (m *Metrics) MarkProposalProcessed(accepted bool) { + status := "accepted" + if !accepted { + status = "rejected" + } + m.ProposalReceiveCount.With("status", status).Add(1) +} + +func (m *Metrics) MarkVoteExtensionReceived(accepted bool) { + status := "accepted" + if !accepted { + status = "rejected" + } + m.VoteExtensionReceiveCount.With("status", status).Add(1) +} + +func (m *Metrics) MarkVoteReceived(vt tmproto.SignedMsgType, power, totalPower int64) { + p := float64(power) / float64(totalPower) + n := strings.ToLower(strings.TrimPrefix(vt.String(), "SIGNED_MSG_TYPE_")) + m.RoundVotingPowerPercent.With("vote_type", n).Add(p) +} + func (m *Metrics) MarkRound(r int32, st time.Time) { m.Rounds.Set(float64(r)) roundTime := time.Since(st).Seconds() m.RoundDuration.Observe(roundTime) + + pvt := tmproto.PrevoteType + pvn := strings.ToLower(strings.TrimPrefix(pvt.String(), "SIGNED_MSG_TYPE_")) + 
m.RoundVotingPowerPercent.With("vote_type", pvn).Set(0) + + pct := tmproto.PrecommitType + pcn := strings.ToLower(strings.TrimPrefix(pct.String(), "SIGNED_MSG_TYPE_")) + m.RoundVotingPowerPercent.With("vote_type", pcn).Set(0) +} + +func (m *Metrics) MarkLateVote(vt tmproto.SignedMsgType) { + n := strings.ToLower(strings.TrimPrefix(vt.String(), "SIGNED_MSG_TYPE_")) + m.LateVotes.With("vote_type", n).Add(1) } func (m *Metrics) MarkStep(s cstypes.RoundStepType) { diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 490801ad26..90efbab770 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -1334,6 +1334,7 @@ func (cs *State) defaultDecideProposal(ctx context.Context, height int64, round } else if block == nil { return } + cs.metrics.ProposalCreateCount.Add(1) blockParts, err = block.MakePartSet(types.BlockPartSizeBytes) if err != nil { cs.logger.Error("unable to create proposal block part set", "error", err) @@ -1531,6 +1532,7 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32 if err != nil { panic(fmt.Sprintf("ProcessProposal: %v", err)) } + cs.metrics.MarkProposalProcessed(isAppValid) // Vote nil if the Application rejected the block if !isAppValid { @@ -2297,6 +2299,10 @@ func (cs *State) addVote( "cs_height", cs.Height, ) + if vote.Height < cs.Height || (vote.Height == cs.Height && vote.Round < cs.Round) { + cs.metrics.MarkLateVote(vote.Type) + } + // A precommit for the previous height? 
// These come in while we wait timeoutCommit if vote.Height+1 == cs.Height && vote.Type == tmproto.PrecommitType { @@ -2337,7 +2343,9 @@ func (cs *State) addVote( // Verify VoteExtension if precommit if vote.Type == tmproto.PrecommitType { - if err = cs.blockExec.VerifyVoteExtension(ctx, vote); err != nil { + err := cs.blockExec.VerifyVoteExtension(ctx, vote) + cs.metrics.MarkVoteExtensionReceived(err == nil) + if err != nil { return false, err } } @@ -2348,6 +2356,11 @@ func (cs *State) addVote( // Either duplicate, or error upon cs.Votes.AddByIndex() return } + if vote.Round == cs.Round { + vals := cs.state.Validators + _, val := vals.GetByIndex(vote.ValidatorIndex) + cs.metrics.MarkVoteReceived(vote.Type, val.VotingPower, vals.TotalVotingPower()) + } if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil { return added, err diff --git a/internal/state/execution.go b/internal/state/execution.go index 06dfc0b5c5..cfacb816d7 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -247,6 +247,10 @@ func (blockExec *BlockExecutor) ApplyBlock( } if len(validatorUpdates) > 0 { blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) + blockExec.metrics.ValidatorSetUpdates.Add(1) + } + if finalizeBlockResponse.ConsensusParamUpdates != nil { + blockExec.metrics.ConsensusParamUpdates.Add(1) } // Update the state with the block and responses. diff --git a/internal/state/metrics.go b/internal/state/metrics.go index bcd713f5ff..1d4a13b941 100644 --- a/internal/state/metrics.go +++ b/internal/state/metrics.go @@ -17,6 +17,14 @@ const ( type Metrics struct { // Time between BeginBlock and EndBlock. BlockProcessingTime metrics.Histogram + + // ConsensusParamUpdates is the total number of times the application has + // updated the consensus params since process start. 
+ ConsensusParamUpdates metrics.Counter + + // ValidatorSetUpdates is the total number of times the application has + // updated the validator set since process start. + ValidatorSetUpdates metrics.Counter } // PrometheusMetrics returns Metrics build using Prometheus client library. @@ -35,12 +43,29 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Help: "Time between BeginBlock and EndBlock in ms.", Buckets: stdprometheus.LinearBuckets(1, 10, 10), }, labels).With(labelsAndValues...), + ConsensusParamUpdates: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "consensus_param_updates", + Help: "The total number of times the application as updated the consensus " + + "parameters since process start.", + }, labels).With(labelsAndValues...), + + ValidatorSetUpdates: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_set_updates", + Help: "The total number of times the application as updated the validator " + + "set since process start.", + }, labels).With(labelsAndValues...), } } // NopMetrics returns no-op Metrics. 
func NopMetrics() *Metrics { return &Metrics{ - BlockProcessingTime: discard.NewHistogram(), + BlockProcessingTime: discard.NewHistogram(), + ConsensusParamUpdates: discard.NewCounter(), + ValidatorSetUpdates: discard.NewCounter(), } } From 735a1a537b2f435918bd2deffacfd02274a3aa68 Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Tue, 10 May 2022 21:50:28 +0200 Subject: [PATCH 019/203] Fixed math notation in ABCI++ app requirements (#8499) * Fixed math notation in ABCI++ app requirements * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel Co-authored-by: Daniel --- .../abci++_app_requirements_002_draft.md | 188 ++++++++++-------- 1 file changed, 101 insertions(+), 87 deletions(-) diff --git a/spec/abci++/abci++_app_requirements_002_draft.md b/spec/abci++/abci++_app_requirements_002_draft.md index 620b1cd5e0..68014a536f 100644 --- a/spec/abci++/abci++_app_requirements_002_draft.md +++ b/spec/abci++/abci++_app_requirements_002_draft.md @@ -6,47 +6,49 @@ title: Application Requirements # Application Requirements This section specifies what Tendermint expects from the Application. It is structured as a set -of formal requirement that can be used for testing and verification of the Application's logic. - -Let $p$ and $q$ be two different correct proposers in rounds $r_p$ and $r_q$ respectively, in height $h$. -Let $s_{p,h-1}$ be $p$'s Application's state committed for height $h-1$. -Let $v_p$ (resp. $v_q$) be the block that $p$'s (resp. $q$'s) Tendermint passes on to the Application -via `RequestPrepareProposal` as proposer of round $r_p$ (resp $r_q$), height $h$, also known as the -raw proposal. -Let $v'_p$ (resp. $v'_q$) the possibly modified block $p$'s (resp. 
$q$'s) Application returns via -`ResponsePrepareProposal` to Tendermint, also known as the prepared proposal. - -Process $p$'s prepared proposal can differ in two different rounds where $p$ is the proposer. - -* Requirement 1 [`PrepareProposal`, header-changes] When the blockchain is in same-block execution mode, - $p$'s Application provides values for the following parameters in `ResponsePrepareProposal`: - _AppHash_, _TxResults_, _ConsensusParams_, _ValidatorUpdates_. Provided values for - _ConsensusParams_ and _ValidatorUpdates_ MAY be empty to denote that the Application +of formal requirements that can be used for testing and verification of the Application's logic. + +Let *p* and *q* be two different correct proposers in rounds *rp* and *rq* +respectively, in height *h*. +Let *sp,h-1* be *p*'s Application's state committed for height *h-1*. +Let *vp* (resp. *vq*) be the block that *p*'s (resp. *q*'s) Tendermint passes +on to the Application +via `RequestPrepareProposal` as proposer of round *rp* (resp *rq*), height *h*, +also known as the raw proposal. +Let *v'p* (resp. *v'q*) the possibly modified block *p*'s (resp. *q*'s) Application +returns via `ResponsePrepareProposal` to Tendermint, also known as the prepared proposal. + +Process *p*'s prepared proposal can differ in two different rounds where *p* is the proposer. + +* Requirement 1 [`PrepareProposal`, header-changes]: When the blockchain is in same-block execution mode, + *p*'s Application provides values for the following parameters in `ResponsePrepareProposal`: + `AppHash`, `TxResults`, `ConsensusParams`, `ValidatorUpdates`. Provided values for + `ConsensusParams` and `ValidatorUpdates` MAY be empty to denote that the Application wishes to keep the current values. 
-Parameters _AppHash_, _TxResults_, _ConsensusParams_, and _ValidatorUpdates_ are used by Tendermint to +Parameters `AppHash`, `TxResults`, `ConsensusParams`, and `ValidatorUpdates` are used by Tendermint to compute various hashes in the block header that will finally be part of the proposal. -* Requirement 2 [`PrepareProposal`, no-header-changes] When the blockchain is in next-block execution - mode, $p$'s Application does not provide values for the following parameters in `ResponsePrepareProposal`: - _AppHash_, _TxResults_, _ConsensusParams_, _ValidatorUpdates_. +* Requirement 2 [`PrepareProposal`, no-header-changes]: When the blockchain is in next-block execution + mode, *p*'s Application does not provide values for the following parameters in `ResponsePrepareProposal`: + `AppHash`, `TxResults`, `ConsensusParams`, `ValidatorUpdates`. In practical terms, Requirements 1 and 2 imply that Tendermint will (a) panic if the Application is in -same-block execution mode and _does_ _not_ provide values for -_AppHash_, _TxResults_, _ConsensusParams_, and _ValidatorUpdates_, or -(b) log an error if the Application is in next-block execution mode and _does_ provide values for -_AppHash_, _TxResults_, _ConsensusParams_, or _ValidatorUpdates_ (the values provided will be ignored). +same-block execution mode and *does not* provide values for +`AppHash`, `TxResults`, `ConsensusParams`, and `ValidatorUpdates`, or +(b) log an error if the Application is in next-block execution mode and *does* provide values for +`AppHash`, `TxResults`, `ConsensusParams`, or `ValidatorUpdates` (the values provided will be ignored). -* Requirement 3 [`PrepareProposal`, timeliness] If $p$'s Application fully executes prepared blocks in - `PrepareProposal` and the network is in a synchronous period while processes $p$ and $q$ are in $r_p$, then - the value of *TimeoutPropose* at $q$ must be such that $q$'s propose timer does not time out - (which would result in $q$ prevoting *nil* in $r_p$). 
+* Requirement 3 [`PrepareProposal`, timeliness]: If *p*'s Application fully executes prepared blocks in + `PrepareProposal` and the network is in a synchronous period while processes *p* and *q* are in *rp*, + then the value of *TimeoutPropose* at *q* must be such that *q*'s propose timer does not time out + (which would result in *q* prevoting `nil` in *rp*). Full execution of blocks at `PrepareProposal` time stands on Tendermint's critical path. Thus, -Requirement 3 ensures the Application will set a value for _TimeoutPropose_ such that the time it takes +Requirement 3 ensures the Application will set a value for `TimeoutPropose` such that the time it takes to fully execute blocks in `PrepareProposal` does not interfere with Tendermint's propose timer. -* Requirement 4 [`PrepareProposal`, tx-size] When $p$'s Application calls `ResponsePrepareProposal`, the +* Requirement 4 [`PrepareProposal`, tx-size]: When *p*'s Application calls `ResponsePrepareProposal`, the total size in bytes of the transactions returned does not exceed `RequestPrepareProposal.max_tx_bytes`. Busy blockchains might seek to maximize the amount of transactions included in each block. Under those conditions, @@ -54,29 +56,31 @@ Tendermint might choose to increase the transactions passed to the Application v beyond the `RequestPrepareProposal.max_tx_bytes` limit. The idea is that, if the Application drops some of those transactions, it can still return a transaction list whose byte size is as close to `RequestPrepareProposal.max_tx_bytes` as possible. Thus, Requirement 4 ensures that the size in bytes of the -transaction list returned by the application will never cause the resulting block to go beyond its byte limit. +transaction list returned by the application will never cause the resulting block to go beyond its byte size +limit. 
-* Requirement 5 [`PrepareProposal`, `ProcessProposal`, coherence]: For any two correct processes $p$ and $q$, - if $q$'s Tendermint calls `RequestProcessProposal` on $v'_p$, - $q$'s Application returns Accept in `ResponseProcessProposal`. +* Requirement 5 [`PrepareProposal`, `ProcessProposal`, coherence]: For any two correct processes *p* and *q*, + if *q*'s Tendermint calls `RequestProcessProposal` on *v'p*, + *q*'s Application returns Accept in `ResponseProcessProposal`. -Requirement 5 makes sure that blocks proposed by correct processes _always_ pass the correct receiving process's +Requirement 5 makes sure that blocks proposed by correct processes *always* pass the correct receiving process's `ProcessProposal` check. On the other hand, if there is a deterministic bug in `PrepareProposal` or `ProcessProposal` (or in both), strictly speaking, this makes all processes that hit the bug byzantine. This is a problem in practice, -as very often validators are running the Application from the same codebase, so potentially _all_ would +as very often validators are running the Application from the same codebase, so potentially *all* would likely hit the bug at the same time. This would result in most (or all) processes prevoting `nil`, with the serious consequences on Tendermint's liveness that this entails. Due to its criticality, Requirement 5 is a target for extensive testing and automated verification. * Requirement 6 [`ProcessProposal`, determinism-1]: `ProcessProposal` is a (deterministic) function of the current - state and the block that is about to be applied. In other words, for any correct process $p$, and any arbitrary block $v'$, - if $p$'s Tendermint calls `RequestProcessProposal` on $v'$ at height $h$, - then $p$'s Application's acceptance or rejection **exclusively** depends on $v'$ and $s_{p,h-1}$. - -* Requirement 7 [`ProcessProposal`, determinism-2]: For any two correct processes $p$ and $q$, and any arbitrary block $v'$, - if $p$'s (resp. 
$q$'s) Tendermint calls `RequestProcessProposal` on $v'$ at height $h$, - then $p$'s Application accepts $v'$ if and only if $q$'s Application accepts $v'$. + state and the block that is about to be applied. In other words, for any correct process *p*, and any arbitrary block *v'*, + if *p*'s Tendermint calls `RequestProcessProposal` on *v'* at height *h*, + then *p*'s Application's acceptance or rejection **exclusively** depends on *v'* and *sp,h-1*. + +* Requirement 7 [`ProcessProposal`, determinism-2]: For any two correct processes *p* and *q*, and any arbitrary + block *v'*, + if *p*'s (resp. *q*'s) Tendermint calls `RequestProcessProposal` on *v'* at height *h*, + then *p*'s Application accepts *v'* if and only if *q*'s Application accepts *v'*. Note that this requirement follows from Requirement 6 and the Agreement property of consensus. Requirements 6 and 7 ensure that all correct processes will react in the same way to a proposed block, even @@ -87,20 +91,26 @@ In such a scenario, Tendermint's liveness cannot be guaranteed. Again, this is a problem in practice if most validators are running the same software, as they are likely to hit the bug at the same point. There is currently no clear solution to help with this situation, so the Application designers/implementors must proceed very carefully with the logic/implementation -of `ProcessProposal`. As a general rule `ProcessProposal` _should_ always accept the block. - -According to the Tendermint algorithm, a correct process can broadcast at most one precommit message in round $r$, height $h$. -Since, as stated in the [Description](#description) section, `ResponseExtendVote` is only called when Tendermint -is about to broadcast a non-`nil` precommit message, a correct process can only produce one vote extension in round $r$, height $h$. -Let $e^r_p$ be the vote extension that the Application of a correct process $p$ returns via `ResponseExtendVote` in round $r$, height $h$. 
-Let $w^r_p$ be the proposed block that $p$'s Tendermint passes to the Application via `RequestExtendVote` in round $r$, height $h$. - -* Requirement 8 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two correct processes $p$ and $q$, if $q$ receives $e^r_p$ - from $p$ in height $h$, $q$'s Application returns Accept in `ResponseVerifyVoteExtension`. +of `ProcessProposal`. As a general rule `ProcessProposal` SHOULD always accept the block. + +According to the Tendermint algorithm, a correct process can broadcast at most one precommit +message in round *r*, height *h*. +Since, as stated in the [Methods](./abci++_methods_002_draft.md#extendvote) section, `ResponseExtendVote` +is only called when Tendermint +is about to broadcast a non-`nil` precommit message, a correct process can only produce one vote extension +in round *r*, height *h*. +Let *erp* be the vote extension that the Application of a correct process *p* returns via +`ResponseExtendVote` in round *r*, height *h*. +Let *wrp* be the proposed block that *p*'s Tendermint passes to the Application via `RequestExtendVote` +in round *r*, height *h*. + +* Requirement 8 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two correct processes *p* and *q*, if *q* +receives *erp* + from *p* in height *h*, *q*'s Application returns Accept in `ResponseVerifyVoteExtension`. Requirement 8 constrains the creation and handling of vote extensions in a similar way as Requirement 5 -contrains the creation and handling of proposed blocks. -Requirement 8 ensures that extensions created by correct processes _always_ pass the `VerifyVoteExtension` +constrains the creation and handling of proposed blocks. +Requirement 8 ensures that extensions created by correct processes *always* pass the `VerifyVoteExtension` checks performed by correct processes receiving those extensions. 
However, if there is a (deterministic) bug in `ExtendVote` or `VerifyVoteExtension` (or in both), we will face the same liveness issues as described for Requirement 5, as Precommit messages with invalid vote @@ -108,58 +118,62 @@ extensions will be discarded. * Requirement 9 [`VerifyVoteExtension`, determinism-1]: `VerifyVoteExtension` is a (deterministic) function of the current state, the vote extension received, and the prepared proposal that the extension refers to. - In other words, for any correct process $p$, and any arbitrary vote extension $e$, and any arbitrary - block $w$, if $p$'s (resp. $q$'s) Tendermint calls `RequestVerifyVoteExtension` on $e$ and $w$ at height $h$, - then $p$'s Application's acceptance or rejection **exclusively** depends on $e$, $w$ and $s_{p,h-1}$. - -* Requirement 10 [`VerifyVoteExtension`, determinism-2]: For any two correct processes $p$ and $q$, - and any arbitrary vote extension $e$, and any arbitrary block $w$, - if $p$'s (resp. $q$'s) Tendermint calls `RequestVerifyVoteExtension` on $e$ and $w$ at height $h$, - then $p$'s Application accepts $e$ if and only if $q$'s Application accepts $e$. + In other words, for any correct process *p*, and any arbitrary vote extension *e*, and any arbitrary + block *w*, if *p*'s (resp. *q*'s) Tendermint calls `RequestVerifyVoteExtension` on *e* and *w* at height *h*, + then *p*'s Application's acceptance or rejection **exclusively** depends on *e*, *w* and *sp,h-1*. + +* Requirement 10 [`VerifyVoteExtension`, determinism-2]: For any two correct processes *p* and *q*, + and any arbitrary vote extension *e*, and any arbitrary block *w*, + if *p*'s (resp. *q*'s) Tendermint calls `RequestVerifyVoteExtension` on *e* and *w* at height *h*, + then *p*'s Application accepts *e* if and only if *q*'s Application accepts *e*. Note that this requirement follows from Requirement 9 and the Agreement property of consensus. 
Requirements 9 and 10 ensure that the validation of vote extensions will be deterministic at all correct processes. -Requirements 9 and 10 protect against arbitrary vote extension data from Byzantine processes -similarly to Requirements 6 and 7 and proposed blocks. +Requirements 9 and 10 protect against arbitrary vote extension data from Byzantine processes, +in a similar way as Requirements 6 and 7 protect against arbitrary proposed blocks. Requirements 9 and 10 can be violated by a bug inducing non-determinism in `VerifyVoteExtension`. In this case liveness can be compromised. -Extra care should be put in the implementation of `ExtendVote` and `VerifyVoteExtension` and, -as a general rule, `VerifyVoteExtension` _should_ always accept the vote extension. +Extra care should be put in the implementation of `ExtendVote` and `VerifyVoteExtension`. +As a general rule, `VerifyVoteExtension` SHOULD always accept the vote extension. -* Requirement 11 [_all_, no-side-effects]: $p$'s calls to `RequestPrepareProposal`, - `RequestProcessProposal`, `RequestExtendVote`, and `RequestVerifyVoteExtension` at height $h$ do - not modify $s_{p,h-1}$. +* Requirement 11 [*all*, no-side-effects]: *p*'s calls to `RequestPrepareProposal`, + `RequestProcessProposal`, `RequestExtendVote`, and `RequestVerifyVoteExtension` at height *h* do + not modify *sp,h-1*. -* Requirement 12 [`ExtendVote`, `FinalizeBlock`, non-dependency]: for any correct process $p$, -and any vote extension $e$ that $p$ received at height $h$, the computation of -$s_{p,h}$ does not depend on $e$. +* Requirement 12 [`ExtendVote`, `FinalizeBlock`, non-dependency]: for any correct process *p*, +and any vote extension *e* that *p* received at height *h*, the computation of +*sp,h* does not depend on *e*. -The call to correct process $p$'s `RequestFinalizeBlock` at height $h$, with block $v_{p,h}$ -passed as parameter, creates state $s_{p,h}$. 
+The call to correct process *p*'s `RequestFinalizeBlock` at height *h*, with block *vp,h* +passed as parameter, creates state *sp,h*. Additionally, -* in next-block execution mode, $p$'s `FinalizeBlock` creates a set of transaction results $T_{p,h}$, -* in same-block execution mode, $p$'s `PrepareProposal` creates a set of transaction results $T_{p,h}$ - if $p$ was the proposer of $v_{p,h}$, otherwise `FinalizeBlock` creates $T_{p,h}$. +* in next-block execution mode, *p*'s `FinalizeBlock` creates a set of transaction results *Tp,h*, +* in same-block execution mode, *p*'s `PrepareProposal` creates a set of transaction results *Tp,h* + if *p* was the proposer of *vp,h*. If *p* was not the proposer of *vp,h*, + `ProcessProposal` creates *Tp,h*. `FinalizeBlock` MAY re-create *Tp,h* if it was + removed from memory during the execution of height *h*. -* Requirement 13 [`FinalizeBlock`, determinism-1]: For any correct process $p$, - $s_{p,h}$ exclusively depends on $s_{p,h-1}$ and $v_{p,h}$. +* Requirement 13 [`FinalizeBlock`, determinism-1]: For any correct process *p*, + *sp,h* exclusively depends on *sp,h-1* and *vp,h*. -* Requirement 14 [`FinalizeBlock`, determinism-2]: For any correct process $p$, - the contents of $T_{p,h}$ exclusively depend on $s_{p,h-1}$ and $v_{p,h}$. +* Requirement 14 [`FinalizeBlock`, determinism-2]: For any correct process *p*, + the contents of *Tp,h* exclusively depend on *sp,h-1* and *vp,h*. Note that Requirements 13 and 14, combined with Agreement property of consensus ensure -the Application state evolves consistently at all correct processes. +state machine replication, i.e., the Application state evolves consistently at all correct processes. Finally, notice that neither `PrepareProposal` nor `ExtendVote` have determinism-related requirements associated. Indeed, `PrepareProposal` is not required to be deterministic: -* $v'_p$ may depend on $v_p$ and $s_{p,h-1}$, but may also depend on other values or operations. 
-* $v_p = v_q \nRightarrow v'_p = v'_q$. +* *v'p* may depend on *vp* and *sp,h-1*, but may also depend on other values or operations. +* *vp = vq ⇏ v'p = v'q*. Likewise, `ExtendVote` can also be non-deterministic: -* $e^r_p$ may depend on $w^r_p$ and $s_{p,h-1}$, but may also depend on other values or operations. -* $w^r_p = w^r_q \nRightarrow e^r_p = e^r_q$ +* *erp* may depend on *wrp* and *sp,h-1*, + but may also depend on other values or operations. +* *wrp = wrq ⇏ + erp = erq* From 06b1812094dba6086f53ec385ed6238ffbc255fa Mon Sep 17 00:00:00 2001 From: Thane Thomson Date: Wed, 11 May 2022 07:10:32 -0400 Subject: [PATCH 020/203] abci++: Propagate vote extensions (RFC 017) (#8433) * Add protos for ExtendedCommit Cherry-pick from e73f0178b72a16ee81f8e856aadf651f2c62ec6e just the changes to the .proto files, since we have deleted the .intermediate files. Signed-off-by: Thane Thomson * make proto-gen Signed-off-by: Thane Thomson * BlockStore holds extended commit Cherry-pick 8d504d4b50ec6afbdffe2df7ababbef30e15053d and fix conflicts. Signed-off-by: Thane Thomson * Reshuffle ExtendedCommit and ExtendedCommitSig Separate the data structures and functions from their Commit-oriented counterparts to adhere to the current coding style. Signed-off-by: Thane Thomson * Fix exit condition in blocksync * Add note to remove TxResult proto As Sergio pointed out in 3e31aa6f583cdc71e208ed03a82f1d804ec0de49, this proto message can probably be removed. We should do this in a separate PR. 
Signed-off-by: Thane Thomson * Lift termination condition into for loop Signed-off-by: Thane Thomson * Enforce vote extension signature requirement Signed-off-by: Thane Thomson * Expand on comment for PeekTwoBlocks for posterity Signed-off-by: Thane Thomson * Isolate TODO more clearly Signed-off-by: Thane Thomson * make mockery Signed-off-by: Thane Thomson * Fix comment Signed-off-by: Thane Thomson * Make panic output from BlockStore.SaveBlock more readable Signed-off-by: Thane Thomson * Add helper methods to ExtendedCommitSig and ExtendedCommit Signed-off-by: Thane Thomson * Fix most tests except TestHandshake* Signed-off-by: Thane Thomson * Fix store prefix collision Signed-off-by: Thane Thomson * Fix TestBlockFetchAtHeight Signed-off-by: Thane Thomson * Remove global state from store tests Signed-off-by: Thane Thomson * Apply suggestions from code review Co-authored-by: M. J. Fromberger Co-authored-by: Sergio Mena * blocksync: Just return error Signed-off-by: Thane Thomson * make format Signed-off-by: Thane Thomson * types: Remove unused/commented-out code Signed-off-by: Thane Thomson * blocksync: Change pool AddBlock function signature to return errors Signed-off-by: Thane Thomson * types: Improve legibility of switch statements Signed-off-by: Thane Thomson * blocksync: Expand on extended commit requirement in AddBlock description Signed-off-by: Thane Thomson * blocksync: Return error without also logging it Signed-off-by: Thane Thomson * consensus: Rename short-lived local variable Signed-off-by: Thane Thomson * consensus: Allocate TODO to Sergio Signed-off-by: Thane Thomson * evidence/pool_test: Inline slice construction Signed-off-by: Thane Thomson * state: Rename LoadBlockExtCommit to LoadBlockExtendedCommit Signed-off-by: Thane Thomson * proto: Remove TODO on TxResult Signed-off-by: Thane Thomson * types: Minor format Signed-off-by: Thane Thomson * types: Reformat ExtendedCommitSig.BlockID Signed-off-by: Thane Thomson * types: Remove NewExtendedCommit 
constructor Signed-off-by: Thane Thomson * types: Remove NewCommit constructor Signed-off-by: Thane Thomson * types: Shorten receiver names for ExtendedCommit Signed-off-by: Thane Thomson * types: Convert ExtendedCommit.Copy to a deep clone Signed-off-by: Thane Thomson * types: Assign TODO to Sergio Signed-off-by: Thane Thomson * types: Fix legibility nits Signed-off-by: Thane Thomson * types: Improve legibility Signed-off-by: Thane Thomson * store/state: Add TODO to move prefixes to common package Signed-off-by: Thane Thomson * Propagate validator info to PrepareProposal In order to propagate validator voting power through to PrepareProposal, we need to load the validator set info from the height corresponding to the extended commit that we're passing through to PrepareProposal as the "LocalLastCommit". Signed-off-by: Thane Thomson * Rename local var for clarity Signed-off-by: Thane Thomson * Fix TestMaxProposalBlockSize Signed-off-by: Thane Thomson * Rename local var for clarity Signed-off-by: Thane Thomson * Remove debug log Signed-off-by: Thane Thomson * Remove CommigSig.ForBlock helper Signed-off-by: Thane Thomson * Remove CommigSig.Absent helper Signed-off-by: Thane Thomson * Remove ExtendedCommitSig.ForBlock helper Signed-off-by: Thane Thomson * Remove ExtendedCommitSig.Absent helper Signed-off-by: Thane Thomson * There are no extended commits below the initial height Signed-off-by: Thane Thomson * Fix comment grammar Signed-off-by: Thane Thomson * Remove JSON encoding from ExtendedCommit Signed-off-by: Thane Thomson * Embed CommitSig into ExtendedCommitSig instead of duplicating fields Signed-off-by: Thane Thomson * Rename ExtendedCommit vote_extension field to extension for consistency with domain types Signed-off-by: Thane Thomson * blocksync: Panic if we peek a block without an extended commit Signed-off-by: Thane Thomson * Apply suggestions from code review Co-authored-by: M. J. 
Fromberger * Remove Sergio from TODO Signed-off-by: Thane Thomson * Increase hard-coded vote extension max size to 1MB Signed-off-by: Thane Thomson * state: Remove unnecessary comment Signed-off-by: Thane Thomson * state: Ensure no of commit sigs equals validator set length Signed-off-by: Thane Thomson * make format Signed-off-by: Thane Thomson * types: Minor legibility improvements Signed-off-by: Thane Thomson * Improve legibility Signed-off-by: Thane Thomson * types: Remove unused GetVotes function on VoteSet Signed-off-by: Thane Thomson * Refactor TestMaxProposalBlockSize to construct more realistic extended commit Signed-off-by: Thane Thomson * Refactor buildExtendedCommitInfo to resemble buildLastCommitInfo Signed-off-by: Thane Thomson * Apply suggestions from code review Co-authored-by: M. J. Fromberger * abci++: Disable VerifyVoteExtension call on nil precommits (#8491) Signed-off-by: Thane Thomson * types: Require vote extensions on non-nil precommits and not otherwise Signed-off-by: Thane Thomson * Disable lint Signed-off-by: Thane Thomson * Increase timeout for TestReactorVotingPowerChange to counter flakiness Signed-off-by: Thane Thomson * Only sign and verify vote extensions in non-nil precommits Signed-off-by: Thane Thomson * Revert "Disable lint" This reverts commit 6fffbf94028a1ae78289abbad1b602c251f6f652. Signed-off-by: Thane Thomson * Add missing non-nil check uncovered non-deterministically in TestHandshakeReplayAll Signed-off-by: Thane Thomson * Expand error message for accuracy Signed-off-by: Thane Thomson * Only call ExtendVote when we make non-nil precommits Signed-off-by: Thane Thomson * Revert "Increase timeout for TestReactorVotingPowerChange to counter flakiness" This reverts commit af514939dbdf72ce275ef290a34c390a5e982563. Signed-off-by: Thane Thomson * Refactor ValidateBasic for ExtendedCommitSig for legibility Signed-off-by: Thane Thomson Co-authored-by: Sergio Mena Co-authored-by: M. J. 
Fromberger --- internal/blocksync/pool.go | 58 +- internal/blocksync/pool_test.go | 9 +- internal/blocksync/reactor.go | 54 +- internal/blocksync/reactor_test.go | 52 +- internal/consensus/byzantine_test.go | 10 +- internal/consensus/common_test.go | 3 +- internal/consensus/mocks/cons_sync_reactor.go | 1 + internal/consensus/msgs_test.go | 2 +- internal/consensus/reactor.go | 6 +- internal/consensus/replay_test.go | 70 +- internal/consensus/state.go | 39 +- internal/consensus/state_test.go | 14 +- internal/evidence/pool_test.go | 31 +- internal/evidence/verify_test.go | 18 +- internal/state/execution.go | 102 +- internal/state/execution_test.go | 64 +- internal/state/helpers_test.go | 14 +- internal/state/indexer/mocks/event_sink.go | 1 + internal/state/mocks/block_store.go | 18 +- internal/state/mocks/evidence_pool.go | 1 + internal/state/mocks/store.go | 1 + internal/state/services.go | 3 +- internal/state/store.go | 3 + internal/state/test/factory/block.go | 10 +- internal/state/validation_test.go | 42 +- internal/statesync/mocks/state_provider.go | 1 + internal/statesync/reactor_test.go | 4 +- internal/store/store.go | 50 +- internal/store/store_test.go | 140 ++- internal/test/factory/commit.go | 4 +- light/helpers_test.go | 7 +- node/node_test.go | 54 +- privval/file.go | 8 +- proto/tendermint/blocksync/types.pb.go | 113 +- proto/tendermint/blocksync/types.proto | 2 + proto/tendermint/types/types.pb.go | 965 +++++++++++++++--- proto/tendermint/types/types.proto | 22 + scripts/confix/confix.go | 1 + test/e2e/runner/evidence.go | 4 +- types/block.go | 404 ++++++-- types/block_test.go | 49 +- types/evidence.go | 6 +- types/evidence_test.go | 12 +- types/part_set.go | 6 + types/priv_validator.go | 8 +- types/test_util.go | 16 +- types/validation.go | 8 +- types/validation_test.go | 29 +- types/validator_set_test.go | 9 +- types/vote.go | 46 +- types/vote_set.go | 36 +- types/vote_set_test.go | 8 +- types/vote_test.go | 30 +- 53 files changed, 1952 insertions(+), 
716 deletions(-) diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index f00a2fab5b..30bb6962e1 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -200,16 +200,20 @@ func (pool *BlockPool) IsCaughtUp() bool { return pool.height >= (pool.maxPeerHeight - 1) } -// PeekTwoBlocks returns blocks at pool.height and pool.height+1. -// We need to see the second block's Commit to validate the first block. -// So we peek two blocks at a time. +// PeekTwoBlocks returns blocks at pool.height and pool.height+1. We need to +// see the second block's Commit to validate the first block. So we peek two +// blocks at a time. We return an extended commit, containing vote extensions +// and their associated signatures, as this is critical to consensus in ABCI++ +// as we switch from block sync to consensus mode. +// // The caller will verify the commit. -func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) { +func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstExtCommit *types.ExtendedCommit) { pool.mtx.RLock() defer pool.mtx.RUnlock() if r := pool.requesters[pool.height]; r != nil { first = r.getBlock() + firstExtCommit = r.getExtendedCommit() } if r := pool.requesters[pool.height+1]; r != nil { second = r.getBlock() @@ -218,7 +222,8 @@ func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) } // PopRequest pops the first block at pool.height. -// It must have been validated by 'second'.Commit from PeekTwoBlocks(). +// It must have been validated by the second Commit from PeekTwoBlocks. +// TODO(thane): (?) and its corresponding ExtendedCommit. func (pool *BlockPool) PopRequest() { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -262,16 +267,25 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID { return peerID } -// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it. 
+// AddBlock validates that the block comes from the peer it was expected from +// and calls the requester to store it. +// +// This requires an extended commit at the same height as the supplied block - +// the block contains the last commit, but we need the latest commit in case we +// need to switch over from block sync to consensus at this height. If the +// height of the extended commit and the height of the block do not match, we +// do not add the block and return an error. // TODO: ensure that blocks come in order for each peer. -func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) { +func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, extCommit *types.ExtendedCommit, blockSize int) error { pool.mtx.Lock() defer pool.mtx.Unlock() + if block.Height != extCommit.Height { + return fmt.Errorf("heights don't match, not adding block (block height: %d, commit height: %d)", block.Height, extCommit.Height) + } + requester := pool.requesters[block.Height] if requester == nil { - pool.logger.Error("peer sent us a block we didn't expect", - "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height) diff := pool.height - block.Height if diff < 0 { diff *= -1 @@ -279,10 +293,10 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSi if diff > maxDiffBetweenCurrentAndReceivedBlockHeight { pool.sendError(errors.New("peer sent us a block we didn't expect with a height too far ahead/behind"), peerID) } - return + return fmt.Errorf("peer sent us a block we didn't expect (peer: %s, current height: %d, block height: %d)", peerID, pool.height, block.Height) } - if requester.setBlock(block, peerID) { + if requester.setBlock(block, extCommit, peerID) { atomic.AddInt32(&pool.numPending, -1) peer := pool.peers[peerID] if peer != nil { @@ -290,9 +304,11 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSi } } else { err := errors.New("requester is different 
or block already exists") - pool.logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height) pool.sendError(err, peerID) + return fmt.Errorf("%w (peer: %s, requester: %s, block height: %d)", err, peerID, requester.getPeerID(), block.Height) } + + return nil } // MaxPeerHeight returns the highest reported height. @@ -456,6 +472,7 @@ func (pool *BlockPool) debug() string { } else { str += fmt.Sprintf("H(%v):", h) str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil) + str += fmt.Sprintf("C?(%v) ", pool.requesters[h].extCommit != nil) } } return str @@ -544,9 +561,10 @@ type bpRequester struct { gotBlockCh chan struct{} redoCh chan types.NodeID // redo may send multitime, add peerId to identify repeat - mtx sync.Mutex - peerID types.NodeID - block *types.Block + mtx sync.Mutex + peerID types.NodeID + block *types.Block + extCommit *types.ExtendedCommit } func newBPRequester(logger log.Logger, pool *BlockPool, height int64) *bpRequester { @@ -572,13 +590,14 @@ func (bpr *bpRequester) OnStart(ctx context.Context) error { func (*bpRequester) OnStop() {} // Returns true if the peer matches and block doesn't already exist. 
-func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool { +func (bpr *bpRequester) setBlock(block *types.Block, extCommit *types.ExtendedCommit, peerID types.NodeID) bool { bpr.mtx.Lock() if bpr.block != nil || bpr.peerID != peerID { bpr.mtx.Unlock() return false } bpr.block = block + bpr.extCommit = extCommit bpr.mtx.Unlock() select { @@ -594,6 +613,12 @@ func (bpr *bpRequester) getBlock() *types.Block { return bpr.block } +func (bpr *bpRequester) getExtendedCommit() *types.ExtendedCommit { + bpr.mtx.Lock() + defer bpr.mtx.Unlock() + return bpr.extCommit +} + func (bpr *bpRequester) getPeerID() types.NodeID { bpr.mtx.Lock() defer bpr.mtx.Unlock() @@ -611,6 +636,7 @@ func (bpr *bpRequester) reset() { bpr.peerID = "" bpr.block = nil + bpr.extCommit = nil } // Tells bpRequester to pick another peer and try again. diff --git a/internal/blocksync/pool_test.go b/internal/blocksync/pool_test.go index 1cb8cca40c..3c47b4a647 100644 --- a/internal/blocksync/pool_test.go +++ b/internal/blocksync/pool_test.go @@ -43,7 +43,10 @@ func (p testPeer) runInputRoutine() { // Request desired, pretend like we got the block immediately. 
func (p testPeer) simulateInput(input inputData) { block := &types.Block{Header: types.Header{Height: input.request.Height}} - input.pool.AddBlock(input.request.PeerID, block, 123) + extCommit := &types.ExtendedCommit{ + Height: input.request.Height, + } + _ = input.pool.AddBlock(input.request.PeerID, block, extCommit, 123) // TODO: uncommenting this creates a race which is detected by: // https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856 // see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890 @@ -110,7 +113,7 @@ func TestBlockPoolBasic(t *testing.T) { if !pool.IsRunning() { return } - first, second := pool.PeekTwoBlocks() + first, second, _ := pool.PeekTwoBlocks() if first != nil && second != nil { pool.PopRequest() } else { @@ -164,7 +167,7 @@ func TestBlockPoolTimeout(t *testing.T) { if !pool.IsRunning() { return } - first, second := pool.PeekTwoBlocks() + first, second, _ := pool.PeekTwoBlocks() if first != nil && second != nil { pool.PopRequest() } else { diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index bf9845370e..144595889a 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -76,7 +76,7 @@ type Reactor struct { stateStore sm.Store blockExec *sm.BlockExecutor - store *store.BlockStore + store sm.BlockStore pool *BlockPool consReactor consensusReactor blockSync *atomicBool @@ -186,15 +186,21 @@ func (r *Reactor) OnStop() { func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh *p2p.Channel) error { block := r.store.LoadBlock(msg.Height) if block != nil { + extCommit := r.store.LoadBlockExtendedCommit(msg.Height) + if extCommit == nil { + return fmt.Errorf("found block in store without extended commit: %v", block) + } blockProto, err := block.ToProto() if err != nil { - r.logger.Error("failed to convert msg to protobuf", "err", err) - return err + return 
fmt.Errorf("failed to convert block to protobuf: %w", err) } return blockSyncCh.Send(ctx, p2p.Envelope{ - To: peerID, - Message: &bcproto.BlockResponse{Block: blockProto}, + To: peerID, + Message: &bcproto.BlockResponse{ + Block: blockProto, + ExtCommit: extCommit.ToProto(), + }, }) } @@ -236,8 +242,17 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blo "err", err) return err } + extCommit, err := types.ExtendedCommitFromProto(msg.ExtCommit) + if err != nil { + r.logger.Error("failed to convert extended commit from proto", + "peer", envelope.From, + "err", err) + return err + } - r.pool.AddBlock(envelope.From, block, block.Size()) + if err := r.pool.AddBlock(envelope.From, block, extCommit, block.Size()); err != nil { + r.logger.Error("failed to add block", "err", err) + } case *bcproto.StatusRequest: return blockSyncCh.Send(ctx, p2p.Envelope{ @@ -448,6 +463,20 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh ) switch { + // TODO(sergio) Might be needed for implementing the upgrading solution. Remove after that + //case state.LastBlockHeight > 0 && r.store.LoadBlockExtCommit(state.LastBlockHeight) == nil: + case state.LastBlockHeight > 0 && blocksSynced == 0: + // Having state-synced, we need to blocksync at least one block + r.logger.Info( + "no seen commit yet", + "height", height, + "last_block_height", state.LastBlockHeight, + "initial_height", state.InitialHeight, + "max_peer_height", r.pool.MaxPeerHeight(), + "timeout_in", syncTimeout-time.Since(lastAdvance), + ) + continue + case r.pool.IsCaughtUp(): r.logger.Info("switching to consensus reactor", "height", height) @@ -490,9 +519,13 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh // TODO: Uncouple from request routine. 
// see if there are any blocks to sync - first, second := r.pool.PeekTwoBlocks() - if first == nil || second == nil { - // we need both to sync the first block + first, second, extCommit := r.pool.PeekTwoBlocks() + if first == nil || second == nil || extCommit == nil { + if first != nil && extCommit == nil { + // See https://github.com/tendermint/tendermint/pull/8433#discussion_r866790631 + panic(fmt.Errorf("peeked first block without extended commit at height %d - possible node store corruption", first.Height)) + } + // we need all to sync the first block continue } else { // try again quickly next loop @@ -517,6 +550,7 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh // NOTE: We can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. + // TODO(sergio): Should we also validate against the extended commit? err = state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit) if err == nil { @@ -559,7 +593,7 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh r.pool.PopRequest() // TODO: batch saves so we do not persist to disk every block - r.store.SaveBlock(first, firstParts, second.LastCommit) + r.store.SaveBlock(first, firstParts, extCommit) // TODO: Same thing for app - but we would need a way to get the hash // without persisting the state. diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 857b0a5190..1d4d7d4d61 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -147,39 +147,43 @@ func (rts *reactorTestSuite) addNode( sm.NopMetrics(), ) + var lastExtCommit *types.ExtendedCommit + + // The commit we are building for the current height. 
+ seenExtCommit := &types.ExtendedCommit{} + for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil) - - if blockHeight > 1 { - lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) - lastBlock := blockStore.LoadBlock(blockHeight - 1) - - vote, err := factory.MakeVote( - ctx, - privVal, - lastBlock.Header.ChainID, 0, - lastBlock.Header.Height, 0, 2, - lastBlockMeta.BlockID, - time.Now(), - ) - require.NoError(t, err) - lastCommit = types.NewCommit( - vote.Height, - vote.Round, - lastBlockMeta.BlockID, - []types.CommitSig{vote.CommitSig()}, - ) - } + lastExtCommit = seenExtCommit.Clone() - thisBlock := sf.MakeBlock(state, blockHeight, lastCommit) + thisBlock := sf.MakeBlock(state, blockHeight, lastExtCommit.StripExtensions()) thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} + // Simulate a commit for the current height + vote, err := factory.MakeVote( + ctx, + privVal, + thisBlock.Header.ChainID, + 0, + thisBlock.Header.Height, + 0, + 2, + blockID, + time.Now(), + ) + require.NoError(t, err) + seenExtCommit = &types.ExtendedCommit{ + Height: vote.Height, + Round: vote.Round, + BlockID: blockID, + ExtendedSignatures: []types.ExtendedCommitSig{vote.ExtendedCommitSig()}, + } + state, err = blockExec.ApplyBlock(ctx, state, blockID, thisBlock) require.NoError(t, err) - blockStore.SaveBlock(thisBlock, thisParts, lastCommit) + blockStore.SaveBlock(thisBlock, thisParts, seenExtCommit) } rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 804ebdb18a..9c6f4a2954 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -178,22 +178,22 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { lazyNodeState.decideProposal = func(ctx 
context.Context, height int64, round int32) { require.NotNil(t, lazyNodeState.privValidator) - var commit *types.Commit + var extCommit *types.ExtendedCommit switch { case lazyNodeState.Height == lazyNodeState.state.InitialHeight: // We're creating a proposal for the first block. // The commit is empty, but not nil. - commit = types.NewCommit(0, 0, types.BlockID{}, nil) + extCommit = &types.ExtendedCommit{} case lazyNodeState.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit - commit = lazyNodeState.LastCommit.MakeCommit() + extCommit = lazyNodeState.LastCommit.MakeExtendedCommit() default: // This shouldn't happen. lazyNodeState.logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") return } // omit the last signature in the commit - commit.Signatures[len(commit.Signatures)-1] = types.NewCommitSigAbsent() + extCommit.ExtendedSignatures[len(extCommit.ExtendedSignatures)-1] = types.NewExtendedCommitSigAbsent() if lazyNodeState.privValidatorPubKey == nil { // If this node is a validator & proposer in the current round, it will @@ -204,7 +204,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { proposerAddr := lazyNodeState.privValidatorPubKey.Address() block, err := lazyNodeState.blockExec.CreateProposalBlock( - ctx, lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr, lazyNodeState.LastCommit.GetVotes()) + ctx, lazyNodeState.Height, lazyNodeState.state, extCommit, proposerAddr) require.NoError(t, err) blockParts, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index ca1db84252..1dc92b33c6 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -160,7 +160,8 @@ func signVote( blockID types.BlockID) *types.Vote { var ext []byte - if voteType == tmproto.PrecommitType { + // Only non-nil precommits are allowed to carry vote extensions. 
+ if voteType == tmproto.PrecommitType && !blockID.IsNil() { ext = []byte("extension") } v, err := vs.signVote(ctx, voteType, chainID, blockID, ext) diff --git a/internal/consensus/mocks/cons_sync_reactor.go b/internal/consensus/mocks/cons_sync_reactor.go index f904e9129a..3aa02e9fe0 100644 --- a/internal/consensus/mocks/cons_sync_reactor.go +++ b/internal/consensus/mocks/cons_sync_reactor.go @@ -6,6 +6,7 @@ import ( testing "testing" mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/internal/state" ) diff --git a/internal/consensus/msgs_test.go b/internal/consensus/msgs_test.go index 5a6465294f..b8d18a1090 100644 --- a/internal/consensus/msgs_test.go +++ b/internal/consensus/msgs_test.go @@ -67,7 +67,7 @@ func TestMsgToProto(t *testing.T) { pv := types.NewMockPV() vote, err := factory.MakeVote(ctx, pv, factory.DefaultTestChainID, - 0, 1, 0, 2, types.BlockID{}, time.Now()) + 0, 1, 0, 2, bi, time.Now()) require.NoError(t, err) pbVote := vote.ToProto() diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index eea74b5e1d..1a9d49057e 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -794,10 +794,10 @@ func (r *Reactor) gossipVotesRoutine(ctx context.Context, ps *PeerState, voteCh // catchup logic -- if peer is lagging by more than 1, send Commit blockStoreBase := r.state.blockStore.Base() if blockStoreBase > 0 && prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= blockStoreBase { - // Load the block commit for prs.Height, which contains precommit + // Load the block's extended commit for prs.Height, which contains precommit // signatures for prs.Height. 
- if commit := r.state.blockStore.LoadBlockCommit(prs.Height); commit != nil { - if ok, err := r.pickSendVote(ctx, ps, commit, voteCh); err != nil { + if ec := r.state.blockStore.LoadBlockExtendedCommit(prs.Height); ec != nil { + if ok, err := r.pickSendVote(ctx, ps, ec, voteCh); err != nil { return } else if ok { logger.Debug("picked Catchup commit to send", "height", prs.Height) diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index f112f23e80..c8f04655b2 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -297,7 +297,7 @@ type simulatorTestSuite struct { GenesisState sm.State Config *config.Config Chain []*types.Block - Commits []*types.Commit + ExtCommits []*types.ExtendedCommit CleanupFunc cleanupFunc Mempool mempool.Mempool @@ -578,11 +578,11 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { } ensureNewRound(t, newRoundCh, height+1, 0) - sim.Chain = make([]*types.Block, 0) - sim.Commits = make([]*types.Commit, 0) + sim.Chain = []*types.Block{} + sim.ExtCommits = []*types.ExtendedCommit{} for i := 1; i <= numBlocks; i++ { sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i))) - sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i))) + sim.ExtCommits = append(sim.ExtCommits, css[0].blockStore.LoadBlockExtendedCommit(int64(i))) } return sim @@ -679,7 +679,7 @@ func testHandshakeReplay( testValidatorsChange bool, ) { var chain []*types.Block - var commits []*types.Commit + var extCommits []*types.ExtendedCommit var store *mockBlockStore var stateDB dbm.DB var genesisState sm.State @@ -699,7 +699,7 @@ func testHandshakeReplay( genesisState = sim.GenesisState cfg = sim.Config chain = append([]*types.Block{}, sim.Chain...) 
// copy chain - commits = sim.Commits + extCommits = sim.ExtCommits store = newMockBlockStore(t, cfg, genesisState.ConsensusParams) } else { // test single node testConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%v_s", t.Name(), mode)) @@ -718,7 +718,7 @@ func testHandshakeReplay( err = wal.Start(ctx) require.NoError(t, err) t.Cleanup(func() { cancel(); wal.Wait() }) - chain, commits = makeBlockchainFromWAL(t, wal) + chain, extCommits = makeBlockchainFromWAL(t, wal) pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) stateDB, genesisState, store = stateAndStore(t, cfg, pubKey, kvstore.ProtocolVersion) @@ -726,7 +726,7 @@ func testHandshakeReplay( } stateStore := sm.NewStore(stateDB) store.chain = chain - store.commits = commits + store.extCommits = extCommits state := genesisState.Copy() // run the chain through state.ApplyBlock to build up the tendermint state @@ -1034,7 +1034,7 @@ func (app *badApp) Commit(context.Context) (*abci.ResponseCommit, error) { //-------------------------- // utils for making blocks -func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Commit) { +func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.ExtendedCommit) { t.Helper() var height int64 @@ -1047,10 +1047,10 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm // log.Notice("Build a blockchain by reading from the WAL") var ( - blocks []*types.Block - commits []*types.Commit - thisBlockParts *types.PartSet - thisBlockCommit *types.Commit + blocks []*types.Block + extCommits []*types.ExtendedCommit + thisBlockParts *types.PartSet + thisBlockExtCommit *types.ExtendedCommit ) dec := NewWALDecoder(gr) @@ -1082,12 +1082,12 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm require.Equal(t, block.Height, height+1, "read bad block from wal. 
got height %d, expected %d", block.Height, height+1) - commitHeight := thisBlockCommit.Height + commitHeight := thisBlockExtCommit.Height require.Equal(t, commitHeight, height+1, "commit doesnt match. got height %d, expected %d", commitHeight, height+1) blocks = append(blocks, block) - commits = append(commits, thisBlockCommit) + extCommits = append(extCommits, thisBlockExtCommit) height++ } case *types.PartSetHeader: @@ -1097,8 +1097,12 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm require.NoError(t, err) case *types.Vote: if p.Type == tmproto.PrecommitType { - thisBlockCommit = types.NewCommit(p.Height, p.Round, - p.BlockID, []types.CommitSig{p.CommitSig()}) + thisBlockExtCommit = &types.ExtendedCommit{ + Height: p.Height, + Round: p.Round, + BlockID: p.BlockID, + ExtendedSignatures: []types.ExtendedCommitSig{p.ExtendedCommitSig()}, + } } } } @@ -1113,12 +1117,12 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm require.NoError(t, err) require.Equal(t, block.Height, height+1, "read bad block from wal. got height %d, expected %d", block.Height, height+1) - commitHeight := thisBlockCommit.Height + commitHeight := thisBlockExtCommit.Height require.Equal(t, commitHeight, height+1, "commit does not match. 
got height %d, expected %d", commitHeight, height+1) blocks = append(blocks, block) - commits = append(commits, thisBlockCommit) - return blocks, commits + extCommits = append(extCommits, thisBlockExtCommit) + return blocks, extCommits } func readPieceFromWAL(msg *TimedWALMessage) interface{} { @@ -1162,14 +1166,16 @@ func stateAndStore( // mock block store type mockBlockStore struct { - cfg *config.Config - params types.ConsensusParams - chain []*types.Block - commits []*types.Commit - base int64 - t *testing.T + cfg *config.Config + params types.ConsensusParams + chain []*types.Block + extCommits []*types.ExtendedCommit + base int64 + t *testing.T } +var _ sm.BlockStore = &mockBlockStore{} + // TODO: NewBlockStore(db.NewMemDB) ... func newMockBlockStore(t *testing.T, cfg *config.Config, params types.ConsensusParams) *mockBlockStore { return &mockBlockStore{ @@ -1198,20 +1204,24 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { } } func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } -func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { +func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { } + func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { - return bs.commits[height-1] + return bs.extCommits[height-1].StripExtensions() } func (bs *mockBlockStore) LoadSeenCommit() *types.Commit { - return bs.commits[len(bs.commits)-1] + return bs.extCommits[len(bs.extCommits)-1].StripExtensions() +} +func (bs *mockBlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { + return bs.extCommits[height-1] } func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { pruned := uint64(0) for i := int64(0); i < height-1; i++ { bs.chain[i] = nil - bs.commits[i] = nil + bs.extCommits[i] = nil pruned++ } bs.base = height diff --git 
a/internal/consensus/state.go b/internal/consensus/state.go index 90efbab770..b016e26878 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -695,19 +695,15 @@ func (cs *State) sendInternalMessage(ctx context.Context, mi msgInfo) { // Reconstruct LastCommit from SeenCommit, which we saved along with the block, // (which happens even before saving the state) func (cs *State) reconstructLastCommit(state sm.State) { - commit := cs.blockStore.LoadSeenCommit() - if commit == nil || commit.Height != state.LastBlockHeight { - commit = cs.blockStore.LoadBlockCommit(state.LastBlockHeight) - } - - if commit == nil { + extCommit := cs.blockStore.LoadBlockExtendedCommit(state.LastBlockHeight) + if extCommit == nil { panic(fmt.Sprintf( "failed to reconstruct last commit; commit for height %v not found", state.LastBlockHeight, )) } - lastPrecommits := types.CommitToVoteSet(state.ChainID, commit, state.LastValidators) + lastPrecommits := extCommit.ToVoteSet(state.ChainID, state.LastValidators) if !lastPrecommits.HasTwoThirdsMajority() { panic("failed to reconstruct last commit; does not have +2/3 maj") } @@ -1401,16 +1397,17 @@ func (cs *State) createProposalBlock(ctx context.Context) (*types.Block, error) return nil, errors.New("entered createProposalBlock with privValidator being nil") } - var commit *types.Commit + // TODO(sergio): wouldn't it be easier if CreateProposalBlock accepted cs.LastCommit directly? + var lastExtCommit *types.ExtendedCommit switch { case cs.Height == cs.state.InitialHeight: // We're creating a proposal for the first block. // The commit is empty, but not nil. - commit = types.NewCommit(0, 0, types.BlockID{}, nil) + lastExtCommit = &types.ExtendedCommit{} case cs.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit - commit = cs.LastCommit.MakeCommit() + lastExtCommit = cs.LastCommit.MakeExtendedCommit() default: // This shouldn't happen. 
cs.logger.Error("propose step; cannot propose anything without commit for the previous block") @@ -1426,7 +1423,7 @@ func (cs *State) createProposalBlock(ctx context.Context) (*types.Block, error) proposerAddr := cs.privValidatorPubKey.Address() - ret, err := cs.blockExec.CreateProposalBlock(ctx, cs.Height, cs.state, commit, proposerAddr, cs.LastCommit.GetVotes()) + ret, err := cs.blockExec.CreateProposalBlock(ctx, cs.Height, cs.state, lastExtCommit, proposerAddr) if err != nil { panic(err) } @@ -1925,8 +1922,7 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) { // NOTE: the seenCommit is local justification to commit this block, // but may differ from the LastCommit included in the next block precommits := cs.Votes.Precommits(cs.CommitRound) - seenCommit := precommits.MakeCommit() - cs.blockStore.SaveBlock(block, blockParts, seenCommit) + cs.blockStore.SaveBlock(block, blockParts, precommits.MakeExtendedCommit()) } else { // Happens during replay if we already saved the block but didn't commit logger.Debug("calling finalizeCommit on already stored block", "height", block.Height) @@ -2028,7 +2024,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) { for i, val := range cs.LastValidators.Validators { commitSig := block.LastCommit.Signatures[i] - if commitSig.Absent() { + if commitSig.BlockIDFlag == types.BlockIDFlagAbsent { missingValidators++ missingValidatorsPower += val.VotingPower } @@ -2038,7 +2034,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) { "validator_address", val.Address.String(), } cs.metrics.ValidatorPower.With(label...).Set(float64(val.VotingPower)) - if commitSig.ForBlock() { + if commitSig.BlockIDFlag == types.BlockIDFlagCommit { cs.metrics.ValidatorLastSignedHeight.With(label...).Set(float64(height)) } else { cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1)) @@ -2341,8 +2337,9 @@ func (cs *State) addVote( return } - // Verify VoteExtension if precommit - if vote.Type == 
tmproto.PrecommitType { + // Verify VoteExtension if precommit and not nil + // https://github.com/tendermint/tendermint/issues/8487 + if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() { err := cs.blockExec.VerifyVoteExtension(ctx, vote) cs.metrics.MarkVoteExtensionReceived(err == nil) if err != nil { @@ -2497,15 +2494,15 @@ func (cs *State) signVote( // use our local precommit Timeout as the max wait time for getting a singed commit. The same goes for prevote. timeout := cs.voteTimeout(cs.Round) - switch msgType { - case tmproto.PrecommitType: - // if the signedMessage type is for a precommit, add VoteExtension + if msgType == tmproto.PrecommitType && !vote.BlockID.IsNil() { + // if the signedMessage type is for a non-nil precommit, add + // VoteExtension ext, err := cs.blockExec.ExtendVote(ctx, vote) if err != nil { return nil, err } vote.Extension = ext - default: + } else { timeout = time.Second } diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 93aa4a49d0..6fa69a1a3c 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -1950,7 +1950,7 @@ func TestFinalizeBlockCalled(t *testing.T) { expectCalled bool }{ { - name: "finalze block called when block committed", + name: "finalize block called when block committed", voteNil: false, expectCalled: true, }, @@ -1970,11 +1970,15 @@ func TestFinalizeBlockCalled(t *testing.T) { Status: abci.ResponseProcessProposal_ACCEPT, }, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, - }, nil) + // We only expect VerifyVoteExtension to be called on non-nil precommits. 
+ // https://github.com/tendermint/tendermint/issues/8487 + if !testCase.voteNil { + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil) + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ + Status: abci.ResponseVerifyVoteExtension_ACCEPT, + }, nil) + } m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() - m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil) m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index dcf44a5dfe..4047d3e7fb 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -249,8 +249,8 @@ func TestEvidencePoolUpdate(t *testing.T) { evidenceChainID, ) require.NoError(t, err) - lastCommit := makeCommit(height, val.PrivKey.PubKey().Address()) - block := types.MakeBlock(height+1, []types.Tx{}, lastCommit, []types.Evidence{ev}) + lastExtCommit := makeExtCommit(height, val.PrivKey.PubKey().Address()) + block := types.MakeBlock(height+1, []types.Tx{}, lastExtCommit.StripExtensions(), []types.Evidence{ev}) // update state (partially) state.LastBlockHeight = height + 1 @@ -568,8 +568,8 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.Blo blockStore := store.NewBlockStore(db) for i := int64(1); i <= state.LastBlockHeight; i++ { - lastCommit := makeCommit(i-1, valAddr) - block := sf.MakeBlock(state, i, lastCommit) + lastCommit := makeExtCommit(i-1, valAddr) + block := sf.MakeBlock(state, i, lastCommit.StripExtensions()) block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute) block.Header.Version = version.Consensus{Block: version.BlockProtocol, App: 1} @@ -579,22 +579,25 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr 
[]byte) (*store.Blo return nil, err } - seenCommit := makeCommit(i, valAddr) + seenCommit := makeExtCommit(i, valAddr) blockStore.SaveBlock(block, partSet, seenCommit) } return blockStore, nil } -func makeCommit(height int64, valAddr []byte) *types.Commit { - commitSigs := []types.CommitSig{{ - BlockIDFlag: types.BlockIDFlagCommit, - ValidatorAddress: valAddr, - Timestamp: defaultEvidenceTime, - Signature: []byte("Signature"), - }} - - return types.NewCommit(height, 0, types.BlockID{}, commitSigs) +func makeExtCommit(height int64, valAddr []byte) *types.ExtendedCommit { + return &types.ExtendedCommit{ + Height: height, + ExtendedSignatures: []types.ExtendedCommitSig{{ + CommitSig: types.CommitSig{ + BlockIDFlag: types.BlockIDFlagCommit, + ValidatorAddress: valAddr, + Timestamp: defaultEvidenceTime, + Signature: []byte("Signature"), + }, + }}, + } } func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence.Pool, types.MockPV, *eventbus.EventBus) { diff --git a/internal/evidence/verify_test.go b/internal/evidence/verify_test.go index b2056186f1..2ed84fa692 100644 --- a/internal/evidence/verify_test.go +++ b/internal/evidence/verify_test.go @@ -234,8 +234,9 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { // except the last validator vote twice blockID := factory.MakeBlockIDWithHash(conflictingHeader.Hash()) voteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) - commit, err := factory.MakeCommit(ctx, blockID, 10, 1, voteSet, conflictingPrivVals[:4], defaultEvidenceTime) + extCommit, err := factory.MakeExtendedCommit(ctx, blockID, 10, 1, voteSet, conflictingPrivVals[:4], defaultEvidenceTime) require.NoError(t, err) + commit := extCommit.StripExtensions() ev := &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ @@ -253,9 +254,10 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) 
trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) - trustedCommit, err := factory.MakeCommit(ctx, trustedBlockID, 10, 1, + trustedExtCommit, err := factory.MakeExtendedCommit(ctx, trustedBlockID, 10, 1, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) + trustedCommit := trustedExtCommit.StripExtensions() trustedSignedHeader := &types.SignedHeader{ Header: trustedHeader, @@ -335,8 +337,9 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { // except the last validator vote twice. However this time the commits are of different rounds. blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) voteSet := types.NewVoteSet(evidenceChainID, height, 0, tmproto.SignedMsgType(2), conflictingVals) - commit, err := factory.MakeCommit(ctx, blockID, height, 0, voteSet, conflictingPrivVals, defaultEvidenceTime) + extCommit, err := factory.MakeExtendedCommit(ctx, blockID, height, 0, voteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) + commit := extCommit.StripExtensions() ev := &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ @@ -354,9 +357,10 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) - trustedCommit, err := factory.MakeCommit(ctx, trustedBlockID, height, 1, + trustedExtCommit, err := factory.MakeExtendedCommit(ctx, trustedBlockID, height, 1, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) + trustedCommit := trustedExtCommit.StripExtensions() trustedSignedHeader := &types.SignedHeader{ Header: trustedHeader, @@ -550,8 +554,9 @@ func makeLunaticEvidence( blockID := factory.MakeBlockIDWithHash(conflictingHeader.Hash()) voteSet := types.NewVoteSet(evidenceChainID, height, 1, 
tmproto.SignedMsgType(2), conflictingVals) - commit, err := factory.MakeCommit(ctx, blockID, height, 1, voteSet, conflictingPrivVals, defaultEvidenceTime) + extCommit, err := factory.MakeExtendedCommit(ctx, blockID, height, 1, voteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) + commit := extCommit.StripExtensions() ev = &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ @@ -578,8 +583,9 @@ func makeLunaticEvidence( trustedBlockID := factory.MakeBlockIDWithHash(trustedHeader.Hash()) trustedVals, privVals := factory.ValidatorSet(ctx, t, totalVals, defaultVotingPower) trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), trustedVals) - trustedCommit, err := factory.MakeCommit(ctx, trustedBlockID, height, 1, trustedVoteSet, privVals, defaultEvidenceTime) + trustedExtCommit, err := factory.MakeExtendedCommit(ctx, trustedBlockID, height, 1, trustedVoteSet, privVals, defaultEvidenceTime) require.NoError(t, err) + trustedCommit := trustedExtCommit.StripExtensions() trusted = &types.LightBlock{ SignedHeader: &types.SignedHeader{ diff --git a/internal/state/execution.go b/internal/state/execution.go index cfacb816d7..2c88c793bd 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -1,13 +1,13 @@ package state import ( + "bytes" "context" "fmt" "time" abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/internal/eventbus" @@ -87,9 +87,8 @@ func (blockExec *BlockExecutor) CreateProposalBlock( ctx context.Context, height int64, state State, - commit *types.Commit, + lastExtCommit *types.ExtendedCommit, proposerAddr []byte, - votes []*types.Vote, ) (*types.Block, error) { maxBytes := state.ConsensusParams.Block.MaxBytes @@ -101,15 +100,15 @@ func 
(blockExec *BlockExecutor) CreateProposalBlock( maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size()) txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) + commit := lastExtCommit.StripExtensions() block := state.MakeBlock(height, txs, commit, evidence, proposerAddr) - localLastCommit := buildLastCommitInfo(block, blockExec.store, state.InitialHeight) rpp, err := blockExec.appClient.PrepareProposal( ctx, &abci.RequestPrepareProposal{ MaxTxBytes: maxDataBytes, Txs: block.Txs.ToSliceOfBytes(), - LocalLastCommit: extendedCommitInfo(localLastCommit, votes), + LocalLastCommit: buildExtendedCommitInfo(lastExtCommit, blockExec.store, state.InitialHeight), ByzantineValidators: block.Evidence.ToABCI(), Height: block.Height, Time: block.Time, @@ -381,14 +380,14 @@ func (blockExec *BlockExecutor) Commit( func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) abci.CommitInfo { if block.Height == initialHeight { - // there is no last commmit for the initial height. + // there is no last commit for the initial height. // return an empty value. return abci.CommitInfo{} } lastValSet, err := store.LoadValidators(block.Height - 1) if err != nil { - panic(err) + panic(fmt.Errorf("failed to load validator set at height %d: %w", block.Height-1, err)) } var ( @@ -410,7 +409,7 @@ func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) a commitSig := block.LastCommit.Signatures[i] votes[i] = abci.VoteInfo{ Validator: types.TM2PB.Validator(val), - SignedLastBlock: !commitSig.Absent(), + SignedLastBlock: commitSig.BlockIDFlag != types.BlockIDFlagAbsent, } } @@ -420,44 +419,69 @@ func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) a } } -// extendedCommitInfo expects a CommitInfo struct along with all of the -// original votes relating to that commit, including their vote extensions. The -// order of votes does not matter. 
-func extendedCommitInfo(c abci.CommitInfo, votes []*types.Vote) abci.ExtendedCommitInfo { - if len(c.Votes) != len(votes) { - panic(fmt.Sprintf("extendedCommitInfo: number of votes from commit differ from the number of votes supplied (%d != %d)", len(c.Votes), len(votes))) - } - votesByVal := make(map[string]*types.Vote) - for _, vote := range votes { - if vote != nil { - valAddr := vote.ValidatorAddress.String() - if _, ok := votesByVal[valAddr]; ok { - panic(fmt.Sprintf("extendedCommitInfo: found duplicate vote for validator with address %s", valAddr)) - } - votesByVal[valAddr] = vote - } +// buildExtendedCommitInfo populates an ABCI extended commit from the +// corresponding Tendermint extended commit ec, using the stored validator set +// from ec. It requires ec to include the original precommit votes along with +// the vote extensions from the last commit. +// +// For heights below the initial height, for which we do not have the required +// data, it returns an empty record. +// +// Assumes that the commit signatures are sorted according to validator index. +func buildExtendedCommitInfo(ec *types.ExtendedCommit, store Store, initialHeight int64) abci.ExtendedCommitInfo { + if ec.Height < initialHeight { + // There are no extended commits for heights below the initial height. + return abci.ExtendedCommitInfo{} + } + + valSet, err := store.LoadValidators(ec.Height) + if err != nil { + panic(fmt.Errorf("failed to load validator set at height %d, initial height %d: %w", ec.Height, initialHeight, err)) } - vs := make([]abci.ExtendedVoteInfo, len(c.Votes)) - for i := range vs { + + var ( + ecSize = ec.Size() + valSetLen = len(valSet.Validators) + ) + + // Ensure that the size of the validator set in the extended commit matches + // the size of the validator set in the state store. 
+ if ecSize != valSetLen { + panic(fmt.Errorf( + "extended commit size (%d) does not match validator set length (%d) at height %d\n\n%v\n\n%v", + ecSize, valSetLen, ec.Height, ec.ExtendedSignatures, valSet.Validators, + )) + } + + votes := make([]abci.ExtendedVoteInfo, ecSize) + for i, val := range valSet.Validators { + ecs := ec.ExtendedSignatures[i] + + // Absent signatures have empty validator addresses, but otherwise we + // expect the validator addresses to be the same. + if ecs.BlockIDFlag != types.BlockIDFlagAbsent && !bytes.Equal(ecs.ValidatorAddress, val.Address) { + panic(fmt.Errorf("validator address of extended commit signature in position %d (%s) does not match the corresponding validator's at height %d (%s)", + i, ecs.ValidatorAddress, ec.Height, val.Address, + )) + } + var ext []byte - // votes[i] will be nil if c.Votes[i].SignedLastBlock is false - if c.Votes[i].SignedLastBlock { - valAddr := crypto.Address(c.Votes[i].Validator.Address).String() - vote, ok := votesByVal[valAddr] - if !ok || vote == nil { - panic(fmt.Sprintf("extendedCommitInfo: validator with address %s signed last block, but could not find vote for it", valAddr)) - } - ext = vote.Extension + if ecs.BlockIDFlag == types.BlockIDFlagCommit { + // We only care about vote extensions if a validator has voted to + // commit. 
+ ext = ecs.Extension } - vs[i] = abci.ExtendedVoteInfo{ - Validator: c.Votes[i].Validator, - SignedLastBlock: c.Votes[i].SignedLastBlock, + + votes[i] = abci.ExtendedVoteInfo{ + Validator: types.TM2PB.Validator(val), + SignedLastBlock: ecs.BlockIDFlag != types.BlockIDFlagAbsent, VoteExtension: ext, } } + return abci.ExtendedCommitInfo{ - Round: c.Round, - Votes: vs, + Round: ec.Round, + Votes: votes, } } diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index 0937b99906..ffe9cb6f88 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -79,9 +79,10 @@ func TestApplyBlock(t *testing.T) { assert.EqualValues(t, 1, state.Version.Consensus.App, "App version wasn't updated") } -// TestFinalizeBlockDecidedLastCommit ensures we correctly send the DecidedLastCommit to the -// application. The test ensures that the DecidedLastCommit properly reflects -// which validators signed the preceding block. +// TestFinalizeBlockDecidedLastCommit ensures we correctly send the +// DecidedLastCommit to the application. The test ensures that the +// DecidedLastCommit properly reflects which validators signed the preceding +// block. 
func TestFinalizeBlockDecidedLastCommit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -96,7 +97,7 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) { state, stateDB, privVals := makeState(t, 7, 1) stateStore := sm.NewStore(stateDB) - absentSig := types.NewCommitSigAbsent() + absentSig := types.NewExtendedCommitSigAbsent() testCases := []struct { name string @@ -134,12 +135,12 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) { for idx, isAbsent := range tc.absentCommitSigs { if isAbsent { - lastCommit.Signatures[idx] = absentSig + lastCommit.ExtendedSignatures[idx] = absentSig } } // block for height 2 - block := sf.MakeBlock(state, 2, lastCommit) + block := sf.MakeBlock(state, 2, lastCommit.StripExtensions()) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} @@ -198,12 +199,15 @@ func TestFinalizeBlockByzantineValidators(t *testing.T) { ConflictingBlock: &types.LightBlock{ SignedHeader: &types.SignedHeader{ Header: header, - Commit: types.NewCommit(10, 0, makeBlockID(header.Hash(), 100, []byte("partshash")), []types.CommitSig{{ - BlockIDFlag: types.BlockIDFlagNil, - ValidatorAddress: crypto.AddressHash([]byte("validator_address")), - Timestamp: defaultEvidenceTime, - Signature: crypto.CRandBytes(types.MaxSignatureSize), - }}), + Commit: &types.Commit{ + Height: 10, + BlockID: makeBlockID(header.Hash(), 100, []byte("partshash")), + Signatures: []types.CommitSig{{ + BlockIDFlag: types.BlockIDFlagNil, + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), + Timestamp: defaultEvidenceTime, + Signature: crypto.CRandBytes(types.MaxSignatureSize)}}, + }, }, ValidatorSet: state.Validators, }, @@ -324,8 +328,10 @@ func TestProcessProposal(t *testing.T) { lastCommitSig = append(lastCommitSig, vote.CommitSig()) } - lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, lastCommitSig) - block1 := 
sf.MakeBlock(state, height, lastCommit) + block1 := sf.MakeBlock(state, height, &types.Commit{ + Height: height - 1, + Signatures: lastCommitSig, + }) block1.Txs = txs expectedRpp := &abci.RequestProcessProposal{ @@ -653,8 +659,8 @@ func TestEmptyPrepareProposal(t *testing.T) { sm.NopMetrics(), ) pa, _ := state.Validators.GetByIndex(0) - commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) - _, err = blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + _, err = blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.NoError(t, err) } @@ -708,8 +714,8 @@ func TestPrepareProposalErrorOnNonExistingRemoved(t *testing.T) { sm.NopMetrics(), ) pa, _ := state.Validators.GetByIndex(0) - commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) - block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.ErrorContains(t, err, "new transaction incorrectly marked as removed") require.Nil(t, block) @@ -764,8 +770,8 @@ func TestPrepareProposalRemoveTxs(t *testing.T) { sm.NopMetrics(), ) pa, _ := state.Validators.GetByIndex(0) - commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) - block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.NoError(t, err) require.Len(t, block.Data.Txs.ToSliceOfBytes(), len(trs)-2) @@ -823,8 +829,8 @@ func TestPrepareProposalAddedTxsIncluded(t *testing.T) { sm.NopMetrics(), ) pa, _ := 
state.Validators.GetByIndex(0) - commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) - block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.NoError(t, err) require.Equal(t, txs[0], block.Data.Txs[0]) @@ -879,8 +885,8 @@ func TestPrepareProposalReorderTxs(t *testing.T) { sm.NopMetrics(), ) pa, _ := state.Validators.GetByIndex(0) - commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) - block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.NoError(t, err) for i, tx := range block.Data.Txs { require.Equal(t, types.Tx(trs[i].Tx), tx) @@ -939,9 +945,8 @@ func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) { sm.NopMetrics(), ) pa, _ := state.Validators.GetByIndex(0) - commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) - - block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.ErrorContains(t, err, "transaction data size exceeds maximum") require.Nil(t, block, "") @@ -991,9 +996,8 @@ func TestPrepareProposalErrorOnPrepareProposalError(t *testing.T) { sm.NopMetrics(), ) pa, _ := state.Validators.GetByIndex(0) - commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) - - block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes) + commit, _ := makeValidCommit(ctx, t, height, 
types.BlockID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.Nil(t, block) require.ErrorContains(t, err, "an injected error") diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index 07dd0d865d..dec5afc667 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -39,7 +39,7 @@ func makeAndCommitGoodBlock( blockExec *sm.BlockExecutor, privVals map[string]types.PrivValidator, evidence []types.Evidence, -) (sm.State, types.BlockID, *types.Commit) { +) (sm.State, types.BlockID, *types.ExtendedCommit) { t.Helper() // A good block passes @@ -82,19 +82,23 @@ func makeValidCommit( blockID types.BlockID, vals *types.ValidatorSet, privVals map[string]types.PrivValidator, -) (*types.Commit, []*types.Vote) { +) (*types.ExtendedCommit, []*types.Vote) { t.Helper() - sigs := make([]types.CommitSig, vals.Size()) + sigs := make([]types.ExtendedCommitSig, vals.Size()) votes := make([]*types.Vote, vals.Size()) for i := 0; i < vals.Size(); i++ { _, val := vals.GetByIndex(int32(i)) vote, err := factory.MakeVote(ctx, privVals[val.Address.String()], chainID, int32(i), height, 0, 2, blockID, time.Now()) require.NoError(t, err) - sigs[i] = vote.CommitSig() + sigs[i] = vote.ExtendedCommitSig() votes[i] = vote } - return types.NewCommit(height, 0, blockID, sigs), votes + return &types.ExtendedCommit{ + Height: height, + BlockID: blockID, + ExtendedSignatures: sigs, + }, votes } func makeState(t *testing.T, nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValidator) { diff --git a/internal/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go index decf551abd..69abe39071 100644 --- a/internal/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -6,6 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" + indexer "github.com/tendermint/tendermint/internal/state/indexer" query 
"github.com/tendermint/tendermint/internal/pubsub/query" diff --git a/internal/state/mocks/block_store.go b/internal/state/mocks/block_store.go index 7cc7fa883c..4eafb12739 100644 --- a/internal/state/mocks/block_store.go +++ b/internal/state/mocks/block_store.go @@ -107,6 +107,22 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { return r0 } +// LoadBlockExtendedCommit provides a mock function with given fields: height +func (_m *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { + ret := _m.Called(height) + + var r0 *types.ExtendedCommit + if rf, ok := ret.Get(0).(func(int64) *types.ExtendedCommit); ok { + r0 = rf(height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ExtendedCommit) + } + } + + return r0 +} + // LoadBlockMeta provides a mock function with given fields: height func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { ret := _m.Called(height) @@ -193,7 +209,7 @@ func (_m *BlockStore) PruneBlocks(height int64) (uint64, error) { } // SaveBlock provides a mock function with given fields: block, blockParts, seenCommit -func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { +func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { _m.Called(block, blockParts, seenCommit) } diff --git a/internal/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go index 49633269b1..0ea3ba17b9 100644 --- a/internal/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -6,6 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/internal/state" testing "testing" diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index 9b41f3c1bc..1d9ef2f6ff 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -4,6 +4,7 @@ package mocks import ( mock 
"github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/internal/state" tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" diff --git a/internal/state/services.go b/internal/state/services.go index 5d04d2c822..35a91aa11d 100644 --- a/internal/state/services.go +++ b/internal/state/services.go @@ -26,7 +26,7 @@ type BlockStore interface { LoadBlockMeta(height int64) *types.BlockMeta LoadBlock(height int64) *types.Block - SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) + SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) PruneBlocks(height int64) (uint64, error) @@ -36,6 +36,7 @@ type BlockStore interface { LoadBlockCommit(height int64) *types.Commit LoadSeenCommit() *types.Commit + LoadBlockExtendedCommit(height int64) *types.ExtendedCommit } //----------------------------------------------------------------------------- diff --git a/internal/state/store.go b/internal/state/store.go index 87f5e0c4ff..2d2e4dc81e 100644 --- a/internal/state/store.go +++ b/internal/state/store.go @@ -26,6 +26,9 @@ const ( //------------------------------------------------------------------------ +// NB: Before modifying these, cross-check them with those in +// internal/store/store.go +// TODO(thane): Move these and the ones in internal/store/store.go to their own package. 
const ( // prefixes are unique across all tm db's prefixValidators = int64(5) diff --git a/internal/state/test/factory/block.go b/internal/state/test/factory/block.go index 1b33513638..0ccd46dcbd 100644 --- a/internal/state/test/factory/block.go +++ b/internal/state/test/factory/block.go @@ -63,7 +63,7 @@ func makeBlockAndPartSet( ) (*types.Block, *types.PartSet) { t.Helper() - lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil) + lastCommit := &types.Commit{Height: height - 1} if height > 1 { vote, err := factory.MakeVote( ctx, @@ -73,8 +73,12 @@ func makeBlockAndPartSet( lastBlockMeta.BlockID, time.Now()) require.NoError(t, err) - lastCommit = types.NewCommit(vote.Height, vote.Round, - lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) + lastCommit = &types.Commit{ + Height: vote.Height, + Round: vote.Round, + BlockID: lastBlock.LastBlockID, + Signatures: []types.CommitSig{vote.CommitSig()}, + } } block := state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address) diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index 376ce61bcb..b29cfd0f9c 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -65,7 +65,8 @@ func TestValidateBlockHeader(t *testing.T) { eventBus, sm.NopMetrics(), ) - lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) + lastCommit := &types.Commit{} + var lastExtCommit *types.ExtendedCommit // some bad values wrongHash := crypto.Checksum([]byte("this hash is wrong")) @@ -100,7 +101,7 @@ func TestValidateBlockHeader(t *testing.T) { {"Proposer invalid", func(block *types.Block) { block.ProposerAddress = []byte("wrong size") }}, {"first LastCommit contains signatures", func(block *types.Block) { - block.LastCommit = types.NewCommit(0, 0, types.BlockID{}, []types.CommitSig{types.NewCommitSigAbsent()}) + block.LastCommit = &types.Commit{Signatures: []types.CommitSig{types.NewCommitSigAbsent()}} block.LastCommitHash = 
block.LastCommit.Hash() }}, } @@ -121,8 +122,9 @@ func TestValidateBlockHeader(t *testing.T) { /* A good block passes */ - state, _, lastCommit = makeAndCommitGoodBlock(ctx, t, + state, _, lastExtCommit = makeAndCommitGoodBlock(ctx, t, state, height, lastCommit, state.Validators.GetProposer().Address, blockExec, privVals, nil) + lastCommit = lastExtCommit.StripExtensions() } nextHeight := validationTestsStopHeight @@ -169,8 +171,9 @@ func TestValidateBlockCommit(t *testing.T) { eventBus, sm.NopMetrics(), ) - lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) - wrongSigsCommit := types.NewCommit(1, 0, types.BlockID{}, nil) + lastCommit := &types.Commit{} + var lastExtCommit *types.ExtendedCommit + wrongSigsCommit := &types.Commit{Height: 1} badPrivVal := types.NewMockPV() for height := int64(1); height < validationTestsStopHeight; height++ { @@ -192,12 +195,12 @@ func TestValidateBlockCommit(t *testing.T) { time.Now(), ) require.NoError(t, err) - wrongHeightCommit := types.NewCommit( - wrongHeightVote.Height, - wrongHeightVote.Round, - state.LastBlockID, - []types.CommitSig{wrongHeightVote.CommitSig()}, - ) + wrongHeightCommit := &types.Commit{ + Height: wrongHeightVote.Height, + Round: wrongHeightVote.Round, + BlockID: state.LastBlockID, + Signatures: []types.CommitSig{wrongHeightVote.CommitSig()}, + } block := statefactory.MakeBlock(state, height, wrongHeightCommit) err = blockExec.ValidateBlock(ctx, state, block) _, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight) @@ -220,7 +223,7 @@ func TestValidateBlockCommit(t *testing.T) { A good block passes */ var blockID types.BlockID - state, blockID, lastCommit = makeAndCommitGoodBlock( + state, blockID, lastExtCommit = makeAndCommitGoodBlock( ctx, t, state, @@ -231,6 +234,7 @@ func TestValidateBlockCommit(t *testing.T) { privVals, nil, ) + lastCommit = lastExtCommit.StripExtensions() /* wrongSigsCommit is fine except for the extra bad precommit @@ -270,8 +274,12 @@ func TestValidateBlockCommit(t 
*testing.T) { goodVote.Signature, badVote.Signature = g.Signature, b.Signature - wrongSigsCommit = types.NewCommit(goodVote.Height, goodVote.Round, - blockID, []types.CommitSig{goodVote.CommitSig(), badVote.CommitSig()}) + wrongSigsCommit = &types.Commit{ + Height: goodVote.Height, + Round: goodVote.Round, + BlockID: blockID, + Signatures: []types.CommitSig{goodVote.CommitSig(), badVote.CommitSig()}, + } } } @@ -319,7 +327,8 @@ func TestValidateBlockEvidence(t *testing.T) { eventBus, sm.NopMetrics(), ) - lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) + lastCommit := &types.Commit{} + var lastExtCommit *types.ExtendedCommit for height := int64(1); height < validationTestsStopHeight; height++ { proposerAddr := state.Validators.GetProposer().Address @@ -364,7 +373,7 @@ func TestValidateBlockEvidence(t *testing.T) { evidence = append(evidence, newEv) } - state, _, lastCommit = makeAndCommitGoodBlock( + state, _, lastExtCommit = makeAndCommitGoodBlock( ctx, t, state, @@ -375,6 +384,7 @@ func TestValidateBlockEvidence(t *testing.T) { privVals, evidence, ) + lastCommit = lastExtCommit.StripExtensions() } } diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index 582ebcd9c4..82e4bd60eb 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -6,6 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/internal/state" testing "testing" diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index 55a9fcf8cd..904fb2b74b 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -856,12 +856,12 @@ func mockLB(ctx context.Context, t *testing.T, height int64, time time.Time, las header.ConsensusHash = types.DefaultConsensusParams().HashConsensusParams() lastBlockID = factory.MakeBlockIDWithHash(header.Hash()) voteSet := 
types.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, currentVals) - commit, err := factory.MakeCommit(ctx, lastBlockID, height, 0, voteSet, currentPrivVals, time) + extCommit, err := factory.MakeExtendedCommit(ctx, lastBlockID, height, 0, voteSet, currentPrivVals, time) require.NoError(t, err) return nextVals, nextPrivVals, &types.LightBlock{ SignedHeader: &types.SignedHeader{ Header: header, - Commit: commit, + Commit: extCommit.StripExtensions(), }, ValidatorSet: currentVals, } diff --git a/internal/store/store.go b/internal/store/store.go index eb03e5fe61..5617674a2f 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -273,11 +273,31 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { } commit, err := types.CommitFromProto(pbc) if err != nil { - panic(fmt.Errorf("error reading block commit: %w", err)) + panic(fmt.Errorf("converting commit to proto: %w", err)) } return commit } +func (bs *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { + pbec := new(tmproto.ExtendedCommit) + bz, err := bs.db.Get(extCommitKey(height)) + if err != nil { + panic(fmt.Errorf("fetching extended commit: %w", err)) + } + if len(bz) == 0 { + return nil + } + err = proto.Unmarshal(bz, pbec) + if err != nil { + panic(fmt.Errorf("decoding extended commit: %w", err)) + } + extCommit, err := types.ExtendedCommitFromProto(pbec) + if err != nil { + panic(fmt.Errorf("converting extended commit: %w", err)) + } + return extCommit +} + // LoadSeenCommit returns the last locally seen Commit before being // cannonicalized. 
This is useful when we've seen a commit, but there // has not yet been a new block at `height + 1` that includes this @@ -298,7 +318,7 @@ func (bs *BlockStore) LoadSeenCommit() *types.Commit { commit, err := types.CommitFromProto(pbc) if err != nil { - panic(fmt.Errorf("error from proto commit: %w", err)) + panic(fmt.Errorf("converting seen commit: %w", err)) } return commit } @@ -446,7 +466,7 @@ func (bs *BlockStore) batchDelete( // If all the nodes restart after committing a block, // we need this to reload the precommits to catch-up nodes to the // most recent height. Otherwise they'd stall at H-1. -func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { +func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { if block == nil { panic("BlockStore can only save a non-nil block") } @@ -462,6 +482,10 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s if !blockParts.IsComplete() { panic("BlockStore can only save complete block part sets") } + if height != seenCommit.Height { + panic(fmt.Sprintf("BlockStore cannot save seen commit of a different height (block: %d, commit: %d)", + height, seenCommit.Height)) + } // Save block parts. 
This must be done before the block meta, since callers // typically load the block meta first as an indication that the block exists @@ -494,12 +518,18 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s } // Save seen commit (seen +2/3 precommits for block) - pbsc := seenCommit.ToProto() + pbsc := seenCommit.StripExtensions().ToProto() seenCommitBytes := mustEncode(pbsc) if err := batch.Set(seenCommitKey(), seenCommitBytes); err != nil { panic(err) } + pbec := seenCommit.ToProto() + extCommitBytes := mustEncode(pbec) + if err := batch.Set(extCommitKey(height), extCommitBytes); err != nil { + panic(err) + } + if err := batch.WriteSync(); err != nil { panic(err) } @@ -579,6 +609,9 @@ func (bs *BlockStore) Close() error { //---------------------------------- KEY ENCODING ----------------------------------------- // key prefixes +// NB: Before modifying these, cross-check them with those in +// internal/state/store.go +// TODO(thane): Move these and the ones in internal/state/store.go to their own package. const ( // prefixes are unique across all tm db's prefixBlockMeta = int64(0) @@ -586,6 +619,7 @@ const ( prefixBlockCommit = int64(2) prefixSeenCommit = int64(3) prefixBlockHash = int64(4) + prefixExtCommit = int64(9) // 5..8 are used by state/store ) func blockMetaKey(height int64) []byte { @@ -635,6 +669,14 @@ func seenCommitKey() []byte { return key } +func extCommitKey(height int64) []byte { + key, err := orderedcode.Append(nil, prefixExtCommit, height) + if err != nil { + panic(err) + } + return key +} + func blockHashKey(hash []byte) []byte { key, err := orderedcode.Append(nil, prefixBlockHash, string(hash)) if err != nil { diff --git a/internal/store/store_test.go b/internal/store/store_test.go index 4fa577cc41..9df3eed9f4 100644 --- a/internal/store/store_test.go +++ b/internal/store/store_test.go @@ -2,7 +2,6 @@ package store import ( "fmt" - stdlog "log" "os" "runtime/debug" "strings" @@ -27,22 +26,25 @@ import ( // test. 
type cleanupFunc func() -// make a Commit with a single vote containing just the height and a timestamp -func makeTestCommit(height int64, timestamp time.Time) *types.Commit { - commitSigs := []types.CommitSig{{ - BlockIDFlag: types.BlockIDFlagCommit, - ValidatorAddress: tmrand.Bytes(crypto.AddressSize), - Timestamp: timestamp, - Signature: []byte("Signature"), +// make an extended commit with a single vote containing just the height and a +// timestamp +func makeTestExtCommit(height int64, timestamp time.Time) *types.ExtendedCommit { + extCommitSigs := []types.ExtendedCommitSig{{ + CommitSig: types.CommitSig{ + BlockIDFlag: types.BlockIDFlagCommit, + ValidatorAddress: tmrand.Bytes(crypto.AddressSize), + Timestamp: timestamp, + Signature: []byte("Signature"), + }, }} - return types.NewCommit( - height, - 0, - types.BlockID{ + return &types.ExtendedCommit{ + Height: height, + BlockID: types.BlockID{ Hash: crypto.CRandBytes(32), PartSetHeader: types.PartSetHeader{Hash: crypto.CRandBytes(32), Total: 2}, }, - commitSigs) + ExtendedSignatures: extCommitSigs, + } } func makeStateAndBlockStore(dir string) (sm.State, *BlockStore, cleanupFunc, error) { @@ -59,47 +61,11 @@ func makeStateAndBlockStore(dir string) (sm.State, *BlockStore, cleanupFunc, err return state, NewBlockStore(blockDB), func() { os.RemoveAll(cfg.RootDir) }, nil } -func freshBlockStore() (*BlockStore, dbm.DB) { +func newInMemoryBlockStore() (*BlockStore, dbm.DB) { db := dbm.NewMemDB() return NewBlockStore(db), db } -var ( - state sm.State - block *types.Block - partSet *types.PartSet - part1 *types.Part - part2 *types.Part - seenCommit1 *types.Commit -) - -func TestMain(m *testing.M) { - dir, err := os.MkdirTemp("", "store_test") - if err != nil { - stdlog.Fatal(err) - } - var cleanup cleanupFunc - - state, _, cleanup, err = makeStateAndBlockStore(dir) - if err != nil { - stdlog.Fatal(err) - } - - block = factory.MakeBlock(state, 1, new(types.Commit)) - - partSet, err = block.MakePartSet(2) - if err != nil 
{ - stdlog.Fatal(err) - } - part1 = partSet.GetPart(0) - part2 = partSet.GetPart(1) - seenCommit1 = makeTestCommit(10, tmtime.Now()) - code := m.Run() - cleanup() - os.RemoveAll(dir) // best-effort - os.Exit(code) -} - // TODO: This test should be simplified ... func TestBlockStoreSaveLoadBlock(t *testing.T) { state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir()) @@ -120,8 +86,10 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) validPartSet, err := block.MakePartSet(2) require.NoError(t, err) - seenCommit := makeTestCommit(10, tmtime.Now()) - bs.SaveBlock(block, partSet, seenCommit) + part2 := validPartSet.GetPart(1) + + seenCommit := makeTestExtCommit(block.Header.Height, tmtime.Now()) + bs.SaveBlock(block, validPartSet, seenCommit) require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed") require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed") @@ -139,11 +107,11 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } // End of setup, test data - commitAtH10 := makeTestCommit(10, tmtime.Now()) + commitAtH10 := makeTestExtCommit(10, tmtime.Now()).StripExtensions() tuples := []struct { block *types.Block parts *types.PartSet - seenCommit *types.Commit + seenCommit *types.ExtendedCommit wantPanic string wantErr bool @@ -156,7 +124,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, }, { @@ -172,10 +140,10 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { ChainID: "block_test", Time: tmtime.Now(), ProposerAddress: tmrand.Bytes(crypto.AddressSize)}, - makeTestCommit(5, tmtime.Now()), + makeTestExtCommit(5, tmtime.Now()).StripExtensions(), ), parts: validPartSet, - seenCommit: makeTestCommit(5, tmtime.Now()), + seenCommit: makeTestExtCommit(5, tmtime.Now()), }, { @@ -187,7 +155,7 @@ func 
TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, corruptCommitInDB: true, // Corrupt the DB's commit entry wantPanic: "error reading block commit", }, @@ -195,7 +163,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, wantPanic: "unmarshal to tmproto.BlockMeta", corruptBlockInDB: true, // Corrupt the DB's block entry }, @@ -203,7 +171,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, // Expecting no error and we want a nil back eraseSeenCommitInDB: true, @@ -212,7 +180,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, corruptSeenCommitInDB: true, wantPanic: "error reading block seen commit", @@ -221,7 +189,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, // Expecting no error and we want a nil back eraseCommitInDB: true, @@ -238,7 +206,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { for i, tuple := range tuples { tuple := tuple - bs, db := freshBlockStore() + bs, db := newInMemoryBlockStore() // SaveBlock res, err, panicErr := doFn(func() (interface{}, error) { bs.SaveBlock(tuple.block, tuple.parts, tuple.seenCommit) @@ -324,7 +292,7 @@ func TestLoadBaseMeta(t *testing.T) { block := factory.MakeBlock(state, h, new(types.Commit)) partSet, err := block.MakePartSet(2) require.NoError(t, err) - seenCommit := makeTestCommit(h, tmtime.Now()) + seenCommit := makeTestExtCommit(h, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) } @@ -338,13 +306,19 @@ func TestLoadBaseMeta(t *testing.T) 
{ } func TestLoadBlockPart(t *testing.T) { - bs, db := freshBlockStore() - height, index := int64(10), 1 + cfg, err := config.ResetTestRoot(t.TempDir(), "blockchain_reactor_test") + require.NoError(t, err) + + bs, db := newInMemoryBlockStore() + const height, index = 10, 1 loadPart := func() (interface{}, error) { part := bs.LoadBlockPart(height, index) return part, nil } + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) + require.NoError(t, err) + // Initially no contents. // 1. Requesting for a non-existent block shouldn't fail res, _, panicErr := doFn(loadPart) @@ -352,13 +326,18 @@ func TestLoadBlockPart(t *testing.T) { require.Nil(t, res, "a non-existent block part should return nil") // 2. Next save a corrupted block then try to load it - err := db.Set(blockPartKey(height, index), []byte("Tendermint")) + err = db.Set(blockPartKey(height, index), []byte("Tendermint")) require.NoError(t, err) res, _, panicErr = doFn(loadPart) require.NotNil(t, panicErr, "expecting a non-nil panic") require.Contains(t, panicErr.Error(), "unmarshal to tmproto.Part failed") // 3. 
A good block serialized and saved to the DB should be retrievable + block := factory.MakeBlock(state, height, new(types.Commit)) + partSet, err := block.MakePartSet(2) + require.NoError(t, err) + part1 := partSet.GetPart(0) + pb1, err := part1.ToProto() require.NoError(t, err) err = db.Set(blockPartKey(height, index), mustEncode(pb1)) @@ -391,7 +370,7 @@ func TestPruneBlocks(t *testing.T) { block := factory.MakeBlock(state, h, new(types.Commit)) partSet, err := block.MakePartSet(2) require.NoError(t, err) - seenCommit := makeTestCommit(h, tmtime.Now()) + seenCommit := makeTestExtCommit(h, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) } @@ -452,7 +431,7 @@ func TestPruneBlocks(t *testing.T) { } func TestLoadBlockMeta(t *testing.T) { - bs, db := freshBlockStore() + bs, db := newInMemoryBlockStore() height := int64(10) loadMeta := func() (interface{}, error) { meta := bs.LoadBlockMeta(height) @@ -499,7 +478,7 @@ func TestBlockFetchAtHeight(t *testing.T) { partSet, err := block.MakePartSet(2) require.NoError(t, err) - seenCommit := makeTestCommit(10, tmtime.Now()) + seenCommit := makeTestExtCommit(block.Header.Height, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") @@ -521,9 +500,12 @@ func TestBlockFetchAtHeight(t *testing.T) { } func TestSeenAndCanonicalCommit(t *testing.T) { - bs, _ := freshBlockStore() + state, store, cleanup, err := makeStateAndBlockStore(t.TempDir()) + defer cleanup() + require.NoError(t, err) + loadCommit := func() (interface{}, error) { - meta := bs.LoadSeenCommit() + meta := store.LoadSeenCommit() return meta, nil } @@ -536,19 +518,19 @@ func TestSeenAndCanonicalCommit(t *testing.T) { // produce a few blocks and check that the correct seen and cannoncial commits // are persisted. 
for h := int64(3); h <= 5; h++ { - blockCommit := makeTestCommit(h-1, tmtime.Now()) + blockCommit := makeTestExtCommit(h-1, tmtime.Now()).StripExtensions() block := factory.MakeBlock(state, h, blockCommit) partSet, err := block.MakePartSet(2) require.NoError(t, err) - seenCommit := makeTestCommit(h, tmtime.Now()) - bs.SaveBlock(block, partSet, seenCommit) - c3 := bs.LoadSeenCommit() + seenCommit := makeTestExtCommit(h, tmtime.Now()) + store.SaveBlock(block, partSet, seenCommit) + c3 := store.LoadSeenCommit() require.NotNil(t, c3) require.Equal(t, h, c3.Height) - require.Equal(t, seenCommit.Hash(), c3.Hash()) - c5 := bs.LoadBlockCommit(h) + require.Equal(t, seenCommit.StripExtensions().Hash(), c3.Hash()) + c5 := store.LoadBlockCommit(h) require.Nil(t, c5) - c6 := bs.LoadBlockCommit(h - 1) + c6 := store.LoadBlockCommit(h - 1) require.Equal(t, blockCommit.Hash(), c6.Hash()) } diff --git a/internal/test/factory/commit.go b/internal/test/factory/commit.go index bc40224997..1bd1c7ae6c 100644 --- a/internal/test/factory/commit.go +++ b/internal/test/factory/commit.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/types" ) -func MakeCommit(ctx context.Context, blockID types.BlockID, height int64, round int32, voteSet *types.VoteSet, validators []types.PrivValidator, now time.Time) (*types.Commit, error) { +func MakeExtendedCommit(ctx context.Context, blockID types.BlockID, height int64, round int32, voteSet *types.VoteSet, validators []types.PrivValidator, now time.Time) (*types.ExtendedCommit, error) { // all sign for i := 0; i < len(validators); i++ { pubKey, err := validators[i].GetPubKey(ctx) @@ -37,5 +37,5 @@ func MakeCommit(ctx context.Context, blockID types.BlockID, height int64, round } } - return voteSet.MakeCommit(), nil + return voteSet.MakeExtendedCommit(), nil } diff --git a/light/helpers_test.go b/light/helpers_test.go index d93735bb78..9187cc3c30 100644 --- a/light/helpers_test.go +++ b/light/helpers_test.go @@ -72,7 +72,12 @@ func (pkz privKeys) 
signHeader(t testing.TB, header *types.Header, valSet *types commitSigs[vote.ValidatorIndex] = vote.CommitSig() } - return types.NewCommit(header.Height, 1, blockID, commitSigs) + return &types.Commit{ + Height: header.Height, + Round: 1, + BlockID: blockID, + Signatures: commitSigs, + } } func makeVote(t testing.TB, header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey, blockID types.BlockID) *types.Vote { diff --git a/node/node_test.go b/node/node_test.go index c5ff1f014e..b1d7a94818 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -35,6 +35,7 @@ import ( "github.com/tendermint/tendermint/libs/service" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -339,13 +340,13 @@ func TestCreateProposalBlock(t *testing.T) { sm.NopMetrics(), ) - commit := types.NewCommit(height-1, 0, types.BlockID{}, nil) + extCommit := &types.ExtendedCommit{Height: height - 1} block, err := blockExec.CreateProposalBlock( ctx, height, - state, commit, + state, + extCommit, proposerAddr, - nil, ) require.NoError(t, err) @@ -419,13 +420,13 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { sm.NopMetrics(), ) - commit := types.NewCommit(height-1, 0, types.BlockID{}, nil) + extCommit := &types.ExtendedCommit{Height: height - 1} block, err := blockExec.CreateProposalBlock( ctx, height, - state, commit, + state, + extCommit, proposerAddr, - nil, ) require.NoError(t, err) @@ -525,38 +526,41 @@ func TestMaxProposalBlockSize(t *testing.T) { } state.ChainID = maxChainID - cs := types.CommitSig{ - BlockIDFlag: types.BlockIDFlagNil, - ValidatorAddress: crypto.AddressHash([]byte("validator_address")), - Timestamp: timestamp, - Signature: crypto.CRandBytes(types.MaxSignatureSize), - } - - commit := &types.Commit{ - Height: math.MaxInt64, - Round: math.MaxInt32, - BlockID: blockID, - } - - votes := make([]*types.Vote, 
types.MaxVotesCount) + voteSet := types.NewVoteSet(state.ChainID, math.MaxInt64-1, math.MaxInt32, tmproto.PrecommitType, state.Validators) // add maximum amount of signatures to a single commit for i := 0; i < types.MaxVotesCount; i++ { pubKey, err := privVals[i].GetPubKey(ctx) require.NoError(t, err) - votes[i] = &types.Vote{ - ValidatorAddress: pubKey.Address(), + valIdx, val := state.Validators.GetByAddress(pubKey.Address()) + require.NotNil(t, val) + + vote := &types.Vote{ + Type: tmproto.PrecommitType, + Height: math.MaxInt64 - 1, + Round: math.MaxInt32, + BlockID: blockID, + Timestamp: timestamp, + ValidatorAddress: val.Address, + ValidatorIndex: valIdx, + Extension: []byte("extension"), } - commit.Signatures = append(commit.Signatures, cs) + vpb := vote.ToProto() + require.NoError(t, privVals[i].SignVote(ctx, state.ChainID, vpb)) + vote.Signature = vpb.Signature + vote.ExtensionSignature = vpb.ExtensionSignature + + added, err := voteSet.AddVote(vote) + require.NoError(t, err) + require.True(t, added) } block, err := blockExec.CreateProposalBlock( ctx, math.MaxInt64, state, - commit, + voteSet.MakeExtendedCommit(), proposerAddr, - votes, ) require.NoError(t, err) partSet, err := block.MakePartSet(types.BlockPartSizeBytes) diff --git a/privval/file.go b/privval/file.go index bf58036326..a5d696093c 100644 --- a/privval/file.go +++ b/privval/file.go @@ -375,17 +375,17 @@ func (pv *FilePV) signVote(chainID string, vote *tmproto.Vote) error { // Vote extensions are non-deterministic, so it is possible that an // application may have created a different extension. We therefore always - // re-sign the vote extensions of precommits. For prevotes, the extension - // signature will always be empty. + // re-sign the vote extensions of precommits. For prevotes and nil + // precommits, the extension signature will always be empty. 
var extSig []byte - if vote.Type == tmproto.PrecommitType { + if vote.Type == tmproto.PrecommitType && !types.ProtoBlockIDIsNil(&vote.BlockID) { extSignBytes := types.VoteExtensionSignBytes(chainID, vote) extSig, err = pv.Key.PrivKey.Sign(extSignBytes) if err != nil { return err } } else if len(vote.Extension) > 0 { - return errors.New("unexpected vote extension - extensions are only allowed in precommits") + return errors.New("unexpected vote extension - extensions are only allowed in non-nil precommits") } // We might crash before writing to the wal, diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index c002003228..8757f8ab3e 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -116,7 +116,8 @@ func (m *NoBlockResponse) GetHeight() int64 { // BlockResponse returns block to the requested type BlockResponse struct { - Block *types.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Block *types.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + ExtCommit *types.ExtendedCommit `protobuf:"bytes,2,opt,name=ext_commit,json=extCommit,proto3" json:"ext_commit,omitempty"` } func (m *BlockResponse) Reset() { *m = BlockResponse{} } @@ -159,6 +160,13 @@ func (m *BlockResponse) GetBlock() *types.Block { return nil } +func (m *BlockResponse) GetExtCommit() *types.ExtendedCommit { + if m != nil { + return m.ExtCommit + } + return nil +} + // StatusRequest requests the status of a peer. 
type StatusRequest struct { } @@ -385,30 +393,33 @@ func init() { func init() { proto.RegisterFile("tendermint/blocksync/types.proto", fileDescriptor_19b397c236e0fa07) } var fileDescriptor_19b397c236e0fa07 = []byte{ - // 368 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x4d, 0x4f, 0xfa, 0x40, - 0x10, 0xc6, 0xdb, 0x7f, 0x81, 0x7f, 0x32, 0x50, 0x1a, 0x1b, 0xa3, 0xc4, 0x98, 0x86, 0xd4, 0x97, - 0xe8, 0xc1, 0x36, 0xc1, 0xa3, 0xc6, 0x03, 0x27, 0x4c, 0x7c, 0x49, 0x4a, 0xbc, 0x78, 0x21, 0x14, - 0x37, 0x40, 0x94, 0x2e, 0x32, 0xdb, 0x03, 0xdf, 0xc2, 0x2f, 0xe0, 0xf7, 0xf1, 0xc8, 0xd1, 0xa3, - 0x81, 0x2f, 0x62, 0x98, 0x2d, 0x65, 0x69, 0xb0, 0xb7, 0xdd, 0xe9, 0x33, 0xbf, 0x79, 0xfa, 0x64, - 0x16, 0xea, 0x82, 0x45, 0x2f, 0x6c, 0x32, 0x1a, 0x46, 0xc2, 0x0f, 0xdf, 0x78, 0xef, 0x15, 0xa7, - 0x51, 0xcf, 0x17, 0xd3, 0x31, 0x43, 0x6f, 0x3c, 0xe1, 0x82, 0xdb, 0xbb, 0x6b, 0x85, 0x97, 0x2a, - 0x0e, 0x0e, 0x95, 0x3e, 0x52, 0xcb, 0x6e, 0xd9, 0xe3, 0x9e, 0x42, 0xa5, 0xb9, 0xbc, 0x06, 0xec, - 0x3d, 0x66, 0x28, 0xec, 0x3d, 0x28, 0x0d, 0xd8, 0xb0, 0x3f, 0x10, 0x35, 0xbd, 0xae, 0x9f, 0x19, - 0x41, 0x72, 0x73, 0xcf, 0xc1, 0x7a, 0xe0, 0x89, 0x12, 0xc7, 0x3c, 0x42, 0xf6, 0xa7, 0xf4, 0x06, - 0xcc, 0x4d, 0xe1, 0x05, 0x14, 0x69, 0x24, 0xe9, 0xca, 0x8d, 0x7d, 0x4f, 0xf1, 0x29, 0xfd, 0x4b, - 0xbd, 0x54, 0xb9, 0x16, 0x98, 0x6d, 0xd1, 0x15, 0x31, 0x26, 0x9e, 0xdc, 0x6b, 0xa8, 0xae, 0x0a, - 0xf9, 0xa3, 0x6d, 0x1b, 0x0a, 0x61, 0x17, 0x59, 0xed, 0x1f, 0x55, 0xe9, 0xec, 0x7e, 0x1a, 0xf0, - 0xff, 0x9e, 0x21, 0x76, 0xfb, 0xcc, 0xbe, 0x05, 0x93, 0x66, 0x74, 0x26, 0x12, 0x9d, 0x38, 0x72, - 0xbd, 0x6d, 0xc9, 0x79, 0x6a, 0x30, 0x2d, 0x2d, 0xa8, 0x84, 0x6a, 0x50, 0x6d, 0xd8, 0x89, 0x78, - 0x67, 0x45, 0x93, 0xbe, 0x68, 0x6e, 0xb9, 0x71, 0xb2, 0x1d, 0x97, 0xc9, 0xaf, 0xa5, 0x05, 0x56, - 0x94, 0x89, 0xf4, 0x0e, 0xaa, 0x19, 0xa2, 0x41, 0xc4, 0xa3, 0x5c, 0x83, 0x29, 0xcf, 0x0c, 0xb3, - 0x34, 0xa4, 0xdc, 0xd2, 0xdf, 0x2d, 0xe4, 0xd1, 0x36, 0x42, 0x5f, 
0xd2, 0x50, 0x2d, 0xd8, 0x8f, - 0x60, 0xa5, 0xb4, 0xc4, 0x5c, 0x91, 0x70, 0xc7, 0xf9, 0xb8, 0xd4, 0x5d, 0x15, 0x37, 0x2a, 0xcd, - 0x22, 0x18, 0x18, 0x8f, 0x9a, 0x4f, 0x5f, 0x73, 0x47, 0x9f, 0xcd, 0x1d, 0xfd, 0x67, 0xee, 0xe8, - 0x1f, 0x0b, 0x47, 0x9b, 0x2d, 0x1c, 0xed, 0x7b, 0xe1, 0x68, 0xcf, 0x57, 0xfd, 0xa1, 0x18, 0xc4, - 0xa1, 0xd7, 0xe3, 0x23, 0x5f, 0x5d, 0xe2, 0xf5, 0x91, 0x76, 0xd8, 0xdf, 0xf6, 0x30, 0xc2, 0x12, - 0x7d, 0xbb, 0xfc, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x1c, 0xa3, 0x45, 0x37, 0x03, 0x00, 0x00, + // 404 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0xcd, 0x4a, 0xeb, 0x50, + 0x10, 0xc7, 0x93, 0x9b, 0xb6, 0x97, 0x3b, 0xb7, 0x69, 0xb8, 0xe1, 0xa2, 0x45, 0x24, 0x94, 0xf8, + 0x81, 0x2e, 0x4c, 0x40, 0x97, 0x0a, 0x42, 0x45, 0xa8, 0xe0, 0x07, 0xa4, 0xb8, 0x71, 0x53, 0x9a, + 0xf4, 0xd0, 0x06, 0x4d, 0x4e, 0xed, 0x39, 0x81, 0x76, 0xe5, 0x2b, 0xf8, 0x02, 0xbe, 0x8f, 0xcb, + 0x2e, 0x5d, 0x4a, 0xfb, 0x22, 0xd2, 0x39, 0x69, 0x9a, 0xc6, 0x98, 0xdd, 0x64, 0xce, 0x7f, 0x7e, + 0xf9, 0xcf, 0x0c, 0x03, 0x0d, 0x4e, 0xc2, 0x1e, 0x19, 0x05, 0x7e, 0xc8, 0x6d, 0xf7, 0x89, 0x7a, + 0x8f, 0x6c, 0x12, 0x7a, 0x36, 0x9f, 0x0c, 0x09, 0xb3, 0x86, 0x23, 0xca, 0xa9, 0xfe, 0x7f, 0xa5, + 0xb0, 0x12, 0xc5, 0xd6, 0x76, 0xaa, 0x0e, 0xd5, 0xa2, 0x5a, 0xd4, 0xe4, 0xbc, 0xa6, 0x88, 0xe6, + 0x3e, 0x54, 0x9b, 0x0b, 0xb1, 0x43, 0x9e, 0x23, 0xc2, 0xb8, 0xbe, 0x01, 0x95, 0x01, 0xf1, 0xfb, + 0x03, 0x5e, 0x97, 0x1b, 0xf2, 0x81, 0xe2, 0xc4, 0x5f, 0xe6, 0x21, 0x68, 0xb7, 0x34, 0x56, 0xb2, + 0x21, 0x0d, 0x19, 0xf9, 0x51, 0xfa, 0x02, 0xea, 0xba, 0xf0, 0x08, 0xca, 0x68, 0x08, 0x75, 0x7f, + 0x8f, 0x37, 0xad, 0x54, 0x17, 0xc2, 0x8b, 0xd0, 0x0b, 0x95, 0x7e, 0x0e, 0x40, 0xc6, 0xbc, 0xe3, + 0xd1, 0x20, 0xf0, 0x79, 0xfd, 0x17, 0xd6, 0x34, 0xbe, 0xd7, 0x5c, 0x8e, 0x31, 0xd5, 0xbb, 0x40, + 0x9d, 0xf3, 0x87, 0x8c, 0xb9, 0x08, 0x4d, 0x0d, 0xd4, 0x36, 0xef, 0xf2, 0x88, 0xc5, 0x4d, 0x99, + 0x67, 0x50, 0x5b, 0x26, 0x8a, 0xbd, 0xeb, 0x3a, 0x94, 
0xdc, 0x2e, 0x23, 0xf8, 0x57, 0xc5, 0xc1, + 0xd8, 0x7c, 0x53, 0xe0, 0xf7, 0x0d, 0x61, 0xac, 0xdb, 0x27, 0xfa, 0x15, 0xa8, 0x68, 0xb2, 0x33, + 0x12, 0xe8, 0xb8, 0x25, 0xd3, 0xca, 0x5b, 0x8c, 0x95, 0x9e, 0x6c, 0x4b, 0x72, 0xaa, 0x6e, 0x7a, + 0xd2, 0x6d, 0xf8, 0x17, 0xd2, 0xce, 0x92, 0x26, 0x7c, 0xc5, 0xdd, 0xee, 0xe5, 0xe3, 0x32, 0x0b, + 0x68, 0x49, 0x8e, 0x16, 0x66, 0x76, 0x72, 0x0d, 0xb5, 0x0c, 0x51, 0x41, 0xe2, 0x4e, 0xa1, 0xc1, + 0x84, 0xa7, 0xba, 0x59, 0x1a, 0xc3, 0xb9, 0x25, 0xed, 0x96, 0x8a, 0x68, 0x6b, 0x43, 0x5f, 0xd0, + 0x58, 0x3a, 0xa1, 0xdf, 0x81, 0x96, 0xd0, 0x62, 0x73, 0x65, 0xc4, 0xed, 0x16, 0xe3, 0x12, 0x77, + 0x35, 0xb6, 0x96, 0x69, 0x96, 0x41, 0x61, 0x51, 0xd0, 0xbc, 0x7f, 0x9f, 0x19, 0xf2, 0x74, 0x66, + 0xc8, 0x9f, 0x33, 0x43, 0x7e, 0x9d, 0x1b, 0xd2, 0x74, 0x6e, 0x48, 0x1f, 0x73, 0x43, 0x7a, 0x38, + 0xed, 0xfb, 0x7c, 0x10, 0xb9, 0x96, 0x47, 0x03, 0x3b, 0x7d, 0x05, 0xab, 0x10, 0x8f, 0xc0, 0xce, + 0xbb, 0x3b, 0xb7, 0x82, 0x6f, 0x27, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x13, 0x4f, 0x42, + 0x96, 0x03, 0x00, 0x00, } func (m *BlockRequest) Marshal() (dAtA []byte, err error) { @@ -487,6 +498,18 @@ func (m *BlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ExtCommit != nil { + { + size, err := m.ExtCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.Block != nil { { size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) @@ -740,6 +763,10 @@ func (m *BlockResponse) Size() (n int) { l = m.Block.Size() n += 1 + l + sovTypes(uint64(l)) } + if m.ExtCommit != nil { + l = m.ExtCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -1049,6 +1076,42 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtCommit", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExtCommit == nil { + m.ExtCommit = &types.ExtendedCommit{} + } + if err := m.ExtCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/proto/tendermint/blocksync/types.proto b/proto/tendermint/blocksync/types.proto index 4febfd145c..67da76dce0 100644 --- a/proto/tendermint/blocksync/types.proto +++ b/proto/tendermint/blocksync/types.proto @@ -4,6 +4,7 @@ package tendermint.blocksync; option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync"; import "tendermint/types/block.proto"; +import "tendermint/types/types.proto"; // BlockRequest requests a block for a specific height message BlockRequest { @@ -19,6 +20,7 @@ message NoBlockResponse { // BlockResponse returns block to the requested message BlockResponse { tendermint.types.Block block = 1; + tendermint.types.ExtendedCommit ext_commit = 2; } // StatusRequest requests the status of a peer. 
diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index 1904afcd1e..fcfbc01f54 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -726,6 +726,162 @@ func (m *CommitSig) GetSignature() []byte { return nil } +type ExtendedCommit struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + BlockID BlockID `protobuf:"bytes,3,opt,name=block_id,json=blockId,proto3" json:"block_id"` + ExtendedSignatures []ExtendedCommitSig `protobuf:"bytes,4,rep,name=extended_signatures,json=extendedSignatures,proto3" json:"extended_signatures"` +} + +func (m *ExtendedCommit) Reset() { *m = ExtendedCommit{} } +func (m *ExtendedCommit) String() string { return proto.CompactTextString(m) } +func (*ExtendedCommit) ProtoMessage() {} +func (*ExtendedCommit) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{8} +} +func (m *ExtendedCommit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtendedCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExtendedCommit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExtendedCommit) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendedCommit.Merge(m, src) +} +func (m *ExtendedCommit) XXX_Size() int { + return m.Size() +} +func (m *ExtendedCommit) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendedCommit.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtendedCommit proto.InternalMessageInfo + +func (m *ExtendedCommit) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *ExtendedCommit) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *ExtendedCommit) GetBlockID() BlockID { + 
if m != nil { + return m.BlockID + } + return BlockID{} +} + +func (m *ExtendedCommit) GetExtendedSignatures() []ExtendedCommitSig { + if m != nil { + return m.ExtendedSignatures + } + return nil +} + +// ExtendedCommitSig retains all the same fields as CommitSig but adds vote +// extension-related fields. +type ExtendedCommitSig struct { + BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=tendermint.types.BlockIDFlag" json:"block_id_flag,omitempty"` + ValidatorAddress []byte `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` + Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` + // Vote extension data + Extension []byte `protobuf:"bytes,5,opt,name=extension,proto3" json:"extension,omitempty"` + // Vote extension signature + ExtensionSignature []byte `protobuf:"bytes,6,opt,name=extension_signature,json=extensionSignature,proto3" json:"extension_signature,omitempty"` +} + +func (m *ExtendedCommitSig) Reset() { *m = ExtendedCommitSig{} } +func (m *ExtendedCommitSig) String() string { return proto.CompactTextString(m) } +func (*ExtendedCommitSig) ProtoMessage() {} +func (*ExtendedCommitSig) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{9} +} +func (m *ExtendedCommitSig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtendedCommitSig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExtendedCommitSig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExtendedCommitSig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendedCommitSig.Merge(m, src) +} +func (m *ExtendedCommitSig) XXX_Size() int { + 
return m.Size() +} +func (m *ExtendedCommitSig) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendedCommitSig.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtendedCommitSig proto.InternalMessageInfo + +func (m *ExtendedCommitSig) GetBlockIdFlag() BlockIDFlag { + if m != nil { + return m.BlockIdFlag + } + return BlockIDFlagUnknown +} + +func (m *ExtendedCommitSig) GetValidatorAddress() []byte { + if m != nil { + return m.ValidatorAddress + } + return nil +} + +func (m *ExtendedCommitSig) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *ExtendedCommitSig) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +func (m *ExtendedCommitSig) GetExtension() []byte { + if m != nil { + return m.Extension + } + return nil +} + +func (m *ExtendedCommitSig) GetExtensionSignature() []byte { + if m != nil { + return m.ExtensionSignature + } + return nil +} + type Proposal struct { Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` @@ -740,7 +896,7 @@ func (m *Proposal) Reset() { *m = Proposal{} } func (m *Proposal) String() string { return proto.CompactTextString(m) } func (*Proposal) ProtoMessage() {} func (*Proposal) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{8} + return fileDescriptor_d3a6e55e2345de56, []int{10} } func (m *Proposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -827,7 +983,7 @@ func (m *SignedHeader) Reset() { *m = SignedHeader{} } func (m *SignedHeader) String() string { return proto.CompactTextString(m) } func (*SignedHeader) ProtoMessage() {} func (*SignedHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{9} + return fileDescriptor_d3a6e55e2345de56, []int{11} } func (m *SignedHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -879,7 
+1035,7 @@ func (m *LightBlock) Reset() { *m = LightBlock{} } func (m *LightBlock) String() string { return proto.CompactTextString(m) } func (*LightBlock) ProtoMessage() {} func (*LightBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{10} + return fileDescriptor_d3a6e55e2345de56, []int{12} } func (m *LightBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -933,7 +1089,7 @@ func (m *BlockMeta) Reset() { *m = BlockMeta{} } func (m *BlockMeta) String() string { return proto.CompactTextString(m) } func (*BlockMeta) ProtoMessage() {} func (*BlockMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{11} + return fileDescriptor_d3a6e55e2345de56, []int{13} } func (m *BlockMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1002,7 +1158,7 @@ func (m *TxProof) Reset() { *m = TxProof{} } func (m *TxProof) String() string { return proto.CompactTextString(m) } func (*TxProof) ProtoMessage() {} func (*TxProof) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{12} + return fileDescriptor_d3a6e55e2345de56, []int{14} } func (m *TxProof) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1063,6 +1219,8 @@ func init() { proto.RegisterType((*Vote)(nil), "tendermint.types.Vote") proto.RegisterType((*Commit)(nil), "tendermint.types.Commit") proto.RegisterType((*CommitSig)(nil), "tendermint.types.CommitSig") + proto.RegisterType((*ExtendedCommit)(nil), "tendermint.types.ExtendedCommit") + proto.RegisterType((*ExtendedCommitSig)(nil), "tendermint.types.ExtendedCommitSig") proto.RegisterType((*Proposal)(nil), "tendermint.types.Proposal") proto.RegisterType((*SignedHeader)(nil), "tendermint.types.SignedHeader") proto.RegisterType((*LightBlock)(nil), "tendermint.types.LightBlock") @@ -1073,91 +1231,95 @@ func init() { func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } var fileDescriptor_d3a6e55e2345de56 = []byte{ - 
// 1341 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x73, 0xdb, 0xc4, - 0x17, 0x8f, 0x62, 0x25, 0xb6, 0x9f, 0xed, 0xc4, 0xd9, 0x6f, 0xda, 0xba, 0x6e, 0xe3, 0x68, 0xfc, - 0x1d, 0x20, 0x2d, 0x8c, 0x52, 0x52, 0x86, 0x1f, 0x07, 0x0e, 0xb6, 0x93, 0xb6, 0x9e, 0x26, 0x8e, - 0x91, 0xdd, 0x32, 0x70, 0xd1, 0xc8, 0xd6, 0xd6, 0x16, 0x95, 0x25, 0x8d, 0x76, 0x1d, 0x92, 0xfe, - 0x05, 0x4c, 0x4e, 0x3d, 0x71, 0xcb, 0x09, 0x0e, 0xdc, 0x39, 0x70, 0x65, 0x38, 0xf5, 0xd8, 0x1b, - 0x5c, 0x28, 0x4c, 0x3a, 0xc3, 0xdf, 0xc1, 0xec, 0x0f, 0xc9, 0x72, 0x9c, 0x40, 0xa7, 0xd3, 0xe1, - 0xe2, 0xd1, 0xbe, 0xf7, 0x79, 0x6f, 0xdf, 0x8f, 0xcf, 0xee, 0x5b, 0xc3, 0x75, 0x8a, 0x3d, 0x1b, - 0x87, 0x23, 0xc7, 0xa3, 0x9b, 0xf4, 0x28, 0xc0, 0x44, 0xfc, 0xea, 0x41, 0xe8, 0x53, 0x1f, 0x15, - 0x27, 0x5a, 0x9d, 0xcb, 0xcb, 0xab, 0x03, 0x7f, 0xe0, 0x73, 0xe5, 0x26, 0xfb, 0x12, 0xb8, 0xf2, - 0xfa, 0xc0, 0xf7, 0x07, 0x2e, 0xde, 0xe4, 0xab, 0xde, 0xf8, 0xd1, 0x26, 0x75, 0x46, 0x98, 0x50, - 0x6b, 0x14, 0x48, 0xc0, 0x5a, 0x62, 0x9b, 0x7e, 0x78, 0x14, 0x50, 0x9f, 0x61, 0xfd, 0x47, 0x52, - 0x5d, 0x49, 0xa8, 0x0f, 0x70, 0x48, 0x1c, 0xdf, 0x4b, 0xc6, 0x51, 0xd6, 0x66, 0xa2, 0x3c, 0xb0, - 0x5c, 0xc7, 0xb6, 0xa8, 0x1f, 0x0a, 0x44, 0xf5, 0x13, 0x28, 0xb4, 0xad, 0x90, 0x76, 0x30, 0xbd, - 0x87, 0x2d, 0x1b, 0x87, 0x68, 0x15, 0x16, 0xa8, 0x4f, 0x2d, 0xb7, 0xa4, 0x68, 0xca, 0x46, 0xc1, - 0x10, 0x0b, 0x84, 0x40, 0x1d, 0x5a, 0x64, 0x58, 0x9a, 0xd7, 0x94, 0x8d, 0xbc, 0xc1, 0xbf, 0xab, - 0x43, 0x50, 0x99, 0x29, 0xb3, 0x70, 0x3c, 0x1b, 0x1f, 0x46, 0x16, 0x7c, 0xc1, 0xa4, 0xbd, 0x23, - 0x8a, 0x89, 0x34, 0x11, 0x0b, 0xf4, 0x01, 0x2c, 0xf0, 0xf8, 0x4b, 0x29, 0x4d, 0xd9, 0xc8, 0x6d, - 0x95, 0xf4, 0x44, 0xa1, 0x44, 0x7e, 0x7a, 0x9b, 0xe9, 0xeb, 0xea, 0xb3, 0x17, 0xeb, 0x73, 0x86, - 0x00, 0x57, 0x5d, 0x48, 0xd7, 0x5d, 0xbf, 0xff, 0xb8, 0xb9, 0x1d, 0x07, 0xa2, 0x4c, 0x02, 0x41, - 0x7b, 0xb0, 0x1c, 0x58, 0x21, 0x35, 0x09, 0xa6, 0xe6, 0x90, 0x67, 0xc1, 0x37, 0xcd, 
0x6d, 0xad, - 0xeb, 0x67, 0xfb, 0xa0, 0x4f, 0x25, 0x2b, 0x77, 0x29, 0x04, 0x49, 0x61, 0xf5, 0x2f, 0x15, 0x16, - 0x65, 0x31, 0x3e, 0x85, 0xb4, 0x2c, 0x2b, 0xdf, 0x30, 0xb7, 0xb5, 0x96, 0xf4, 0x28, 0x55, 0x7a, - 0xc3, 0xf7, 0x08, 0xf6, 0xc8, 0x98, 0x48, 0x7f, 0x91, 0x0d, 0x7a, 0x1b, 0x32, 0xfd, 0xa1, 0xe5, - 0x78, 0xa6, 0x63, 0xf3, 0x88, 0xb2, 0xf5, 0xdc, 0xe9, 0x8b, 0xf5, 0x74, 0x83, 0xc9, 0x9a, 0xdb, - 0x46, 0x9a, 0x2b, 0x9b, 0x36, 0xba, 0x0c, 0x8b, 0x43, 0xec, 0x0c, 0x86, 0x94, 0x97, 0x25, 0x65, - 0xc8, 0x15, 0xfa, 0x18, 0x54, 0x46, 0x88, 0x92, 0xca, 0xf7, 0x2e, 0xeb, 0x82, 0x2d, 0x7a, 0xc4, - 0x16, 0xbd, 0x1b, 0xb1, 0xa5, 0x9e, 0x61, 0x1b, 0x3f, 0xfd, 0x63, 0x5d, 0x31, 0xb8, 0x05, 0x6a, - 0x40, 0xc1, 0xb5, 0x08, 0x35, 0x7b, 0xac, 0x6c, 0x6c, 0xfb, 0x05, 0xee, 0xe2, 0xea, 0x6c, 0x41, - 0x64, 0x61, 0x65, 0xe8, 0x39, 0x66, 0x25, 0x44, 0x36, 0xda, 0x80, 0x22, 0x77, 0xd2, 0xf7, 0x47, - 0x23, 0x87, 0x9a, 0xbc, 0xee, 0x8b, 0xbc, 0xee, 0x4b, 0x4c, 0xde, 0xe0, 0xe2, 0x7b, 0xac, 0x03, - 0xd7, 0x20, 0x6b, 0x5b, 0xd4, 0x12, 0x90, 0x34, 0x87, 0x64, 0x98, 0x80, 0x2b, 0xdf, 0x81, 0xe5, - 0x98, 0x75, 0x44, 0x40, 0x32, 0xc2, 0xcb, 0x44, 0xcc, 0x81, 0xb7, 0x60, 0xd5, 0xc3, 0x87, 0xd4, - 0x3c, 0x8b, 0xce, 0x72, 0x34, 0x62, 0xba, 0x87, 0xd3, 0x16, 0x6f, 0xc1, 0x52, 0x3f, 0x2a, 0xbe, - 0xc0, 0x02, 0xc7, 0x16, 0x62, 0x29, 0x87, 0x5d, 0x85, 0x8c, 0x15, 0x04, 0x02, 0x90, 0xe3, 0x80, - 0xb4, 0x15, 0x04, 0x5c, 0x75, 0x13, 0x56, 0x78, 0x8e, 0x21, 0x26, 0x63, 0x97, 0x4a, 0x27, 0x79, - 0x8e, 0x59, 0x66, 0x0a, 0x43, 0xc8, 0x39, 0xf6, 0xff, 0x50, 0xc0, 0x07, 0x8e, 0x8d, 0xbd, 0x3e, - 0x16, 0xb8, 0x02, 0xc7, 0xe5, 0x23, 0x21, 0x07, 0xdd, 0x80, 0x62, 0x10, 0xfa, 0x81, 0x4f, 0x70, - 0x68, 0x5a, 0xb6, 0x1d, 0x62, 0x42, 0x4a, 0x4b, 0xc2, 0x5f, 0x24, 0xaf, 0x09, 0x71, 0xb5, 0x04, - 0xea, 0xb6, 0x45, 0x2d, 0x54, 0x84, 0x14, 0x3d, 0x24, 0x25, 0x45, 0x4b, 0x6d, 0xe4, 0x0d, 0xf6, - 0x59, 0xfd, 0x29, 0x05, 0xea, 0x43, 0x9f, 0x62, 0x74, 0x1b, 0x54, 0xd6, 0x26, 0xce, 0xbe, 0xa5, - 0xf3, 0xf8, 0xdc, 0x71, 
0x06, 0x1e, 0xb6, 0xf7, 0xc8, 0xa0, 0x7b, 0x14, 0x60, 0x83, 0x83, 0x13, - 0x74, 0x9a, 0x9f, 0xa2, 0xd3, 0x2a, 0x2c, 0x84, 0xfe, 0xd8, 0xb3, 0x39, 0xcb, 0x16, 0x0c, 0xb1, - 0x40, 0x3b, 0x90, 0x89, 0x59, 0xa2, 0xfe, 0x1b, 0x4b, 0x96, 0x19, 0x4b, 0x18, 0x87, 0xa5, 0xc0, - 0x48, 0xf7, 0x24, 0x59, 0xea, 0x90, 0x8d, 0x2f, 0x2f, 0xc9, 0xb6, 0x57, 0x23, 0xec, 0xc4, 0x0c, - 0xbd, 0x0b, 0x2b, 0x71, 0xef, 0xe3, 0xe2, 0x09, 0xc6, 0x15, 0x63, 0x85, 0xac, 0xde, 0x14, 0xad, - 0x4c, 0x71, 0x01, 0xa5, 0x79, 0x5e, 0x13, 0x5a, 0x35, 0xf9, 0x4d, 0x74, 0x1d, 0xb2, 0xc4, 0x19, - 0x78, 0x16, 0x1d, 0x87, 0x58, 0x32, 0x6f, 0x22, 0x60, 0x5a, 0x7c, 0x48, 0xb1, 0xc7, 0x0f, 0xb9, - 0x60, 0xda, 0x44, 0x80, 0x36, 0xe1, 0x7f, 0xf1, 0xc2, 0x9c, 0x78, 0x11, 0x2c, 0x43, 0xb1, 0xaa, - 0x13, 0x69, 0xaa, 0x3f, 0x2b, 0xb0, 0x28, 0x0e, 0x46, 0xa2, 0x0d, 0xca, 0xf9, 0x6d, 0x98, 0xbf, - 0xa8, 0x0d, 0xa9, 0xd7, 0x6f, 0x43, 0x0d, 0x20, 0x0e, 0x93, 0x94, 0x54, 0x2d, 0xb5, 0x91, 0xdb, - 0xba, 0x36, 0xeb, 0x48, 0x84, 0xd8, 0x71, 0x06, 0xf2, 0xdc, 0x27, 0x8c, 0xaa, 0xbf, 0x2b, 0x90, - 0x8d, 0xf5, 0xa8, 0x06, 0x85, 0x28, 0x2e, 0xf3, 0x91, 0x6b, 0x0d, 0x24, 0x15, 0xd7, 0x2e, 0x0c, - 0xee, 0x8e, 0x6b, 0x0d, 0x8c, 0x9c, 0x8c, 0x87, 0x2d, 0xce, 0x6f, 0xeb, 0xfc, 0x05, 0x6d, 0x9d, - 0xe2, 0x51, 0xea, 0xf5, 0x78, 0x34, 0xd5, 0x71, 0xf5, 0x4c, 0xc7, 0xab, 0x3f, 0xce, 0x43, 0xa6, - 0xcd, 0x8f, 0xa2, 0xe5, 0xfe, 0x17, 0x07, 0xec, 0x1a, 0x64, 0x03, 0xdf, 0x35, 0x85, 0x46, 0xe5, - 0x9a, 0x4c, 0xe0, 0xbb, 0xc6, 0x4c, 0xdb, 0x17, 0xde, 0xd0, 0xe9, 0x5b, 0x7c, 0x03, 0x55, 0x4b, - 0x9f, 0xad, 0x5a, 0x08, 0x79, 0x51, 0x0a, 0x39, 0x1a, 0x6f, 0xb1, 0x1a, 0xf0, 0x59, 0xab, 0xcc, - 0x8e, 0x72, 0x11, 0xb6, 0x40, 0x1a, 0x12, 0xc7, 0x2c, 0xc4, 0x24, 0x91, 0xd3, 0xb9, 0x74, 0x11, - 0x2d, 0x0d, 0x89, 0xab, 0x7e, 0xab, 0x00, 0xec, 0xb2, 0xca, 0xf2, 0x7c, 0xd9, 0x50, 0x23, 0x3c, - 0x04, 0x73, 0x6a, 0xe7, 0xca, 0x45, 0x4d, 0x93, 0xfb, 0xe7, 0x49, 0x32, 0xee, 0x06, 0x14, 0x26, - 0x64, 0x24, 0x38, 0x0a, 0xe6, 0x1c, 0x27, 0xf1, 0xac, 0xe9, 
0x60, 0x6a, 0xe4, 0x0f, 0x12, 0xab, - 0xea, 0x2f, 0x0a, 0x64, 0x79, 0x4c, 0x7b, 0x98, 0x5a, 0x53, 0x3d, 0x54, 0x5e, 0xbf, 0x87, 0x6b, - 0x00, 0xc2, 0x0d, 0x71, 0x9e, 0x60, 0xc9, 0xac, 0x2c, 0x97, 0x74, 0x9c, 0x27, 0x18, 0x7d, 0x18, - 0x17, 0x3c, 0xf5, 0xcf, 0x05, 0x97, 0x47, 0x3a, 0x2a, 0xfb, 0x15, 0x48, 0x7b, 0xe3, 0x91, 0xc9, - 0x26, 0x8c, 0x2a, 0xd8, 0xea, 0x8d, 0x47, 0xdd, 0x43, 0x52, 0xfd, 0x0a, 0xd2, 0xdd, 0x43, 0xfe, - 0xda, 0x62, 0x14, 0x0d, 0x7d, 0x5f, 0x8e, 0x78, 0xf1, 0xb4, 0xca, 0x30, 0x01, 0x9f, 0x68, 0x08, - 0x54, 0x36, 0xcb, 0xa3, 0xb7, 0x1f, 0xfb, 0x46, 0xfa, 0x2b, 0xbe, 0xe3, 0xe4, 0x0b, 0xee, 0xe6, - 0xaf, 0x0a, 0xe4, 0x12, 0xf7, 0x03, 0x7a, 0x1f, 0x2e, 0xd5, 0x77, 0xf7, 0x1b, 0xf7, 0xcd, 0xe6, - 0xb6, 0x79, 0x67, 0xb7, 0x76, 0xd7, 0x7c, 0xd0, 0xba, 0xdf, 0xda, 0xff, 0xbc, 0x55, 0x9c, 0x2b, - 0x5f, 0x3e, 0x3e, 0xd1, 0x50, 0x02, 0xfb, 0xc0, 0x7b, 0xec, 0xf9, 0x5f, 0xb3, 0xab, 0x78, 0x75, - 0xda, 0xa4, 0x56, 0xef, 0xec, 0xb4, 0xba, 0x45, 0xa5, 0x7c, 0xe9, 0xf8, 0x44, 0x5b, 0x49, 0x58, - 0xd4, 0x7a, 0x04, 0x7b, 0x74, 0xd6, 0xa0, 0xb1, 0xbf, 0xb7, 0xd7, 0xec, 0x16, 0xe7, 0x67, 0x0c, - 0xe4, 0x85, 0x7d, 0x03, 0x56, 0xa6, 0x0d, 0x5a, 0xcd, 0xdd, 0x62, 0xaa, 0x8c, 0x8e, 0x4f, 0xb4, - 0xa5, 0x04, 0xba, 0xe5, 0xb8, 0xe5, 0xcc, 0x37, 0xdf, 0x55, 0xe6, 0x7e, 0xf8, 0xbe, 0xa2, 0xb0, - 0xcc, 0x0a, 0x53, 0x77, 0x04, 0x7a, 0x0f, 0xae, 0x74, 0x9a, 0x77, 0x5b, 0x3b, 0xdb, 0xe6, 0x5e, - 0xe7, 0xae, 0xd9, 0xfd, 0xa2, 0xbd, 0x93, 0xc8, 0x6e, 0xf9, 0xf8, 0x44, 0xcb, 0xc9, 0x94, 0x2e, - 0x42, 0xb7, 0x8d, 0x9d, 0x87, 0xfb, 0xdd, 0x9d, 0xa2, 0x22, 0xd0, 0xed, 0x10, 0x1f, 0xf8, 0x14, - 0x73, 0xf4, 0x2d, 0xb8, 0x7a, 0x0e, 0x3a, 0x4e, 0x6c, 0xe5, 0xf8, 0x44, 0x2b, 0xb4, 0x43, 0x2c, - 0xce, 0x0f, 0xb7, 0xd0, 0xa1, 0x34, 0x6b, 0xb1, 0xdf, 0xde, 0xef, 0xd4, 0x76, 0x8b, 0x5a, 0xb9, - 0x78, 0x7c, 0xa2, 0xe5, 0xa3, 0xcb, 0x90, 0xe1, 0x27, 0x99, 0xd5, 0x3f, 0x7b, 0x76, 0x5a, 0x51, - 0x9e, 0x9f, 0x56, 0x94, 0x3f, 0x4f, 0x2b, 0xca, 0xd3, 0x97, 0x95, 0xb9, 0xe7, 0x2f, 0x2b, 0x73, - 
0xbf, 0xbd, 0xac, 0xcc, 0x7d, 0xf9, 0xd1, 0xc0, 0xa1, 0xc3, 0x71, 0x4f, 0xef, 0xfb, 0xa3, 0xcd, - 0xe4, 0x3f, 0x8c, 0xc9, 0xa7, 0xf8, 0xa7, 0x73, 0xf6, 0xdf, 0x47, 0x6f, 0x91, 0xcb, 0x6f, 0xff, - 0x1d, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xc0, 0x81, 0x37, 0x3e, 0x0d, 0x00, 0x00, + // 1396 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x4b, 0x6f, 0x1b, 0xd5, + 0x17, 0xcf, 0xd8, 0xe3, 0xd7, 0xb1, 0x9d, 0x38, 0xf7, 0x9f, 0xb6, 0xae, 0xdb, 0x38, 0x96, 0xab, + 0x3f, 0xa4, 0x05, 0x39, 0x25, 0x45, 0x3c, 0x16, 0x2c, 0x6c, 0xc7, 0x6d, 0xad, 0x26, 0x8e, 0x19, + 0xbb, 0x45, 0x74, 0x33, 0x1a, 0x7b, 0x6e, 0xed, 0xa1, 0xf6, 0xcc, 0x68, 0xe6, 0x3a, 0x38, 0xfd, + 0x04, 0x28, 0xab, 0xae, 0xd8, 0x65, 0x05, 0x0b, 0xf6, 0x20, 0xb1, 0x45, 0xac, 0xba, 0xec, 0x0e, + 0x36, 0x14, 0x48, 0x25, 0x3e, 0x07, 0xba, 0x8f, 0x19, 0xcf, 0xc4, 0x31, 0x54, 0x51, 0x05, 0x12, + 0x9b, 0x68, 0xee, 0x39, 0xbf, 0x73, 0xee, 0x79, 0xfc, 0xee, 0xc9, 0x31, 0x5c, 0x25, 0xd8, 0xd4, + 0xb1, 0x33, 0x36, 0x4c, 0xb2, 0x45, 0x0e, 0x6d, 0xec, 0xf2, 0xbf, 0x15, 0xdb, 0xb1, 0x88, 0x85, + 0x72, 0x33, 0x6d, 0x85, 0xc9, 0x0b, 0x6b, 0x03, 0x6b, 0x60, 0x31, 0xe5, 0x16, 0xfd, 0xe2, 0xb8, + 0xc2, 0xc6, 0xc0, 0xb2, 0x06, 0x23, 0xbc, 0xc5, 0x4e, 0xbd, 0xc9, 0xa3, 0x2d, 0x62, 0x8c, 0xb1, + 0x4b, 0xb4, 0xb1, 0x2d, 0x00, 0xeb, 0x81, 0x6b, 0xfa, 0xce, 0xa1, 0x4d, 0x2c, 0x8a, 0xb5, 0x1e, + 0x09, 0x75, 0x31, 0xa0, 0x3e, 0xc0, 0x8e, 0x6b, 0x58, 0x66, 0x30, 0x8e, 0x42, 0x69, 0x2e, 0xca, + 0x03, 0x6d, 0x64, 0xe8, 0x1a, 0xb1, 0x1c, 0x8e, 0x28, 0x7f, 0x08, 0xd9, 0xb6, 0xe6, 0x90, 0x0e, + 0x26, 0x77, 0xb1, 0xa6, 0x63, 0x07, 0xad, 0x41, 0x8c, 0x58, 0x44, 0x1b, 0xe5, 0xa5, 0x92, 0xb4, + 0x99, 0x55, 0xf8, 0x01, 0x21, 0x90, 0x87, 0x9a, 0x3b, 0xcc, 0x47, 0x4a, 0xd2, 0x66, 0x46, 0x61, + 0xdf, 0xe5, 0x21, 0xc8, 0xd4, 0x94, 0x5a, 0x18, 0xa6, 0x8e, 0xa7, 0x9e, 0x05, 0x3b, 0x50, 0x69, + 0xef, 0x90, 0x60, 0x57, 0x98, 0xf0, 0x03, 0x7a, 0x17, 0x62, 0x2c, 0xfe, 0x7c, 0xb4, 0x24, 0x6d, + 0xa6, 
0xb7, 0xf3, 0x95, 0x40, 0xa1, 0x78, 0x7e, 0x95, 0x36, 0xd5, 0xd7, 0xe4, 0x67, 0x2f, 0x36, + 0x96, 0x14, 0x0e, 0x2e, 0x8f, 0x20, 0x51, 0x1b, 0x59, 0xfd, 0xc7, 0xcd, 0x1d, 0x3f, 0x10, 0x69, + 0x16, 0x08, 0xda, 0x83, 0x15, 0x5b, 0x73, 0x88, 0xea, 0x62, 0xa2, 0x0e, 0x59, 0x16, 0xec, 0xd2, + 0xf4, 0xf6, 0x46, 0xe5, 0x74, 0x1f, 0x2a, 0xa1, 0x64, 0xc5, 0x2d, 0x59, 0x3b, 0x28, 0x2c, 0xff, + 0x21, 0x43, 0x5c, 0x14, 0xe3, 0x23, 0x48, 0x88, 0xb2, 0xb2, 0x0b, 0xd3, 0xdb, 0xeb, 0x41, 0x8f, + 0x42, 0x55, 0xa9, 0x5b, 0xa6, 0x8b, 0x4d, 0x77, 0xe2, 0x0a, 0x7f, 0x9e, 0x0d, 0x7a, 0x03, 0x92, + 0xfd, 0xa1, 0x66, 0x98, 0xaa, 0xa1, 0xb3, 0x88, 0x52, 0xb5, 0xf4, 0xc9, 0x8b, 0x8d, 0x44, 0x9d, + 0xca, 0x9a, 0x3b, 0x4a, 0x82, 0x29, 0x9b, 0x3a, 0xba, 0x08, 0xf1, 0x21, 0x36, 0x06, 0x43, 0xc2, + 0xca, 0x12, 0x55, 0xc4, 0x09, 0x7d, 0x00, 0x32, 0x25, 0x44, 0x5e, 0x66, 0x77, 0x17, 0x2a, 0x9c, + 0x2d, 0x15, 0x8f, 0x2d, 0x95, 0xae, 0xc7, 0x96, 0x5a, 0x92, 0x5e, 0xfc, 0xf4, 0xd7, 0x0d, 0x49, + 0x61, 0x16, 0xa8, 0x0e, 0xd9, 0x91, 0xe6, 0x12, 0xb5, 0x47, 0xcb, 0x46, 0xaf, 0x8f, 0x31, 0x17, + 0x97, 0xe7, 0x0b, 0x22, 0x0a, 0x2b, 0x42, 0x4f, 0x53, 0x2b, 0x2e, 0xd2, 0xd1, 0x26, 0xe4, 0x98, + 0x93, 0xbe, 0x35, 0x1e, 0x1b, 0x44, 0x65, 0x75, 0x8f, 0xb3, 0xba, 0x2f, 0x53, 0x79, 0x9d, 0x89, + 0xef, 0xd2, 0x0e, 0x5c, 0x81, 0x94, 0xae, 0x11, 0x8d, 0x43, 0x12, 0x0c, 0x92, 0xa4, 0x02, 0xa6, + 0x7c, 0x13, 0x56, 0x7c, 0xd6, 0xb9, 0x1c, 0x92, 0xe4, 0x5e, 0x66, 0x62, 0x06, 0xbc, 0x09, 0x6b, + 0x26, 0x9e, 0x12, 0xf5, 0x34, 0x3a, 0xc5, 0xd0, 0x88, 0xea, 0x1e, 0x84, 0x2d, 0xfe, 0x0f, 0xcb, + 0x7d, 0xaf, 0xf8, 0x1c, 0x0b, 0x0c, 0x9b, 0xf5, 0xa5, 0x0c, 0x76, 0x19, 0x92, 0x9a, 0x6d, 0x73, + 0x40, 0x9a, 0x01, 0x12, 0x9a, 0x6d, 0x33, 0xd5, 0x0d, 0x58, 0x65, 0x39, 0x3a, 0xd8, 0x9d, 0x8c, + 0x88, 0x70, 0x92, 0x61, 0x98, 0x15, 0xaa, 0x50, 0xb8, 0x9c, 0x61, 0xaf, 0x41, 0x16, 0x1f, 0x18, + 0x3a, 0x36, 0xfb, 0x98, 0xe3, 0xb2, 0x0c, 0x97, 0xf1, 0x84, 0x0c, 0x74, 0x1d, 0x72, 0xb6, 0x63, + 0xd9, 0x96, 0x8b, 0x1d, 0x55, 0xd3, 0x75, 
0x07, 0xbb, 0x6e, 0x7e, 0x99, 0xfb, 0xf3, 0xe4, 0x55, + 0x2e, 0x2e, 0xe7, 0x41, 0xde, 0xd1, 0x88, 0x86, 0x72, 0x10, 0x25, 0x53, 0x37, 0x2f, 0x95, 0xa2, + 0x9b, 0x19, 0x85, 0x7e, 0x96, 0xbf, 0x8f, 0x82, 0xfc, 0xc0, 0x22, 0x18, 0xdd, 0x02, 0x99, 0xb6, + 0x89, 0xb1, 0x6f, 0xf9, 0x2c, 0x3e, 0x77, 0x8c, 0x81, 0x89, 0xf5, 0x3d, 0x77, 0xd0, 0x3d, 0xb4, + 0xb1, 0xc2, 0xc0, 0x01, 0x3a, 0x45, 0x42, 0x74, 0x5a, 0x83, 0x98, 0x63, 0x4d, 0x4c, 0x9d, 0xb1, + 0x2c, 0xa6, 0xf0, 0x03, 0x6a, 0x40, 0xd2, 0x67, 0x89, 0xfc, 0x77, 0x2c, 0x59, 0xa1, 0x2c, 0xa1, + 0x1c, 0x16, 0x02, 0x25, 0xd1, 0x13, 0x64, 0xa9, 0x41, 0xca, 0x1f, 0x5e, 0x82, 0x6d, 0xaf, 0x46, + 0xd8, 0x99, 0x19, 0x7a, 0x0b, 0x56, 0xfd, 0xde, 0xfb, 0xc5, 0xe3, 0x8c, 0xcb, 0xf9, 0x0a, 0x51, + 0xbd, 0x10, 0xad, 0x54, 0x3e, 0x80, 0x12, 0x2c, 0xaf, 0x19, 0xad, 0x9a, 0x6c, 0x12, 0x5d, 0x85, + 0x94, 0x6b, 0x0c, 0x4c, 0x8d, 0x4c, 0x1c, 0x2c, 0x98, 0x37, 0x13, 0x50, 0x2d, 0x9e, 0x12, 0x6c, + 0xb2, 0x47, 0xce, 0x99, 0x36, 0x13, 0xa0, 0x2d, 0xf8, 0x9f, 0x7f, 0x50, 0x67, 0x5e, 0x38, 0xcb, + 0x90, 0xaf, 0xea, 0x78, 0x9a, 0xf2, 0x0f, 0x12, 0xc4, 0xf9, 0xc3, 0x08, 0xb4, 0x41, 0x3a, 0xbb, + 0x0d, 0x91, 0x45, 0x6d, 0x88, 0x9e, 0xbf, 0x0d, 0x55, 0x00, 0x3f, 0x4c, 0x37, 0x2f, 0x97, 0xa2, + 0x9b, 0xe9, 0xed, 0x2b, 0xf3, 0x8e, 0x78, 0x88, 0x1d, 0x63, 0x20, 0xde, 0x7d, 0xc0, 0xa8, 0xfc, + 0x8b, 0x04, 0x29, 0x5f, 0x8f, 0xaa, 0x90, 0xf5, 0xe2, 0x52, 0x1f, 0x8d, 0xb4, 0x81, 0xa0, 0xe2, + 0xfa, 0xc2, 0xe0, 0x6e, 0x8f, 0xb4, 0x81, 0x92, 0x16, 0xf1, 0xd0, 0xc3, 0xd9, 0x6d, 0x8d, 0x2c, + 0x68, 0x6b, 0x88, 0x47, 0xd1, 0xf3, 0xf1, 0x28, 0xd4, 0x71, 0xf9, 0x54, 0xc7, 0xcb, 0xbf, 0x4b, + 0xb0, 0xdc, 0x98, 0xb2, 0xf0, 0xf5, 0x7f, 0xb3, 0x55, 0x0f, 0x05, 0xb7, 0x74, 0xac, 0xab, 0x73, + 0x3d, 0xbb, 0x36, 0xef, 0x31, 0x1c, 0xf3, 0xac, 0x77, 0xc8, 0xf3, 0xd2, 0x99, 0xf5, 0xf0, 0xbb, + 0x08, 0xac, 0xce, 0xe1, 0xff, 0x7b, 0xbd, 0x0c, 0xbf, 0xde, 0xd8, 0x2b, 0xbe, 0xde, 0xf8, 0xc2, + 0xd7, 0xfb, 0x6d, 0x04, 0x92, 0x6d, 0x36, 0xa5, 0xb5, 0xd1, 0x3f, 0x31, 0x7b, 
0xaf, 0x40, 0xca, + 0xb6, 0x46, 0x2a, 0xd7, 0xc8, 0x4c, 0x93, 0xb4, 0xad, 0x91, 0x32, 0x47, 0xb3, 0xd8, 0x6b, 0x1a, + 0xcc, 0xf1, 0xd7, 0xd0, 0x84, 0xc4, 0xe9, 0x07, 0xe5, 0x40, 0x86, 0x97, 0x42, 0x6c, 0x4d, 0x37, + 0x69, 0x0d, 0xd8, 0x1a, 0x26, 0xcd, 0x6f, 0x79, 0x3c, 0x6c, 0x8e, 0x54, 0x04, 0x8e, 0x5a, 0xf0, + 0x25, 0x43, 0x2c, 0x6e, 0xf9, 0x45, 0x13, 0x4b, 0x11, 0xb8, 0xf2, 0x97, 0x12, 0xc0, 0x2e, 0xad, + 0x2c, 0xcb, 0x97, 0xee, 0x3b, 0x2e, 0x0b, 0x41, 0x0d, 0xdd, 0x5c, 0x5c, 0xd4, 0x34, 0x71, 0x7f, + 0xc6, 0x0d, 0xc6, 0x5d, 0x87, 0xec, 0x8c, 0xdb, 0x2e, 0xf6, 0x82, 0x39, 0xc3, 0x89, 0xbf, 0x86, + 0x74, 0x30, 0x51, 0x32, 0x07, 0x81, 0x53, 0xf9, 0x47, 0x09, 0x52, 0x2c, 0xa6, 0x3d, 0x4c, 0xb4, + 0x50, 0x0f, 0xa5, 0xf3, 0xf7, 0x70, 0x1d, 0x80, 0xbb, 0x71, 0x8d, 0x27, 0x58, 0x30, 0x2b, 0xc5, + 0x24, 0x1d, 0xe3, 0x09, 0x46, 0xef, 0xf9, 0x05, 0x8f, 0xfe, 0x75, 0xc1, 0xc5, 0xc4, 0xf0, 0xca, + 0x7e, 0x09, 0x12, 0xe6, 0x64, 0xac, 0xd2, 0xe5, 0x43, 0xe6, 0x6c, 0x35, 0x27, 0xe3, 0xee, 0xd4, + 0x2d, 0x7f, 0x06, 0x89, 0xee, 0x94, 0x2d, 0xe2, 0x94, 0xa2, 0x8e, 0x65, 0x89, 0xed, 0x8f, 0x6f, + 0xdd, 0x49, 0x2a, 0x60, 0xcb, 0x0e, 0x02, 0x99, 0xae, 0x79, 0xde, 0xcf, 0x02, 0xfa, 0x8d, 0x2a, + 0xaf, 0xb8, 0xe2, 0x8b, 0xe5, 0xfe, 0xc6, 0x4f, 0x12, 0xa4, 0x03, 0xe3, 0x06, 0xbd, 0x03, 0x17, + 0x6a, 0xbb, 0xfb, 0xf5, 0x7b, 0x6a, 0x73, 0x47, 0xbd, 0xbd, 0x5b, 0xbd, 0xa3, 0xde, 0x6f, 0xdd, + 0x6b, 0xed, 0x7f, 0xd2, 0xca, 0x2d, 0x15, 0x2e, 0x1e, 0x1d, 0x97, 0x50, 0x00, 0x7b, 0xdf, 0x7c, + 0x6c, 0x5a, 0x9f, 0xd3, 0x77, 0xbe, 0x16, 0x36, 0xa9, 0xd6, 0x3a, 0x8d, 0x56, 0x37, 0x27, 0x15, + 0x2e, 0x1c, 0x1d, 0x97, 0x56, 0x03, 0x16, 0xd5, 0x9e, 0x8b, 0x4d, 0x32, 0x6f, 0x50, 0xdf, 0xdf, + 0xdb, 0x6b, 0x76, 0x73, 0x91, 0x39, 0x03, 0xf1, 0x0f, 0xe2, 0x3a, 0xac, 0x86, 0x0d, 0x5a, 0xcd, + 0xdd, 0x5c, 0xb4, 0x80, 0x8e, 0x8e, 0x4b, 0xcb, 0x01, 0x74, 0xcb, 0x18, 0x15, 0x92, 0x5f, 0x7c, + 0x55, 0x5c, 0xfa, 0xe6, 0xeb, 0xa2, 0x44, 0x33, 0xcb, 0x86, 0x66, 0x04, 0x7a, 0x1b, 0x2e, 0x75, + 0x9a, 0x77, 0x5a, 
0x8d, 0x1d, 0x75, 0xaf, 0x73, 0x47, 0xed, 0x7e, 0xda, 0x6e, 0x04, 0xb2, 0x5b, + 0x39, 0x3a, 0x2e, 0xa5, 0x45, 0x4a, 0x8b, 0xd0, 0x6d, 0xa5, 0xf1, 0x60, 0xbf, 0xdb, 0xc8, 0x49, + 0x1c, 0xdd, 0x76, 0xf0, 0x81, 0x45, 0x30, 0x43, 0xdf, 0x84, 0xcb, 0x67, 0xa0, 0xfd, 0xc4, 0x56, + 0x8f, 0x8e, 0x4b, 0xd9, 0xb6, 0x83, 0xf9, 0xfb, 0x61, 0x16, 0x15, 0xc8, 0xcf, 0x5b, 0xec, 0xb7, + 0xf7, 0x3b, 0xd5, 0xdd, 0x5c, 0xa9, 0x90, 0x3b, 0x3a, 0x2e, 0x65, 0xbc, 0x61, 0x48, 0xf1, 0xb3, + 0xcc, 0x6a, 0x1f, 0x3f, 0x3b, 0x29, 0x4a, 0xcf, 0x4f, 0x8a, 0xd2, 0x6f, 0x27, 0x45, 0xe9, 0xe9, + 0xcb, 0xe2, 0xd2, 0xf3, 0x97, 0xc5, 0xa5, 0x9f, 0x5f, 0x16, 0x97, 0x1e, 0xbe, 0x3f, 0x30, 0xc8, + 0x70, 0xd2, 0xab, 0xf4, 0xad, 0xf1, 0x56, 0xf0, 0xc7, 0xe7, 0xec, 0x93, 0xff, 0x08, 0x3e, 0xfd, + 0xc3, 0xb4, 0x17, 0x67, 0xf2, 0x5b, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0x46, 0xcf, 0x37, 0x28, + 0x59, 0x0f, 0x00, 0x00, } func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { @@ -1634,6 +1796,127 @@ func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ExtendedCommit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtendedCommit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExtendedCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExtendedSignatures) > 0 { + for iNdEx := len(m.ExtendedSignatures) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExtendedSignatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ExtendedCommitSig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtendedCommitSig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExtendedCommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExtensionSignature) > 0 { + i -= len(m.ExtensionSignature) + copy(dAtA[i:], m.ExtensionSignature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ExtensionSignature))) + i-- + dAtA[i] = 0x32 + } + if len(m.Extension) > 0 { + i -= len(m.Extension) + copy(dAtA[i:], m.Extension) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Extension))) + i-- + dAtA[i] = 0x2a + } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x22 + } + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err11 != nil { + return 0, err11 + } + i -= n11 + i = encodeVarintTypes(dAtA, i, uint64(n11)) + i-- + dAtA[i] = 0x1a + if len(m.ValidatorAddress) > 0 { + i -= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + i-- + dAtA[i] = 0x12 + } + if m.BlockIdFlag != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *Proposal) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) @@ -1661,12 +1944,12 @@ func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x3a } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err10 != nil { - return 0, err10 + n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err12 != nil { + return 0, err12 } - i -= n10 - i = encodeVarintTypes(dAtA, i, uint64(n10)) + i -= n12 + i = encodeVarintTypes(dAtA, i, uint64(n12)) i-- dAtA[i] = 0x32 { @@ -2117,55 +2400,108 @@ func (m *CommitSig) Size() (n int) { return n } -func (m *Proposal) Size() (n int) { +func (m *ExtendedCommit) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) - } if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } if m.Round != 0 { n += 1 + sovTypes(uint64(m.Round)) } - if m.PolRound != 0 { - n += 1 + sovTypes(uint64(m.PolRound)) - } l = m.BlockID.Size() n += 1 + l + sovTypes(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.Signature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.ExtendedSignatures) > 0 { + for _, e := range m.ExtendedSignatures { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } } return n } -func (m *SignedHeader) Size() (n int) { +func (m *ExtendedCommitSig) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Header != nil { - l = m.Header.Size() + if m.BlockIdFlag != 0 { + n += 1 + sovTypes(uint64(m.BlockIdFlag)) + } + l = len(m.ValidatorAddress) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.Commit != nil { - l = m.Commit.Size() + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Signature) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - 
return n -} - -func (m *LightBlock) Size() (n int) { - if m == nil { - return 0 + l = len(m.Extension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ExtensionSignature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Proposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.PolRound != 0 { + n += 1 + sovTypes(uint64(m.PolRound)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignedHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *LightBlock) Size() (n int) { + if m == nil { + return 0 } var l int _ = l @@ -3823,6 +4159,399 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { } return nil } +func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtendedCommit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtendedCommit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 
0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtendedSignatures", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExtendedSignatures = append(m.ExtendedSignatures, 
ExtendedCommitSig{}) + if err := m.ExtendedSignatures[len(m.ExtendedSignatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtendedCommitSig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtendedCommitSig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) + } + m.BlockIdFlag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) + if m.ValidatorAddress == nil { + m.ValidatorAddress = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Extension = append(m.Extension[:0], dAtA[iNdEx:postIndex]...) + if m.Extension == nil { + m.Extension = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtensionSignature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExtensionSignature = append(m.ExtensionSignature[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ExtensionSignature == nil { + m.ExtensionSignature = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Proposal) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto index e2b8a46c83..52668f7198 100644 --- a/proto/tendermint/types/types.proto +++ b/proto/tendermint/types/types.proto @@ -142,6 +142,28 @@ message CommitSig { bytes signature = 4; } +message ExtendedCommit { + int64 height = 1; + int32 round = 2; + BlockID block_id = 3 + [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; + repeated ExtendedCommitSig extended_signatures = 4 [(gogoproto.nullable) = false]; +} + +// ExtendedCommitSig retains all the same fields as CommitSig but adds vote +// extension-related fields. 
+message ExtendedCommitSig { + BlockIDFlag block_id_flag = 1; + bytes validator_address = 2; + google.protobuf.Timestamp timestamp = 3 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes signature = 4; + // Vote extension data + bytes extension = 5; + // Vote extension signature + bytes extension_signature = 6; +} + message Proposal { SignedMsgType type = 1; int64 height = 2; diff --git a/scripts/confix/confix.go b/scripts/confix/confix.go index 6677f0b49a..b24c3a778d 100644 --- a/scripts/confix/confix.go +++ b/scripts/confix/confix.go @@ -17,6 +17,7 @@ import ( "github.com/creachadair/tomledit" "github.com/creachadair/tomledit/transform" "github.com/spf13/viper" + "github.com/tendermint/tendermint/config" ) diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index 849e4edc39..a71ea14fb5 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -167,7 +167,7 @@ func generateLightClientAttackEvidence( blockID := makeBlockID(header.Hash(), 1000, []byte("partshash")) voteSet := types.NewVoteSet(chainID, forgedHeight, 0, tmproto.SignedMsgType(2), conflictingVals) - commit, err := factory.MakeCommit(ctx, blockID, forgedHeight, 0, voteSet, pv, forgedTime) + commit, err := factory.MakeExtendedCommit(ctx, blockID, forgedHeight, 0, voteSet, pv, forgedTime) if err != nil { return nil, err } @@ -176,7 +176,7 @@ func generateLightClientAttackEvidence( ConflictingBlock: &types.LightBlock{ SignedHeader: &types.SignedHeader{ Header: header, - Commit: commit, + Commit: commit.StripExtensions(), }, ValidatorSet: conflictingVals, }, diff --git a/types/block.go b/types/block.go index 17e9812cf5..32a4f9a0a9 100644 --- a/types/block.go +++ b/types/block.go @@ -608,16 +608,6 @@ type CommitSig struct { Signature []byte `json:"signature"` } -// NewCommitSigForBlock returns new CommitSig with BlockIDFlagCommit. 
-func NewCommitSigForBlock(signature []byte, valAddr Address, ts time.Time) CommitSig { - return CommitSig{ - BlockIDFlag: BlockIDFlagCommit, - ValidatorAddress: valAddr, - Timestamp: ts, - Signature: signature, - } -} - func MaxCommitBytes(valCount int) int64 { // From the repeated commit sig field var protoEncodingOverhead int64 = 2 @@ -632,16 +622,6 @@ func NewCommitSigAbsent() CommitSig { } } -// ForBlock returns true if CommitSig is for the block. -func (cs CommitSig) ForBlock() bool { - return cs.BlockIDFlag == BlockIDFlagCommit -} - -// Absent returns true if CommitSig is absent. -func (cs CommitSig) Absent() bool { - return cs.BlockIDFlag == BlockIDFlagAbsent -} - // CommitSig returns a string representation of CommitSig. // // 1. first 6 bytes of signature @@ -730,7 +710,6 @@ func (cs *CommitSig) ToProto() *tmproto.CommitSig { // FromProto sets a protobuf CommitSig to the given pointer. // It returns an error if the CommitSig is invalid. func (cs *CommitSig) FromProto(csp tmproto.CommitSig) error { - cs.BlockIDFlag = BlockIDFlag(csp.BlockIdFlag) cs.ValidatorAddress = csp.ValidatorAddress cs.Timestamp = csp.Timestamp @@ -741,6 +720,95 @@ func (cs *CommitSig) FromProto(csp tmproto.CommitSig) error { //------------------------------------- +// ExtendedCommitSig contains a commit signature along with its corresponding +// vote extension and vote extension signature. +type ExtendedCommitSig struct { + CommitSig // Commit signature + Extension []byte // Vote extension + ExtensionSignature []byte // Vote extension signature +} + +// NewExtendedCommitSigAbsent returns new ExtendedCommitSig with +// BlockIDFlagAbsent. Other fields are all empty. +func NewExtendedCommitSigAbsent() ExtendedCommitSig { + return ExtendedCommitSig{CommitSig: NewCommitSigAbsent()} +} + +// String returns a string representation of an ExtendedCommitSig. +// +// 1. commit sig +// 2. first 6 bytes of vote extension +// 3. 
first 6 bytes of vote extension signature +func (ecs ExtendedCommitSig) String() string { + return fmt.Sprintf("ExtendedCommitSig{%s with %X %X}", + ecs.CommitSig, + tmbytes.Fingerprint(ecs.Extension), + tmbytes.Fingerprint(ecs.ExtensionSignature), + ) +} + +// ValidateBasic checks whether the structure is well-formed. +func (ecs ExtendedCommitSig) ValidateBasic() error { + if err := ecs.CommitSig.ValidateBasic(); err != nil { + return err + } + + if ecs.BlockIDFlag == BlockIDFlagCommit { + if len(ecs.Extension) > MaxVoteExtensionSize { + return fmt.Errorf("vote extension is too big (max: %d)", MaxVoteExtensionSize) + } + if len(ecs.ExtensionSignature) == 0 { + return errors.New("vote extension signature is missing") + } + if len(ecs.ExtensionSignature) > MaxSignatureSize { + return fmt.Errorf("vote extension signature is too big (max: %d)", MaxSignatureSize) + } + return nil + } + + // We expect there to not be any vote extension or vote extension signature + // on nil or absent votes. + if len(ecs.Extension) != 0 { + return fmt.Errorf("vote extension is present for commit sig with block ID flag %v", ecs.BlockIDFlag) + } + if len(ecs.ExtensionSignature) != 0 { + return fmt.Errorf("vote extension signature is present for commit sig with block ID flag %v", ecs.BlockIDFlag) + } + return nil +} + +// ToProto converts the ExtendedCommitSig to its Protobuf representation. +func (ecs *ExtendedCommitSig) ToProto() *tmproto.ExtendedCommitSig { + if ecs == nil { + return nil + } + + return &tmproto.ExtendedCommitSig{ + BlockIdFlag: tmproto.BlockIDFlag(ecs.BlockIDFlag), + ValidatorAddress: ecs.ValidatorAddress, + Timestamp: ecs.Timestamp, + Signature: ecs.Signature, + Extension: ecs.Extension, + ExtensionSignature: ecs.ExtensionSignature, + } +} + +// FromProto populates the ExtendedCommitSig with values from the given +// Protobuf representation. Returns an error if the ExtendedCommitSig is +// invalid. 
+func (ecs *ExtendedCommitSig) FromProto(ecsp tmproto.ExtendedCommitSig) error { + ecs.BlockIDFlag = BlockIDFlag(ecsp.BlockIdFlag) + ecs.ValidatorAddress = ecsp.ValidatorAddress + ecs.Timestamp = ecsp.Timestamp + ecs.Signature = ecsp.Signature + ecs.Extension = ecsp.Extension + ecs.ExtensionSignature = ecsp.ExtensionSignature + + return ecs.ValidateBasic() +} + +//------------------------------------- + // Commit contains the evidence that a block was committed by a set of validators. // NOTE: Commit is empty for height 1, but never nil. type Commit struct { @@ -756,42 +824,12 @@ type Commit struct { // Memoized in first call to corresponding method. // NOTE: can't memoize in constructor because constructor isn't used for // unmarshaling. - hash tmbytes.HexBytes - bitArray *bits.BitArray -} - -// NewCommit returns a new Commit. -func NewCommit(height int64, round int32, blockID BlockID, commitSigs []CommitSig) *Commit { - return &Commit{ - Height: height, - Round: round, - BlockID: blockID, - Signatures: commitSigs, - } -} - -// CommitToVoteSet constructs a VoteSet from the Commit and validator set. -// Panics if signatures from the commit can't be added to the voteset. -// Inverse of VoteSet.MakeCommit(). -func CommitToVoteSet(chainID string, commit *Commit, vals *ValidatorSet) *VoteSet { - voteSet := NewVoteSet(chainID, commit.Height, commit.Round, tmproto.PrecommitType, vals) - for idx, commitSig := range commit.Signatures { - if commitSig.Absent() { - continue // OK, some precommits can be missing. - } - vote := commit.GetVote(int32(idx)) - if err := vote.ValidateBasic(); err != nil { - panic(fmt.Errorf("failed to validate vote reconstructed from LastCommit: %w", err)) - } - added, err := voteSet.AddVote(vote) - if !added || err != nil { - panic(fmt.Errorf("failed to reconstruct LastCommit: %w", err)) - } - } - return voteSet + hash tmbytes.HexBytes } -// GetVote converts the CommitSig for the given valIdx to a Vote. 
+// GetVote converts the CommitSig for the given valIdx to a Vote. Commits do +// not contain vote extensions, so the vote extension and vote extension +// signature will not be present in the returned vote. // Returns nil if the precommit at valIdx is nil. // Panics if valIdx >= commit.Size(). func (commit *Commit) GetVote(valIdx int32) *Vote { @@ -822,26 +860,7 @@ func (commit *Commit) VoteSignBytes(chainID string, valIdx int32) []byte { return VoteSignBytes(chainID, v) } -// Type returns the vote type of the commit, which is always VoteTypePrecommit -// Implements VoteSetReader. -func (commit *Commit) Type() byte { - return byte(tmproto.PrecommitType) -} - -// GetHeight returns height of the commit. -// Implements VoteSetReader. -func (commit *Commit) GetHeight() int64 { - return commit.Height -} - -// GetRound returns height of the commit. -// Implements VoteSetReader. -func (commit *Commit) GetRound() int32 { - return commit.Round -} - // Size returns the number of signatures in the commit. -// Implements VoteSetReader. func (commit *Commit) Size() int { if commit == nil { return 0 @@ -849,33 +868,6 @@ func (commit *Commit) Size() int { return len(commit.Signatures) } -// BitArray returns a BitArray of which validators voted for BlockID or nil in this commit. -// Implements VoteSetReader. -func (commit *Commit) BitArray() *bits.BitArray { - if commit.bitArray == nil { - commit.bitArray = bits.NewBitArray(len(commit.Signatures)) - for i, commitSig := range commit.Signatures { - // TODO: need to check the BlockID otherwise we could be counting conflicts, - // not just the one with +2/3 ! - commit.bitArray.SetIndex(i, !commitSig.Absent()) - } - } - return commit.bitArray -} - -// GetByIndex returns the vote corresponding to a given validator index. -// Panics if `index >= commit.Size()`. -// Implements VoteSetReader. 
-func (commit *Commit) GetByIndex(valIdx int32) *Vote { - return commit.GetVote(valIdx) -} - -// IsCommit returns true if there is at least one signature. -// Implements VoteSetReader. -func (commit *Commit) IsCommit() bool { - return len(commit.Signatures) != 0 -} - // ValidateBasic performs basic validation that doesn't involve state data. // Does not actually check the cryptographic signatures. func (commit *Commit) ValidateBasic() error { @@ -999,7 +991,209 @@ func CommitFromProto(cp *tmproto.Commit) (*Commit, error) { return commit, commit.ValidateBasic() } -//----------------------------------------------------------------------------- +//------------------------------------- + +// ExtendedCommit is similar to Commit, except that its signatures also retain +// their corresponding vote extensions and vote extension signatures. +type ExtendedCommit struct { + Height int64 + Round int32 + BlockID BlockID + ExtendedSignatures []ExtendedCommitSig + + bitArray *bits.BitArray +} + +// Clone creates a deep copy of this extended commit. +func (ec *ExtendedCommit) Clone() *ExtendedCommit { + sigs := make([]ExtendedCommitSig, len(ec.ExtendedSignatures)) + copy(sigs, ec.ExtendedSignatures) + ecc := *ec + ecc.ExtendedSignatures = sigs + return &ecc +} + +// ToVoteSet constructs a VoteSet from the Commit and validator set. +// Panics if signatures from the commit can't be added to the voteset. +// Inverse of VoteSet.MakeExtendedCommit(). +func (ec *ExtendedCommit) ToVoteSet(chainID string, vals *ValidatorSet) *VoteSet { + voteSet := NewVoteSet(chainID, ec.Height, ec.Round, tmproto.PrecommitType, vals) + for idx, ecs := range ec.ExtendedSignatures { + if ecs.BlockIDFlag == BlockIDFlagAbsent { + continue // OK, some precommits can be missing. 
+ } + vote := ec.GetExtendedVote(int32(idx)) + if err := vote.ValidateWithExtension(); err != nil { + panic(fmt.Errorf("failed to validate vote reconstructed from LastCommit: %w", err)) + } + added, err := voteSet.AddVote(vote) + if !added || err != nil { + panic(fmt.Errorf("failed to reconstruct vote set from extended commit: %w", err)) + } + } + return voteSet +} + +// StripExtensions converts an ExtendedCommit to a Commit by removing all vote +// extension-related fields. +func (ec *ExtendedCommit) StripExtensions() *Commit { + cs := make([]CommitSig, len(ec.ExtendedSignatures)) + for idx, ecs := range ec.ExtendedSignatures { + cs[idx] = ecs.CommitSig + } + return &Commit{ + Height: ec.Height, + Round: ec.Round, + BlockID: ec.BlockID, + Signatures: cs, + } +} + +// GetExtendedVote converts the ExtendedCommitSig for the given validator +// index to a Vote with its vote extension. +// It panics if valIndex is out of range. +func (ec *ExtendedCommit) GetExtendedVote(valIndex int32) *Vote { + ecs := ec.ExtendedSignatures[valIndex] + return &Vote{ + Type: tmproto.PrecommitType, + Height: ec.Height, + Round: ec.Round, + BlockID: ecs.BlockID(ec.BlockID), + Timestamp: ecs.Timestamp, + ValidatorAddress: ecs.ValidatorAddress, + ValidatorIndex: valIndex, + Signature: ecs.Signature, + Extension: ecs.Extension, + ExtensionSignature: ecs.ExtensionSignature, + } +} + +// Type returns the vote type of the extended commit, which is always +// VoteTypePrecommit +// Implements VoteSetReader. +func (ec *ExtendedCommit) Type() byte { return byte(tmproto.PrecommitType) } + +// GetHeight returns height of the extended commit. +// Implements VoteSetReader. +func (ec *ExtendedCommit) GetHeight() int64 { return ec.Height } + +// GetRound returns the round of the extended commit. +// Implements VoteSetReader. +func (ec *ExtendedCommit) GetRound() int32 { return ec.Round } + +// Size returns the number of signatures in the extended commit. +// Implements VoteSetReader. 
+func (ec *ExtendedCommit) Size() int { + if ec == nil { + return 0 + } + return len(ec.ExtendedSignatures) +} + +// BitArray returns a BitArray of which validators voted for BlockID or nil in +// this extended commit. +// Implements VoteSetReader. +func (ec *ExtendedCommit) BitArray() *bits.BitArray { + if ec.bitArray == nil { + ec.bitArray = bits.NewBitArray(len(ec.ExtendedSignatures)) + for i, extCommitSig := range ec.ExtendedSignatures { + // TODO: need to check the BlockID otherwise we could be counting conflicts, + // not just the one with +2/3 ! + ec.bitArray.SetIndex(i, extCommitSig.BlockIDFlag != BlockIDFlagAbsent) + } + } + return ec.bitArray +} + +// GetByIndex returns the vote corresponding to a given validator index. +// Panics if `index >= extCommit.Size()`. +// Implements VoteSetReader. +func (ec *ExtendedCommit) GetByIndex(valIdx int32) *Vote { + return ec.GetExtendedVote(valIdx) +} + +// IsCommit returns true if there is at least one signature. +// Implements VoteSetReader. +func (ec *ExtendedCommit) IsCommit() bool { + return len(ec.ExtendedSignatures) != 0 +} + +// ValidateBasic checks whether the extended commit is well-formed. Does not +// actually check the cryptographic signatures. 
+func (ec *ExtendedCommit) ValidateBasic() error { + if ec.Height < 0 { + return errors.New("negative Height") + } + if ec.Round < 0 { + return errors.New("negative Round") + } + + if ec.Height >= 1 { + if ec.BlockID.IsNil() { + return errors.New("commit cannot be for nil block") + } + + if len(ec.ExtendedSignatures) == 0 { + return errors.New("no signatures in commit") + } + for i, extCommitSig := range ec.ExtendedSignatures { + if err := extCommitSig.ValidateBasic(); err != nil { + return fmt.Errorf("wrong ExtendedCommitSig #%d: %v", i, err) + } + } + } + return nil +} + +// ToProto converts ExtendedCommit to protobuf +func (ec *ExtendedCommit) ToProto() *tmproto.ExtendedCommit { + if ec == nil { + return nil + } + + c := new(tmproto.ExtendedCommit) + sigs := make([]tmproto.ExtendedCommitSig, len(ec.ExtendedSignatures)) + for i := range ec.ExtendedSignatures { + sigs[i] = *ec.ExtendedSignatures[i].ToProto() + } + c.ExtendedSignatures = sigs + + c.Height = ec.Height + c.Round = ec.Round + c.BlockID = ec.BlockID.ToProto() + + return c +} + +// ExtendedCommitFromProto constructs an ExtendedCommit from the given Protobuf +// representation. It returns an error if the extended commit is invalid. 
+func ExtendedCommitFromProto(ecp *tmproto.ExtendedCommit) (*ExtendedCommit, error) { + if ecp == nil { + return nil, errors.New("nil ExtendedCommit") + } + + extCommit := new(ExtendedCommit) + + bi, err := BlockIDFromProto(&ecp.BlockID) + if err != nil { + return nil, err + } + + sigs := make([]ExtendedCommitSig, len(ecp.ExtendedSignatures)) + for i := range ecp.ExtendedSignatures { + if err := sigs[i].FromProto(ecp.ExtendedSignatures[i]); err != nil { + return nil, err + } + } + extCommit.ExtendedSignatures = sigs + extCommit.Height = ecp.Height + extCommit.Round = ecp.Round + extCommit.BlockID = *bi + + return extCommit, extCommit.ValidateBasic() +} + +//------------------------------------- // Data contains the set of transactions included in the block type Data struct { @@ -1170,3 +1364,9 @@ func BlockIDFromProto(bID *tmproto.BlockID) (*BlockID, error) { return blockID, blockID.ValidateBasic() } + +// ProtoBlockIDIsNil is similar to the IsNil function on BlockID, but for the +// Protobuf representation. 
+func ProtoBlockIDIsNil(bID *tmproto.BlockID) bool { + return len(bID.Hash) == 0 && ProtoPartSetHeaderIsZero(&bID.PartSetHeader) +} diff --git a/types/block_test.go b/types/block_test.go index 7f2378505d..09a8b602ed 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -42,14 +42,14 @@ func TestBlockAddEvidence(t *testing.T) { h := int64(3) voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) + extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain") require.NoError(t, err) evList := []Evidence{ev} - block := MakeBlock(h, txs, commit, evList) + block := MakeBlock(h, txs, extCommit.StripExtensions(), evList) require.NotNil(t, block) require.Equal(t, 1, len(block.Evidence)) require.NotNil(t, block.EvidenceHash) @@ -66,9 +66,9 @@ func TestBlockValidateBasic(t *testing.T) { h := int64(3) voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) - + extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) + commit := extCommit.StripExtensions() ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain") require.NoError(t, err) @@ -104,7 +104,10 @@ func TestBlockValidateBasic(t *testing.T) { blk.LastCommit = nil }, true}, {"Invalid LastCommit", func(blk *Block) { - blk.LastCommit = NewCommit(-1, 0, *voteSet.maj23, nil) + blk.LastCommit = &Commit{ + Height: -1, + BlockID: *voteSet.maj23, + } }, true}, {"Invalid Evidence", func(blk *Block) { emptyEv := &DuplicateVoteEvidence{} @@ -153,15 +156,14 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { h := int64(3) voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, 
tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) - + extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain") require.NoError(t, err) evList := []Evidence{ev} - partSet, err := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList).MakePartSet(512) + partSet, err := MakeBlock(h, []Tx{Tx("Hello World")}, extCommit.StripExtensions(), evList).MakePartSet(512) require.NoError(t, err) assert.NotNil(t, partSet) @@ -178,14 +180,14 @@ func TestBlockHashesTo(t *testing.T) { h := int64(3) voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) + extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain") require.NoError(t, err) evList := []Evidence{ev} - block := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList) + block := MakeBlock(h, []Tx{Tx("Hello World")}, extCommit.StripExtensions(), evList) block.ValidatorsHash = valSet.Hash() assert.False(t, block.HashesTo([]byte{})) assert.False(t, block.HashesTo([]byte("something else"))) @@ -260,7 +262,7 @@ func TestCommit(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) + commit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) assert.Equal(t, h-1, commit.Height) @@ -273,7 +275,7 @@ func TestCommit(t *testing.T) { require.NotNil(t, commit.BitArray()) assert.Equal(t, bits.NewBitArray(10).Size(), commit.BitArray().Size()) - assert.Equal(t, 
voteWithoutExtension(voteSet.GetByIndex(0)), commit.GetByIndex(0)) + assert.Equal(t, voteSet.GetByIndex(0), commit.GetByIndex(0)) assert.True(t, commit.IsCommit()) } @@ -477,11 +479,11 @@ func randCommit(ctx context.Context, t *testing.T, now time.Time) *Commit { lastID := makeBlockIDRandom() h := int64(3) voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, now) + commit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, now) require.NoError(t, err) - return commit + return commit.StripExtensions() } func hexBytesFromString(t *testing.T, s string) bytes.HexBytes { @@ -554,7 +556,7 @@ func TestBlockMaxDataBytesNoEvidence(t *testing.T) { } } -func TestCommitToVoteSet(t *testing.T) { +func TestExtendedCommitToVoteSet(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) @@ -562,17 +564,16 @@ func TestCommitToVoteSet(t *testing.T) { defer cancel() voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) - + extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) assert.NoError(t, err) chainID := voteSet.ChainID() - voteSet2 := CommitToVoteSet(chainID, commit, valSet) + voteSet2 := extCommit.ToVoteSet(chainID, valSet) for i := int32(0); int(i) < len(vals); i++ { - vote1 := voteWithoutExtension(voteSet.GetByIndex(i)) + vote1 := voteSet.GetByIndex(i) vote2 := voteSet2.GetByIndex(i) - vote3 := commit.GetVote(i) + vote3 := extCommit.GetExtendedVote(i) vote1bz, err := vote1.ToProto().Marshal() require.NoError(t, err) @@ -634,12 +635,12 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { } if tc.valid { - commit := voteSet.MakeCommit() // panics without > 2/3 valid votes - assert.NotNil(t, commit) - err := valSet.VerifyCommit(voteSet.ChainID(), blockID, height-1, commit) + extCommit := voteSet.MakeExtendedCommit() // panics without > 2/3 
valid votes + assert.NotNil(t, extCommit) + err := valSet.VerifyCommit(voteSet.ChainID(), blockID, height-1, extCommit.StripExtensions()) assert.NoError(t, err) } else { - assert.Panics(t, func() { voteSet.MakeCommit() }) + assert.Panics(t, func() { voteSet.MakeExtendedCommit() }) } } } diff --git a/types/evidence.go b/types/evidence.go index aed954a930..c5b5b6223d 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -309,7 +309,7 @@ func (l *LightClientAttackEvidence) GetByzantineValidators(commonVals *Validator // validators who are in the commonVals and voted for the lunatic header if l.ConflictingHeaderIsInvalid(trusted.Header) { for _, commitSig := range l.ConflictingBlock.Commit.Signatures { - if !commitSig.ForBlock() { + if commitSig.BlockIDFlag != BlockIDFlagCommit { continue } @@ -329,12 +329,12 @@ func (l *LightClientAttackEvidence) GetByzantineValidators(commonVals *Validator // only need a single loop to find the validators that voted twice. for i := 0; i < len(l.ConflictingBlock.Commit.Signatures); i++ { sigA := l.ConflictingBlock.Commit.Signatures[i] - if !sigA.ForBlock() { + if sigA.BlockIDFlag != BlockIDFlagCommit { continue } sigB := trusted.Commit.Signatures[i] - if !sigB.ForBlock() { + if sigB.BlockIDFlag != BlockIDFlagCommit { continue } diff --git a/types/evidence_test.go b/types/evidence_test.go index 27e346343c..8b06a62186 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -153,8 +153,10 @@ func TestLightClientAttackEvidenceBasic(t *testing.T) { header := makeHeaderRandom() header.Height = height blockID := makeBlockID(crypto.Checksum([]byte("blockhash")), math.MaxInt32, crypto.Checksum([]byte("partshash"))) - commit, err := makeCommit(ctx, blockID, height, 1, voteSet, privVals, defaultVoteTime) + extCommit, err := makeExtCommit(ctx, blockID, height, 1, voteSet, privVals, defaultVoteTime) require.NoError(t, err) + commit := extCommit.StripExtensions() + lcae := &LightClientAttackEvidence{ ConflictingBlock: &LightBlock{ 
SignedHeader: &SignedHeader{ @@ -217,8 +219,10 @@ func TestLightClientAttackEvidenceValidation(t *testing.T) { header.Height = height header.ValidatorsHash = valSet.Hash() blockID := makeBlockID(header.Hash(), math.MaxInt32, crypto.Checksum([]byte("partshash"))) - commit, err := makeCommit(ctx, blockID, height, 1, voteSet, privVals, time.Now()) + extCommit, err := makeExtCommit(ctx, blockID, height, 1, voteSet, privVals, time.Now()) require.NoError(t, err) + commit := extCommit.StripExtensions() + lcae := &LightClientAttackEvidence{ ConflictingBlock: &LightBlock{ SignedHeader: &SignedHeader{ @@ -424,13 +428,13 @@ func TestEvidenceVectors(t *testing.T) { ProposerAddress: []byte("2915b7b15f979e48ebc61774bb1d86ba3136b7eb"), } blockID3 := makeBlockID(header.Hash(), math.MaxInt32, crypto.Checksum([]byte("partshash"))) - commit, err := makeCommit(ctx, blockID3, height, 1, voteSet, privVals, defaultVoteTime) + extCommit, err := makeExtCommit(ctx, blockID3, height, 1, voteSet, privVals, defaultVoteTime) require.NoError(t, err) lcae := &LightClientAttackEvidence{ ConflictingBlock: &LightBlock{ SignedHeader: &SignedHeader{ Header: header, - Commit: commit, + Commit: extCommit.StripExtensions(), }, ValidatorSet: valSet, }, diff --git a/types/part_set.go b/types/part_set.go index 9bf36279f7..d9341b61ff 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -145,6 +145,12 @@ func PartSetHeaderFromProto(ppsh *tmproto.PartSetHeader) (*PartSetHeader, error) return psh, psh.ValidateBasic() } +// ProtoPartSetHeaderIsZero is similar to the IsZero function for +// PartSetHeader, but for the Protobuf representation. 
+func ProtoPartSetHeaderIsZero(ppsh *tmproto.PartSetHeader) bool { + return ppsh.Total == 0 && len(ppsh.Hash) == 0 +} + //------------------------------------- type PartSet struct { diff --git a/types/priv_validator.go b/types/priv_validator.go index 72027c6223..b7f4bd1653 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -90,7 +90,6 @@ func (pv MockPV) SignVote(ctx context.Context, chainID string, vote *tmproto.Vot } signBytes := VoteSignBytes(useChainID, vote) - extSignBytes := VoteExtensionSignBytes(useChainID, vote) sig, err := pv.PrivKey.Sign(signBytes) if err != nil { return err @@ -98,14 +97,15 @@ func (pv MockPV) SignVote(ctx context.Context, chainID string, vote *tmproto.Vot vote.Signature = sig var extSig []byte - // We only sign vote extensions for precommits - if vote.Type == tmproto.PrecommitType { + // We only sign vote extensions for non-nil precommits + if vote.Type == tmproto.PrecommitType && !ProtoBlockIDIsNil(&vote.BlockID) { + extSignBytes := VoteExtensionSignBytes(useChainID, vote) extSig, err = pv.PrivKey.Sign(extSignBytes) if err != nil { return err } } else if len(vote.Extension) > 0 { - return errors.New("unexpected vote extension - vote extensions are only allowed in precommits") + return errors.New("unexpected vote extension - vote extensions are only allowed in non-nil precommits") } vote.ExtensionSignature = extSig return nil diff --git a/types/test_util.go b/types/test_util.go index 8aea2f02c3..11daa69b95 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -8,8 +8,8 @@ import ( tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) -func makeCommit(ctx context.Context, blockID BlockID, height int64, round int32, - voteSet *VoteSet, validators []PrivValidator, now time.Time) (*Commit, error) { +func makeExtCommit(ctx context.Context, blockID BlockID, height int64, round int32, + voteSet *VoteSet, validators []PrivValidator, now time.Time) (*ExtendedCommit, error) { // all sign for i := 0; i < 
len(validators); i++ { @@ -33,7 +33,7 @@ func makeCommit(ctx context.Context, blockID BlockID, height int64, round int32, } } - return voteSet.MakeCommit(), nil + return voteSet.MakeExtendedCommit(), nil } func signAddVote(ctx context.Context, privVal PrivValidator, vote *Vote, voteSet *VoteSet) (signed bool, err error) { @@ -46,13 +46,3 @@ func signAddVote(ctx context.Context, privVal PrivValidator, vote *Vote, voteSet vote.ExtensionSignature = v.ExtensionSignature return voteSet.AddVote(vote) } - -// Votes constructed from commits don't have extensions, because we don't store -// the extensions themselves in the commit. This method is used to construct a -// copy of a vote, but nil its extension and signature. -func voteWithoutExtension(v *Vote) *Vote { - vc := v.Copy() - vc.Extension = nil - vc.ExtensionSignature = nil - return vc -} diff --git a/types/validation.go b/types/validation.go index 21c8730f5c..02d1b0b56f 100644 --- a/types/validation.go +++ b/types/validation.go @@ -36,10 +36,10 @@ func VerifyCommit(chainID string, vals *ValidatorSet, blockID BlockID, votingPowerNeeded := vals.TotalVotingPower() * 2 / 3 // ignore all absent signatures - ignore := func(c CommitSig) bool { return c.Absent() } + ignore := func(c CommitSig) bool { return c.BlockIDFlag == BlockIDFlagAbsent } // only count the signatures that are for the block - count := func(c CommitSig) bool { return c.ForBlock() } + count := func(c CommitSig) bool { return c.BlockIDFlag == BlockIDFlagCommit } // attempt to batch verify if shouldBatchVerify(vals, commit) { @@ -69,7 +69,7 @@ func VerifyCommitLight(chainID string, vals *ValidatorSet, blockID BlockID, votingPowerNeeded := vals.TotalVotingPower() * 2 / 3 // ignore all commit signatures that are not for the block - ignore := func(c CommitSig) bool { return !c.ForBlock() } + ignore := func(c CommitSig) bool { return c.BlockIDFlag != BlockIDFlagCommit } // count all the remaining signatures count := func(c CommitSig) bool { return true } @@ 
-113,7 +113,7 @@ func VerifyCommitLightTrusting(chainID string, vals *ValidatorSet, commit *Commi votingPowerNeeded := totalVotingPowerMulByNumerator / int64(trustLevel.Denominator) // ignore all commit signatures that are not for the block - ignore := func(c CommitSig) bool { return !c.ForBlock() } + ignore := func(c CommitSig) bool { return c.BlockIDFlag != BlockIDFlagCommit } // count all the remaining signatures count := func(c CommitSig) bool { return true } diff --git a/types/validation_test.go b/types/validation_test.go index 7900ee5ce0..f63c34450a 100644 --- a/types/validation_test.go +++ b/types/validation_test.go @@ -99,7 +99,12 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { vi++ } - commit := NewCommit(tc.height, round, tc.blockID, sigs) + commit := &Commit{ + Height: tc.height, + Round: round, + BlockID: tc.blockID, + Signatures: sigs, + } err := valSet.VerifyCommit(chainID, blockID, height, commit) if tc.expErr { @@ -146,9 +151,10 @@ func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) { defer cancel() voteSet, valSet, vals := randVoteSet(ctx, t, h, 0, tmproto.PrecommitType, 4, 10) - commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) - + extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(t, err) + commit := extCommit.StripExtensions() + require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit)) // malleate 4th signature @@ -176,9 +182,10 @@ func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSign defer cancel() voteSet, valSet, vals := randVoteSet(ctx, t, h, 0, tmproto.PrecommitType, 4, 10) - commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) - + extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(t, err) + commit := extCommit.StripExtensions() + require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit)) // malleate 4th signature (3 signatures are enough 
for 2/3+) @@ -203,9 +210,10 @@ func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotin defer cancel() voteSet, valSet, vals := randVoteSet(ctx, t, h, 0, tmproto.PrecommitType, 4, 10) - commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) - + extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(t, err) + commit := extCommit.StripExtensions() + require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit)) // malleate 3rd signature (2 signatures are enough for 1/3+ trust level) @@ -227,10 +235,11 @@ func TestValidatorSet_VerifyCommitLightTrusting(t *testing.T) { var ( blockID = makeBlockIDRandom() voteSet, originalValset, vals = randVoteSet(ctx, t, 1, 1, tmproto.PrecommitType, 6, 1) - commit, err = makeCommit(ctx, blockID, 1, 1, voteSet, vals, time.Now()) + extCommit, err = makeExtCommit(ctx, blockID, 1, 1, voteSet, vals, time.Now()) newValSet, _ = randValidatorPrivValSet(ctx, t, 2, 1) ) require.NoError(t, err) + commit := extCommit.StripExtensions() testCases := []struct { valSet *ValidatorSet @@ -271,11 +280,11 @@ func TestValidatorSet_VerifyCommitLightTrustingErrorsOnOverflow(t *testing.T) { var ( blockID = makeBlockIDRandom() voteSet, valSet, vals = randVoteSet(ctx, t, 1, 1, tmproto.PrecommitType, 1, MaxTotalVotingPower) - commit, err = makeCommit(ctx, blockID, 1, 1, voteSet, vals, time.Now()) + extCommit, err = makeExtCommit(ctx, blockID, 1, 1, voteSet, vals, time.Now()) ) require.NoError(t, err) - err = valSet.VerifyCommitLightTrusting("test_chain_id", commit, + err = valSet.VerifyCommitLightTrusting("test_chain_id", extCommit.StripExtensions(), tmmath.Fraction{Numerator: 25, Denominator: 55}) if assert.Error(t, err) { assert.Contains(t, err.Error(), "int64 overflow") diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 0963276264..8b5846da93 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -1539,8 +1539,9 @@ func 
BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) { // nolint // generate n validators voteSet, valSet, vals := randVoteSet(ctx, b, h, 0, tmproto.PrecommitType, n, int64(n*5)) // create a commit with n validators - commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) + extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(b, err) + commit := extCommit.StripExtensions() for i := 0; i < b.N/n; i++ { err = valSet.VerifyCommit(chainID, blockID, h, commit) @@ -1567,8 +1568,9 @@ func BenchmarkValidatorSet_VerifyCommitLight_Ed25519(b *testing.B) { // nolint voteSet, valSet, vals := randVoteSet(ctx, b, h, 0, tmproto.PrecommitType, n, int64(n*5)) // create a commit with n validators - commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) + extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(b, err) + commit := extCommit.StripExtensions() for i := 0; i < b.N/n; i++ { err = valSet.VerifyCommitLight(chainID, blockID, h, commit) @@ -1594,8 +1596,9 @@ func BenchmarkValidatorSet_VerifyCommitLightTrusting_Ed25519(b *testing.B) { // generate n validators voteSet, valSet, vals := randVoteSet(ctx, b, h, 0, tmproto.PrecommitType, n, int64(n*5)) // create a commit with n validators - commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) + extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(b, err) + commit := extCommit.StripExtensions() for i := 0; i < b.N/n; i++ { err = valSet.VerifyCommitLightTrusting(chainID, commit, tmmath.Fraction{Numerator: 1, Denominator: 3}) diff --git a/types/vote.go b/types/vote.go index f20ee491ee..446de130ae 100644 --- a/types/vote.go +++ b/types/vote.go @@ -14,6 +14,9 @@ import ( const ( nilVoteStr string = "nil-Vote" + + // The maximum supported number of bytes in a vote extension. 
+ MaxVoteExtensionSize int = 1024 * 1024 ) var ( @@ -109,6 +112,26 @@ func (vote *Vote) CommitSig() CommitSig { } } +// ExtendedCommitSig attempts to construct an ExtendedCommitSig from this vote. +// Panics if either the vote extension signature is missing or if the block ID +// is not either empty or complete. +func (vote *Vote) ExtendedCommitSig() ExtendedCommitSig { + if vote == nil { + return NewExtendedCommitSigAbsent() + } + + cs := vote.CommitSig() + if vote.BlockID.IsComplete() && len(vote.ExtensionSignature) == 0 { + panic(fmt.Sprintf("Invalid vote %v - BlockID is complete but missing vote extension signature", vote)) + } + + return ExtendedCommitSig{ + CommitSig: cs, + Extension: vote.Extension, + ExtensionSignature: vote.ExtensionSignature, + } +} + // VoteSignBytes returns the proto-encoding of the canonicalized Vote, for // signing. Panics if the marshaling fails. // @@ -216,12 +239,10 @@ func (vote *Vote) VerifyWithExtension(chainID string, pubKey crypto.PubKey) erro if err != nil { return err } - // We only verify vote extension signatures for precommits. - if vote.Type == tmproto.PrecommitType { + // We only verify vote extension signatures for non-nil precommits. + if vote.Type == tmproto.PrecommitType && !ProtoBlockIDIsNil(&v.BlockID) { extSignBytes := VoteExtensionSignBytes(chainID, v) - // TODO: Remove extension signature nil check to enforce vote extension - // signing once we resolve https://github.com/tendermint/tendermint/issues/8272 - if vote.ExtensionSignature != nil && !pubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) { + if !pubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) { return ErrVoteInvalidSignature } } @@ -273,8 +294,10 @@ func (vote *Vote) ValidateBasic() error { return fmt.Errorf("signature is too big (max: %d)", MaxSignatureSize) } - // We should only ever see vote extensions in precommits. 
- if vote.Type != tmproto.PrecommitType { + // We should only ever see vote extensions in non-nil precommits, otherwise + // this is a violation of the specification. + // https://github.com/tendermint/tendermint/issues/8487 + if vote.Type != tmproto.PrecommitType || (vote.Type == tmproto.PrecommitType && vote.BlockID.IsNil()) { if len(vote.Extension) > 0 { return errors.New("unexpected vote extension") } @@ -294,12 +317,9 @@ func (vote *Vote) ValidateWithExtension() error { return err } - // We should always see vote extension signatures in precommits - if vote.Type == tmproto.PrecommitType { - // TODO(thane): Remove extension length check once - // https://github.com/tendermint/tendermint/issues/8272 is - // resolved. - if len(vote.Extension) > 0 && len(vote.ExtensionSignature) == 0 { + // We should always see vote extension signatures in non-nil precommits + if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() { + if len(vote.ExtensionSignature) == 0 { return errors.New("vote extension signature is missing") } if len(vote.ExtensionSignature) > MaxSignatureSize { diff --git a/types/vote_set.go b/types/vote_set.go index b4d149576b..224d4e4f86 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -220,13 +220,6 @@ func (voteSet *VoteSet) getVote(valIndex int32, blockKey string) (vote *Vote, ok return nil, false } -func (voteSet *VoteSet) GetVotes() []*Vote { - if voteSet == nil { - return nil - } - return voteSet.votes -} - // Assumes signature is valid. // If conflicting vote exists, returns it. func (voteSet *VoteSet) addVerifiedVote( @@ -606,36 +599,41 @@ func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { //-------------------------------------------------------------------------------- // Commit -// MakeCommit constructs a Commit from the VoteSet. It only includes precommits -// for the block, which has 2/3+ majority, and nil. +// MakeExtendedCommit constructs a Commit from the VoteSet. 
It only includes +// precommits for the block, which has 2/3+ majority, and nil. // // Panics if the vote type is not PrecommitType or if there's no +2/3 votes for // a single block. -func (voteSet *VoteSet) MakeCommit() *Commit { +func (voteSet *VoteSet) MakeExtendedCommit() *ExtendedCommit { if voteSet.signedMsgType != tmproto.PrecommitType { - panic("Cannot MakeCommit() unless VoteSet.Type is PrecommitType") + panic("Cannot MakeExtendCommit() unless VoteSet.Type is PrecommitType") } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() // Make sure we have a 2/3 majority if voteSet.maj23 == nil { - panic("Cannot MakeCommit() unless a blockhash has +2/3") + panic("Cannot MakeExtendCommit() unless a blockhash has +2/3") } - // For every validator, get the precommit - commitSigs := make([]CommitSig, len(voteSet.votes)) + // For every validator, get the precommit with extensions + sigs := make([]ExtendedCommitSig, len(voteSet.votes)) for i, v := range voteSet.votes { - commitSig := v.CommitSig() + sig := v.ExtendedCommitSig() // if block ID exists but doesn't match, exclude sig - if commitSig.ForBlock() && !v.BlockID.Equals(*voteSet.maj23) { - commitSig = NewCommitSigAbsent() + if sig.BlockIDFlag == BlockIDFlagCommit && !v.BlockID.Equals(*voteSet.maj23) { + sig = NewExtendedCommitSigAbsent() } - commitSigs[i] = commitSig + sigs[i] = sig } - return NewCommit(voteSet.GetHeight(), voteSet.GetRound(), *voteSet.maj23, commitSigs) + return &ExtendedCommit{ + Height: voteSet.GetHeight(), + Round: voteSet.GetRound(), + BlockID: *voteSet.maj23, + ExtendedSignatures: sigs, + } } //-------------------------------------------------------------------------------- diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 1805b4c3e6..8d166d508d 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -450,7 +450,7 @@ func TestVoteSet_MakeCommit(t *testing.T) { } // MakeCommit should fail. 
- assert.Panics(t, func() { voteSet.MakeCommit() }, "Doesn't have +2/3 majority") + assert.Panics(t, func() { voteSet.MakeExtendedCommit() }, "Doesn't have +2/3 majority") // 7th voted for some other block. { @@ -487,13 +487,13 @@ func TestVoteSet_MakeCommit(t *testing.T) { require.NoError(t, err) } - commit := voteSet.MakeCommit() + extCommit := voteSet.MakeExtendedCommit() // Commit should have 10 elements - assert.Equal(t, 10, len(commit.Signatures)) + assert.Equal(t, 10, len(extCommit.ExtendedSignatures)) // Ensure that Commit is good. - if err := commit.ValidateBasic(); err != nil { + if err := extCommit.ValidateBasic(); err != nil { t.Errorf("error in Commit.ValidateBasic(): %v", err) } } diff --git a/types/vote_test.go b/types/vote_test.go index 5673ccf57a..70cd913812 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -223,26 +223,22 @@ func TestVoteExtension(t *testing.T) { includeSignature: true, expectError: false, }, - // TODO(thane): Re-enable once - // https://github.com/tendermint/tendermint/issues/8272 is resolved - //{ - // name: "no extension signature", - // extension: []byte("extension"), - // includeSignature: false, - // expectError: true, - //}, + { + name: "no extension signature", + extension: []byte("extension"), + includeSignature: false, + expectError: true, + }, { name: "empty extension", includeSignature: true, expectError: false, }, - // TODO: Re-enable once - // https://github.com/tendermint/tendermint/issues/8272 is resolved. 
- //{ - // name: "no extension and no signature", - // includeSignature: false, - // expectError: true, - //}, + { + name: "no extension and no signature", + includeSignature: false, + expectError: true, + }, } for _, tc := range testCases { @@ -497,11 +493,11 @@ func getSampleCommit(ctx context.Context, t testing.TB) *Commit { lastID := makeBlockIDRandom() voteSet, _, vals := randVoteSet(ctx, t, 2, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(ctx, lastID, 2, 1, voteSet, vals, time.Now()) + commit, err := makeExtCommit(ctx, lastID, 2, 1, voteSet, vals, time.Now()) require.NoError(t, err) - return commit + return commit.StripExtensions() } func BenchmarkVoteSignBytes(b *testing.B) { From b0fe38c24543ed61796a865fcdfc34ff57e97ba0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 May 2022 13:23:47 +0000 Subject: [PATCH 021/203] build(deps): Bump github.com/creachadair/tomledit from 0.0.19 to 0.0.22 (#8504) Bumps [github.com/creachadair/tomledit](https://github.com/creachadair/tomledit) from 0.0.19 to 0.0.22.
Commits
  • f7ad71d cli: accept @ prefixed value arguments for strings
  • 2cb36fd Release v0.0.21
  • f56c992 cli: move subcommands to a separate file
  • 0271385 cli: allow list arguments as prefix filters
  • 6e4454e cli: add subcommand "add"
  • d59f49c Add a basic command-line tool to read and set keys.
  • 9f9039f Add a test for top-level comment blocking.
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/creachadair/tomledit&package-manager=go_modules&previous-version=0.0.19&new-version=0.0.22)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6922847a33..024f910039 100644 --- a/go.mod +++ b/go.mod @@ -83,7 +83,7 @@ require ( github.com/charithe/durationcheck v0.0.9 // indirect github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect github.com/containerd/continuity v0.2.1 // indirect - github.com/creachadair/tomledit v0.0.19 + github.com/creachadair/tomledit v0.0.22 github.com/daixiang0/gci v0.3.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect diff --git a/go.sum b/go.sum index 96f0e92e24..e7c2ecda36 100644 --- a/go.sum +++ b/go.sum @@ -230,10 +230,11 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/atomicfile v0.2.6 h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg= github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc= +github.com/creachadair/command v0.0.0-20220426235536-a748effdf6a1/go.mod h1:bAM+qFQb/KwWyCc9MLC4U1jvn3XyakqP5QRkds5T6cY= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= -github.com/creachadair/tomledit v0.0.19 h1:zbpfUtYFYFdpRjwJY9HJlto1iZ4M5YwYB6qqc37F6UM= -github.com/creachadair/tomledit v0.0.19/go.mod h1:gvtfnSZLa+YNQD28vaPq0Nk12bRxEhmUdBzAWn+EGF4= +github.com/creachadair/tomledit v0.0.22 h1:lRtepmrwhzDq+g1gv5ftVn5itgo7CjYbm6abKTToqJ4= +github.com/creachadair/tomledit v0.0.22/go.mod h1:cIu/4x5L855oSRejIqr+WRFh+mv9g4fWLiUFaApYn/Y= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod 
h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= From 0130a8cf976fa6ba9cf8011b29c12ad61711e5fb Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Wed, 11 May 2022 18:06:15 -0400 Subject: [PATCH 022/203] scripts/metricsgen: add metricsdiff tool (#8501) Adds the `metricsdiff` tool. The metricsdiff tool parses two files containing prometheus metrics and calculates the sets of metrics that were added or removed between the two files or have changed labels. This tool is added to ensure that the metrics being generated for `metricsgen` match the bespoke metrics. The following metrics were found to be different between master and the branch the tool was built with. The output makes sense given that the metrics branch does _not_ contain https://github.com/tendermint/tendermint/pull/8480. ``` ./metricsdiff metrics_master metrics_generated Removes: --- tendermint_consensus_proposal_create_count --- tendermint_consensus_vote_extension_receive_count --- tendermint_consensus_round_voting_power_percent --- tendermint_consensus_proposal_receive_count ``` --- scripts/metricsgen/metricsdiff/metricsdiff.go | 197 ++++++++++++++ .../metricsdiff/metricsdiff_test.go | 62 ++++++ 2 files changed, 259 insertions(+) create mode 100644 scripts/metricsgen/metricsdiff/metricsdiff.go create mode 100644 scripts/metricsgen/metricsdiff/metricsdiff_test.go diff --git a/scripts/metricsgen/metricsdiff/metricsdiff.go b/scripts/metricsgen/metricsdiff/metricsdiff.go new file mode 100644 index 0000000000..5ed72ff97c --- /dev/null +++ b/scripts/metricsgen/metricsdiff/metricsdiff.go @@ -0,0 +1,197 @@ +// metricsdiff is a tool for generating a diff between two different files containing +// prometheus metrics. metricsdiff outputs which metrics have been added, removed, +// or have different sets of labels between the two files. 
+package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + "path/filepath" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +func init() { + flag.Usage = func() { + fmt.Fprintf(os.Stderr, `Usage: %[1]s + +Generate the diff between the two files of Prometheus metrics. +The input should have the format output by a Prometheus HTTP endpoint. +The tool indicates which metrics have been added, removed, or use different +label sets from path1 to path2. + +`, filepath.Base(os.Args[0])) + flag.PrintDefaults() + } +} + +// Diff contains the set of metrics that were modified between two files +// containing prometheus metrics output. +type Diff struct { + Adds []string + Removes []string + + Changes []LabelDiff +} + +// LabelDiff describes the label changes between two versions of the same metric. +type LabelDiff struct { + Metric string + Adds []string + Removes []string +} + +type parsedMetric struct { + name string + labels []string +} + +type metricsList []parsedMetric + +func main() { + flag.Parse() + if flag.NArg() != 2 { + log.Fatalf("Usage is '%s ', got %d arguments", + filepath.Base(os.Args[0]), flag.NArg()) + } + fa, err := os.Open(flag.Arg(0)) + if err != nil { + log.Fatalf("Open: %v", err) + } + defer fa.Close() + fb, err := os.Open(flag.Arg(1)) + if err != nil { + log.Fatalf("Open: %v", err) + } + defer fb.Close() + md, err := DiffFromReaders(fa, fb) + if err != nil { + log.Fatalf("Generating diff: %v", err) + } + fmt.Print(md) +} + +// DiffFromReaders parses the metrics present in the readers a and b and +// determines which metrics were added and removed in b. 
+func DiffFromReaders(a, b io.Reader) (Diff, error) { + var parser expfmt.TextParser + amf, err := parser.TextToMetricFamilies(a) + if err != nil { + return Diff{}, err + } + bmf, err := parser.TextToMetricFamilies(b) + if err != nil { + return Diff{}, err + } + + md := Diff{} + aList := toList(amf) + bList := toList(bmf) + + i, j := 0, 0 + for i < len(aList) || j < len(bList) { + for j < len(bList) && (i >= len(aList) || bList[j].name < aList[i].name) { + md.Adds = append(md.Adds, bList[j].name) + j++ + } + for i < len(aList) && j < len(bList) && aList[i].name == bList[j].name { + adds, removes := listDiff(aList[i].labels, bList[j].labels) + if len(adds) > 0 || len(removes) > 0 { + md.Changes = append(md.Changes, LabelDiff{ + Metric: aList[i].name, + Adds: adds, + Removes: removes, + }) + } + i++ + j++ + } + for i < len(aList) && (j >= len(bList) || aList[i].name < bList[j].name) { + md.Removes = append(md.Removes, aList[i].name) + i++ + } + } + return md, nil +} + +func toList(l map[string]*dto.MetricFamily) metricsList { + r := make([]parsedMetric, len(l)) + var idx int + for name, family := range l { + r[idx] = parsedMetric{ + name: name, + labels: labelsToStringList(family.Metric[0].Label), + } + idx++ + } + sort.Sort(metricsList(r)) + return r +} + +func labelsToStringList(ls []*dto.LabelPair) []string { + r := make([]string, len(ls)) + for i, l := range ls { + r[i] = l.GetName() + } + return sort.StringSlice(r) +} + +func listDiff(a, b []string) ([]string, []string) { + adds, removes := []string{}, []string{} + i, j := 0, 0 + for i < len(a) || j < len(b) { + for j < len(b) && (i >= len(a) || b[j] < a[i]) { + adds = append(adds, b[j]) + j++ + } + for i < len(a) && j < len(b) && a[i] == b[j] { + i++ + j++ + } + for i < len(a) && (j >= len(b) || a[i] < b[j]) { + removes = append(removes, a[i]) + i++ + } + } + return adds, removes +} + +func (m metricsList) Len() int { return len(m) } +func (m metricsList) Less(i, j int) bool { return m[i].name < m[j].name } 
+func (m metricsList) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (m Diff) String() string { + var s strings.Builder + if len(m.Adds) > 0 || len(m.Removes) > 0 { + fmt.Fprintln(&s, "Metric changes:") + } + if len(m.Adds) > 0 { + for _, add := range m.Adds { + fmt.Fprintf(&s, "+++ %s\n", add) + } + } + if len(m.Removes) > 0 { + for _, rem := range m.Removes { + fmt.Fprintf(&s, "--- %s\n", rem) + } + } + if len(m.Changes) > 0 { + fmt.Fprintln(&s, "Label changes:") + for _, ld := range m.Changes { + fmt.Fprintf(&s, "Metric: %s\n", ld.Metric) + for _, add := range ld.Adds { + fmt.Fprintf(&s, "+++ %s\n", add) + } + for _, rem := range ld.Removes { + fmt.Fprintf(&s, "--- %s\n", rem) + } + } + } + return s.String() +} diff --git a/scripts/metricsgen/metricsdiff/metricsdiff_test.go b/scripts/metricsgen/metricsdiff/metricsdiff_test.go new file mode 100644 index 0000000000..ec27ef1e9b --- /dev/null +++ b/scripts/metricsgen/metricsdiff/metricsdiff_test.go @@ -0,0 +1,62 @@ +package main_test + +import ( + "bytes" + "io" + "testing" + + "github.com/stretchr/testify/require" + metricsdiff "github.com/tendermint/tendermint/scripts/metricsgen/metricsdiff" +) + +func TestDiff(t *testing.T) { + for _, tc := range []struct { + name string + aContents string + bContents string + + want string + }{ + { + name: "labels", + aContents: ` + metric_one{label_one="content", label_two="content"} 0 + `, + bContents: ` + metric_one{label_three="content", label_four="content"} 0 + `, + want: `Label changes: +Metric: metric_one ++++ label_three ++++ label_four +--- label_one +--- label_two +`, + }, + { + name: "metrics", + aContents: ` + metric_one{label_one="content"} 0 + `, + bContents: ` + metric_two{label_two="content"} 0 + `, + want: `Metric changes: ++++ metric_two +--- metric_one +`, + }, + } { + t.Run(tc.name, func(t *testing.T) { + bufA := bytes.NewBuffer([]byte{}) + bufB := bytes.NewBuffer([]byte{}) + _, err := io.WriteString(bufA, tc.aContents) + require.NoError(t, err) + _, err 
= io.WriteString(bufB, tc.bContents) + require.NoError(t, err) + md, err := metricsdiff.DiffFromReaders(bufA, bufB) + require.NoError(t, err) + require.Equal(t, tc.want, md.String()) + }) + } +} From b5550b0d1b8b85e00f50763d7533f7a182a3c7f3 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 12 May 2022 06:43:05 -0700 Subject: [PATCH 023/203] rpc: make block.height visible to event subscription (#8508) Although we index block.height for blocks in the KV indexer, this reserved attribute was not previously exposed to the event subscription API. Despite being advertised in the OpenAPI spec, neither the old (websocket) nor new (events) query interface could see it. This change exposes block.height to the /events API. In addition: Remove a non-public constant from types (finalize_block). This value is used only as an internal tag by the indexer, and should not be exposed to users of the public interface. (We could probably drop it entirely, as it was previously a disambiguator for BeginBlock vs. EndBlock events, but keeping a tag here simplifies the cleanup). --- internal/state/indexer/block/kv/kv.go | 2 +- types/events.go | 15 ++++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/internal/state/indexer/block/kv/kv.go b/internal/state/indexer/block/kv/kv.go index 5356b4c07b..1b9a3120b9 100644 --- a/internal/state/indexer/block/kv/kv.go +++ b/internal/state/indexer/block/kv/kv.go @@ -65,7 +65,7 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { } // 2. 
index FinalizeBlock events - if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, types.EventTypeFinalizeBlock, height); err != nil { + if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, "finalize_block", height); err != nil { return fmt.Errorf("failed to index FinalizeBlock events: %w", err) } diff --git a/types/events.go b/types/events.go index d87b74cb8d..c818144dbf 100644 --- a/types/events.go +++ b/types/events.go @@ -131,7 +131,10 @@ type EventDataNewBlock struct { func (EventDataNewBlock) TypeTag() string { return "tendermint/event/NewBlock" } // ABCIEvents implements the eventlog.ABCIEventer interface. -func (e EventDataNewBlock) ABCIEvents() []abci.Event { return e.ResultFinalizeBlock.Events } +func (e EventDataNewBlock) ABCIEvents() []abci.Event { + base := []abci.Event{eventWithAttr(BlockHeightKey, fmt.Sprint(e.Block.Header.Height))} + return append(base, e.ResultFinalizeBlock.Events...) +} type EventDataNewBlockHeader struct { Header Header `json:"header"` @@ -144,7 +147,10 @@ type EventDataNewBlockHeader struct { func (EventDataNewBlockHeader) TypeTag() string { return "tendermint/event/NewBlockHeader" } // ABCIEvents implements the eventlog.ABCIEventer interface. -func (e EventDataNewBlockHeader) ABCIEvents() []abci.Event { return e.ResultFinalizeBlock.Events } +func (e EventDataNewBlockHeader) ABCIEvents() []abci.Event { + base := []abci.Event{eventWithAttr(BlockHeightKey, fmt.Sprint(e.Header.Height))} + return append(base, e.ResultFinalizeBlock.Events...) +} type EventDataNewEvidence struct { Evidence Evidence `json:"evidence"` @@ -262,18 +268,17 @@ func (EventDataEvidenceValidated) TypeTag() string { return "tendermint/event/Ev const ( // EventTypeKey is a reserved composite key for event name. EventTypeKey = "tm.event" + // TxHashKey is a reserved key, used to specify transaction's hash. // see EventBus#PublishEventTx TxHashKey = "tx.hash" + // TxHeightKey is a reserved key, used to specify transaction block's height. 
// see EventBus#PublishEventTx TxHeightKey = "tx.height" // BlockHeightKey is a reserved key used for indexing FinalizeBlock events. BlockHeightKey = "block.height" - - // EventTypeFinalizeBlock is a reserved key used for indexing FinalizeBlock events. - EventTypeFinalizeBlock = "finalize_block" ) var ( From 92811b9153998145efc1255e2b8d94f6dc9f8caf Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Thu, 12 May 2022 14:39:12 -0400 Subject: [PATCH 024/203] metrics: transition all metrics to using metricsgen generated constructors. (#8488) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What does this change do? This pull request completes the change to the `metricsgen` metrics. It adds `go generate` directives to all of the files containing the `Metrics` structs. Using the outputs of `metricsdiff` between these generated metrics and `master`, we can see that there is not a diff between the two sets of metrics when run locally. ``` [william@sidewinder] tendermint[wb/metrics-gen-transition]:. ◆ ./scripts/metricsgen/metricsdiff/metricsdiff metrics_master metrics_generated [william@sidewinder] tendermint[wb/metrics-gen-transition]:. ◆ ``` This change also adds parsing for a `metrics:` key in a field comment. If a comment line begins with `//metrics:` the rest of the line is interpreted to be the metric help text. Additionally, a bug where lists of labels were not properly quoted in the `metricsgen` rendered output was fixed. 
--- internal/consensus/metrics.gen.go | 248 ++++++++++++++ internal/consensus/metrics.go | 313 ++---------------- internal/eventlog/eventlog.go | 18 +- internal/eventlog/metrics.gen.go | 30 ++ internal/eventlog/metrics.go | 37 +-- internal/eventlog/prune.go | 4 +- internal/evidence/metrics.gen.go | 30 ++ internal/evidence/metrics.go | 33 +- internal/mempool/metrics.gen.go | 67 ++++ internal/mempool/metrics.go | 79 +---- internal/p2p/metrics.gen.go | 86 +++++ internal/p2p/metrics.go | 131 ++------ internal/p2p/metrics_test.go | 6 +- internal/p2p/pqueue.go | 5 +- internal/p2p/pqueue_test.go | 2 +- internal/p2p/router.go | 9 +- internal/proxy/metrics.gen.go | 32 ++ internal/proxy/metrics.go | 36 +- internal/state/indexer/metrics.go | 2 +- internal/state/metrics.gen.go | 46 +++ internal/state/metrics.go | 52 +-- internal/statesync/metrics.gen.go | 72 ++++ internal/statesync/metrics.go | 89 +---- scripts/metricsgen/metricsgen.go | 33 +- scripts/metricsgen/metricsgen_test.go | 4 +- .../testdata/commented/metrics.gen.go | 2 +- .../metricsgen/testdata/tags/metrics.gen.go | 3 +- 27 files changed, 759 insertions(+), 710 deletions(-) create mode 100644 internal/consensus/metrics.gen.go create mode 100644 internal/eventlog/metrics.gen.go create mode 100644 internal/evidence/metrics.gen.go create mode 100644 internal/mempool/metrics.gen.go create mode 100644 internal/p2p/metrics.gen.go create mode 100644 internal/proxy/metrics.gen.go create mode 100644 internal/state/metrics.gen.go create mode 100644 internal/statesync/metrics.gen.go diff --git a/internal/consensus/metrics.gen.go b/internal/consensus/metrics.gen.go new file mode 100644 index 0000000000..55cc59f6c5 --- /dev/null +++ b/internal/consensus/metrics.gen.go @@ -0,0 +1,248 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package consensus + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "height", + Help: "Height of the chain.", + }, labels).With(labelsAndValues...), + ValidatorLastSignedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_last_signed_height", + Help: "Last height signed by this validator if the node is a validator.", + }, append(labels, "validator_address")).With(labelsAndValues...), + Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "rounds", + Help: "Number of rounds.", + }, labels).With(labelsAndValues...), + RoundDuration: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "round_duration", + Help: "Histogram of round duration.", + + Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), + }, labels).With(labelsAndValues...), + Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validators", + Help: "Number of validators.", + }, labels).With(labelsAndValues...), + ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validators_power", + Help: "Total power of all validators.", + }, labels).With(labelsAndValues...), + ValidatorPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: 
MetricsSubsystem, + Name: "validator_power", + Help: "Power of a validator.", + }, append(labels, "validator_address")).With(labelsAndValues...), + ValidatorMissedBlocks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_missed_blocks", + Help: "Amount of blocks missed per validator.", + }, append(labels, "validator_address")).With(labelsAndValues...), + MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "missing_validators", + Help: "Number of validators who did not sign.", + }, labels).With(labelsAndValues...), + MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "missing_validators_power", + Help: "Total power of the missing validators.", + }, labels).With(labelsAndValues...), + ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "byzantine_validators", + Help: "Number of validators who tried to double sign.", + }, labels).With(labelsAndValues...), + ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "byzantine_validators_power", + Help: "Total power of the byzantine validators.", + }, labels).With(labelsAndValues...), + BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_interval_seconds", + Help: "Time between this and the last block.", + }, labels).With(labelsAndValues...), + NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "num_txs", + Help: "Number of transactions.", + }, labels).With(labelsAndValues...), + BlockSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: 
namespace, + Subsystem: MetricsSubsystem, + Name: "block_size_bytes", + Help: "Size of the block.", + }, labels).With(labelsAndValues...), + TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "total_txs", + Help: "Total number of transactions.", + }, labels).With(labelsAndValues...), + CommittedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "latest_block_height", + Help: "The latest block height.", + }, labels).With(labelsAndValues...), + BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_syncing", + Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.", + }, labels).With(labelsAndValues...), + StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "state_syncing", + Help: "Whether or not a node is state syncing. 
1 if yes, 0 if no.", + }, labels).With(labelsAndValues...), + BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_parts", + Help: "Number of block parts transmitted by each peer.", + }, append(labels, "peer_id")).With(labelsAndValues...), + StepDuration: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "step_duration", + Help: "Histogram of durations for each step in the consensus protocol.", + + Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), + }, append(labels, "step")).With(labelsAndValues...), + BlockGossipReceiveLatency: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_gossip_receive_latency", + Help: "Histogram of time taken to receive a block in seconds, measured between when a new block is first discovered to when the block is completed.", + + Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), + }, labels).With(labelsAndValues...), + BlockGossipPartsReceived: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_gossip_parts_received", + Help: "Number of block parts received by the node, separated by whether the part was relevant to the block the node is trying to gather or not.", + }, append(labels, "matches_current")).With(labelsAndValues...), + QuorumPrevoteDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "quorum_prevote_delay", + Help: "Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum.", + }, append(labels, "proposer_address")).With(labelsAndValues...), + FullPrevoteDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: 
"full_prevote_delay", + Help: "Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted.", + }, append(labels, "proposer_address")).With(labelsAndValues...), + ProposalTimestampDifference: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "proposal_timestamp_difference", + Help: "Difference between the timestamp in the proposal message and the local time of the validator at the time it received the message.", + + Buckets: []float64{-10, -.5, -.025, 0, .1, .5, 1, 1.5, 2, 10}, + }, append(labels, "is_timely")).With(labelsAndValues...), + VoteExtensionReceiveCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "vote_extension_receive_count", + Help: "Number of vote extensions received labeled by application response status.", + }, append(labels, "status")).With(labelsAndValues...), + ProposalReceiveCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "proposal_receive_count", + Help: "Total number of proposals received by the node since process start labeled by application response status.", + }, append(labels, "status")).With(labelsAndValues...), + ProposalCreateCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "proposal_create_count", + Help: "Total number of proposals created by the node since process start.", + }, labels).With(labelsAndValues...), + RoundVotingPowerPercent: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "round_voting_power_percent", + Help: "A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round.", + }, append(labels, "vote_type")).With(labelsAndValues...), + LateVotes: 
prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "late_votes", + Help: "Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in.", + }, append(labels, "vote_type")).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + Height: discard.NewGauge(), + ValidatorLastSignedHeight: discard.NewGauge(), + Rounds: discard.NewGauge(), + RoundDuration: discard.NewHistogram(), + Validators: discard.NewGauge(), + ValidatorsPower: discard.NewGauge(), + ValidatorPower: discard.NewGauge(), + ValidatorMissedBlocks: discard.NewGauge(), + MissingValidators: discard.NewGauge(), + MissingValidatorsPower: discard.NewGauge(), + ByzantineValidators: discard.NewGauge(), + ByzantineValidatorsPower: discard.NewGauge(), + BlockIntervalSeconds: discard.NewHistogram(), + NumTxs: discard.NewGauge(), + BlockSizeBytes: discard.NewHistogram(), + TotalTxs: discard.NewGauge(), + CommittedHeight: discard.NewGauge(), + BlockSyncing: discard.NewGauge(), + StateSyncing: discard.NewGauge(), + BlockParts: discard.NewCounter(), + StepDuration: discard.NewHistogram(), + BlockGossipReceiveLatency: discard.NewHistogram(), + BlockGossipPartsReceived: discard.NewCounter(), + QuorumPrevoteDelay: discard.NewGauge(), + FullPrevoteDelay: discard.NewGauge(), + ProposalTimestampDifference: discard.NewHistogram(), + VoteExtensionReceiveCount: discard.NewCounter(), + ProposalReceiveCount: discard.NewCounter(), + ProposalCreateCount: discard.NewCounter(), + RoundVotingPowerPercent: discard.NewGauge(), + LateVotes: discard.NewCounter(), + } +} diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go index e5c0162f40..bdf0eb412c 100644 --- a/internal/consensus/metrics.go +++ b/internal/consensus/metrics.go @@ -5,14 +5,10 @@ import ( "time" "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" cstypes 
"github.com/tendermint/tendermint/internal/consensus/types" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" - - prometheus "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -21,28 +17,30 @@ const ( MetricsSubsystem = "consensus" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. type Metrics struct { // Height of the chain. Height metrics.Gauge - // ValidatorLastSignedHeight of a validator. - ValidatorLastSignedHeight metrics.Gauge + // Last height signed by this validator if the node is a validator. + ValidatorLastSignedHeight metrics.Gauge `metrics_labels:"validator_address"` // Number of rounds. Rounds metrics.Gauge // Histogram of round duration. - RoundDuration metrics.Histogram + RoundDuration metrics.Histogram `metrics_buckettype:"exprange" metrics_bucketsizes:"0.1, 100, 8"` // Number of validators. Validators metrics.Gauge // Total power of all validators. ValidatorsPower metrics.Gauge // Power of a validator. - ValidatorPower metrics.Gauge - // Amount of blocks missed by a validator. - ValidatorMissedBlocks metrics.Gauge + ValidatorPower metrics.Gauge `metrics_labels:"validator_address"` + // Amount of blocks missed per validator. + ValidatorMissedBlocks metrics.Gauge `metrics_labels:"validator_address"` // Number of validators who did not sign. MissingValidators metrics.Gauge // Total power of the missing validators. @@ -62,27 +60,27 @@ type Metrics struct { // Total number of transactions. TotalTxs metrics.Gauge // The latest block height. - CommittedHeight metrics.Gauge + CommittedHeight metrics.Gauge `metrics_name:"latest_block_height"` // Whether or not a node is block syncing. 1 if yes, 0 if no. BlockSyncing metrics.Gauge // Whether or not a node is state syncing. 1 if yes, 0 if no. StateSyncing metrics.Gauge - // Number of blockparts transmitted by peer. 
- BlockParts metrics.Counter + // Number of block parts transmitted by each peer. + BlockParts metrics.Counter `metrics_labels:"peer_id"` - // Histogram of step duration. - StepDuration metrics.Histogram + // Histogram of durations for each step in the consensus protocol. + StepDuration metrics.Histogram `metrics_labels:"step" metrics_buckettype:"exprange" metrics_bucketsizes:"0.1, 100, 8"` stepStart time.Time // Histogram of time taken to receive a block in seconds, measured between when a new block is first // discovered to when the block is completed. - BlockGossipReceiveLatency metrics.Histogram + BlockGossipReceiveLatency metrics.Histogram `metrics_buckettype:"exprange" metrics_bucketsizes:"0.1, 100, 8"` blockGossipStart time.Time // Number of block parts received by the node, separated by whether the part // was relevant to the block the node is trying to gather or not. - BlockGossipPartsReceived metrics.Counter + BlockGossipPartsReceived metrics.Counter `metrics_labels:"matches_current"` // QuroumPrevoteMessageDelay is the interval in seconds between the proposal // timestamp and the timestamp of the earliest prevote that achieved a quorum @@ -93,301 +91,50 @@ type Metrics struct { // be above 2/3 of the total voting power of the network defines the endpoint // the endpoint of the interval. Subtract the proposal timestamp from this endpoint // to obtain the quorum delay. - QuorumPrevoteDelay metrics.Gauge + //metrics:Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum. + QuorumPrevoteDelay metrics.Gauge `metrics_labels:"proposer_address"` // FullPrevoteDelay is the interval in seconds between the proposal // timestamp and the timestamp of the latest prevote in a round where 100% // of the voting power on the network issued prevotes. 
- FullPrevoteDelay metrics.Gauge + //metrics:Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted. + FullPrevoteDelay metrics.Gauge `metrics_labels:"proposer_address"` // ProposalTimestampDifference is the difference between the timestamp in // the proposal message and the local time of the validator at the time // that the validator received the message. - ProposalTimestampDifference metrics.Histogram + //metrics:Difference between the timestamp in the proposal message and the local time of the validator at the time it received the message. + ProposalTimestampDifference metrics.Histogram `metrics_labels:"is_timely" metrics_bucketsizes:"-10, -.5, -.025, 0, .1, .5, 1, 1.5, 2, 10"` // VoteExtensionReceiveCount is the number of vote extensions received by this // node. The metric is annotated by the status of the vote extension from the // application, either 'accepted' or 'rejected'. - VoteExtensionReceiveCount metrics.Counter + //metrics:Number of vote extensions received labeled by application response status. + VoteExtensionReceiveCount metrics.Counter `metrics_labels:"status"` // ProposalReceiveCount is the total number of proposals received by this node // since process start. // The metric is annotated by the status of the proposal from the application, // either 'accepted' or 'rejected'. - ProposalReceiveCount metrics.Counter + //metrics:Total number of proposals received by the node since process start labeled by application response status. + ProposalReceiveCount metrics.Counter `metrics_labels:"status"` // ProposalCreationCount is the total number of proposals created by this node // since process start. - // The metric is annotated by the status of the proposal from the application, - // either 'accepted' or 'rejected'. + //metrics:Total number of proposals created by the node since process start. 
ProposalCreateCount metrics.Counter // RoundVotingPowerPercent is the percentage of the total voting power received // with a round. The value begins at 0 for each round and approaches 1.0 as // additional voting power is observed. The metric is labeled by vote type. - RoundVotingPowerPercent metrics.Gauge + //metrics:A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round. + RoundVotingPowerPercent metrics.Gauge `metrics_labels:"vote_type"` // LateVotes stores the number of votes that were received by this node that // correspond to earlier heights and rounds than this node is currently // in. - LateVotes metrics.Counter -} - -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). -func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "height", - Help: "Height of the chain.", - }, labels).With(labelsAndValues...), - Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "rounds", - Help: "Number of rounds.", - }, labels).With(labelsAndValues...), - RoundDuration: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "round_duration", - Help: "Time spent in a round.", - Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), - }, labels).With(labelsAndValues...), - Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "validators", - Help: "Number of validators.", - }, labels).With(labelsAndValues...), - 
ValidatorLastSignedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "validator_last_signed_height", - Help: "Last signed height for a validator", - }, append(labels, "validator_address")).With(labelsAndValues...), - ValidatorMissedBlocks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "validator_missed_blocks", - Help: "Total missed blocks for a validator", - }, append(labels, "validator_address")).With(labelsAndValues...), - ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "validators_power", - Help: "Total power of all validators.", - }, labels).With(labelsAndValues...), - ValidatorPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "validator_power", - Help: "Power of a validator", - }, append(labels, "validator_address")).With(labelsAndValues...), - MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "missing_validators", - Help: "Number of validators who did not sign.", - }, labels).With(labelsAndValues...), - MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "missing_validators_power", - Help: "Total power of the missing validators.", - }, labels).With(labelsAndValues...), - ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "byzantine_validators", - Help: "Number of validators who tried to double sign.", - }, labels).With(labelsAndValues...), - ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "byzantine_validators_power", - Help: "Total power of the 
byzantine validators.", - }, labels).With(labelsAndValues...), - BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_interval_seconds", - Help: "Time between this and the last block.", - }, labels).With(labelsAndValues...), - NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "num_txs", - Help: "Number of transactions.", - }, labels).With(labelsAndValues...), - BlockSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_size_bytes", - Help: "Size of the block.", - }, labels).With(labelsAndValues...), - TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "total_txs", - Help: "Total number of transactions.", - }, labels).With(labelsAndValues...), - CommittedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "latest_block_height", - Help: "The latest block height.", - }, labels).With(labelsAndValues...), - BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_syncing", - Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.", - }, labels).With(labelsAndValues...), - StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "state_syncing", - Help: "Whether or not a node is state syncing. 
1 if yes, 0 if no.", - }, labels).With(labelsAndValues...), - BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_parts", - Help: "Number of blockparts transmitted by peer.", - }, append(labels, "peer_id")).With(labelsAndValues...), - BlockGossipReceiveLatency: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_gossip_receive_latency", - Help: "Difference in seconds between when the validator learns of a new block" + - "and when the validator receives the last piece of the block.", - Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), - }, labels).With(labelsAndValues...), - BlockGossipPartsReceived: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_gossip_parts_received", - Help: "Number of block parts received by the node, labeled by whether the " + - "part was relevant to the block the node was currently gathering or not.", - }, append(labels, "matches_current")).With(labelsAndValues...), - StepDuration: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "step_duration", - Help: "Time spent per step.", - Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), - }, append(labels, "step")).With(labelsAndValues...), - QuorumPrevoteDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "quorum_prevote_delay", - Help: "Difference in seconds between the proposal timestamp and the timestamp " + - "of the latest prevote that achieved a quorum in the prevote step.", - }, append(labels, "proposer_address")).With(labelsAndValues...), - FullPrevoteDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "full_prevote_delay", - Help: 
"Difference in seconds between the proposal timestamp and the timestamp " + - "of the latest prevote that achieved 100% of the voting power in the prevote step.", - }, append(labels, "proposer_address")).With(labelsAndValues...), - ProposalTimestampDifference: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "proposal_timestamp_difference", - Help: "Difference in seconds between the timestamp in the proposal " + - "message and the local time when the message was received. " + - "Only calculated when a new block is proposed.", - Buckets: []float64{-10, -.5, -.025, 0, .1, .5, 1, 1.5, 2, 10}, - }, append(labels, "is_timely")).With(labelsAndValues...), - VoteExtensionReceiveCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "vote_extension_receive_count", - Help: "Number of vote extensions received by the node since process start, labeled by " + - "the application's response to VerifyVoteExtension, either accept or reject.", - }, append(labels, "status")).With(labelsAndValues...), - - ProposalReceiveCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "proposal_receive_count", - Help: "Number of vote proposals received by the node since process start, labeled by " + - "the application's response to ProcessProposal, either accept or reject.", - }, append(labels, "status")).With(labelsAndValues...), - - ProposalCreateCount: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "proposal_create_count", - Help: "Number of proposals created by the node since process start.", - }, labels).With(labelsAndValues...), - - RoundVotingPowerPercent: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "round_voting_power_percent", - Help: "Percentage of 
the total voting power received with a round. " + - "The value begins at 0 for each round and approaches 1.0 as additional " + - "voting power is observed.", - }, append(labels, "vote_type")).With(labelsAndValues...), - LateVotes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "late_votes", - Help: "Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in.", - }, append(labels, "vote_type")).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - Height: discard.NewGauge(), - - ValidatorLastSignedHeight: discard.NewGauge(), - - Rounds: discard.NewGauge(), - RoundDuration: discard.NewHistogram(), - StepDuration: discard.NewHistogram(), - - Validators: discard.NewGauge(), - ValidatorsPower: discard.NewGauge(), - ValidatorPower: discard.NewGauge(), - ValidatorMissedBlocks: discard.NewGauge(), - MissingValidators: discard.NewGauge(), - MissingValidatorsPower: discard.NewGauge(), - ByzantineValidators: discard.NewGauge(), - ByzantineValidatorsPower: discard.NewGauge(), - - BlockIntervalSeconds: discard.NewHistogram(), - - NumTxs: discard.NewGauge(), - BlockSizeBytes: discard.NewHistogram(), - TotalTxs: discard.NewGauge(), - CommittedHeight: discard.NewGauge(), - BlockSyncing: discard.NewGauge(), - StateSyncing: discard.NewGauge(), - BlockParts: discard.NewCounter(), - BlockGossipReceiveLatency: discard.NewHistogram(), - BlockGossipPartsReceived: discard.NewCounter(), - QuorumPrevoteDelay: discard.NewGauge(), - FullPrevoteDelay: discard.NewGauge(), - ProposalTimestampDifference: discard.NewHistogram(), - VoteExtensionReceiveCount: discard.NewCounter(), - ProposalReceiveCount: discard.NewCounter(), - ProposalCreateCount: discard.NewCounter(), - RoundVotingPowerPercent: discard.NewGauge(), - LateVotes: discard.NewCounter(), - } + //metrics:Number of votes 
received by the node since process start that correspond to earlier heights and rounds than this node is currently in. + LateVotes metrics.Counter `metrics_labels:"vote_type"` } // RecordConsMetrics uses for recording the block related metrics during fast-sync. diff --git a/internal/eventlog/eventlog.go b/internal/eventlog/eventlog.go index b507f79bc1..31c7d14fec 100644 --- a/internal/eventlog/eventlog.go +++ b/internal/eventlog/eventlog.go @@ -24,9 +24,9 @@ import ( // any number of readers. type Log struct { // These values do not change after construction. - windowSize time.Duration - maxItems int - numItemsGauge gauge + windowSize time.Duration + maxItems int + metrics *Metrics // Protects access to the fields below. Lock to modify the values of these // fields, or to read or snapshot the values. @@ -45,14 +45,14 @@ func New(opts LogSettings) (*Log, error) { return nil, errors.New("window size must be positive") } lg := &Log{ - windowSize: opts.WindowSize, - maxItems: opts.MaxItems, - numItemsGauge: discard{}, - ready: make(chan struct{}), - source: opts.Source, + windowSize: opts.WindowSize, + maxItems: opts.MaxItems, + metrics: NopMetrics(), + ready: make(chan struct{}), + source: opts.Source, } if opts.Metrics != nil { - lg.numItemsGauge = opts.Metrics.numItemsGauge + lg.metrics = opts.Metrics } return lg, nil } diff --git a/internal/eventlog/metrics.gen.go b/internal/eventlog/metrics.gen.go new file mode 100644 index 0000000000..d9d86b2b9e --- /dev/null +++ b/internal/eventlog/metrics.gen.go @@ -0,0 +1,30 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package eventlog + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + numItems: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "num_items", + Help: "Number of items currently resident in the event log.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + numItems: discard.NewGauge(), + } +} diff --git a/internal/eventlog/metrics.go b/internal/eventlog/metrics.go index cc319032ee..fb7ccf694e 100644 --- a/internal/eventlog/metrics.go +++ b/internal/eventlog/metrics.go @@ -1,39 +1,14 @@ package eventlog -import ( - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" -) +import "github.com/go-kit/kit/metrics" -// gauge is the subset of the Prometheus gauge interface used here. -type gauge interface { - Set(float64) -} +const MetricsSubsystem = "eventlog" + +//go:generate go run ../../scripts/metricsgen -struct=Metrics // Metrics define the metrics exported by the eventlog package. type Metrics struct { - numItemsGauge gauge -} - -// discard is a no-op implementation of the gauge interface. -type discard struct{} - -func (discard) Set(float64) {} - -const eventlogSubsystem = "eventlog" -// PrometheusMetrics returns a collection of eventlog metrics for Prometheus. 
-func PrometheusMetrics(ns string, fields ...string) *Metrics { - var labels []string - for i := 0; i < len(fields); i += 2 { - labels = append(labels, fields[i]) - } - return &Metrics{ - numItemsGauge: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: ns, - Subsystem: eventlogSubsystem, - Name: "num_items", - Help: "Number of items currently resident in the event log.", - }, labels).With(fields...), - } + // Number of items currently resident in the event log. + numItems metrics.Gauge } diff --git a/internal/eventlog/prune.go b/internal/eventlog/prune.go index 4c3c1f0d0a..062e91bd2b 100644 --- a/internal/eventlog/prune.go +++ b/internal/eventlog/prune.go @@ -12,7 +12,7 @@ func (lg *Log) checkPrune(head *logEntry, size int, age time.Duration) error { const windowSlop = 30 * time.Second if age < (lg.windowSize+windowSlop) && (lg.maxItems <= 0 || size <= lg.maxItems) { - lg.numItemsGauge.Set(float64(lg.numItems)) + lg.metrics.numItems.Set(float64(lg.numItems)) return nil // no pruning is needed } @@ -46,7 +46,7 @@ func (lg *Log) checkPrune(head *logEntry, size int, age time.Duration) error { lg.mu.Lock() defer lg.mu.Unlock() lg.numItems = newState.size - lg.numItemsGauge.Set(float64(newState.size)) + lg.metrics.numItems.Set(float64(newState.size)) lg.oldestCursor = newState.oldest lg.head = newState.head return err diff --git a/internal/evidence/metrics.gen.go b/internal/evidence/metrics.gen.go new file mode 100644 index 0000000000..f2eb7dfa8f --- /dev/null +++ b/internal/evidence/metrics.gen.go @@ -0,0 +1,30 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package evidence + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + NumEvidence: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "num_evidence", + Help: "Number of pending evidence in the evidence pool.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + NumEvidence: discard.NewGauge(), + } +} diff --git a/internal/evidence/metrics.go b/internal/evidence/metrics.go index 59efc23f91..adb0260f2d 100644 --- a/internal/evidence/metrics.go +++ b/internal/evidence/metrics.go @@ -2,9 +2,6 @@ package evidence import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -13,35 +10,11 @@ const ( MetricsSubsystem = "evidence_pool" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. // see MetricsProvider for descriptions. type Metrics struct { - // Number of evidence in the evidence pool + // Number of pending evidence in the evidence pool. NumEvidence metrics.Gauge } - -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). 
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - - NumEvidence: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "num_evidence", - Help: "Number of pending evidence in evidence pool.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - NumEvidence: discard.NewGauge(), - } -} diff --git a/internal/mempool/metrics.gen.go b/internal/mempool/metrics.gen.go new file mode 100644 index 0000000000..100c5e71cb --- /dev/null +++ b/internal/mempool/metrics.gen.go @@ -0,0 +1,67 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package mempool + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "size", + Help: "Number of uncommitted transactions in the mempool.", + }, labels).With(labelsAndValues...), + TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "tx_size_bytes", + Help: "Histogram of transaction sizes in bytes.", + + Buckets: stdprometheus.ExponentialBuckets(1, 3, 7), + }, labels).With(labelsAndValues...), + FailedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "failed_txs", + Help: "Number of failed transactions.", + }, 
labels).With(labelsAndValues...), + RejectedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "rejected_txs", + Help: "Number of rejected transactions.", + }, labels).With(labelsAndValues...), + EvictedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "evicted_txs", + Help: "Number of evicted transactions.", + }, labels).With(labelsAndValues...), + RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "recheck_times", + Help: "Number of times transactions are rechecked in the mempool.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + Size: discard.NewGauge(), + TxSizeBytes: discard.NewHistogram(), + FailedTxs: discard.NewCounter(), + RejectedTxs: discard.NewCounter(), + EvictedTxs: discard.NewCounter(), + RecheckTimes: discard.NewCounter(), + } +} diff --git a/internal/mempool/metrics.go b/internal/mempool/metrics.go index 5d3022e80e..5323076351 100644 --- a/internal/mempool/metrics.go +++ b/internal/mempool/metrics.go @@ -2,9 +2,6 @@ package mempool import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -13,14 +10,16 @@ const ( MetricsSubsystem = "mempool" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. // see MetricsProvider for descriptions. type Metrics struct { - // Size of the mempool. + // Number of uncommitted transactions in the mempool. Size metrics.Gauge - // Histogram of transaction sizes, in bytes. - TxSizeBytes metrics.Histogram + // Histogram of transaction sizes in bytes. 
+ TxSizeBytes metrics.Histogram `metrics_buckettype:"exp" metrics_bucketsizes:"1,3,7"` // Number of failed transactions. FailedTxs metrics.Counter @@ -29,80 +28,16 @@ type Metrics struct { // transactions that passed CheckTx but failed to make it into the mempool // due to resource limits, e.g. mempool is full and no lower priority // transactions exist in the mempool. + //metrics:Number of rejected transactions. RejectedTxs metrics.Counter // EvictedTxs defines the number of evicted transactions. These are valid // transactions that passed CheckTx and existed in the mempool but were later // evicted to make room for higher priority valid transactions that passed // CheckTx. + //metrics:Number of evicted transactions. EvictedTxs metrics.Counter // Number of times transactions are rechecked in the mempool. RecheckTimes metrics.Counter } - -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). 
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "size", - Help: "Size of the mempool (number of uncommitted transactions).", - }, labels).With(labelsAndValues...), - - TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "tx_size_bytes", - Help: "Transaction sizes in bytes.", - Buckets: stdprometheus.ExponentialBuckets(1, 3, 17), - }, labels).With(labelsAndValues...), - - FailedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "failed_txs", - Help: "Number of failed transactions.", - }, labels).With(labelsAndValues...), - - RejectedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "rejected_txs", - Help: "Number of rejected transactions.", - }, labels).With(labelsAndValues...), - - EvictedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "evicted_txs", - Help: "Number of evicted transactions.", - }, labels).With(labelsAndValues...), - - RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "recheck_times", - Help: "Number of times transactions are rechecked in the mempool.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. 
-func NopMetrics() *Metrics { - return &Metrics{ - Size: discard.NewGauge(), - TxSizeBytes: discard.NewHistogram(), - FailedTxs: discard.NewCounter(), - RejectedTxs: discard.NewCounter(), - EvictedTxs: discard.NewCounter(), - RecheckTimes: discard.NewCounter(), - } -} diff --git a/internal/p2p/metrics.gen.go b/internal/p2p/metrics.gen.go new file mode 100644 index 0000000000..cbfba29d94 --- /dev/null +++ b/internal/p2p/metrics.gen.go @@ -0,0 +1,86 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package p2p + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers", + Help: "Number of peers.", + }, labels).With(labelsAndValues...), + PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_receive_bytes_total", + Help: "Number of bytes per channel received from a given peer.", + }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), + PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_send_bytes_total", + Help: "Number of bytes per channel sent to a given peer.", + }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), + PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_pending_send_bytes", + Help: "Number of bytes pending being sent to a given peer.", + }, append(labels, 
"peer_id")).With(labelsAndValues...), + RouterPeerQueueRecv: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "router_peer_queue_recv", + Help: "The time taken to read off of a peer's queue before sending on the connection.", + }, labels).With(labelsAndValues...), + RouterPeerQueueSend: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "router_peer_queue_send", + Help: "The time taken to send on a peer's queue which will later be read and sent on the connection.", + }, labels).With(labelsAndValues...), + RouterChannelQueueSend: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "router_channel_queue_send", + Help: "The time taken to send on a p2p channel's queue which will later be consued by the corresponding reactor/service.", + }, labels).With(labelsAndValues...), + PeerQueueDroppedMsgs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "router_channel_queue_dropped_msgs", + Help: "The number of messages dropped from a peer's queue for a specific p2p Channel.", + }, append(labels, "ch_id")).With(labelsAndValues...), + PeerQueueMsgSize: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_queue_msg_size", + Help: "The size of messages sent over a peer's queue for a specific p2p Channel.", + }, append(labels, "ch_id")).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + Peers: discard.NewGauge(), + PeerReceiveBytesTotal: discard.NewCounter(), + PeerSendBytesTotal: discard.NewCounter(), + PeerPendingSendBytes: discard.NewGauge(), + RouterPeerQueueRecv: discard.NewHistogram(), + RouterPeerQueueSend: discard.NewHistogram(), + RouterChannelQueueSend: discard.NewHistogram(), + PeerQueueDroppedMsgs: 
discard.NewCounter(), + PeerQueueMsgSize: discard.NewGauge(), + } +} diff --git a/internal/p2p/metrics.go b/internal/p2p/metrics.go index 2780d221ef..b45f128e5a 100644 --- a/internal/p2p/metrics.go +++ b/internal/p2p/metrics.go @@ -7,9 +7,6 @@ import ( "sync" "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -25,140 +22,55 @@ var ( valueToLabelRegexp = regexp.MustCompile(`\*?(\w+)\.(.*)`) ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. type Metrics struct { // Number of peers. Peers metrics.Gauge - // Number of bytes received from a given peer. - PeerReceiveBytesTotal metrics.Counter - // Number of bytes sent to a given peer. - PeerSendBytesTotal metrics.Counter - // Pending bytes to be sent to a given peer. - PeerPendingSendBytes metrics.Gauge + // Number of bytes per channel received from a given peer. + PeerReceiveBytesTotal metrics.Counter `metrics_labels:"peer_id, chID, message_type"` + // Number of bytes per channel sent to a given peer. + PeerSendBytesTotal metrics.Counter `metrics_labels:"peer_id, chID, message_type"` + // Number of bytes pending being sent to a given peer. + PeerPendingSendBytes metrics.Gauge `metrics_labels:"peer_id"` // RouterPeerQueueRecv defines the time taken to read off of a peer's queue // before sending on the connection. + //metrics:The time taken to read off of a peer's queue before sending on the connection. RouterPeerQueueRecv metrics.Histogram // RouterPeerQueueSend defines the time taken to send on a peer's queue which // will later be read and sent on the connection (see RouterPeerQueueRecv). + //metrics:The time taken to send on a peer's queue which will later be read and sent on the connection. 
 RouterPeerQueueSend metrics.Histogram // RouterChannelQueueSend defines the time taken to send on a p2p channel's // queue which will later be consued by the corresponding reactor/service. + //metrics:The time taken to send on a p2p channel's queue which will later be consumed by the corresponding reactor/service. RouterChannelQueueSend metrics.Histogram // PeerQueueDroppedMsgs defines the number of messages dropped from a peer's // queue for a specific flow (i.e. Channel). - PeerQueueDroppedMsgs metrics.Counter + //metrics:The number of messages dropped from a peer's queue for a specific p2p Channel. + PeerQueueDroppedMsgs metrics.Counter `metrics_labels:"ch_id" metrics_name:"router_channel_queue_dropped_msgs"` // PeerQueueMsgSize defines the average size of messages sent over a peer's // queue for a specific flow (i.e. Channel). - PeerQueueMsgSize metrics.Gauge + //metrics:The size of messages sent over a peer's queue for a specific p2p Channel. + PeerQueueMsgSize metrics.Gauge `metrics_labels:"ch_id" metrics_name:"router_channel_queue_msg_size"` +} +type metricsLabelCache struct { mtx *sync.RWMutex messageLabelNames map[reflect.Type]string } -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue").
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "peers", - Help: "Number of peers.", - }, labels).With(labelsAndValues...), - - PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "peer_receive_bytes_total", - Help: "Number of bytes received from a given peer.", - }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), - - PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "peer_send_bytes_total", - Help: "Number of bytes sent to a given peer.", - }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), - - PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "peer_pending_send_bytes", - Help: "Number of pending bytes to be sent to a given peer.", - }, append(labels, "peer_id")).With(labelsAndValues...), - - RouterPeerQueueRecv: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "router_peer_queue_recv", - Help: "The time taken to read off of a peer's queue before sending on the connection.", - }, labels).With(labelsAndValues...), - - RouterPeerQueueSend: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "router_peer_queue_send", - Help: "The time taken to send on a peer's queue which will later be read and sent on the connection (see RouterPeerQueueRecv).", - }, labels).With(labelsAndValues...), - - RouterChannelQueueSend: 
prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "router_channel_queue_send", - Help: "The time taken to send on a p2p channel's queue which will later be consued by the corresponding reactor/service.", - }, labels).With(labelsAndValues...), - - PeerQueueDroppedMsgs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "router_channel_queue_dropped_msgs", - Help: "The number of messages dropped from a peer's queue for a specific p2p Channel.", - }, append(labels, "ch_id")).With(labelsAndValues...), - - PeerQueueMsgSize: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "router_channel_queue_msg_size", - Help: "The size of messages sent over a peer's queue for a specific p2p Channel.", - }, append(labels, "ch_id")).With(labelsAndValues...), - - mtx: &sync.RWMutex{}, - messageLabelNames: map[reflect.Type]string{}, - } -} - -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - Peers: discard.NewGauge(), - PeerReceiveBytesTotal: discard.NewCounter(), - PeerSendBytesTotal: discard.NewCounter(), - PeerPendingSendBytes: discard.NewGauge(), - RouterPeerQueueRecv: discard.NewHistogram(), - RouterPeerQueueSend: discard.NewHistogram(), - RouterChannelQueueSend: discard.NewHistogram(), - PeerQueueDroppedMsgs: discard.NewCounter(), - PeerQueueMsgSize: discard.NewGauge(), - mtx: &sync.RWMutex{}, - messageLabelNames: map[reflect.Type]string{}, - } -} - // ValueToMetricLabel is a method that is used to produce a prometheus label value of the golang // type that is passed in. // This method uses a map on the Metrics struct so that each label name only needs // to be produced once to prevent expensive string operations. 
-func (m *Metrics) ValueToMetricLabel(i interface{}) string { +func (m *metricsLabelCache) ValueToMetricLabel(i interface{}) string { t := reflect.TypeOf(i) m.mtx.RLock() @@ -176,3 +88,10 @@ func (m *Metrics) ValueToMetricLabel(i interface{}) string { m.messageLabelNames[t] = l return l } + +func newMetricsLabelCache() *metricsLabelCache { + return &metricsLabelCache{ + mtx: &sync.RWMutex{}, + messageLabelNames: map[reflect.Type]string{}, + } +} diff --git a/internal/p2p/metrics_test.go b/internal/p2p/metrics_test.go index 839786d919..98523fe822 100644 --- a/internal/p2p/metrics_test.go +++ b/internal/p2p/metrics_test.go @@ -9,12 +9,12 @@ import ( ) func TestValueToMetricsLabel(t *testing.T) { - m := NopMetrics() + lc := newMetricsLabelCache() r := &p2p.PexResponse{} - str := m.ValueToMetricLabel(r) + str := lc.ValueToMetricLabel(r) assert.Equal(t, "p2p_PexResponse", str) // subsequent calls to the function should produce the same result - str = m.ValueToMetricLabel(r) + str = lc.ValueToMetricLabel(r) assert.Equal(t, "p2p_PexResponse", str) } diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index 21c950dfb0..268daa8deb 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -70,6 +70,7 @@ var _ queue = (*pqScheduler)(nil) type pqScheduler struct { logger log.Logger metrics *Metrics + lc *metricsLabelCache size uint sizes map[uint]uint // cumulative priority sizes pq *priorityQueue @@ -88,6 +89,7 @@ type pqScheduler struct { func newPQScheduler( logger log.Logger, m *Metrics, + lc *metricsLabelCache, chDescs []*ChannelDescriptor, enqueueBuf, dequeueBuf, capacity uint, ) *pqScheduler { @@ -117,6 +119,7 @@ func newPQScheduler( return &pqScheduler{ logger: logger.With("router", "scheduler"), metrics: m, + lc: lc, chDescs: chDescsCopy, capacity: capacity, chPriorities: chPriorities, @@ -251,7 +254,7 @@ func (s *pqScheduler) process(ctx context.Context) { s.metrics.PeerSendBytesTotal.With( "chID", chIDStr, "peer_id", string(pqEnv.envelope.To), - 
"message_type", s.metrics.ValueToMetricLabel(pqEnv.envelope.Message)).Add(float64(pqEnv.size)) + "message_type", s.lc.ValueToMetricLabel(pqEnv.envelope.Message)).Add(float64(pqEnv.size)) s.metrics.PeerPendingSendBytes.With( "peer_id", string(pqEnv.envelope.To)).Add(float64(-pqEnv.size)) select { diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go index 22ecbcecb5..d1057ac7e2 100644 --- a/internal/p2p/pqueue_test.go +++ b/internal/p2p/pqueue_test.go @@ -17,7 +17,7 @@ func TestCloseWhileDequeueFull(t *testing.T) { chDescs := []*ChannelDescriptor{ {ID: 0x01, Priority: 1}, } - pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), chDescs, uint(enqueueLength), 1, 120) + pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), newMetricsLabelCache(), chDescs, uint(enqueueLength), 1, 120) for i := 0; i < enqueueLength; i++ { pqueue.enqueue() <- Envelope{ diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 459be79756..a9a01f3c7f 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -148,7 +148,9 @@ type Router struct { *service.BaseService logger log.Logger - metrics *Metrics + metrics *Metrics + lc *metricsLabelCache + options RouterOptions privKey crypto.PrivKey peerManager *PeerManager @@ -193,6 +195,7 @@ func NewRouter( router := &Router{ logger: logger, metrics: metrics, + lc: newMetricsLabelCache(), privKey: privKey, nodeInfoProducer: nodeInfoProducer, connTracker: newConnTracker( @@ -226,7 +229,7 @@ func (r *Router) createQueueFactory(ctx context.Context) (func(int) queue, error size++ } - q := newPQScheduler(r.logger, r.metrics, r.chDescs, uint(size)/2, uint(size)/2, defaultCapacity) + q := newPQScheduler(r.logger, r.metrics, r.lc, r.chDescs, uint(size)/2, uint(size)/2, defaultCapacity) q.start(ctx) return q }, nil @@ -839,7 +842,7 @@ func (r *Router) receivePeer(ctx context.Context, peerID types.NodeID, conn Conn r.metrics.PeerReceiveBytesTotal.With( "chID", fmt.Sprint(chID), "peer_id", string(peerID), - 
"message_type", r.metrics.ValueToMetricLabel(msg)).Add(float64(proto.Size(msg))) + "message_type", r.lc.ValueToMetricLabel(msg)).Add(float64(proto.Size(msg))) r.metrics.RouterChannelQueueSend.Observe(time.Since(start).Seconds()) r.logger.Debug("received message", "peer", peerID, "message", msg) diff --git a/internal/proxy/metrics.gen.go b/internal/proxy/metrics.gen.go new file mode 100644 index 0000000000..ea483f83db --- /dev/null +++ b/internal/proxy/metrics.gen.go @@ -0,0 +1,32 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package proxy + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + MethodTiming: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "method_timing", + Help: "Timing for each ABCI method.", + + Buckets: []float64{.0001, .0004, .002, .009, .02, .1, .65, 2, 6, 25}, + }, append(labels, "method", "type")).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + MethodTiming: discard.NewHistogram(), + } +} diff --git a/internal/proxy/metrics.go b/internal/proxy/metrics.go index 99bd7d7b04..b95687a03b 100644 --- a/internal/proxy/metrics.go +++ b/internal/proxy/metrics.go @@ -2,9 +2,6 @@ package proxy import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -13,35 +10,10 @@ const ( MetricsSubsystem = "abci_connection" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains the prometheus metrics exposed by the proxy package. 
type Metrics struct { - MethodTiming metrics.Histogram -} - -// PrometheusMetrics constructs a Metrics instance that collects metrics samples. -// The resulting metrics will be prefixed with namespace and labeled with the -// defaultLabelsAndValues. defaultLabelsAndValues must be a list of string pairs -// where the first of each pair is the label and the second is the value. -func PrometheusMetrics(namespace string, defaultLabelsAndValues ...string) *Metrics { - defaultLabels := []string{} - for i := 0; i < len(defaultLabelsAndValues); i += 2 { - defaultLabels = append(defaultLabels, defaultLabelsAndValues[i]) - } - return &Metrics{ - MethodTiming: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "method_timing", - Help: "ABCI Method Timing", - Buckets: []float64{.0001, .0004, .002, .009, .02, .1, .65, 2, 6, 25}, - }, append(defaultLabels, []string{"method", "type"}...)).With(defaultLabelsAndValues...), - } -} - -// NopMetrics constructs a Metrics instance that discards all samples and is suitable -// for testing. -func NopMetrics() *Metrics { - return &Metrics{ - MethodTiming: discard.NewHistogram(), - } + // Timing for each ABCI method. + MethodTiming metrics.Histogram `metrics_bucketsizes:".0001,.0004,.002,.009,.02,.1,.65,2,6,25" metrics_labels:"method, type"` } diff --git a/internal/state/indexer/metrics.go b/internal/state/indexer/metrics.go index 0b92b879e3..93dd0dc9ec 100644 --- a/internal/state/indexer/metrics.go +++ b/internal/state/indexer/metrics.go @@ -4,7 +4,7 @@ import ( "github.com/go-kit/kit/metrics" ) -//go:generate go run github.com/tendermint/tendermint/scripts/metricsgen -struct=Metrics +//go:generate go run ../../../scripts/metricsgen -struct=Metrics // MetricsSubsystem is a the subsystem label for the indexer package. 
const MetricsSubsystem = "indexer" diff --git a/internal/state/metrics.gen.go b/internal/state/metrics.gen.go new file mode 100644 index 0000000000..eb8ca9f780 --- /dev/null +++ b/internal/state/metrics.gen.go @@ -0,0 +1,46 @@ +// Code generated by metricsgen. DO NOT EDIT. + +package state + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + BlockProcessingTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_processing_time", + Help: "Time between BeginBlock and EndBlock.", + + Buckets: stdprometheus.LinearBuckets(1, 10, 10), + }, labels).With(labelsAndValues...), + ConsensusParamUpdates: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "consensus_param_updates", + Help: "Number of consensus parameter updates returned by the application since process start.", + }, labels).With(labelsAndValues...), + ValidatorSetUpdates: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_set_updates", + Help: "Number of validator set updates returned by the application since process start.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + BlockProcessingTime: discard.NewHistogram(), + ConsensusParamUpdates: discard.NewCounter(), + ValidatorSetUpdates: discard.NewCounter(), + } +} diff --git a/internal/state/metrics.go b/internal/state/metrics.go index 1d4a13b941..3663121a6c 100644 --- a/internal/state/metrics.go +++ b/internal/state/metrics.go @@ -2,9 +2,6 @@ package 
state import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -13,59 +10,20 @@ const ( MetricsSubsystem = "state" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. type Metrics struct { // Time between BeginBlock and EndBlock. - BlockProcessingTime metrics.Histogram + BlockProcessingTime metrics.Histogram `metrics_buckettype:"lin" metrics_bucketsizes:"1,10,10"` // ConsensusParamUpdates is the total number of times the application has // udated the consensus params since process start. + //metrics:Number of consensus parameter updates returned by the application since process start. ConsensusParamUpdates metrics.Counter // ValidatorSetUpdates is the total number of times the application has // udated the validator set since process start. + //metrics:Number of validator set updates returned by the application since process start. ValidatorSetUpdates metrics.Counter } - -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). 
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - BlockProcessingTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_processing_time", - Help: "Time between BeginBlock and EndBlock in ms.", - Buckets: stdprometheus.LinearBuckets(1, 10, 10), - }, labels).With(labelsAndValues...), - ConsensusParamUpdates: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "consensus_param_updates", - Help: "The total number of times the application as updated the consensus " + - "parameters since process start.", - }, labels).With(labelsAndValues...), - - ValidatorSetUpdates: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "validator_set_updates", - Help: "The total number of times the application as updated the validator " + - "set since process start.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - BlockProcessingTime: discard.NewHistogram(), - ConsensusParamUpdates: discard.NewCounter(), - ValidatorSetUpdates: discard.NewCounter(), - } -} diff --git a/internal/statesync/metrics.gen.go b/internal/statesync/metrics.gen.go new file mode 100644 index 0000000000..b4d5caa12c --- /dev/null +++ b/internal/statesync/metrics.gen.go @@ -0,0 +1,72 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package statesync + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + TotalSnapshots: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "total_snapshots", + Help: "The total number of snapshots discovered.", + }, labels).With(labelsAndValues...), + ChunkProcessAvgTime: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "chunk_process_avg_time", + Help: "The average processing time per chunk.", + }, labels).With(labelsAndValues...), + SnapshotHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_height", + Help: "The height of the current snapshot the has been processed.", + }, labels).With(labelsAndValues...), + SnapshotChunk: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_chunk", + Help: "The current number of chunks that have been processed.", + }, labels).With(labelsAndValues...), + SnapshotChunkTotal: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_chunk_total", + Help: "The total number of chunks in the current snapshot.", + }, labels).With(labelsAndValues...), + BackFilledBlocks: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "back_filled_blocks", + Help: "The current number of blocks that have been back-filled.", + }, labels).With(labelsAndValues...), + BackFillBlocksTotal: 
prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "back_fill_blocks_total", + Help: "The total number of blocks that need to be back-filled.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + TotalSnapshots: discard.NewCounter(), + ChunkProcessAvgTime: discard.NewGauge(), + SnapshotHeight: discard.NewGauge(), + SnapshotChunk: discard.NewCounter(), + SnapshotChunkTotal: discard.NewGauge(), + BackFilledBlocks: discard.NewCounter(), + BackFillBlocksTotal: discard.NewGauge(), + } +} diff --git a/internal/statesync/metrics.go b/internal/statesync/metrics.go index fb134f5804..a8a3af9152 100644 --- a/internal/statesync/metrics.go +++ b/internal/statesync/metrics.go @@ -2,9 +2,6 @@ package statesync import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" ) const ( @@ -12,80 +9,22 @@ const ( MetricsSubsystem = "statesync" ) +//go:generate go run ../../scripts/metricsgen -struct=Metrics + // Metrics contains metrics exposed by this package. type Metrics struct { - TotalSnapshots metrics.Counter + // The total number of snapshots discovered. + TotalSnapshots metrics.Counter + // The average processing time per chunk. ChunkProcessAvgTime metrics.Gauge - SnapshotHeight metrics.Gauge - SnapshotChunk metrics.Counter - SnapshotChunkTotal metrics.Gauge - BackFilledBlocks metrics.Counter + // The height of the current snapshot the has been processed. + SnapshotHeight metrics.Gauge + // The current number of chunks that have been processed. + SnapshotChunk metrics.Counter + // The total number of chunks in the current snapshot. + SnapshotChunkTotal metrics.Gauge + // The current number of blocks that have been back-filled. + BackFilledBlocks metrics.Counter + // The total number of blocks that need to be back-filled. 
BackFillBlocksTotal metrics.Gauge } - -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). -func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - TotalSnapshots: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "total_snapshots", - Help: "The total number of snapshots discovered.", - }, labels).With(labelsAndValues...), - ChunkProcessAvgTime: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "chunk_process_avg_time", - Help: "The average processing time per chunk.", - }, labels).With(labelsAndValues...), - SnapshotHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "snapshot_height", - Help: "The height of the current snapshot the has been processed.", - }, labels).With(labelsAndValues...), - SnapshotChunk: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "snapshot_chunk", - Help: "The current number of chunks that have been processed.", - }, labels).With(labelsAndValues...), - SnapshotChunkTotal: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "snapshot_chunks_total", - Help: "The total number of chunks in the current snapshot.", - }, labels).With(labelsAndValues...), - BackFilledBlocks: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "backfilled_blocks", - Help: "The current number of blocks that have been back-filled.", - }, labels).With(labelsAndValues...), - BackFillBlocksTotal: 
prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "backfilled_blocks_total", - Help: "The total number of blocks that need to be back-filled.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - TotalSnapshots: discard.NewCounter(), - ChunkProcessAvgTime: discard.NewGauge(), - SnapshotHeight: discard.NewGauge(), - SnapshotChunk: discard.NewCounter(), - SnapshotChunkTotal: discard.NewGauge(), - BackFilledBlocks: discard.NewCounter(), - BackFillBlocksTotal: discard.NewGauge(), - } -} diff --git a/scripts/metricsgen/metricsgen.go b/scripts/metricsgen/metricsgen.go index 70cb36a773..0f564e66ae 100644 --- a/scripts/metricsgen/metricsgen.go +++ b/scripts/metricsgen/metricsgen.go @@ -88,8 +88,8 @@ func PrometheusMetrics(namespace string, labelsAndValues...string) *Metrics { {{- if eq (len $metric.Labels) 0 }} }, labels).With(labelsAndValues...), {{ else }} - }, append(labels, {{$metric.Labels | printf "%q" }})).With(labelsAndValues...), - {{- end }} + }, append(labels, {{$metric.Labels}})).With(labelsAndValues...), + {{ end }} {{- end }} } } @@ -249,14 +249,8 @@ func findMetricsStruct(files map[string]*ast.File, structName string) (*ast.Stru } func parseMetricField(f *ast.Field) ParsedMetricField { - var comment string - if f.Doc != nil { - for _, c := range f.Doc.List { - comment += strings.TrimPrefix(c.Text, "// ") - } - } pmf := ParsedMetricField{ - Description: comment, + Description: extractHelpMessage(f.Doc), MetricName: extractFieldName(f.Names[0].String(), f.Tag), FieldName: f.Names[0].String(), TypeName: extractTypeName(f.Type), @@ -272,6 +266,21 @@ func extractTypeName(e ast.Expr) string { return strings.TrimPrefix(path.Ext(types.ExprString(e)), ".") } +func extractHelpMessage(cg *ast.CommentGroup) string { + if cg == nil { + return "" + } + var help []string //nolint: prealloc + for _, c := range cg.List { + mt := 
strings.TrimPrefix(c.Text, "//metrics:") + if mt != c.Text { + return strings.TrimSpace(mt) + } + help = append(help, strings.TrimSpace(strings.TrimPrefix(c.Text, "//"))) + } + return strings.Join(help, " ") +} + func isMetric(e ast.Expr, mPkgName string) bool { return strings.Contains(types.ExprString(e), fmt.Sprintf("%s.", mPkgName)) } @@ -280,7 +289,11 @@ func extractLabels(bl *ast.BasicLit) string { if bl != nil { t := reflect.StructTag(strings.Trim(bl.Value, "`")) if v := t.Get(labelsTag); v != "" { - return v + var res []string + for _, s := range strings.Split(v, ",") { + res = append(res, strconv.Quote(strings.TrimSpace(s))) + } + return strings.Join(res, ",") } } return "" diff --git a/scripts/metricsgen/metricsgen_test.go b/scripts/metricsgen/metricsgen_test.go index 83251e651b..a925b591d1 100644 --- a/scripts/metricsgen/metricsgen_test.go +++ b/scripts/metricsgen/metricsgen_test.go @@ -149,7 +149,7 @@ func TestParseMetricsStruct(t *testing.T) { { name: "metric labels", metricsStruct: "type Metrics struct {\n" + - "myCounter metrics.Counter `metrics_labels:\"label1, label2\"`\n" + + "myCounter metrics.Counter `metrics_labels:\"label1,label2\"`\n" + "}", expected: metricsgen.TemplateData{ Package: pkgName, @@ -158,7 +158,7 @@ func TestParseMetricsStruct(t *testing.T) { TypeName: "Counter", FieldName: "myCounter", MetricName: "my_counter", - Labels: "label1, label2", + Labels: "\"label1\",\"label2\"", }, }, }, diff --git a/scripts/metricsgen/testdata/commented/metrics.gen.go b/scripts/metricsgen/testdata/commented/metrics.gen.go index 038da3d463..c1346da384 100644 --- a/scripts/metricsgen/testdata/commented/metrics.gen.go +++ b/scripts/metricsgen/testdata/commented/metrics.gen.go @@ -18,7 +18,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Namespace: namespace, Subsystem: MetricsSubsystem, Name: "field", - Help: "Height of the chain.We expect multi-line comments to parse correctly.", + Help: "Height of the chain. 
We expect multi-line comments to parse correctly.", }, labels).With(labelsAndValues...), } } diff --git a/scripts/metricsgen/testdata/tags/metrics.gen.go b/scripts/metricsgen/testdata/tags/metrics.gen.go index 7ac292d3c4..43779c7a16 100644 --- a/scripts/metricsgen/testdata/tags/metrics.gen.go +++ b/scripts/metricsgen/testdata/tags/metrics.gen.go @@ -19,7 +19,8 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Subsystem: MetricsSubsystem, Name: "with_labels", Help: "", - }, append(labels, "step,time")).With(labelsAndValues...), WithExpBuckets: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + }, append(labels, "step", "time")).With(labelsAndValues...), + WithExpBuckets: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "with_exp_buckets", From f9fa0a3228514ba5948069ddb4465ea90d5ff0e3 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 12 May 2022 11:54:49 -0700 Subject: [PATCH 025/203] docs: update event subscription documentation for new API (#8509) Update the static documentation about event subscription to include the new /events API, and to add more details about how queries work. Mention that the streaming API is deprecated. --- docs/tendermint-core/subscription.md | 244 ++++++++++++++++++++++----- rpc/openapi/openapi.yaml | 23 ++- 2 files changed, 210 insertions(+), 57 deletions(-) diff --git a/docs/tendermint-core/subscription.md b/docs/tendermint-core/subscription.md index 0f452c5633..84979f61a4 100644 --- a/docs/tendermint-core/subscription.md +++ b/docs/tendermint-core/subscription.md @@ -2,74 +2,228 @@ order: 7 --- -# Subscribing to events via Websocket +# Subscribing to Events -Tendermint emits different events, which you can subscribe to via -[Websocket](https://en.wikipedia.org/wiki/WebSocket). This can be useful -for third-party applications (for analysis) or for inspecting state. 
+A Tendermint node emits events about important state transitions during +consensus. These events can be queried by clients via the [RPC interface][rpc] +on nodes that enable it. The [list of supported event types][event-types] can +be found in the tendermint/types Go package. -[List of events](https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants) +In Tendermint v0.36 there are two APIs to query events: -To connect to a node via websocket from the CLI, you can use a tool such as -[wscat](https://github.com/websockets/wscat) and run: +- The [**legacy streaming API**](#legacy-streaming-api), comprising the + `subscribe`, `unsubscribe`, and `unsubscribe_all` RPC methods over websocket. + +- The [**event log API**](#event-log-api), comprising the `events` RPC method. + +The legacy streaming API is deprecated in Tendermint v0.36, and will be removed +in Tendermint v0.37. Clients are strongly encouraged to migrate to the new +event log API as soon as is practical. + +[rpc]: https://docs.tendermint.com/master/rpc +[event-types]: https://godoc.org/github.com/tendermint/tendermint/types#EventNewBlockValue + +## Filter Queries + +Event requests take a [filter query][query] parameter. A filter query is a +string that describes a subset of available event items to return. An empty +query matches all events; otherwise a query comprises one or more *terms* +comparing event metadata to target values. + +For example, to select new block events, use the term: -```sh -wscat ws://127.0.0.1:26657/websocket ``` +tm.event = 'NewBlock' +``` + +Multiple terms can be combined with `AND` (case matters), for example to match +the transaction event with a given hash, use the query: + +``` +tm.event = 'Tx' AND tx.hash = 'EA7B33F' +``` + +Operands may be strings in single quotes (`'Tx'`), numbers (`45`), dates, or +timestamps. + +The comparison operators include `=`, `<`, `<=`, `>`, `>=`, and `CONTAINS` (for +substring match). 
In addition, the `EXISTS` operator checks for the presence
+of an attribute regardless of its value.
+
+### Attributes
+
+Tendermint implicitly defines a string-valued `tm.event` attribute for all
+event types. Transaction items (type `Tx`) are also assigned `tx.hash`
+(string), giving the hash of the transaction, and `tx.height` (number)
+giving the height of the block containing the transaction. For `NewBlock` and
+`NewBlockHeader` events, Tendermint defines a `block.height` attribute giving
+the height of the block.
+
+Additional attributes can be provided by the application as [ABCI `Event`
+records][abci-event] in response to the `FinalizeBlock` request. The full name
+of the attribute in the query is formed by combining the `type` and attribute
+`key` with a period.
+
+For example, given the events
+
+```go
+[]abci.Event{{
+  Type: "reward",
+  Attributes: []abci.EventAttribute{
+    {Key: "address", Value: "cosmos1xyz012pdq"},
+    {Key: "amount", Value: "45.62"},
+    {Key: "balance", Value: "100.390001"},
+  },
+}}
+```
+
+a query may refer to the names `reward.address`, `reward.amount`, and `reward.balance`, as in:
+
+```
+reward.address EXISTS AND reward.balance > 45
+```
+
+Certain application-specific metadata are also indexed for offline queries.
+See [Indexing transactions](../app-dev/indexing-transactions.md) for more details.
+
+[query]: https://godoc.org/github.com/tendermint/tendermint/internal/pubsub/query/syntax
+[abci-event]: https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto#L397
+
+## Event Log API
+
+Starting in Tendermint v0.36, when the `rpc.event-log-window-size`
+configuration is enabled, the node maintains a log of all events
+within this operator-defined time window. This API supersedes the websocket
+subscription API described below.
-You can subscribe to any of the events above by calling the `subscribe` RPC
-method via Websocket along with a valid query. 
+Clients can query these events can by long-polling the `/events` RPC method, +which returns the most recent items from the log that match the [request +parameters][reqevents]. Each item returned includes a cursor that marks its +location in the log. Cursors can be passed via the `before` and `after` +parameters to fetch events earlier in the log. + +For example, this request: ```json { "jsonrpc": "2.0", - "method": "subscribe", - "id": 0, + "id": 1, + "method": "events", "params": { - "query": "tm.event='NewBlock'" + "filter": { + "query": "tm.event = 'Tx' AND app.key = 'applesauce'" + }, + "maxItems": 1, + "after": "" } } ``` -Check out [API docs](https://docs.tendermint.com/master/rpc/) for -more information on query syntax and other options. +will return a result similar to the following: + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "items": [ + { + "cursor": "16ee3d5e65be53d8-03d5", + "event": "Tx", + "data": { + "type": "tendermint/event/Tx", + "value": { + "height": 70, + "tx": "YXBwbGVzYXVjZT1zeXJ1cA==", + "result": { + "events": [ + { + "type": "app", + "attributes": [ + { + "key": "creator", + "value": "Cosmoshi Netowoko", + "index": true + }, + { + "key": "key", + "value": "applesauce", + "index": true + }, + { + "key": "index_key", + "value": "index is working", + "index": true + }, + { + "key": "noindex_key", + "value": "index is working", + "index": false + } + ] + } + ] + } + } + } + } + ], + "more": false, + "oldest": "16ee3d4c471c3b00-0001", + "newest": "16ee3d5f2e05a4e0-0400" + } +} +``` + +The `"items"` array gives the matching items (up to the requested +`"maxResults"`) in decreasing time order (i.e., newest to oldest). In this +case, there is only one result, but if there are additional results that were +not returned, the `"more"` flag will be true. Calling `/events` again with the +same query and `"after"` set to the cursor of the newest result (in this +example, `"16ee3d5e65be53d8-03d5"`) will fetch newer results. 
+
+Go clients can use the [`eventstream`][eventstream] package to simplify the use
+of this method. The `eventstream.Stream` automatically handles polling for new
+events, updating the cursor, and reporting any missed events.
-You can also use tags, given you had included them into DeliverTx
-response, to query transaction results. See [Indexing
-transactions](../app-dev/indexing-transactions.md) for details.
+[reqevents]: https://pkg.go.dev/github.com/tendermint/tendermint@master/rpc/coretypes#RequestEvents
+[eventstream]: https://godoc.org/github.com/tendermint/tendermint/rpc/client/eventstream
-## ValidatorSetUpdates
+## Legacy Streaming API
-When validator set changes, ValidatorSetUpdates event is published. The
-event carries a list of pubkey/power pairs. The list is the same
-Tendermint receives from ABCI application (see [EndBlock
-section](https://github.com/tendermint/tendermint/blob/master/spec/abci/abci.md#endblock) in
-the ABCI spec).
+- **Note:** This API is deprecated in Tendermint v0.36, and will be removed in
+  Tendermint v0.37. New clients and existing users should use the [event log
+  API](#event-log-api) instead. See [ADR 075][adr075] for more details.
-Response:
+To subscribe to events in the streaming API, you must connect to the node RPC
+service using a [websocket][ws]. 
From the command line you can use a tool such +as [wscat][wscat], for example: + +```sh +wscat ws://127.0.0.1:26657/websocket +``` + +[ws]: https://en.wikipedia.org/wiki/WebSocket +[wscat]: https://github.com/websockets/wscat + +To subscribe to events, call the `subscribe` JSON-RPC method method passing in +a [filter query][query] for the events you wish to receive: ```json { "jsonrpc": "2.0", - "id": 0, - "result": { - "query": "tm.event='ValidatorSetUpdates'", - "data": { - "type": "tendermint/event/ValidatorSetUpdates", - "value": { - "validator_updates": [ - { - "address": "09EAD022FD25DE3A02E64B0FE9610B1417183EE4", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "ww0z4WaZ0Xg+YI10w43wTWbBmM3dpVza4mmSQYsd0ck=" - }, - "voting_power": "10", - "proposer_priority": "0" - } - ] - } - } + "method": "subscribe", + "id": 1, + "params": { + "query": "tm.event='NewBlock'" } } ``` + +The subscribe method returns an initial response confirming the subscription, +then sends additional JSON-RPC response messages containing the matching events +as they are published. The subscription continues until either the client +explicitly cancels the subscription (by calling `unsubscribe` or +`unsubscribe_all`) or until the websocket connection is terminated. + +[adr075]: https://tinyurl.com/adr075 diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 74ac3a0aef..d44463da7e 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -260,10 +260,9 @@ paths: operationId: events description: | Fetch a batch of events posted by the consensus node and matching a - specified query. + specified query string. - The query grammar is defined in - https://godoc.org/github.com/tendermint/tendermint/internal/pubsub/query/syntax. + The query grammar is defined in [pubsub/query/syntax](https://godoc.org/github.com/tendermint/tendermint/internal/pubsub/query/syntax). 
An empty query matches all events; otherwise a query comprises one or more terms comparing event metadata to target values. For example, to select new block events: @@ -275,13 +274,13 @@ paths: tm.event = 'Tx' AND tx.hash = 'EA7B33F' - The comparison operators include "=", "<", "<=", ">", ">=", and - "CONTAINS". Operands may be strings (in single quotes), numbers, dates, - or timestamps. In addition, the "EXISTS" operator allows you to check + The comparison operators include `=`, `<`, `<=`, `>`, `>=`, and + `CONTAINS`. Operands may be strings (in single quotes), numbers, dates, + or timestamps. In addition, the `EXISTS` operator allows you to check for the presence of an attribute regardless of its value. - Tendermint defines a tm.event attribute for all events. Transactions - are also assigned tx.hash and tx.height attributes. Other attributes + Tendermint defines a `tm.event` attribute for all events. Transactions + are also assigned `tx.hash` and `tx.height` attributes. Other attributes are provided by the application as ABCI Event records. The name of the event in the query is formed by combining the type and attribute key with a period. For example, given: @@ -295,16 +294,16 @@ paths: }, }} - the query may refer to the names "reward.address", "reward.amount", and - "reward.balance", as in: + the query may refer to the names`"reward.address`,`"reward.amount`, and + `reward.balance`, as in: reward.address EXISTS AND reward.balance > 45 The node maintains a log of all events within an operator-defined time window. The /events method returns the most recent items from the log that match the query. Each item returned includes a cursor that marks - its location in the log. Cursors can be passed via the "before" and - "after" parameters to fetch events earlier in the log. + its location in the log. Cursors can be passed via the `before` and + `after` parameters to fetch events earlier in the log. 
parameters: - in: query name: filter From f094fd204a90921e7b6aac633520e262f964d8aa Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Fri, 13 May 2022 16:18:24 +0200 Subject: [PATCH 026/203] update protos (#8515) --- abci/types/types.pb.go | 270 ++++++++++++++++++++----- proto/tendermint/blocksync/types.pb.go | 30 ++- proto/tendermint/consensus/types.pb.go | 50 ++++- proto/tendermint/consensus/wal.pb.go | 25 ++- proto/tendermint/crypto/keys.pb.go | 5 +- proto/tendermint/crypto/proof.pb.go | 25 ++- proto/tendermint/libs/bits/types.pb.go | 5 +- proto/tendermint/mempool/types.pb.go | 10 +- proto/tendermint/p2p/conn.pb.go | 25 ++- proto/tendermint/p2p/pex.pb.go | 20 +- proto/tendermint/p2p/types.pb.go | 25 ++- proto/tendermint/privval/types.pb.go | 55 ++++- proto/tendermint/state/types.pb.go | 25 ++- proto/tendermint/statesync/types.pb.go | 45 ++++- proto/tendermint/types/block.pb.go | 5 +- proto/tendermint/types/canonical.pb.go | 25 ++- proto/tendermint/types/events.pb.go | 5 +- proto/tendermint/types/evidence.pb.go | 20 +- proto/tendermint/types/params.pb.go | 40 +++- proto/tendermint/types/types.pb.go | 75 +++++-- proto/tendermint/types/validator.pb.go | 15 +- proto/tendermint/version/types.pb.go | 5 +- 22 files changed, 644 insertions(+), 161 deletions(-) diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index dd13086289..cdfef391d5 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -10878,7 +10878,10 @@ func (m *Request) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10960,7 +10963,10 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l 
{ @@ -11010,7 +11016,10 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11162,7 +11171,10 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11400,7 +11412,10 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11555,7 +11570,10 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11739,7 +11757,10 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11842,7 +11863,10 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11926,7 +11950,10 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return 
ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11995,7 +12022,10 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12045,7 +12075,10 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12095,7 +12128,10 @@ func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12215,7 +12251,10 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12322,7 +12361,10 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12457,7 +12499,10 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12745,7 +12790,10 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 
{ + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13048,7 +13096,10 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13151,7 +13202,10 @@ func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13322,7 +13376,10 @@ func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13625,7 +13682,10 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14375,7 +14435,10 @@ func (m *Response) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14457,7 +14520,10 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14539,7 +14605,10 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - 
if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14589,7 +14658,10 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14775,7 +14847,10 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14929,7 +15004,10 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15236,7 +15314,10 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15320,7 +15401,10 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15674,7 +15758,10 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15945,7 +16032,10 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) 
error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16099,7 +16189,10 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16202,7 +16295,10 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16286,7 +16382,10 @@ func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16355,7 +16454,10 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16439,7 +16541,10 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16616,7 +16721,10 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ 
-16838,7 +16946,10 @@ func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17045,7 +17156,10 @@ func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17129,7 +17243,10 @@ func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17198,7 +17315,10 @@ func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17439,7 +17559,10 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17542,7 +17665,10 @@ func (m *CommitInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17645,7 +17771,10 @@ func (m *ExtendedCommitInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + 
skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17761,7 +17890,10 @@ func (m *Event) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17895,7 +18027,10 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18166,7 +18301,10 @@ func (m *ExecTxResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18321,7 +18459,10 @@ func (m *TxResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18424,7 +18565,10 @@ func (m *TxRecord) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18527,7 +18671,10 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18629,7 +18776,10 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if 
(iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18732,7 +18882,10 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18869,7 +19022,10 @@ func (m *ExtendedVoteInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -19042,7 +19198,10 @@ func (m *Misbehavior) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -19217,7 +19376,10 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index 8757f8ab3e..910ccea476 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -927,7 +927,10 @@ func (m *BlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -996,7 +999,10 @@ func (m *NoBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if 
(iNdEx + skippy) > l { @@ -1118,7 +1124,10 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1168,7 +1177,10 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1256,7 +1268,10 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1481,7 +1496,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/types.pb.go b/proto/tendermint/consensus/types.pb.go index 4ae9abc9e1..d542d929e3 100644 --- a/proto/tendermint/consensus/types.pb.go +++ b/proto/tendermint/consensus/types.pb.go @@ -1935,7 +1935,10 @@ func (m *NewRoundStep) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2112,7 +2115,10 @@ func (m *NewValidBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2195,7 +2201,10 @@ func (m *Proposal) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2316,7 +2325,10 @@ func (m *ProposalPOL) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2437,7 +2449,10 @@ func (m *BlockPart) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2523,7 +2538,10 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2649,7 +2667,10 @@ func (m *HasVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2789,7 +2810,10 @@ func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2962,7 +2986,10 @@ func (m *VoteSetBits) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3327,7 +3354,10 @@ func (m *Message) Unmarshal(dAtA 
[]byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/wal.pb.go b/proto/tendermint/consensus/wal.pb.go index fd80819cd0..86ff1be01f 100644 --- a/proto/tendermint/consensus/wal.pb.go +++ b/proto/tendermint/consensus/wal.pb.go @@ -921,7 +921,10 @@ func (m *MsgInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1061,7 +1064,10 @@ func (m *TimeoutInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1130,7 +1136,10 @@ func (m *EndHeight) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1320,7 +1329,10 @@ func (m *WALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1439,7 +1451,10 @@ func (m *TimedWALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/keys.pb.go b/proto/tendermint/crypto/keys.pb.go index 24c6c1b1ba..8ff4c4a4fe 100644 --- a/proto/tendermint/crypto/keys.pb.go 
+++ b/proto/tendermint/crypto/keys.pb.go @@ -687,7 +687,10 @@ func (m *PublicKey) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthKeys + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthKeys } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/proof.pb.go b/proto/tendermint/crypto/proof.pb.go index 82fb943fcd..97350c64c7 100644 --- a/proto/tendermint/crypto/proof.pb.go +++ b/proto/tendermint/crypto/proof.pb.go @@ -820,7 +820,10 @@ func (m *Proof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -940,7 +943,10 @@ func (m *ValueOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1086,7 +1092,10 @@ func (m *DominoOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1236,7 +1245,10 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1320,7 +1332,10 @@ func (m *ProofOps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/libs/bits/types.pb.go 
b/proto/tendermint/libs/bits/types.pb.go index c0ebcb9760..ad87f854f4 100644 --- a/proto/tendermint/libs/bits/types.pb.go +++ b/proto/tendermint/libs/bits/types.pb.go @@ -307,7 +307,10 @@ func (m *BitArray) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/mempool/types.pb.go b/proto/tendermint/mempool/types.pb.go index 11e259551d..3487652bc8 100644 --- a/proto/tendermint/mempool/types.pb.go +++ b/proto/tendermint/mempool/types.pb.go @@ -370,7 +370,10 @@ func (m *Txs) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -455,7 +458,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/conn.pb.go b/proto/tendermint/p2p/conn.pb.go index 47a3bb0cd8..7c26d3fcd4 100644 --- a/proto/tendermint/p2p/conn.pb.go +++ b/proto/tendermint/p2p/conn.pb.go @@ -723,7 +723,10 @@ func (m *PacketPing) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -773,7 +776,10 @@ func (m *PacketPong) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -896,7 +902,10 @@ func (m 
*PacketMsg) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1051,7 +1060,10 @@ func (m *Packet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1168,7 +1180,10 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go index 15ccce15e5..25d636e43d 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/proto/tendermint/p2p/pex.pb.go @@ -587,7 +587,10 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -637,7 +640,10 @@ func (m *PexRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -721,7 +727,10 @@ func (m *PexResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -841,7 +850,10 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + 
return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go index bffa6884fe..a0e647ee7b 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -917,7 +917,10 @@ func (m *ProtocolVersion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1227,7 +1230,10 @@ func (m *NodeInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1341,7 +1347,10 @@ func (m *NodeInfoOther) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1493,7 +1502,10 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1666,7 +1678,10 @@ func (m *PeerAddressInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/privval/types.pb.go b/proto/tendermint/privval/types.pb.go index 56b35e7271..da30f75270 100644 --- a/proto/tendermint/privval/types.pb.go +++ b/proto/tendermint/privval/types.pb.go @@ -1708,7 +1708,10 @@ func (m *RemoteSignerError) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1790,7 +1793,10 @@ func (m *PubKeyRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1909,7 +1915,10 @@ func (m *PubKeyResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2027,7 +2036,10 @@ func (m *SignVoteRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2146,7 +2158,10 @@ func (m *SignedVoteResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2264,7 +2279,10 @@ func (m *SignProposalRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2383,7 +2401,10 @@ func (m *SignedProposalResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2433,7 
+2454,10 @@ func (m *PingRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2483,7 +2507,10 @@ func (m *PingResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2813,7 +2840,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2930,7 +2960,10 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index af5c64ecf8..8db184011b 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -944,7 +944,10 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1049,7 +1052,10 @@ func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1151,7 +1157,10 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { if err != 
nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1266,7 +1275,10 @@ func (m *Version) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1732,7 +1744,10 @@ func (m *State) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go index 5541c28037..93e844730a 100644 --- a/proto/tendermint/statesync/types.pb.go +++ b/proto/tendermint/statesync/types.pb.go @@ -1740,7 +1740,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1790,7 +1793,10 @@ func (m *SnapshotsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1965,7 +1971,10 @@ func (m *SnapshotsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2072,7 +2081,10 @@ func (m *ChunkRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy 
< 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2233,7 +2245,10 @@ func (m *ChunkResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2302,7 +2317,10 @@ func (m *LightBlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2388,7 +2406,10 @@ func (m *LightBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2457,7 +2478,10 @@ func (m *ParamsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2559,7 +2583,10 @@ func (m *ParamsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/block.pb.go b/proto/tendermint/types/block.pb.go index f2077aad8b..aacb90fab7 100644 --- a/proto/tendermint/types/block.pb.go +++ b/proto/tendermint/types/block.pb.go @@ -389,7 +389,10 @@ func (m *Block) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthBlock + } + if (iNdEx + skippy) < 0 { 
return ErrInvalidLengthBlock } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/canonical.pb.go b/proto/tendermint/types/canonical.pb.go index 50c0c84fa2..e08342a460 100644 --- a/proto/tendermint/types/canonical.pb.go +++ b/proto/tendermint/types/canonical.pb.go @@ -920,7 +920,10 @@ func (m *CanonicalBlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1023,7 +1026,10 @@ func (m *CanonicalPartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1232,7 +1238,10 @@ func (m *CanonicalProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1422,7 +1431,10 @@ func (m *CanonicalVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1558,7 +1570,10 @@ func (m *CanonicalVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/events.pb.go b/proto/tendermint/types/events.pb.go index a9aa26a799..1c49aef647 100644 --- a/proto/tendermint/types/events.pb.go +++ b/proto/tendermint/types/events.pb.go @@ -285,7 +285,10 
@@ func (m *EventDataRoundState) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvents } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/evidence.pb.go b/proto/tendermint/types/evidence.pb.go index 052fb0e6b7..746d853130 100644 --- a/proto/tendermint/types/evidence.pb.go +++ b/proto/tendermint/types/evidence.pb.go @@ -827,7 +827,10 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1020,7 +1023,10 @@ func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1211,7 +1217,10 @@ func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1295,7 +1304,10 @@ func (m *EvidenceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index 41d417b915..e3a5e83a96 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -1722,7 +1722,10 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if 
(skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1810,7 +1813,10 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1931,7 +1937,10 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2013,7 +2022,10 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2082,7 +2094,10 @@ func (m *VersionParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2170,7 +2185,10 @@ func (m *HashedParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2292,7 +2310,10 @@ func (m *SynchronyParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2542,7 +2563,10 @@ func (m *TimeoutParams) Unmarshal(dAtA []byte) error { if err 
!= nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index fcfbc01f54..f6f8a33f3f 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -2650,7 +2650,10 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2786,7 +2789,10 @@ func (m *Part) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2903,7 +2909,10 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3409,7 +3418,10 @@ func (m *Header) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3491,7 +3503,10 @@ func (m *Data) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3819,7 +3834,10 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes 
+ } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3974,7 +3992,10 @@ func (m *Commit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4144,7 +4165,10 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4299,7 +4323,10 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4537,7 +4564,10 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4763,7 +4793,10 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4885,7 +4918,10 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5007,7 +5043,10 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + 
if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5161,7 +5200,10 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5315,7 +5357,10 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/validator.pb.go b/proto/tendermint/types/validator.pb.go index 23b30ed3cb..2c3468b83f 100644 --- a/proto/tendermint/types/validator.pb.go +++ b/proto/tendermint/types/validator.pb.go @@ -583,7 +583,10 @@ func (m *ValidatorSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -738,7 +741,10 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -843,7 +849,10 @@ func (m *SimpleValidator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/version/types.pb.go b/proto/tendermint/version/types.pb.go index 76a94fd3c0..7aefd7747b 100644 --- a/proto/tendermint/version/types.pb.go +++ b/proto/tendermint/version/types.pb.go @@ -265,7 +265,10 @@ func (m *Consensus) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { From 81e5bf84173ab67c80150cb1c4299b2612f8ad2e Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 13 May 2022 08:09:09 -0700 Subject: [PATCH 027/203] Consolidate Dependabot checks for GitHub Actions deps. (#8518) We currently have Dependabot check for updates to GitHub actions once a week on master, but daily on the backport branches. This is unnecessarily noisy. As a first step to reducing this noise, consolidate all the settings onto the default branch (master). --- .github/dependabot.yml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 3db35d523e..0108f040d7 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,6 +4,30 @@ updates: directory: "/" schedule: interval: weekly + day: monday + target-branch: "master" + open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge + + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: weekly + day: monday + target-branch: "v0.35.x" + open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge + + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: weekly + day: monday + target-branch: "v0.34.x" open-pull-requests-limit: 10 labels: - T:dependencies @@ -13,6 +37,7 @@ updates: directory: "/docs" schedule: interval: weekly + day: monday open-pull-requests-limit: 10 ################################### From c29d1b34fdc5815b35d57c7aafc04ebfc3b057cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 May 2022 15:32:25 +0000 Subject: [PATCH 028/203] build(deps): Bump golangci/golangci-lint-action from 3.1.0 to 3.2.0 (#8525) MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3.1.0 to 3.2.0.
Release notes

Sourced from golangci/golangci-lint-action's releases.

v3.2.0

What's Changed

misc

dependencies

New Contributors

... (truncated)

Commits
  • 537aa19 Expire cache periodically to avoid unbounded size (#466)
  • f70e52d build(deps): bump @​actions/core from 1.6.0 to 1.8.0 (#468)
  • a304692 build(deps-dev): bump @​typescript-eslint/eslint-plugin (#469)
  • eeca7c5 build(deps-dev): bump eslint from 8.14.0 to 8.15.0 (#467)
  • dfbcd2a build(deps): bump github/codeql-action from 1 to 2 (#459)
  • 4421331 build(deps-dev): bump @​typescript-eslint/parser from 5.20.0 to 5.22.0 (#464)
  • 5e6c1bb build(deps-dev): bump typescript from 4.6.3 to 4.6.4 (#461)
  • 44eba43 build(deps-dev): bump @​typescript-eslint/eslint-plugin (#460)
  • 358a5e3 build(deps-dev): bump @​typescript-eslint/eslint-plugin (#457)
  • b9c65a5 build(deps-dev): bump @​typescript-eslint/parser from 5.19.0 to 5.20.0 (#455)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golangci/golangci-lint-action&package-manager=github_actions&previous-version=3.1.0&new-version=3.2.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 359514426e..863d5ab106 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -27,7 +27,7 @@ jobs: **/**.go go.mod go.sum - - uses: golangci/golangci-lint-action@v3.1.0 + - uses: golangci/golangci-lint-action@v3 with: # Required: the version of golangci-lint is required and # must be specified without patch version: we always use the From 7da9746a5704429c89845f29f7222af4e720dcc7 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 13 May 2022 12:08:21 -0700 Subject: [PATCH 029/203] Fix protobuf generated code. (#8545) * Revert "update protos (#8515)" This reverts commit f094fd204a90921e7b6aac633520e262f964d8aa. It appears that #8515 may have been generated with an out-of-date version of either buf or the gogo plugin. using the latest versions (buf 1.4.0 and gogo 1.3.2) reverts those changes. * Add a script to re-generate protos with the latest tools. This script is just a wrapper for the Make rule, but it runs the build inside a container with the latest versions of buf and gogo installed. This reduces the chance that an out-of-date ambient installation on a developer machine will get us outdated output. 
--- abci/types/types.pb.go | 270 +++++-------------------- proto/tendermint/blocksync/types.pb.go | 30 +-- proto/tendermint/consensus/types.pb.go | 50 +---- proto/tendermint/consensus/wal.pb.go | 25 +-- proto/tendermint/crypto/keys.pb.go | 5 +- proto/tendermint/crypto/proof.pb.go | 25 +-- proto/tendermint/libs/bits/types.pb.go | 5 +- proto/tendermint/mempool/types.pb.go | 10 +- proto/tendermint/p2p/conn.pb.go | 25 +-- proto/tendermint/p2p/pex.pb.go | 20 +- proto/tendermint/p2p/types.pb.go | 25 +-- proto/tendermint/privval/types.pb.go | 55 +---- proto/tendermint/state/types.pb.go | 25 +-- proto/tendermint/statesync/types.pb.go | 45 +---- proto/tendermint/types/block.pb.go | 5 +- proto/tendermint/types/canonical.pb.go | 25 +-- proto/tendermint/types/events.pb.go | 5 +- proto/tendermint/types/evidence.pb.go | 20 +- proto/tendermint/types/params.pb.go | 40 +--- proto/tendermint/types/types.pb.go | 75 ++----- proto/tendermint/types/validator.pb.go | 15 +- proto/tendermint/version/types.pb.go | 5 +- scripts/proto-gen.sh | 23 +++ 23 files changed, 184 insertions(+), 644 deletions(-) create mode 100755 scripts/proto-gen.sh diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index cdfef391d5..dd13086289 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -10878,10 +10878,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10963,10 +10960,7 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11016,10 +11010,7 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return 
ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11171,10 +11162,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11412,10 +11400,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11570,10 +11555,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11757,10 +11739,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11863,10 +11842,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11950,10 +11926,7 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12022,10 +11995,7 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return 
err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12075,10 +12045,7 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12128,10 +12095,7 @@ func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12251,10 +12215,7 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12361,10 +12322,7 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12499,10 +12457,7 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12790,10 +12745,7 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13096,10 +13048,7 @@ func (m 
*RequestProcessProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13202,10 +13151,7 @@ func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13376,10 +13322,7 @@ func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13682,10 +13625,7 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14435,10 +14375,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14520,10 +14457,7 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14605,10 +14539,7 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if 
(iNdEx + skippy) > l { @@ -14658,10 +14589,7 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14847,10 +14775,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15004,10 +14929,7 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15314,10 +15236,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15401,10 +15320,7 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15758,10 +15674,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16032,10 +15945,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 
0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16189,10 +16099,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16295,10 +16202,7 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16382,10 +16286,7 @@ func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16454,10 +16355,7 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16541,10 +16439,7 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16721,10 +16616,7 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16946,10 +16838,7 @@ func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return 
ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17156,10 +17045,7 @@ func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17243,10 +17129,7 @@ func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17315,10 +17198,7 @@ func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17559,10 +17439,7 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17665,10 +17542,7 @@ func (m *CommitInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17771,10 +17645,7 @@ func (m *ExtendedCommitInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17890,10 +17761,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { 
if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18027,10 +17895,7 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18301,10 +18166,7 @@ func (m *ExecTxResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18459,10 +18321,7 @@ func (m *TxResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18565,10 +18424,7 @@ func (m *TxRecord) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18671,10 +18527,7 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18776,10 +18629,7 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18882,10 +18732,7 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) 
error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -19022,10 +18869,7 @@ func (m *ExtendedVoteInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -19198,10 +19042,7 @@ func (m *Misbehavior) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -19376,10 +19217,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index 910ccea476..8757f8ab3e 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -927,10 +927,7 @@ func (m *BlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -999,10 +996,7 @@ func (m *NoBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1124,10 +1118,7 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - 
return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1177,10 +1168,7 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1268,10 +1256,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1496,10 +1481,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/types.pb.go b/proto/tendermint/consensus/types.pb.go index d542d929e3..4ae9abc9e1 100644 --- a/proto/tendermint/consensus/types.pb.go +++ b/proto/tendermint/consensus/types.pb.go @@ -1935,10 +1935,7 @@ func (m *NewRoundStep) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2115,10 +2112,7 @@ func (m *NewValidBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2201,10 +2195,7 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if 
(skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2325,10 +2316,7 @@ func (m *ProposalPOL) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2449,10 +2437,7 @@ func (m *BlockPart) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2538,10 +2523,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2667,10 +2649,7 @@ func (m *HasVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2810,10 +2789,7 @@ func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2986,10 +2962,7 @@ func (m *VoteSetBits) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3354,10 +3327,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/wal.pb.go b/proto/tendermint/consensus/wal.pb.go index 86ff1be01f..fd80819cd0 100644 --- a/proto/tendermint/consensus/wal.pb.go +++ b/proto/tendermint/consensus/wal.pb.go @@ -921,10 +921,7 @@ func (m *MsgInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1064,10 +1061,7 @@ func (m *TimeoutInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1136,10 +1130,7 @@ func (m *EndHeight) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1329,10 +1320,7 @@ func (m *WALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1451,10 +1439,7 @@ func (m *TimedWALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/keys.pb.go b/proto/tendermint/crypto/keys.pb.go index 8ff4c4a4fe..24c6c1b1ba 100644 --- a/proto/tendermint/crypto/keys.pb.go +++ b/proto/tendermint/crypto/keys.pb.go @@ -687,10 +687,7 @@ func (m *PublicKey) Unmarshal(dAtA []byte) error { if err != nil { return err } - if 
skippy < 0 { - return ErrInvalidLengthKeys - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthKeys } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/proof.pb.go b/proto/tendermint/crypto/proof.pb.go index 97350c64c7..82fb943fcd 100644 --- a/proto/tendermint/crypto/proof.pb.go +++ b/proto/tendermint/crypto/proof.pb.go @@ -820,10 +820,7 @@ func (m *Proof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -943,10 +940,7 @@ func (m *ValueOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1092,10 +1086,7 @@ func (m *DominoOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1245,10 +1236,7 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1332,10 +1320,7 @@ func (m *ProofOps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/libs/bits/types.pb.go b/proto/tendermint/libs/bits/types.pb.go index ad87f854f4..c0ebcb9760 100644 --- a/proto/tendermint/libs/bits/types.pb.go +++ b/proto/tendermint/libs/bits/types.pb.go 
@@ -307,10 +307,7 @@ func (m *BitArray) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/mempool/types.pb.go b/proto/tendermint/mempool/types.pb.go index 3487652bc8..11e259551d 100644 --- a/proto/tendermint/mempool/types.pb.go +++ b/proto/tendermint/mempool/types.pb.go @@ -370,10 +370,7 @@ func (m *Txs) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -458,10 +455,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/conn.pb.go b/proto/tendermint/p2p/conn.pb.go index 7c26d3fcd4..47a3bb0cd8 100644 --- a/proto/tendermint/p2p/conn.pb.go +++ b/proto/tendermint/p2p/conn.pb.go @@ -723,10 +723,7 @@ func (m *PacketPing) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -776,10 +773,7 @@ func (m *PacketPong) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -902,10 +896,7 @@ func (m *PacketMsg) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1060,10 +1051,7 @@ func (m *Packet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1180,10 +1168,7 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go index 25d636e43d..15ccce15e5 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/proto/tendermint/p2p/pex.pb.go @@ -587,10 +587,7 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -640,10 +637,7 @@ func (m *PexRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -727,10 +721,7 @@ func (m *PexResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -850,10 +841,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/types.pb.go 
b/proto/tendermint/p2p/types.pb.go index a0e647ee7b..bffa6884fe 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -917,10 +917,7 @@ func (m *ProtocolVersion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1230,10 +1227,7 @@ func (m *NodeInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1347,10 +1341,7 @@ func (m *NodeInfoOther) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1502,10 +1493,7 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1678,10 +1666,7 @@ func (m *PeerAddressInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/privval/types.pb.go b/proto/tendermint/privval/types.pb.go index da30f75270..56b35e7271 100644 --- a/proto/tendermint/privval/types.pb.go +++ b/proto/tendermint/privval/types.pb.go @@ -1708,10 +1708,7 @@ func (m *RemoteSignerError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1793,10 +1790,7 @@ func (m *PubKeyRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1915,10 +1909,7 @@ func (m *PubKeyResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2036,10 +2027,7 @@ func (m *SignVoteRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2158,10 +2146,7 @@ func (m *SignedVoteResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2279,10 +2264,7 @@ func (m *SignProposalRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2401,10 +2383,7 @@ func (m *SignedProposalResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2454,10 +2433,7 @@ func (m *PingRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + 
skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2507,10 +2483,7 @@ func (m *PingResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2840,10 +2813,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2960,10 +2930,7 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index 8db184011b..af5c64ecf8 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -944,10 +944,7 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1052,10 +1049,7 @@ func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1157,10 +1151,7 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1275,10 +1266,7 @@ func (m *Version) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1744,10 +1732,7 @@ func (m *State) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go index 93e844730a..5541c28037 100644 --- a/proto/tendermint/statesync/types.pb.go +++ b/proto/tendermint/statesync/types.pb.go @@ -1740,10 +1740,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1793,10 +1790,7 @@ func (m *SnapshotsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1971,10 +1965,7 @@ func (m *SnapshotsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2081,10 +2072,7 @@ func (m *ChunkRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2245,10 +2233,7 @@ 
func (m *ChunkResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2317,10 +2302,7 @@ func (m *LightBlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2406,10 +2388,7 @@ func (m *LightBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2478,10 +2457,7 @@ func (m *ParamsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2583,10 +2559,7 @@ func (m *ParamsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/block.pb.go b/proto/tendermint/types/block.pb.go index aacb90fab7..f2077aad8b 100644 --- a/proto/tendermint/types/block.pb.go +++ b/proto/tendermint/types/block.pb.go @@ -389,10 +389,7 @@ func (m *Block) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthBlock - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthBlock } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/canonical.pb.go b/proto/tendermint/types/canonical.pb.go 
index e08342a460..50c0c84fa2 100644 --- a/proto/tendermint/types/canonical.pb.go +++ b/proto/tendermint/types/canonical.pb.go @@ -920,10 +920,7 @@ func (m *CanonicalBlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1026,10 +1023,7 @@ func (m *CanonicalPartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1238,10 +1232,7 @@ func (m *CanonicalProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1431,10 +1422,7 @@ func (m *CanonicalVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1570,10 +1558,7 @@ func (m *CanonicalVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/events.pb.go b/proto/tendermint/types/events.pb.go index 1c49aef647..a9aa26a799 100644 --- a/proto/tendermint/types/events.pb.go +++ b/proto/tendermint/types/events.pb.go @@ -285,10 +285,7 @@ func (m *EventDataRoundState) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvents - } - 
if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvents } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/evidence.pb.go b/proto/tendermint/types/evidence.pb.go index 746d853130..052fb0e6b7 100644 --- a/proto/tendermint/types/evidence.pb.go +++ b/proto/tendermint/types/evidence.pb.go @@ -827,10 +827,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1023,10 +1020,7 @@ func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1217,10 +1211,7 @@ func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1304,10 +1295,7 @@ func (m *EvidenceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index e3a5e83a96..41d417b915 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -1722,10 +1722,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if 
(iNdEx + skippy) > l { @@ -1813,10 +1810,7 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1937,10 +1931,7 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2022,10 +2013,7 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2094,10 +2082,7 @@ func (m *VersionParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2185,10 +2170,7 @@ func (m *HashedParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2310,10 +2292,7 @@ func (m *SynchronyParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2563,10 +2542,7 @@ func (m *TimeoutParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthParams } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index f6f8a33f3f..fcfbc01f54 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -2650,10 +2650,7 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2789,10 +2786,7 @@ func (m *Part) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2909,10 +2903,7 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3418,10 +3409,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3503,10 +3491,7 @@ func (m *Data) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3834,10 +3819,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3992,10 +3974,7 @@ func (m *Commit) Unmarshal(dAtA []byte) 
error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4165,10 +4144,7 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4323,10 +4299,7 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4564,10 +4537,7 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4793,10 +4763,7 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4918,10 +4885,7 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5043,10 +5007,7 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5200,10 +5161,7 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) 
error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5357,10 +5315,7 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/validator.pb.go b/proto/tendermint/types/validator.pb.go index 2c3468b83f..23b30ed3cb 100644 --- a/proto/tendermint/types/validator.pb.go +++ b/proto/tendermint/types/validator.pb.go @@ -583,10 +583,7 @@ func (m *ValidatorSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthValidator - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -741,10 +738,7 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthValidator - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -849,10 +843,7 @@ func (m *SimpleValidator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthValidator - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/version/types.pb.go b/proto/tendermint/version/types.pb.go index 7aefd7747b..76a94fd3c0 100644 --- a/proto/tendermint/version/types.pb.go +++ b/proto/tendermint/version/types.pb.go @@ -265,10 +265,7 @@ func (m *Consensus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy 
< 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/scripts/proto-gen.sh b/scripts/proto-gen.sh new file mode 100755 index 0000000000..10499dcd1f --- /dev/null +++ b/scripts/proto-gen.sh @@ -0,0 +1,23 @@ +#!/bin/sh +# +# Update the generated code for protocol buffers in the Tendermint repository. +# This must be run from inside a Tendermint working directory. +# +set -euo pipefail + +# Work from the root of the repository. +cd "$(git rev-parse --show-toplevel)" + +# Run inside Docker to install the correct versions of the required tools +# without polluting the local system. +docker run --rm -i -v "$PWD":/w --workdir=/w golang:1.18-alpine sh <<"EOF" +apk add curl git make + +readonly buf_release='https://github.com/bufbuild/buf/releases/latest/download' +readonly OS="$(uname -s)" ARCH="$(uname -m)" +curl -sSL "${buf_release}/buf-${OS}-${ARCH}.tar.gz" \ + | tar -xzf - -C /usr/local --strip-components=1 + +go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest +make proto-gen +EOF From 9c9a4140d971cb8ce2d2f4695c34452c5572f9d8 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Fri, 13 May 2022 12:27:10 -0700 Subject: [PATCH 030/203] chore: update generated mocks (#8546) --- internal/consensus/mocks/cons_sync_reactor.go | 1 - internal/state/indexer/mocks/event_sink.go | 1 - internal/state/mocks/evidence_pool.go | 1 - internal/state/mocks/store.go | 1 - internal/statesync/mocks/state_provider.go | 1 - 5 files changed, 5 deletions(-) diff --git a/internal/consensus/mocks/cons_sync_reactor.go b/internal/consensus/mocks/cons_sync_reactor.go index 3aa02e9fe0..f904e9129a 100644 --- a/internal/consensus/mocks/cons_sync_reactor.go +++ b/internal/consensus/mocks/cons_sync_reactor.go @@ -6,7 +6,6 @@ import ( testing "testing" mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/internal/state" ) diff --git a/internal/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go index 69abe39071..decf551abd 100644 --- a/internal/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -6,7 +6,6 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - indexer "github.com/tendermint/tendermint/internal/state/indexer" query "github.com/tendermint/tendermint/internal/pubsub/query" diff --git a/internal/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go index 0ea3ba17b9..49633269b1 100644 --- a/internal/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -6,7 +6,6 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/internal/state" testing "testing" diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index 1d9ef2f6ff..9b41f3c1bc 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -4,7 +4,6 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/internal/state" tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" diff --git 
a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index 82e4bd60eb..582ebcd9c4 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -6,7 +6,6 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/internal/state" testing "testing" From 2d9a379b635750558925c4115a2ca613d3c4d34a Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 13 May 2022 12:41:24 -0700 Subject: [PATCH 031/203] build: add CI check that generated files are up-to-date (#8521) Add an actions workflow that verifies that generated files are up-to-date during a pull request. If not, give the reader instructions about what to do to update the PR. Checks are included for protobuf and mockery. --- .github/workflows/check-generated.yml | 76 +++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 .github/workflows/check-generated.yml diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml new file mode 100644 index 0000000000..50d923376b --- /dev/null +++ b/.github/workflows/check-generated.yml @@ -0,0 +1,76 @@ +# Verify that generated code is up-to-date. +# +# Note that we run these checks regardless whether the input files have +# changed, because generated code can change in response to toolchain updates +# even if no files in the repository are modified. +name: Check generated code +on: + pull_request: + branches: + - master + +permissions: + contents: read + +jobs: + check-mocks: + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v3 + with: + go-version: '1.17' + + - uses: actions/checkout@v3 + + - name: "Check generated mocks" + run: | + set -euo pipefail + make mockery 2>/dev/null + + if ! git diff --stat --exit-code ; then + echo ">> ERROR:" + echo ">>" + echo ">> Generated mocks require update (either Mockery or source files may have changed)." 
+ echo ">> Ensure your tools are up-to-date, re-run 'make mockery' and update this PR." + echo ">>" + exit 1 + fi + + check-proto: + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v3 + with: + go-version: '1.17' + + - uses: actions/checkout@v3 + with: + fetch-depth: 1 # we need a .git directory to run git diff + + - name: "Check protobuf generated code" + run: | + set -euo pipefail + + # Install buf and gogo tools, so that differences that arise from + # toolchain differences are also caught. + readonly tools="$(mktemp -d)" + export PATH="${PATH}:${tools}/bin" + export GOBIN="${tools}/bin" + + readonly base='https://github.com/bufbuild/buf/releases/latest/download' + readonly OS="$(uname -s)" ARCH="$(uname -m)" + curl -sSL "${base}/buf-${OS}-${ARCH}.tar.gz" \ + | tar -xzf - -C "$tools" --strip-components=1 + + go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest + + make proto-gen + + if ! git diff --stat --exit-code ; then + echo ">> ERROR:" + echo ">>" + echo ">> Protobuf generated code requires update (either tools or .proto files may have changed)." + echo ">> Ensure your tools are up-to-date, re-run 'make proto-gen' and update this PR." + echo ">>" + exit 1 + fi From bdca7270148a5a8ae1fbac8fb629efc716ceb6ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 May 2022 13:08:55 -0700 Subject: [PATCH 032/203] build(deps): Bump github.com/prometheus/client_golang (#8540) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.12.1 to 1.12.2. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/v1.12.2/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.12.1...v1.12.2) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: M. J. Fromberger --- go.mod | 6 +++--- go.sum | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 024f910039..aea70543bb 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/mroth/weightedrand v0.4.1 github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b github.com/ory/dockertest v3.3.5+incompatible - github.com/prometheus/client_golang v1.12.1 + github.com/prometheus/client_golang v1.12.2 github.com/rs/cors v1.8.2 github.com/rs/zerolog v1.26.1 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa @@ -173,8 +173,8 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.32.1 github.com/prometheus/procfs v0.7.3 // indirect github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a // indirect github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect diff --git a/go.sum b/go.sum index e7c2ecda36..3f9d98b508 100644 --- a/go.sum +++ b/go.sum @@ -873,8 +873,9 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= 
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= From 42e59246622a0cf2f750acfdd0bc7c169fb3a847 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Sat, 14 May 2022 08:27:53 -0400 Subject: [PATCH 033/203] mempool: do not continue checking transactions if context was cacneled (#8549) --- internal/mempool/reactor.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index ae578e70a3..3c22988ee6 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -153,6 +153,15 @@ func (r *Reactor) handleMempoolMessage(ctx context.Context, envelope *p2p.Envelo // problem. continue } + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + // Do not propagate context + // cancellation errors, but do + // not continue to check + // transactions from this + // message if we are shutting down. 
+ return nil + } + logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) From c780619db5b7ef92847c96fab2b39575baca724d Mon Sep 17 00:00:00 2001 From: Ivo Elenchev Date: Sun, 15 May 2022 01:50:36 +0300 Subject: [PATCH 034/203] Fix typo (#8550) --- docs/nodes/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/nodes/README.md b/docs/nodes/README.md index fd9056e0dd..a0f14e6c88 100644 --- a/docs/nodes/README.md +++ b/docs/nodes/README.md @@ -45,4 +45,4 @@ We will cover the various types of node types within Tendermint. Validators are nodes that participate in the security of a network. Validators have an associated power in Tendermint, this power can represent stake in a [proof of stake](https://en.wikipedia.org/wiki/Proof_of_stake) system, reputation in [proof of authority](https://en.wikipedia.org/wiki/Proof_of_authority) or any sort of measurable unit. Running a secure and consistently online validator is crucial to a networks health. A validator must be secure and fault tolerant, it is recommended to run your validator with 2 or more sentry nodes. -As a validator there is the potential to have your weight reduced, this is defined by the application. Tendermint is notified by the application if a validator should have there weight increased or reduced. Application have different types of malicious behavior which lead to slashing of the validators power. Please check the documentation of the application you will be running in order to find more information. +As a validator there is the potential to have your weight reduced, this is defined by the application. Tendermint is notified by the application if a validator should have their weight increased or reduced. Application have different types of malicious behavior which lead to slashing of the validators power. Please check the documentation of the application you will be running in order to find more information. 
From d004638b0b544986286f3d4e1cd13ee14ada402f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 May 2022 13:20:31 +0000 Subject: [PATCH 035/203] build(deps): Bump github.com/prometheus/common from 0.32.1 to 0.34.0 (#8557) Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.32.1 to 0.34.0.
Release notes

Sourced from github.com/prometheus/common's releases.

v0.34.0

  • [ENHANCEMENT] Enable selecting minimum TLS version. #375

v0.33.0

  • [ENHANCEMENT] Make HTTP2 User Visible #360
  • [FEATURE] Add proxy_url support for oauth2 #358
Commits
  • 3763a1d TLS config: Enable selection of min TLS version (#375)
  • 0c7319a Remove comment about PROMETHEUS_COMMON_DISABLE_HTTP2 env var because it is no...
  • 840c039 Use path.Clean to clean sigv4 path.
  • ffd0efb Deduplicate slashes for sigv4 signature
  • 902cb39 Merge pull request #365 from prometheus/superq/bump_sigv4
  • 910a9df Update sigv4 modules
  • 2c24277 Merge pull request #362 from prometheus/repo_sync
  • f6b0912 Merge pull request #353 from prometheus/superq/bump_go
  • e457c0a Update common Prometheus files
  • 0e1254b Merge pull request #359 from prometheus/repo_sync
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/prometheus/common&package-manager=go_modules&previous-version=0.32.1&new-version=0.34.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index aea70543bb..9af23abaa4 100644 --- a/go.mod +++ b/go.mod @@ -174,7 +174,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b // indirect github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.32.1 + github.com/prometheus/common v0.34.0 github.com/prometheus/procfs v0.7.3 // indirect github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a // indirect github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect diff --git a/go.sum b/go.sum index 3f9d98b508..8d4173cf63 100644 --- a/go.sum +++ b/go.sum @@ -888,8 +888,9 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.34.0 h1:RBmGO9d/FVjqHT0yUGQwBJhkwKV+wPCn7KGpvfab0uE= +github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= From fb7229135a2cceef615944535f466e848c7ff4e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 May 2022 14:04:08 +0000 
Subject: [PATCH 036/203] build(deps): Bump google.golang.org/grpc from 1.46.0 to 1.46.2 (#8559) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.46.0 to 1.46.2.
Release notes

Sourced from google.golang.org/grpc's releases.

Release v1.46.2

Bug Fixes

  • client: fix potential panic during RPC retries (#5323)
  • xds: fix leak of deleted CDS resources from CSDS view (#5339)
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=google.golang.org/grpc&package-manager=go_modules&previous-version=1.46.0&new-version=1.46.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9af23abaa4..2151f4b41c 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 golang.org/x/net v0.0.0-20220412020605-290c469a71a5 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/grpc v1.46.0 + google.golang.org/grpc v1.46.2 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect pgregory.net/rapid v0.4.7 ) diff --git a/go.sum b/go.sum index 8d4173cf63..791fd4b73f 100644 --- a/go.sum +++ b/go.sum @@ -1723,8 +1723,8 @@ google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5 google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= From 7f79661c2efec63f37b4084083a52e2b2274a27b Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 16 May 2022 10:15:06 -0400 Subject: [PATCH 037/203] rfc: onboarding projects (#8413) This is meant as a supporting recruiting document. 
The idea is to describe a bunch of projects scoped and selected as teaching projects for new engineers joining the team. This isn't meant to replace "neweng" or "good-first-ticket" tags on issues, but provide a higher level set of examples of the kinds of things that someone joining the team could tackle. --- docs/rfc/README.md | 1 + docs/rfc/rfc-020-onboarding-projects.rst | 240 +++++++++++++++++++++++ 2 files changed, 241 insertions(+) create mode 100644 docs/rfc/rfc-020-onboarding-projects.rst diff --git a/docs/rfc/README.md b/docs/rfc/README.md index d944e72e78..2872c988ad 100644 --- a/docs/rfc/README.md +++ b/docs/rfc/README.md @@ -57,5 +57,6 @@ sections. - [RFC-017: ABCI++ Vote Extension Propagation](./rfc-017-abci++-vote-extension-propag.md) - [RFC-018: BLS Signature Aggregation Exploration](./rfc-018-bls-agg-exploration.md) - [RFC-019: Configuration File Versioning](./rfc-019-config-version.md) +- [RFC-020: Onboarding Projects](./rfc-020-onboarding-projects.rst) diff --git a/docs/rfc/rfc-020-onboarding-projects.rst b/docs/rfc/rfc-020-onboarding-projects.rst new file mode 100644 index 0000000000..dc18de65d7 --- /dev/null +++ b/docs/rfc/rfc-020-onboarding-projects.rst @@ -0,0 +1,240 @@ +======================================= +RFC 020: Tendermint Onboarding Projects +======================================= + +.. contents:: + :backlinks: none + +Changelog +--------- + +- 2022-03-30: Initial draft. (@tychoish) +- 2022-04-25: Imported document to tendermint repository. (@tychoish) + +Overview +-------- + +This document describes a collection of projects that might be good for new +engineers joining the Tendermint Core team. 
These projects mostly describe +features that we'd be very excited to see land in the code base, but that are +intentionally outside of the critical path of a release on the roadmap, and +have the following properties that we think make good on-boarding projects: + +- require relatively little context for the project or its history beyond a + more isolated area of the code. + +- provide exposure to different areas of the codebase, so new team members + will have reason to explore the code base, build relationships with people + on the team, and gain experience with more than one area of the system. + +- be of moderate size, striking a healthy balance between trivial or + mechanical changes (which provide little insight) and large intractable + changes that require deeper insight than is available during onboarding to + address well. A good size project should have natural touchpoints or + check-ins. + +Projects +-------- + +Before diving into one of these projects, have a conversation about the +project or aspects of Tendermint that you're excited to work on with your +onboarding buddy. This will help make sure that these issues are still +relevant, help you get any context, underatnding known pitfalls, and to +confirm a high level approach or design (if relevant.) On-boarding buddies +should be prepared to do some design work before someone joins the team. + +The descriptions that follow provide some basic background and attempt to +describe the user stories and the potential impact of these project. + +E2E Test Systems +~~~~~~~~~~~~~~~~ + +Tendermint's E2E framework makes it possible to run small test networks with +different Tendermint configurations, and make sure that the system works. The +tests run Tendermint in a separate binary, and the system provides some very +high level protection against making changes that could break Tendermint in +otherwise difficult to detect ways. 
+ +Working on the E2E system is a good place to get introduced to the Tendermint +codebase, particularly for developers who are newer to Go, as the E2E +system (generator, runner, etc.) is distinct from the rest of Tendermint and +comparatively quite small, so it may be easier to begin making changes in this +area. At the same time, because the E2E system exercises *all* of Tendermint, +work in this area is a good way to get introduced to various components of the +system. + +Configurable E2E Workloads +++++++++++++++++++++++++++ + +All E2E tests use the same workload (e.g. generated transactions, submitted to +different nodes in the network,) which has been tuned empirically to provide a +gentle but consistent parallel load that all E2E tests can pass. Ideally, the +workload generator could be configurable to have different shapes of work +(bursty, different transaction sizes, weighted to different nodes, etc.) and +even perhaps further parameterized within a basic shape, which would make it +possible to use our existing test infrastructure to answer different questions +about the performance or capability of the system. + +The work would involve adding a new parameter to the E2E test manifest, and +creating an option (e.g. "legacy") for the current load generation model, +extract configurations options for the current load generation, and then +prototype implementations of alternate load generation, and also run some +preliminary using the tools. + +Byzantine E2E Workloads ++++++++++++++++++++++++ + +There are two main kinds of integration tests in Tendermint: the E2E test +framework, and then a collection of integration tests that masquerade as +unit-tests. While some of this expansion of test scope is (potentially) +inevitable, the masquerading unit tests (e.g ``consensus.byzantine_test.go``) +end up being difficult to understand, difficult to maintain, and unreliable. 
+ +One solution to this, would be to modify the E2E ABCI application to allow it +to inject byzantine behavior, and then have this be a configurable aspect of +a test network to be able to provoke Byzantine behavior in a "real" system and +then observe that evidence is constructed. This would make it possible to +remove the legacy tests entirely once the new tests have proven themselves. + +Abstract Orchestration Framework +++++++++++++++++++++++++++++++++ + +The orchestration of e2e test processes is presently done using docker +compose, which works well, but has proven a bit limiting as all processes need +to run on a single machine, and the log aggregation functions are confusing at +best. + +This project would replace the current orchestration with something more +generic, potentially maintaining the current system, but also allowing the e2e +tests to manage processes using k8s. There are a few "local" k8s frameworks +(e.g. kind and k3s,) which might be able to be useful for our current testing +model, but hopefully, we could use this new implementation with other k8s +systems for more flexible distribute test orchestration. + +Improve Operationalize Experience of ``run-multiple.sh`` +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +The e2e test runner currently runs a single test, and in most cases we manage +the test cases using a shell script that ensure cleanup of entire test +suites. This is a bit difficult to maintain and makes reproduction of test +cases more awkward than it should be. The e2e ``runner`` itself should provide +equivalent functionality to ``run-multiple.sh``: ensure cleanup of test cases, +collect and process output, and be able to manage entire suites of cases. + +It might also be useful to implement an e2e test orchestrator that runs all +tendermint instances in a single process, using "real" networks for faster +feedback and iteration during development. 
+ +In addition to being a bit easier to maintain, having a more capable runner +implementation would make it easier to collect data from test runs, improve +debugability and reporting. + +Fan-Out For CI E2E Tests +++++++++++++++++++++++++ + +While there are some parallelism in the execution of e2e tests, each e2e test +job must build a tendermint e2e image, which takes about 5 minutes of CPU time +per-task, which given the size of each of the runs. + +We'd like to be able to reduce the amount of overhead per-e2e tests while +keeping the cycle time for working with the tests very low, while also +maintaining a reasonable level of test coverage. This is an impossible +tradeoff, in some ways, and the percentage of overhead at the moment is large +enough that we can make some material progress with a moderate amount of time. + +Most of this work has to do with modifying github actions configuration and +e2e artifact (docker) building to reduce redundant work. Eventually, when we +can drop the requirement for CGo storage engines, it will be possible to move +(cross) compile tendermint locally, and then inject the binary into the docker +container, which would reduce a lot of the build-time complexity, although we +can move more in this direction or have runtime flags to disable CGo +dependencies for local development. + +Remove Panics +~~~~~~~~~~~~~ + +There are lots of places in the code base which can panic, and would not be +particularly well handled. While in some cases, panics are the right answer, +in many cases the panics were just added to simplify downstream error +checking, and could easily be converted to errors. + +The `Don't Panic RFC +`_ +covers some of the background and approach. 
+
+While the changes in this project are relatively rote, this will provide
+exposure to lots of different areas of the codebase as well as insight into
+how different areas of the codebase interact with each other, as well as
+experience with the test suites and infrastructure.
+
+Implement more Expressive ABCI Applications
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Tendermint maintains two very simple ABCI applications (a KV application used
+for basic testing, and a slightly more advanced test application used in the
+end-to-end tests). Writing an application would provide a new engineer with
+useful experiences using Tendermint that mirror the experience of downstream
+users.
+
+This is more of an exploratory project, but could include providing common
+interfaces on top of Tendermint consensus for other well known protocols or
+tools (e.g. ``etcd``) or a DNS server or some other tool.
+
+Self-Regulating Reactors
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Currently reactors (the internal processes that are responsible for the higher
+level behavior of Tendermint) can be started and stopped, but have no
+provision for being paused. These additional semantics may allow Tendermint to
+pause reactors (and avoid processing their messages, etc.) and allow better
+coordination in the future.
+
+While this is a big project, it's possible to break this apart into many
+smaller projects: make p2p channels pausable, add pause/un-pause hooks to the
+service implementation and machinery, and finally to modify the reactor
+implementations to take advantage of these additional semantics.
+
+This project would give an engineer some exposure to the p2p layer of the
+code, as well as to various aspects of the reactor implementations.
+
+Metrics
+~~~~~~~
+
+Tendermint has a metrics system that is relatively underutilized, and figuring
+out ways to capture and organize the metrics to provide value to users might
+provide an interesting set of projects for new engineers on Tendermint.
+
+Convert Logs to Metrics
++++++++++++++++++++++++
+
+Because the tendermint logs tend to be quite verbose and not particularly
+actionable, most users largely ignore the logging or run at very low
+verbosity. While the log statements in the code do describe useful events,
+taken as a whole the system is not particularly tractable, and particularly at
+the Debug level, not useful. One solution to this problem is to identify log
+messages that might be better expressed as metrics (e.g. incrementing a
+counter for certain kinds of errors).
+
+One approach might be to look at various logging statements, particularly
+debug statements or errors that are logged but not returned, and see if
+they're convertible to counters or other metrics.
+
+Expose Metrics to Tests
++++++++++++++++++++++++
+
+The existing Tendermint test suites replace the metrics infrastructure with
+no-op implementations, which means that tests can neither verify that metrics
+are ever recorded, nor can tests use metrics to observe events in the
+system. Writing an implementation, for testing, that makes it possible to
+record metrics and provides an API for introspecting this data, as well as
+potentially writing tests that take advantage of this type, could be useful.
+
+Logging Metrics
++++++++++++++++
+
+In some systems, the logging system itself can provide some interesting
+insights for operators: having metrics that track the number of messages at
+different levels as well as the total number of messages, can act as a canary
+for the system as a whole.
+
+This should be achievable by adding an interceptor layer within the logging
+package itself that can add metrics to the existing system.
From 5a42479d52f2574e40cb1c719e3ec7ac1929f9b3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 May 2022 11:55:04 +0000 Subject: [PATCH 038/203] build(deps): Bump github.com/lib/pq from 1.10.5 to 1.10.6 (#8567) Bumps [github.com/lib/pq](https://github.com/lib/pq) from 1.10.5 to 1.10.6.
Commits
  • 8c6de56 Merge pull request #1081 from catj-cockroach/add-kubernetes-secret-support
  • d8917fa adds support for kubernetes mounted private keys
  • 54a3a4b Merge pull request #1082 from johanneswuerbach/connector-dialer
  • 30d9faf Merge pull request #1080 from drakkan/sqlstate
  • cf6aeee feat: change the connector dialer
  • ef3111e error: add SQLState
  • 006a3f4 Added code that accounts for the 'Z' timezone separator in the ParseTimestamp...
  • da91844 Merge pull request #1078 from otan-cockroach/copydata
  • 326e7d0 fix CopyData comment
  • b3b8332 expose raw CopyData command (#1077)
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/lib/pq&package-manager=go_modules&previous-version=1.10.5&new-version=1.10.6)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2151f4b41c..90142c5b1d 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/lib/pq v1.10.5 + github.com/lib/pq v1.10.6 github.com/libp2p/go-buffer-pool v0.0.2 github.com/mroth/weightedrand v0.4.1 github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b diff --git a/go.sum b/go.sum index 791fd4b73f..c26a91a662 100644 --- a/go.sum +++ b/go.sum @@ -678,8 +678,8 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ= -github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= +github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/lufeee/execinquery v1.0.0 h1:1XUTuLIVPDlFvUU3LXmmZwHDsolsxXnY67lzhpeqe0I= From 2897b75853818f72930499e25b77b8dc8fdff7e8 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 17 May 2022 10:56:26 -0400 Subject: [PATCH 039/203] p2p: remove unused get height methods (#8569) --- internal/mempool/reactor.go | 40 ++++++++------------------------ internal/mempool/reactor_test.go | 1 - internal/p2p/peermanager.go | 31 ------------------------- internal/p2p/peermanager_test.go | 35 ---------------------------- 
node/node.go | 2 +- node/setup.go | 2 -- 6 files changed, 11 insertions(+), 100 deletions(-) diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index 3c22988ee6..28ee9e334a 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -6,7 +6,6 @@ import ( "fmt" "runtime/debug" "sync" - "time" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" @@ -22,13 +21,6 @@ var ( _ p2p.Wrapper = (*protomem.Message)(nil) ) -// PeerManager defines the interface contract required for getting necessary -// peer information. This should eventually be replaced with a message-oriented -// approach utilizing the p2p stack. -type PeerManager interface { - GetHeight(types.NodeID) int64 -} - // Reactor implements a service that contains mempool of txs that are broadcasted // amongst peers. It maintains a map from peer ID to counter, to prevent gossiping // txs to the peers you received it from. @@ -40,9 +32,8 @@ type Reactor struct { mempool *TxMempool ids *IDs - getPeerHeight func(types.NodeID) int64 - peerEvents p2p.PeerEventSubscriber - chCreator p2p.ChannelCreator + peerEvents p2p.PeerEventSubscriber + chCreator p2p.ChannelCreator // observePanic is a function for observing panics that were recovered in methods on // Reactor. observePanic is called with the recovered value. 
@@ -59,18 +50,16 @@ func NewReactor( txmp *TxMempool, chCreator p2p.ChannelCreator, peerEvents p2p.PeerEventSubscriber, - getPeerHeight func(types.NodeID) int64, ) *Reactor { r := &Reactor{ - logger: logger, - cfg: cfg, - mempool: txmp, - ids: NewMempoolIDs(), - chCreator: chCreator, - peerEvents: peerEvents, - getPeerHeight: getPeerHeight, - peerRoutines: make(map[types.NodeID]context.CancelFunc), - observePanic: defaultObservePanic, + logger: logger, + cfg: cfg, + mempool: txmp, + ids: NewMempoolIDs(), + chCreator: chCreator, + peerEvents: peerEvents, + peerRoutines: make(map[types.NodeID]context.CancelFunc), + observePanic: defaultObservePanic, } r.BaseService = *service.NewBaseService(logger, "Mempool", r) @@ -327,15 +316,6 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, m memTx := nextGossipTx.Value.(*WrappedTx) - if r.getPeerHeight != nil { - height := r.getPeerHeight(peerID) - if height > 0 && height < memTx.height-1 { - // allow for a lag of one block - time.Sleep(PeerCatchupSleepIntervalMS * time.Millisecond) - continue - } - } - // NOTE: Transaction batching was disabled due to: // https://github.com/tendermint/tendermint/issues/5796 if ok := r.mempool.txStore.TxHasPeer(memTx.hash, peerMempoolID); !ok { diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index 8ceae20135..351315bae7 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -85,7 +85,6 @@ func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNode mempool, chCreator, func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] }, - rts.network.Nodes[nodeID].PeerManager.GetHeight, ) rts.nodes = append(rts.nodes, nodeID) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 756551a490..165b00e61a 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -1027,37 +1027,6 @@ func (m *PeerManager) retryDelay(failures uint32, 
persistent bool) time.Duration return delay } -// GetHeight returns a peer's height, as reported via SetHeight, or 0 if the -// peer or height is unknown. -// -// FIXME: This is a temporary workaround to share state between the consensus -// and mempool reactors, carried over from the legacy P2P stack. Reactors should -// not have dependencies on each other, instead tracking this themselves. -func (m *PeerManager) GetHeight(peerID types.NodeID) int64 { - m.mtx.Lock() - defer m.mtx.Unlock() - - peer, _ := m.store.Get(peerID) - return peer.Height -} - -// SetHeight stores a peer's height, making it available via GetHeight. -// -// FIXME: This is a temporary workaround to share state between the consensus -// and mempool reactors, carried over from the legacy P2P stack. Reactors should -// not have dependencies on each other, instead tracking this themselves. -func (m *PeerManager) SetHeight(peerID types.NodeID, height int64) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - peer, ok := m.store.Get(peerID) - if !ok { - peer = m.newPeerInfo(peerID) - } - peer.Height = height - return m.store.Set(peer) -} - // peerStore stores information about peers. It is not thread-safe, assuming it // is only used by PeerManager which handles concurrency control. This allows // the manager to execute multiple operations atomically via its own mutex. 
diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index 82d1e26932..47e8462a42 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -1868,38 +1868,3 @@ func TestPeerManager_Advertise_Self(t *testing.T) { self, }, peerManager.Advertise(dID, 100)) } - -func TestPeerManager_SetHeight_GetHeight(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - - db := dbm.NewMemDB() - peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{}) - require.NoError(t, err) - - // Getting a height should default to 0, for unknown peers and - // for known peers without height. - added, err := peerManager.Add(a) - require.NoError(t, err) - require.True(t, added) - require.EqualValues(t, 0, peerManager.GetHeight(a.NodeID)) - require.EqualValues(t, 0, peerManager.GetHeight(b.NodeID)) - - // Setting a height should work for a known node. - require.NoError(t, peerManager.SetHeight(a.NodeID, 3)) - require.EqualValues(t, 3, peerManager.GetHeight(a.NodeID)) - - // Setting a height should add an unknown node. - require.Equal(t, []types.NodeID{a.NodeID}, peerManager.Peers()) - require.NoError(t, peerManager.SetHeight(b.NodeID, 7)) - require.EqualValues(t, 7, peerManager.GetHeight(b.NodeID)) - require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) - - // The heights should not be persisted. 
- peerManager, err = p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{}) - require.NoError(t, err) - - require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) - require.Zero(t, peerManager.GetHeight(a.NodeID)) - require.Zero(t, peerManager.GetHeight(b.NodeID)) -} diff --git a/node/node.go b/node/node.go index 56379d2e2d..1bda1f0f74 100644 --- a/node/node.go +++ b/node/node.go @@ -266,7 +266,7 @@ func makeNode( node.evPool = evPool mpReactor, mp := createMempoolReactor(logger, cfg, proxyApp, stateStore, nodeMetrics.mempool, - peerManager.Subscribe, node.router.OpenChannel, peerManager.GetHeight) + peerManager.Subscribe, node.router.OpenChannel) node.rpcEnv.Mempool = mp node.services = append(node.services, mpReactor) diff --git a/node/setup.go b/node/setup.go index d6966800a6..8089ea4665 100644 --- a/node/setup.go +++ b/node/setup.go @@ -147,7 +147,6 @@ func createMempoolReactor( memplMetrics *mempool.Metrics, peerEvents p2p.PeerEventSubscriber, chCreator p2p.ChannelCreator, - peerHeight func(types.NodeID) int64, ) (service.Service, mempool.Mempool) { logger = logger.With("module", "mempool") @@ -166,7 +165,6 @@ func createMempoolReactor( mp, chCreator, peerEvents, - peerHeight, ) if cfg.Consensus.WaitForTxs() { From 66c4c82f7a687f75d2641a2452222b21a8d7d7ac Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 17 May 2022 08:52:39 -0700 Subject: [PATCH 040/203] rpc: rework timeouts to be per-method instead of global (#8570) * rpc: rework timeouts to be per-method instead of global Prior to this change, we set a 10-second global timeout for all RPC methods using the net/http Server type's WriteTimeout. This meant that any request whose handler did not return within that period would simply drop the connection to the client. This timeout is too short for a default, as evidenced by issues like [1] and [2]. 
In addition, the mode of failure on the client side is confusing; it shows up as a dropped connection (EOF) rather than a meaningful error from the service. More importantly, various methods have diffent constraints: Some should be able to return quickly, others may need to adjust based on the application workload. This is a first step toward supporting configurable timeouts. This change: - Removes the server-wide default global timeout, and instead: - Wires up a default context timeout for all RPC handlers. - Increases the default timeout from 10s to 60s. - Adds a hook to override this per-method as needed. This does NOT expose the timeouts in the configuration file (yet). [1] https://github.com/osmosis-labs/osmosis/issues/1391 [2] https://github.com/tendermint/tendermint/issues/8465 --- CHANGELOG_PENDING.md | 1 + internal/rpc/core/routes.go | 2 +- rpc/jsonrpc/server/http_server.go | 29 +++++++++++++++------- rpc/jsonrpc/server/rpc_func.go | 40 +++++++++++++++++++++++-------- 4 files changed, 52 insertions(+), 20 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 65ab5ee3b5..d38caf50b8 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -21,6 +21,7 @@ Special thanks to external contributors on this release: - [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair) - [cli] \#8081 make the reset command safe to use by intoducing `reset-state` command. Fixed by \#8259. (@marbar3778, @cmwaters) - [config] \#8222 default indexer configuration to null. (@creachadair) + - [rpc] \#8570 rework timeouts to be per-method instead of global. (@creachadair) - Apps diff --git a/internal/rpc/core/routes.go b/internal/rpc/core/routes.go index 4bc1ca4140..cafb92094a 100644 --- a/internal/rpc/core/routes.go +++ b/internal/rpc/core/routes.go @@ -28,7 +28,7 @@ func NewRoutesMap(svc RPCService, opts *RouteOptions) RoutesMap { out := RoutesMap{ // Event subscription. 
Note that subscribe, unsubscribe, and // unsubscribe_all are only available via the websocket endpoint. - "events": rpc.NewRPCFunc(svc.Events), + "events": rpc.NewRPCFunc(svc.Events).Timeout(0), "subscribe": rpc.NewWSRPCFunc(svc.Subscribe), "unsubscribe": rpc.NewWSRPCFunc(svc.Unsubscribe), "unsubscribe_all": rpc.NewWSRPCFunc(svc.UnsubscribeAll), diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 0b715835d0..50a37158ec 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -20,16 +20,27 @@ import ( // Config is a RPC server configuration. type Config struct { - // see netutil.LimitListener + // The maximum number of connections that will be accepted by the listener. + // See https://godoc.org/golang.org/x/net/netutil#LimitListener MaxOpenConnections int - // mirrors http.Server#ReadTimeout + + // Used to set the HTTP server's per-request read timeout. + // See https://godoc.org/net/http#Server.ReadTimeout ReadTimeout time.Duration - // mirrors http.Server#WriteTimeout + + // Used to set the HTTP server's per-request write timeout. Note that this + // affects ALL methods on the server, so it should not be set too low. This + // should be used as a safety valve, not a resource-control timeout. + // + // See https://godoc.org/net/http#Server.WriteTimeout WriteTimeout time.Duration - // MaxBodyBytes controls the maximum number of bytes the - // server will read parsing the request body. + + // Controls the maximum number of bytes the server will read parsing the + // request body. MaxBodyBytes int64 - // mirrors http.Server#MaxHeaderBytes + + // Controls the maximum size of a request header. 
+ // See https://godoc.org/net/http#Server.MaxHeaderBytes MaxHeaderBytes int } @@ -38,9 +49,9 @@ func DefaultConfig() *Config { return &Config{ MaxOpenConnections: 0, // unlimited ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxBodyBytes: int64(1000000), // 1MB - MaxHeaderBytes: 1 << 20, // same as the net/http default + WriteTimeout: 0, // no default timeout + MaxBodyBytes: 1000000, // 1MB + MaxHeaderBytes: 1 << 20, // same as the net/http default } } diff --git a/rpc/jsonrpc/server/rpc_func.go b/rpc/jsonrpc/server/rpc_func.go index 8eba287283..1fff323d75 100644 --- a/rpc/jsonrpc/server/rpc_func.go +++ b/rpc/jsonrpc/server/rpc_func.go @@ -9,11 +9,16 @@ import ( "net/http" "reflect" "strings" + "time" "github.com/tendermint/tendermint/libs/log" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) +// DefaultRPCTimeout is the default context timeout for calls to any RPC method +// that does not override it with a more specific timeout. +const DefaultRPCTimeout = 60 * time.Second + // RegisterRPCFuncs adds a route to mux for each non-websocket function in the // funcMap, and also a root JSON-RPC POST handler. func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger log.Logger) { @@ -32,11 +37,12 @@ func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger lo // RPCFunc contains the introspected type information for a function. 
type RPCFunc struct { - f reflect.Value // underlying rpc function - param reflect.Type // the parameter struct, or nil - result reflect.Type // the non-error result type, or nil - args []argInfo // names and type information (for URL decoding) - ws bool // websocket only + f reflect.Value // underlying rpc function + param reflect.Type // the parameter struct, or nil + result reflect.Type // the non-error result type, or nil + args []argInfo // names and type information (for URL decoding) + timeout time.Duration // default request timeout, 0 means none + ws bool // websocket only } // argInfo records the name of a field, along with a bit to tell whether the @@ -52,6 +58,12 @@ type argInfo struct { // with the resulting argument value. It reports an error if parameter parsing // fails, otherwise it returns the result from the wrapped function. func (rf *RPCFunc) Call(ctx context.Context, params json.RawMessage) (interface{}, error) { + // If ctx has its own deadline we will respect it; otherwise use rf.timeout. + if _, ok := ctx.Deadline(); !ok && rf.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, rf.timeout) + defer cancel() + } args, err := rf.parseParams(ctx, params) if err != nil { return nil, err @@ -74,6 +86,11 @@ func (rf *RPCFunc) Call(ctx context.Context, params json.RawMessage) (interface{ return returns[0].Interface(), nil } +// Timeout updates rf to include a default timeout for calls to rf. This +// timeout is used if one is not already provided on the request context. +// Setting d == 0 means there will be no timeout. Returns rf to allow chaining. +func (rf *RPCFunc) Timeout(d time.Duration) *RPCFunc { rf.timeout = d; return rf } + // parseParams parses the parameters of a JSON-RPC request and returns the // corresponding argument values. On success, the first argument value will be // the value of ctx. 
@@ -129,7 +146,9 @@ func (rf *RPCFunc) adjustParams(data []byte) (json.RawMessage, error) { // func(context.Context, *T) (R, error) // // for an arbitrary struct type T and type R. NewRPCFunc will panic if f does -// not have one of these forms. +// not have one of these forms. A newly-constructed RPCFunc has a default +// timeout of DefaultRPCTimeout; use the Timeout method to adjust this as +// needed. func NewRPCFunc(f interface{}) *RPCFunc { rf, err := newRPCFunc(f) if err != nil { @@ -215,10 +234,11 @@ func newRPCFunc(f interface{}) (*RPCFunc, error) { } return &RPCFunc{ - f: fv, - param: ptype, - result: rtype, - args: args, + f: fv, + param: ptype, + result: rtype, + args: args, + timeout: DefaultRPCTimeout, // until overridden }, nil } From 21f140410bdfd3f097f9e563cf6b714f08ff5ca5 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 17 May 2022 09:49:23 -0700 Subject: [PATCH 041/203] rpc: enable the ADR 075 event log by default in new configs (#8572) Since we are deprecating the stream-based event subscription in v0.36, we should ensure that new nodes enable the replacement by default. For now, just set a baseline 30-second window. --- config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/config.go b/config/config.go index 500e3f7d64..c1fa4223a1 100644 --- a/config/config.go +++ b/config/config.go @@ -523,7 +523,7 @@ func DefaultRPCConfig() *RPCConfig { MaxSubscriptionClients: 100, MaxSubscriptionsPerClient: 5, ExperimentalDisableWebsocket: false, // compatible with TM v0.35 and earlier - EventLogWindowSize: 0, // disables /events RPC by default + EventLogWindowSize: 30 * time.Second, EventLogMaxItems: 0, TimeoutBroadcastTxCommit: 10 * time.Second, From c620900fdd98838e29debdc47243e03f13d3ef05 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Tue, 17 May 2022 10:34:43 -0700 Subject: [PATCH 042/203] rpc: fix plumbing of broadcast_tx_commit timeout (#8573) In #3435 we allowed this timeout to override the global write timeout. But after #8570 this meant we were applying a shorter timeout by default. Don't do the patch if the timeout is already unlimited. This is a temporary workaround; in light of #8561 I plan to get rid of this option entirely during the v0.37 cycle, but meanwhile we should keep existing use more or less coherent. --- cmd/tendermint/commands/light.go | 3 ++- internal/inspect/rpc/rpc.go | 3 ++- internal/rpc/core/env.go | 3 ++- test/e2e/node/main.go | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/cmd/tendermint/commands/light.go b/cmd/tendermint/commands/light.go index 8e39d79009..2b812fe185 100644 --- a/cmd/tendermint/commands/light.go +++ b/cmd/tendermint/commands/light.go @@ -171,7 +171,8 @@ for applications built w/ Cosmos SDK). // If necessary adjust global WriteTimeout to ensure it's greater than // TimeoutBroadcastTxCommit. // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { + // Note we don't need to adjust anything if the timeout is already unlimited. + if cfg.WriteTimeout > 0 && cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { cfg.WriteTimeout = conf.RPC.TimeoutBroadcastTxCommit + 1*time.Second } diff --git a/internal/inspect/rpc/rpc.go b/internal/inspect/rpc/rpc.go index 00c3e52efa..d706168346 100644 --- a/internal/inspect/rpc/rpc.go +++ b/internal/inspect/rpc/rpc.go @@ -125,7 +125,8 @@ func serverRPCConfig(r *config.RPCConfig) *server.Config { // If necessary adjust global WriteTimeout to ensure it's greater than // TimeoutBroadcastTxCommit. // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= r.TimeoutBroadcastTxCommit { + // Note we don't need to adjust anything if the timeout is already unlimited. 
+ if cfg.WriteTimeout > 0 && cfg.WriteTimeout <= r.TimeoutBroadcastTxCommit { cfg.WriteTimeout = r.TimeoutBroadcastTxCommit + 1*time.Second } return cfg diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go index 24f43a4a71..124525f26f 100644 --- a/internal/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -236,7 +236,8 @@ func (env *Environment) StartService(ctx context.Context, conf *config.Config) ( // If necessary adjust global WriteTimeout to ensure it's greater than // TimeoutBroadcastTxCommit. // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { + // Note we don't need to adjust anything if the timeout is already unlimited. + if cfg.WriteTimeout > 0 && cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { cfg.WriteTimeout = conf.RPC.TimeoutBroadcastTxCommit + 1*time.Second } diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index 2cbb9e4b05..94c1af1abe 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -210,7 +210,8 @@ func startLightNode(ctx context.Context, logger log.Logger, cfg *Config) error { // If necessary adjust global WriteTimeout to ensure it's greater than // TimeoutBroadcastTxCommit. // See https://github.com/tendermint/tendermint/issues/3435 - if rpccfg.WriteTimeout <= tmcfg.RPC.TimeoutBroadcastTxCommit { + // Note we don't need to adjust anything if the timeout is already unlimited. + if rpccfg.WriteTimeout > 0 && rpccfg.WriteTimeout <= tmcfg.RPC.TimeoutBroadcastTxCommit { rpccfg.WriteTimeout = tmcfg.RPC.TimeoutBroadcastTxCommit + 1*time.Second } From 2e20b820ab6cf009c12a1cba74507b2a8bf4d9b4 Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Thu, 19 May 2022 08:53:28 +0200 Subject: [PATCH 043/203] Adapted `apps.md` from ABCI directory (#8506) * Copied over the 'Apps' section from ABCI. 
Need to adapt it * Adapted the ABCI text in requirements section * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md 
Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: M. J. Fromberger * Adressed @cason's comments * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: M. J. Fromberger * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: M. J. Fromberger * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: M. J. Fromberger * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: M. J. Fromberger * Addressed remaining comments * Addressed some of @cmwaters comments * Addressed more comments * Addressed @JayT106's comments Co-authored-by: Daniel Co-authored-by: M. J. Fromberger --- spec/abci++/README.md | 2 +- .../abci++_app_requirements_002_draft.md | 845 ++++++++++++++++++ spec/abci++/abci++_methods_002_draft.md | 20 +- 3 files changed, 857 insertions(+), 10 deletions(-) diff --git a/spec/abci++/README.md b/spec/abci++/README.md index 38feba9d7e..0f7a87c4ae 100644 --- a/spec/abci++/README.md +++ b/spec/abci++/README.md @@ -29,7 +29,7 @@ This specification is split as follows: - [Methods](./abci++_methods_002_draft.md) - complete details on all ABCI++ methods and message types. - [Requirements for the Application](./abci++_app_requirements_002_draft.md) - formal requirements - on the Application's logic to ensure liveness of Tendermint. 
These requirements define what + on the Application's logic to ensure Tendermint properties such as liveness. These requirements define what Tendermint expects from the Application. - [Tendermint's expected behavior](./abci++_tmint_expected_behavior_002_draft.md) - specification of how the different ABCI++ methods may be called by Tendermint. This explains what the Application diff --git a/spec/abci++/abci++_app_requirements_002_draft.md b/spec/abci++/abci++_app_requirements_002_draft.md index 68014a536f..ff9df2c56a 100644 --- a/spec/abci++/abci++_app_requirements_002_draft.md +++ b/spec/abci++/abci++_app_requirements_002_draft.md @@ -5,6 +5,8 @@ title: Application Requirements # Application Requirements +## Formal Requirements + This section specifies what Tendermint expects from the Application. It is structured as a set of formal requirements that can be used for testing and verification of the Application's logic. @@ -177,3 +179,846 @@ Likewise, `ExtendVote` can also be non-deterministic: but may also depend on other values or operations. * *wrp = wrq ⇏ erp = erq* + +## Managing the Application state and related topics + +### Connection State + +Tendermint maintains four concurrent ABCI++ connections, namely +[Consensus Connection](#consensus-connection), +[Mempool Connection](#mempool-connection), +[Info/Query Connection](#infoquery-connection), and +[Snapshot Connection](#snapshot-connection). +It is common for an application to maintain a distinct copy of +the state for each connection, which are synchronized upon `Commit` calls. + +#### Concurrency + +In principle, each of the four ABCI++ connections operates concurrently with one +another. This means applications need to ensure access to state is +thread safe. 
Up to v0.35.x, both the +[default in-process ABCI client](https://github.com/tendermint/tendermint/blob/v0.35.x/abci/client/local_client.go#L18) +and the +[default Go ABCI server](https://github.com/tendermint/tendermint/blob/v0.35.x/abci/server/socket_server.go#L32) +used a global lock to guard the handling of events across all connections, so they were not +concurrent at all. This meant whether your app was compiled in-process with +Tendermint using the `NewLocalClient`, or run out-of-process using the `SocketServer`, +ABCI messages from all connections were received in sequence, one at a +time. +This is no longer the case starting from v0.36.0: the global locks have been removed and it is +up to the Application to synchronize access to its state when handling +ABCI++ methods on all connections. +Nevertheless, as all ABCI calls are now synchronous, ABCI messages using the same connection are +still received in sequence. + +#### FinalizeBlock + +When the consensus algorithm decides on a block, Tendermint uses `FinalizeBlock` to send the +decided block's data to the Application, which uses it to transition its state. + +The Application must remember the latest height from which it +has run a successful `Commit` so that it can tell Tendermint where to +pick up from when it recovers from a crash. See information on the Handshake +[here](#crash-recovery). + +#### Commit + +The Application should persist its state during `Commit`, before returning from it. + +Before invoking `Commit`, Tendermint locks the mempool and flushes the mempool connection. This ensures that +no new messages +will be received on the mempool connection during this processing step, providing an opportunity to safely +update all four +connection states to the latest committed state at the same time. + +When `Commit` returns, Tendermint unlocks the mempool. 
+
+WARNING: if the ABCI app logic processing the `Commit` message sends a
+`/broadcast_tx_sync` or `/broadcast_tx` and waits for the response
+before proceeding, it will deadlock. Executing `broadcast_tx` calls
+involves acquiring the mempool lock that Tendermint holds during the `Commit` call.
+Synchronous mempool-related calls must be avoided as part of the sequential logic of the
+`Commit` function.
+
+#### Candidate States
+
+Tendermint calls `PrepareProposal` when it is about to send a proposed block to the network.
+Likewise, Tendermint calls `ProcessProposal` upon reception of a proposed block from the
+network. In both cases, the proposed block's data
+is disclosed to the Application, in the same conditions as is done in `FinalizeBlock`.
+The block data disclosed to the Application by these three methods are the following:
+
+* the transaction list
+* the `LastCommit` referring to the previous block
+* the block header's hash (except in `PrepareProposal`, where it is not known yet)
+* list of validators that misbehaved
+* the block's timestamp
+* `NextValidatorsHash`
+* Proposer address
+
+The Application may decide to *immediately* execute the given block (i.e., upon `PrepareProposal`
+or `ProcessProposal`). There are two main reasons why the Application may want to do this:
+
+* *Avoiding invalid transactions in blocks*.
+  In order to be sure that the block does not contain *any* invalid transaction, there may be
+  no way other than fully executing the transactions in the block as though it was the *decided*
+  block.
+* *Quick `FinalizeBlock` execution*.
+  Upon reception of the decided block via `FinalizeBlock`, if that same block was executed
+  upon `PrepareProposal` or `ProcessProposal` and the resulting state was kept in memory, the
+  Application can simply apply that state (faster) to the main state, rather than reexecuting
+  the decided block (slower).
+
+`PrepareProposal`/`ProcessProposal` can be called many times for a given height.
Moreover, +it is not possible to accurately predict which of the blocks proposed in a height will be decided, +being delivered to the Application in that height's `FinalizeBlock`. +Therefore, the state resulting from executing a proposed block, denoted a *candidate state*, should +be kept in memory as a possible final state for that height. When `FinalizeBlock` is called, the Application should +check if the decided block corresponds to one of its candidate states; if so, it will apply it as +its *ExecuteTxState* (see [Consensus Connection](#consensus-connection) below), +which will be persisted during the upcoming `Commit` call. + +Under adverse conditions (e.g., network instability), Tendermint might take many rounds. +In this case, potentially many proposed blocks will be disclosed to the Application for a given height. +By the nature of Tendermint's consensus algorithm, the number of proposed blocks received by the Application +for a particular height cannot be bound, so Application developers must act with care and use mechanisms +to bound memory usage. As a general rule, the Application should be ready to discard candidate states +before `FinalizeBlock`, even if one of them might end up corresponding to the +decided block and thus have to be reexecuted upon `FinalizeBlock`. + +### States and ABCI++ Connections + +#### Consensus Connection + +The Consensus Connection should maintain an *ExecuteTxState* — the working state +for block execution. It should be updated by the call to `FinalizeBlock` +during block execution and committed to disk as the "latest +committed state" during `Commit`. Execution of a proposed block (via `PrepareProposal`/`ProcessProposal`) +**must not** update the *ExecuteTxState*, but rather be kept as a separate candidate state until `FinalizeBlock` +confirms which of the candidate states (if any) can be used to update *ExecuteTxState*. + +#### Mempool Connection + +The mempool Connection maintains *CheckTxState*. 
Tendermint sequentially processes an incoming
+transaction (via RPC from client or P2P from the gossip layer) against *CheckTxState*.
+If the processing does not return any error, the transaction is accepted into the mempool
+and Tendermint starts gossipping it.
+*CheckTxState* should be reset to the latest committed state
+at the end of every `Commit`.
+
+During the execution of a consensus instance, the *CheckTxState* may be updated concurrently with the
+*ExecuteTxState*, as messages may be sent concurrently on the Consensus and Mempool connections.
+At the end of the consensus instance, as described above, Tendermint locks the mempool and flushes
+the mempool connection before calling `Commit`. This ensures that all pending `CheckTx` calls are
+responded to and no new ones can begin.
+
+After the `Commit` call returns, while still holding the mempool lock, `CheckTx` is run again on all
+transactions that remain in the node's local mempool after filtering those included in the block.
+Parameter `Type` in `RequestCheckTx`
+indicates whether an incoming transaction is new (`CheckTxType_New`), or a
+recheck (`CheckTxType_Recheck`).
+
+Finally, after re-checking transactions in the mempool, Tendermint will unlock
+the mempool connection. New transactions are once again able to be processed through `CheckTx`.
+
+Note that `CheckTx` is just a weak filter to keep invalid transactions out of the mempool and,
+ultimately, out of the blockchain.
+Since the transaction cannot be guaranteed to be checked against the exact same state as it
+will be executed as part of a (potential) decided block, `CheckTx` shouldn't check *everything*
+that affects the transaction's validity, in particular those checks whose validity may depend on
+transaction ordering. `CheckTx` is weak because a Byzantine node need not care about `CheckTx`;
+it can propose a block full of invalid transactions if it wants.
The mechanism ABCI++ has
+in place for dealing with such behavior is `ProcessProposal`.
+
+##### Replay Protection
+
+It is possible for old transactions to be sent again to the Application. This is typically
+undesirable for all transactions, except for a generally small subset of them which are idempotent.
+
+The mempool has a mechanism to prevent duplicated transactions from being processed.
+This mechanism is nevertheless best-effort (currently based on the indexer)
+and does not provide any guarantee of non duplication.
+It is thus up to the Application to implement an application-specific
+replay protection mechanism with strong guarantees as part of the logic in `CheckTx`.
+
+#### Info/Query Connection
+
+The Info (or Query) Connection should maintain a `QueryState`. This connection has two
+purposes: 1) having the application answer the queries Tendermint receives from users
+(see section [Query](#query)),
+and 2) synchronizing Tendermint and the Application at start up time (see
+[Crash Recovery](#crash-recovery))
+or after state sync (see [State Sync](#state-sync)).
+
+`QueryState` is a read-only copy of *ExecuteTxState* as it was after the last
+`Commit`, i.e.
+after the full block has been processed and the state committed to disk.
+
+#### Snapshot Connection
+
+The Snapshot Connection is used to serve state sync snapshots for other nodes
+and/or restore state sync snapshots to a local node being bootstrapped.
+Snapshot management is optional: an Application may choose not to implement it.
+
+For more information, see Section [State Sync](#state-sync).
+
+### Transaction Results
+
+The Application is expected to return a list of
+[`ExecTxResult`](./abci%2B%2B_methods_002_draft.md#exectxresult) in
+[`ResponseFinalizeBlock`](./abci%2B%2B_methods_002_draft.md#finalizeblock). The list of transaction
+results must respect the same order as the list of transactions delivered via
+[`RequestFinalizeBlock`](./abci%2B%2B_methods_002_draft.md#finalizeblock).
+This section discusses the fields inside this structure, along with the fields in +[`ResponseCheckTx`](./abci%2B%2B_methods_002_draft.md#checktx), +whose semantics are similar. + +The `Info` and `Log` fields are +non-deterministic values for debugging/convenience purposes. Tendermint logs them but they +are otherwise ignored. + +#### Gas + +Ethereum introduced the notion of *gas* as an abstract representation of the +cost of the resources consumed by nodes when processing a transaction. Every operation in the +Ethereum Virtual Machine uses some amount of gas. +Gas has a market-variable price based on which miners can accept or reject to execute a +particular operation. + +Users propose a maximum amount of gas for their transaction; if the transaction uses less, they get +the difference credited back. Tendermint adopts a similar abstraction, +though uses it only optionally and weakly, allowing applications to define +their own sense of the cost of execution. + +In Tendermint, the [ConsensusParams.Block.MaxGas](#consensus-parameters) limits the amount of +total gas that can be used by all transactions in a block. +The default value is `-1`, which means the block gas limit is not enforced, or that the concept of +gas is meaningless. + +Responses contain a `GasWanted` and `GasUsed` field. The former is the maximum +amount of gas the sender of a transaction is willing to use, and the latter is how much it actually +used. Applications should enforce that `GasUsed <= GasWanted` — i.e. transaction execution +or validation should fail before it can use more resources than it requested. + +When `MaxGas > -1`, Tendermint enforces the following rules: + +* `GasWanted <= MaxGas` for every transaction in the mempool +* `(sum of GasWanted in a block) <= MaxGas` when proposing a block + +If `MaxGas == -1`, no rules about gas are enforced. + +In v0.35.x and earlier versions, Tendermint does not enforce anything about Gas in consensus, +only in the mempool. 
+This means it does not guarantee that committed blocks satisfy these rules. +It is the application's responsibility to return non-zero response codes when gas limits are exceeded +when executing the transactions of a block. +Since the introduction of `PrepareProposal` and `ProcessProposal` in v.0.36.x, it is now possible +for the Application to enforce that all blocks proposed (and voted for) in consensus — and thus all +blocks decided — respect the `MaxGas` limits described above. + +Since the Application should enforce that `GasUsed <= GasWanted` when executing a transaction, and +it can use `PrepareProposal` and `ProcessProposal` to enforce that `(sum of GasWanted in a block) <= MaxGas` +in all proposed or prevoted blocks, +we have: + +* `(sum of GasUsed in a block) <= MaxGas` for every block + +The `GasUsed` field is ignored by Tendermint. + +#### Specifics of `ResponseCheckTx` + +If `Code != 0`, it will be rejected from the mempool and hence +not broadcasted to other peers and not included in a proposal block. + +`Data` contains the result of the `CheckTx` transaction execution, if any. It does not need to be +deterministic since, given a transaction, nodes' Applications +might have a different *CheckTxState* values when they receive it and check their validity +via `CheckTx`. +Tendermint ignores this value in `ResponseCheckTx`. + +`Events` include any events for the execution, though since the transaction has not +been committed yet, they are effectively ignored by Tendermint. + +From v0.35.x on, there is a `Priority` field in `ResponseCheckTx` that can be +used to explicitly prioritize transactions in the mempool for inclusion in a block +proposal. + +#### Specifics of `ExecTxResult` + +`FinalizeBlock` is the workhorse of the blockchain. Tendermint delivers the decided block, +including the list of all its transactions synchronously to the Application. 
+The block delivered (and thus the transaction order) is the same at all correct nodes as guaranteed +by the Agreement property of Tendermint consensus. + +In same block execution mode, field `LastResultsHash` in the block header refers to the results +of all transactions stored in that block. Therefore, +`PrepareProposal` must return `ExecTxResult` so that it can +be used to build the block to be proposed in the current height. + +The `Data` field in `ExecTxResult` contains an array of bytes with the transaction result. +It must be deterministic (i.e., the same value must be returned at all nodes), but it can contain arbitrary +data. Likewise, the value of `Code` must be deterministic. +If `Code != 0`, the transaction will be marked invalid, +though it is still included in the block. Invalid transaction are not indexed, as they are +considered analogous to those that failed `CheckTx`. + +Both the `Code` and `Data` are included in a structure that is hashed into the +`LastResultsHash` of the block header in the next height (next block execution mode), or the +header of the block to propose in the current height (same block execution mode, `ExecTxResult` as +part of `PrepareProposal`). + +`Events` include any events for the execution, which Tendermint will use to index +the transaction by. This allows transactions to be queried according to what +events took place during their execution. + +### Updating the Validator Set + +The application may set the validator set during +[`InitChain`](./abci%2B%2B_methods_002_draft.md#initchain), and may update it during +[`FinalizeBlock`](./abci%2B%2B_methods_002_draft.md#finalizeblock) +(next block execution mode) or +[`PrepareProposal`](./abci%2B%2B_methods_002_draft.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods_002_draft.md#processproposal) +(same block execution mode). In all cases, a structure of type +[`ValidatorUpdate`](./abci%2B%2B_methods_002_draft.md#validatorupdate) is returned. 
+
+The `InitChain` method, used to initialize the Application, can return a list of validators.
+If the list is empty, Tendermint will use the validators loaded from the genesis
+file.
+If the list returned by `InitChain` is not empty, Tendermint will use its contents as the validator set.
+This way the application can set the initial validator set for the
+blockchain.
+
+Applications must ensure that a single set of validator updates does not contain duplicates, i.e.
+a given public key can only appear once within a given update. If an update includes
+duplicates, the block execution will fail irrecoverably.
+
+Structure `ValidatorUpdate` contains a public key, which is used to identify the validator:
+The public key currently supports three types:
+
+* `ed25519`
+* `secp256k1`
+* `sr25519`
+
+Structure `ValidatorUpdate` also contains an `int64` field denoting the validator's new power.
+Applications must ensure that
+`ValidatorUpdate` structures abide by the following rules:
+
+* power must be non-negative
+* if power is set to 0, the validator must be in the validator set; it will be removed from the set
+* if power is greater than 0:
+  * if the validator is not in the validator set, it will be added to the
+    set with the given power
+  * if the validator is in the validator set, its power will be adjusted to the given power
+* the total power of the new validator set must not exceed `MaxTotalVotingPower`, where
+  `MaxTotalVotingPower = MaxInt64 / 8`
+
+Note the updates returned after processing the block at height `H` will only take effect
+at block `H+2` (see Section [Methods](./abci%2B%2B_methods_002_draft.md)).
+
+### Consensus Parameters
+
+`ConsensusParams` are global parameters that apply to all validators in a blockchain.
+They enforce certain limits in the blockchain, like the maximum size
+of blocks, amount of gas used in a block, and the maximum acceptable age of
+evidence.
They can be set in +[`InitChain`](./abci%2B%2B_methods_002_draft.md#initchain), and updated in +[`FinalizeBlock`](./abci%2B%2B_methods_002_draft.md#finalizeblock) +(next block execution mode) or +[`PrepareProposal`](./abci%2B%2B_methods_002_draft.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods_002_draft.md#processproposal) +(same block execution model). +These parameters are deterministically set and/or updated by the Application, so +all full nodes have the same value at a given height. + +#### List of Parameters + +These are the current consensus parameters (as of v0.36.x): + +1. [BlockParams.MaxBytes](#blockparamsmaxbytes) +2. [BlockParams.MaxGas](#blockparamsmaxgas) +3. [EvidenceParams.MaxAgeDuration](#evidenceparamsmaxageduration) +4. [EvidenceParams.MaxAgeNumBlocks](#evidenceparamsmaxagenumblocks) +5. [EvidenceParams.MaxBytes](#evidenceparamsmaxbytes) +6. [SynchronyParams.MessageDelay](#synchronyparamsmessagedelay) +7. [SynchronyParams.Precision](#synchronyparamsprecision) +8. [TimeoutParams.Propose](#timeoutparamspropose) +9. [TimeoutParams.ProposeDelta](#timeoutparamsproposedelta) +10. [TimeoutParams.Vote](#timeoutparamsvote) +11. [TimeoutParams.VoteDelta](#timeoutparamsvotedelta) +12. [TimeoutParams.Commit](#timeoutparamscommit) +13. [TimeoutParams.BypassCommitTimeout](#timeoutparamsbypasscommittimeout) + +##### BlockParams.MaxBytes + +The maximum size of a complete Protobuf encoded block. +This is enforced by Tendermint consensus. + +This implies a maximum transaction size that is this `MaxBytes`, less the expected size of +the header, the validator set, and any included evidence in the block. + +Must have `0 < MaxBytes < 100 MB`. + +##### BlockParams.MaxGas + +The maximum of the sum of `GasWanted` that will be allowed in a proposed block. +This is *not* enforced by Tendermint consensus. +It is left to the Application to enforce (ie. if transactions are included past the +limit, they should return non-zero codes). 
It is used by Tendermint to limit the +transactions included in a proposed block. + +Must have `MaxGas >= -1`. +If `MaxGas == -1`, no limit is enforced. + +##### EvidenceParams.MaxAgeDuration + +This is the maximum age of evidence in time units. +This is enforced by Tendermint consensus. + +If a block includes evidence older than this (AND the evidence was created more +than `MaxAgeNumBlocks` ago), the block will be rejected (validators won't vote +for it). + +Must have `MaxAgeDuration > 0`. + +##### EvidenceParams.MaxAgeNumBlocks + +This is the maximum age of evidence in blocks. +This is enforced by Tendermint consensus. + +If a block includes evidence older than this (AND the evidence was created more +than `MaxAgeDuration` ago), the block will be rejected (validators won't vote +for it). + +Must have `MaxAgeNumBlocks > 0`. + +##### EvidenceParams.MaxBytes + +This is the maximum size of total evidence in bytes that can be committed to a +single block. It should fall comfortably under the max block bytes. + +Its value must not exceed the size of +a block minus its overhead ( ~ `BlockParams.MaxBytes`). + +Must have `MaxBytes > 0`. + +##### SynchronyParams.MessageDelay + +This sets a bound on how long a proposal message may take to reach all +validators on a network and still be considered valid. + +This parameter is part of the +[proposer-based timestamps](../consensus/proposer-based-timestamp) +(PBTS) algorithm. + +##### SynchronyParams.Precision + +This sets a bound on how skewed a proposer's clock may be from any validator +on the network while still producing valid proposals. + +This parameter is part of the +[proposer-based timestamps](../consensus/proposer-based-timestamp) +(PBTS) algorithm. + +##### TimeoutParams.Propose + +Timeout in ms of the propose step of the Tendermint consensus algorithm. +This value is the initial timeout at every height (round 0). + +The value in subsequent rounds is modified by parameter `ProposeDelta`. 
+When a new height is started, the `Propose` timeout value is reset to this
+parameter.
+
+If a node waiting for a proposal message does not receive one matching its
+current height and round before this timeout, the node will issue a
+`nil` prevote for the round and advance to the next step.
+
+##### TimeoutParams.ProposeDelta
+
+Increment in ms to be added to the `Propose` timeout every time the Tendermint
+consensus algorithm advances one round in a given height.
+
+When a new height is started, the `Propose` timeout value is reset.
+
+##### TimeoutParams.Vote
+
+Timeout in ms of the prevote and precommit steps of the Tendermint consensus
+algorithm.
+This value is the initial timeout at every height (round 0).
+
+The value in subsequent rounds is modified by parameter `VoteDelta`.
+When a new height is started, the `Vote` timeout value is reset to this
+parameter.
+
+The `Vote` timeout does not begin until a quorum of votes has been received.
+Once a quorum of votes has been seen and this timeout elapses, Tendermint will
+proceed to the next step of the consensus algorithm. If Tendermint receives
+all of the remaining votes before the end of the timeout, it will proceed
+to the next step immediately.
+
+##### TimeoutParams.VoteDelta
+
+Increment in ms to be added to the `Vote` timeout every time the Tendermint
+consensus algorithm advances one round in a given height.
+
+When a new height is started, the `Vote` timeout value is reset.
+
+##### TimeoutParams.Commit
+
+This configures how long Tendermint will wait after receiving a quorum of
+precommits before beginning consensus for the next height. This can be
+used to allow slow precommits to arrive for inclusion in the next height
+before progressing.
+
+##### TimeoutParams.BypassCommitTimeout
+
+This configures the node to proceed immediately to the next height once the
+node has received all precommits for a block, forgoing the remaining commit timeout.
+Setting this parameter to `false` (the default) causes Tendermint to wait +for the full commit timeout configured in `TimeoutParams.Commit`. + +#### Updating Consensus Parameters + +The application may set the `ConsensusParams` during +[`InitChain`](./abci%2B%2B_methods_002_draft.md#initchain), +and update them during +[`FinalizeBlock`](./abci%2B%2B_methods_002_draft.md#finalizeblock) +(next block execution mode) or +[`PrepareProposal`](./abci%2B%2B_methods_002_draft.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods_002_draft.md#processproposal) +(same block execution mode). +If the `ConsensusParams` is empty, it will be ignored. Each field +that is not empty will be applied in full. For instance, if updating the +`Block.MaxBytes`, applications must also set the other `Block` fields (like +`Block.MaxGas`), even if they are unchanged, as they will otherwise cause the +value to be updated to the default. + +##### `InitChain` + +`ResponseInitChain` includes a `ConsensusParams` parameter. +If `ConsensusParams` is `nil`, Tendermint will use the params loaded in the genesis +file. If `ConsensusParams` is not `nil`, Tendermint will use it. +This way the application can determine the initial consensus parameters for the +blockchain. + +##### `FinalizeBlock`, `PrepareProposal`/`ProcessProposal` + +In next block execution mode, `ResponseFinalizeBlock` accepts a `ConsensusParams` parameter. +If `ConsensusParams` is `nil`, Tendermint will do nothing. +If `ConsensusParams` is not `nil`, Tendermint will use it. +This way the application can update the consensus parameters over time. + +Likewise, in same block execution mode, `PrepareProposal` and `ProcessProposal` include +a `ConsensusParams` parameter. `PrepareProposal` may return a `ConsensusParams` to update +the consensus parameters in the block that is about to be proposed. If it returns `nil` +the consensus parameters will not be updated. 
`ProcessProposal` also accepts a
+`ConsensusParams` parameter, which Tendermint will use to calculate the corresponding
+hashes and sanity-check them against those of the block that triggered `ProcessProposal`
+in the first place.
+
+Note the updates returned in block `H` will take effect right away for block
+`H+1` (both in next block and same block execution mode).
+
+### `Query`
+
+`Query` is a generic method with lots of flexibility to enable diverse sets
+of queries on application state. Tendermint makes use of `Query` to filter new peers
+based on ID and IP, and exposes `Query` to the user over RPC.
+
+Note that calls to `Query` are not replicated across nodes, but rather query the
+local node's state - hence they may return stale reads. For reads that require
+consensus, use a transaction.
+
+The most important use of `Query` is to return Merkle proofs of the application state at some height
+that can be used for efficient application-specific light-clients.
+
+Note Tendermint has technically no requirements from the `Query`
+message for normal operation - that is, the ABCI app developer need not implement
+Query functionality if they do not wish to.
+
+#### Query Proofs
+
+The Tendermint block header includes a number of hashes, each providing an
+anchor for some type of proof about the blockchain. The `ValidatorsHash` enables
+quick verification of the validator set, the `DataHash` gives quick
+verification of the transactions included in the block.
+
+The `AppHash` is unique in that it is application specific, and allows for
+application-specific Merkle proofs about the state of the application.
+While some applications keep all relevant state in the transactions themselves
+(like Bitcoin and its UTXOs), others maintain a separated state that is
+computed deterministically *from* transactions, but is not contained directly in
+the transactions themselves (like Ethereum contracts and accounts).
+
+For such applications, the `AppHash` provides a much more efficient way to verify light-client proofs.
+
+ABCI applications can take advantage of more efficient light-client proofs for
+their state as follows:
+
+* in next block execution mode, return the Merkle root of the deterministic application state in
+  `ResponseCommit.Data`. This Merkle root will be included as the `AppHash` in the next block.
+* in same block execution mode, return the Merkle root of the deterministic application state
+  in `ResponsePrepareProposal.AppHash`. This Merkle root will be included as the `AppHash` in
+  the block that is about to be proposed.
+* return efficient Merkle proofs about that application state in `ResponseQuery.Proof`
+  that can be verified using the `AppHash` of the corresponding block.
+
+For instance, this allows an application's light-client to verify proofs of
+absence in the application state, something which is much less efficient to do using the block hash.
+
+Some applications (eg. Ethereum, Cosmos-SDK) have multiple "levels" of Merkle trees,
+where the leaves of one tree are the root hashes of others. To support this, and
+the general variability in Merkle proofs, the `ResponseQuery.Proof` has some minimal structure:
+
+```protobuf
+message ProofOps {
+  repeated ProofOp ops = 1;
+}
+
+message ProofOp {
+  string type = 1;
+  bytes key = 2;
+  bytes data = 3;
+}
+```
+
+Each `ProofOp` contains a proof for a single key in a single Merkle tree, of the specified `type`.
+This allows ABCI to support many different kinds of Merkle trees, encoding
+formats, and proofs (eg. of presence and absence) just by varying the `type`.
+The `data` contains the actual encoded proof, encoded according to the `type`.
+When verifying the full proof, the root hash for one ProofOp is the value being
+verified for the next ProofOp in the list. The root hash of the final ProofOp in
+the list should match the `AppHash` being verified against.
+
+#### Peer Filtering
+
+When Tendermint connects to a peer, it sends two queries to the ABCI application
+using the following paths, with no additional data:
+
+* `/p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denote the IP address and
+  the port of the connection
+* `p2p/filter/id/<ID>`, where `<ID>` is the peer node ID (ie. the
+  pubkey.Address() for the peer's PubKey)
+
+If either of these queries returns a non-zero ABCI code, Tendermint will refuse
+to connect to the peer.
+
+#### Paths
+
+Queries are directed at paths, and may optionally include additional data.
+
+The expectation is for there to be some number of high level paths
+differentiating concerns, like `/p2p`, `/store`, and `/app`. Currently,
+Tendermint only uses `/p2p`, for filtering peers. For more advanced use, see the
+implementation of
+[Query in the Cosmos-SDK](https://github.com/cosmos/cosmos-sdk/blob/v0.23.1/baseapp/baseapp.go#L333).
+
+### Crash Recovery
+
+On startup, Tendermint calls the `Info` method on the Info Connection to get the latest
+committed state of the app. The app MUST return information consistent with the
+last block it successfully completed Commit for.
+
+If the app successfully committed block H, then `last_block_height = H` and `last_block_app_hash = <hash returned by Commit for block H>`. If the app
+failed during the Commit of block H, then `last_block_height = H-1` and
+`last_block_app_hash = <hash returned by Commit for block H-1>`.
+
+We now distinguish three heights, and describe how Tendermint syncs itself with
+the app.
+
+```md
+storeBlockHeight = height of the last block Tendermint saw a commit for
+stateBlockHeight = height of the last block for which Tendermint completed all
+                   block processing and saved all ABCI results to disk
+appBlockHeight = height of the last block for which ABCI app successfully
+                 completed Commit
+
+```
+
+Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight`.
+Note also Tendermint never calls Commit on an ABCI app twice for the same height.
+
+The procedure is as follows.
+
+First, some simple start conditions:
+
+If `appBlockHeight == 0`, then call InitChain.
+
+If `storeBlockHeight == 0`, we're done.
+
+Now, some sanity checks:
+
+If `storeBlockHeight < appBlockHeight`, error
+If `storeBlockHeight < stateBlockHeight`, panic
+If `storeBlockHeight > stateBlockHeight+1`, panic
+
+Now, the meat:
+
+If `storeBlockHeight == stateBlockHeight && appBlockHeight < storeBlockHeight`,
+replay all blocks in full from `appBlockHeight` to `storeBlockHeight`.
+This happens if we completed processing the block, but the app forgot its height.
+
+If `storeBlockHeight == stateBlockHeight && appBlockHeight == storeBlockHeight`, we're done.
+This happens if we crashed at an opportune spot.
+
+If `storeBlockHeight == stateBlockHeight+1`
+This happens if we started processing the block but didn't finish.
+
+If `appBlockHeight < stateBlockHeight`
+    replay all blocks in full from `appBlockHeight` to `storeBlockHeight-1`,
+    and replay the block at `storeBlockHeight` using the WAL.
+This happens if the app forgot the last block it committed.
+
+If `appBlockHeight == stateBlockHeight`,
+    replay the last block (storeBlockHeight) in full.
+This happens if we crashed before the app finished Commit.
+
+If `appBlockHeight == storeBlockHeight`
+    update the state using the saved ABCI responses but don't run the block against the real app.
+This happens if we crashed after the app finished Commit but before Tendermint saved the state.
+
+### State Sync
+
+A new node joining the network can simply join consensus at the genesis height and replay all
+historical blocks until it is caught up. However, for large chains this can take a significant
+amount of time, often on the order of days or weeks.
+
+State sync is an alternative mechanism for bootstrapping a new node, where it fetches a snapshot
+of the state machine at a given height and restores it. Depending on the application, this can
+be several orders of magnitude faster than replaying blocks.
+ +Note that state sync does not currently backfill historical blocks, so the node will have a +truncated block history - users are advised to consider the broader network implications of this in +terms of block availability and auditability. This functionality may be added in the future. + +For details on the specific ABCI calls and types, see the +[methods](abci%2B%2B_methods_002_draft.md) section. + +#### Taking Snapshots + +Applications that want to support state syncing must take state snapshots at regular intervals. How +this is accomplished is entirely up to the application. A snapshot consists of some metadata and +a set of binary chunks in an arbitrary format: + +* `Height (uint64)`: The height at which the snapshot is taken. It must be taken after the given + height has been committed, and must not contain data from any later heights. + +* `Format (uint32)`: An arbitrary snapshot format identifier. This can be used to version snapshot + formats, e.g. to switch from Protobuf to MessagePack for serialization. The application can use + this when restoring to choose whether to accept or reject a snapshot. + +* `Chunks (uint32)`: The number of chunks in the snapshot. Each chunk contains arbitrary binary + data, and should be less than 16 MB; 10 MB is a good starting point. + +* `Hash ([]byte)`: An arbitrary hash of the snapshot. This is used to check whether a snapshot is + the same across nodes when downloading chunks. + +* `Metadata ([]byte)`: Arbitrary snapshot metadata, e.g. chunk hashes for verification or any other + necessary info. + +For a snapshot to be considered the same across nodes, all of these fields must be identical. When +sent across the network, snapshot metadata messages are limited to 4 MB. + +When a new node is running state sync and discovering snapshots, Tendermint will query an existing +application via the ABCI `ListSnapshots` method to discover available snapshots, and load binary +snapshot chunks via `LoadSnapshotChunk`. 
The application is free to choose how to implement this +and which formats to use, but must provide the following guarantees: + +* **Consistent:** A snapshot must be taken at a single isolated height, unaffected by + concurrent writes. This can be accomplished by using a data store that supports ACID + transactions with snapshot isolation. + +* **Asynchronous:** Taking a snapshot can be time-consuming, so it must not halt chain progress, + for example by running in a separate thread. + +* **Deterministic:** A snapshot taken at the same height in the same format must be identical + (at the byte level) across nodes, including all metadata. This ensures good availability of + chunks, and that they fit together across nodes. + +A very basic approach might be to use a datastore with MVCC transactions (such as RocksDB), +start a transaction immediately after block commit, and spawn a new thread which is passed the +transaction handle. This thread can then export all data items, serialize them using e.g. +Protobuf, hash the byte stream, split it into chunks, and store the chunks in the file system +along with some metadata - all while the blockchain is applying new blocks in parallel. + +A more advanced approach might include incremental verification of individual chunks against the +chain app hash, parallel or batched exports, compression, and so on. + +Old snapshots should be removed after some time - generally only the last two snapshots are needed +(to prevent the last one from being removed while a node is restoring it). + +#### Bootstrapping a Node + +An empty node can be state synced by setting the configuration option `statesync.enabled = +true`. The node also needs the chain genesis file for basic chain info, and configuration for +light client verification of the restored snapshot: a set of Tendermint RPC servers, and a +trusted header hash and corresponding height from a trusted source, via the `statesync` +configuration section. 
+ +Once started, the node will connect to the P2P network and begin discovering snapshots. These +will be offered to the local application via the `OfferSnapshot` ABCI method. Once a snapshot +is accepted Tendermint will fetch and apply the snapshot chunks. After all chunks have been +successfully applied, Tendermint verifies the app's `AppHash` against the chain using the light +client, then switches the node to normal consensus operation. + +##### Snapshot Discovery + +When the empty node joins the P2P network, it asks all peers to report snapshots via the +`ListSnapshots` ABCI call (limited to 10 per node). After some time, the node picks the most +suitable snapshot (generally prioritized by height, format, and number of peers), and offers it +to the application via `OfferSnapshot`. The application can choose a number of responses, +including accepting or rejecting it, rejecting the offered format, rejecting the peer who sent +it, and so on. Tendermint will keep discovering and offering snapshots until one is accepted or +the application aborts. + +##### Snapshot Restoration + +Once a snapshot has been accepted via `OfferSnapshot`, Tendermint begins downloading chunks from +any peers that have the same snapshot (i.e. that have identical metadata fields). Chunks are +spooled in a temporary directory, and then given to the application in sequential order via +`ApplySnapshotChunk` until all chunks have been accepted. + +The method for restoring snapshot chunks is entirely up to the application. + +During restoration, the application can respond to `ApplySnapshotChunk` with instructions for how +to continue. This will typically be to accept the chunk and await the next one, but it can also +ask for chunks to be refetched (either the current one or any number of previous ones), P2P peers +to be banned, snapshots to be rejected or retried, and a number of other responses - see the ABCI +reference for details. 
+ +If Tendermint fails to fetch a chunk after some time, it will reject the snapshot and try a +different one via `OfferSnapshot` - the application can choose whether it wants to support +restarting restoration, or simply abort with an error. + +##### Snapshot Verification + +Once all chunks have been accepted, Tendermint issues an `Info` ABCI call to retrieve the +`LastBlockAppHash`. This is compared with the trusted app hash from the chain, retrieved and +verified using the light client. Tendermint also checks that `LastBlockHeight` corresponds to the +height of the snapshot. + +This verification ensures that an application is valid before joining the network. However, the +snapshot restoration may take a long time to complete, so applications may want to employ additional +verification during the restore to detect failures early. This might e.g. include incremental +verification of each chunk against the app hash (using bundled Merkle proofs), checksums to +protect against data corruption by the disk or network, and so on. However, it is important to +note that the only trusted information available is the app hash, and all other snapshot metadata +can be spoofed by adversaries. + +Apps may also want to consider state sync denial-of-service vectors, where adversaries provide +invalid or harmful snapshots to prevent nodes from joining the network. The application can +counteract this by asking Tendermint to ban peers. As a last resort, node operators can use +P2P configuration options to whitelist a set of trusted peers that can provide valid snapshots. + +##### Transition to Consensus + +Once the snapshots have all been restored, Tendermint gathers additional information necessary for +bootstrapping the node (e.g. chain ID, consensus parameters, validator sets, and block headers) +from the genesis file and light client RPC servers. 
It also calls `Info` to verify the following: + +* that the app hash from the snapshot it has delivered to the Application matches the apphash + stored in the next height's block (in next block execution), or the current block's height + (same block execution) +* that the version that the Application returns in `ResponseInfo` matches the version in the + current height's block header + +Once the state machine has been restored and Tendermint has gathered this additional +information, it transitions to block sync (if enabled) to fetch any remaining blocks up the chain +head, and then transitions to regular consensus operation. At this point the node operates like +any other node, apart from having a truncated block history at the height of the restored snapshot. diff --git a/spec/abci++/abci++_methods_002_draft.md b/spec/abci++/abci++_methods_002_draft.md index d1782bbdc1..4113a0c585 100644 --- a/spec/abci++/abci++_methods_002_draft.md +++ b/spec/abci++/abci++_methods_002_draft.md @@ -80,13 +80,15 @@ title: Methods * **Usage**: * Called once upon genesis. - * If ResponseInitChain.Validators is empty, the initial validator set will be the RequestInitChain.Validators - * If ResponseInitChain.Validators is not empty, it will be the initial - validator set (regardless of what is in RequestInitChain.Validators). + * If `ResponseInitChain.Validators` is empty, the initial validator set will be the `RequestInitChain.Validators` + * If `ResponseInitChain.Validators` is not empty, it will be the initial + validator set (regardless of what is in `RequestInitChain.Validators`). * This allows the app to decide if it wants to accept the initial validator - set proposed by tendermint (ie. in the genesis file), or if it wants to use - a different one (perhaps computed based on some application specific - information in the genesis file). + set proposed by tendermint (ie. 
in the genesis file), or if it wants to use + a different one (perhaps computed based on some application specific + information in the genesis file). + * Both `ResponseInitChain.Validators` and `ResponseInitChain.Validators` are [ValidatorUpdate](#validatorupdate) structs. + So, technically, they both are _updating_ the set of validators from the empty set. ### Query @@ -302,7 +304,7 @@ title: Methods |-------------------------|--------------------------------------------------|---------------------------------------------------------------------------------------------|--------------| | tx_records | repeated [TxRecord](#txrecord) | Possibly modified list of transactions that have been picked as part of the proposed block. | 2 | | app_hash | bytes | The Merkle root hash of the application state. | 3 | - | tx_results | repeated [ExecTxResult](#txresult) | List of structures containing the data resulting from executing the transactions | 4 | + | tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions | 4 | | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 5 | | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 6 | @@ -414,7 +416,7 @@ Note that, if _p_ has a non-`nil` _validValue_, Tendermint will use it as propos |-------------------------|--------------------------------------------------|-----------------------------------------------------------------------------------|--------------| | status | [ProposalStatus](#proposalstatus) | `enum` that signals if the application finds the proposal valid. | 1 | | app_hash | bytes | The Merkle root hash of the application state. | 2 | - | tx_results | repeated [ExecTxResult](#txresult) | List of structures containing the data resulting from executing the transactions. 
| 3 | + | tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions. | 3 | | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 4 | | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 5 | @@ -582,7 +584,7 @@ from this condition, but not sure), and _p_ receives a Precommit message for rou | Name | Type | Description | Field Number | |-------------------------|-------------------------------------------------------------|----------------------------------------------------------------------------------|--------------| | events | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing | 1 | - | tx_results | repeated [ExecTxResult](#txresult) | List of structures containing the data resulting from executing the transactions | 2 | + | tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions | 2 | | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 3 | | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 4 | | app_hash | bytes | The Merkle root hash of the application state. 
| 5 | From 850ae93a90a507b6fdaf05c796d69de5126ace98 Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Thu, 19 May 2022 10:35:36 +0200 Subject: [PATCH 044/203] Adapted `client-server.md` from ABCI directory (#8510) * Copied over 'client server' section from ABCI spec * Adapted the ABCI text in 'Client and Server' section * Minor changes to README * Removed TODO from Readme * Update spec/abci++/abci++_client_server_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_client_server_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_client_server_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_client_server_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_client_server_002_draft.md Co-authored-by: Daniel * Update spec/abci++/abci++_client_server_002_draft.md Co-authored-by: Daniel * Addressed comments * Moved GRPC link out of the Tendermint-specific occurrence * Fixed merge Co-authored-by: Daniel --- spec/abci++/README.md | 10 +- spec/abci++/abci++_client_server_002_draft.md | 102 ++++++++++++++++++ 2 files changed, 105 insertions(+), 7 deletions(-) create mode 100644 spec/abci++/abci++_client_server_002_draft.md diff --git a/spec/abci++/README.md b/spec/abci++/README.md index 0f7a87c4ae..a22babfeed 100644 --- a/spec/abci++/README.md +++ b/spec/abci++/README.md @@ -25,19 +25,15 @@ This allows Tendermint to run with applications written in many programming lang This specification is split as follows: -- [Overview and basic concepts](./abci++_basic_concepts_002_draft.md) - interface's overview and concepts needed to understand other parts of this specification. +- [Overview and basic concepts](./abci++_basic_concepts_002_draft.md) - interface's overview and concepts + needed to understand other parts of this specification. - [Methods](./abci++_methods_002_draft.md) - complete details on all ABCI++ methods and message types. 
- [Requirements for the Application](./abci++_app_requirements_002_draft.md) - formal requirements on the Application's logic to ensure Tendermint properties such as liveness. These requirements define what - Tendermint expects from the Application. + Tendermint expects from the Application; second part on managing ABCI application state and related topics. - [Tendermint's expected behavior](./abci++_tmint_expected_behavior_002_draft.md) - specification of how the different ABCI++ methods may be called by Tendermint. This explains what the Application is to expect from Tendermint. - ->**TODO** Re-read these and remove redundant info - -- [Applications](../abci/apps.md) - how to manage ABCI application state and other - details about building ABCI applications - [Client and Server](../abci/client-server.md) - for those looking to implement their own ABCI application servers diff --git a/spec/abci++/abci++_client_server_002_draft.md b/spec/abci++/abci++_client_server_002_draft.md new file mode 100644 index 0000000000..f26ee8cd51 --- /dev/null +++ b/spec/abci++/abci++_client_server_002_draft.md @@ -0,0 +1,102 @@ +--- +order: 5 +title: Client and Server +--- + +# Client and Server + +This section is for those looking to implement their own ABCI Server, perhaps in +a new programming language. + +You are expected to have read all previous sections of ABCI++ specification, namely +[Basic Concepts](./abci%2B%2B_basic_concepts_002_draft.md), +[Methods](./abci%2B%2B_methods_002_draft.md), +[Application Requirements](./abci%2B%2B_app_requirements_002_draft.md), and +[Expected Behavior](./abci%2B%2B_tmint_expected_behavior_002_draft.md). + +## Message Protocol and Synchrony + +The message protocol consists of pairs of requests and responses defined in the +[protobuf file](../../proto/tendermint/abci/types.proto). + +Some messages have no fields, while others may include byte-arrays, strings, integers, +or custom protobuf types. 
+
+For more details on protobuf, see the [documentation](https://developers.google.com/protocol-buffers/docs/overview).
+
+As of v0.36 requests are synchronous. For each of ABCI++'s four connections (see
+[Connections](./abci%2B%2B_app_requirements_002_draft.md)), when Tendermint issues a request to the
+Application, it will wait for the response before continuing execution. As a side effect,
+requests and responses are ordered for each connection, but not necessarily across connections.
+
+## Server Implementations
+
+To use ABCI in your programming language of choice, there must be an ABCI
+server in that language. Tendermint supports four implementations of the ABCI server:
+
+- in Tendermint's repository:
+  - In-process
+  - ABCI-socket
+  - GRPC
+- [tendermint-rs](https://github.com/informalsystems/tendermint-rs)
+- [tower-abci](https://github.com/penumbra-zone/tower-abci)
+
+The implementations in Tendermint's repository can be tested using `abci-cli` by setting
+the `--abci` flag appropriately.
+
+See examples, in various stages of maintenance, in
+[Go](https://github.com/tendermint/tendermint/tree/master/abci/server),
+[JavaScript](https://github.com/tendermint/js-abci),
+[C++](https://github.com/mdyring/cpp-tmsp), and
+[Java](https://github.com/jTendermint/jabci).
+
+### In Process
+
+The simplest implementation uses function calls in Golang.
+This means ABCI applications written in Golang can be linked with Tendermint Core and run as a single binary.
+
+### GRPC
+
+If you are not using Golang,
+but [GRPC](https://grpc.io/) is available in your language, this is the easiest approach,
+though it will have significant performance overhead.
+
+Please check GRPC's documentation to learn how to set up the Application as an
+ABCI GRPC server.
+
+### Socket
+
+Tendermint's socket-based ABCI interface is an asynchronous,
+raw socket server which provides ordered message passing over unix or tcp.
+Messages are serialized using Protobuf3 and length-prefixed with a [signed Varint](https://developers.google.com/protocol-buffers/docs/encoding?csw=1#signed-integers).
+
+If GRPC is not available in your language, if your application requires higher
+performance, or if you otherwise enjoy programming, you may implement your own
+ABCI server using Tendermint's socket-based ABCI interface.
+The first step is to auto-generate the relevant data
+types and codec in your language using `protoc`.
+In addition to being proto3 encoded, messages coming over
+the socket are length-prefixed. proto3 doesn't have an
+official length-prefix standard, so we use our own. The first byte in
+the prefix represents the length of the Big Endian encoded length. The
+remaining bytes in the prefix are the Big Endian encoded length.
+
+For example, if the proto3 encoded ABCI message is `0xDEADBEEF` (4
+bytes long), the length-prefixed message is `0x0104DEADBEEF` (`01` byte for encoding the length `04` of the message). If the proto3
+encoded ABCI message is 65535 bytes long, the length-prefixed message
+would start with `0x02FFFF`.
+
+Note that this length-prefixing scheme does not apply for GRPC.
+
+Note that your ABCI server must be able to support multiple connections, as
+Tendermint uses four connections.
+
+## Client
+
+There are currently two use-cases for an ABCI client. One is testing
+tools that allow ABCI requests to be sent to the actual application via
+command line. An example of this is `abci-cli`, which accepts CLI commands
+to send corresponding ABCI requests.
+The other is a consensus engine, such as Tendermint Core,
+which makes ABCI requests to the application as prescribed by the consensus
+algorithm used.
From b4bf74ba9c367b5b640f7da14eb40bf2dad24a5e Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 19 May 2022 18:16:34 +0200 Subject: [PATCH 045/203] abci: serialize semantics of abci client (#8578) Prior to this change, it was possible that two client calls could enqueue their requests in the response queue in a different order than they were processed by the sender goroutine. This violates the requirement that responses must be delivered in the same order they were enqueued. To avert this, make the sender goroutine responsible for enqueuing. Also, remove an unnecessary channel buffer. --- abci/client/socket_client.go | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index aa4fdcbe93..8904d557db 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -17,12 +17,6 @@ import ( "github.com/tendermint/tendermint/libs/service" ) -const ( - // reqQueueSize is the max number of queued async requests. - // (memory: 256MB max assuming 1MB transactions) - reqQueueSize = 256 -) - // This is goroutine-safe, but users should beware that the application in // general is not meant to be interfaced with concurrent callers. 
type socketClient struct { @@ -48,7 +42,7 @@ var _ Client = (*socketClient)(nil) func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client { cli := &socketClient{ logger: logger, - reqQueue: make(chan *requestAndResponse, reqQueueSize), + reqQueue: make(chan *requestAndResponse), mustConnect: mustConnect, addr: addr, reqSent: list.New(), @@ -127,6 +121,8 @@ func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer cli.stopForError(fmt.Errorf("flush buffer: %w", err)) return } + + cli.trackRequest(reqres) } } } @@ -158,7 +154,7 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader } } -func (cli *socketClient) willSendReq(reqres *requestAndResponse) { +func (cli *socketClient) trackRequest(reqres *requestAndResponse) { cli.mtx.Lock() defer cli.mtx.Unlock() @@ -199,7 +195,6 @@ func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*ty } reqres := makeReqRes(req) - cli.willSendReq(reqres) select { case cli.reqQueue <- reqres: From 4a9bbe047f8dc762e9020b528a019899c781ddcc Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 19 May 2022 12:11:57 -0700 Subject: [PATCH 046/203] Fix lock sequencing in socket client request tracking. (#8581) * Fix lock sequencing in socket client request tracking. It is not safe to check base service state (IsRunning) while holding the lock for the client state. If we do, then during shutdown we may deadlock with the invocation of the OnStop handler, which the base service executes while holding the service lock. * Enqueue pending requests before sending them to the server. If we don't do this, the server can reply before the request lands in the queue. That will cause the receiver to terminate early for an unsolicited response. So enqueue first: This is safe because we're doing it in the same routine as services the channel, so we won't take another message till we are safely past that point. * Document what we did. 
* Fix socket paths in tests. --- abci/client/socket_client.go | 14 +++++++++----- internal/proxy/client_test.go | 6 +++--- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 8904d557db..7dfcf76cc3 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -112,6 +112,11 @@ func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer case <-ctx.Done(): return case reqres := <-cli.reqQueue: + // N.B. We must enqueue before sending out the request, otherwise the + // server may reply before we do it, and the receiver will fail for an + // unsolicited reply. + cli.trackRequest(reqres) + if err := types.WriteMessage(reqres.Request, bw); err != nil { cli.stopForError(fmt.Errorf("write to buffer: %w", err)) return @@ -121,8 +126,6 @@ func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer cli.stopForError(fmt.Errorf("flush buffer: %w", err)) return } - - cli.trackRequest(reqres) } } } @@ -155,13 +158,14 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader } func (cli *socketClient) trackRequest(reqres *requestAndResponse) { - cli.mtx.Lock() - defer cli.mtx.Unlock() - + // N.B. We must NOT hold the client state lock while checking this, or we + // may deadlock with shutdown. 
if !cli.IsRunning() { return } + cli.mtx.Lock() + defer cli.mtx.Unlock() cli.reqSent.PushBack(reqres) } diff --git a/internal/proxy/client_test.go b/internal/proxy/client_test.go index 09ac3f2c87..41a34bde7d 100644 --- a/internal/proxy/client_test.go +++ b/internal/proxy/client_test.go @@ -58,7 +58,7 @@ func (app *appConnTest) Info(ctx context.Context, req *types.RequestInfo) (*type var SOCKET = "socket" func TestEcho(t *testing.T) { - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) + sockPath := fmt.Sprintf("unix://%s/echo_%v.sock", t.TempDir(), tmrand.Str(6)) logger := log.NewNopLogger() client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) if err != nil { @@ -98,7 +98,7 @@ func TestEcho(t *testing.T) { func BenchmarkEcho(b *testing.B) { b.StopTimer() // Initialize - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) + sockPath := fmt.Sprintf("unix://%s/echo_%v.sock", b.TempDir(), tmrand.Str(6)) logger := log.NewNopLogger() client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) if err != nil { @@ -146,7 +146,7 @@ func TestInfo(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) + sockPath := fmt.Sprintf("unix://%s/echo_%v.sock", t.TempDir(), tmrand.Str(6)) logger := log.NewNopLogger() client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) if err != nil { From ad73e6da2f3d8f4cb00a0c4649ba06b941b40ec9 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Thu, 19 May 2022 15:35:30 -0400 Subject: [PATCH 047/203] consensus: update state from store before use in reactor (#8576) Closes: #8575 This PR aims to fix the `LastCommitRound can only be negative for initial height 0` issue we see in the e2e tests by initializing the `state` object before starting the receive routines in the consensus reactor. 
This is somewhat inelegant, but it should fix the issue. --- internal/consensus/reactor.go | 2 ++ internal/consensus/state.go | 18 +++++++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 1a9d49057e..5015233394 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -216,6 +216,8 @@ func (r *Reactor) OnStart(ctx context.Context) error { if err := r.state.Start(ctx); err != nil { return err } + } else if err := r.state.updateStateFromStore(); err != nil { + return err } go r.updateRoundStateRoutine(ctx) diff --git a/internal/consensus/state.go b/internal/consensus/state.go index b016e26878..320042d308 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -122,9 +122,8 @@ type State struct { // store blocks and commits blockStore sm.BlockStore - stateStore sm.Store - initialStatePopulated bool - skipBootstrapping bool + stateStore sm.Store + skipBootstrapping bool // create and execute blocks blockExec *sm.BlockExecutor @@ -248,9 +247,6 @@ func NewState( } func (cs *State) updateStateFromStore() error { - if cs.initialStatePopulated { - return nil - } state, err := cs.stateStore.Load() if err != nil { return fmt.Errorf("loading state: %w", err) @@ -259,6 +255,15 @@ func (cs *State) updateStateFromStore() error { return nil } + eq, err := state.Equals(cs.state) + if err != nil { + return fmt.Errorf("comparing state: %w", err) + } + // if the new state is equivalent to the old state, we should not trigger a state update. + if eq { + return nil + } + // We have no votes, so reconstruct LastCommit from SeenCommit. if state.LastBlockHeight > 0 { cs.reconstructLastCommit(state) @@ -266,7 +271,6 @@ func (cs *State) updateStateFromStore() error { cs.updateToState(state) - cs.initialStatePopulated = true return nil } From 4786a5ffded3a59865772242266b84c736cf01ff Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Thu, 19 May 2022 15:23:14 -0700 Subject: [PATCH 048/203] Remove the periodically-scheduled Markdown link check. (#8580) The state of links in our documentation is now sufficiently good that we are running link checks during PRs. There is no longer any practical benefit to running the scheduled "global" check. Most of the errors it reports are rate limitations anyway (429). --- .github/workflows/linkchecker.yml | 12 ------------ 1 file changed, 12 deletions(-) delete mode 100644 .github/workflows/linkchecker.yml diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml deleted file mode 100644 index e2ba808617..0000000000 --- a/.github/workflows/linkchecker.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: Check Markdown links -on: - schedule: - - cron: '* */24 * * *' -jobs: - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: creachadair/github-action-markdown-link-check@master - with: - folder-path: "docs" From 0cceadf4d4c5acf9d390f22616b80070fece52d4 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Fri, 20 May 2022 17:46:52 -0400 Subject: [PATCH 049/203] abci++: add consensus parameter logic to control vote extension require height (#8547) This PR makes vote extensions optional within Tendermint. A new ConsensusParams field, called ABCIParams.VoteExtensionsEnableHeight, has been added to toggle whether or not extensions should be enabled or disabled depending on the current height of the consensus engine. 
Related to: #8453 --- internal/blocksync/pool.go | 6 +- internal/blocksync/reactor.go | 121 +++++--- internal/blocksync/reactor_test.go | 179 +++++++---- internal/consensus/common_test.go | 15 +- internal/consensus/msgs.go | 6 +- internal/consensus/reactor.go | 21 +- internal/consensus/reactor_test.go | 113 +++++++ internal/consensus/replay_test.go | 8 +- internal/consensus/state.go | 148 ++++++--- internal/consensus/state_test.go | 285 +++++++++++++----- internal/consensus/types/height_vote_set.go | 26 +- .../consensus/types/height_vote_set_test.go | 2 +- internal/evidence/pool_test.go | 7 +- internal/evidence/verify_test.go | 24 +- internal/state/execution.go | 22 +- internal/state/execution_test.go | 112 ++++++- internal/state/mocks/block_store.go | 7 +- internal/state/services.go | 3 +- internal/state/store.go | 19 ++ internal/state/validation_test.go | 6 +- internal/statesync/reactor_test.go | 4 +- internal/store/store.go | 88 ++++-- internal/store/store_test.go | 116 ++++++- internal/test/factory/params.go | 1 + node/node_test.go | 2 +- test/e2e/runner/evidence.go | 6 +- test/e2e/tests/app_test.go | 1 + types/block.go | 109 ++++++- types/block_test.go | 165 ++++++++-- types/evidence_test.go | 6 +- types/params.go | 24 ++ types/validation_test.go | 10 +- types/validator_set_test.go | 6 +- types/vote.go | 71 +++-- types/vote_set.go | 38 ++- types/vote_set_test.go | 90 +++++- types/vote_test.go | 50 ++- 37 files changed, 1510 insertions(+), 407 deletions(-) diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index 30bb6962e1..64ce54dc62 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -280,7 +280,7 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, extComm pool.mtx.Lock() defer pool.mtx.Unlock() - if block.Height != extCommit.Height { + if extCommit != nil && block.Height != extCommit.Height { return fmt.Errorf("heights don't match, not adding block (block height: %d, commit height: %d)", 
block.Height, extCommit.Height) } @@ -597,7 +597,9 @@ func (bpr *bpRequester) setBlock(block *types.Block, extCommit *types.ExtendedCo return false } bpr.block = block - bpr.extCommit = extCommit + if extCommit != nil { + bpr.extCommit = extCommit + } bpr.mtx.Unlock() select { diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 144595889a..6c1c060e74 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -185,31 +185,39 @@ func (r *Reactor) OnStop() { // Otherwise, we'll respond saying we do not have it. func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh *p2p.Channel) error { block := r.store.LoadBlock(msg.Height) - if block != nil { - extCommit := r.store.LoadBlockExtendedCommit(msg.Height) - if extCommit == nil { - return fmt.Errorf("found block in store without extended commit: %v", block) - } - blockProto, err := block.ToProto() - if err != nil { - return fmt.Errorf("failed to convert block to protobuf: %w", err) - } - + if block == nil { + r.logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) return blockSyncCh.Send(ctx, p2p.Envelope{ - To: peerID, - Message: &bcproto.BlockResponse{ - Block: blockProto, - ExtCommit: extCommit.ToProto(), - }, + To: peerID, + Message: &bcproto.NoBlockResponse{Height: msg.Height}, }) } - r.logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) + state, err := r.stateStore.Load() + if err != nil { + return fmt.Errorf("loading state: %w", err) + } + var extCommit *types.ExtendedCommit + if state.ConsensusParams.ABCI.VoteExtensionsEnabled(msg.Height) { + extCommit = r.store.LoadBlockExtendedCommit(msg.Height) + if extCommit == nil { + return fmt.Errorf("found block in store with no extended commit: %v", block) + } + } + + blockProto, err := block.ToProto() + if err != nil { + return fmt.Errorf("failed to convert block to protobuf: %w", err) 
+ } return blockSyncCh.Send(ctx, p2p.Envelope{ - To: peerID, - Message: &bcproto.NoBlockResponse{Height: msg.Height}, + To: peerID, + Message: &bcproto.BlockResponse{ + Block: blockProto, + ExtCommit: extCommit.ToProto(), + }, }) + } // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. @@ -242,12 +250,16 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blo "err", err) return err } - extCommit, err := types.ExtendedCommitFromProto(msg.ExtCommit) - if err != nil { - r.logger.Error("failed to convert extended commit from proto", - "peer", envelope.From, - "err", err) - return err + var extCommit *types.ExtendedCommit + if msg.ExtCommit != nil { + var err error + extCommit, err = types.ExtendedCommitFromProto(msg.ExtCommit) + if err != nil { + r.logger.Error("failed to convert extended commit from proto", + "peer", envelope.From, + "err", err) + return err + } } if err := r.pool.AddBlock(envelope.From, block, extCommit, block.Size()); err != nil { @@ -440,6 +452,8 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh lastRate = 0.0 didProcessCh = make(chan struct{}, 1) + + initialCommitHasExtensions = (r.initialState.LastBlockHeight > 0 && r.store.LoadBlockExtendedCommit(r.initialState.LastBlockHeight) != nil) ) defer trySyncTicker.Stop() @@ -463,12 +477,27 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh ) switch { - // TODO(sergio) Might be needed for implementing the upgrading solution. 
Remove after that - //case state.LastBlockHeight > 0 && r.store.LoadBlockExtCommit(state.LastBlockHeight) == nil: - case state.LastBlockHeight > 0 && blocksSynced == 0: - // Having state-synced, we need to blocksync at least one block + + // The case statement below is a bit confusing, so here is a breakdown + // of its logic and purpose: + // + // If VoteExtensions are enabled we cannot switch to consensus without + // the vote extension data for the previous height, i.e. state.LastBlockHeight. + // + // If extensions were required during state.LastBlockHeight and we have + // sync'd at least one block, then we are guaranteed to have extensions. + // BlockSync requires that the blocks it fetches have extensions if + // extensions were enabled during the height. + // + // If extensions were required during state.LastBlockHeight and we have + // not sync'd any blocks, then we can only transition to Consensus + // if we already had extensions for the initial height. + // If any of these conditions is not met, we continue the loop, looking + // for extensions. 
+ case state.ConsensusParams.ABCI.VoteExtensionsEnabled(state.LastBlockHeight) && + (blocksSynced == 0 && !initialCommitHasExtensions): r.logger.Info( - "no seen commit yet", + "no extended commit yet", "height", height, "last_block_height", state.LastBlockHeight, "initial_height", state.InitialHeight, @@ -520,18 +549,19 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh // see if there are any blocks to sync first, second, extCommit := r.pool.PeekTwoBlocks() - if first == nil || second == nil || extCommit == nil { - if first != nil && extCommit == nil { - // See https://github.com/tendermint/tendermint/pull/8433#discussion_r866790631 - panic(fmt.Errorf("peeked first block without extended commit at height %d - possible node store corruption", first.Height)) - } - // we need all to sync the first block + if first != nil && extCommit == nil && + state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { + // See https://github.com/tendermint/tendermint/pull/8433#discussion_r866790631 + panic(fmt.Errorf("peeked first block without extended commit at height %d - possible node store corruption", first.Height)) + } else if first == nil || second == nil { + // we need to have fetched two consecutive blocks in order to + // perform blocksync verification continue - } else { - // try again quickly next loop - didProcessCh <- struct{}{} } + // try again quickly next loop + didProcessCh <- struct{}{} + firstParts, err := first.MakePartSet(types.BlockPartSizeBytes) if err != nil { r.logger.Error("failed to make ", @@ -557,7 +587,10 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh // validate the block before we persist it err = r.blockExec.ValidateBlock(ctx, state, first) } - + if err == nil && state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { + // if vote extensions were required at this height, ensure they exist. 
+ err = extCommit.EnsureExtensions() + } // If either of the checks failed we log the error and request for a new block // at that height if err != nil { @@ -593,7 +626,15 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh r.pool.PopRequest() // TODO: batch saves so we do not persist to disk every block - r.store.SaveBlock(first, firstParts, extCommit) + if state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { + r.store.SaveBlockWithExtendedCommit(first, firstParts, extCommit) + } else { + // We use LastCommit here instead of extCommit. extCommit is not + // guaranteed to be populated by the peer if extensions are not enabled. + // Currently, the peer should provide an extCommit even if the vote extension data are absent + // but this may change so using second.LastCommit is safer. + r.store.SaveBlock(first, firstParts, second.LastCommit) + } // TODO: Same thing for app - but we would need a way to get the hash // without persisting the state. 
diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 1d4d7d4d61..0477eb45df 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -40,8 +40,6 @@ type reactorTestSuite struct { blockSyncChannels map[types.NodeID]*p2p.Channel peerChans map[types.NodeID]chan p2p.PeerUpdate peerUpdates map[types.NodeID]*p2p.PeerUpdates - - blockSync bool } func setup( @@ -69,7 +67,6 @@ func setup( blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), - blockSync: true, } chDesc := &p2p.ChannelDescriptor{ID: BlockSyncChannel, MessageType: new(bcproto.Message)} @@ -97,21 +94,19 @@ func setup( return rts } -func (rts *reactorTestSuite) addNode( +func makeReactor( ctx context.Context, t *testing.T, nodeID types.NodeID, genDoc *types.GenesisDoc, privVal types.PrivValidator, - maxBlockHeight int64, -) { - t.Helper() + channelCreator p2p.ChannelCreator, + peerEvents p2p.PeerEventSubscriber) *Reactor { logger := log.NewNopLogger() - rts.nodes = append(rts.nodes, nodeID) - rts.app[nodeID] = proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics()) - require.NoError(t, rts.app[nodeID].Start(ctx)) + app := proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics()) + require.NoError(t, app.Start(ctx)) blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() @@ -139,7 +134,7 @@ func (rts *reactorTestSuite) addNode( blockExec := sm.NewBlockExecutor( stateStore, log.NewNopLogger(), - rts.app[nodeID], + app, mp, sm.EmptyEvidencePool{}, blockStore, @@ -147,44 +142,35 @@ func (rts *reactorTestSuite) addNode( sm.NopMetrics(), ) - var lastExtCommit *types.ExtendedCommit - - // The commit we are building for the current height. 
- seenExtCommit := &types.ExtendedCommit{} - - for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastExtCommit = seenExtCommit.Clone() + return NewReactor( + logger, + stateStore, + blockExec, + blockStore, + nil, + channelCreator, + peerEvents, + true, + consensus.NopMetrics(), + nil, // eventbus, can be nil + ) +} - thisBlock := sf.MakeBlock(state, blockHeight, lastExtCommit.StripExtensions()) - thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes) - require.NoError(t, err) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} - - // Simulate a commit for the current height - vote, err := factory.MakeVote( - ctx, - privVal, - thisBlock.Header.ChainID, - 0, - thisBlock.Header.Height, - 0, - 2, - blockID, - time.Now(), - ) - require.NoError(t, err) - seenExtCommit = &types.ExtendedCommit{ - Height: vote.Height, - Round: vote.Round, - BlockID: blockID, - ExtendedSignatures: []types.ExtendedCommitSig{vote.ExtendedCommitSig()}, - } +func (rts *reactorTestSuite) addNode( + ctx context.Context, + t *testing.T, + nodeID types.NodeID, + genDoc *types.GenesisDoc, + privVal types.PrivValidator, + maxBlockHeight int64, +) { + t.Helper() - state, err = blockExec.ApplyBlock(ctx, state, blockID, thisBlock) - require.NoError(t, err) + logger := log.NewNopLogger() - blockStore.SaveBlock(thisBlock, thisParts, seenExtCommit) - } + rts.nodes = append(rts.nodes, nodeID) + rts.app[nodeID] = proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics()) + require.NoError(t, rts.app[nodeID].Start(ctx)) rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) @@ -193,21 +179,64 @@ func (rts *reactorTestSuite) addNode( chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { return rts.blockSyncChannels[nodeID], nil } - rts.reactors[nodeID] = NewReactor( - rts.logger.With("nodeID", 
nodeID), - stateStore, - blockExec, - blockStore, - nil, - chCreator, - func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] }, - rts.blockSync, - consensus.NopMetrics(), - nil, // eventbus, can be nil + + peerEvents := func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] } + reactor := makeReactor(ctx, t, nodeID, genDoc, privVal, chCreator, peerEvents) + + lastExtCommit := &types.ExtendedCommit{} + + state, err := reactor.stateStore.Load() + require.NoError(t, err) + for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { + block, blockID, partSet, seenExtCommit := makeNextBlock(ctx, t, state, privVal, blockHeight, lastExtCommit) + + state, err = reactor.blockExec.ApplyBlock(ctx, state, blockID, block) + require.NoError(t, err) + + reactor.store.SaveBlockWithExtendedCommit(block, partSet, seenExtCommit) + lastExtCommit = seenExtCommit + } + + rts.reactors[nodeID] = reactor + require.NoError(t, reactor.Start(ctx)) + require.True(t, reactor.IsRunning()) +} + +func makeNextBlock(ctx context.Context, + t *testing.T, + state sm.State, + signer types.PrivValidator, + height int64, + lc *types.ExtendedCommit) (*types.Block, types.BlockID, *types.PartSet, *types.ExtendedCommit) { + + lastExtCommit := lc.Clone() + + block := sf.MakeBlock(state, height, lastExtCommit.ToCommit()) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: partSet.Header()} + + // Simulate a commit for the current height + vote, err := factory.MakeVote( + ctx, + signer, + block.Header.ChainID, + 0, + block.Header.Height, + 0, + 2, + blockID, + time.Now(), ) + require.NoError(t, err) + seenExtCommit := &types.ExtendedCommit{ + Height: vote.Height, + Round: vote.Round, + BlockID: blockID, + ExtendedSignatures: []types.ExtendedCommitSig{vote.ExtendedCommitSig()}, + } + return block, blockID, partSet, seenExtCommit - require.NoError(t, 
rts.reactors[nodeID].Start(ctx)) - require.True(t, rts.reactors[nodeID].IsRunning()) } func (rts *reactorTestSuite) start(ctx context.Context, t *testing.T) { @@ -415,3 +444,35 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { len(rts.reactors[newNode.NodeID].pool.peers), ) } + +/* +func TestReactorReceivesNoExtendedCommit(t *testing.T) { + blockDB := dbm.NewMemDB() + stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) + blockStore := store.NewBlockStore(blockDB) + blockExec := sm.NewBlockExecutor( + stateStore, + log.NewNopLogger(), + rts.app[nodeID], + mp, + sm.EmptyEvidencePool{}, + blockStore, + eventbus, + sm.NopMetrics(), + ) + NewReactor( + log.NewNopLogger(), + stateStore, + blockExec, + blockStore, + nil, + chCreator, + func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] }, + rts.blockSync, + consensus.NopMetrics(), + nil, // eventbus, can be nil + ) + +} +*/ diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 1dc92b33c6..27fc39d6bf 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -527,10 +527,11 @@ func loadPrivValidator(t *testing.T, cfg *config.Config) *privval.FilePV { } type makeStateArgs struct { - config *config.Config - logger log.Logger - validators int - application abci.Application + config *config.Config + consensusParams *types.ConsensusParams + logger log.Logger + validators int + application abci.Application } func makeState(ctx context.Context, t *testing.T, args makeStateArgs) (*State, []*validatorStub) { @@ -551,9 +552,13 @@ func makeState(ctx context.Context, t *testing.T, args makeStateArgs) (*State, [ if args.logger == nil { args.logger = log.NewNopLogger() } + c := factory.ConsensusParams() + if args.consensusParams != nil { + c = args.consensusParams + } state, privVals := makeGenesisState(ctx, t, args.config, genesisStateArgs{ - Params: factory.ConsensusParams(), + Params: c, Validators: validators, }) diff --git 
a/internal/consensus/msgs.go b/internal/consensus/msgs.go index c59c06a413..1024c24ae9 100644 --- a/internal/consensus/msgs.go +++ b/internal/consensus/msgs.go @@ -222,11 +222,7 @@ func (*VoteMessage) TypeTag() string { return "tendermint/Vote" } // ValidateBasic checks whether the vote within the message is well-formed. func (m *VoteMessage) ValidateBasic() error { - // Here we validate votes with vote extensions, since we require vote - // extensions to be sent in precommit messages during consensus. Prevote - // messages should never have vote extensions, and this is also validated - // here. - return m.Vote.ValidateWithExtension() + return m.Vote.ValidateBasic() } // String returns a string representation. diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 5015233394..18d5851a4e 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -798,13 +798,20 @@ func (r *Reactor) gossipVotesRoutine(ctx context.Context, ps *PeerState, voteCh if blockStoreBase > 0 && prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= blockStoreBase { // Load the block's extended commit for prs.Height, which contains precommit // signatures for prs.Height. 
- if ec := r.state.blockStore.LoadBlockExtendedCommit(prs.Height); ec != nil { - if ok, err := r.pickSendVote(ctx, ps, ec, voteCh); err != nil { - return - } else if ok { - logger.Debug("picked Catchup commit to send", "height", prs.Height) - continue - } + var ec *types.ExtendedCommit + if r.state.state.ConsensusParams.ABCI.VoteExtensionsEnabled(prs.Height) { + ec = r.state.blockStore.LoadBlockExtendedCommit(prs.Height) + } else { + ec = r.state.blockStore.LoadBlockCommit(prs.Height).WrappedExtendedCommit() + } + if ec == nil { + continue + } + if ok, err := r.pickSendVote(ctx, ps, ec, voteCh); err != nil { + return + } else if ok { + logger.Debug("picked Catchup commit to send", "height", prs.Height) + continue } } diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index c6a8869db3..96cf800bdf 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -32,6 +32,7 @@ import ( "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -600,6 +601,118 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { wg.Wait() } +// TestSwitchToConsensusVoteExtensions tests that the SwitchToConsensus correctly +// checks for vote extension data when required. 
+func TestSwitchToConsensusVoteExtensions(t *testing.T) { + for _, testCase := range []struct { + name string + storedHeight int64 + initialRequiredHeight int64 + includeExtensions bool + shouldPanic bool + }{ + { + name: "no vote extensions but not required", + initialRequiredHeight: 0, + storedHeight: 2, + includeExtensions: false, + shouldPanic: false, + }, + { + name: "no vote extensions but required this height", + initialRequiredHeight: 2, + storedHeight: 2, + includeExtensions: false, + shouldPanic: true, + }, + { + name: "no vote extensions and required in future", + initialRequiredHeight: 3, + storedHeight: 2, + includeExtensions: false, + shouldPanic: false, + }, + { + name: "no vote extensions and required previous height", + initialRequiredHeight: 1, + storedHeight: 2, + includeExtensions: false, + shouldPanic: true, + }, + { + name: "vote extensions and required previous height", + initialRequiredHeight: 1, + storedHeight: 2, + includeExtensions: true, + shouldPanic: false, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + defer cancel() + cs, vs := makeState(ctx, t, makeStateArgs{validators: 1}) + validator := vs[0] + validator.Height = testCase.storedHeight + + cs.state.LastBlockHeight = testCase.storedHeight + cs.state.LastValidators = cs.state.Validators.Copy() + cs.state.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testCase.initialRequiredHeight + + propBlock, err := cs.createProposalBlock(ctx) + require.NoError(t, err) + + // Consensus is preparing to do the next height after the stored height. 
+ cs.Height = testCase.storedHeight + 1 + propBlock.Height = testCase.storedHeight + blockParts, err := propBlock.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + + var voteSet *types.VoteSet + if testCase.includeExtensions { + voteSet = types.NewExtendedVoteSet(cs.state.ChainID, testCase.storedHeight, 0, tmproto.PrecommitType, cs.state.Validators) + } else { + voteSet = types.NewVoteSet(cs.state.ChainID, testCase.storedHeight, 0, tmproto.PrecommitType, cs.state.Validators) + } + signedVote := signVote(ctx, t, validator, tmproto.PrecommitType, cs.state.ChainID, types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: blockParts.Header(), + }) + + if !testCase.includeExtensions { + signedVote.Extension = nil + signedVote.ExtensionSignature = nil + } + + added, err := voteSet.AddVote(signedVote) + require.NoError(t, err) + require.True(t, added) + + if testCase.includeExtensions { + cs.blockStore.SaveBlockWithExtendedCommit(propBlock, blockParts, voteSet.MakeExtendedCommit()) + } else { + cs.blockStore.SaveBlock(propBlock, blockParts, voteSet.MakeExtendedCommit().ToCommit()) + } + reactor := NewReactor( + log.NewNopLogger(), + cs, + nil, + nil, + cs.eventBus, + true, + NopMetrics(), + ) + + if testCase.shouldPanic { + assert.Panics(t, func() { + reactor.SwitchToConsensus(ctx, cs.state, false) + }) + } else { + reactor.SwitchToConsensus(ctx, cs.state, false) + } + }) + } +} + func TestReactorRecordsVotesAndBlockParts(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index c8f04655b2..99d3c17a14 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -1204,14 +1204,16 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { } } func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } -func (bs *mockBlockStore) SaveBlock(block 
*types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { +func (bs *mockBlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { +} +func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { } func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { - return bs.extCommits[height-1].StripExtensions() + return bs.extCommits[height-1].ToCommit() } func (bs *mockBlockStore) LoadSeenCommit() *types.Commit { - return bs.extCommits[len(bs.extCommits)-1].StripExtensions() + return bs.extCommits[len(bs.extCommits)-1].ToCommit() } func (bs *mockBlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { return bs.extCommits[height-1] diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 320042d308..3af775bb62 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -696,23 +696,54 @@ func (cs *State) sendInternalMessage(ctx context.Context, mi msgInfo) { } } -// Reconstruct LastCommit from SeenCommit, which we saved along with the block, -// (which happens even before saving the state) +// Reconstruct the LastCommit from either SeenCommit or the ExtendedCommit. SeenCommit +// and ExtendedCommit are saved along with the block. If VoteExtensions are required +// the method will panic on an absent ExtendedCommit or an ExtendedCommit without +// extension data. 
func (cs *State) reconstructLastCommit(state sm.State) { - extCommit := cs.blockStore.LoadBlockExtendedCommit(state.LastBlockHeight) - if extCommit == nil { - panic(fmt.Sprintf( - "failed to reconstruct last commit; commit for height %v not found", - state.LastBlockHeight, - )) + extensionsEnabled := cs.state.ConsensusParams.ABCI.VoteExtensionsEnabled(state.LastBlockHeight) + if !extensionsEnabled { + votes, err := cs.votesFromSeenCommit(state) + if err != nil { + panic(fmt.Sprintf("failed to reconstruct last commit; %s", err)) + } + cs.LastCommit = votes + return + } + + votes, err := cs.votesFromExtendedCommit(state) + if err != nil { + panic(fmt.Sprintf("failed to reconstruct last extended commit; %s", err)) + } + cs.LastCommit = votes +} + +func (cs *State) votesFromExtendedCommit(state sm.State) (*types.VoteSet, error) { + ec := cs.blockStore.LoadBlockExtendedCommit(state.LastBlockHeight) + if ec == nil { + return nil, fmt.Errorf("extended commit for height %v not found", state.LastBlockHeight) + } + vs := ec.ToExtendedVoteSet(state.ChainID, state.LastValidators) + if !vs.HasTwoThirdsMajority() { + return nil, errors.New("extended commit does not have +2/3 majority") } + return vs, nil +} - lastPrecommits := extCommit.ToVoteSet(state.ChainID, state.LastValidators) - if !lastPrecommits.HasTwoThirdsMajority() { - panic("failed to reconstruct last commit; does not have +2/3 maj") +func (cs *State) votesFromSeenCommit(state sm.State) (*types.VoteSet, error) { + commit := cs.blockStore.LoadSeenCommit() + if commit == nil || commit.Height != state.LastBlockHeight { + commit = cs.blockStore.LoadBlockCommit(state.LastBlockHeight) + } + if commit == nil { + return nil, fmt.Errorf("commit for height %v not found", state.LastBlockHeight) } - cs.LastCommit = lastPrecommits + vs := commit.ToVoteSet(state.ChainID, state.LastValidators) + if !vs.HasTwoThirdsMajority() { + return nil, errors.New("commit does not have +2/3 majority") + } + return vs, nil } // Updates State and 
increments height to match that of state. @@ -814,7 +845,11 @@ func (cs *State) updateToState(state sm.State) { cs.ValidRound = -1 cs.ValidBlock = nil cs.ValidBlockParts = nil - cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) + if state.ConsensusParams.ABCI.VoteExtensionsEnabled(height) { + cs.Votes = cstypes.NewExtendedHeightVoteSet(state.ChainID, height, validators) + } else { + cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) + } cs.CommitRound = -1 cs.LastValidators = state.LastValidators cs.TriggeredTimeoutPrecommit = false @@ -1925,8 +1960,12 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) { if cs.blockStore.Height() < block.Height { // NOTE: the seenCommit is local justification to commit this block, // but may differ from the LastCommit included in the next block - precommits := cs.Votes.Precommits(cs.CommitRound) - cs.blockStore.SaveBlock(block, blockParts, precommits.MakeExtendedCommit()) + seenExtendedCommit := cs.Votes.Precommits(cs.CommitRound).MakeExtendedCommit() + if cs.state.ConsensusParams.ABCI.VoteExtensionsEnabled(block.Height) { + cs.blockStore.SaveBlockWithExtendedCommit(block, blockParts, seenExtendedCommit) + } else { + cs.blockStore.SaveBlock(block, blockParts, seenExtendedCommit.ToCommit()) + } } else { // Happens during replay if we already saved the block but didn't commit logger.Debug("calling finalizeCommit on already stored block", "height", block.Height) @@ -2341,13 +2380,45 @@ func (cs *State) addVote( return } - // Verify VoteExtension if precommit and not nil - // https://github.com/tendermint/tendermint/issues/8487 - if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() { - err := cs.blockExec.VerifyVoteExtension(ctx, vote) - cs.metrics.MarkVoteExtensionReceived(err == nil) - if err != nil { - return false, err + // Check to see if the chain is configured to extend votes. 
+ if cs.state.ConsensusParams.ABCI.VoteExtensionsEnabled(cs.Height) { + // The chain is configured to extend votes, check that the vote is + // not for a nil block and verify the extensions signature against the + // corresponding public key. + + var myAddr []byte + if cs.privValidatorPubKey != nil { + myAddr = cs.privValidatorPubKey.Address() + } + // Verify VoteExtension if precommit and not nil + // https://github.com/tendermint/tendermint/issues/8487 + if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() && + !bytes.Equal(vote.ValidatorAddress, myAddr) { // Skip the VerifyVoteExtension call if the vote was issued by this validator. + + // The core fields of the vote message were already validated in the + // consensus reactor when the vote was received. + // Here, we verify the signature of the vote extension included in the vote + // message. + _, val := cs.state.Validators.GetByIndex(vote.ValidatorIndex) + if err := vote.VerifyExtension(cs.state.ChainID, val.PubKey); err != nil { + return false, err + } + + err := cs.blockExec.VerifyVoteExtension(ctx, vote) + cs.metrics.MarkVoteExtensionReceived(err == nil) + if err != nil { + return false, err + } + } + } else { + // Vote extensions are not enabled on the network. + // strip the extension data from the vote in case any is present. + // + // TODO punish a peer if it sent a vote with an extension when the feature + // is disabled on the network. + // https://github.com/tendermint/tendermint/issues/8565 + if stripped := vote.StripExtension(); stripped { + cs.logger.Error("vote included extension data but vote extensions are not enabled", "peer", peerID) } } @@ -2496,18 +2567,18 @@ func (cs *State) signVote( // If the signedMessageType is for precommit, // use our local precommit Timeout as the max wait time for getting a singed commit. The same goes for prevote. 
- timeout := cs.voteTimeout(cs.Round) - + timeout := time.Second if msgType == tmproto.PrecommitType && !vote.BlockID.IsNil() { + timeout = cs.voteTimeout(cs.Round) // if the signedMessage type is for a non-nil precommit, add // VoteExtension - ext, err := cs.blockExec.ExtendVote(ctx, vote) - if err != nil { - return nil, err + if cs.state.ConsensusParams.ABCI.VoteExtensionsEnabled(cs.Height) { + ext, err := cs.blockExec.ExtendVote(ctx, vote) + if err != nil { + return nil, err + } + vote.Extension = ext } - vote.Extension = ext - } else { - timeout = time.Second } v := vote.ToProto() @@ -2547,14 +2618,17 @@ func (cs *State) signAddVote( // TODO: pass pubKey to signVote vote, err := cs.signVote(ctx, msgType, hash, header) - if err == nil { - cs.sendInternalMessage(ctx, msgInfo{&VoteMessage{vote}, "", tmtime.Now()}) - cs.logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote) - return vote + if err != nil { + cs.logger.Error("failed signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) + return nil } - - cs.logger.Error("failed signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) - return nil + if !cs.state.ConsensusParams.ABCI.VoteExtensionsEnabled(vote.Height) { + // The signer will sign the extension, make sure to remove the data on the way out + vote.StripExtension() + } + cs.sendInternalMessage(ctx, msgInfo{&VoteMessage{vote}, "", tmtime.Now()}) + cs.logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote) + return vote } // updatePrivValidatorPubKey get's the private validator public key and diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 6fa69a1a3c..797dcf59c4 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -18,6 +18,7 @@ import ( "github.com/tendermint/tendermint/internal/eventbus" tmpubsub "github.com/tendermint/tendermint/internal/pubsub" tmquery 
"github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/internal/test/factory" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -2026,77 +2027,101 @@ func TestFinalizeBlockCalled(t *testing.T) { } } -// TestExtendVoteCalled tests that the vote extension methods are called at the -// correct point in the consensus algorithm. -func TestExtendVoteCalled(t *testing.T) { - config := configSetup(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() +// TestExtendVoteCalledWhenEnabled tests that the vote extension methods are called at the +// correct point in the consensus algorithm when vote extensions are enabled. +func TestExtendVoteCalledWhenEnabled(t *testing.T) { + for _, testCase := range []struct { + name string + enabled bool + }{ + { + name: "enabled", + enabled: true, + }, + { + name: "disabled", + enabled: false, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - m := abcimocks.NewApplication(t) - m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) - m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) - m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ - VoteExtension: []byte("extension"), - }, nil) - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, - }, nil) - m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() - m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() - cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) - height, 
round := cs1.Height, cs1.Round + m := abcimocks.NewApplication(t) + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) + if testCase.enabled { + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ + VoteExtension: []byte("extension"), + }, nil) + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ + Status: abci.ResponseVerifyVoteExtension_ACCEPT, + }, nil) + } + m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() + c := factory.ConsensusParams() + if !testCase.enabled { + c.ABCI.VoteExtensionsEnableHeight = 0 + } + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m, consensusParams: c}) + height, round := cs1.Height, cs1.Round - proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(ctx) - require.NoError(t, err) - addr := pv1.Address() - voteCh := subscribeToVoter(ctx, t, cs1, addr) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(ctx, t, cs1, addr) - startTestRound(ctx, cs1, cs1.Height, round) - ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(t, proposalCh, height, round) + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) - m.AssertNotCalled(t, "ExtendVote", mock.Anything, 
mock.Anything) + m.AssertNotCalled(t, "ExtendVote", mock.Anything, mock.Anything) - rs := cs1.GetRoundState() + rs := cs1.GetRoundState() - blockID := types.BlockID{ - Hash: rs.ProposalBlock.Hash(), - PartSetHeader: rs.ProposalBlockParts.Header(), - } - signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) - ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) + ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) - ensurePrecommit(t, voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) - m.AssertCalled(t, "ExtendVote", ctx, &abci.RequestExtendVote{ - Height: height, - Hash: blockID.Hash, - }) + if testCase.enabled { + m.AssertCalled(t, "ExtendVote", ctx, &abci.RequestExtendVote{ + Height: height, + Hash: blockID.Hash, + }) + } else { + m.AssertNotCalled(t, "ExtendVote", mock.Anything, mock.Anything) + } - m.AssertCalled(t, "VerifyVoteExtension", ctx, &abci.RequestVerifyVoteExtension{ - Hash: blockID.Hash, - ValidatorAddress: addr, - Height: height, - VoteExtension: []byte("extension"), - }) - signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...) - ensureNewRound(t, newRoundCh, height+1, 0) - m.AssertExpectations(t) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...) + ensureNewRound(t, newRoundCh, height+1, 0) + m.AssertExpectations(t) - // Only 3 of the vote extensions are seen, as consensus proceeds as soon as the +2/3 threshold - // is observed by the consensus engine. 
- for _, pv := range vss[:3] { - pv, err := pv.GetPubKey(ctx) - require.NoError(t, err) - addr := pv.Address() - m.AssertCalled(t, "VerifyVoteExtension", ctx, &abci.RequestVerifyVoteExtension{ - Hash: blockID.Hash, - ValidatorAddress: addr, - Height: height, - VoteExtension: []byte("extension"), + // Only 3 of the vote extensions are seen, as consensus proceeds as soon as the +2/3 threshold + // is observed by the consensus engine. + for _, pv := range vss[1:3] { + pv, err := pv.GetPubKey(ctx) + require.NoError(t, err) + addr := pv.Address() + if testCase.enabled { + m.AssertCalled(t, "VerifyVoteExtension", ctx, &abci.RequestVerifyVoteExtension{ + Hash: blockID.Hash, + ValidatorAddress: addr, + Height: height, + VoteExtension: []byte("extension"), + }) + } else { + m.AssertNotCalled(t, "VerifyVoteExtension", mock.Anything, mock.Anything) + } + } }) } @@ -2121,6 +2146,7 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) height, round := cs1.Height, cs1.Round + cs1.state.ConsensusParams.ABCI.VoteExtensionsEnableHeight = cs1.Height proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) @@ -2138,7 +2164,7 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header(), } - signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[2:]...) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss...) 
ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) ensurePrecommit(t, voteCh, height, round) @@ -2148,13 +2174,6 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { Hash: blockID.Hash, }) - m.AssertCalled(t, "VerifyVoteExtension", mock.Anything, &abci.RequestVerifyVoteExtension{ - Hash: blockID.Hash, - ValidatorAddress: addr, - Height: height, - VoteExtension: []byte("extension"), - }) - m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[2:]...) ensureNewRound(t, newRoundCh, height+1, 0) @@ -2266,6 +2285,134 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { } } +// TestVoteExtensionEnableHeight tests that 'ExtensionRequireHeight' correctly +// enforces that vote extensions be present in consensus for heights greater than +// or equal to the configured value. +func TestVoteExtensionEnableHeight(t *testing.T) { + for _, testCase := range []struct { + name string + enableHeight int64 + hasExtension bool + expectExtendCalled bool + expectVerifyCalled bool + expectSuccessfulRound bool + }{ + { + name: "extension present but not enabled", + hasExtension: true, + enableHeight: 0, + expectExtendCalled: false, + expectVerifyCalled: false, + expectSuccessfulRound: true, + }, + { + name: "extension absent but not required", + hasExtension: false, + enableHeight: 0, + expectExtendCalled: false, + expectVerifyCalled: false, + expectSuccessfulRound: true, + }, + { + name: "extension present and required", + hasExtension: true, + enableHeight: 1, + expectExtendCalled: true, + expectVerifyCalled: true, + expectSuccessfulRound: true, + }, + { + name: "extension absent but required", + hasExtension: false, + enableHeight: 1, + expectExtendCalled: true, + expectVerifyCalled: false, + expectSuccessfulRound: false, + }, + { + name: "extension absent but required in future height", + hasExtension: false, + enableHeight: 2, + 
expectExtendCalled: false, + expectVerifyCalled: false, + expectSuccessfulRound: true, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + numValidators := 3 + m := abcimocks.NewApplication(t) + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{ + Status: abci.ResponseProcessProposal_ACCEPT, + }, nil) + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) + if testCase.expectExtendCalled { + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil) + } + if testCase.expectVerifyCalled { + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ + Status: abci.ResponseVerifyVoteExtension_ACCEPT, + }, nil).Times(numValidators - 1) + } + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() + m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() + c := factory.ConsensusParams() + c.ABCI.VoteExtensionsEnableHeight = testCase.enableHeight + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m, validators: numValidators, consensusParams: c}) + cs1.state.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testCase.enableHeight + height, round := cs1.Height, cs1.Round + + timeoutCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(ctx, t, cs1, addr) + + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + + 
blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } + + // sign all of the votes + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) + ensurePrevoteMatch(t, voteCh, height, round, rs.ProposalBlock.Hash()) + + var ext []byte + if testCase.hasExtension { + ext = []byte("extension") + } + + for _, vs := range vss[1:] { + vote, err := vs.signVote(ctx, tmproto.PrecommitType, config.ChainID(), blockID, ext) + if !testCase.hasExtension { + vote.ExtensionSignature = nil + } + require.NoError(t, err) + addVotes(cs1, vote) + } + if testCase.expectSuccessfulRound { + ensurePrecommit(t, voteCh, height, round) + height++ + ensureNewRound(t, newRoundCh, height, round) + } else { + ensureNoNewTimeout(t, timeoutCh, cs1.state.ConsensusParams.Timeout.VoteTimeout(round).Nanoseconds()) + } + + m.AssertExpectations(t) + }) + } +} + // 4 vals, 3 Nil Precommits at P0 // What we want: // P0 waits for timeoutPrecommit before starting next round diff --git a/internal/consensus/types/height_vote_set.go b/internal/consensus/types/height_vote_set.go index 661c5120e0..389c023561 100644 --- a/internal/consensus/types/height_vote_set.go +++ b/internal/consensus/types/height_vote_set.go @@ -38,9 +38,10 @@ We let each peer provide us with up to 2 unexpected "catchup" rounds. One for their LastCommit round, and another for the official commit round. 
*/ type HeightVoteSet struct { - chainID string - height int64 - valSet *types.ValidatorSet + chainID string + height int64 + valSet *types.ValidatorSet + extensionsEnabled bool mtx sync.Mutex round int32 // max tracked round @@ -50,7 +51,17 @@ type HeightVoteSet struct { func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet { hvs := &HeightVoteSet{ - chainID: chainID, + chainID: chainID, + extensionsEnabled: false, + } + hvs.Reset(height, valSet) + return hvs +} + +func NewExtendedHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet { + hvs := &HeightVoteSet{ + chainID: chainID, + extensionsEnabled: true, } hvs.Reset(height, valSet) return hvs @@ -108,7 +119,12 @@ func (hvs *HeightVoteSet) addRound(round int32) { } // log.Debug("addRound(round)", "round", round) prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrevoteType, hvs.valSet) - precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrecommitType, hvs.valSet) + var precommits *types.VoteSet + if hvs.extensionsEnabled { + precommits = types.NewExtendedVoteSet(hvs.chainID, hvs.height, round, tmproto.PrecommitType, hvs.valSet) + } else { + precommits = types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrecommitType, hvs.valSet) + } hvs.roundVoteSets[round] = RoundVoteSet{ Prevotes: prevotes, Precommits: precommits, diff --git a/internal/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go index acffa794cb..a2cfd84ec0 100644 --- a/internal/consensus/types/height_vote_set_test.go +++ b/internal/consensus/types/height_vote_set_test.go @@ -27,7 +27,7 @@ func TestPeerCatchupRounds(t *testing.T) { valSet, privVals := factory.ValidatorSet(ctx, t, 10, 1) chainID := cfg.ChainID() - hvs := NewHeightVoteSet(chainID, 1, valSet) + hvs := NewExtendedHeightVoteSet(chainID, 1, valSet) vote999_0 := makeVoteHR(ctx, t, 1, 0, 999, privVals, chainID) added, err := 
hvs.AddVote(vote999_0, "peer1") diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index 4047d3e7fb..f80d13c7bc 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -250,7 +250,7 @@ func TestEvidencePoolUpdate(t *testing.T) { ) require.NoError(t, err) lastExtCommit := makeExtCommit(height, val.PrivKey.PubKey().Address()) - block := types.MakeBlock(height+1, []types.Tx{}, lastExtCommit.StripExtensions(), []types.Evidence{ev}) + block := types.MakeBlock(height+1, []types.Tx{}, lastExtCommit.ToCommit(), []types.Evidence{ev}) // update state (partially) state.LastBlockHeight = height + 1 @@ -569,7 +569,7 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.Blo for i := int64(1); i <= state.LastBlockHeight; i++ { lastCommit := makeExtCommit(i-1, valAddr) - block := sf.MakeBlock(state, i, lastCommit.StripExtensions()) + block := sf.MakeBlock(state, i, lastCommit.ToCommit()) block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute) block.Header.Version = version.Consensus{Block: version.BlockProtocol, App: 1} @@ -580,7 +580,7 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.Blo } seenCommit := makeExtCommit(i, valAddr) - blockStore.SaveBlock(block, partSet, seenCommit) + blockStore.SaveBlockWithExtendedCommit(block, partSet, seenCommit) } return blockStore, nil @@ -596,6 +596,7 @@ func makeExtCommit(height int64, valAddr []byte) *types.ExtendedCommit { Timestamp: defaultEvidenceTime, Signature: []byte("Signature"), }, + ExtensionSignature: []byte("Extended Signature"), }}, } } diff --git a/internal/evidence/verify_test.go b/internal/evidence/verify_test.go index 2ed84fa692..6341fde1de 100644 --- a/internal/evidence/verify_test.go +++ b/internal/evidence/verify_test.go @@ -233,10 +233,10 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { // we are simulating a duplicate vote attack where all the validators in the conflictingVals 
set // except the last validator vote twice blockID := factory.MakeBlockIDWithHash(conflictingHeader.Hash()) - voteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) + voteSet := types.NewExtendedVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) extCommit, err := factory.MakeExtendedCommit(ctx, blockID, 10, 1, voteSet, conflictingPrivVals[:4], defaultEvidenceTime) require.NoError(t, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() ev := &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ @@ -253,11 +253,11 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { } trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) - trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) + trustedVoteSet := types.NewExtendedVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) trustedExtCommit, err := factory.MakeExtendedCommit(ctx, trustedBlockID, 10, 1, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) - trustedCommit := trustedExtCommit.StripExtensions() + trustedCommit := trustedExtCommit.ToCommit() trustedSignedHeader := &types.SignedHeader{ Header: trustedHeader, @@ -336,10 +336,10 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { // we are simulating an amnesia attack where all the validators in the conflictingVals set // except the last validator vote twice. However this time the commits are of different rounds. 
blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) - voteSet := types.NewVoteSet(evidenceChainID, height, 0, tmproto.SignedMsgType(2), conflictingVals) + voteSet := types.NewExtendedVoteSet(evidenceChainID, height, 0, tmproto.SignedMsgType(2), conflictingVals) extCommit, err := factory.MakeExtendedCommit(ctx, blockID, height, 0, voteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() ev := &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ @@ -356,11 +356,11 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { } trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) - trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) + trustedVoteSet := types.NewExtendedVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) trustedExtCommit, err := factory.MakeExtendedCommit(ctx, trustedBlockID, height, 1, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) - trustedCommit := trustedExtCommit.StripExtensions() + trustedCommit := trustedExtCommit.ToCommit() trustedSignedHeader := &types.SignedHeader{ Header: trustedHeader, @@ -553,10 +553,10 @@ func makeLunaticEvidence( }) blockID := factory.MakeBlockIDWithHash(conflictingHeader.Hash()) - voteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) + voteSet := types.NewExtendedVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) extCommit, err := factory.MakeExtendedCommit(ctx, blockID, height, 1, voteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() ev = &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ @@ -582,10 +582,10 @@ func makeLunaticEvidence( } trustedBlockID := 
factory.MakeBlockIDWithHash(trustedHeader.Hash()) trustedVals, privVals := factory.ValidatorSet(ctx, t, totalVals, defaultVotingPower) - trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), trustedVals) + trustedVoteSet := types.NewExtendedVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), trustedVals) trustedExtCommit, err := factory.MakeExtendedCommit(ctx, trustedBlockID, height, 1, trustedVoteSet, privVals, defaultEvidenceTime) require.NoError(t, err) - trustedCommit := trustedExtCommit.StripExtensions() + trustedCommit := trustedExtCommit.ToCommit() trusted = &types.LightBlock{ SignedHeader: &types.SignedHeader{ diff --git a/internal/state/execution.go b/internal/state/execution.go index 2c88c793bd..47c2cb7aef 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -3,6 +3,7 @@ package state import ( "bytes" "context" + "errors" "fmt" "time" @@ -100,15 +101,14 @@ func (blockExec *BlockExecutor) CreateProposalBlock( maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size()) txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) - commit := lastExtCommit.StripExtensions() + commit := lastExtCommit.ToCommit() block := state.MakeBlock(height, txs, commit, evidence, proposerAddr) - rpp, err := blockExec.appClient.PrepareProposal( ctx, &abci.RequestPrepareProposal{ MaxTxBytes: maxDataBytes, Txs: block.Txs.ToSliceOfBytes(), - LocalLastCommit: buildExtendedCommitInfo(lastExtCommit, blockExec.store, state.InitialHeight), + LocalLastCommit: buildExtendedCommitInfo(lastExtCommit, blockExec.store, state.InitialHeight, state.ConsensusParams.ABCI), ByzantineValidators: block.Evidence.ToABCI(), Height: block.Height, Time: block.Time, @@ -321,7 +321,7 @@ func (blockExec *BlockExecutor) VerifyVoteExtension(ctx context.Context, vote *t } if !resp.IsOK() { - return types.ErrVoteInvalidExtension + return errors.New("invalid vote extension") } return nil @@ -428,7 +428,7 @@ func 
buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) a // data, it returns an empty record. // // Assumes that the commit signatures are sorted according to validator index. -func buildExtendedCommitInfo(ec *types.ExtendedCommit, store Store, initialHeight int64) abci.ExtendedCommitInfo { +func buildExtendedCommitInfo(ec *types.ExtendedCommit, store Store, initialHeight int64, ap types.ABCIParams) abci.ExtendedCommitInfo { if ec.Height < initialHeight { // There are no extended commits for heights below the initial height. return abci.ExtendedCommitInfo{} @@ -466,9 +466,15 @@ func buildExtendedCommitInfo(ec *types.ExtendedCommit, store Store, initialHeigh } var ext []byte - if ecs.BlockIDFlag == types.BlockIDFlagCommit { - // We only care about vote extensions if a validator has voted to - // commit. + // Check if vote extensions were enabled during the commit's height: ec.Height. + // ec is the commit from the previous height, so if extensions were enabled + // during that height, we ensure they are present and deliver the data to + // the proposer. If they were not enabled during this previous height, we + // will not deliver extension data. 
+ if ap.VoteExtensionsEnabled(ec.Height) && ecs.BlockIDFlag == types.BlockIDFlagCommit { + if err := ecs.EnsureExtension(); err != nil { + panic(fmt.Errorf("commit at height %d received with missing vote extensions data", ec.Height)) + } ext = ecs.Extension } diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index ffe9cb6f88..5fb4dc297f 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -140,7 +140,7 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) { } // block for height 2 - block := sf.MakeBlock(state, 2, lastCommit.StripExtensions()) + block := sf.MakeBlock(state, 2, lastCommit.ToCommit()) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} @@ -1004,6 +1004,116 @@ func TestPrepareProposalErrorOnPrepareProposalError(t *testing.T) { mp.AssertExpectations(t) } +// TestCreateProposalBlockPanicOnAbsentVoteExtensions ensures that the CreateProposalBlock +// call correctly panics when the vote extension data is missing from the extended commit +// data that the method receives. +func TestCreateProposalAbsentVoteExtensions(t *testing.T) { + for _, testCase := range []struct { + name string + + // The height that is about to be proposed + height int64 + + // The first height during which vote extensions will be required for consensus to proceed. 
+ extensionEnableHeight int64 + expectPanic bool + }{ + { + name: "missing extension data on first required height", + height: 2, + extensionEnableHeight: 1, + expectPanic: true, + }, + { + name: "missing extension during before required height", + height: 2, + extensionEnableHeight: 2, + expectPanic: false, + }, + { + name: "missing extension data and not required", + height: 2, + extensionEnableHeight: 0, + expectPanic: false, + }, + { + name: "missing extension data and required in two heights", + height: 2, + extensionEnableHeight: 3, + expectPanic: false, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + app := abcimocks.NewApplication(t) + if !testCase.expectPanic { + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) + } + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + state, stateDB, privVals := makeState(t, 1, int(testCase.height-1)) + stateStore := sm.NewStore(stateDB) + state.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testCase.extensionEnableHeight + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{}) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + sm.EmptyEvidencePool{}, + nil, + eventBus, + sm.NopMetrics(), + ) + block := sf.MakeBlock(state, testCase.height, new(types.Commit)) + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + blockID := 
types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} + pa, _ := state.Validators.GetByIndex(0) + lastCommit, _ := makeValidCommit(ctx, t, testCase.height-1, blockID, state.Validators, privVals) + stripSignatures(lastCommit) + if testCase.expectPanic { + require.Panics(t, func() { + blockExec.CreateProposalBlock(ctx, testCase.height, state, lastCommit, pa) //nolint:errcheck + }) + } else { + _, err = blockExec.CreateProposalBlock(ctx, testCase.height, state, lastCommit, pa) + require.NoError(t, err) + } + }) + } +} + +func stripSignatures(ec *types.ExtendedCommit) { + for i, commitSig := range ec.ExtendedSignatures { + commitSig.Extension = nil + commitSig.ExtensionSignature = nil + ec.ExtendedSignatures[i] = commitSig + } +} + func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID { var ( h = make([]byte, crypto.HashSize) diff --git a/internal/state/mocks/block_store.go b/internal/state/mocks/block_store.go index 4eafb12739..58fc640fc5 100644 --- a/internal/state/mocks/block_store.go +++ b/internal/state/mocks/block_store.go @@ -209,7 +209,12 @@ func (_m *BlockStore) PruneBlocks(height int64) (uint64, error) { } // SaveBlock provides a mock function with given fields: block, blockParts, seenCommit -func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { +func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { + _m.Called(block, blockParts, seenCommit) +} + +// SaveBlockWithExtendedCommit provides a mock function with given fields: block, blockParts, seenCommit +func (_m *BlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { _m.Called(block, blockParts, seenCommit) } diff --git a/internal/state/services.go b/internal/state/services.go index 35a91aa11d..f86c4e3cf5 100644 --- a/internal/state/services.go +++ b/internal/state/services.go @@ -26,7 +26,8 @@ 
type BlockStore interface { LoadBlockMeta(height int64) *types.BlockMeta LoadBlock(height int64) *types.Block - SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) + SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) + SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) PruneBlocks(height int64) (uint64, error) diff --git a/internal/state/store.go b/internal/state/store.go index 2d2e4dc81e..d592016f62 100644 --- a/internal/state/store.go +++ b/internal/state/store.go @@ -2,6 +2,7 @@ package state import ( "bytes" + "encoding/binary" "errors" "fmt" @@ -59,6 +60,7 @@ func abciResponsesKey(height int64) []byte { // stateKey should never change after being set in init() var stateKey []byte +var tmpABCIKey []byte func init() { var err error @@ -66,6 +68,12 @@ func init() { if err != nil { panic(err) } + // temporary extra key before consensus param protos are regenerated + // TODO(wbanfield) remove in next PR + tmpABCIKey, err = orderedcode.Append(nil, int64(10000)) + if err != nil { + panic(err) + } } //---------------------- @@ -137,6 +145,12 @@ func (store dbStore) loadState(key []byte) (state State, err error) { if err != nil { return state, err } + buf, err = store.db.Get(tmpABCIKey) + if err != nil { + return state, err + } + h, _ := binary.Varint(buf) + sm.ConsensusParams.ABCI.VoteExtensionsEnableHeight = h return *sm, nil } @@ -181,6 +195,11 @@ func (store dbStore) save(state State, key []byte) error { if err := batch.Set(key, stateBz); err != nil { return err } + bz := make([]byte, 5) + binary.PutVarint(bz, state.ConsensusParams.ABCI.VoteExtensionsEnableHeight) + if err := batch.Set(tmpABCIKey, bz); err != nil { + return err + } return batch.WriteSync() } diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index b29cfd0f9c..0f43db5eb7 100644 --- a/internal/state/validation_test.go +++ 
b/internal/state/validation_test.go @@ -124,7 +124,7 @@ func TestValidateBlockHeader(t *testing.T) { */ state, _, lastExtCommit = makeAndCommitGoodBlock(ctx, t, state, height, lastCommit, state.Validators.GetProposer().Address, blockExec, privVals, nil) - lastCommit = lastExtCommit.StripExtensions() + lastCommit = lastExtCommit.ToCommit() } nextHeight := validationTestsStopHeight @@ -234,7 +234,7 @@ func TestValidateBlockCommit(t *testing.T) { privVals, nil, ) - lastCommit = lastExtCommit.StripExtensions() + lastCommit = lastExtCommit.ToCommit() /* wrongSigsCommit is fine except for the extra bad precommit @@ -384,7 +384,7 @@ func TestValidateBlockEvidence(t *testing.T) { privVals, evidence, ) - lastCommit = lastExtCommit.StripExtensions() + lastCommit = lastExtCommit.ToCommit() } } diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index 904fb2b74b..f57e228a7f 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -855,13 +855,13 @@ func mockLB(ctx context.Context, t *testing.T, height int64, time time.Time, las header.NextValidatorsHash = nextVals.Hash() header.ConsensusHash = types.DefaultConsensusParams().HashConsensusParams() lastBlockID = factory.MakeBlockIDWithHash(header.Hash()) - voteSet := types.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, currentVals) + voteSet := types.NewExtendedVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, currentVals) extCommit, err := factory.MakeExtendedCommit(ctx, lastBlockID, height, 0, voteSet, currentPrivVals, time) require.NoError(t, err) return nextVals, nextPrivVals, &types.LightBlock{ SignedHeader: &types.SignedHeader{ Header: header, - Commit: extCommit.StripExtensions(), + Commit: extCommit.ToCommit(), }, ValidatorSet: currentVals, } diff --git a/internal/store/store.go b/internal/store/store.go index 5617674a2f..1ba7e398db 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -2,6 
+2,7 @@ package store import ( "bytes" + "errors" "fmt" "strconv" @@ -278,6 +279,9 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { return commit } +// LoadExtendedCommit returns the ExtendedCommit for the given height. +// The extended commit is not guaranteed to contain the same +2/3 precommits data +// as the commit in the block. func (bs *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { pbec := new(tmproto.ExtendedCommit) bz, err := bs.db.Get(extCommitKey(height)) @@ -466,25 +470,73 @@ func (bs *BlockStore) batchDelete( // If all the nodes restart after committing a block, // we need this to reload the precommits to catch-up nodes to the // most recent height. Otherwise they'd stall at H-1. -func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { +func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { if block == nil { panic("BlockStore can only save a non-nil block") } + batch := bs.db.NewBatch() + if err := bs.saveBlockToBatch(batch, block, blockParts, seenCommit); err != nil { + panic(err) + } + + if err := batch.WriteSync(); err != nil { + panic(err) + } + if err := batch.Close(); err != nil { + panic(err) + } +} + +// SaveBlockWithExtendedCommit persists the given block, blockParts, and +// seenExtendedCommit to the underlying db. seenExtendedCommit is stored under +// two keys in the database: as the seenCommit and as the ExtendedCommit data for the +// height. This allows the vote extension data to be persisted for all blocks +// that are saved. 
+func (bs *BlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenExtendedCommit *types.ExtendedCommit) { + if block == nil { + panic("BlockStore can only save a non-nil block") + } + if err := seenExtendedCommit.EnsureExtensions(); err != nil { + panic(fmt.Errorf("saving block with extensions: %w", err)) + } batch := bs.db.NewBatch() + if err := bs.saveBlockToBatch(batch, block, blockParts, seenExtendedCommit.ToCommit()); err != nil { + panic(err) + } + height := block.Height + + pbec := seenExtendedCommit.ToProto() + extCommitBytes := mustEncode(pbec) + if err := batch.Set(extCommitKey(height), extCommitBytes); err != nil { + panic(err) + } + + if err := batch.WriteSync(); err != nil { + panic(err) + } + + if err := batch.Close(); err != nil { + panic(err) + } +} + +func (bs *BlockStore) saveBlockToBatch(batch dbm.Batch, block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) error { + if block == nil { + panic("BlockStore can only save a non-nil block") + } height := block.Height hash := block.Hash() if g, w := height, bs.Height()+1; bs.Base() > 0 && g != w { - panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g)) + return fmt.Errorf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g) } if !blockParts.IsComplete() { - panic("BlockStore can only save complete block part sets") + return errors.New("BlockStore can only save complete block part sets") } if height != seenCommit.Height { - panic(fmt.Sprintf("BlockStore cannot save seen commit of a different height (block: %d, commit: %d)", - height, seenCommit.Height)) + return fmt.Errorf("BlockStore cannot save seen commit of a different height (block: %d, commit: %d)", height, seenCommit.Height) } // Save block parts. 
This must be done before the block meta, since callers @@ -499,44 +551,32 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s blockMeta := types.NewBlockMeta(block, blockParts) pbm := blockMeta.ToProto() if pbm == nil { - panic("nil blockmeta") + return errors.New("nil blockmeta") } metaBytes := mustEncode(pbm) if err := batch.Set(blockMetaKey(height), metaBytes); err != nil { - panic(err) + return err } if err := batch.Set(blockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil { - panic(err) + return err } pbc := block.LastCommit.ToProto() blockCommitBytes := mustEncode(pbc) if err := batch.Set(blockCommitKey(height-1), blockCommitBytes); err != nil { - panic(err) + return err } // Save seen commit (seen +2/3 precommits for block) - pbsc := seenCommit.StripExtensions().ToProto() + pbsc := seenCommit.ToProto() seenCommitBytes := mustEncode(pbsc) if err := batch.Set(seenCommitKey(), seenCommitBytes); err != nil { - panic(err) - } - - pbec := seenCommit.ToProto() - extCommitBytes := mustEncode(pbec) - if err := batch.Set(extCommitKey(height), extCommitBytes); err != nil { - panic(err) - } - - if err := batch.WriteSync(); err != nil { - panic(err) + return err } - if err := batch.Close(); err != nil { - panic(err) - } + return nil } func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part, batch dbm.Batch) { diff --git a/internal/store/store_test.go b/internal/store/store_test.go index 9df3eed9f4..771129cc07 100644 --- a/internal/store/store_test.go +++ b/internal/store/store_test.go @@ -36,6 +36,7 @@ func makeTestExtCommit(height int64, timestamp time.Time) *types.ExtendedCommit Timestamp: timestamp, Signature: []byte("Signature"), }, + ExtensionSignature: []byte("ExtensionSignature"), }} return &types.ExtendedCommit{ Height: height, @@ -89,7 +90,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { part2 := validPartSet.GetPart(1) seenCommit := makeTestExtCommit(block.Header.Height, tmtime.Now()) - 
bs.SaveBlock(block, validPartSet, seenCommit) + bs.SaveBlockWithExtendedCommit(block, validPartSet, seenCommit) require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed") require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed") @@ -107,7 +108,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } // End of setup, test data - commitAtH10 := makeTestExtCommit(10, tmtime.Now()).StripExtensions() + commitAtH10 := makeTestExtCommit(10, tmtime.Now()).ToCommit() tuples := []struct { block *types.Block parts *types.PartSet @@ -140,16 +141,17 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { ChainID: "block_test", Time: tmtime.Now(), ProposerAddress: tmrand.Bytes(crypto.AddressSize)}, - makeTestExtCommit(5, tmtime.Now()).StripExtensions(), + makeTestExtCommit(5, tmtime.Now()).ToCommit(), ), parts: validPartSet, seenCommit: makeTestExtCommit(5, tmtime.Now()), }, { - block: newBlock(header1, commitAtH10), - parts: incompletePartSet, - wantPanic: "only save complete block", // incomplete parts + block: newBlock(header1, commitAtH10), + parts: incompletePartSet, + wantPanic: "only save complete block", // incomplete parts + seenCommit: makeTestExtCommit(10, tmtime.Now()), }, { @@ -178,7 +180,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { }, { - block: newBlock(header1, commitAtH10), + block: block, parts: validPartSet, seenCommit: seenCommit, @@ -187,7 +189,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { }, { - block: newBlock(header1, commitAtH10), + block: block, parts: validPartSet, seenCommit: seenCommit, @@ -209,7 +211,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { bs, db := newInMemoryBlockStore() // SaveBlock res, err, panicErr := doFn(func() (interface{}, error) { - bs.SaveBlock(tuple.block, tuple.parts, tuple.seenCommit) + bs.SaveBlockWithExtendedCommit(tuple.block, tuple.parts, tuple.seenCommit) if tuple.block == nil { return nil, nil } @@ -279,6 +281,90 @@ func 
TestBlockStoreSaveLoadBlock(t *testing.T) { } } +// TestSaveBlockWithExtendedCommitPanicOnAbsentExtension tests that saving a +// block with an extended commit panics when the extension data is absent. +func TestSaveBlockWithExtendedCommitPanicOnAbsentExtension(t *testing.T) { + for _, testCase := range []struct { + name string + malleateCommit func(*types.ExtendedCommit) + shouldPanic bool + }{ + { + name: "basic save", + malleateCommit: func(_ *types.ExtendedCommit) {}, + shouldPanic: false, + }, + { + name: "save commit with no extensions", + malleateCommit: func(c *types.ExtendedCommit) { + c.StripExtensions() + }, + shouldPanic: true, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir()) + require.NoError(t, err) + defer cleanup() + block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) + seenCommit := makeTestExtCommit(block.Header.Height, tmtime.Now()) + ps, err := block.MakePartSet(2) + require.NoError(t, err) + testCase.malleateCommit(seenCommit) + if testCase.shouldPanic { + require.Panics(t, func() { + bs.SaveBlockWithExtendedCommit(block, ps, seenCommit) + }) + } else { + bs.SaveBlockWithExtendedCommit(block, ps, seenCommit) + } + }) + } +} + +// TestLoadBlockExtendedCommit tests loading the extended commit for a previously +// saved block. The load method should return nil when only a commit was saved and +// return the extended commit otherwise. 
+func TestLoadBlockExtendedCommit(t *testing.T) { + for _, testCase := range []struct { + name string + saveExtended bool + expectResult bool + }{ + { + name: "save commit", + saveExtended: false, + expectResult: false, + }, + { + name: "save extended commit", + saveExtended: true, + expectResult: true, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir()) + require.NoError(t, err) + defer cleanup() + block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) + seenCommit := makeTestExtCommit(block.Header.Height, tmtime.Now()) + ps, err := block.MakePartSet(2) + require.NoError(t, err) + if testCase.saveExtended { + bs.SaveBlockWithExtendedCommit(block, ps, seenCommit) + } else { + bs.SaveBlock(block, ps, seenCommit.ToCommit()) + } + res := bs.LoadBlockExtendedCommit(block.Height) + if testCase.expectResult { + require.Equal(t, seenCommit, res) + } else { + require.Nil(t, res) + } + }) + } +} + func TestLoadBaseMeta(t *testing.T) { cfg, err := config.ResetTestRoot(t.TempDir(), "blockchain_reactor_test") require.NoError(t, err) @@ -293,7 +379,7 @@ func TestLoadBaseMeta(t *testing.T) { partSet, err := block.MakePartSet(2) require.NoError(t, err) seenCommit := makeTestExtCommit(h, tmtime.Now()) - bs.SaveBlock(block, partSet, seenCommit) + bs.SaveBlockWithExtendedCommit(block, partSet, seenCommit) } pruned, err := bs.PruneBlocks(4) @@ -371,7 +457,7 @@ func TestPruneBlocks(t *testing.T) { partSet, err := block.MakePartSet(2) require.NoError(t, err) seenCommit := makeTestExtCommit(h, tmtime.Now()) - bs.SaveBlock(block, partSet, seenCommit) + bs.SaveBlockWithExtendedCommit(block, partSet, seenCommit) } assert.EqualValues(t, 1, bs.Base()) @@ -479,7 +565,7 @@ func TestBlockFetchAtHeight(t *testing.T) { partSet, err := block.MakePartSet(2) require.NoError(t, err) seenCommit := makeTestExtCommit(block.Header.Height, tmtime.Now()) - bs.SaveBlock(block, partSet, seenCommit) + 
bs.SaveBlockWithExtendedCommit(block, partSet, seenCommit) require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") blockAtHeight := bs.LoadBlock(bs.Height()) @@ -518,16 +604,16 @@ func TestSeenAndCanonicalCommit(t *testing.T) { // produce a few blocks and check that the correct seen and cannoncial commits // are persisted. for h := int64(3); h <= 5; h++ { - blockCommit := makeTestExtCommit(h-1, tmtime.Now()).StripExtensions() + blockCommit := makeTestExtCommit(h-1, tmtime.Now()).ToCommit() block := factory.MakeBlock(state, h, blockCommit) partSet, err := block.MakePartSet(2) require.NoError(t, err) seenCommit := makeTestExtCommit(h, tmtime.Now()) - store.SaveBlock(block, partSet, seenCommit) + store.SaveBlockWithExtendedCommit(block, partSet, seenCommit) c3 := store.LoadSeenCommit() require.NotNil(t, c3) require.Equal(t, h, c3.Height) - require.Equal(t, seenCommit.StripExtensions().Hash(), c3.Hash()) + require.Equal(t, seenCommit.ToCommit().Hash(), c3.Hash()) c5 := store.LoadBlockCommit(h) require.Nil(t, c5) c6 := store.LoadBlockCommit(h - 1) diff --git a/internal/test/factory/params.go b/internal/test/factory/params.go index dda8e2b3ca..c6fa3f9fca 100644 --- a/internal/test/factory/params.go +++ b/internal/test/factory/params.go @@ -18,5 +18,6 @@ func ConsensusParams() *types.ConsensusParams { VoteDelta: 1 * time.Millisecond, BypassCommitTimeout: true, } + c.ABCI.VoteExtensionsEnableHeight = 1 return c } diff --git a/node/node_test.go b/node/node_test.go index b1d7a94818..245e39b3c2 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -526,7 +526,7 @@ func TestMaxProposalBlockSize(t *testing.T) { } state.ChainID = maxChainID - voteSet := types.NewVoteSet(state.ChainID, math.MaxInt64-1, math.MaxInt32, tmproto.PrecommitType, state.Validators) + voteSet := types.NewExtendedVoteSet(state.ChainID, math.MaxInt64-1, math.MaxInt32, tmproto.PrecommitType, state.Validators) // add maximum amount of signatures to a single commit for 
i := 0; i < types.MaxVotesCount; i++ { diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index a71ea14fb5..fab1d7b20e 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -165,9 +165,9 @@ func generateLightClientAttackEvidence( // create a commit for the forged header blockID := makeBlockID(header.Hash(), 1000, []byte("partshash")) - voteSet := types.NewVoteSet(chainID, forgedHeight, 0, tmproto.SignedMsgType(2), conflictingVals) + voteSet := types.NewExtendedVoteSet(chainID, forgedHeight, 0, tmproto.SignedMsgType(2), conflictingVals) - commit, err := factory.MakeExtendedCommit(ctx, blockID, forgedHeight, 0, voteSet, pv, forgedTime) + ec, err := factory.MakeExtendedCommit(ctx, blockID, forgedHeight, 0, voteSet, pv, forgedTime) if err != nil { return nil, err } @@ -176,7 +176,7 @@ func generateLightClientAttackEvidence( ConflictingBlock: &types.LightBlock{ SignedHeader: &types.SignedHeader{ Header: header, - Commit: commit.StripExtensions(), + Commit: ec.ToCommit(), }, ValidatorSet: conflictingVals, }, diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index ed041e1861..20a153c1d7 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -190,6 +190,7 @@ func TestApp_Tx(t *testing.T) { func TestApp_VoteExtensions(t *testing.T) { testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { + t.Skip() client, err := node.Client() require.NoError(t, err) diff --git a/types/block.go b/types/block.go index 32a4f9a0a9..f508245bba 100644 --- a/types/block.go +++ b/types/block.go @@ -757,22 +757,23 @@ func (ecs ExtendedCommitSig) ValidateBasic() error { if len(ecs.Extension) > MaxVoteExtensionSize { return fmt.Errorf("vote extension is too big (max: %d)", MaxVoteExtensionSize) } - if len(ecs.ExtensionSignature) == 0 { - return errors.New("vote extension signature is missing") - } if len(ecs.ExtensionSignature) > MaxSignatureSize { return fmt.Errorf("vote extension signature is too big 
(max: %d)", MaxSignatureSize) } return nil } - // We expect there to not be any vote extension or vote extension signature - // on nil or absent votes. - if len(ecs.Extension) != 0 { - return fmt.Errorf("vote extension is present for commit sig with block ID flag %v", ecs.BlockIDFlag) + if len(ecs.ExtensionSignature) == 0 && len(ecs.Extension) != 0 { + return errors.New("vote extension signature absent on vote with extension") } - if len(ecs.ExtensionSignature) != 0 { - return fmt.Errorf("vote extension signature is present for commit sig with block ID flag %v", ecs.BlockIDFlag) + return nil +} + +// EnsureExtensions validates that a vote extensions signature is present for +// this ExtendedCommitSig. +func (ecs ExtendedCommitSig) EnsureExtension() error { + if ecs.BlockIDFlag == BlockIDFlagCommit && len(ecs.ExtensionSignature) == 0 { + return errors.New("vote extension data is missing") } return nil } @@ -916,6 +917,26 @@ func (commit *Commit) Hash() tmbytes.HexBytes { return commit.hash } +// WrappedExtendedCommit wraps a commit as an ExtendedCommit. +// The VoteExtension fields of the resulting value will by nil. +// Wrapping a Commit as an ExtendedCommit is useful when an API +// requires an ExtendedCommit wire type but does not +// need the VoteExtension data. +func (commit *Commit) WrappedExtendedCommit() *ExtendedCommit { + cs := make([]ExtendedCommitSig, len(commit.Signatures)) + for idx, s := range commit.Signatures { + cs[idx] = ExtendedCommitSig{ + CommitSig: s, + } + } + return &ExtendedCommit{ + Height: commit.Height, + Round: commit.Round, + BlockID: commit.BlockID, + ExtendedSignatures: cs, + } +} + // StringIndented returns a string representation of the commit. func (commit *Commit) StringIndented(indent string) string { if commit == nil { @@ -1013,17 +1034,33 @@ func (ec *ExtendedCommit) Clone() *ExtendedCommit { return &ecc } +// ToExtendedVoteSet constructs a VoteSet from the Commit and validator set. 
+// Panics if signatures from the ExtendedCommit can't be added to the voteset. +// Panics if any of the votes have invalid or absent vote extension data. +// Inverse of VoteSet.MakeExtendedCommit(). +func (ec *ExtendedCommit) ToExtendedVoteSet(chainID string, vals *ValidatorSet) *VoteSet { + voteSet := NewExtendedVoteSet(chainID, ec.Height, ec.Round, tmproto.PrecommitType, vals) + ec.addSigsToVoteSet(voteSet) + return voteSet +} + // ToVoteSet constructs a VoteSet from the Commit and validator set. -// Panics if signatures from the commit can't be added to the voteset. +// Panics if signatures from the ExtendedCommit can't be added to the voteset. // Inverse of VoteSet.MakeExtendedCommit(). func (ec *ExtendedCommit) ToVoteSet(chainID string, vals *ValidatorSet) *VoteSet { voteSet := NewVoteSet(chainID, ec.Height, ec.Round, tmproto.PrecommitType, vals) + ec.addSigsToVoteSet(voteSet) + return voteSet +} + +// addSigsToVoteSet adds all of the signature to voteSet. +func (ec *ExtendedCommit) addSigsToVoteSet(voteSet *VoteSet) { for idx, ecs := range ec.ExtendedSignatures { if ecs.BlockIDFlag == BlockIDFlagAbsent { continue // OK, some precommits can be missing. } vote := ec.GetExtendedVote(int32(idx)) - if err := vote.ValidateWithExtension(); err != nil { + if err := vote.ValidateBasic(); err != nil { panic(fmt.Errorf("failed to validate vote reconstructed from LastCommit: %w", err)) } added, err := voteSet.AddVote(vote) @@ -1031,12 +1068,58 @@ func (ec *ExtendedCommit) ToVoteSet(chainID string, vals *ValidatorSet) *VoteSet panic(fmt.Errorf("failed to reconstruct vote set from extended commit: %w", err)) } } +} + +// ToVoteSet constructs a VoteSet from the Commit and validator set. +// Panics if signatures from the commit can't be added to the voteset. +// Inverse of VoteSet.MakeCommit(). 
+func (commit *Commit) ToVoteSet(chainID string, vals *ValidatorSet) *VoteSet { + voteSet := NewVoteSet(chainID, commit.Height, commit.Round, tmproto.PrecommitType, vals) + for idx, cs := range commit.Signatures { + if cs.BlockIDFlag == BlockIDFlagAbsent { + continue // OK, some precommits can be missing. + } + vote := commit.GetVote(int32(idx)) + if err := vote.ValidateBasic(); err != nil { + panic(fmt.Errorf("failed to validate vote reconstructed from commit: %w", err)) + } + added, err := voteSet.AddVote(vote) + if !added || err != nil { + panic(fmt.Errorf("failed to reconstruct vote set from commit: %w", err)) + } + } return voteSet } -// StripExtensions converts an ExtendedCommit to a Commit by removing all vote +// EnsureExtensions validates that a vote extensions signature is present for +// every ExtendedCommitSig in the ExtendedCommit. +func (ec *ExtendedCommit) EnsureExtensions() error { + for _, ecs := range ec.ExtendedSignatures { + if err := ecs.EnsureExtension(); err != nil { + return err + } + } + return nil +} + +// StripExtensions removes all VoteExtension data from an ExtendedCommit. This +// is useful when dealing with an ExendedCommit but vote extension data is +// expected to be absent. +func (ec *ExtendedCommit) StripExtensions() bool { + stripped := false + for idx := range ec.ExtendedSignatures { + if len(ec.ExtendedSignatures[idx].Extension) > 0 || len(ec.ExtendedSignatures[idx].ExtensionSignature) > 0 { + stripped = true + } + ec.ExtendedSignatures[idx].Extension = nil + ec.ExtendedSignatures[idx].ExtensionSignature = nil + } + return stripped +} + +// ToCommit converts an ExtendedCommit to a Commit by removing all vote // extension-related fields. 
-func (ec *ExtendedCommit) StripExtensions() *Commit { +func (ec *ExtendedCommit) ToCommit() *Commit { cs := make([]CommitSig, len(ec.ExtendedSignatures)) for idx, ecs := range ec.ExtendedSignatures { cs[idx] = ecs.CommitSig diff --git a/types/block_test.go b/types/block_test.go index 09a8b602ed..4c3a74d8f6 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -49,7 +49,7 @@ func TestBlockAddEvidence(t *testing.T) { require.NoError(t, err) evList := []Evidence{ev} - block := MakeBlock(h, txs, extCommit.StripExtensions(), evList) + block := MakeBlock(h, txs, extCommit.ToCommit(), evList) require.NotNil(t, block) require.Equal(t, 1, len(block.Evidence)) require.NotNil(t, block.EvidenceHash) @@ -68,7 +68,7 @@ func TestBlockValidateBasic(t *testing.T) { voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain") require.NoError(t, err) @@ -163,7 +163,7 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { require.NoError(t, err) evList := []Evidence{ev} - partSet, err := MakeBlock(h, []Tx{Tx("Hello World")}, extCommit.StripExtensions(), evList).MakePartSet(512) + partSet, err := MakeBlock(h, []Tx{Tx("Hello World")}, extCommit.ToCommit(), evList).MakePartSet(512) require.NoError(t, err) assert.NotNil(t, partSet) @@ -187,7 +187,7 @@ func TestBlockHashesTo(t *testing.T) { require.NoError(t, err) evList := []Evidence{ev} - block := MakeBlock(h, []Tx{Tx("Hello World")}, extCommit.StripExtensions(), evList) + block := MakeBlock(h, []Tx{Tx("Hello World")}, extCommit.ToCommit(), evList) block.ValidatorsHash = valSet.Hash() assert.False(t, block.HashesTo([]byte{})) assert.False(t, block.HashesTo([]byte("something else"))) @@ -483,7 +483,7 @@ func randCommit(ctx 
context.Context, t *testing.T, now time.Time) *Commit { require.NoError(t, err) - return commit.StripExtensions() + return commit.ToCommit() } func hexBytesFromString(t *testing.T, s string) bytes.HexBytes { @@ -556,33 +556,138 @@ func TestBlockMaxDataBytesNoEvidence(t *testing.T) { } } +// TestVoteSetToExtendedCommit tests that the extended commit produced from a +// vote set contains the same vote information as the vote set. The test ensures +// that the MakeExtendedCommit method behaves as expected, whether vote extensions +// are present in the original votes or not. +func TestVoteSetToExtendedCommit(t *testing.T) { + for _, testCase := range []struct { + name string + includeExtension bool + }{ + { + name: "no extensions", + includeExtension: false, + }, + { + name: "with extensions", + includeExtension: true, + }, + } { + + t.Run(testCase.name, func(t *testing.T) { + blockID := makeBlockIDRandom() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + valSet, vals := randValidatorPrivValSet(ctx, t, 10, 1) + var voteSet *VoteSet + if testCase.includeExtension { + voteSet = NewExtendedVoteSet("test_chain_id", 3, 1, tmproto.PrecommitType, valSet) + } else { + voteSet = NewVoteSet("test_chain_id", 3, 1, tmproto.PrecommitType, valSet) + } + for i := 0; i < len(vals); i++ { + pubKey, err := vals[i].GetPubKey(ctx) + require.NoError(t, err) + vote := &Vote{ + ValidatorAddress: pubKey.Address(), + ValidatorIndex: int32(i), + Height: 3, + Round: 1, + Type: tmproto.PrecommitType, + BlockID: blockID, + Timestamp: time.Now(), + } + v := vote.ToProto() + err = vals[i].SignVote(ctx, voteSet.ChainID(), v) + require.NoError(t, err) + vote.Signature = v.Signature + if testCase.includeExtension { + vote.ExtensionSignature = v.ExtensionSignature + } + added, err := voteSet.AddVote(vote) + require.NoError(t, err) + require.True(t, added) + } + ec := voteSet.MakeExtendedCommit() + + for i := int32(0); int(i) < len(vals); i++ { + vote1 := 
voteSet.GetByIndex(i) + vote2 := ec.GetExtendedVote(i) + + vote1bz, err := vote1.ToProto().Marshal() + require.NoError(t, err) + vote2bz, err := vote2.ToProto().Marshal() + require.NoError(t, err) + assert.Equal(t, vote1bz, vote2bz) + } + }) + } +} + +// TestExtendedCommitToVoteSet tests that the vote set produced from an extended commit +// contains the same vote information as the extended commit. The test ensures +// that the ToVoteSet method behaves as expected, whether vote extensions +// are present in the original votes or not. func TestExtendedCommitToVoteSet(t *testing.T) { - lastID := makeBlockIDRandom() - h := int64(3) + for _, testCase := range []struct { + name string + includeExtension bool + }{ + { + name: "no extensions", + includeExtension: false, + }, + { + name: "with extensions", + includeExtension: true, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + lastID := makeBlockIDRandom() + h := int64(3) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) - extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) - assert.NoError(t, err) - - chainID := voteSet.ChainID() - voteSet2 := extCommit.ToVoteSet(chainID, valSet) - - for i := int32(0); int(i) < len(vals); i++ { - vote1 := voteSet.GetByIndex(i) - vote2 := voteSet2.GetByIndex(i) - vote3 := extCommit.GetExtendedVote(i) - - vote1bz, err := vote1.ToProto().Marshal() - require.NoError(t, err) - vote2bz, err := vote2.ToProto().Marshal() - require.NoError(t, err) - vote3bz, err := vote3.ToProto().Marshal() - require.NoError(t, err) - assert.Equal(t, vote1bz, vote2bz) - assert.Equal(t, vote1bz, vote3bz) + voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) + extCommit, err := makeExtCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) + assert.NoError(t, err) + 
+ if !testCase.includeExtension { + for i := 0; i < len(vals); i++ { + v := voteSet.GetByIndex(int32(i)) + v.Extension = nil + v.ExtensionSignature = nil + extCommit.ExtendedSignatures[i].Extension = nil + extCommit.ExtendedSignatures[i].ExtensionSignature = nil + } + } + + chainID := voteSet.ChainID() + var voteSet2 *VoteSet + if testCase.includeExtension { + voteSet2 = extCommit.ToExtendedVoteSet(chainID, valSet) + } else { + voteSet2 = extCommit.ToVoteSet(chainID, valSet) + } + + for i := int32(0); int(i) < len(vals); i++ { + vote1 := voteSet.GetByIndex(i) + vote2 := voteSet2.GetByIndex(i) + vote3 := extCommit.GetExtendedVote(i) + + vote1bz, err := vote1.ToProto().Marshal() + require.NoError(t, err) + vote2bz, err := vote2.ToProto().Marshal() + require.NoError(t, err) + vote3bz, err := vote3.ToProto().Marshal() + require.NoError(t, err) + assert.Equal(t, vote1bz, vote2bz) + assert.Equal(t, vote1bz, vote3bz) + } + }) } } @@ -637,7 +742,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { if tc.valid { extCommit := voteSet.MakeExtendedCommit() // panics without > 2/3 valid votes assert.NotNil(t, extCommit) - err := valSet.VerifyCommit(voteSet.ChainID(), blockID, height-1, extCommit.StripExtensions()) + err := valSet.VerifyCommit(voteSet.ChainID(), blockID, height-1, extCommit.ToCommit()) assert.NoError(t, err) } else { assert.Panics(t, func() { voteSet.MakeExtendedCommit() }) diff --git a/types/evidence_test.go b/types/evidence_test.go index 8b06a62186..d014a3ecc9 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -155,7 +155,7 @@ func TestLightClientAttackEvidenceBasic(t *testing.T) { blockID := makeBlockID(crypto.Checksum([]byte("blockhash")), math.MaxInt32, crypto.Checksum([]byte("partshash"))) extCommit, err := makeExtCommit(ctx, blockID, height, 1, voteSet, privVals, defaultVoteTime) require.NoError(t, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() lcae := &LightClientAttackEvidence{ 
ConflictingBlock: &LightBlock{ @@ -221,7 +221,7 @@ func TestLightClientAttackEvidenceValidation(t *testing.T) { blockID := makeBlockID(header.Hash(), math.MaxInt32, crypto.Checksum([]byte("partshash"))) extCommit, err := makeExtCommit(ctx, blockID, height, 1, voteSet, privVals, time.Now()) require.NoError(t, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() lcae := &LightClientAttackEvidence{ ConflictingBlock: &LightBlock{ @@ -434,7 +434,7 @@ func TestEvidenceVectors(t *testing.T) { ConflictingBlock: &LightBlock{ SignedHeader: &SignedHeader{ Header: header, - Commit: extCommit.StripExtensions(), + Commit: extCommit.ToCommit(), }, ValidatorSet: valSet, }, diff --git a/types/params.go b/types/params.go index e8ee6fcdf7..3b5e9a2503 100644 --- a/types/params.go +++ b/types/params.go @@ -43,6 +43,7 @@ type ConsensusParams struct { Version VersionParams `json:"version"` Synchrony SynchronyParams `json:"synchrony"` Timeout TimeoutParams `json:"timeout"` + ABCI ABCIParams `json:"abci"` } // HashedParams is a subset of ConsensusParams. @@ -96,6 +97,21 @@ type TimeoutParams struct { BypassCommitTimeout bool `json:"bypass_commit_timeout"` } +// ABCIParams configure ABCI functionality specific to the Application Blockchain +// Interface. +type ABCIParams struct { + VoteExtensionsEnableHeight int64 `json:"vote_extensions_enable_height"` +} + +// VoteExtensionsEnabled returns true if vote extensions are enabled at height h +// and false otherwise. +func (a ABCIParams) VoteExtensionsEnabled(h int64) bool { + if a.VoteExtensionsEnableHeight == 0 { + return false + } + return a.VoteExtensionsEnableHeight <= h +} + // DefaultConsensusParams returns a default ConsensusParams. 
func DefaultConsensusParams() *ConsensusParams { return &ConsensusParams{ @@ -105,6 +121,7 @@ func DefaultConsensusParams() *ConsensusParams { Version: DefaultVersionParams(), Synchrony: DefaultSynchronyParams(), Timeout: DefaultTimeoutParams(), + ABCI: DefaultABCIParams(), } } @@ -176,6 +193,13 @@ func DefaultTimeoutParams() TimeoutParams { } } +func DefaultABCIParams() ABCIParams { + return ABCIParams{ + // When set to 0, vote extensions are not required. + VoteExtensionsEnableHeight: 0, + } +} + // TimeoutParamsOrDefaults returns the SynchronyParams, filling in any zero values // with the Tendermint defined default values. func (t TimeoutParams) TimeoutParamsOrDefaults() TimeoutParams { diff --git a/types/validation_test.go b/types/validation_test.go index f63c34450a..c28f63000f 100644 --- a/types/validation_test.go +++ b/types/validation_test.go @@ -153,7 +153,7 @@ func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) { voteSet, valSet, vals := randVoteSet(ctx, t, h, 0, tmproto.PrecommitType, 4, 10) extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(t, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit)) @@ -184,7 +184,7 @@ func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSign voteSet, valSet, vals := randVoteSet(ctx, t, h, 0, tmproto.PrecommitType, 4, 10) extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(t, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit)) @@ -212,7 +212,7 @@ func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotin voteSet, valSet, vals := randVoteSet(ctx, t, h, 0, tmproto.PrecommitType, 4, 10) extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(t, err) - commit 
:= extCommit.StripExtensions() + commit := extCommit.ToCommit() require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit)) @@ -239,7 +239,7 @@ func TestValidatorSet_VerifyCommitLightTrusting(t *testing.T) { newValSet, _ = randValidatorPrivValSet(ctx, t, 2, 1) ) require.NoError(t, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() testCases := []struct { valSet *ValidatorSet @@ -284,7 +284,7 @@ func TestValidatorSet_VerifyCommitLightTrustingErrorsOnOverflow(t *testing.T) { ) require.NoError(t, err) - err = valSet.VerifyCommitLightTrusting("test_chain_id", extCommit.StripExtensions(), + err = valSet.VerifyCommitLightTrusting("test_chain_id", extCommit.ToCommit(), tmmath.Fraction{Numerator: 25, Denominator: 55}) if assert.Error(t, err) { assert.Contains(t, err.Error(), "int64 overflow") diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 8b5846da93..81e81919dd 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -1541,7 +1541,7 @@ func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) { // nolint // create a commit with n validators extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(b, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() for i := 0; i < b.N/n; i++ { err = valSet.VerifyCommit(chainID, blockID, h, commit) @@ -1570,7 +1570,7 @@ func BenchmarkValidatorSet_VerifyCommitLight_Ed25519(b *testing.B) { // nolint // create a commit with n validators extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(b, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() for i := 0; i < b.N/n; i++ { err = valSet.VerifyCommitLight(chainID, blockID, h, commit) @@ -1598,7 +1598,7 @@ func BenchmarkValidatorSet_VerifyCommitLightTrusting_Ed25519(b *testing.B) { // create a commit with n validators extCommit, err := makeExtCommit(ctx, blockID, h, 0, voteSet, 
vals, time.Now()) require.NoError(b, err) - commit := extCommit.StripExtensions() + commit := extCommit.ToCommit() for i := 0; i < b.N/n; i++ { err = valSet.VerifyCommitLightTrusting(chainID, commit, tmmath.Fraction{Numerator: 1, Denominator: 3}) diff --git a/types/vote.go b/types/vote.go index 446de130ae..f7006b8cde 100644 --- a/types/vote.go +++ b/types/vote.go @@ -27,7 +27,7 @@ var ( ErrVoteInvalidBlockHash = errors.New("invalid block hash") ErrVoteNonDeterministicSignature = errors.New("non-deterministic signature") ErrVoteNil = errors.New("nil vote") - ErrVoteInvalidExtension = errors.New("invalid vote extension") + ErrVoteExtensionAbsent = errors.New("vote extension absent") ) type ErrVoteConflictingVotes struct { @@ -112,6 +112,16 @@ func (vote *Vote) CommitSig() CommitSig { } } +// StripExtension removes any extension data from the vote. Useful if the +// chain has not enabled vote extensions. +// Returns true if extension data was present before stripping and false otherwise. +func (vote *Vote) StripExtension() bool { + stripped := len(vote.Extension) > 0 || len(vote.ExtensionSignature) > 0 + vote.Extension = nil + vote.ExtensionSignature = nil + return stripped +} + // ExtendedCommitSig attempts to construct an ExtendedCommitSig from this vote. // Panics if either the vote extension signature is missing or if the block ID // is not either empty or complete. 
@@ -120,13 +130,8 @@ func (vote *Vote) ExtendedCommitSig() ExtendedCommitSig { return NewExtendedCommitSigAbsent() } - cs := vote.CommitSig() - if vote.BlockID.IsComplete() && len(vote.ExtensionSignature) == 0 { - panic(fmt.Sprintf("Invalid vote %v - BlockID is complete but missing vote extension signature", vote)) - } - return ExtendedCommitSig{ - CommitSig: cs, + CommitSig: vote.CommitSig(), Extension: vote.Extension, ExtensionSignature: vote.ExtensionSignature, } @@ -230,11 +235,11 @@ func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { return err } -// VerifyWithExtension performs the same verification as Verify, but +// VerifyVoteAndExtension performs the same verification as Verify, but // additionally checks whether the vote extension signature corresponds to the // given chain ID and public key. We only verify vote extension signatures for // precommits. -func (vote *Vote) VerifyWithExtension(chainID string, pubKey crypto.PubKey) error { +func (vote *Vote) VerifyVoteAndExtension(chainID string, pubKey crypto.PubKey) error { v, err := vote.verifyAndReturnProto(chainID, pubKey) if err != nil { return err @@ -249,6 +254,20 @@ func (vote *Vote) VerifyWithExtension(chainID string, pubKey crypto.PubKey) erro return nil } +// VerifyExtension checks whether the vote extension signature corresponds to the +// given chain ID and public key. +func (vote *Vote) VerifyExtension(chainID string, pubKey crypto.PubKey) error { + if vote.Type != tmproto.PrecommitType || vote.BlockID.IsNil() { + return nil + } + v := vote.ToProto() + extSignBytes := VoteExtensionSignBytes(chainID, v) + if !pubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) { + return ErrVoteInvalidSignature + } + return nil +} + // ValidateBasic checks whether the vote is well-formed. It does not, however, // check vote extensions - for vote validation with vote extension validation, // use ValidateWithExtension. 
@@ -306,30 +325,34 @@ func (vote *Vote) ValidateBasic() error { } } - return nil -} - -// ValidateWithExtension performs the same validations as ValidateBasic, but -// additionally checks whether a vote extension signature is present. This -// function is used in places where vote extension signatures are expected. -func (vote *Vote) ValidateWithExtension() error { - if err := vote.ValidateBasic(); err != nil { - return err - } - - // We should always see vote extension signatures in non-nil precommits if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() { - if len(vote.ExtensionSignature) == 0 { - return errors.New("vote extension signature is missing") - } if len(vote.ExtensionSignature) > MaxSignatureSize { return fmt.Errorf("vote extension signature is too big (max: %d)", MaxSignatureSize) } + if len(vote.ExtensionSignature) == 0 && len(vote.Extension) != 0 { + return fmt.Errorf("vote extension signature absent on vote with extension") + } } return nil } +// EnsureExtension checks for the presence of extensions signature data +// on precommit vote types. +func (vote *Vote) EnsureExtension() error { + // We should always see vote extension signatures in non-nil precommits + if vote.Type != tmproto.PrecommitType { + return nil + } + if vote.BlockID.IsNil() { + return nil + } + if len(vote.ExtensionSignature) > 0 { + return nil + } + return ErrVoteExtensionAbsent +} + // ToProto converts the handwritten type to proto generated type // return type, nil if everything converts safely, otherwise nil, error func (vote *Vote) ToProto() *tmproto.Vote { diff --git a/types/vote_set.go b/types/vote_set.go index 224d4e4f86..6d83ac85dd 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -3,6 +3,7 @@ package types import ( "bytes" "encoding/json" + "errors" "fmt" "strings" "sync" @@ -53,11 +54,12 @@ const ( NOTE: Assumes that the sum total of voting power does not exceed MaxUInt64. 
*/ type VoteSet struct { - chainID string - height int64 - round int32 - signedMsgType tmproto.SignedMsgType - valSet *ValidatorSet + chainID string + height int64 + round int32 + signedMsgType tmproto.SignedMsgType + valSet *ValidatorSet + extensionsEnabled bool mtx sync.Mutex votesBitArray *bits.BitArray @@ -68,7 +70,8 @@ type VoteSet struct { peerMaj23s map[string]BlockID // Maj23 for each peer } -// Constructs a new VoteSet struct used to accumulate votes for given height/round. +// NewVoteSet instantiates all fields of a new vote set. This constructor requires +// that no vote extension data be present on the votes that are added to the set. func NewVoteSet(chainID string, height int64, round int32, signedMsgType tmproto.SignedMsgType, valSet *ValidatorSet) *VoteSet { if height == 0 { @@ -89,6 +92,16 @@ func NewVoteSet(chainID string, height int64, round int32, } } +// NewExtendedVoteSet constructs a vote set with additional vote verification logic. +// The VoteSet constructed with NewExtendedVoteSet verifies the vote extension +// data for every vote added to the set. +func NewExtendedVoteSet(chainID string, height int64, round int32, + signedMsgType tmproto.SignedMsgType, valSet *ValidatorSet) *VoteSet { + vs := NewVoteSet(chainID, height, round, signedMsgType, valSet) + vs.extensionsEnabled = true + return vs +} + func (voteSet *VoteSet) ChainID() string { return voteSet.chainID } @@ -194,8 +207,17 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { } // Check signature. 
- if err := vote.VerifyWithExtension(voteSet.chainID, val.PubKey); err != nil { - return false, fmt.Errorf("failed to verify vote with ChainID %s and PubKey %s: %w", voteSet.chainID, val.PubKey, err) + if voteSet.extensionsEnabled { + if err := vote.VerifyVoteAndExtension(voteSet.chainID, val.PubKey); err != nil { + return false, fmt.Errorf("failed to verify vote with ChainID %s and PubKey %s: %w", voteSet.chainID, val.PubKey, err) + } + } else { + if err := vote.Verify(voteSet.chainID, val.PubKey); err != nil { + return false, fmt.Errorf("failed to verify vote with ChainID %s and PubKey %s: %w", voteSet.chainID, val.PubKey, err) + } + if len(vote.ExtensionSignature) > 0 || len(vote.Extension) > 0 { + return false, errors.New("unexpected vote extension data present in vote") + } } // Add vote and get conflicting vote if any. diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 8d166d508d..e35da74914 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -498,6 +498,92 @@ func TestVoteSet_MakeCommit(t *testing.T) { } } +// TestVoteSet_VoteExtensionsEnabled tests that the vote set correctly validates +// vote extensions data when either required or not required. 
+func TestVoteSet_VoteExtensionsEnabled(t *testing.T) { + for _, tc := range []struct { + name string + requireExtensions bool + addExtension bool + exepectError bool + }{ + { + name: "no extension but expected", + requireExtensions: true, + addExtension: false, + exepectError: true, + }, + { + name: "invalid extensions but not expected", + requireExtensions: true, + addExtension: false, + exepectError: true, + }, + { + name: "no extension and not expected", + requireExtensions: false, + addExtension: false, + exepectError: false, + }, + { + name: "extension and expected", + requireExtensions: true, + addExtension: true, + exepectError: false, + }, + } { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + height, round := int64(1), int32(0) + valSet, privValidators := randValidatorPrivValSet(ctx, t, 5, 10) + var voteSet *VoteSet + if tc.requireExtensions { + voteSet = NewExtendedVoteSet("test_chain_id", height, round, tmproto.PrecommitType, valSet) + } else { + voteSet = NewVoteSet("test_chain_id", height, round, tmproto.PrecommitType, valSet) + } + + val0 := privValidators[0] + + val0p, err := val0.GetPubKey(ctx) + require.NoError(t, err) + val0Addr := val0p.Address() + blockHash := crypto.CRandBytes(32) + blockPartsTotal := uint32(123) + blockPartSetHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} + + vote := &Vote{ + ValidatorAddress: val0Addr, + ValidatorIndex: 0, + Height: height, + Round: round, + Type: tmproto.PrecommitType, + Timestamp: tmtime.Now(), + BlockID: BlockID{blockHash, blockPartSetHeader}, + } + v := vote.ToProto() + err = val0.SignVote(ctx, voteSet.ChainID(), v) + require.NoError(t, err) + vote.Signature = v.Signature + + if tc.addExtension { + vote.ExtensionSignature = v.ExtensionSignature + } + + added, err := voteSet.AddVote(vote) + if tc.exepectError { + require.Error(t, err) + require.False(t, added) + } else { + require.NoError(t, err) + require.True(t, added) 
+ } + }) + } +} + // NOTE: privValidators are in order func randVoteSet( ctx context.Context, @@ -510,7 +596,7 @@ func randVoteSet( ) (*VoteSet, *ValidatorSet, []PrivValidator) { t.Helper() valSet, privValidators := randValidatorPrivValSet(ctx, t, numValidators, votingPower) - return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators + return NewExtendedVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators } func deterministicVoteSet( @@ -523,7 +609,7 @@ func deterministicVoteSet( ) (*VoteSet, *ValidatorSet, []PrivValidator) { t.Helper() valSet, privValidators := deterministicValidatorSet(ctx, t) - return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators + return NewExtendedVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators } func randValidatorPrivValSet(ctx context.Context, t testing.TB, numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) { diff --git a/types/vote_test.go b/types/vote_test.go index 70cd913812..d0819d7c4c 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -267,7 +267,7 @@ func TestVoteExtension(t *testing.T) { if tc.includeSignature { vote.ExtensionSignature = v.ExtensionSignature } - err = vote.VerifyWithExtension("test_chain_id", pk) + err = vote.VerifyExtension("test_chain_id", pk) if tc.expectError { require.Error(t, err) } else { @@ -361,7 +361,7 @@ func TestValidVotes(t *testing.T) { signVote(ctx, t, privVal, "test_chain_id", tc.vote) tc.malleateVote(tc.vote) require.NoError(t, tc.vote.ValidateBasic(), "ValidateBasic for %s", tc.name) - require.NoError(t, tc.vote.ValidateWithExtension(), "ValidateWithExtension for %s", tc.name) + require.NoError(t, tc.vote.EnsureExtension(), "EnsureExtension for %s", tc.name) } } @@ -387,13 +387,13 @@ func TestInvalidVotes(t *testing.T) { signVote(ctx, t, privVal, "test_chain_id", prevote) tc.malleateVote(prevote) require.Error(t, 
prevote.ValidateBasic(), "ValidateBasic for %s in invalid prevote", tc.name) - require.Error(t, prevote.ValidateWithExtension(), "ValidateWithExtension for %s in invalid prevote", tc.name) + require.NoError(t, prevote.EnsureExtension(), "EnsureExtension for %s in invalid prevote", tc.name) precommit := examplePrecommit(t) signVote(ctx, t, privVal, "test_chain_id", precommit) tc.malleateVote(precommit) require.Error(t, precommit.ValidateBasic(), "ValidateBasic for %s in invalid precommit", tc.name) - require.Error(t, precommit.ValidateWithExtension(), "ValidateWithExtension for %s in invalid precommit", tc.name) + require.NoError(t, precommit.EnsureExtension(), "EnsureExtension for %s in invalid precommit", tc.name) } } @@ -414,7 +414,7 @@ func TestInvalidPrevotes(t *testing.T) { signVote(ctx, t, privVal, "test_chain_id", prevote) tc.malleateVote(prevote) require.Error(t, prevote.ValidateBasic(), "ValidateBasic for %s", tc.name) - require.Error(t, prevote.ValidateWithExtension(), "ValidateWithExtension for %s", tc.name) + require.NoError(t, prevote.EnsureExtension(), "EnsureExtension for %s", tc.name) } } @@ -431,18 +431,44 @@ func TestInvalidPrecommitExtensions(t *testing.T) { v.Extension = []byte("extension") v.ExtensionSignature = nil }}, - // TODO(thane): Re-enable once https://github.com/tendermint/tendermint/issues/8272 is resolved - //{"missing vote extension signature", func(v *Vote) { v.ExtensionSignature = nil }}, {"oversized vote extension signature", func(v *Vote) { v.ExtensionSignature = make([]byte, MaxSignatureSize+1) }}, } for _, tc := range testCases { precommit := examplePrecommit(t) signVote(ctx, t, privVal, "test_chain_id", precommit) tc.malleateVote(precommit) - // We don't expect an error from ValidateBasic, because it doesn't - // handle vote extensions. 
- require.NoError(t, precommit.ValidateBasic(), "ValidateBasic for %s", tc.name) - require.Error(t, precommit.ValidateWithExtension(), "ValidateWithExtension for %s", tc.name) + // ValidateBasic ensures that vote extensions, if present, are well formed + require.Error(t, precommit.ValidateBasic(), "ValidateBasic for %s", tc.name) + } +} + +func TestEnsureVoteExtension(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + privVal := NewMockPV() + + testCases := []struct { + name string + malleateVote func(*Vote) + expectError bool + }{ + {"vote extension signature absent", func(v *Vote) { + v.Extension = nil + v.ExtensionSignature = nil + }, true}, + {"vote extension signature present", func(v *Vote) { + v.ExtensionSignature = []byte("extension signature") + }, false}, + } + for _, tc := range testCases { + precommit := examplePrecommit(t) + signVote(ctx, t, privVal, "test_chain_id", precommit) + tc.malleateVote(precommit) + if tc.expectError { + require.Error(t, precommit.EnsureExtension(), "EnsureExtension for %s", tc.name) + } else { + require.NoError(t, precommit.EnsureExtension(), "EnsureExtension for %s", tc.name) + } } } @@ -497,7 +523,7 @@ func getSampleCommit(ctx context.Context, t testing.TB) *Commit { require.NoError(t, err) - return commit.StripExtensions() + return commit.ToCommit() } func BenchmarkVoteSignBytes(b *testing.B) { From 3bf2875f807ae0218bc8e73b4862d90390e9617c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 May 2022 09:54:39 +0000 Subject: [PATCH 050/203] build(deps): Bump goreleaser/goreleaser-action from 2 to 3 (#8590) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [goreleaser/goreleaser-action](https://github.com/goreleaser/goreleaser-action) from 2 to 3.
Release notes

Sourced from goreleaser/goreleaser-action's releases.

v3.0.0

What's Changed

New Contributors

Full Changelog: https://github.com/goreleaser/goreleaser-action/compare/v2.9.1...v3.0.0

v2.9.1

What's Changed

Full Changelog: https://github.com/goreleaser/goreleaser-action/compare/v2...v2.9.1

v2.9.0

What's Changed

Full Changelog: https://github.com/goreleaser/goreleaser-action/compare/v2.8.1...v2.9.0

v2.8.1

What's Changed

Full Changelog: https://github.com/goreleaser/goreleaser-action/compare/v2.8.0...v2.8.1

v2.8.0

What's Changed

... (truncated)

Commits
  • 68acf3b chore(deps): bump @​actions/tool-cache from 1.7.2 to 2.0.1 (#355)
  • 46da113 chore: node 16 as default runtime (#343)
  • 223909a chore: update
  • c56d8df Revert "chore(deps): bump @​actions/core from 1.6.0 to 1.8.2 (#354)"
  • d1c2f83 chore(deps): bump @​actions/core from 1.6.0 to 1.8.2 (#354)
  • 5c65fd8 chore(deps): bump @​actions/http-client from 1.0.11 to 2.0.1 (#353)
  • 46cd12b chore(deps): bump yargs from 17.4.1 to 17.5.1 (#352)
  • 822d1bf chore(deps): bump docker/bake-action from 1 to 2 (#346)
  • c25888f chore: update dev dependencies and workflow (#342)
  • ec57748 chore(deps): bump yargs from 17.4.0 to 17.4.1 (#339)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=goreleaser/goreleaser-action&package-manager=github_actions&previous-version=2&new-version=3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- .github/workflows/release.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ec4fa810b5..2e0cd548c5 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -21,7 +21,7 @@ jobs: go-version: '1.17' - name: Build - uses: goreleaser/goreleaser-action@v2 + uses: goreleaser/goreleaser-action@v3 if: ${{ github.event_name == 'pull_request' }} with: version: latest @@ -30,7 +30,7 @@ jobs: - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v2 + uses: goreleaser/goreleaser-action@v3 if: startsWith(github.ref, 'refs/tags/') with: version: latest From 6ff77eece357f2b2cc17cb39eebbb0d1ef1a38d3 Mon Sep 17 00:00:00 2001 From: Jasmina Malicevic Date: Mon, 23 May 2022 12:28:24 +0200 Subject: [PATCH 051/203] light/http: added check for err == nil (#8579) --- light/provider/http/http.go | 54 +++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/light/provider/http/http.go b/light/provider/http/http.go index cf443e1b52..455c5cbaa5 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -173,8 +173,7 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato attempt := uint16(0) for { res, err := p.client.Validators(ctx, height, &page, &perPage) - switch e := err.(type) { - case nil: // success!! 
Now we validate the response + if err == nil { if len(res.Validators) == 0 { return nil, provider.ErrBadLightBlock{ Reason: fmt.Errorf("validator set is empty (height: %d, page: %d, per_page: %d)", @@ -187,35 +186,37 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato res.Total, height, page, perPage), } } - - case *url.Error: - if e.Timeout() { - // if we have exceeded retry attempts then return a no response error - if attempt == p.maxRetryAttempts { - return nil, p.noResponse() + } else { + switch e := err.(type) { + + case *url.Error: + if e.Timeout() { + // if we have exceeded retry attempts then return a no response error + if attempt == p.maxRetryAttempts { + return nil, p.noResponse() + } + attempt++ + // request timed out: we wait and try again with exponential backoff + time.Sleep(backoffTimeout(attempt)) + continue } - attempt++ - // request timed out: we wait and try again with exponential backoff - time.Sleep(backoffTimeout(attempt)) - continue - } - return nil, provider.ErrBadLightBlock{Reason: e} + return nil, provider.ErrBadLightBlock{Reason: e} - case *rpctypes.RPCError: - // process the rpc error and return the corresponding error to the light client - return nil, p.parseRPCError(e) + case *rpctypes.RPCError: + // process the rpc error and return the corresponding error to the light client + return nil, p.parseRPCError(e) - default: - // check if the error stems from the context - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return nil, err - } + default: + // check if the error stems from the context + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return nil, err + } - // If we don't know the error then by default we return an unreliable provider error and - // terminate the connection with the peer. 
- return nil, provider.ErrUnreliableProvider{Reason: e} + // If we don't know the error then by default we return an unreliable provider error and + // terminate the connection with the peer. + return nil, provider.ErrUnreliableProvider{Reason: e} + } } - // update the total and increment the page index so we can fetch the // next page of validators if need be total = res.Total @@ -223,6 +224,7 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato page++ break } + } valSet, err := types.ValidatorSetFromExistingValidators(vals) From 43313e9b85d8202bf6eabbe91bfc20e06bb61d8f Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Mon, 23 May 2022 14:23:23 -0400 Subject: [PATCH 052/203] abci++: add proto fields for enabling vote extensions (#8587) This pull requests adds the protocol buffer field for the `ABCI.VoteExtensionsEnableHeight` parameter. This proto field is threaded throughout all of the relevant places where consensus params are used and referenced. This PR also adds validation of the consensus param updates. Previous consensus param changes didn't depend on _previous_ versions of the params, so this change adds a method for validating against the old params as well. 
closes: #8453 --- internal/state/execution.go | 9 +- internal/state/store.go | 20 -- proto/tendermint/blocksync/types.proto | 2 +- proto/tendermint/privval/service.proto | 2 +- proto/tendermint/types/params.pb.go | 413 ++++++++++++++++++++----- proto/tendermint/types/params.proto | 15 + test/e2e/generator/generate.go | 7 + test/e2e/pkg/manifest.go | 5 + test/e2e/pkg/testnet.go | 29 +- test/e2e/runner/evidence.go | 8 +- test/e2e/runner/setup.go | 1 + test/e2e/tests/app_test.go | 16 +- types/params.go | 37 +++ types/params_test.go | 116 ++++++- 14 files changed, 552 insertions(+), 128 deletions(-) diff --git a/internal/state/execution.go b/internal/state/execution.go index 47c2cb7aef..1d87104d41 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -534,7 +534,7 @@ func (state State) Update( if len(validatorUpdates) > 0 { err := nValSet.UpdateWithChangeSet(validatorUpdates) if err != nil { - return state, fmt.Errorf("error changing validator set: %w", err) + return state, fmt.Errorf("changing validator set: %w", err) } // Change results from this height but only applies to the next next height. 
lastHeightValsChanged = header.Height + 1 + 1 @@ -551,7 +551,12 @@ func (state State) Update( nextParams = state.ConsensusParams.UpdateConsensusParams(consensusParamUpdates) err := nextParams.ValidateConsensusParams() if err != nil { - return state, fmt.Errorf("error updating consensus params: %w", err) + return state, fmt.Errorf("updating consensus params: %w", err) + } + + err = state.ConsensusParams.ValidateUpdate(consensusParamUpdates, header.Height) + if err != nil { + return state, fmt.Errorf("updating consensus params: %w", err) } state.Version.Consensus.App = nextParams.Version.AppVersion diff --git a/internal/state/store.go b/internal/state/store.go index d592016f62..a41719c925 100644 --- a/internal/state/store.go +++ b/internal/state/store.go @@ -2,7 +2,6 @@ package state import ( "bytes" - "encoding/binary" "errors" "fmt" @@ -60,7 +59,6 @@ func abciResponsesKey(height int64) []byte { // stateKey should never change after being set in init() var stateKey []byte -var tmpABCIKey []byte func init() { var err error @@ -68,12 +66,6 @@ func init() { if err != nil { panic(err) } - // temporary extra key before consensus param protos are regenerated - // TODO(wbanfield) remove in next PR - tmpABCIKey, err = orderedcode.Append(nil, int64(10000)) - if err != nil { - panic(err) - } } //---------------------- @@ -145,13 +137,6 @@ func (store dbStore) loadState(key []byte) (state State, err error) { if err != nil { return state, err } - buf, err = store.db.Get(tmpABCIKey) - if err != nil { - return state, err - } - h, _ := binary.Varint(buf) - sm.ConsensusParams.ABCI.VoteExtensionsEnableHeight = h - return *sm, nil } @@ -195,11 +180,6 @@ func (store dbStore) save(state State, key []byte) error { if err := batch.Set(key, stateBz); err != nil { return err } - bz := make([]byte, 5) - binary.PutVarint(bz, state.ConsensusParams.ABCI.VoteExtensionsEnableHeight) - if err := batch.Set(tmpABCIKey, bz); err != nil { - return err - } return batch.WriteSync() } diff --git 
a/proto/tendermint/blocksync/types.proto b/proto/tendermint/blocksync/types.proto index 67da76dce0..dca81db2b6 100644 --- a/proto/tendermint/blocksync/types.proto +++ b/proto/tendermint/blocksync/types.proto @@ -19,7 +19,7 @@ message NoBlockResponse { // BlockResponse returns block to the requested message BlockResponse { - tendermint.types.Block block = 1; + tendermint.types.Block block = 1; tendermint.types.ExtendedCommit ext_commit = 2; } diff --git a/proto/tendermint/privval/service.proto b/proto/tendermint/privval/service.proto index 2c699e1cd5..63e9afca75 100644 --- a/proto/tendermint/privval/service.proto +++ b/proto/tendermint/privval/service.proto @@ -1,6 +1,6 @@ syntax = "proto3"; package tendermint.privval; -option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; import "tendermint/privval/types.proto"; diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index 41d417b915..764d7b385a 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -36,6 +36,7 @@ type ConsensusParams struct { Version *VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` Synchrony *SynchronyParams `protobuf:"bytes,5,opt,name=synchrony,proto3" json:"synchrony,omitempty"` Timeout *TimeoutParams `protobuf:"bytes,6,opt,name=timeout,proto3" json:"timeout,omitempty"` + Abci *ABCIParams `protobuf:"bytes,7,opt,name=abci,proto3" json:"abci,omitempty"` } func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } @@ -113,6 +114,13 @@ func (m *ConsensusParams) GetTimeout() *TimeoutParams { return nil } +func (m *ConsensusParams) GetAbci() *ABCIParams { + if m != nil { + return m.Abci + } + return nil +} + // BlockParams contains limits on the block size. type BlockParams struct { // Max block size, in bytes. 
@@ -566,6 +574,60 @@ func (m *TimeoutParams) GetBypassCommitTimeout() bool { return false } +// ABCIParams configure functionality specific to the Application Blockchain Interface. +type ABCIParams struct { + // vote_extensions_enable_height configures the first height during which + // vote extensions will be enabled. During this specified height, and for all + // subsequent heights, precommit messages that do not contain valid extension data + // will be considered invalid. Prior to this height, vote extensions will not + // be used or accepted by validators on the network. + // + // Once enabled, vote extensions will be created by the application in ExtendVote, + // passed to the application for validation in VerifyVoteExtension and given + // to the application to use when proposing a block during PrepareProposal. + VoteExtensionsEnableHeight int64 `protobuf:"varint,1,opt,name=vote_extensions_enable_height,json=voteExtensionsEnableHeight,proto3" json:"vote_extensions_enable_height,omitempty"` +} + +func (m *ABCIParams) Reset() { *m = ABCIParams{} } +func (m *ABCIParams) String() string { return proto.CompactTextString(m) } +func (*ABCIParams) ProtoMessage() {} +func (*ABCIParams) Descriptor() ([]byte, []int) { + return fileDescriptor_e12598271a686f57, []int{8} +} +func (m *ABCIParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ABCIParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ABCIParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ABCIParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABCIParams.Merge(m, src) +} +func (m *ABCIParams) XXX_Size() int { + return m.Size() +} +func (m *ABCIParams) XXX_DiscardUnknown() { + xxx_messageInfo_ABCIParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ABCIParams proto.InternalMessageInfo + +func (m 
*ABCIParams) GetVoteExtensionsEnableHeight() int64 { + if m != nil { + return m.VoteExtensionsEnableHeight + } + return 0 +} + func init() { proto.RegisterType((*ConsensusParams)(nil), "tendermint.types.ConsensusParams") proto.RegisterType((*BlockParams)(nil), "tendermint.types.BlockParams") @@ -575,55 +637,60 @@ func init() { proto.RegisterType((*HashedParams)(nil), "tendermint.types.HashedParams") proto.RegisterType((*SynchronyParams)(nil), "tendermint.types.SynchronyParams") proto.RegisterType((*TimeoutParams)(nil), "tendermint.types.TimeoutParams") + proto.RegisterType((*ABCIParams)(nil), "tendermint.types.ABCIParams") } func init() { proto.RegisterFile("tendermint/types/params.proto", fileDescriptor_e12598271a686f57) } var fileDescriptor_e12598271a686f57 = []byte{ - // 680 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xcf, 0x6e, 0xd3, 0x4a, - 0x14, 0xc6, 0xe3, 0x26, 0x4d, 0x93, 0x93, 0xa6, 0xa9, 0xe6, 0xde, 0xab, 0xeb, 0xdb, 0xab, 0x3a, - 0xc5, 0x0b, 0x54, 0x09, 0xc9, 0x41, 0xad, 0x50, 0x85, 0xc4, 0x1f, 0x91, 0x06, 0x81, 0x84, 0x8a, - 0x90, 0x29, 0x2c, 0xba, 0xb1, 0xc6, 0xc9, 0xe0, 0x5a, 0x8d, 0x3d, 0x96, 0xc7, 0x8e, 0xe2, 0xb7, - 0x60, 0x85, 0x78, 0x04, 0x78, 0x93, 0x2e, 0xbb, 0x64, 0x05, 0x28, 0x7d, 0x03, 0xd6, 0x2c, 0xd0, - 0xfc, 0x6b, 0x9a, 0x94, 0xd2, 0xac, 0xe2, 0xcc, 0xf9, 0x7e, 0xfe, 0x3c, 0xdf, 0x39, 0x33, 0xb0, - 0x99, 0x91, 0x78, 0x40, 0xd2, 0x28, 0x8c, 0xb3, 0x4e, 0x56, 0x24, 0x84, 0x75, 0x12, 0x9c, 0xe2, - 0x88, 0x39, 0x49, 0x4a, 0x33, 0x8a, 0xd6, 0xa7, 0x65, 0x47, 0x94, 0x37, 0xfe, 0x0e, 0x68, 0x40, - 0x45, 0xb1, 0xc3, 0x9f, 0xa4, 0x6e, 0xc3, 0x0a, 0x28, 0x0d, 0x86, 0xa4, 0x23, 0xfe, 0xf9, 0xf9, - 0xbb, 0xce, 0x20, 0x4f, 0x71, 0x16, 0xd2, 0x58, 0xd6, 0xed, 0x9f, 0x4b, 0xd0, 0xda, 0xa7, 0x31, - 0x23, 0x31, 0xcb, 0xd9, 0x2b, 0xe1, 0x80, 0x76, 0x61, 0xd9, 0x1f, 0xd2, 0xfe, 0x89, 0x69, 0x6c, - 0x19, 0xdb, 0x8d, 0x9d, 0x4d, 0x67, 0xde, 0xcb, 0xe9, 0xf2, 0xb2, 0x54, 0xbb, 0x52, 0x8b, 
0x1e, - 0x40, 0x8d, 0x8c, 0xc2, 0x01, 0x89, 0xfb, 0xc4, 0x5c, 0x12, 0xdc, 0xd6, 0x55, 0xee, 0xa9, 0x52, - 0x28, 0xf4, 0x82, 0x40, 0x8f, 0xa1, 0x3e, 0xc2, 0xc3, 0x70, 0x80, 0x33, 0x9a, 0x9a, 0x65, 0x81, - 0xdf, 0xba, 0x8a, 0xbf, 0xd5, 0x12, 0xc5, 0x4f, 0x19, 0x74, 0x1f, 0x56, 0x46, 0x24, 0x65, 0x21, - 0x8d, 0xcd, 0x8a, 0xc0, 0xdb, 0xbf, 0xc1, 0xa5, 0x40, 0xc1, 0x5a, 0xcf, 0xbd, 0x59, 0x11, 0xf7, - 0x8f, 0x53, 0x1a, 0x17, 0xe6, 0xf2, 0x75, 0xde, 0xaf, 0xb5, 0x44, 0x7b, 0x5f, 0x30, 0xdc, 0x3b, - 0x0b, 0x23, 0x42, 0xf3, 0xcc, 0xac, 0x5e, 0xe7, 0x7d, 0x28, 0x05, 0xda, 0x5b, 0xe9, 0xed, 0x7d, - 0x68, 0x5c, 0xca, 0x12, 0xfd, 0x0f, 0xf5, 0x08, 0x8f, 0x3d, 0xbf, 0xc8, 0x08, 0x13, 0xe9, 0x97, - 0xdd, 0x5a, 0x84, 0xc7, 0x5d, 0xfe, 0x1f, 0xfd, 0x0b, 0x2b, 0xbc, 0x18, 0x60, 0x26, 0x02, 0x2e, - 0xbb, 0xd5, 0x08, 0x8f, 0x9f, 0x61, 0x66, 0x7f, 0x36, 0x60, 0x6d, 0x36, 0x59, 0x74, 0x07, 0x10, - 0xd7, 0xe2, 0x80, 0x78, 0x71, 0x1e, 0x79, 0xa2, 0x45, 0xfa, 0x8d, 0xad, 0x08, 0x8f, 0x9f, 0x04, - 0xe4, 0x65, 0x1e, 0x09, 0x6b, 0x86, 0x0e, 0x60, 0x5d, 0x8b, 0xf5, 0x74, 0xa8, 0x16, 0xfe, 0xe7, - 0xc8, 0xf1, 0x71, 0xf4, 0xf8, 0x38, 0x3d, 0x25, 0xe8, 0xd6, 0x4e, 0xbf, 0xb6, 0x4b, 0x1f, 0xbf, - 0xb5, 0x0d, 0x77, 0x4d, 0xbe, 0x4f, 0x57, 0x66, 0x37, 0x51, 0x9e, 0xdd, 0x84, 0x7d, 0x0f, 0x5a, - 0x73, 0x5d, 0x44, 0x36, 0x34, 0x93, 0xdc, 0xf7, 0x4e, 0x48, 0xe1, 0x89, 0xac, 0x4c, 0x63, 0xab, - 0xbc, 0x5d, 0x77, 0x1b, 0x49, 0xee, 0xbf, 0x20, 0xc5, 0x21, 0x5f, 0xb2, 0xef, 0x42, 0x73, 0xa6, - 0x7b, 0xa8, 0x0d, 0x0d, 0x9c, 0x24, 0x9e, 0xee, 0x39, 0xdf, 0x59, 0xc5, 0x05, 0x9c, 0x24, 0x4a, - 0x66, 0x1f, 0xc1, 0xea, 0x73, 0xcc, 0x8e, 0xc9, 0x40, 0x01, 0xb7, 0xa1, 0x25, 0x52, 0xf0, 0xe6, - 0x03, 0x6e, 0x8a, 0xe5, 0x03, 0x9d, 0xb2, 0x0d, 0xcd, 0xa9, 0x6e, 0x9a, 0x75, 0x43, 0xab, 0x78, - 0xe0, 0x1f, 0x0c, 0x68, 0xcd, 0xcd, 0x03, 0xea, 0x41, 0x33, 0x22, 0x8c, 0x89, 0x10, 0xc9, 0x10, - 0x17, 0xea, 0xf0, 0xfc, 0x21, 0xc1, 0x8a, 0x48, 0x6f, 0x55, 0x51, 0x3d, 0x0e, 0xa1, 0x87, 0x50, - 0x4f, 0x52, 0xd2, 0x0f, 0xd9, 
0x42, 0x3d, 0x90, 0x6f, 0x98, 0x12, 0xf6, 0x8f, 0x25, 0x68, 0xce, - 0x4c, 0x1a, 0x9f, 0xcd, 0x24, 0xa5, 0x09, 0x65, 0x64, 0xd1, 0x0f, 0xd2, 0x7a, 0xbe, 0x23, 0xf5, - 0xc8, 0x77, 0x94, 0xe1, 0x45, 0xbf, 0x67, 0x55, 0x51, 0x3d, 0x0e, 0xa1, 0x5d, 0xa8, 0x8c, 0x68, - 0x46, 0xd4, 0xa1, 0xbe, 0x11, 0x16, 0x62, 0xf4, 0x08, 0x80, 0xff, 0x2a, 0xdf, 0xca, 0x82, 0x39, - 0x70, 0x44, 0x9a, 0xee, 0x41, 0xb5, 0x4f, 0xa3, 0x28, 0xcc, 0xd4, 0x79, 0xbe, 0x91, 0x55, 0x72, - 0xb4, 0x03, 0xff, 0xf8, 0x45, 0x82, 0x19, 0xf3, 0xe4, 0x82, 0x77, 0xf9, 0x60, 0xd7, 0xdc, 0xbf, - 0x64, 0x71, 0x5f, 0xd4, 0x54, 0xd0, 0xdd, 0x37, 0x9f, 0x26, 0x96, 0x71, 0x3a, 0xb1, 0x8c, 0xb3, - 0x89, 0x65, 0x7c, 0x9f, 0x58, 0xc6, 0xfb, 0x73, 0xab, 0x74, 0x76, 0x6e, 0x95, 0xbe, 0x9c, 0x5b, - 0xa5, 0xa3, 0xbd, 0x20, 0xcc, 0x8e, 0x73, 0xdf, 0xe9, 0xd3, 0xa8, 0x73, 0xf9, 0x4a, 0x9f, 0x3e, - 0xca, 0x3b, 0x7b, 0xfe, 0xba, 0xf7, 0xab, 0x62, 0x7d, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, - 0xfc, 0x06, 0xae, 0x9f, 0x09, 0x06, 0x00, 0x00, + // 741 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xcd, 0x6e, 0xd3, 0x4a, + 0x14, 0x80, 0xe3, 0x26, 0x4d, 0x93, 0x93, 0xa6, 0xa9, 0xe6, 0xde, 0xab, 0x6b, 0x0a, 0x75, 0x8a, + 0x17, 0xa8, 0x12, 0x92, 0x53, 0xb5, 0x42, 0x15, 0x12, 0x3f, 0x6a, 0x92, 0x8a, 0x22, 0x54, 0x40, + 0xa1, 0xb0, 0xe8, 0xc6, 0x1a, 0x27, 0x83, 0x63, 0x35, 0xf6, 0x58, 0x9e, 0x71, 0x14, 0xbf, 0x05, + 0x2b, 0xc4, 0x23, 0xc0, 0x86, 0xe7, 0xe8, 0xb2, 0x4b, 0x56, 0x80, 0xd2, 0x37, 0xe0, 0x09, 0xd0, + 0x8c, 0xc7, 0x4d, 0x93, 0x52, 0x9a, 0x55, 0x9c, 0x39, 0xdf, 0xe7, 0xe3, 0x39, 0xe7, 0xd8, 0x03, + 0xeb, 0x9c, 0x04, 0x3d, 0x12, 0xf9, 0x5e, 0xc0, 0x1b, 0x3c, 0x09, 0x09, 0x6b, 0x84, 0x38, 0xc2, + 0x3e, 0xb3, 0xc2, 0x88, 0x72, 0x8a, 0x56, 0x27, 0x61, 0x4b, 0x86, 0xd7, 0xfe, 0x75, 0xa9, 0x4b, + 0x65, 0xb0, 0x21, 0xae, 0x52, 0x6e, 0xcd, 0x70, 0x29, 0x75, 0x07, 0xa4, 0x21, 0xff, 0x39, 0xf1, + 0xfb, 0x46, 0x2f, 0x8e, 0x30, 0xf7, 0x68, 0x90, 0xc6, 0xcd, 0xaf, 
0x79, 0xa8, 0xb5, 0x68, 0xc0, + 0x48, 0xc0, 0x62, 0xf6, 0x5a, 0x66, 0x40, 0x3b, 0xb0, 0xe8, 0x0c, 0x68, 0xf7, 0x44, 0xd7, 0x36, + 0xb4, 0xcd, 0xca, 0xf6, 0xba, 0x35, 0x9b, 0xcb, 0x6a, 0x8a, 0x70, 0x4a, 0x77, 0x52, 0x16, 0x3d, + 0x82, 0x12, 0x19, 0x7a, 0x3d, 0x12, 0x74, 0x89, 0xbe, 0x20, 0xbd, 0x8d, 0xab, 0xde, 0xbe, 0x22, + 0x94, 0x7a, 0x61, 0xa0, 0xa7, 0x50, 0x1e, 0xe2, 0x81, 0xd7, 0xc3, 0x9c, 0x46, 0x7a, 0x5e, 0xea, + 0x77, 0xaf, 0xea, 0xef, 0x32, 0x44, 0xf9, 0x13, 0x07, 0x3d, 0x84, 0xa5, 0x21, 0x89, 0x98, 0x47, + 0x03, 0xbd, 0x20, 0xf5, 0xfa, 0x1f, 0xf4, 0x14, 0x50, 0x72, 0xc6, 0x8b, 0xdc, 0x2c, 0x09, 0xba, + 0xfd, 0x88, 0x06, 0x89, 0xbe, 0x78, 0x5d, 0xee, 0x37, 0x19, 0x92, 0xe5, 0xbe, 0x70, 0x44, 0x6e, + 0xee, 0xf9, 0x84, 0xc6, 0x5c, 0x2f, 0x5e, 0x97, 0xfb, 0x28, 0x05, 0xb2, 0xdc, 0x8a, 0x47, 0x5b, + 0x50, 0xc0, 0x4e, 0xd7, 0xd3, 0x97, 0xa4, 0x77, 0xe7, 0xaa, 0xb7, 0xd7, 0x6c, 0x3d, 0x57, 0x92, + 0x24, 0xcd, 0x16, 0x54, 0x2e, 0x55, 0x1f, 0xdd, 0x86, 0xb2, 0x8f, 0x47, 0xb6, 0x93, 0x70, 0xc2, + 0x64, 0xbf, 0xf2, 0x9d, 0x92, 0x8f, 0x47, 0x4d, 0xf1, 0x1f, 0xfd, 0x0f, 0x4b, 0x22, 0xe8, 0x62, + 0x26, 0x5b, 0x92, 0xef, 0x14, 0x7d, 0x3c, 0x7a, 0x86, 0x99, 0xf9, 0x45, 0x83, 0x95, 0xe9, 0x5e, + 0xa0, 0xfb, 0x80, 0x04, 0x8b, 0x5d, 0x62, 0x07, 0xb1, 0x6f, 0xcb, 0xa6, 0x66, 0x77, 0xac, 0xf9, + 0x78, 0xb4, 0xe7, 0x92, 0x97, 0xb1, 0x2f, 0x53, 0x33, 0x74, 0x08, 0xab, 0x19, 0x9c, 0xcd, 0x93, + 0x6a, 0xfa, 0x2d, 0x2b, 0x1d, 0x38, 0x2b, 0x1b, 0x38, 0xab, 0xad, 0x80, 0x66, 0xe9, 0xf4, 0x7b, + 0x3d, 0xf7, 0xe9, 0x47, 0x5d, 0xeb, 0xac, 0xa4, 0xf7, 0xcb, 0x22, 0xd3, 0x9b, 0xc8, 0x4f, 0x6f, + 0xc2, 0x7c, 0x00, 0xb5, 0x99, 0xbe, 0x23, 0x13, 0xaa, 0x61, 0xec, 0xd8, 0x27, 0x24, 0xb1, 0x65, + 0x95, 0x74, 0x6d, 0x23, 0xbf, 0x59, 0xee, 0x54, 0xc2, 0xd8, 0x79, 0x41, 0x92, 0x23, 0xb1, 0x64, + 0x6e, 0x41, 0x75, 0xaa, 0xdf, 0xa8, 0x0e, 0x15, 0x1c, 0x86, 0x76, 0x36, 0x25, 0x62, 0x67, 0x85, + 0x0e, 0xe0, 0x30, 0x54, 0x98, 0x79, 0x0c, 0xcb, 0x07, 0x98, 0xf5, 0x49, 0x4f, 0x09, 0xf7, 0xa0, + 0x26, 
0xab, 0x60, 0xcf, 0x16, 0xb8, 0x2a, 0x97, 0x0f, 0xb3, 0x2a, 0x9b, 0x50, 0x9d, 0x70, 0x93, + 0x5a, 0x57, 0x32, 0x4a, 0x14, 0xfc, 0xa3, 0x06, 0xb5, 0x99, 0x09, 0x42, 0x6d, 0xa8, 0xfa, 0x84, + 0x31, 0x59, 0x44, 0x32, 0xc0, 0x89, 0x7a, 0xdd, 0xfe, 0x52, 0xc1, 0x82, 0xac, 0xde, 0xb2, 0xb2, + 0xda, 0x42, 0x42, 0x8f, 0xa1, 0x1c, 0x46, 0xa4, 0xeb, 0xb1, 0xb9, 0x7a, 0x90, 0xde, 0x61, 0x62, + 0x98, 0xbf, 0x16, 0xa0, 0x3a, 0x35, 0x9b, 0x62, 0x9a, 0xc3, 0x88, 0x86, 0x94, 0x91, 0x79, 0x1f, + 0x28, 0xe3, 0xc5, 0x8e, 0xd4, 0xa5, 0xd8, 0x11, 0xc7, 0xf3, 0x3e, 0xcf, 0xb2, 0xb2, 0xda, 0x42, + 0x42, 0x3b, 0x50, 0x18, 0x52, 0x4e, 0xd4, 0x67, 0xe0, 0x46, 0x59, 0xc2, 0xe8, 0x09, 0x80, 0xf8, + 0x55, 0x79, 0x0b, 0x73, 0xd6, 0x41, 0x28, 0x69, 0xd2, 0x5d, 0x28, 0x76, 0xa9, 0xef, 0x7b, 0x5c, + 0x7d, 0x01, 0x6e, 0x74, 0x15, 0x8e, 0xb6, 0xe1, 0x3f, 0x27, 0x09, 0x31, 0x63, 0x76, 0xba, 0x60, + 0x5f, 0xfe, 0x14, 0x94, 0x3a, 0xff, 0xa4, 0xc1, 0x96, 0x8c, 0xa9, 0x42, 0x9b, 0xaf, 0x00, 0x26, + 0xef, 0x35, 0xda, 0x83, 0x75, 0xf9, 0xe8, 0x64, 0xc4, 0x49, 0x20, 0x9a, 0xc2, 0x6c, 0x12, 0x60, + 0x67, 0x40, 0xec, 0x3e, 0xf1, 0xdc, 0x3e, 0x57, 0x53, 0xb7, 0x26, 0xa0, 0xfd, 0x0b, 0x66, 0x5f, + 0x22, 0x07, 0x92, 0x68, 0xbe, 0xfd, 0x3c, 0x36, 0xb4, 0xd3, 0xb1, 0xa1, 0x9d, 0x8d, 0x0d, 0xed, + 0xe7, 0xd8, 0xd0, 0x3e, 0x9c, 0x1b, 0xb9, 0xb3, 0x73, 0x23, 0xf7, 0xed, 0xdc, 0xc8, 0x1d, 0xef, + 0xba, 0x1e, 0xef, 0xc7, 0x8e, 0xd5, 0xa5, 0x7e, 0xe3, 0xf2, 0xa9, 0x32, 0xb9, 0x4c, 0x8f, 0x8d, + 0xd9, 0x13, 0xc7, 0x29, 0xca, 0xf5, 0x9d, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x28, 0x35, 0x60, + 0x76, 0x8c, 0x06, 0x00, 0x00, } func (this *ConsensusParams) Equal(that interface{}) bool { @@ -663,6 +730,9 @@ func (this *ConsensusParams) Equal(that interface{}) bool { if !this.Timeout.Equal(that1.Timeout) { return false } + if !this.Abci.Equal(that1.Abci) { + return false + } return true } func (this *BlockParams) Equal(that interface{}) bool { @@ -910,6 +980,30 @@ func (this *TimeoutParams) Equal(that interface{}) bool { } 
return true } +func (this *ABCIParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ABCIParams) + if !ok { + that2, ok := that.(ABCIParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.VoteExtensionsEnableHeight != that1.VoteExtensionsEnableHeight { + return false + } + return true +} func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -930,6 +1024,18 @@ func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Abci != nil { + { + size, err := m.Abci.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.Timeout != nil { { size, err := m.Timeout.MarshalToSizedBuffer(dAtA[:i]) @@ -1063,12 +1169,12 @@ func (m *EvidenceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x18 } - n7, err7 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) - if err7 != nil { - return 0, err7 + n8, err8 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) + if err8 != nil { + return 0, err8 } - i -= n7 - i = encodeVarintParams(dAtA, i, uint64(n7)) + i -= n8 + i = encodeVarintParams(dAtA, i, uint64(n8)) i-- dAtA[i] = 0x12 if m.MaxAgeNumBlocks != 0 { @@ -1193,22 +1299,22 @@ func (m *SynchronyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if m.Precision != nil { - n8, err8 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Precision, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision):]) - if err8 != nil { - return 0, err8 + n9, err9 := 
github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Precision, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision):]) + if err9 != nil { + return 0, err9 } - i -= n8 - i = encodeVarintParams(dAtA, i, uint64(n8)) + i -= n9 + i = encodeVarintParams(dAtA, i, uint64(n9)) i-- dAtA[i] = 0x12 } if m.MessageDelay != nil { - n9, err9 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.MessageDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay):]) - if err9 != nil { - return 0, err9 + n10, err10 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.MessageDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay):]) + if err10 != nil { + return 0, err10 } - i -= n9 - i = encodeVarintParams(dAtA, i, uint64(n9)) + i -= n10 + i = encodeVarintParams(dAtA, i, uint64(n10)) i-- dAtA[i] = 0xa } @@ -1246,58 +1352,86 @@ func (m *TimeoutParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x30 } if m.Commit != nil { - n10, err10 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Commit, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Commit):]) - if err10 != nil { - return 0, err10 - } - i -= n10 - i = encodeVarintParams(dAtA, i, uint64(n10)) - i-- - dAtA[i] = 0x2a - } - if m.VoteDelta != nil { - n11, err11 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.VoteDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.VoteDelta):]) + n11, err11 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Commit, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Commit):]) if err11 != nil { return 0, err11 } i -= n11 i = encodeVarintParams(dAtA, i, uint64(n11)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x2a } - if m.Vote != nil { - n12, err12 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Vote, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Vote):]) + if m.VoteDelta != nil { + n12, err12 := 
github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.VoteDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.VoteDelta):]) if err12 != nil { return 0, err12 } i -= n12 i = encodeVarintParams(dAtA, i, uint64(n12)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 } - if m.ProposeDelta != nil { - n13, err13 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.ProposeDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.ProposeDelta):]) + if m.Vote != nil { + n13, err13 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Vote, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Vote):]) if err13 != nil { return 0, err13 } i -= n13 i = encodeVarintParams(dAtA, i, uint64(n13)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - if m.Propose != nil { - n14, err14 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Propose, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Propose):]) + if m.ProposeDelta != nil { + n14, err14 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.ProposeDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.ProposeDelta):]) if err14 != nil { return 0, err14 } i -= n14 i = encodeVarintParams(dAtA, i, uint64(n14)) i-- + dAtA[i] = 0x12 + } + if m.Propose != nil { + n15, err15 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Propose, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Propose):]) + if err15 != nil { + return 0, err15 + } + i -= n15 + i = encodeVarintParams(dAtA, i, uint64(n15)) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } +func (m *ABCIParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ABCIParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ABCIParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) 
+ _ = i + var l int + _ = l + if m.VoteExtensionsEnableHeight != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.VoteExtensionsEnableHeight)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func encodeVarintParams(dAtA []byte, offset int, v uint64) int { offset -= sovParams(v) base := offset @@ -1339,6 +1473,10 @@ func (m *ConsensusParams) Size() (n int) { l = m.Timeout.Size() n += 1 + l + sovParams(uint64(l)) } + if m.Abci != nil { + l = m.Abci.Size() + n += 1 + l + sovParams(uint64(l)) + } return n } @@ -1465,6 +1603,18 @@ func (m *TimeoutParams) Size() (n int) { return n } +func (m *ABCIParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VoteExtensionsEnableHeight != 0 { + n += 1 + sovParams(uint64(m.VoteExtensionsEnableHeight)) + } + return n +} + func sovParams(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1716,6 +1866,42 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Abci", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Abci == nil { + m.Abci = &ABCIParams{} + } + if err := m.Abci.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) @@ -2557,6 +2743,75 @@ func (m *TimeoutParams) Unmarshal(dAtA []byte) error { } return nil } +func (m *ABCIParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ABCIParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ABCIParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtensionsEnableHeight", wireType) + } + m.VoteExtensionsEnableHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VoteExtensionsEnableHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipParams(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto index 466ba464fe..21bbd037d8 100644 --- a/proto/tendermint/types/params.proto +++ b/proto/tendermint/types/params.proto @@ -17,6 +17,7 @@ message ConsensusParams { VersionParams version = 4; SynchronyParams synchrony = 5; TimeoutParams timeout = 6; + ABCIParams abci = 7; } // BlockParams contains limits on the block size. @@ -127,3 +128,17 @@ message TimeoutParams { // for the full commit timeout. bool bypass_commit_timeout = 6; } + +// ABCIParams configure functionality specific to the Application Blockchain Interface. 
+message ABCIParams { + // vote_extensions_enable_height configures the first height during which + // vote extensions will be enabled. During this specified height, and for all + // subsequent heights, precommit messages that do not contain valid extension data + // will be considered invalid. Prior to this height, vote extensions will not + // be used or accepted by validators on the network. + // + // Once enabled, vote extensions will be created by the application in ExtendVote, + // passed to the application for validation in VerifyVoteExtension and given + // to the application to use when proposing a block during PrepareProposal. + int64 vote_extensions_enable_height = 1; +} diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 90c19e6ffa..5f917d746e 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -66,6 +66,9 @@ var ( txSize = uniformChoice{1024, 4096} // either 1kb or 4kb ipv6 = uniformChoice{false, true} keyType = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1} + + voteExtensionEnableHeightOffset = uniformChoice{int64(0), int64(10), int64(100)} + voteExtensionEnabled = uniformChoice{true, false} ) // Generate generates random testnets using the given RNG. @@ -116,6 +119,10 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er TxSize: txSize.Choose(r).(int), } + if voteExtensionEnabled.Choose(r).(bool) { + manifest.VoteExtensionsEnableHeight = manifest.InitialHeight + voteExtensionEnableHeightOffset.Choose(r).(int64) + } + var numSeeds, numValidators, numFulls, numLightClients int switch opt["topology"].(string) { case "single": diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 895e629395..dd2ad02bac 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -66,6 +66,11 @@ type Manifest struct { // Number of bytes per tx. 
Default is 1kb (1024) TxSize int + // VoteExtensionsEnableHeight configures the first height during which + // the chain will use and require vote extension data to be present + // in precommit messages. + VoteExtensionsEnableHeight int64 `toml:"vote_extensions_enable_height"` + // ABCIProtocol specifies the protocol used to communicate with the ABCI // application: "unix", "tcp", "grpc", or "builtin". Defaults to builtin. // builtin will build a complete Tendermint node into the application and diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index f4b75c71a9..ad79c99c6d 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -58,20 +58,21 @@ const ( // Testnet represents a single testnet. type Testnet struct { - Name string - File string - Dir string - IP *net.IPNet - InitialHeight int64 - InitialState map[string]string - Validators map[*Node]int64 - ValidatorUpdates map[int64]map[*Node]int64 - Nodes []*Node - KeyType string - Evidence int - LogLevel string - TxSize int - ABCIProtocol string + Name string + File string + Dir string + IP *net.IPNet + InitialHeight int64 + InitialState map[string]string + Validators map[*Node]int64 + ValidatorUpdates map[int64]map[*Node]int64 + Nodes []*Node + KeyType string + Evidence int + VoteExtensionsEnableHeight int64 + LogLevel string + TxSize int + ABCIProtocol string } // Node represents a Tendermint node in a testnet. 
diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index fab1d7b20e..9050c52bd2 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -86,9 +86,15 @@ func InjectEvidence(ctx context.Context, logger log.Logger, r *rand.Rand, testne privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, ) } else { - ev, err = generateDuplicateVoteEvidence(ctx, + var dve *types.DuplicateVoteEvidence + dve, err = generateDuplicateVoteEvidence(ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, ) + if dve.VoteA.Height < testnet.VoteExtensionsEnableHeight { + dve.VoteA.StripExtension() + dve.VoteB.StripExtension() + } + ev = dve } if err != nil { return err diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 507dc2d044..5f78a5b35a 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -209,6 +209,7 @@ func MakeGenesis(testnet *e2e.Testnet) (types.GenesisDoc, error) { } genesis.ConsensusParams.Evidence.MaxAgeNumBlocks = e2e.EvidenceAgeHeight genesis.ConsensusParams.Evidence.MaxAgeDuration = e2e.EvidenceAgeTime + genesis.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testnet.VoteExtensionsEnableHeight for validator, power := range testnet.Validators { genesis.Validators = append(genesis.Validators, types.GenesisValidator{ Name: validator.Name, diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index 20a153c1d7..6b378225a1 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -3,6 +3,7 @@ package e2e_test import ( "bytes" "context" + "errors" "fmt" "math/rand" "strconv" @@ -190,16 +191,25 @@ func TestApp_Tx(t *testing.T) { func TestApp_VoteExtensions(t *testing.T) { testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { - t.Skip() client, err := node.Client() require.NoError(t, err) + info, err := client.ABCIInfo(ctx) + require.NoError(t, err) // This special value should have been created by way of vote extensions resp, 
err := client.ABCIQuery(ctx, "", []byte("extensionSum")) require.NoError(t, err) extSum, err := strconv.Atoi(string(resp.Response.Value)) - require.NoError(t, err) - require.GreaterOrEqual(t, extSum, 0) + // if extensions are not enabled on the network, we should not expect + // the app to have any extension value set. + if node.Testnet.VoteExtensionsEnableHeight == 0 || + info.Response.LastBlockHeight < node.Testnet.VoteExtensionsEnableHeight+1 { + target := &strconv.NumError{} + require.True(t, errors.As(err, &target)) + } else { + require.NoError(t, err) + require.GreaterOrEqual(t, extSum, 0) + } }) } diff --git a/types/params.go b/types/params.go index 3b5e9a2503..a2651b1861 100644 --- a/types/params.go +++ b/types/params.go @@ -330,6 +330,9 @@ func (params ConsensusParams) ValidateConsensusParams() error { if params.Timeout.Commit <= 0 { return fmt.Errorf("timeout.Commit must be greater than 0. Got: %d", params.Timeout.Commit) } + if params.ABCI.VoteExtensionsEnableHeight < 0 { + return fmt.Errorf("ABCI.VoteExtensionsEnableHeight cannot be negative. 
Got: %d", params.ABCI.VoteExtensionsEnableHeight) + } if len(params.Validator.PubKeyTypes) == 0 { return errors.New("len(Validator.PubKeyTypes) must be greater than 0") @@ -347,6 +350,30 @@ func (params ConsensusParams) ValidateConsensusParams() error { return nil } +func (params ConsensusParams) ValidateUpdate(updated *tmproto.ConsensusParams, h int64) error { + if updated.Abci == nil { + return nil + } + if params.ABCI.VoteExtensionsEnableHeight == updated.Abci.VoteExtensionsEnableHeight { + return nil + } + if params.ABCI.VoteExtensionsEnableHeight != 0 && updated.Abci.VoteExtensionsEnableHeight == 0 { + return errors.New("vote extensions cannot be disabled once enabled") + } + if updated.Abci.VoteExtensionsEnableHeight <= h { + return fmt.Errorf("VoteExtensionsEnableHeight cannot be updated to a past height, "+ + "initial height: %d, current height %d", + params.ABCI.VoteExtensionsEnableHeight, h) + } + if params.ABCI.VoteExtensionsEnableHeight <= h { + return fmt.Errorf("VoteExtensionsEnableHeight cannot be updated modified once"+ + "the initial height has occurred, "+ + "initial height: %d, current height %d", + params.ABCI.VoteExtensionsEnableHeight, h) + } + return nil +} + // Hash returns a hash of a subset of the parameters to store in the block header. // Only the Block.MaxBytes and Block.MaxGas are included in the hash. 
// This allows the ConsensusParams to evolve more without breaking the block @@ -373,6 +400,7 @@ func (params *ConsensusParams) Equals(params2 *ConsensusParams) bool { params.Version == params2.Version && params.Synchrony == params2.Synchrony && params.Timeout == params2.Timeout && + params.ABCI == params2.ABCI && tmstrings.StringSliceEqual(params.Validator.PubKeyTypes, params2.Validator.PubKeyTypes) } @@ -429,6 +457,9 @@ func (params ConsensusParams) UpdateConsensusParams(params2 *tmproto.ConsensusPa } res.Timeout.BypassCommitTimeout = params2.Timeout.GetBypassCommitTimeout() } + if params2.Abci != nil { + res.ABCI.VoteExtensionsEnableHeight = params2.Abci.GetVoteExtensionsEnableHeight() + } return res } @@ -461,6 +492,9 @@ func (params *ConsensusParams) ToProto() tmproto.ConsensusParams { Commit: ¶ms.Timeout.Commit, BypassCommitTimeout: params.Timeout.BypassCommitTimeout, }, + Abci: &tmproto.ABCIParams{ + VoteExtensionsEnableHeight: params.ABCI.VoteExtensionsEnableHeight, + }, } } @@ -508,5 +542,8 @@ func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams } c.Timeout.BypassCommitTimeout = pbParams.Timeout.BypassCommitTimeout } + if pbParams.Abci != nil { + c.ABCI.VoteExtensionsEnableHeight = pbParams.Abci.GetVoteExtensionsEnableHeight() + } return c } diff --git a/types/params_test.go b/types/params_test.go index f19ed001bf..e434e9534a 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -189,6 +190,8 @@ type makeParamsArgs struct { vote *time.Duration voteDelta *time.Duration commit *time.Duration + + abciExtensionHeight int64 } func makeParams(args makeParamsArgs) ConsensusParams { @@ -235,6 +238,9 @@ func makeParams(args makeParamsArgs) ConsensusParams { Commit: *args.commit, BypassCommitTimeout: args.bypassCommitTimeout, }, + ABCI: ABCIParams{ + 
VoteExtensionsEnableHeight: args.abciExtensionHeight, + }, } } @@ -267,19 +273,19 @@ func TestConsensusParamsHash(t *testing.T) { func TestConsensusParamsUpdate(t *testing.T) { testCases := []struct { - intialParams ConsensusParams + initialParams ConsensusParams updates *tmproto.ConsensusParams updatedParams ConsensusParams }{ // empty updates { - intialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), updates: &tmproto.ConsensusParams{}, updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), }, { // update synchrony params - intialParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: time.Second, messageDelay: 3 * time.Second}), + initialParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: time.Second, messageDelay: 3 * time.Second}), updates: &tmproto.ConsensusParams{ Synchrony: &tmproto.SynchronyParams{ Precision: durationPtr(time.Second * 2), @@ -290,7 +296,21 @@ func TestConsensusParamsUpdate(t *testing.T) { }, { // update timeout params - intialParams: makeParams(makeParamsArgs{ + initialParams: makeParams(makeParamsArgs{ + abciExtensionHeight: 1, + }), + updates: &tmproto.ConsensusParams{ + Abci: &tmproto.ABCIParams{ + VoteExtensionsEnableHeight: 10, + }, + }, + updatedParams: makeParams(makeParamsArgs{ + abciExtensionHeight: 10, + }), + }, + { + // update timeout params + initialParams: makeParams(makeParamsArgs{ propose: durationPtr(3 * time.Second), proposeDelta: durationPtr(500 * time.Millisecond), vote: durationPtr(time.Second), @@ -319,7 +339,7 @@ func TestConsensusParamsUpdate(t *testing.T) { }, // fine updates { - intialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), updates: &tmproto.ConsensusParams{ Block: &tmproto.BlockParams{ MaxBytes: 100, @@ -341,7 +361,7 @@ 
func TestConsensusParamsUpdate(t *testing.T) { pubkeyTypes: valSecp256k1}), }, { - intialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), updates: &tmproto.ConsensusParams{ Block: &tmproto.BlockParams{ MaxBytes: 100, @@ -366,7 +386,7 @@ func TestConsensusParamsUpdate(t *testing.T) { } for _, tc := range testCases { - assert.Equal(t, tc.updatedParams, tc.intialParams.UpdateConsensusParams(tc.updates)) + assert.Equal(t, tc.updatedParams, tc.initialParams.UpdateConsensusParams(tc.updates)) } } @@ -381,6 +401,78 @@ func TestConsensusParamsUpdate_AppVersion(t *testing.T) { assert.EqualValues(t, 1, updated.Version.AppVersion) } +func TestConsensusParamsUpdate_VoteExtensionsEnableHeight(t *testing.T) { + t.Run("set to height but initial height already run", func(*testing.T) { + initialParams := makeParams(makeParamsArgs{ + abciExtensionHeight: 1, + }) + update := &tmproto.ConsensusParams{ + Abci: &tmproto.ABCIParams{ + VoteExtensionsEnableHeight: 10, + }, + } + require.Error(t, initialParams.ValidateUpdate(update, 1)) + require.Error(t, initialParams.ValidateUpdate(update, 5)) + }) + t.Run("reset to 0", func(t *testing.T) { + initialParams := makeParams(makeParamsArgs{ + abciExtensionHeight: 1, + }) + update := &tmproto.ConsensusParams{ + Abci: &tmproto.ABCIParams{ + VoteExtensionsEnableHeight: 0, + }, + } + require.Error(t, initialParams.ValidateUpdate(update, 1)) + }) + t.Run("set to height before current height run", func(*testing.T) { + initialParams := makeParams(makeParamsArgs{ + abciExtensionHeight: 100, + }) + update := &tmproto.ConsensusParams{ + Abci: &tmproto.ABCIParams{ + VoteExtensionsEnableHeight: 10, + }, + } + require.Error(t, initialParams.ValidateUpdate(update, 11)) + require.Error(t, initialParams.ValidateUpdate(update, 99)) + }) + t.Run("set to height after current height run", func(*testing.T) { + initialParams := 
makeParams(makeParamsArgs{ + abciExtensionHeight: 300, + }) + update := &tmproto.ConsensusParams{ + Abci: &tmproto.ABCIParams{ + VoteExtensionsEnableHeight: 99, + }, + } + require.NoError(t, initialParams.ValidateUpdate(update, 11)) + require.NoError(t, initialParams.ValidateUpdate(update, 98)) + }) + t.Run("no error when unchanged", func(*testing.T) { + initialParams := makeParams(makeParamsArgs{ + abciExtensionHeight: 100, + }) + update := &tmproto.ConsensusParams{ + Abci: &tmproto.ABCIParams{ + VoteExtensionsEnableHeight: 100, + }, + } + require.NoError(t, initialParams.ValidateUpdate(update, 500)) + }) + t.Run("updated from 0 to 0", func(t *testing.T) { + initialParams := makeParams(makeParamsArgs{ + abciExtensionHeight: 0, + }) + update := &tmproto.ConsensusParams{ + Abci: &tmproto.ABCIParams{ + VoteExtensionsEnableHeight: 0, + }, + } + require.NoError(t, initialParams.ValidateUpdate(update, 100)) + }) +} + func TestProto(t *testing.T) { params := []ConsensusParams{ makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1}), @@ -393,6 +485,16 @@ func TestProto(t *testing.T) { makeParams(makeParamsArgs{blockBytes: 4, blockGas: 6, evidenceAge: 5, maxEvidenceBytes: 1}), makeParams(makeParamsArgs{precision: time.Second, messageDelay: time.Minute}), makeParams(makeParamsArgs{precision: time.Nanosecond, messageDelay: time.Millisecond}), + makeParams(makeParamsArgs{abciExtensionHeight: 100}), + makeParams(makeParamsArgs{abciExtensionHeight: 100}), + makeParams(makeParamsArgs{ + propose: durationPtr(2 * time.Second), + proposeDelta: durationPtr(400 * time.Millisecond), + vote: durationPtr(5 * time.Second), + voteDelta: durationPtr(400 * time.Millisecond), + commit: durationPtr(time.Minute), + bypassCommitTimeout: true, + }), } for i := range params { From 8e0d0046e30167a9fb9a6c809370b6c96c4ba3e0 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Mon, 23 May 2022 14:43:56 -0700 Subject: [PATCH 053/203] rpc: fix encoding of block_results responses (#8593) Fixes #8583. The block results include validator updates in ABCI protobuf format, which does not encode "correctly" according to the expected Amino style RPC clients expect. - Write a regression test for this issue. - Add JSON marshaling overrides for ABCI ValidatorUpdate messages. --- abci/types/types.go | 45 +++++++++++++++++++++++++ rpc/coretypes/responses_test.go | 58 +++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+) diff --git a/abci/types/types.go b/abci/types/types.go index d13947d1a9..121e721592 100644 --- a/abci/types/types.go +++ b/abci/types/types.go @@ -5,6 +5,9 @@ import ( "encoding/json" "github.com/gogo/protobuf/jsonpb" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/jsontypes" ) const ( @@ -135,6 +138,48 @@ func (r *EventAttribute) UnmarshalJSON(b []byte) error { return jsonpbUnmarshaller.Unmarshal(reader, r) } +// validatorUpdateJSON is the JSON encoding of a validator update. +// +// It handles translation of public keys from the protobuf representation to +// the legacy Amino-compatible format expected by RPC clients. 
+type validatorUpdateJSON struct { + PubKey json.RawMessage `json:"pub_key,omitempty"` + Power int64 `json:"power,string"` +} + +func (v *ValidatorUpdate) MarshalJSON() ([]byte, error) { + key, err := encoding.PubKeyFromProto(v.PubKey) + if err != nil { + return nil, err + } + jkey, err := jsontypes.Marshal(key) + if err != nil { + return nil, err + } + return json.Marshal(validatorUpdateJSON{ + PubKey: jkey, + Power: v.GetPower(), + }) +} + +func (v *ValidatorUpdate) UnmarshalJSON(data []byte) error { + var vu validatorUpdateJSON + if err := json.Unmarshal(data, &vu); err != nil { + return err + } + var key crypto.PubKey + if err := jsontypes.Unmarshal(vu.PubKey, &key); err != nil { + return err + } + pkey, err := encoding.PubKeyToProto(key) + if err != nil { + return err + } + v.PubKey = pkey + v.Power = vu.Power + return nil +} + // Some compile time assertions to ensure we don't // have accidental runtime surprises later on. diff --git a/rpc/coretypes/responses_test.go b/rpc/coretypes/responses_test.go index d4ced795a4..bf66db0c95 100644 --- a/rpc/coretypes/responses_test.go +++ b/rpc/coretypes/responses_test.go @@ -1,10 +1,18 @@ package coretypes import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" "testing" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + pbcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" "github.com/tendermint/tendermint/types" ) @@ -33,3 +41,53 @@ func TestStatusIndexer(t *testing.T) { assert.Equal(t, tc.expected, status.TxIndexEnabled()) } } + +// A regression test for https://github.com/tendermint/tendermint/issues/8583. 
+func TestResultBlockResults_regression8583(t *testing.T) { + const keyData = "0123456789abcdef0123456789abcdef" // 32 bytes + wantKey := base64.StdEncoding.EncodeToString([]byte(keyData)) + + rsp := &ResultBlockResults{ + ValidatorUpdates: []abci.ValidatorUpdate{{ + PubKey: pbcrypto.PublicKey{ + Sum: &pbcrypto.PublicKey_Ed25519{Ed25519: []byte(keyData)}, + }, + Power: 400, + }}, + } + + // Use compact here so the test data remain legible. The output from the + // marshaler will have whitespace folded out so we need to do that too for + // the comparison to be valid. + var buf bytes.Buffer + require.NoError(t, json.Compact(&buf, []byte(fmt.Sprintf(` +{ + "height": "0", + "txs_results": null, + "total_gas_used": "0", + "finalize_block_events": null, + "validator_updates": [ + { + "pub_key":{"type": "tendermint/PubKeyEd25519", "value": "%s"}, + "power": "400" + } + ], + "consensus_param_updates": null +}`, wantKey)))) + + bits, err := json.Marshal(rsp) + if err != nil { + t.Fatalf("Encoding block result: %v", err) + } + if diff := cmp.Diff(buf.String(), string(bits)); diff != "" { + t.Errorf("Marshaled result (-want, +got):\n%s", diff) + } + + back := new(ResultBlockResults) + if err := json.Unmarshal(bits, back); err != nil { + t.Fatalf("Unmarshaling: %v", err) + } + if diff := cmp.Diff(rsp, back); diff != "" { + t.Errorf("Unmarshaled result (-want, +got):\n%s", diff) + } +} From 1a52b7cb7b00d3f1d8d08928da67d20bc10ed7b6 Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Tue, 24 May 2022 15:07:00 +0200 Subject: [PATCH 054/203] Removed redundant params in FinalizeBlock (#8598) The parameters added in this PR where coming from `Commit` in the first versions of the spec. Later on, we decided to keep `Commit` as it is. As a result, these parameters do not make sense, either in same-block or in next-block execution mode. 
--- abci/types/types.pb.go | 511 ++++++++++++------------------ proto/tendermint/abci/types.proto | 4 +- 2 files changed, 212 insertions(+), 303 deletions(-) diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index dd13086289..89de1bdcd1 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1645,7 +1645,7 @@ type RequestFinalizeBlock struct { Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` DecidedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` ByzantineValidators []Misbehavior `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` - // hash is the merkle root hash of the fields of the proposed block. + // hash is the merkle root hash of the fields of the decided block. Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` @@ -3255,8 +3255,6 @@ type ResponseFinalizeBlock struct { TxResults []*ExecTxResult `protobuf:"bytes,2,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,3,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,4,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - AppHash []byte `protobuf:"bytes,5,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - RetainHeight int64 `protobuf:"varint,6,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` } func (m *ResponseFinalizeBlock) Reset() { *m = ResponseFinalizeBlock{} } @@ -3320,20 +3318,6 @@ func (m *ResponseFinalizeBlock) GetConsensusParamUpdates() *types1.ConsensusPara return 
nil } -func (m *ResponseFinalizeBlock) GetAppHash() []byte { - if m != nil { - return m.AppHash - } - return nil -} - -func (m *ResponseFinalizeBlock) GetRetainHeight() int64 { - if m != nil { - return m.RetainHeight - } - return 0 -} - type CommitInfo struct { Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` @@ -4235,223 +4219,222 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 3451 bytes of a gzipped FileDescriptorProto + // 3439 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcb, 0x73, 0x23, 0xe5, 0xb5, 0x97, 0x5a, 0xef, 0x23, 0xeb, 0xe1, 0xcf, 0x66, 0xd0, 0x88, 0x19, 0x7b, 0xe8, 0xa9, 0x81, 0x99, 0x01, 0x3c, 0x5c, 0xcf, 0x1d, 0x18, 0xee, 0xc0, 0xa5, 0x6c, 0x59, 0x83, 0xcc, 0x78, 0x6c, 0xd3, 0x96, 0x4d, 0x71, 0x6f, 0x32, 0x4d, 0x4b, 0xfd, 0xd9, 0x6a, 0x46, 0x52, 0x37, 0xdd, 0x2d, - 0x23, 0xb3, 0x0c, 0xc5, 0x86, 0x4a, 0x55, 0xd8, 0xa4, 0x92, 0x54, 0x85, 0x5d, 0x52, 0x95, 0xfc, - 0x07, 0x59, 0x65, 0x95, 0x05, 0x8b, 0x2c, 0x58, 0x25, 0xa9, 0x2c, 0x48, 0x0a, 0x76, 0xf9, 0x07, - 0xb2, 0x4b, 0x52, 0xdf, 0xa3, 0x5f, 0x52, 0xb7, 0x1e, 0x0c, 0x50, 0x95, 0x0a, 0x3b, 0xf5, 0xe9, - 0x73, 0x4e, 0x7f, 0x8f, 0xf3, 0x9d, 0xc7, 0xef, 0x7c, 0x82, 0x27, 0x6c, 0xdc, 0x57, 0xb1, 0xd9, + 0x23, 0xb3, 0x0c, 0xc5, 0x86, 0xca, 0x82, 0x4d, 0x2a, 0x49, 0x55, 0xd8, 0x25, 0x55, 0xc9, 0x7f, + 0x90, 0x55, 0x56, 0x59, 0xb0, 0x48, 0x55, 0x58, 0x25, 0xa9, 0x2c, 0x48, 0x0a, 0x76, 0xf9, 0x07, + 0xb2, 0x4b, 0x52, 0xdf, 0xa3, 0x5f, 0x52, 0xb7, 0x1e, 0x0c, 0x50, 0x95, 0x0a, 0x3b, 0x7d, 0xa7, + 0xcf, 0x39, 0xfd, 0x3d, 0x4e, 0x9f, 0xc7, 0xef, 0x7c, 0x82, 0x27, 0x6c, 0xdc, 0x57, 0xb1, 0xd9, 0xd3, 0xfa, 0xf6, 0x0d, 0xa5, 0xd5, 0xd6, 0x6e, 0xd8, 0x67, 0x06, 0xb6, 0xd6, 0x0c, 0x53, 0xb7, - 0x75, 0x54, 0xf2, 0x5e, 0xae, 0x91, 
0x97, 0xd5, 0x8b, 0x3e, 0xee, 0xb6, 0x79, 0x66, 0xd8, 0xfa, - 0x0d, 0xc3, 0xd4, 0xf5, 0x63, 0xc6, 0x5f, 0xbd, 0xe0, 0x7b, 0x4d, 0xf5, 0xf8, 0xb5, 0x05, 0xde, - 0x72, 0xe1, 0x87, 0xf8, 0xcc, 0x79, 0x7b, 0x71, 0x4c, 0xd6, 0x50, 0x4c, 0xa5, 0xe7, 0xbc, 0x5e, - 0x3d, 0xd1, 0xf5, 0x93, 0x2e, 0xbe, 0x41, 0x9f, 0x5a, 0x83, 0xe3, 0x1b, 0xb6, 0xd6, 0xc3, 0x96, - 0xad, 0xf4, 0x0c, 0xce, 0xb0, 0x7c, 0xa2, 0x9f, 0xe8, 0xf4, 0xe7, 0x0d, 0xf2, 0x8b, 0x51, 0xc5, - 0x7f, 0x02, 0x64, 0x24, 0xfc, 0xee, 0x00, 0x5b, 0x36, 0x5a, 0x87, 0x24, 0x6e, 0x77, 0xf4, 0x4a, - 0xfc, 0x52, 0xfc, 0x6a, 0x7e, 0xfd, 0xc2, 0xda, 0xc8, 0xe4, 0xd6, 0x38, 0x5f, 0xbd, 0xdd, 0xd1, - 0x1b, 0x31, 0x89, 0xf2, 0xa2, 0x5b, 0x90, 0x3a, 0xee, 0x0e, 0xac, 0x4e, 0x45, 0xa0, 0x42, 0x17, - 0xa3, 0x84, 0xee, 0x12, 0xa6, 0x46, 0x4c, 0x62, 0xdc, 0xe4, 0x53, 0x5a, 0xff, 0x58, 0xaf, 0x24, - 0x26, 0x7f, 0x6a, 0xbb, 0x7f, 0x4c, 0x3f, 0x45, 0x78, 0xd1, 0x26, 0x80, 0xd6, 0xd7, 0x6c, 0xb9, - 0xdd, 0x51, 0xb4, 0x7e, 0x25, 0x49, 0x25, 0x9f, 0x8c, 0x96, 0xd4, 0xec, 0x1a, 0x61, 0x6c, 0xc4, - 0xa4, 0x9c, 0xe6, 0x3c, 0x90, 0xe1, 0xbe, 0x3b, 0xc0, 0xe6, 0x59, 0x25, 0x35, 0x79, 0xb8, 0x6f, - 0x10, 0x26, 0x32, 0x5c, 0xca, 0x8d, 0xb6, 0x21, 0xdf, 0xc2, 0x27, 0x5a, 0x5f, 0x6e, 0x75, 0xf5, - 0xf6, 0xc3, 0x4a, 0x9a, 0x0a, 0x8b, 0x51, 0xc2, 0x9b, 0x84, 0x75, 0x93, 0x70, 0x6e, 0x0a, 0x95, - 0x78, 0x23, 0x26, 0x41, 0xcb, 0xa5, 0xa0, 0x97, 0x21, 0xdb, 0xee, 0xe0, 0xf6, 0x43, 0xd9, 0x1e, - 0x56, 0x32, 0x54, 0xcf, 0x6a, 0x94, 0x9e, 0x1a, 0xe1, 0x6b, 0x0e, 0x1b, 0x31, 0x29, 0xd3, 0x66, - 0x3f, 0xd1, 0x5d, 0x00, 0x15, 0x77, 0xb5, 0x53, 0x6c, 0x12, 0xf9, 0xec, 0xe4, 0x35, 0xd8, 0x62, - 0x9c, 0xcd, 0x21, 0x1f, 0x46, 0x4e, 0x75, 0x08, 0xa8, 0x06, 0x39, 0xdc, 0x57, 0xf9, 0x74, 0x72, - 0x54, 0xcd, 0xa5, 0xc8, 0xfd, 0xee, 0xab, 0xfe, 0xc9, 0x64, 0x31, 0x7f, 0x46, 0xb7, 0x21, 0xdd, - 0xd6, 0x7b, 0x3d, 0xcd, 0xae, 0x00, 0xd5, 0xb0, 0x12, 0x39, 0x11, 0xca, 0xd5, 0x88, 0x49, 0x9c, - 0x1f, 0xed, 0x42, 0xb1, 0xab, 0x59, 0xb6, 0x6c, 0xf5, 0x15, 0xc3, 0xea, 
0xe8, 0xb6, 0x55, 0xc9, - 0x53, 0x0d, 0x57, 0xa2, 0x34, 0xec, 0x68, 0x96, 0x7d, 0xe0, 0x30, 0x37, 0x62, 0x52, 0xa1, 0xeb, - 0x27, 0x10, 0x7d, 0xfa, 0xf1, 0x31, 0x36, 0x5d, 0x85, 0x95, 0x85, 0xc9, 0xfa, 0xf6, 0x08, 0xb7, - 0x23, 0x4f, 0xf4, 0xe9, 0x7e, 0x02, 0xfa, 0x7f, 0x58, 0xea, 0xea, 0x8a, 0xea, 0xaa, 0x93, 0xdb, - 0x9d, 0x41, 0xff, 0x61, 0xa5, 0x40, 0x95, 0x5e, 0x8b, 0x1c, 0xa4, 0xae, 0xa8, 0x8e, 0x8a, 0x1a, - 0x11, 0x68, 0xc4, 0xa4, 0xc5, 0xee, 0x28, 0x11, 0x3d, 0x80, 0x65, 0xc5, 0x30, 0xba, 0x67, 0xa3, - 0xda, 0x8b, 0x54, 0xfb, 0xf5, 0x28, 0xed, 0x1b, 0x44, 0x66, 0x54, 0x3d, 0x52, 0xc6, 0xa8, 0xa8, - 0x09, 0x65, 0xc3, 0xc4, 0x86, 0x62, 0x62, 0xd9, 0x30, 0x75, 0x43, 0xb7, 0x94, 0x6e, 0xa5, 0x44, - 0x75, 0x3f, 0x1d, 0xa5, 0x7b, 0x9f, 0xf1, 0xef, 0x73, 0xf6, 0x46, 0x4c, 0x2a, 0x19, 0x41, 0x12, - 0xd3, 0xaa, 0xb7, 0xb1, 0x65, 0x79, 0x5a, 0xcb, 0xd3, 0xb4, 0x52, 0xfe, 0xa0, 0xd6, 0x00, 0x09, - 0xd5, 0x21, 0x8f, 0x87, 0x44, 0x5c, 0x3e, 0xd5, 0x6d, 0x5c, 0x59, 0x9c, 0x7c, 0xb0, 0xea, 0x94, - 0xf5, 0x48, 0xb7, 0x31, 0x39, 0x54, 0xd8, 0x7d, 0x42, 0x0a, 0x3c, 0x76, 0x8a, 0x4d, 0xed, 0xf8, - 0x8c, 0xaa, 0x91, 0xe9, 0x1b, 0x4b, 0xd3, 0xfb, 0x15, 0x44, 0x15, 0x3e, 0x13, 0xa5, 0xf0, 0x88, - 0x0a, 0x11, 0x15, 0x75, 0x47, 0xa4, 0x11, 0x93, 0x96, 0x4e, 0xc7, 0xc9, 0xc4, 0xc4, 0x8e, 0xb5, - 0xbe, 0xd2, 0xd5, 0xde, 0xc7, 0xfc, 0xd8, 0x2c, 0x4d, 0x36, 0xb1, 0xbb, 0x9c, 0x9b, 0x9e, 0x15, + 0x75, 0x54, 0xf2, 0x1e, 0xae, 0x91, 0x87, 0xd5, 0x8b, 0x3e, 0xee, 0xb6, 0x79, 0x66, 0xd8, 0xfa, + 0x0d, 0xc3, 0xd4, 0xf5, 0x63, 0xc6, 0x5f, 0xbd, 0xe0, 0x7b, 0x4c, 0xf5, 0xf8, 0xb5, 0x05, 0x9e, + 0x72, 0xe1, 0x87, 0xf8, 0xcc, 0x79, 0x7a, 0x71, 0x4c, 0xd6, 0x50, 0x4c, 0xa5, 0xe7, 0x3c, 0x5e, + 0x3d, 0xd1, 0xf5, 0x93, 0x2e, 0xbe, 0x41, 0x47, 0xad, 0xc1, 0xf1, 0x0d, 0x5b, 0xeb, 0x61, 0xcb, + 0x56, 0x7a, 0x06, 0x67, 0x58, 0x3e, 0xd1, 0x4f, 0x74, 0xfa, 0xf3, 0x06, 0xf9, 0xc5, 0xa8, 0xe2, + 0x3f, 0x01, 0x32, 0x12, 0x7e, 0x77, 0x80, 0x2d, 0x1b, 0xad, 0x43, 0x12, 0xb7, 0x3b, 0x7a, 0x25, + 0x7e, 0x29, 
0x7e, 0x35, 0xbf, 0x7e, 0x61, 0x6d, 0x64, 0x71, 0x6b, 0x9c, 0xaf, 0xde, 0xee, 0xe8, + 0x8d, 0x98, 0x44, 0x79, 0xd1, 0x2d, 0x48, 0x1d, 0x77, 0x07, 0x56, 0xa7, 0x22, 0x50, 0xa1, 0x8b, + 0x51, 0x42, 0x77, 0x09, 0x53, 0x23, 0x26, 0x31, 0x6e, 0xf2, 0x2a, 0xad, 0x7f, 0xac, 0x57, 0x12, + 0x93, 0x5f, 0xb5, 0xdd, 0x3f, 0xa6, 0xaf, 0x22, 0xbc, 0x68, 0x13, 0x40, 0xeb, 0x6b, 0xb6, 0xdc, + 0xee, 0x28, 0x5a, 0xbf, 0x92, 0xa4, 0x92, 0x4f, 0x46, 0x4b, 0x6a, 0x76, 0x8d, 0x30, 0x36, 0x62, + 0x52, 0x4e, 0x73, 0x06, 0x64, 0xba, 0xef, 0x0e, 0xb0, 0x79, 0x56, 0x49, 0x4d, 0x9e, 0xee, 0x1b, + 0x84, 0x89, 0x4c, 0x97, 0x72, 0xa3, 0x6d, 0xc8, 0xb7, 0xf0, 0x89, 0xd6, 0x97, 0x5b, 0x5d, 0xbd, + 0xfd, 0xb0, 0x92, 0xa6, 0xc2, 0x62, 0x94, 0xf0, 0x26, 0x61, 0xdd, 0x24, 0x9c, 0x9b, 0x42, 0x25, + 0xde, 0x88, 0x49, 0xd0, 0x72, 0x29, 0xe8, 0x65, 0xc8, 0xb6, 0x3b, 0xb8, 0xfd, 0x50, 0xb6, 0x87, + 0x95, 0x0c, 0xd5, 0xb3, 0x1a, 0xa5, 0xa7, 0x46, 0xf8, 0x9a, 0xc3, 0x46, 0x4c, 0xca, 0xb4, 0xd9, + 0x4f, 0x74, 0x17, 0x40, 0xc5, 0x5d, 0xed, 0x14, 0x9b, 0x44, 0x3e, 0x3b, 0x79, 0x0f, 0xb6, 0x18, + 0x67, 0x73, 0xc8, 0xa7, 0x91, 0x53, 0x1d, 0x02, 0xaa, 0x41, 0x0e, 0xf7, 0x55, 0xbe, 0x9c, 0x1c, + 0x55, 0x73, 0x29, 0xf2, 0xbc, 0xfb, 0xaa, 0x7f, 0x31, 0x59, 0xcc, 0xc7, 0xe8, 0x36, 0xa4, 0xdb, + 0x7a, 0xaf, 0xa7, 0xd9, 0x15, 0xa0, 0x1a, 0x56, 0x22, 0x17, 0x42, 0xb9, 0x1a, 0x31, 0x89, 0xf3, + 0xa3, 0x5d, 0x28, 0x76, 0x35, 0xcb, 0x96, 0xad, 0xbe, 0x62, 0x58, 0x1d, 0xdd, 0xb6, 0x2a, 0x79, + 0xaa, 0xe1, 0x4a, 0x94, 0x86, 0x1d, 0xcd, 0xb2, 0x0f, 0x1c, 0xe6, 0x46, 0x4c, 0x2a, 0x74, 0xfd, + 0x04, 0xa2, 0x4f, 0x3f, 0x3e, 0xc6, 0xa6, 0xab, 0xb0, 0xb2, 0x30, 0x59, 0xdf, 0x1e, 0xe1, 0x76, + 0xe4, 0x89, 0x3e, 0xdd, 0x4f, 0x40, 0xff, 0x0f, 0x4b, 0x5d, 0x5d, 0x51, 0x5d, 0x75, 0x72, 0xbb, + 0x33, 0xe8, 0x3f, 0xac, 0x14, 0xa8, 0xd2, 0x6b, 0x91, 0x93, 0xd4, 0x15, 0xd5, 0x51, 0x51, 0x23, + 0x02, 0x8d, 0x98, 0xb4, 0xd8, 0x1d, 0x25, 0xa2, 0x07, 0xb0, 0xac, 0x18, 0x46, 0xf7, 0x6c, 0x54, + 0x7b, 0x91, 0x6a, 0xbf, 0x1e, 0xa5, 0x7d, 0x83, 
0xc8, 0x8c, 0xaa, 0x47, 0xca, 0x18, 0x15, 0x35, + 0xa1, 0x6c, 0x98, 0xd8, 0x50, 0x4c, 0x2c, 0x1b, 0xa6, 0x6e, 0xe8, 0x96, 0xd2, 0xad, 0x94, 0xa8, + 0xee, 0xa7, 0xa3, 0x74, 0xef, 0x33, 0xfe, 0x7d, 0xce, 0xde, 0x88, 0x49, 0x25, 0x23, 0x48, 0x62, + 0x5a, 0xf5, 0x36, 0xb6, 0x2c, 0x4f, 0x6b, 0x79, 0x9a, 0x56, 0xca, 0x1f, 0xd4, 0x1a, 0x20, 0xa1, + 0x3a, 0xe4, 0xf1, 0x90, 0x88, 0xcb, 0xa7, 0xba, 0x8d, 0x2b, 0x8b, 0x93, 0x3f, 0xac, 0x3a, 0x65, + 0x3d, 0xd2, 0x6d, 0x4c, 0x3e, 0x2a, 0xec, 0x8e, 0x90, 0x02, 0x8f, 0x9d, 0x62, 0x53, 0x3b, 0x3e, + 0xa3, 0x6a, 0x64, 0xfa, 0xc4, 0xd2, 0xf4, 0x7e, 0x05, 0x51, 0x85, 0xcf, 0x44, 0x29, 0x3c, 0xa2, + 0x42, 0x44, 0x45, 0xdd, 0x11, 0x69, 0xc4, 0xa4, 0xa5, 0xd3, 0x71, 0x32, 0x31, 0xb1, 0x63, 0xad, + 0xaf, 0x74, 0xb5, 0xf7, 0x31, 0xff, 0x6c, 0x96, 0x26, 0x9b, 0xd8, 0x5d, 0xce, 0x4d, 0xbf, 0x15, 0x62, 0x62, 0xc7, 0x7e, 0xc2, 0x66, 0x06, 0x52, 0xa7, 0x4a, 0x77, 0x80, 0xc5, 0xa7, 0x21, 0xef, - 0x73, 0xac, 0xa8, 0x02, 0x99, 0x1e, 0xb6, 0x2c, 0xe5, 0x04, 0x53, 0x3f, 0x9c, 0x93, 0x9c, 0x47, - 0xb1, 0x08, 0x0b, 0x7e, 0x67, 0x2a, 0x7e, 0x1c, 0x77, 0x25, 0x89, 0x9f, 0x24, 0x92, 0xa7, 0xd8, - 0xa4, 0xd3, 0xe6, 0x92, 0xfc, 0x11, 0x5d, 0x86, 0x02, 0x1d, 0xb2, 0xec, 0xbc, 0x27, 0xce, 0x3a, - 0x29, 0x2d, 0x50, 0xe2, 0x11, 0x67, 0x5a, 0x85, 0xbc, 0xb1, 0x6e, 0xb8, 0x2c, 0x09, 0xca, 0x02, - 0xc6, 0xba, 0xe1, 0x30, 0x3c, 0x09, 0x0b, 0x64, 0x7e, 0x2e, 0x47, 0x92, 0x7e, 0x24, 0x4f, 0x68, - 0x9c, 0x45, 0xfc, 0xbd, 0x00, 0xe5, 0x51, 0x07, 0x8c, 0x6e, 0x43, 0x92, 0xc4, 0x22, 0x1e, 0x56, - 0xaa, 0x6b, 0x2c, 0x50, 0xad, 0x39, 0x81, 0x6a, 0xad, 0xe9, 0x04, 0xaa, 0xcd, 0xec, 0xa7, 0x9f, - 0xaf, 0xc6, 0x3e, 0xfe, 0xcb, 0x6a, 0x5c, 0xa2, 0x12, 0xe8, 0x3c, 0xf1, 0x95, 0x8a, 0xd6, 0x97, - 0x35, 0x95, 0x0e, 0x39, 0x47, 0x1c, 0xa1, 0xa2, 0xf5, 0xb7, 0x55, 0xb4, 0x03, 0xe5, 0xb6, 0xde, - 0xb7, 0x70, 0xdf, 0x1a, 0x58, 0x32, 0x0b, 0x84, 0x3c, 0x98, 0x04, 0xdc, 0x21, 0x0b, 0xaf, 0x35, - 0x87, 0x73, 0x9f, 0x32, 0x4a, 0xa5, 0x76, 0x90, 0x40, 0xdc, 0xea, 0xa9, 0xd2, 0xd5, 0x54, 
0xc5, - 0xd6, 0x4d, 0xab, 0x92, 0xbc, 0x94, 0x08, 0xf5, 0x87, 0x47, 0x0e, 0xcb, 0xa1, 0xa1, 0x2a, 0x36, - 0xde, 0x4c, 0x92, 0xe1, 0x4a, 0x3e, 0x49, 0xf4, 0x14, 0x94, 0x14, 0xc3, 0x90, 0x2d, 0x5b, 0xb1, - 0xb1, 0xdc, 0x3a, 0xb3, 0xb1, 0x45, 0x03, 0xcd, 0x82, 0x54, 0x50, 0x0c, 0xe3, 0x80, 0x50, 0x37, - 0x09, 0x11, 0x5d, 0x81, 0x22, 0x89, 0x49, 0x9a, 0xd2, 0x95, 0x3b, 0x58, 0x3b, 0xe9, 0xd8, 0x34, - 0xa4, 0x24, 0xa4, 0x02, 0xa7, 0x36, 0x28, 0x51, 0x54, 0xdd, 0x1d, 0xa7, 0xf1, 0x08, 0x21, 0x48, - 0xaa, 0x8a, 0xad, 0xd0, 0x95, 0x5c, 0x90, 0xe8, 0x6f, 0x42, 0x33, 0x14, 0xbb, 0xc3, 0xd7, 0x87, - 0xfe, 0x46, 0xe7, 0x20, 0xcd, 0xd5, 0x26, 0xa8, 0x5a, 0xfe, 0x84, 0x96, 0x21, 0x65, 0x98, 0xfa, - 0x29, 0xa6, 0x5b, 0x97, 0x95, 0xd8, 0x83, 0xf8, 0x81, 0x00, 0x8b, 0x63, 0x91, 0x8b, 0xe8, 0xed, - 0x28, 0x56, 0xc7, 0xf9, 0x16, 0xf9, 0x8d, 0x5e, 0x20, 0x7a, 0x15, 0x15, 0x9b, 0x3c, 0xda, 0x57, - 0xc6, 0x97, 0xba, 0x41, 0xdf, 0xf3, 0xa5, 0xe1, 0xdc, 0xe8, 0x1e, 0x94, 0xbb, 0x8a, 0x65, 0xcb, - 0xcc, 0xfb, 0xcb, 0xbe, 0xc8, 0xff, 0xc4, 0xd8, 0x22, 0xb3, 0x58, 0x41, 0x0c, 0x9a, 0x2b, 0x29, - 0x12, 0x51, 0x8f, 0x8a, 0x0e, 0x61, 0xb9, 0x75, 0xf6, 0xbe, 0xd2, 0xb7, 0xb5, 0x3e, 0x96, 0xc7, - 0x76, 0x6d, 0x3c, 0x95, 0xb8, 0xaf, 0x59, 0x2d, 0xdc, 0x51, 0x4e, 0x35, 0xdd, 0x19, 0xd6, 0x92, - 0x2b, 0xef, 0xee, 0xa8, 0x25, 0x4a, 0x50, 0x0c, 0x86, 0x5d, 0x54, 0x04, 0xc1, 0x1e, 0xf2, 0xf9, - 0x0b, 0xf6, 0x10, 0x3d, 0x0f, 0x49, 0x32, 0x47, 0x3a, 0xf7, 0x62, 0xc8, 0x87, 0xb8, 0x5c, 0xf3, - 0xcc, 0xc0, 0x12, 0xe5, 0x14, 0x45, 0xf7, 0x34, 0xb8, 0xa1, 0x78, 0x54, 0xab, 0x78, 0x0d, 0x4a, - 0x23, 0x71, 0xd6, 0xb7, 0x7d, 0x71, 0xff, 0xf6, 0x89, 0x25, 0x28, 0x04, 0x02, 0xaa, 0x78, 0x0e, - 0x96, 0xc3, 0xe2, 0xa3, 0xd8, 0x71, 0xe9, 0x81, 0x38, 0x87, 0x6e, 0x41, 0xd6, 0x0d, 0x90, 0xec, - 0x34, 0x9e, 0x1f, 0x9b, 0x85, 0xc3, 0x2c, 0xb9, 0xac, 0xe4, 0x18, 0x12, 0xab, 0xa6, 0xe6, 0x20, - 0xd0, 0x81, 0x67, 0x14, 0xc3, 0x68, 0x28, 0x56, 0x47, 0x7c, 0x1b, 0x2a, 0x51, 0xc1, 0x6f, 0x64, - 0x1a, 0x49, 0xd7, 0x0a, 0xcf, 
0x41, 0xfa, 0x58, 0x37, 0x7b, 0x8a, 0x4d, 0x95, 0x15, 0x24, 0xfe, - 0x44, 0xac, 0x93, 0x05, 0xc2, 0x04, 0x25, 0xb3, 0x07, 0x51, 0x86, 0xf3, 0x91, 0x01, 0x90, 0x88, - 0x68, 0x7d, 0x15, 0xb3, 0xf5, 0x2c, 0x48, 0xec, 0xc1, 0x53, 0xc4, 0x06, 0xcb, 0x1e, 0xc8, 0x67, - 0x2d, 0x3a, 0x57, 0xaa, 0x3f, 0x27, 0xf1, 0x27, 0xf1, 0xd7, 0x09, 0x38, 0x17, 0x1e, 0x06, 0xd1, - 0x25, 0x58, 0xe8, 0x29, 0x43, 0xd9, 0x1e, 0xf2, 0xb3, 0xcc, 0xb6, 0x03, 0x7a, 0xca, 0xb0, 0x39, - 0x64, 0x07, 0xb9, 0x0c, 0x09, 0x7b, 0x68, 0x55, 0x84, 0x4b, 0x89, 0xab, 0x0b, 0x12, 0xf9, 0x89, - 0x0e, 0x61, 0xb1, 0xab, 0xb7, 0x95, 0xae, 0xec, 0xb3, 0x78, 0x6e, 0xec, 0x97, 0xc7, 0x16, 0x9b, - 0x05, 0x34, 0xac, 0x8e, 0x19, 0x7d, 0x89, 0xea, 0xd8, 0x71, 0x2d, 0xff, 0x1b, 0xb2, 0x7a, 0xdf, - 0x1e, 0xa5, 0x02, 0x9e, 0xc2, 0xf1, 0xd9, 0xe9, 0xb9, 0x7d, 0xf6, 0xf3, 0xb0, 0xdc, 0xc7, 0x43, - 0xdb, 0x37, 0x46, 0x66, 0x38, 0x19, 0xba, 0x17, 0x88, 0xbc, 0xf3, 0xbe, 0x4f, 0x6c, 0x08, 0x5d, - 0xa3, 0x99, 0x85, 0xa1, 0x5b, 0xd8, 0x94, 0x15, 0x55, 0x35, 0xb1, 0x65, 0xd1, 0xcc, 0x76, 0x81, - 0xa6, 0x0b, 0x94, 0xbe, 0xc1, 0xc8, 0xe2, 0xcf, 0xfc, 0x7b, 0x15, 0xcc, 0x24, 0xf8, 0x4e, 0xc4, - 0xbd, 0x9d, 0x38, 0x80, 0x65, 0x2e, 0xaf, 0x06, 0x36, 0x43, 0x98, 0xd5, 0xf3, 0x20, 0x47, 0x7c, - 0x86, 0x7d, 0x48, 0x3c, 0xda, 0x3e, 0x38, 0xde, 0x36, 0xe9, 0xf3, 0xb6, 0xff, 0x66, 0x7b, 0xf3, - 0xaa, 0x1b, 0x45, 0xbc, 0x34, 0x2d, 0x34, 0x8a, 0x78, 0xf3, 0x12, 0x02, 0xee, 0xed, 0xe7, 0x71, - 0xa8, 0x46, 0xe7, 0x65, 0xa1, 0xaa, 0x9e, 0x81, 0x45, 0x77, 0x2e, 0xee, 0xf8, 0xd8, 0xa9, 0x2f, - 0xbb, 0x2f, 0xf8, 0x00, 0x23, 0xa3, 0xe2, 0x15, 0x28, 0x8e, 0x64, 0x8d, 0x6c, 0x17, 0x0a, 0xa7, - 0xfe, 0xef, 0x8b, 0x3f, 0x4e, 0xb8, 0x5e, 0x35, 0x90, 0xda, 0x85, 0x58, 0xde, 0x1b, 0xb0, 0xa4, - 0xe2, 0xb6, 0xa6, 0x7e, 0x55, 0xc3, 0x5b, 0xe4, 0xd2, 0xdf, 0xd9, 0xdd, 0x0c, 0x76, 0xf7, 0xc7, - 0x3c, 0x64, 0x25, 0x6c, 0x19, 0x24, 0xa5, 0x43, 0x9b, 0x90, 0xc3, 0xc3, 0x36, 0x36, 0x6c, 0x27, - 0x0b, 0x0e, 0xaf, 0x26, 0x18, 0x77, 0xdd, 0xe1, 0x24, 0xb5, 0xb1, 
0x2b, 0x86, 0x6e, 0x72, 0x18, - 0x24, 0x1a, 0xd1, 0xe0, 0xe2, 0x7e, 0x1c, 0xe4, 0x05, 0x07, 0x07, 0x49, 0x44, 0x96, 0xc2, 0x4c, - 0x6a, 0x04, 0x08, 0xb9, 0xc9, 0x81, 0x90, 0xe4, 0x94, 0x8f, 0x05, 0x90, 0x90, 0x5a, 0x00, 0x09, - 0x49, 0x4d, 0x99, 0x66, 0x04, 0x14, 0xf2, 0x82, 0x03, 0x85, 0xa4, 0xa7, 0x8c, 0x78, 0x04, 0x0b, - 0x79, 0x3d, 0x88, 0x85, 0x64, 0x22, 0x42, 0x9b, 0x23, 0x3d, 0x11, 0x0c, 0x79, 0xc5, 0x07, 0x86, - 0x64, 0x23, 0x51, 0x08, 0xa6, 0x28, 0x04, 0x0d, 0x79, 0x2d, 0x80, 0x86, 0xe4, 0xa6, 0xac, 0xc3, - 0x04, 0x38, 0x64, 0xcb, 0x0f, 0x87, 0x40, 0x24, 0xaa, 0xc2, 0xf7, 0x3d, 0x0a, 0x0f, 0x79, 0xc9, - 0xc5, 0x43, 0xf2, 0x91, 0xc0, 0x0e, 0x9f, 0xcb, 0x28, 0x20, 0xb2, 0x37, 0x06, 0x88, 0x30, 0x00, - 0xe3, 0xa9, 0x48, 0x15, 0x53, 0x10, 0x91, 0xbd, 0x31, 0x44, 0xa4, 0x30, 0x45, 0xe1, 0x14, 0x48, - 0xe4, 0x7b, 0xe1, 0x90, 0x48, 0x34, 0x68, 0xc1, 0x87, 0x39, 0x1b, 0x26, 0x22, 0x47, 0x60, 0x22, - 0xa5, 0xc8, 0xfa, 0x9d, 0xa9, 0x9f, 0x19, 0x14, 0x39, 0x0c, 0x01, 0x45, 0x18, 0x7c, 0x71, 0x35, - 0x52, 0xf9, 0x0c, 0xa8, 0xc8, 0x61, 0x08, 0x2a, 0xb2, 0x38, 0x55, 0xed, 0x54, 0x58, 0xe4, 0x6e, - 0x10, 0x16, 0x41, 0x53, 0xce, 0x58, 0x24, 0x2e, 0xd2, 0x8a, 0xc2, 0x45, 0x18, 0x76, 0xf1, 0x6c, - 0xa4, 0xc6, 0x39, 0x80, 0x91, 0xbd, 0x31, 0x60, 0x64, 0x79, 0x8a, 0xa5, 0xcd, 0x8a, 0x8c, 0x5c, - 0x23, 0x19, 0xc5, 0x88, 0xab, 0x26, 0xc9, 0x3d, 0x36, 0x4d, 0xdd, 0xe4, 0x18, 0x07, 0x7b, 0x10, - 0xaf, 0x92, 0x4a, 0xd9, 0x73, 0xcb, 0x13, 0x50, 0x14, 0x5a, 0x44, 0xf9, 0x5c, 0xb1, 0xf8, 0x9b, - 0xb8, 0x27, 0x4b, 0x0b, 0x4c, 0x7f, 0x95, 0x9d, 0xe3, 0x55, 0xb6, 0x0f, 0x5b, 0x11, 0x82, 0xd8, - 0xca, 0x2a, 0xe4, 0x49, 0x71, 0x34, 0x02, 0x9b, 0x28, 0x86, 0x0b, 0x9b, 0x5c, 0x87, 0x45, 0x9a, - 0x04, 0x30, 0x04, 0x86, 0x47, 0xd6, 0x24, 0x8d, 0xac, 0x25, 0xf2, 0x82, 0xad, 0x02, 0x0b, 0xb1, - 0xcf, 0xc1, 0x92, 0x8f, 0xd7, 0x2d, 0xba, 0x18, 0x86, 0x50, 0x76, 0xb9, 0x37, 0x78, 0xf5, 0xf5, - 0xbb, 0xb8, 0xb7, 0x42, 0x1e, 0xde, 0x12, 0x06, 0x8d, 0xc4, 0xbf, 0x26, 0x68, 0x44, 0xf8, 0xca, - 0xd0, 
0x88, 0xbf, 0x88, 0x4c, 0x04, 0x8b, 0xc8, 0xbf, 0xc7, 0xbd, 0x3d, 0x71, 0x81, 0x8e, 0xb6, - 0xae, 0x62, 0x5e, 0xd6, 0xd1, 0xdf, 0x24, 0xcd, 0xea, 0xea, 0x27, 0xbc, 0x78, 0x23, 0x3f, 0x09, - 0x97, 0x1b, 0x3b, 0x73, 0x3c, 0x34, 0xba, 0x15, 0x21, 0xcb, 0x5d, 0x78, 0x45, 0x58, 0x86, 0xc4, - 0x43, 0xcc, 0x22, 0xdd, 0x82, 0x44, 0x7e, 0x12, 0x3e, 0x6a, 0x64, 0x3c, 0x07, 0x61, 0x0f, 0xe8, - 0x36, 0xe4, 0x68, 0xbb, 0x46, 0xd6, 0x0d, 0x8b, 0x07, 0xa4, 0x40, 0xba, 0xc6, 0xba, 0x32, 0x6b, - 0xfb, 0x84, 0x67, 0xcf, 0xb0, 0xa4, 0xac, 0xc1, 0x7f, 0xf9, 0x92, 0xa6, 0x5c, 0x20, 0x69, 0xba, - 0x00, 0x39, 0x32, 0x7a, 0xcb, 0x50, 0xda, 0x98, 0x46, 0x96, 0x9c, 0xe4, 0x11, 0xc4, 0x07, 0x80, - 0xc6, 0xe3, 0x24, 0x6a, 0x40, 0x1a, 0x9f, 0xe2, 0xbe, 0xcd, 0x72, 0xca, 0xfc, 0xfa, 0xb9, 0xf1, - 0xba, 0x91, 0xbc, 0xde, 0xac, 0x90, 0x45, 0xfe, 0xdb, 0xe7, 0xab, 0x65, 0xc6, 0xfd, 0xac, 0xde, - 0xd3, 0x6c, 0xdc, 0x33, 0xec, 0x33, 0x89, 0xcb, 0x8b, 0x7f, 0x16, 0xa0, 0x34, 0x12, 0x3f, 0x43, - 0xd7, 0xd6, 0x31, 0x79, 0xc1, 0x07, 0x2c, 0xcd, 0xb6, 0xde, 0x17, 0x01, 0x4e, 0x14, 0x4b, 0x7e, - 0x4f, 0xe9, 0xdb, 0x58, 0xe5, 0x8b, 0x9e, 0x3b, 0x51, 0xac, 0x37, 0x29, 0x81, 0xec, 0x3a, 0x79, - 0x3d, 0xb0, 0xb0, 0xca, 0x21, 0xae, 0xcc, 0x89, 0x62, 0x1d, 0x5a, 0x58, 0xf5, 0xcd, 0x32, 0xf3, - 0x68, 0xb3, 0x0c, 0xae, 0x71, 0x76, 0x64, 0x8d, 0x7d, 0x75, 0x7f, 0xce, 0x5f, 0xf7, 0xa3, 0x2a, - 0x64, 0x0d, 0x53, 0xd3, 0x4d, 0xcd, 0x3e, 0xa3, 0x1b, 0x93, 0x90, 0xdc, 0x67, 0x74, 0x19, 0x0a, - 0x3d, 0xdc, 0x33, 0x74, 0xbd, 0x2b, 0x33, 0x67, 0x93, 0xa7, 0xa2, 0x0b, 0x9c, 0x58, 0xa7, 0x3e, - 0xe7, 0x43, 0xc1, 0x3b, 0x7d, 0x1e, 0xbe, 0xf3, 0xf5, 0x2e, 0xef, 0x4a, 0xc8, 0xf2, 0xfa, 0x28, - 0x64, 0x12, 0x23, 0xeb, 0xeb, 0x3e, 0x7f, 0x5b, 0x0b, 0x2c, 0xfe, 0x90, 0x82, 0xbe, 0xc1, 0xdc, - 0x08, 0x1d, 0xf8, 0x2b, 0xb3, 0x01, 0x75, 0x0a, 0x8e, 0x39, 0xcf, 0xea, 0x3d, 0xbc, 0x0a, 0x8e, - 0x91, 0x2d, 0xf4, 0x16, 0x3c, 0x3e, 0xe2, 0xd9, 0x5c, 0xd5, 0xc2, 0xac, 0x0e, 0xee, 0xb1, 0xa0, - 0x83, 0x73, 0x54, 0x7b, 0x8b, 0x95, 0x78, 
0xc4, 0x33, 0xb7, 0x0d, 0xc5, 0x60, 0x9a, 0x17, 0xba, - 0xfd, 0x97, 0xa1, 0x60, 0x62, 0x5b, 0xd1, 0xfa, 0x72, 0xa0, 0x26, 0x5d, 0x60, 0x44, 0x8e, 0xff, - 0xee, 0xc3, 0x63, 0xa1, 0xe9, 0x1e, 0x7a, 0x11, 0x72, 0x5e, 0xa6, 0xc8, 0x56, 0x75, 0x02, 0x92, - 0xe7, 0xf1, 0x8a, 0xbf, 0x8d, 0x7b, 0x2a, 0x83, 0xd8, 0x60, 0x1d, 0xd2, 0x26, 0xb6, 0x06, 0x5d, - 0x86, 0xd6, 0x15, 0xd7, 0x9f, 0x9b, 0x2d, 0x51, 0x24, 0xd4, 0x41, 0xd7, 0x96, 0xb8, 0xb0, 0xf8, - 0x00, 0xd2, 0x8c, 0x82, 0xf2, 0x90, 0x39, 0xdc, 0xbd, 0xb7, 0xbb, 0xf7, 0xe6, 0x6e, 0x39, 0x86, - 0x00, 0xd2, 0x1b, 0xb5, 0x5a, 0x7d, 0xbf, 0x59, 0x8e, 0xa3, 0x1c, 0xa4, 0x36, 0x36, 0xf7, 0xa4, - 0x66, 0x59, 0x20, 0x64, 0xa9, 0xfe, 0x7a, 0xbd, 0xd6, 0x2c, 0x27, 0xd0, 0x22, 0x14, 0xd8, 0x6f, - 0xf9, 0xee, 0x9e, 0x74, 0x7f, 0xa3, 0x59, 0x4e, 0xfa, 0x48, 0x07, 0xf5, 0xdd, 0xad, 0xba, 0x54, - 0x4e, 0x89, 0xff, 0x05, 0xe7, 0x23, 0x53, 0x4b, 0x0f, 0xf8, 0x8b, 0xfb, 0x80, 0x3f, 0xf1, 0xa7, - 0x02, 0x54, 0xa3, 0xf3, 0x45, 0xf4, 0xfa, 0xc8, 0xc4, 0xd7, 0xe7, 0x48, 0x36, 0x47, 0x66, 0x8f, - 0xae, 0x40, 0xd1, 0xc4, 0xc7, 0xd8, 0x6e, 0x77, 0x58, 0xfe, 0xca, 0x02, 0x66, 0x41, 0x2a, 0x70, - 0x2a, 0x15, 0xb2, 0x18, 0xdb, 0x3b, 0xb8, 0x6d, 0xcb, 0xcc, 0x17, 0x31, 0xa3, 0xcb, 0x11, 0x36, - 0x42, 0x3d, 0x60, 0x44, 0xf1, 0xed, 0xb9, 0xd6, 0x32, 0x07, 0x29, 0xa9, 0xde, 0x94, 0xde, 0x2a, - 0x27, 0x10, 0x82, 0x22, 0xfd, 0x29, 0x1f, 0xec, 0x6e, 0xec, 0x1f, 0x34, 0xf6, 0xc8, 0x5a, 0x2e, - 0x41, 0xc9, 0x59, 0x4b, 0x87, 0x98, 0x12, 0xff, 0x20, 0xc0, 0xe3, 0x11, 0xd9, 0x2e, 0xba, 0x0d, - 0x60, 0x0f, 0x65, 0x13, 0xb7, 0x75, 0x53, 0x8d, 0x36, 0xb2, 0xe6, 0x50, 0xa2, 0x1c, 0x52, 0xce, - 0xe6, 0xbf, 0xac, 0x09, 0x78, 0x31, 0x7a, 0x99, 0x2b, 0x25, 0xb3, 0x72, 0x8e, 0xda, 0xc5, 0x10, - 0x58, 0x14, 0xb7, 0x89, 0x62, 0xba, 0xb6, 0x54, 0x31, 0xe5, 0x47, 0xf7, 0xc3, 0x9c, 0xca, 0x8c, - 0xdd, 0x9a, 0xf9, 0xdc, 0x49, 0xea, 0xd1, 0xdc, 0x89, 0xf8, 0x8b, 0x84, 0x7f, 0x61, 0x83, 0xc9, - 0xfd, 0x1e, 0xa4, 0x2d, 0x5b, 0xb1, 0x07, 0x16, 0x37, 0xb8, 0x17, 0x67, 0xad, 
0x14, 0xd6, 0x9c, - 0x1f, 0x07, 0x54, 0x5c, 0xe2, 0x6a, 0xbe, 0x5b, 0x6f, 0x4b, 0xbc, 0x05, 0xc5, 0xe0, 0xe2, 0x44, - 0x1f, 0x19, 0xcf, 0xe7, 0x08, 0xe2, 0x1d, 0x2f, 0xff, 0xf2, 0x81, 0x96, 0xe3, 0x80, 0x60, 0x3c, - 0x0c, 0x10, 0xfc, 0x65, 0x1c, 0x9e, 0x98, 0x50, 0x2f, 0xa1, 0x37, 0x46, 0xf6, 0xf9, 0xa5, 0x79, - 0xaa, 0xad, 0x35, 0x46, 0x0b, 0xee, 0xb4, 0x78, 0x13, 0x16, 0xfc, 0xf4, 0xd9, 0x26, 0xf9, 0xa3, - 0x84, 0xe7, 0xf3, 0x83, 0xc8, 0xe5, 0xd7, 0x96, 0x68, 0x8e, 0xd8, 0x99, 0x30, 0xa7, 0x9d, 0x85, - 0x26, 0x0b, 0x89, 0x6f, 0x2e, 0x59, 0x48, 0x3e, 0x62, 0xb2, 0xe0, 0x3f, 0x70, 0xa9, 0xe0, 0x81, - 0x1b, 0x8b, 0xeb, 0xe9, 0x90, 0xb8, 0xfe, 0x16, 0x80, 0xaf, 0xa1, 0xb9, 0x0c, 0x29, 0x53, 0x1f, - 0xf4, 0x55, 0x6a, 0x26, 0x29, 0x89, 0x3d, 0xa0, 0x5b, 0x90, 0x22, 0xe6, 0xe6, 0x2c, 0xe6, 0xb8, - 0xe7, 0x25, 0xe6, 0xe2, 0xc3, 0x8c, 0x19, 0xb7, 0xa8, 0x01, 0x1a, 0x6f, 0x2a, 0x45, 0x7c, 0xe2, - 0x95, 0xe0, 0x27, 0x9e, 0x8c, 0x6c, 0x4f, 0x85, 0x7f, 0xea, 0x7d, 0x48, 0x51, 0xf3, 0x20, 0xf9, - 0x0d, 0x6d, 0x8c, 0xf2, 0x82, 0x99, 0xfc, 0x46, 0xdf, 0x07, 0x50, 0x6c, 0xdb, 0xd4, 0x5a, 0x03, - 0xef, 0x03, 0xab, 0xe1, 0xe6, 0xb5, 0xe1, 0xf0, 0x6d, 0x5e, 0xe0, 0x76, 0xb6, 0xec, 0x89, 0xfa, - 0x6c, 0xcd, 0xa7, 0x50, 0xdc, 0x85, 0x62, 0x50, 0xd6, 0x29, 0xf1, 0xd8, 0x18, 0x82, 0x25, 0x1e, - 0xab, 0xd8, 0x79, 0x89, 0xe7, 0x16, 0x88, 0x09, 0xd6, 0x03, 0xa7, 0x0f, 0xe2, 0x3f, 0xe2, 0xb0, - 0xe0, 0xb7, 0xce, 0xff, 0xb4, 0x2a, 0x49, 0xfc, 0x30, 0x0e, 0x59, 0x77, 0xf2, 0x11, 0x0d, 0x68, - 0x6f, 0xed, 0x04, 0x7f, 0xbb, 0x95, 0x75, 0xb4, 0x13, 0x6e, 0x9f, 0xfc, 0x8e, 0x9b, 0x50, 0x45, - 0x81, 0xda, 0xfe, 0x95, 0x76, 0xae, 0x0a, 0xf0, 0xfc, 0xf1, 0x27, 0x7c, 0x1c, 0x24, 0x93, 0x40, - 0xff, 0x03, 0x69, 0xa5, 0xed, 0x42, 0xf9, 0xc5, 0x10, 0x6c, 0xd7, 0x61, 0x5d, 0x6b, 0x0e, 0x37, - 0x28, 0xa7, 0xc4, 0x25, 0xf8, 0xa8, 0x04, 0xb7, 0xcf, 0xfe, 0x2a, 0xd1, 0xcb, 0x78, 0x82, 0x6e, - 0xb3, 0x08, 0x70, 0xb8, 0x7b, 0x7f, 0x6f, 0x6b, 0xfb, 0xee, 0x76, 0x7d, 0x8b, 0xa7, 0x54, 0x5b, - 0x5b, 0xf5, 0xad, 
0xb2, 0x40, 0xf8, 0xa4, 0xfa, 0xfd, 0xbd, 0xa3, 0xfa, 0x56, 0x39, 0x21, 0xde, - 0x81, 0x9c, 0xeb, 0x7a, 0x50, 0x05, 0x32, 0x4e, 0x5b, 0x22, 0xce, 0x1d, 0x00, 0xef, 0x32, 0x2d, - 0x43, 0xca, 0xd0, 0xdf, 0xe3, 0x5d, 0xe6, 0x84, 0xc4, 0x1e, 0x44, 0x15, 0x4a, 0x23, 0x7e, 0x0b, - 0xdd, 0x81, 0x8c, 0x31, 0x68, 0xc9, 0x8e, 0xd1, 0x8e, 0x34, 0x71, 0x1c, 0xa4, 0x61, 0xd0, 0xea, - 0x6a, 0xed, 0x7b, 0xf8, 0xcc, 0x59, 0x26, 0x63, 0xd0, 0xba, 0xc7, 0x6c, 0x9b, 0x7d, 0x45, 0xf0, - 0x7f, 0xe5, 0x14, 0xb2, 0xce, 0x51, 0x45, 0xff, 0x0b, 0x39, 0xd7, 0x25, 0xba, 0x57, 0x6f, 0x22, - 0x7d, 0x29, 0x57, 0xef, 0x89, 0xa0, 0xeb, 0xb0, 0x68, 0x69, 0x27, 0x7d, 0xa7, 0x85, 0xc5, 0x90, - 0x3d, 0x81, 0x9e, 0x99, 0x12, 0x7b, 0xb1, 0xe3, 0xc0, 0x51, 0x24, 0x12, 0x96, 0x47, 0x7d, 0xc5, - 0xb7, 0x39, 0x80, 0x90, 0x88, 0x9d, 0x08, 0x8b, 0xd8, 0x1f, 0x08, 0x90, 0xf7, 0x35, 0xc6, 0xd0, - 0x7f, 0xfb, 0x1c, 0x57, 0x31, 0x24, 0xd4, 0xf8, 0x78, 0xbd, 0x5b, 0x1d, 0xc1, 0x89, 0x09, 0xf3, - 0x4f, 0x2c, 0xaa, 0x0f, 0xe9, 0xf4, 0xd7, 0x92, 0x73, 0xf7, 0xd7, 0x9e, 0x05, 0x64, 0xeb, 0xb6, - 0xd2, 0x95, 0x4f, 0x75, 0x5b, 0xeb, 0x9f, 0xc8, 0xcc, 0x34, 0x98, 0x9b, 0x29, 0xd3, 0x37, 0x47, - 0xf4, 0xc5, 0x3e, 0xb5, 0x92, 0x1f, 0xc4, 0x21, 0xeb, 0x96, 0x7d, 0xf3, 0x5e, 0xd2, 0x38, 0x07, - 0x69, 0x5e, 0xd9, 0xb0, 0x5b, 0x1a, 0xfc, 0x29, 0xb4, 0x91, 0x58, 0x85, 0x6c, 0x0f, 0xdb, 0x0a, - 0xf5, 0x99, 0x2c, 0x4c, 0xba, 0xcf, 0xd7, 0x5f, 0x82, 0xbc, 0xef, 0xbe, 0x0c, 0x71, 0xa3, 0xbb, - 0xf5, 0x37, 0xcb, 0xb1, 0x6a, 0xe6, 0xa3, 0x4f, 0x2e, 0x25, 0x76, 0xf1, 0x7b, 0xe4, 0x84, 0x49, - 0xf5, 0x5a, 0xa3, 0x5e, 0xbb, 0x57, 0x8e, 0x57, 0xf3, 0x1f, 0x7d, 0x72, 0x29, 0x23, 0x61, 0xda, - 0xf7, 0xb9, 0x7e, 0x0f, 0x4a, 0x23, 0x1b, 0x13, 0x3c, 0xd0, 0x08, 0x8a, 0x5b, 0x87, 0xfb, 0x3b, - 0xdb, 0xb5, 0x8d, 0x66, 0x5d, 0x3e, 0xda, 0x6b, 0xd6, 0xcb, 0x71, 0xf4, 0x38, 0x2c, 0xed, 0x6c, - 0xbf, 0xd6, 0x68, 0xca, 0xb5, 0x9d, 0xed, 0xfa, 0x6e, 0x53, 0xde, 0x68, 0x36, 0x37, 0x6a, 0xf7, - 0xca, 0xc2, 0xfa, 0xaf, 0xf2, 0x50, 0xda, 0xd8, 0xac, 
0x6d, 0x93, 0xda, 0x4e, 0x6b, 0x2b, 0xd4, - 0x3d, 0xd4, 0x20, 0x49, 0x41, 0xe4, 0x89, 0x37, 0xa0, 0xab, 0x93, 0x1b, 0x83, 0xe8, 0x2e, 0xa4, - 0x28, 0xbe, 0x8c, 0x26, 0x5f, 0x89, 0xae, 0x4e, 0xe9, 0x14, 0x92, 0xc1, 0xd0, 0xe3, 0x34, 0xf1, - 0x8e, 0x74, 0x75, 0x72, 0xe3, 0x10, 0xed, 0x40, 0xc6, 0x81, 0xff, 0xa6, 0xdd, 0x36, 0xae, 0x4e, - 0xed, 0xc0, 0x91, 0xa9, 0x31, 0x98, 0x76, 0xf2, 0xf5, 0xe9, 0xea, 0x94, 0x96, 0x22, 0xda, 0x86, - 0x34, 0x47, 0x48, 0xa6, 0xdc, 0x1c, 0xae, 0x4e, 0xeb, 0xa4, 0x21, 0x09, 0x72, 0x1e, 0x00, 0x3e, - 0xfd, 0x52, 0x78, 0x75, 0x86, 0x6e, 0x29, 0x7a, 0x00, 0x85, 0x20, 0xea, 0x32, 0xdb, 0xed, 0xe4, - 0xea, 0x8c, 0x3d, 0x3b, 0xa2, 0x3f, 0x08, 0xc1, 0xcc, 0x76, 0x5b, 0xb9, 0x3a, 0x63, 0x0b, 0x0f, - 0xbd, 0x03, 0x8b, 0xe3, 0x10, 0xc9, 0xec, 0x97, 0x97, 0xab, 0x73, 0x34, 0xf5, 0x50, 0x0f, 0x50, - 0x08, 0xb4, 0x32, 0xc7, 0x5d, 0xe6, 0xea, 0x3c, 0x3d, 0x3e, 0xa4, 0x42, 0x69, 0x14, 0xae, 0x98, - 0xf5, 0x6e, 0x73, 0x75, 0xe6, 0x7e, 0x1f, 0xfb, 0x4a, 0xb0, 0x76, 0x9f, 0xf5, 0xae, 0x73, 0x75, - 0xe6, 0xf6, 0x1f, 0x3a, 0x04, 0xf0, 0xd5, 0x9e, 0x33, 0xdc, 0x7d, 0xae, 0xce, 0xd2, 0x08, 0x44, - 0x06, 0x2c, 0x85, 0x15, 0xa5, 0xf3, 0x5c, 0x85, 0xae, 0xce, 0xd5, 0x1f, 0x24, 0xf6, 0x1c, 0x2c, - 0x2f, 0x67, 0xbb, 0x1a, 0x5d, 0x9d, 0xb1, 0x51, 0xb8, 0x59, 0xff, 0xf4, 0x8b, 0x95, 0xf8, 0x67, - 0x5f, 0xac, 0xc4, 0xff, 0xfa, 0xc5, 0x4a, 0xfc, 0xe3, 0x2f, 0x57, 0x62, 0x9f, 0x7d, 0xb9, 0x12, - 0xfb, 0xd3, 0x97, 0x2b, 0xb1, 0xff, 0x7b, 0xe6, 0x44, 0xb3, 0x3b, 0x83, 0xd6, 0x5a, 0x5b, 0xef, - 0xdd, 0xf0, 0xff, 0x4b, 0x26, 0xec, 0x9f, 0x3b, 0xad, 0x34, 0x0d, 0xa8, 0x37, 0xff, 0x15, 0x00, - 0x00, 0xff, 0xff, 0xa5, 0x05, 0x49, 0xaf, 0xd9, 0x33, 0x00, 0x00, + 0x73, 0xac, 0xa8, 0x02, 0x99, 0x1e, 0xb6, 0x2c, 0xe5, 0x04, 0x53, 0x3f, 0x9c, 0x93, 0x9c, 0xa1, + 0x58, 0x84, 0x05, 0xbf, 0x33, 0x15, 0x3f, 0x8e, 0xbb, 0x92, 0xc4, 0x4f, 0x12, 0xc9, 0x53, 0x6c, + 0xd2, 0x65, 0x73, 0x49, 0x3e, 0x44, 0x97, 0xa1, 0x40, 0xa7, 0x2c, 0x3b, 0xcf, 0x89, 0xb3, 0x4e, + 0x4a, 0x0b, 0x94, 0x78, 
0xc4, 0x99, 0x56, 0x21, 0x6f, 0xac, 0x1b, 0x2e, 0x4b, 0x82, 0xb2, 0x80, + 0xb1, 0x6e, 0x38, 0x0c, 0x4f, 0xc2, 0x02, 0x59, 0x9f, 0xcb, 0x91, 0xa4, 0x2f, 0xc9, 0x13, 0x1a, + 0x67, 0x11, 0x7f, 0x27, 0x40, 0x79, 0xd4, 0x01, 0xa3, 0xdb, 0x90, 0x24, 0xb1, 0x88, 0x87, 0x95, + 0xea, 0x1a, 0x0b, 0x54, 0x6b, 0x4e, 0xa0, 0x5a, 0x6b, 0x3a, 0x81, 0x6a, 0x33, 0xfb, 0xe9, 0xe7, + 0xab, 0xb1, 0x8f, 0xff, 0xb2, 0x1a, 0x97, 0xa8, 0x04, 0x3a, 0x4f, 0x7c, 0xa5, 0xa2, 0xf5, 0x65, + 0x4d, 0xa5, 0x53, 0xce, 0x11, 0x47, 0xa8, 0x68, 0xfd, 0x6d, 0x15, 0xed, 0x40, 0xb9, 0xad, 0xf7, + 0x2d, 0xdc, 0xb7, 0x06, 0x96, 0xcc, 0x02, 0x21, 0x0f, 0x26, 0x01, 0x77, 0xc8, 0xc2, 0x6b, 0xcd, + 0xe1, 0xdc, 0xa7, 0x8c, 0x52, 0xa9, 0x1d, 0x24, 0x10, 0xb7, 0x7a, 0xaa, 0x74, 0x35, 0x55, 0xb1, + 0x75, 0xd3, 0xaa, 0x24, 0x2f, 0x25, 0x42, 0xfd, 0xe1, 0x91, 0xc3, 0x72, 0x68, 0xa8, 0x8a, 0x8d, + 0x37, 0x93, 0x64, 0xba, 0x92, 0x4f, 0x12, 0x3d, 0x05, 0x25, 0xc5, 0x30, 0x64, 0xcb, 0x56, 0x6c, + 0x2c, 0xb7, 0xce, 0x6c, 0x6c, 0xd1, 0x40, 0xb3, 0x20, 0x15, 0x14, 0xc3, 0x38, 0x20, 0xd4, 0x4d, + 0x42, 0x44, 0x57, 0xa0, 0x48, 0x62, 0x92, 0xa6, 0x74, 0xe5, 0x0e, 0xd6, 0x4e, 0x3a, 0x36, 0x0d, + 0x29, 0x09, 0xa9, 0xc0, 0xa9, 0x0d, 0x4a, 0x14, 0x55, 0xf7, 0xc4, 0x69, 0x3c, 0x42, 0x08, 0x92, + 0xaa, 0x62, 0x2b, 0x74, 0x27, 0x17, 0x24, 0xfa, 0x9b, 0xd0, 0x0c, 0xc5, 0xee, 0xf0, 0xfd, 0xa1, + 0xbf, 0xd1, 0x39, 0x48, 0x73, 0xb5, 0x09, 0xaa, 0x96, 0x8f, 0xd0, 0x32, 0xa4, 0x0c, 0x53, 0x3f, + 0xc5, 0xf4, 0xe8, 0xb2, 0x12, 0x1b, 0x88, 0x1f, 0x08, 0xb0, 0x38, 0x16, 0xb9, 0x88, 0xde, 0x8e, + 0x62, 0x75, 0x9c, 0x77, 0x91, 0xdf, 0xe8, 0x05, 0xa2, 0x57, 0x51, 0xb1, 0xc9, 0xa3, 0x7d, 0x65, + 0x7c, 0xab, 0x1b, 0xf4, 0x39, 0xdf, 0x1a, 0xce, 0x8d, 0xee, 0x41, 0xb9, 0xab, 0x58, 0xb6, 0xcc, + 0xbc, 0xbf, 0xec, 0x8b, 0xfc, 0x4f, 0x8c, 0x6d, 0x32, 0x8b, 0x15, 0xc4, 0xa0, 0xb9, 0x92, 0x22, + 0x11, 0xf5, 0xa8, 0xe8, 0x10, 0x96, 0x5b, 0x67, 0xef, 0x2b, 0x7d, 0x5b, 0xeb, 0x63, 0x79, 0xec, + 0xd4, 0xc6, 0x53, 0x89, 0xfb, 0x9a, 0xd5, 0xc2, 0x1d, 0xe5, 
0x54, 0xd3, 0x9d, 0x69, 0x2d, 0xb9, + 0xf2, 0xee, 0x89, 0x5a, 0xa2, 0x04, 0xc5, 0x60, 0xd8, 0x45, 0x45, 0x10, 0xec, 0x21, 0x5f, 0xbf, + 0x60, 0x0f, 0xd1, 0xf3, 0x90, 0x24, 0x6b, 0xa4, 0x6b, 0x2f, 0x86, 0xbc, 0x88, 0xcb, 0x35, 0xcf, + 0x0c, 0x2c, 0x51, 0x4e, 0x51, 0x74, 0xbf, 0x06, 0x37, 0x14, 0x8f, 0x6a, 0x15, 0xaf, 0x41, 0x69, + 0x24, 0xce, 0xfa, 0x8e, 0x2f, 0xee, 0x3f, 0x3e, 0xb1, 0x04, 0x85, 0x40, 0x40, 0x15, 0xcf, 0xc1, + 0x72, 0x58, 0x7c, 0x14, 0x3b, 0x2e, 0x3d, 0x10, 0xe7, 0xd0, 0x2d, 0xc8, 0xba, 0x01, 0x92, 0x7d, + 0x8d, 0xe7, 0xc7, 0x56, 0xe1, 0x30, 0x4b, 0x2e, 0x2b, 0xf9, 0x0c, 0x89, 0x55, 0x53, 0x73, 0x10, + 0xe8, 0xc4, 0x33, 0x8a, 0x61, 0x34, 0x14, 0xab, 0x23, 0xbe, 0x0d, 0x95, 0xa8, 0xe0, 0x37, 0xb2, + 0x8c, 0xa4, 0x6b, 0x85, 0xe7, 0x20, 0x7d, 0xac, 0x9b, 0x3d, 0xc5, 0xa6, 0xca, 0x0a, 0x12, 0x1f, + 0x11, 0xeb, 0x64, 0x81, 0x30, 0x41, 0xc9, 0x6c, 0x20, 0xca, 0x70, 0x3e, 0x32, 0x00, 0x12, 0x11, + 0xad, 0xaf, 0x62, 0xb6, 0x9f, 0x05, 0x89, 0x0d, 0x3c, 0x45, 0x6c, 0xb2, 0x6c, 0x40, 0x5e, 0x6b, + 0xd1, 0xb5, 0x52, 0xfd, 0x39, 0x89, 0x8f, 0xc4, 0x5f, 0x25, 0xe0, 0x5c, 0x78, 0x18, 0x44, 0x97, + 0x60, 0xa1, 0xa7, 0x0c, 0x65, 0x7b, 0xc8, 0xbf, 0x65, 0x76, 0x1c, 0xd0, 0x53, 0x86, 0xcd, 0x21, + 0xfb, 0x90, 0xcb, 0x90, 0xb0, 0x87, 0x56, 0x45, 0xb8, 0x94, 0xb8, 0xba, 0x20, 0x91, 0x9f, 0xe8, + 0x10, 0x16, 0xbb, 0x7a, 0x5b, 0xe9, 0xca, 0x3e, 0x8b, 0xe7, 0xc6, 0x7e, 0x79, 0x6c, 0xb3, 0x59, + 0x40, 0xc3, 0xea, 0x98, 0xd1, 0x97, 0xa8, 0x8e, 0x1d, 0xd7, 0xf2, 0xbf, 0x21, 0xab, 0xf7, 0x9d, + 0x51, 0x2a, 0xe0, 0x29, 0x1c, 0x9f, 0x9d, 0x9e, 0xdb, 0x67, 0x3f, 0x0f, 0xcb, 0x7d, 0x3c, 0xb4, + 0x7d, 0x73, 0x64, 0x86, 0x93, 0xa1, 0x67, 0x81, 0xc8, 0x33, 0xef, 0xfd, 0xc4, 0x86, 0xd0, 0x35, + 0x9a, 0x59, 0x18, 0xba, 0x85, 0x4d, 0x59, 0x51, 0x55, 0x13, 0x5b, 0x16, 0xcd, 0x6c, 0x17, 0x68, + 0xba, 0x40, 0xe9, 0x1b, 0x8c, 0x2c, 0xfe, 0xd4, 0x7f, 0x56, 0xc1, 0x4c, 0x82, 0x9f, 0x44, 0xdc, + 0x3b, 0x89, 0x03, 0x58, 0xe6, 0xf2, 0x6a, 0xe0, 0x30, 0x84, 0x59, 0x3d, 0x0f, 0x72, 0xc4, 0x67, + 
0x38, 0x87, 0xc4, 0xa3, 0x9d, 0x83, 0xe3, 0x6d, 0x93, 0x3e, 0x6f, 0xfb, 0x6f, 0x76, 0x36, 0xaf, + 0xba, 0x51, 0xc4, 0x4b, 0xd3, 0x42, 0xa3, 0x88, 0xb7, 0x2e, 0x21, 0xe0, 0xde, 0x7e, 0x16, 0x87, + 0x6a, 0x74, 0x5e, 0x16, 0xaa, 0xea, 0x19, 0x58, 0x74, 0xd7, 0xe2, 0xce, 0x8f, 0x7d, 0xf5, 0x65, + 0xf7, 0x01, 0x9f, 0x60, 0x64, 0x54, 0xbc, 0x02, 0xc5, 0x91, 0xac, 0x91, 0x9d, 0x42, 0xe1, 0xd4, + 0xff, 0x7e, 0xf1, 0x47, 0x09, 0xd7, 0xab, 0x06, 0x52, 0xbb, 0x10, 0xcb, 0x7b, 0x03, 0x96, 0x54, + 0xdc, 0xd6, 0xd4, 0xaf, 0x6a, 0x78, 0x8b, 0x5c, 0xfa, 0x3b, 0xbb, 0x9b, 0xc1, 0xee, 0xfe, 0x98, + 0x87, 0xac, 0x84, 0x2d, 0x83, 0xa4, 0x74, 0x68, 0x13, 0x72, 0x78, 0xd8, 0xc6, 0x86, 0xed, 0x64, + 0xc1, 0xe1, 0xd5, 0x04, 0xe3, 0xae, 0x3b, 0x9c, 0xa4, 0x36, 0x76, 0xc5, 0xd0, 0x4d, 0x0e, 0x83, + 0x44, 0x23, 0x1a, 0x5c, 0xdc, 0x8f, 0x83, 0xbc, 0xe0, 0xe0, 0x20, 0x89, 0xc8, 0x52, 0x98, 0x49, + 0x8d, 0x00, 0x21, 0x37, 0x39, 0x10, 0x92, 0x9c, 0xf2, 0xb2, 0x00, 0x12, 0x52, 0x0b, 0x20, 0x21, + 0xa9, 0x29, 0xcb, 0x8c, 0x80, 0x42, 0x5e, 0x70, 0xa0, 0x90, 0xf4, 0x94, 0x19, 0x8f, 0x60, 0x21, + 0xaf, 0x07, 0xb1, 0x90, 0x4c, 0x44, 0x68, 0x73, 0xa4, 0x27, 0x82, 0x21, 0xaf, 0xf8, 0xc0, 0x90, + 0x6c, 0x24, 0x0a, 0xc1, 0x14, 0x85, 0xa0, 0x21, 0xaf, 0x05, 0xd0, 0x90, 0xdc, 0x94, 0x7d, 0x98, + 0x00, 0x87, 0x6c, 0xf9, 0xe1, 0x10, 0x88, 0x44, 0x55, 0xf8, 0xb9, 0x47, 0xe1, 0x21, 0x2f, 0xb9, + 0x78, 0x48, 0x3e, 0x12, 0xd8, 0xe1, 0x6b, 0x19, 0x05, 0x44, 0xf6, 0xc6, 0x00, 0x11, 0x06, 0x60, + 0x3c, 0x15, 0xa9, 0x62, 0x0a, 0x22, 0xb2, 0x37, 0x86, 0x88, 0x14, 0xa6, 0x28, 0x9c, 0x02, 0x89, + 0x7c, 0x2f, 0x1c, 0x12, 0x89, 0x06, 0x2d, 0xf8, 0x34, 0x67, 0xc3, 0x44, 0xe4, 0x08, 0x4c, 0xa4, + 0x14, 0x59, 0xbf, 0x33, 0xf5, 0x33, 0x83, 0x22, 0x87, 0x21, 0xa0, 0x08, 0x83, 0x2f, 0xae, 0x46, + 0x2a, 0x9f, 0x01, 0x15, 0x39, 0x0c, 0x41, 0x45, 0x16, 0xa7, 0xaa, 0x9d, 0x0a, 0x8b, 0xdc, 0x0d, + 0xc2, 0x22, 0x68, 0xca, 0x37, 0x16, 0x89, 0x8b, 0xb4, 0xa2, 0x70, 0x11, 0x86, 0x5d, 0x3c, 0x1b, + 0xa9, 0x71, 0x0e, 0x60, 0x64, 0x6f, 
0x0c, 0x18, 0x59, 0x9e, 0x62, 0x69, 0xb3, 0x22, 0x23, 0xd7, + 0x48, 0x46, 0x31, 0xe2, 0xaa, 0x49, 0x72, 0x8f, 0x4d, 0x53, 0x37, 0x39, 0xc6, 0xc1, 0x06, 0xe2, + 0x55, 0x52, 0x29, 0x7b, 0x6e, 0x79, 0x02, 0x8a, 0x42, 0x8b, 0x28, 0x9f, 0x2b, 0x16, 0x7f, 0x1d, + 0xf7, 0x64, 0x69, 0x81, 0xe9, 0xaf, 0xb2, 0x73, 0xbc, 0xca, 0xf6, 0x61, 0x2b, 0x42, 0x10, 0x5b, + 0x59, 0x85, 0x3c, 0x29, 0x8e, 0x46, 0x60, 0x13, 0xc5, 0x70, 0x61, 0x93, 0xeb, 0xb0, 0x48, 0x93, + 0x00, 0x86, 0xc0, 0xf0, 0xc8, 0x9a, 0xa4, 0x91, 0xb5, 0x44, 0x1e, 0xb0, 0x5d, 0x60, 0x21, 0xf6, + 0x39, 0x58, 0xf2, 0xf1, 0xba, 0x45, 0x17, 0xc3, 0x10, 0xca, 0x2e, 0xf7, 0x06, 0xaf, 0xbe, 0x7e, + 0x1b, 0xf7, 0x76, 0xc8, 0xc3, 0x5b, 0xc2, 0xa0, 0x91, 0xf8, 0xd7, 0x04, 0x8d, 0x08, 0x5f, 0x19, + 0x1a, 0xf1, 0x17, 0x91, 0x89, 0x60, 0x11, 0xf9, 0xf7, 0xb8, 0x77, 0x26, 0x2e, 0xd0, 0xd1, 0xd6, + 0x55, 0xcc, 0xcb, 0x3a, 0xfa, 0x9b, 0xa4, 0x59, 0x5d, 0xfd, 0x84, 0x17, 0x6f, 0xe4, 0x27, 0xe1, + 0x72, 0x63, 0x67, 0x8e, 0x87, 0x46, 0xb7, 0x22, 0x64, 0xb9, 0x0b, 0xaf, 0x08, 0xcb, 0x90, 0x78, + 0x88, 0x59, 0xa4, 0x5b, 0x90, 0xc8, 0x4f, 0xc2, 0x47, 0x8d, 0x8c, 0xe7, 0x20, 0x6c, 0x80, 0x6e, + 0x43, 0x8e, 0xb6, 0x6b, 0x64, 0xdd, 0xb0, 0x78, 0x40, 0x0a, 0xa4, 0x6b, 0xac, 0x2b, 0xb3, 0xb6, + 0x4f, 0x78, 0xf6, 0x0c, 0x4b, 0xca, 0x1a, 0xfc, 0x97, 0x2f, 0x69, 0xca, 0x05, 0x92, 0xa6, 0x0b, + 0x90, 0x23, 0xb3, 0xb7, 0x0c, 0xa5, 0x8d, 0x69, 0x64, 0xc9, 0x49, 0x1e, 0x41, 0x7c, 0x00, 0x68, + 0x3c, 0x4e, 0xa2, 0x06, 0xa4, 0xf1, 0x29, 0xee, 0xdb, 0x2c, 0xa7, 0xcc, 0xaf, 0x9f, 0x1b, 0xaf, + 0x1b, 0xc9, 0xe3, 0xcd, 0x0a, 0xd9, 0xe4, 0xbf, 0x7d, 0xbe, 0x5a, 0x66, 0xdc, 0xcf, 0xea, 0x3d, + 0xcd, 0xc6, 0x3d, 0xc3, 0x3e, 0x93, 0xb8, 0xbc, 0xf8, 0x67, 0x01, 0x4a, 0x23, 0xf1, 0x33, 0x74, + 0x6f, 0x1d, 0x93, 0x17, 0x7c, 0xc0, 0xd2, 0x6c, 0xfb, 0x7d, 0x11, 0xe0, 0x44, 0xb1, 0xe4, 0xf7, + 0x94, 0xbe, 0x8d, 0x55, 0xbe, 0xe9, 0xb9, 0x13, 0xc5, 0x7a, 0x93, 0x12, 0xc8, 0xa9, 0x93, 0xc7, + 0x03, 0x0b, 0xab, 0x1c, 0xe2, 0xca, 0x9c, 0x28, 0xd6, 0xa1, 0x85, 0x55, 
0xdf, 0x2a, 0x33, 0x8f, + 0xb6, 0xca, 0xe0, 0x1e, 0x67, 0x47, 0xf6, 0xd8, 0x57, 0xf7, 0xe7, 0xfc, 0x75, 0x3f, 0xaa, 0x42, + 0xd6, 0x30, 0x35, 0xdd, 0xd4, 0xec, 0x33, 0x7a, 0x30, 0x09, 0xc9, 0x1d, 0xa3, 0xcb, 0x50, 0xe8, + 0xe1, 0x9e, 0xa1, 0xeb, 0x5d, 0x99, 0x39, 0x9b, 0x3c, 0x15, 0x5d, 0xe0, 0xc4, 0x3a, 0xf5, 0x39, + 0x1f, 0x0a, 0xde, 0xd7, 0xe7, 0xe1, 0x3b, 0x5f, 0xef, 0xf6, 0xae, 0x84, 0x6c, 0xaf, 0x8f, 0x42, + 0x16, 0x31, 0xb2, 0xbf, 0xee, 0xf8, 0xdb, 0xda, 0x60, 0xf1, 0x87, 0x14, 0xf4, 0x0d, 0xe6, 0x46, + 0xe8, 0xc0, 0x5f, 0x99, 0x0d, 0xa8, 0x53, 0x70, 0xcc, 0x79, 0x56, 0xef, 0xe1, 0x55, 0x70, 0x8c, + 0x6c, 0xa1, 0xb7, 0xe0, 0xf1, 0x11, 0xcf, 0xe6, 0xaa, 0x16, 0x66, 0x75, 0x70, 0x8f, 0x05, 0x1d, + 0x9c, 0xa3, 0xda, 0xdb, 0xac, 0xc4, 0x23, 0x7e, 0x73, 0xdb, 0x50, 0x0c, 0xa6, 0x79, 0xa1, 0xc7, + 0x7f, 0x19, 0x0a, 0x26, 0xb6, 0x15, 0xad, 0x2f, 0x07, 0x6a, 0xd2, 0x05, 0x46, 0xe4, 0xf8, 0xef, + 0x3e, 0x3c, 0x16, 0x9a, 0xee, 0xa1, 0x17, 0x21, 0xe7, 0x65, 0x8a, 0x6c, 0x57, 0x27, 0x20, 0x79, + 0x1e, 0xaf, 0xf8, 0x9b, 0xb8, 0xa7, 0x32, 0x88, 0x0d, 0xd6, 0x21, 0x6d, 0x62, 0x6b, 0xd0, 0x65, + 0x68, 0x5d, 0x71, 0xfd, 0xb9, 0xd9, 0x12, 0x45, 0x42, 0x1d, 0x74, 0x6d, 0x89, 0x0b, 0x8b, 0x0f, + 0x20, 0xcd, 0x28, 0x28, 0x0f, 0x99, 0xc3, 0xdd, 0x7b, 0xbb, 0x7b, 0x6f, 0xee, 0x96, 0x63, 0x08, + 0x20, 0xbd, 0x51, 0xab, 0xd5, 0xf7, 0x9b, 0xe5, 0x38, 0xca, 0x41, 0x6a, 0x63, 0x73, 0x4f, 0x6a, + 0x96, 0x05, 0x42, 0x96, 0xea, 0xaf, 0xd7, 0x6b, 0xcd, 0x72, 0x02, 0x2d, 0x42, 0x81, 0xfd, 0x96, + 0xef, 0xee, 0x49, 0xf7, 0x37, 0x9a, 0xe5, 0xa4, 0x8f, 0x74, 0x50, 0xdf, 0xdd, 0xaa, 0x4b, 0xe5, + 0x94, 0xf8, 0x5f, 0x70, 0x3e, 0x32, 0xb5, 0xf4, 0x80, 0xbf, 0xb8, 0x0f, 0xf8, 0x13, 0x7f, 0x22, + 0x40, 0x35, 0x3a, 0x5f, 0x44, 0xaf, 0x8f, 0x2c, 0x7c, 0x7d, 0x8e, 0x64, 0x73, 0x64, 0xf5, 0xe8, + 0x0a, 0x14, 0x4d, 0x7c, 0x8c, 0xed, 0x76, 0x87, 0xe5, 0xaf, 0x2c, 0x60, 0x16, 0xa4, 0x02, 0xa7, + 0x52, 0x21, 0x8b, 0xb1, 0xbd, 0x83, 0xdb, 0xb6, 0xcc, 0x7c, 0x11, 0x33, 0xba, 0x1c, 0x61, 0x23, + 0xd4, 0x03, 
0x46, 0x14, 0xdf, 0x9e, 0x6b, 0x2f, 0x73, 0x90, 0x92, 0xea, 0x4d, 0xe9, 0xad, 0x72, + 0x02, 0x21, 0x28, 0xd2, 0x9f, 0xf2, 0xc1, 0xee, 0xc6, 0xfe, 0x41, 0x63, 0x8f, 0xec, 0xe5, 0x12, + 0x94, 0x9c, 0xbd, 0x74, 0x88, 0x29, 0xf1, 0x0f, 0x02, 0x3c, 0x1e, 0x91, 0xed, 0xa2, 0xdb, 0x00, + 0xf6, 0x50, 0x36, 0x71, 0x5b, 0x37, 0xd5, 0x68, 0x23, 0x6b, 0x0e, 0x25, 0xca, 0x21, 0xe5, 0x6c, + 0xfe, 0xcb, 0x9a, 0x80, 0x17, 0xa3, 0x97, 0xb9, 0x52, 0xb2, 0x2a, 0xe7, 0x53, 0xbb, 0x18, 0x02, + 0x8b, 0xe2, 0x36, 0x51, 0x4c, 0xf7, 0x96, 0x2a, 0xa6, 0xfc, 0xe8, 0x7e, 0x98, 0x53, 0x99, 0xb1, + 0x5b, 0x33, 0x9f, 0x3b, 0x49, 0x3d, 0x9a, 0x3b, 0x11, 0x7f, 0x9e, 0xf0, 0x6f, 0x6c, 0x30, 0xb9, + 0xdf, 0x83, 0xb4, 0x65, 0x2b, 0xf6, 0xc0, 0xe2, 0x06, 0xf7, 0xe2, 0xac, 0x95, 0xc2, 0x9a, 0xf3, + 0xe3, 0x80, 0x8a, 0x4b, 0x5c, 0xcd, 0x77, 0xfb, 0x6d, 0x89, 0xb7, 0xa0, 0x18, 0xdc, 0x9c, 0xe8, + 0x4f, 0xc6, 0xf3, 0x39, 0x82, 0x78, 0xc7, 0xcb, 0xbf, 0x7c, 0xa0, 0xe5, 0x38, 0x20, 0x18, 0x0f, + 0x03, 0x04, 0x7f, 0x11, 0x87, 0x27, 0x26, 0xd4, 0x4b, 0xe8, 0x8d, 0x91, 0x73, 0x7e, 0x69, 0x9e, + 0x6a, 0x6b, 0x8d, 0xd1, 0x82, 0x27, 0x2d, 0xde, 0x84, 0x05, 0x3f, 0x7d, 0xb6, 0x45, 0xfe, 0x5e, + 0xf0, 0x7c, 0x7e, 0x10, 0xb9, 0xfc, 0xda, 0x12, 0xcd, 0x11, 0x3b, 0x13, 0xe6, 0xb4, 0xb3, 0xd0, + 0x64, 0x21, 0xf1, 0xcd, 0x25, 0x0b, 0xc9, 0x47, 0xb4, 0xb6, 0xb7, 0x00, 0x7c, 0x0d, 0xc9, 0x65, + 0x48, 0x99, 0xfa, 0xa0, 0xaf, 0xd2, 0x63, 0x4e, 0x49, 0x6c, 0x80, 0x6e, 0x41, 0x8a, 0x98, 0x8b, + 0xb3, 0x19, 0xe3, 0x9e, 0x93, 0x1c, 0xb7, 0x0f, 0xf3, 0x65, 0xdc, 0xa2, 0x06, 0x68, 0xbc, 0x29, + 0x14, 0xf1, 0x8a, 0x57, 0x82, 0xaf, 0x78, 0x32, 0xb2, 0xbd, 0x14, 0xfe, 0xaa, 0xf7, 0x21, 0x45, + 0x8f, 0x97, 0xe4, 0x27, 0xb4, 0xb1, 0xc9, 0x0b, 0x5e, 0xf2, 0x1b, 0x7d, 0x1f, 0x40, 0xb1, 0x6d, + 0x53, 0x6b, 0x0d, 0xbc, 0x17, 0xac, 0x86, 0x9b, 0xc7, 0x86, 0xc3, 0xb7, 0x79, 0x81, 0xdb, 0xc9, + 0xb2, 0x27, 0xea, 0xb3, 0x15, 0x9f, 0x42, 0x71, 0x17, 0x8a, 0x41, 0x59, 0xa7, 0x44, 0x63, 0x73, + 0x08, 0x96, 0x68, 0xac, 0xe2, 0xe6, 0x25, 0x9a, 
0x5b, 0xe0, 0x25, 0x58, 0x0f, 0x9b, 0x0e, 0xc4, + 0x7f, 0xc4, 0x61, 0xc1, 0x6f, 0x5d, 0xff, 0x69, 0x55, 0x8e, 0xf8, 0x61, 0x1c, 0xb2, 0xee, 0xe2, + 0x23, 0x1a, 0xc8, 0xde, 0xde, 0x09, 0xfe, 0x76, 0x29, 0xeb, 0x48, 0x27, 0xdc, 0x3e, 0xf7, 0x1d, + 0x37, 0x21, 0x8a, 0x02, 0xa5, 0xfd, 0x3b, 0xed, 0xb4, 0xfa, 0x79, 0xfe, 0xf7, 0x63, 0x3e, 0x0f, + 0x92, 0x09, 0xa0, 0xff, 0x81, 0xb4, 0xd2, 0x76, 0xa1, 0xf8, 0x62, 0x08, 0x36, 0xeb, 0xb0, 0xae, + 0x35, 0x87, 0x1b, 0x94, 0x53, 0xe2, 0x12, 0x7c, 0x56, 0x82, 0xdb, 0x27, 0x7f, 0x95, 0xe8, 0x65, + 0x3c, 0x41, 0xb7, 0x57, 0x04, 0x38, 0xdc, 0xbd, 0xbf, 0xb7, 0xb5, 0x7d, 0x77, 0xbb, 0xbe, 0xc5, + 0x53, 0xa2, 0xad, 0xad, 0xfa, 0x56, 0x59, 0x20, 0x7c, 0x52, 0xfd, 0xfe, 0xde, 0x51, 0x7d, 0xab, + 0x9c, 0x10, 0xef, 0x40, 0xce, 0x75, 0x1d, 0xa8, 0x02, 0x19, 0xa7, 0xad, 0x10, 0xe7, 0x11, 0x93, + 0x77, 0x89, 0x96, 0x21, 0x65, 0xe8, 0xef, 0xf1, 0x2e, 0x71, 0x42, 0x62, 0x03, 0x51, 0x85, 0xd2, + 0x88, 0xdf, 0x41, 0x77, 0x20, 0x63, 0x0c, 0x5a, 0xb2, 0x63, 0xb4, 0x23, 0x4d, 0x18, 0x07, 0x29, + 0x18, 0xb4, 0xba, 0x5a, 0xfb, 0x1e, 0x3e, 0x73, 0xb6, 0xc9, 0x18, 0xb4, 0xee, 0x31, 0xdb, 0x66, + 0x6f, 0x11, 0xfc, 0x6f, 0x39, 0x85, 0xac, 0xf3, 0xa9, 0xa2, 0xff, 0x85, 0x9c, 0xeb, 0xd2, 0xdc, + 0xab, 0x33, 0x91, 0xbe, 0x90, 0xab, 0xf7, 0x44, 0xd0, 0x75, 0x58, 0xb4, 0xb4, 0x93, 0xbe, 0xd3, + 0x82, 0x62, 0xc8, 0x9c, 0x40, 0xbf, 0x99, 0x12, 0x7b, 0xb0, 0xe3, 0xc0, 0x49, 0x24, 0x92, 0x95, + 0x47, 0x7d, 0xc5, 0xb7, 0x39, 0x81, 0x90, 0x88, 0x9b, 0x08, 0x8b, 0xb8, 0x1f, 0x08, 0x90, 0xf7, + 0x35, 0xb6, 0xd0, 0x7f, 0xfb, 0x1c, 0x57, 0x31, 0x24, 0x54, 0xf8, 0x78, 0xbd, 0x5b, 0x19, 0xc1, + 0x85, 0x09, 0xf3, 0x2f, 0x2c, 0xaa, 0x8f, 0xe8, 0xf4, 0xc7, 0x92, 0x73, 0xf7, 0xc7, 0x9e, 0x05, + 0x64, 0xeb, 0xb6, 0xd2, 0x95, 0x4f, 0x75, 0x5b, 0xeb, 0x9f, 0xc8, 0xcc, 0x34, 0x98, 0x9b, 0x29, + 0xd3, 0x27, 0x47, 0xf4, 0xc1, 0x3e, 0xb5, 0x92, 0x1f, 0xc4, 0x21, 0xeb, 0x96, 0x6d, 0xf3, 0x5e, + 0xb2, 0x38, 0x07, 0x69, 0x5e, 0x99, 0xb0, 0x5b, 0x16, 0x7c, 0x14, 0xda, 0x08, 0xac, 
0x42, 0xb6, + 0x87, 0x6d, 0x85, 0xfa, 0x4c, 0x06, 0x41, 0xba, 0xe3, 0xeb, 0x2f, 0x41, 0xde, 0x77, 0xdf, 0x85, + 0xb8, 0xd1, 0xdd, 0xfa, 0x9b, 0xe5, 0x58, 0x35, 0xf3, 0xd1, 0x27, 0x97, 0x12, 0xbb, 0xf8, 0x3d, + 0xf2, 0x85, 0x49, 0xf5, 0x5a, 0xa3, 0x5e, 0xbb, 0x57, 0x8e, 0x57, 0xf3, 0x1f, 0x7d, 0x72, 0x29, + 0x23, 0x61, 0xda, 0xb7, 0xb9, 0x7e, 0x0f, 0x4a, 0x23, 0x07, 0x13, 0xfc, 0xa0, 0x11, 0x14, 0xb7, + 0x0e, 0xf7, 0x77, 0xb6, 0x6b, 0x1b, 0xcd, 0xba, 0x7c, 0xb4, 0xd7, 0xac, 0x97, 0xe3, 0xe8, 0x71, + 0x58, 0xda, 0xd9, 0x7e, 0xad, 0xd1, 0x94, 0x6b, 0x3b, 0xdb, 0xf5, 0xdd, 0xa6, 0xbc, 0xd1, 0x6c, + 0x6e, 0xd4, 0xee, 0x95, 0x85, 0xf5, 0x5f, 0xe6, 0xa1, 0xb4, 0xb1, 0x59, 0xdb, 0x26, 0xb5, 0x99, + 0xd6, 0x56, 0xa8, 0x7b, 0xa8, 0x41, 0x92, 0x82, 0xc0, 0x13, 0x6f, 0x30, 0x57, 0x27, 0x37, 0xf6, + 0xd0, 0x5d, 0x48, 0x51, 0x7c, 0x18, 0x4d, 0xbe, 0xd2, 0x5c, 0x9d, 0xd2, 0xe9, 0x23, 0x93, 0xa1, + 0x9f, 0xd3, 0xc4, 0x3b, 0xce, 0xd5, 0xc9, 0x8d, 0x3f, 0xb4, 0x03, 0x19, 0x07, 0xbe, 0x9b, 0x76, + 0x5b, 0xb8, 0x3a, 0xb5, 0x83, 0x46, 0x96, 0xc6, 0x60, 0xd6, 0xc9, 0xd7, 0x9f, 0xab, 0x53, 0x5a, + 0x82, 0x68, 0x1b, 0xd2, 0x1c, 0xe1, 0x98, 0x72, 0xf3, 0xb7, 0x3a, 0xad, 0x13, 0x86, 0x24, 0xc8, + 0x79, 0x00, 0xf6, 0xf4, 0x4b, 0xdd, 0xd5, 0x19, 0xba, 0x9d, 0xe8, 0x01, 0x14, 0x82, 0xa8, 0xc9, + 0x6c, 0xb7, 0x8b, 0xab, 0x33, 0xf6, 0xdc, 0x88, 0xfe, 0x20, 0x84, 0x32, 0xdb, 0x6d, 0xe3, 0xea, + 0x8c, 0x2d, 0x38, 0xf4, 0x0e, 0x2c, 0x8e, 0x43, 0x1c, 0xb3, 0x5f, 0x3e, 0xae, 0xce, 0xd1, 0x94, + 0x43, 0x3d, 0x40, 0x21, 0xd0, 0xc8, 0x1c, 0x77, 0x91, 0xab, 0xf3, 0xf4, 0xe8, 0x90, 0x0a, 0xa5, + 0x51, 0xb8, 0x61, 0xd6, 0xbb, 0xc9, 0xd5, 0x99, 0xfb, 0x75, 0xec, 0x2d, 0xc1, 0xda, 0x7b, 0xd6, + 0xbb, 0xca, 0xd5, 0x99, 0xdb, 0x77, 0xe8, 0x10, 0xc0, 0x57, 0x3b, 0xce, 0x70, 0x77, 0xb9, 0x3a, + 0x4b, 0x23, 0x0f, 0x19, 0xb0, 0x14, 0x56, 0x54, 0xce, 0x73, 0x95, 0xb9, 0x3a, 0x57, 0x7f, 0x8f, + 0xd8, 0x73, 0xb0, 0x3c, 0x9c, 0xed, 0x6a, 0x73, 0x75, 0xc6, 0x46, 0xdf, 0x66, 0xfd, 0xd3, 0x2f, + 0x56, 0xe2, 0x9f, 0x7d, 
0xb1, 0x12, 0xff, 0xeb, 0x17, 0x2b, 0xf1, 0x8f, 0xbf, 0x5c, 0x89, 0x7d, + 0xf6, 0xe5, 0x4a, 0xec, 0x4f, 0x5f, 0xae, 0xc4, 0xfe, 0xef, 0x99, 0x13, 0xcd, 0xee, 0x0c, 0x5a, + 0x6b, 0x6d, 0xbd, 0x77, 0xc3, 0xff, 0x2f, 0x97, 0xb0, 0x7f, 0xde, 0xb4, 0xd2, 0x34, 0xa0, 0xde, + 0xfc, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1e, 0xe0, 0x41, 0x05, 0x99, 0x33, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -7859,18 +7842,6 @@ func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.RetainHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) - i-- - dAtA[i] = 0x30 - } - if len(m.AppHash) > 0 { - i -= len(m.AppHash) - copy(dAtA[i:], m.AppHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) - i-- - dAtA[i] = 0x2a - } if m.ConsensusParamUpdates != nil { { size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) @@ -9900,13 +9871,6 @@ func (m *ResponseFinalizeBlock) Size() (n int) { l = m.ConsensusParamUpdates.Size() n += 1 + l + sovTypes(uint64(l)) } - l = len(m.AppHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.RetainHeight != 0 { - n += 1 + sovTypes(uint64(m.RetainHeight)) - } return n } @@ -17380,59 +17344,6 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.AppHash == nil { - m.AppHash = []byte{} - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) - } - m.RetainHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RetainHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index d8143feb3f..c16c9c2ed2 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -166,7 +166,7 @@ message RequestFinalizeBlock { repeated bytes txs = 1; CommitInfo decided_last_commit = 2 [(gogoproto.nullable) = false]; repeated Misbehavior byzantine_validators = 3 [(gogoproto.nullable) = false]; - // hash is the merkle root hash of the fields of the proposed block. + // hash is the merkle root hash of the fields of the decided block. 
bytes hash = 4; int64 height = 5; google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; @@ -368,8 +368,6 @@ message ResponseFinalizeBlock { repeated ExecTxResult tx_results = 2; repeated ValidatorUpdate validator_updates = 3 [(gogoproto.nullable) = false]; tendermint.types.ConsensusParams consensus_param_updates = 4; - bytes app_hash = 5; - int64 retain_height = 6; } //---------------------------------------- From d59a53be0124901e6daf4b8492282b34f05e9627 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 24 May 2022 17:19:32 +0200 Subject: [PATCH 055/203] p2p: reduce ability of SendError to disconnect peers (#8597) --- internal/p2p/channel.go | 1 + internal/p2p/peermanager.go | 7 +++++++ internal/p2p/router.go | 16 ++++++++++++++-- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go index d3d7d104ff..d6763543ab 100644 --- a/internal/p2p/channel.go +++ b/internal/p2p/channel.go @@ -46,6 +46,7 @@ type Wrapper interface { type PeerError struct { NodeID types.NodeID Err error + Fatal bool } func (pe PeerError) Error() string { return fmt.Sprintf("peer=%q: %s", pe.NodeID, pe.Err.Error()) } diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 165b00e61a..7391de4ea7 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -430,6 +430,13 @@ func (m *PeerManager) PeerRatio() float64 { return float64(m.store.Size()) / float64(m.options.MaxPeers) } +func (m *PeerManager) HasMaxPeerCapacity() bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + return len(m.connected) >= int(m.options.MaxConnected) +} + // DialNext finds an appropriate peer address to dial, and marks it as dialing. // If no peer is found, or all connection slots are full, it blocks until one // becomes available. 
The caller must call Dialed() or DialFailed() for the diff --git a/internal/p2p/router.go b/internal/p2p/router.go index a9a01f3c7f..267d55a962 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -396,9 +396,21 @@ func (r *Router) routeChannel( return } - r.logger.Error("peer error, evicting", "peer", peerError.NodeID, "err", peerError.Err) + shouldEvict := peerError.Fatal || r.peerManager.HasMaxPeerCapacity() + r.logger.Error("peer error", + "peer", peerError.NodeID, + "err", peerError.Err, + "evicting", shouldEvict, + ) + if shouldEvict { + r.peerManager.Errored(peerError.NodeID, peerError.Err) + } else { + r.peerManager.processPeerEvent(ctx, PeerUpdate{ + NodeID: peerError.NodeID, + Status: PeerStatusBad, + }) + } - r.peerManager.Errored(peerError.NodeID, peerError.Err) case <-ctx.Done(): return } From 10ce3d472969bb8f1a041bd8d1a8e421b5ff16aa Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 25 May 2022 00:15:36 -0700 Subject: [PATCH 056/203] rpc: fix OpenAPI docs for /events filter argument (#8599) I incorrectly documented the "query" field as "filter". --- rpc/openapi/openapi.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index d44463da7e..566419b854 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -1507,7 +1507,7 @@ components: description: Event filter query type: object properties: - filter: + query: type: string example: "tm.event = 'Tx'" EventsResponse: From 5d1bffe85728313169cb1e0742d44899d45f320f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 May 2022 09:03:02 +0000 Subject: [PATCH 057/203] build(deps): Bump github.com/vektra/mockery/v2 from 2.12.2 to 2.12.3 (#8607) Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.12.2 to 2.12.3.
Release notes

Sourced from github.com/vektra/mockery/v2's releases.

v2.12.3

Changelog

  • 41e99e1 Add explicit generation for ExpecterTest
  • 68d25fe Merge pull request #466 from LandonTClipp/testing_tb
  • 356a8cd Reduce size of interface passed to mock constructor
  • b338b68 Updating mocks and fixing tests/behavior
Commits
  • 68d25fe Merge pull request #466 from LandonTClipp/testing_tb
  • 41e99e1 Add explicit generation for ExpecterTest
  • b338b68 Updating mocks and fixing tests/behavior
  • 356a8cd Reduce size of interface passed to mock constructor
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/vektra/mockery/v2&package-manager=go_modules&previous-version=2.12.2&new-version=2.12.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 90142c5b1d..842c74013a 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/creachadair/taskgroup v0.3.2 github.com/golangci/golangci-lint v1.46.0 github.com/google/go-cmp v0.5.8 - github.com/vektra/mockery/v2 v2.12.2 + github.com/vektra/mockery/v2 v2.12.3 gotest.tools v2.2.0+incompatible ) diff --git a/go.sum b/go.sum index c26a91a662..0f278bc216 100644 --- a/go.sum +++ b/go.sum @@ -1074,8 +1074,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/vektra/mockery/v2 v2.12.2 h1:JbRx9F+XcCJiDTyCm3V5lXYwl56m5ZouV6I9eZa1Dj0= -github.com/vektra/mockery/v2 v2.12.2/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U= +github.com/vektra/mockery/v2 v2.12.3 h1:74h0R+p75tdr3QNwiNz3MXeCwSP/I5bYUbZY6oT4t20= +github.com/vektra/mockery/v2 v2.12.3/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= From 4cb0ec55e45653030b6e3c0edeec591205cc1965 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Wed, 25 May 2022 05:33:38 -0400 Subject: [PATCH 058/203] makefile: update buf commands to use tools.go (#8609) This will keep the version of `buf` consistent between all developer machines. 
--- Makefile | 9 +++------ go.mod | 17 ++++++++++++++++- go.sum | 37 +++++++++++++++++++++++++++++++++++++ tools/tools.go | 1 + 4 files changed, 57 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index cd9380768d..1c2b8f7dd4 100644 --- a/Makefile +++ b/Makefile @@ -77,9 +77,6 @@ $(BUILDDIR)/: ############################################################################### check-proto-deps: -ifeq (,$(shell which buf)) - $(error "buf is required for Protobuf building, linting and breakage checking. See https://docs.buf.build/installation for installation instructions.") -endif ifeq (,$(shell which protoc-gen-gogofaster)) $(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install") endif @@ -93,7 +90,7 @@ endif proto-gen: check-proto-deps @echo "Generating Protobuf files" - @buf generate + @go run github.com/bufbuild/buf/cmd/buf generate @mv ./proto/tendermint/abci/types.pb.go ./abci/types/ .PHONY: proto-gen @@ -101,7 +98,7 @@ proto-gen: check-proto-deps # execution only. proto-lint: check-proto-deps @echo "Linting Protobuf files" - @buf lint + @go run github.com/bufbuild/buf/cmd/buf lint .PHONY: proto-lint proto-format: check-proto-format-deps @@ -114,7 +111,7 @@ proto-check-breaking: check-proto-deps @echo "Note: This is only useful if your changes have not yet been committed." 
@echo " Otherwise read up on buf's \"breaking\" command usage:" @echo " https://docs.buf.build/breaking/usage" - @buf breaking --against ".git" + @go run github.com/bufbuild/buf/cmd/buf breaking --against ".git" .PHONY: proto-check-breaking ############################################################################### diff --git a/go.mod b/go.mod index 842c74013a..bbaa94afc8 100644 --- a/go.mod +++ b/go.mod @@ -33,11 +33,11 @@ require ( golang.org/x/net v0.0.0-20220412020605-290c469a71a5 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.46.2 - gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect pgregory.net/rapid v0.4.7 ) require ( + github.com/bufbuild/buf v1.3.1 github.com/creachadair/atomicfile v0.2.6 github.com/creachadair/taskgroup v0.3.2 github.com/golangci/golangci-lint v1.46.0 @@ -48,11 +48,26 @@ require ( require ( github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/firefart/nonamedreturns v1.0.1 // indirect + github.com/gofrs/uuid v4.2.0+incompatible // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect + github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f // indirect + github.com/jhump/protoreflect v1.11.1-0.20220213155251-0c2aedc66cf4 // indirect + github.com/klauspost/compress v1.15.1 // indirect + github.com/klauspost/pgzip v1.2.5 // indirect github.com/lufeee/execinquery v1.0.0 // indirect github.com/pelletier/go-toml/v2 v2.0.0 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/profile v1.6.0 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect + go.opencensus.io v0.23.0 // indirect + go.uber.org/atomic v1.9.0 
// indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.21.0 // indirect golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect ) diff --git a/go.sum b/go.sum index 0f278bc216..1665d1ba65 100644 --- a/go.sum +++ b/go.sum @@ -141,6 +141,8 @@ github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVj github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -173,6 +175,8 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/bufbuild/buf v1.3.1 h1:AelWcENnbNEjwxmQXIZaU51GHgnWQ8Mc94kZdDUKgRs= +github.com/bufbuild/buf v1.3.1/go.mod h1:CTRUb23N+zlm1U8ZIBKz0Sqluk++qQloB2i/MZNZHIs= github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= @@ -224,9 +228,11 @@ github.com/coreos/go-systemd 
v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/atomicfile v0.2.6 h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg= github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc= @@ -368,6 +374,8 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -386,6 +394,7 @@ github.com/golang/groupcache 
v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -599,11 +608,17 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a h1:d4+I1YEKVmWZrgkt6jpXBnLgV2ZjO0YxEtLDdfIZfH4= +github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a/go.mod h1:Zi/ZFkEqFHTm7qkjyNJjaWH4LQA9LQhGJyF0lTYGpxw= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f h1:BNuUg9k2EiJmlMwjoef3e8vZLHplbVw6DrjGFjLL+Yo= +github.com/jhump/protocompile 
v0.0.0-20220216033700-d705409f108f/go.mod h1:qr2b5kx4HbFS7/g4uYO5qv9ei8303JMsC7ESbYiqr2Q= github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= +github.com/jhump/protoreflect v1.11.1-0.20220213155251-0c2aedc66cf4 h1:E2CdxLXYSn6Zrj2+u8DWrwMJW3YZLSWtM/7kIL8OL18= +github.com/jhump/protoreflect v1.11.1-0.20220213155251-0c2aedc66cf4/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= @@ -646,6 +661,10 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= +github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -792,6 +811,7 @@ github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+ github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b 
h1:MKwruh+HeCSKWphkxuzvRzU4QzDkg7yiPkDVV0cDFgI= github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b/go.mod h1:TLJifjWF6eotcfzDjKZsDqWJ+73Uvj/N85MvVyrvynM= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/oklog/ulid/v2 v2.0.2/go.mod h1:mtBL0Qe/0HAx6/a4Z30qxVIAL1eQDweXq5lxOEiwQ68= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= @@ -838,6 +858,7 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= @@ -851,12 +872,16 @@ github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9oc github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 
h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/profile v1.6.0 h1:hUDfIISABYI59DyeB3OTay/HxSRwTQ8rB/H83k6r5dM= +github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -932,8 +957,10 @@ github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc= github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.2.3 
h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8= github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= @@ -1122,24 +1149,32 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr 
v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1399,6 +1434,7 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1738,6 +1774,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod 
h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= diff --git a/tools/tools.go b/tools/tools.go index 9fc291d99a..52a676b009 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -8,6 +8,7 @@ package tools import ( + _ "github.com/bufbuild/buf/cmd/buf" _ "github.com/golangci/golangci-lint/cmd/golangci-lint" _ "github.com/vektra/mockery/v2" ) From a988cefe5d7ded73d242b3de9417e81cfe021429 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 25 May 2022 08:48:56 -0700 Subject: [PATCH 059/203] Update generated mocks after #8607. 
(#8612) --- abci/client/mocks/client.go | 11 +++++++---- abci/types/mocks/application.go | 11 +++++++---- internal/consensus/mocks/cons_sync_reactor.go | 11 +++++++---- internal/evidence/mocks/block_store.go | 12 +++++++----- internal/mempool/mocks/mempool.go | 11 +++++++---- internal/p2p/mocks/connection.go | 11 +++++++---- internal/p2p/mocks/transport.go | 11 +++++++---- internal/state/indexer/mocks/event_sink.go | 11 +++++++---- internal/state/mocks/block_store.go | 11 +++++++---- internal/state/mocks/evidence_pool.go | 11 +++++++---- internal/state/mocks/store.go | 11 +++++++---- internal/statesync/mocks/state_provider.go | 11 +++++++---- libs/time/mocks/source.go | 13 ++++++++----- light/provider/mocks/provider.go | 11 +++++++---- light/rpc/mocks/light_client.go | 11 +++++++---- rpc/client/mocks/client.go | 11 +++++++---- 16 files changed, 113 insertions(+), 66 deletions(-) diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index e5f2898f33..add3c2ae9d 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -4,10 +4,8 @@ package mocks import ( context "context" - testing "testing" mock "github.com/stretchr/testify/mock" - types "github.com/tendermint/tendermint/abci/types" ) @@ -422,8 +420,13 @@ func (_m *Client) Wait() { _m.Called() } -// NewClient creates a new instance of Client. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewClient(t testing.TB) *Client { +type NewClientT interface { + mock.TestingT + Cleanup(func()) +} + +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewClient(t NewClientT) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/abci/types/mocks/application.go b/abci/types/mocks/application.go index 2d35c481f0..62cf929057 100644 --- a/abci/types/mocks/application.go +++ b/abci/types/mocks/application.go @@ -4,10 +4,8 @@ package mocks import ( context "context" - testing "testing" mock "github.com/stretchr/testify/mock" - types "github.com/tendermint/tendermint/abci/types" ) @@ -338,8 +336,13 @@ func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.Reque return r0, r1 } -// NewApplication creates a new instance of Application. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewApplication(t testing.TB) *Application { +type NewApplicationT interface { + mock.TestingT + Cleanup(func()) +} + +// NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewApplication(t NewApplicationT) *Application { mock := &Application{} mock.Mock.Test(t) diff --git a/internal/consensus/mocks/cons_sync_reactor.go b/internal/consensus/mocks/cons_sync_reactor.go index f904e9129a..2c694742ba 100644 --- a/internal/consensus/mocks/cons_sync_reactor.go +++ b/internal/consensus/mocks/cons_sync_reactor.go @@ -3,8 +3,6 @@ package mocks import ( - testing "testing" - mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" ) @@ -29,8 +27,13 @@ func (_m *ConsSyncReactor) SwitchToConsensus(_a0 state.State, _a1 bool) { _m.Called(_a0, _a1) } -// NewConsSyncReactor creates a new instance of ConsSyncReactor. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewConsSyncReactor(t testing.TB) *ConsSyncReactor { +type NewConsSyncReactorT interface { + mock.TestingT + Cleanup(func()) +} + +// NewConsSyncReactor creates a new instance of ConsSyncReactor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewConsSyncReactor(t NewConsSyncReactorT) *ConsSyncReactor { mock := &ConsSyncReactor{} mock.Mock.Test(t) diff --git a/internal/evidence/mocks/block_store.go b/internal/evidence/mocks/block_store.go index e45b281b90..b0c67ff874 100644 --- a/internal/evidence/mocks/block_store.go +++ b/internal/evidence/mocks/block_store.go @@ -3,10 +3,7 @@ package mocks import ( - testing "testing" - mock "github.com/stretchr/testify/mock" - types "github.com/tendermint/tendermint/types" ) @@ -61,8 +58,13 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return r0 } -// NewBlockStore creates a new instance of BlockStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockStore(t testing.TB) *BlockStore { +type NewBlockStoreT interface { + mock.TestingT + Cleanup(func()) +} + +// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewBlockStore(t NewBlockStoreT) *BlockStore { mock := &BlockStore{} mock.Mock.Test(t) diff --git a/internal/mempool/mocks/mempool.go b/internal/mempool/mocks/mempool.go index b82d7d63e8..454ca602f1 100644 --- a/internal/mempool/mocks/mempool.go +++ b/internal/mempool/mocks/mempool.go @@ -11,8 +11,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -173,8 +171,13 @@ func (_m *Mempool) Update(ctx context.Context, blockHeight int64, blockTxs types return r0 } -// NewMempool creates a new instance of Mempool. 
It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewMempool(t testing.TB) *Mempool { +type NewMempoolT interface { + mock.TestingT + Cleanup(func()) +} + +// NewMempool creates a new instance of Mempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewMempool(t NewMempoolT) *Mempool { mock := &Mempool{} mock.Mock.Test(t) diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 73b6cfc3b3..766bbf6576 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -13,8 +13,6 @@ import ( p2p "github.com/tendermint/tendermint/internal/p2p" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -153,8 +151,13 @@ func (_m *Connection) String() string { return r0 } -// NewConnection creates a new instance of Connection. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewConnection(t testing.TB) *Connection { +type NewConnectionT interface { + mock.TestingT + Cleanup(func()) +} + +// NewConnection creates a new instance of Connection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewConnection(t NewConnectionT) *Connection { mock := &Connection{} mock.Mock.Test(t) diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go index 34ebec20e7..436c961c3f 100644 --- a/internal/p2p/mocks/transport.go +++ b/internal/p2p/mocks/transport.go @@ -10,8 +10,6 @@ import ( mock "github.com/stretchr/testify/mock" p2p "github.com/tendermint/tendermint/internal/p2p" - - testing "testing" ) // Transport is an autogenerated mock type for the Transport type @@ -151,8 +149,13 @@ func (_m *Transport) String() string { return r0 } -// NewTransport creates a new instance of Transport. 
It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewTransport(t testing.TB) *Transport { +type NewTransportT interface { + mock.TestingT + Cleanup(func()) +} + +// NewTransport creates a new instance of Transport. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewTransport(t NewTransportT) *Transport { mock := &Transport{} mock.Mock.Test(t) diff --git a/internal/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go index decf551abd..a7221a0876 100644 --- a/internal/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -12,8 +12,6 @@ import ( tenderminttypes "github.com/tendermint/tendermint/types" - testing "testing" - types "github.com/tendermint/tendermint/abci/types" ) @@ -168,8 +166,13 @@ func (_m *EventSink) Type() indexer.EventSinkType { return r0 } -// NewEventSink creates a new instance of EventSink. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewEventSink(t testing.TB) *EventSink { +type NewEventSinkT interface { + mock.TestingT + Cleanup(func()) +} + +// NewEventSink creates a new instance of EventSink. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewEventSink(t NewEventSinkT) *EventSink { mock := &EventSink{} mock.Mock.Test(t) diff --git a/internal/state/mocks/block_store.go b/internal/state/mocks/block_store.go index 58fc640fc5..3b8eca45ef 100644 --- a/internal/state/mocks/block_store.go +++ b/internal/state/mocks/block_store.go @@ -5,8 +5,6 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -232,8 +230,13 @@ func (_m *BlockStore) Size() int64 { return r0 } -// NewBlockStore creates a new instance of BlockStore. 
It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockStore(t testing.TB) *BlockStore { +type NewBlockStoreT interface { + mock.TestingT + Cleanup(func()) +} + +// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewBlockStore(t NewBlockStoreT) *BlockStore { mock := &BlockStore{} mock.Mock.Test(t) diff --git a/internal/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go index 49633269b1..98abb4776c 100644 --- a/internal/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -8,8 +8,6 @@ import ( mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -74,8 +72,13 @@ func (_m *EvidencePool) Update(_a0 context.Context, _a1 state.State, _a2 types.E _m.Called(_a0, _a1, _a2) } -// NewEvidencePool creates a new instance of EvidencePool. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewEvidencePool(t testing.TB) *EvidencePool { +type NewEvidencePoolT interface { + mock.TestingT + Cleanup(func()) +} + +// NewEvidencePool creates a new instance of EvidencePool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewEvidencePool(t NewEvidencePoolT) *EvidencePool { mock := &EvidencePool{} mock.Mock.Test(t) diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index 9b41f3c1bc..d08ba4c9e4 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -7,8 +7,6 @@ import ( state "github.com/tendermint/tendermint/internal/state" tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -189,8 +187,13 @@ func (_m *Store) SaveValidatorSets(_a0 int64, _a1 int64, _a2 *types.ValidatorSet return r0 } -// NewStore creates a new instance of Store. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewStore(t testing.TB) *Store { +type NewStoreT interface { + mock.TestingT + Cleanup(func()) +} + +// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewStore(t NewStoreT) *Store { mock := &Store{} mock.Mock.Test(t) diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index 582ebcd9c4..17ddb54ac0 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -8,8 +8,6 @@ import ( mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -85,8 +83,13 @@ func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, return r0, r1 } -// NewStateProvider creates a new instance of StateProvider. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewStateProvider(t testing.TB) *StateProvider { +type NewStateProviderT interface { + mock.TestingT + Cleanup(func()) +} + +// NewStateProvider creates a new instance of StateProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewStateProvider(t NewStateProviderT) *StateProvider { mock := &StateProvider{} mock.Mock.Test(t) diff --git a/libs/time/mocks/source.go b/libs/time/mocks/source.go index 7878d86f51..386d20c45f 100644 --- a/libs/time/mocks/source.go +++ b/libs/time/mocks/source.go @@ -3,11 +3,9 @@ package mocks import ( - testing "testing" + time "time" mock "github.com/stretchr/testify/mock" - - time "time" ) // Source is an autogenerated mock type for the Source type @@ -29,8 +27,13 @@ func (_m *Source) Now() time.Time { return r0 } -// NewSource creates a new instance of Source. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewSource(t testing.TB) *Source { +type NewSourceT interface { + mock.TestingT + Cleanup(func()) +} + +// NewSource creates a new instance of Source. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewSource(t NewSourceT) *Source { mock := &Source{} mock.Mock.Test(t) diff --git a/light/provider/mocks/provider.go b/light/provider/mocks/provider.go index e136046f9d..af2e9c930f 100644 --- a/light/provider/mocks/provider.go +++ b/light/provider/mocks/provider.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -68,8 +66,13 @@ func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 types.Evidence) erro return r0 } -// NewProvider creates a new instance of Provider. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewProvider(t testing.TB) *Provider { +type NewProviderT interface { + mock.TestingT + Cleanup(func()) +} + +// NewProvider creates a new instance of Provider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewProvider(t NewProviderT) *Provider { mock := &Provider{} mock.Mock.Test(t) diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index ea6d6a2d44..4130c9575c 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - time "time" types "github.com/tendermint/tendermint/types" @@ -118,8 +116,13 @@ func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int6 return r0, r1 } -// NewLightClient creates a new instance of LightClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewLightClient(t testing.TB) *LightClient { +type NewLightClientT interface { + mock.TestingT + Cleanup(func()) +} + +// NewLightClient creates a new instance of LightClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewLightClient(t NewLightClientT) *LightClient { mock := &LightClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index 9a286eaf24..0bc478fc3f 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -12,8 +12,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -798,8 +796,13 @@ func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perP return r0, r1 } -// NewClient creates a new instance of Client. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewClient(t testing.TB) *Client { +type NewClientT interface { + mock.TestingT + Cleanup(func()) +} + +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewClient(t NewClientT) *Client { mock := &Client{} mock.Mock.Test(t) From f33722b4233159a31cdf6c33258fbc601cdbd1d4 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 25 May 2022 18:42:07 +0200 Subject: [PATCH 060/203] migrate: reorder collection ordering (#8613) --- cmd/tendermint/commands/key_migrate.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/cmd/tendermint/commands/key_migrate.go b/cmd/tendermint/commands/key_migrate.go index 5866be341b..6f8817fe15 100644 --- a/cmd/tendermint/commands/key_migrate.go +++ b/cmd/tendermint/commands/key_migrate.go @@ -21,15 +21,16 @@ func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command { defer cancel() contexts := []string{ - // this is ordered to put the - // (presumably) biggest/most important - // subsets first. + // this is ordered to put + // the more ephemeral tables first to + // forclose the possiblity of the + // ephemeral data overwriting later data + "tx_index", + "peerstore", + "light", "blockstore", "state", - "peerstore", - "tx_index", "evidence", - "light", } for idx, dbctx := range contexts { From 4c857a7ed269c23b727e530c70ecf715581c8cb8 Mon Sep 17 00:00:00 2001 From: Jasmina Malicevic Date: Wed, 25 May 2022 23:06:16 +0200 Subject: [PATCH 061/203] abci: remove unused fields from CheckTXResponse (part 1) (#8605) abci: Removed Info, Log, Events and GasUsed from ResponseCheckTx. spec: Updated info on ResponseCheckTx to reflect field removal. 
--- CHANGELOG_PENDING.md | 1 + abci/cmd/abci-cli/abci-cli.go | 2 - abci/tests/server/client.go | 6 +- abci/types/messages_test.go | 17 - abci/types/types.pb.go | 738 ++++++++---------- internal/consensus/mempool_test.go | 1 - internal/rpc/core/mempool.go | 1 - proto/tendermint/abci/types.proto | 12 +- rpc/client/mock/abci.go | 2 - rpc/coretypes/responses.go | 1 - .../abci++_app_requirements_002_draft.md | 3 - spec/abci++/abci++_methods_002_draft.md | 4 - spec/abci/apps.md | 5 +- test/e2e/app/app.go | 1 - 14 files changed, 326 insertions(+), 468 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index d38caf50b8..d31ff9bc7e 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -27,6 +27,7 @@ Special thanks to external contributors on this release: - [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec). - [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint. (@tychoish) + - [abci] \#8605 Remove info, log, events and gasUsed fields from ResponseCheckTx as they are not used by Tendermint. 
(@jmalicevic) - P2P Protocol diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index cb1603be34..b1b1b2c7ec 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -546,8 +546,6 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { printResponse(cmd, args, response{ Code: res.Code, Data: res.Data, - Info: res.Info, - Log: res.Log, }) return nil } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index b02fd0b141..eeae747460 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -72,11 +72,11 @@ func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]by func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes}) - code, data, log := res.Code, res.Data, res.Log + code, data := res.Code, res.Data if code != codeExp { fmt.Println("Failed test: CheckTx") - fmt.Printf("CheckTx response code was unexpected. Got %v expected %v. Log: %v\n", - code, codeExp, log) + fmt.Printf("CheckTx response code was unexpected. 
Got %v expected %v.,", + code, codeExp) return errors.New("checkTx") } if !bytes.Equal(data, dataExp) { diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go index 4f17f9f83c..404d552225 100644 --- a/abci/types/messages_test.go +++ b/abci/types/messages_test.go @@ -21,14 +21,6 @@ func TestMarshalJSON(t *testing.T) { Code: 1, Data: []byte("hello"), GasWanted: 43, - Events: []Event{ - { - Type: "testEvent", - Attributes: []EventAttribute{ - {Key: "pho", Value: "bo"}, - }, - }, - }, } b, err = json.Marshal(&r1) assert.NoError(t, err) @@ -86,16 +78,7 @@ func TestWriteReadMessage2(t *testing.T) { cases := []proto.Message{ &ResponseCheckTx{ Data: []byte(phrase), - Log: phrase, GasWanted: 10, - Events: []Event{ - { - Type: "testEvent", - Attributes: []EventAttribute{ - {Key: "abc", Value: "def"}, - }, - }, - }, }, // TODO: add the rest } diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 89de1bdcd1..c2a8fa4261 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1645,7 +1645,7 @@ type RequestFinalizeBlock struct { Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` DecidedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` ByzantineValidators []Misbehavior `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` - // hash is the merkle root hash of the fields of the decided block. + // hash is the merkle root hash of the fields of the proposed block. 
Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` @@ -2481,16 +2481,12 @@ func (m *ResponseBeginBlock) GetEvents() []Event { } type ResponseCheckTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` - Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` - Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` + Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` // ABCI applications creating a ResponseCheckTX should not set mempool_error. 
MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` } @@ -2542,20 +2538,6 @@ func (m *ResponseCheckTx) GetData() []byte { return nil } -func (m *ResponseCheckTx) GetLog() string { - if m != nil { - return m.Log - } - return "" -} - -func (m *ResponseCheckTx) GetInfo() string { - if m != nil { - return m.Info - } - return "" -} - func (m *ResponseCheckTx) GetGasWanted() int64 { if m != nil { return m.GasWanted @@ -2563,20 +2545,6 @@ func (m *ResponseCheckTx) GetGasWanted() int64 { return 0 } -func (m *ResponseCheckTx) GetGasUsed() int64 { - if m != nil { - return m.GasUsed - } - return 0 -} - -func (m *ResponseCheckTx) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - func (m *ResponseCheckTx) GetCodespace() string { if m != nil { return m.Codespace @@ -3255,6 +3223,8 @@ type ResponseFinalizeBlock struct { TxResults []*ExecTxResult `protobuf:"bytes,2,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,3,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,4,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + AppHash []byte `protobuf:"bytes,5,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + RetainHeight int64 `protobuf:"varint,6,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` } func (m *ResponseFinalizeBlock) Reset() { *m = ResponseFinalizeBlock{} } @@ -3318,6 +3288,20 @@ func (m *ResponseFinalizeBlock) GetConsensusParamUpdates() *types1.ConsensusPara return nil } +func (m *ResponseFinalizeBlock) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +func (m *ResponseFinalizeBlock) GetRetainHeight() int64 { + if m != nil { + return m.RetainHeight + } + return 0 +} + type 
CommitInfo struct { Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` @@ -3429,7 +3413,7 @@ func (m *ExtendedCommitInfo) GetVotes() []ExtendedVoteInfo { } // Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// ResponseBeginBlock, ResponseEndBlock and ResponseDeliverTx. // Later, transactions may be queried using these events. type Event struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` @@ -4219,222 +4203,225 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 3439 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcb, 0x73, 0x23, 0xe5, - 0xb5, 0x97, 0x5a, 0xef, 0x23, 0xeb, 0xe1, 0xcf, 0x66, 0xd0, 0x88, 0x19, 0x7b, 0xe8, 0xa9, 0x81, - 0x99, 0x01, 0x3c, 0x5c, 0xcf, 0x1d, 0x18, 0xee, 0xc0, 0xa5, 0x6c, 0x59, 0x83, 0xcc, 0x78, 0x6c, - 0xd3, 0x96, 0x4d, 0x71, 0x6f, 0x32, 0x4d, 0x4b, 0xfd, 0xd9, 0x6a, 0x46, 0x52, 0x37, 0xdd, 0x2d, - 0x23, 0xb3, 0x0c, 0xc5, 0x86, 0xca, 0x82, 0x4d, 0x2a, 0x49, 0x55, 0xd8, 0x25, 0x55, 0xc9, 0x7f, - 0x90, 0x55, 0x56, 0x59, 0xb0, 0x48, 0x55, 0x58, 0x25, 0xa9, 0x2c, 0x48, 0x0a, 0x76, 0xf9, 0x07, - 0xb2, 0x4b, 0x52, 0xdf, 0xa3, 0x5f, 0x52, 0xb7, 0x1e, 0x0c, 0x50, 0x95, 0x0a, 0x3b, 0x7d, 0xa7, - 0xcf, 0x39, 0xfd, 0x3d, 0x4e, 0x9f, 0xc7, 0xef, 0x7c, 0x82, 0x27, 0x6c, 0xdc, 0x57, 0xb1, 0xd9, - 0xd3, 0xfa, 0xf6, 0x0d, 0xa5, 0xd5, 0xd6, 0x6e, 0xd8, 0x67, 0x06, 0xb6, 0xd6, 0x0c, 0x53, 0xb7, - 0x75, 0x54, 0xf2, 0x1e, 0xae, 0x91, 0x87, 0xd5, 0x8b, 0x3e, 0xee, 0xb6, 0x79, 0x66, 0xd8, 0xfa, - 0x0d, 0xc3, 0xd4, 0xf5, 0x63, 0xc6, 0x5f, 0xbd, 0xe0, 0x7b, 0x4c, 0xf5, 0xf8, 0xb5, 0x05, 0x9e, - 0x72, 0xe1, 0x87, 0xf8, 0xcc, 0x79, 0x7a, 0x71, 0x4c, 
0xd6, 0x50, 0x4c, 0xa5, 0xe7, 0x3c, 0x5e, - 0x3d, 0xd1, 0xf5, 0x93, 0x2e, 0xbe, 0x41, 0x47, 0xad, 0xc1, 0xf1, 0x0d, 0x5b, 0xeb, 0x61, 0xcb, - 0x56, 0x7a, 0x06, 0x67, 0x58, 0x3e, 0xd1, 0x4f, 0x74, 0xfa, 0xf3, 0x06, 0xf9, 0xc5, 0xa8, 0xe2, - 0x3f, 0x01, 0x32, 0x12, 0x7e, 0x77, 0x80, 0x2d, 0x1b, 0xad, 0x43, 0x12, 0xb7, 0x3b, 0x7a, 0x25, - 0x7e, 0x29, 0x7e, 0x35, 0xbf, 0x7e, 0x61, 0x6d, 0x64, 0x71, 0x6b, 0x9c, 0xaf, 0xde, 0xee, 0xe8, - 0x8d, 0x98, 0x44, 0x79, 0xd1, 0x2d, 0x48, 0x1d, 0x77, 0x07, 0x56, 0xa7, 0x22, 0x50, 0xa1, 0x8b, - 0x51, 0x42, 0x77, 0x09, 0x53, 0x23, 0x26, 0x31, 0x6e, 0xf2, 0x2a, 0xad, 0x7f, 0xac, 0x57, 0x12, - 0x93, 0x5f, 0xb5, 0xdd, 0x3f, 0xa6, 0xaf, 0x22, 0xbc, 0x68, 0x13, 0x40, 0xeb, 0x6b, 0xb6, 0xdc, - 0xee, 0x28, 0x5a, 0xbf, 0x92, 0xa4, 0x92, 0x4f, 0x46, 0x4b, 0x6a, 0x76, 0x8d, 0x30, 0x36, 0x62, - 0x52, 0x4e, 0x73, 0x06, 0x64, 0xba, 0xef, 0x0e, 0xb0, 0x79, 0x56, 0x49, 0x4d, 0x9e, 0xee, 0x1b, - 0x84, 0x89, 0x4c, 0x97, 0x72, 0xa3, 0x6d, 0xc8, 0xb7, 0xf0, 0x89, 0xd6, 0x97, 0x5b, 0x5d, 0xbd, - 0xfd, 0xb0, 0x92, 0xa6, 0xc2, 0x62, 0x94, 0xf0, 0x26, 0x61, 0xdd, 0x24, 0x9c, 0x9b, 0x42, 0x25, - 0xde, 0x88, 0x49, 0xd0, 0x72, 0x29, 0xe8, 0x65, 0xc8, 0xb6, 0x3b, 0xb8, 0xfd, 0x50, 0xb6, 0x87, - 0x95, 0x0c, 0xd5, 0xb3, 0x1a, 0xa5, 0xa7, 0x46, 0xf8, 0x9a, 0xc3, 0x46, 0x4c, 0xca, 0xb4, 0xd9, - 0x4f, 0x74, 0x17, 0x40, 0xc5, 0x5d, 0xed, 0x14, 0x9b, 0x44, 0x3e, 0x3b, 0x79, 0x0f, 0xb6, 0x18, - 0x67, 0x73, 0xc8, 0xa7, 0x91, 0x53, 0x1d, 0x02, 0xaa, 0x41, 0x0e, 0xf7, 0x55, 0xbe, 0x9c, 0x1c, - 0x55, 0x73, 0x29, 0xf2, 0xbc, 0xfb, 0xaa, 0x7f, 0x31, 0x59, 0xcc, 0xc7, 0xe8, 0x36, 0xa4, 0xdb, - 0x7a, 0xaf, 0xa7, 0xd9, 0x15, 0xa0, 0x1a, 0x56, 0x22, 0x17, 0x42, 0xb9, 0x1a, 0x31, 0x89, 0xf3, - 0xa3, 0x5d, 0x28, 0x76, 0x35, 0xcb, 0x96, 0xad, 0xbe, 0x62, 0x58, 0x1d, 0xdd, 0xb6, 0x2a, 0x79, - 0xaa, 0xe1, 0x4a, 0x94, 0x86, 0x1d, 0xcd, 0xb2, 0x0f, 0x1c, 0xe6, 0x46, 0x4c, 0x2a, 0x74, 0xfd, - 0x04, 0xa2, 0x4f, 0x3f, 0x3e, 0xc6, 0xa6, 0xab, 0xb0, 0xb2, 0x30, 0x59, 0xdf, 0x1e, 0xe1, 
0x76, - 0xe4, 0x89, 0x3e, 0xdd, 0x4f, 0x40, 0xff, 0x0f, 0x4b, 0x5d, 0x5d, 0x51, 0x5d, 0x75, 0x72, 0xbb, - 0x33, 0xe8, 0x3f, 0xac, 0x14, 0xa8, 0xd2, 0x6b, 0x91, 0x93, 0xd4, 0x15, 0xd5, 0x51, 0x51, 0x23, - 0x02, 0x8d, 0x98, 0xb4, 0xd8, 0x1d, 0x25, 0xa2, 0x07, 0xb0, 0xac, 0x18, 0x46, 0xf7, 0x6c, 0x54, - 0x7b, 0x91, 0x6a, 0xbf, 0x1e, 0xa5, 0x7d, 0x83, 0xc8, 0x8c, 0xaa, 0x47, 0xca, 0x18, 0x15, 0x35, - 0xa1, 0x6c, 0x98, 0xd8, 0x50, 0x4c, 0x2c, 0x1b, 0xa6, 0x6e, 0xe8, 0x96, 0xd2, 0xad, 0x94, 0xa8, - 0xee, 0xa7, 0xa3, 0x74, 0xef, 0x33, 0xfe, 0x7d, 0xce, 0xde, 0x88, 0x49, 0x25, 0x23, 0x48, 0x62, - 0x5a, 0xf5, 0x36, 0xb6, 0x2c, 0x4f, 0x6b, 0x79, 0x9a, 0x56, 0xca, 0x1f, 0xd4, 0x1a, 0x20, 0xa1, - 0x3a, 0xe4, 0xf1, 0x90, 0x88, 0xcb, 0xa7, 0xba, 0x8d, 0x2b, 0x8b, 0x93, 0x3f, 0xac, 0x3a, 0x65, - 0x3d, 0xd2, 0x6d, 0x4c, 0x3e, 0x2a, 0xec, 0x8e, 0x90, 0x02, 0x8f, 0x9d, 0x62, 0x53, 0x3b, 0x3e, - 0xa3, 0x6a, 0x64, 0xfa, 0xc4, 0xd2, 0xf4, 0x7e, 0x05, 0x51, 0x85, 0xcf, 0x44, 0x29, 0x3c, 0xa2, - 0x42, 0x44, 0x45, 0xdd, 0x11, 0x69, 0xc4, 0xa4, 0xa5, 0xd3, 0x71, 0x32, 0x31, 0xb1, 0x63, 0xad, - 0xaf, 0x74, 0xb5, 0xf7, 0x31, 0xff, 0x6c, 0x96, 0x26, 0x9b, 0xd8, 0x5d, 0xce, 0x4d, 0xbf, 0x15, - 0x62, 0x62, 0xc7, 0x7e, 0xc2, 0x66, 0x06, 0x52, 0xa7, 0x4a, 0x77, 0x80, 0xc5, 0xa7, 0x21, 0xef, - 0x73, 0xac, 0xa8, 0x02, 0x99, 0x1e, 0xb6, 0x2c, 0xe5, 0x04, 0x53, 0x3f, 0x9c, 0x93, 0x9c, 0xa1, - 0x58, 0x84, 0x05, 0xbf, 0x33, 0x15, 0x3f, 0x8e, 0xbb, 0x92, 0xc4, 0x4f, 0x12, 0xc9, 0x53, 0x6c, - 0xd2, 0x65, 0x73, 0x49, 0x3e, 0x44, 0x97, 0xa1, 0x40, 0xa7, 0x2c, 0x3b, 0xcf, 0x89, 0xb3, 0x4e, - 0x4a, 0x0b, 0x94, 0x78, 0xc4, 0x99, 0x56, 0x21, 0x6f, 0xac, 0x1b, 0x2e, 0x4b, 0x82, 0xb2, 0x80, - 0xb1, 0x6e, 0x38, 0x0c, 0x4f, 0xc2, 0x02, 0x59, 0x9f, 0xcb, 0x91, 0xa4, 0x2f, 0xc9, 0x13, 0x1a, - 0x67, 0x11, 0x7f, 0x27, 0x40, 0x79, 0xd4, 0x01, 0xa3, 0xdb, 0x90, 0x24, 0xb1, 0x88, 0x87, 0x95, - 0xea, 0x1a, 0x0b, 0x54, 0x6b, 0x4e, 0xa0, 0x5a, 0x6b, 0x3a, 0x81, 0x6a, 0x33, 0xfb, 0xe9, 0xe7, - 0xab, 0xb1, 0x8f, 0xff, 0xb2, 
0x1a, 0x97, 0xa8, 0x04, 0x3a, 0x4f, 0x7c, 0xa5, 0xa2, 0xf5, 0x65, - 0x4d, 0xa5, 0x53, 0xce, 0x11, 0x47, 0xa8, 0x68, 0xfd, 0x6d, 0x15, 0xed, 0x40, 0xb9, 0xad, 0xf7, - 0x2d, 0xdc, 0xb7, 0x06, 0x96, 0xcc, 0x02, 0x21, 0x0f, 0x26, 0x01, 0x77, 0xc8, 0xc2, 0x6b, 0xcd, - 0xe1, 0xdc, 0xa7, 0x8c, 0x52, 0xa9, 0x1d, 0x24, 0x10, 0xb7, 0x7a, 0xaa, 0x74, 0x35, 0x55, 0xb1, - 0x75, 0xd3, 0xaa, 0x24, 0x2f, 0x25, 0x42, 0xfd, 0xe1, 0x91, 0xc3, 0x72, 0x68, 0xa8, 0x8a, 0x8d, - 0x37, 0x93, 0x64, 0xba, 0x92, 0x4f, 0x12, 0x3d, 0x05, 0x25, 0xc5, 0x30, 0x64, 0xcb, 0x56, 0x6c, - 0x2c, 0xb7, 0xce, 0x6c, 0x6c, 0xd1, 0x40, 0xb3, 0x20, 0x15, 0x14, 0xc3, 0x38, 0x20, 0xd4, 0x4d, - 0x42, 0x44, 0x57, 0xa0, 0x48, 0x62, 0x92, 0xa6, 0x74, 0xe5, 0x0e, 0xd6, 0x4e, 0x3a, 0x36, 0x0d, - 0x29, 0x09, 0xa9, 0xc0, 0xa9, 0x0d, 0x4a, 0x14, 0x55, 0xf7, 0xc4, 0x69, 0x3c, 0x42, 0x08, 0x92, - 0xaa, 0x62, 0x2b, 0x74, 0x27, 0x17, 0x24, 0xfa, 0x9b, 0xd0, 0x0c, 0xc5, 0xee, 0xf0, 0xfd, 0xa1, - 0xbf, 0xd1, 0x39, 0x48, 0x73, 0xb5, 0x09, 0xaa, 0x96, 0x8f, 0xd0, 0x32, 0xa4, 0x0c, 0x53, 0x3f, - 0xc5, 0xf4, 0xe8, 0xb2, 0x12, 0x1b, 0x88, 0x1f, 0x08, 0xb0, 0x38, 0x16, 0xb9, 0x88, 0xde, 0x8e, - 0x62, 0x75, 0x9c, 0x77, 0x91, 0xdf, 0xe8, 0x05, 0xa2, 0x57, 0x51, 0xb1, 0xc9, 0xa3, 0x7d, 0x65, - 0x7c, 0xab, 0x1b, 0xf4, 0x39, 0xdf, 0x1a, 0xce, 0x8d, 0xee, 0x41, 0xb9, 0xab, 0x58, 0xb6, 0xcc, - 0xbc, 0xbf, 0xec, 0x8b, 0xfc, 0x4f, 0x8c, 0x6d, 0x32, 0x8b, 0x15, 0xc4, 0xa0, 0xb9, 0x92, 0x22, - 0x11, 0xf5, 0xa8, 0xe8, 0x10, 0x96, 0x5b, 0x67, 0xef, 0x2b, 0x7d, 0x5b, 0xeb, 0x63, 0x79, 0xec, - 0xd4, 0xc6, 0x53, 0x89, 0xfb, 0x9a, 0xd5, 0xc2, 0x1d, 0xe5, 0x54, 0xd3, 0x9d, 0x69, 0x2d, 0xb9, - 0xf2, 0xee, 0x89, 0x5a, 0xa2, 0x04, 0xc5, 0x60, 0xd8, 0x45, 0x45, 0x10, 0xec, 0x21, 0x5f, 0xbf, - 0x60, 0x0f, 0xd1, 0xf3, 0x90, 0x24, 0x6b, 0xa4, 0x6b, 0x2f, 0x86, 0xbc, 0x88, 0xcb, 0x35, 0xcf, - 0x0c, 0x2c, 0x51, 0x4e, 0x51, 0x74, 0xbf, 0x06, 0x37, 0x14, 0x8f, 0x6a, 0x15, 0xaf, 0x41, 0x69, - 0x24, 0xce, 0xfa, 0x8e, 0x2f, 0xee, 0x3f, 0x3e, 0xb1, 0x04, 0x85, 
0x40, 0x40, 0x15, 0xcf, 0xc1, - 0x72, 0x58, 0x7c, 0x14, 0x3b, 0x2e, 0x3d, 0x10, 0xe7, 0xd0, 0x2d, 0xc8, 0xba, 0x01, 0x92, 0x7d, - 0x8d, 0xe7, 0xc7, 0x56, 0xe1, 0x30, 0x4b, 0x2e, 0x2b, 0xf9, 0x0c, 0x89, 0x55, 0x53, 0x73, 0x10, - 0xe8, 0xc4, 0x33, 0x8a, 0x61, 0x34, 0x14, 0xab, 0x23, 0xbe, 0x0d, 0x95, 0xa8, 0xe0, 0x37, 0xb2, - 0x8c, 0xa4, 0x6b, 0x85, 0xe7, 0x20, 0x7d, 0xac, 0x9b, 0x3d, 0xc5, 0xa6, 0xca, 0x0a, 0x12, 0x1f, - 0x11, 0xeb, 0x64, 0x81, 0x30, 0x41, 0xc9, 0x6c, 0x20, 0xca, 0x70, 0x3e, 0x32, 0x00, 0x12, 0x11, - 0xad, 0xaf, 0x62, 0xb6, 0x9f, 0x05, 0x89, 0x0d, 0x3c, 0x45, 0x6c, 0xb2, 0x6c, 0x40, 0x5e, 0x6b, - 0xd1, 0xb5, 0x52, 0xfd, 0x39, 0x89, 0x8f, 0xc4, 0x5f, 0x25, 0xe0, 0x5c, 0x78, 0x18, 0x44, 0x97, - 0x60, 0xa1, 0xa7, 0x0c, 0x65, 0x7b, 0xc8, 0xbf, 0x65, 0x76, 0x1c, 0xd0, 0x53, 0x86, 0xcd, 0x21, - 0xfb, 0x90, 0xcb, 0x90, 0xb0, 0x87, 0x56, 0x45, 0xb8, 0x94, 0xb8, 0xba, 0x20, 0x91, 0x9f, 0xe8, - 0x10, 0x16, 0xbb, 0x7a, 0x5b, 0xe9, 0xca, 0x3e, 0x8b, 0xe7, 0xc6, 0x7e, 0x79, 0x6c, 0xb3, 0x59, - 0x40, 0xc3, 0xea, 0x98, 0xd1, 0x97, 0xa8, 0x8e, 0x1d, 0xd7, 0xf2, 0xbf, 0x21, 0xab, 0xf7, 0x9d, - 0x51, 0x2a, 0xe0, 0x29, 0x1c, 0x9f, 0x9d, 0x9e, 0xdb, 0x67, 0x3f, 0x0f, 0xcb, 0x7d, 0x3c, 0xb4, - 0x7d, 0x73, 0x64, 0x86, 0x93, 0xa1, 0x67, 0x81, 0xc8, 0x33, 0xef, 0xfd, 0xc4, 0x86, 0xd0, 0x35, - 0x9a, 0x59, 0x18, 0xba, 0x85, 0x4d, 0x59, 0x51, 0x55, 0x13, 0x5b, 0x16, 0xcd, 0x6c, 0x17, 0x68, - 0xba, 0x40, 0xe9, 0x1b, 0x8c, 0x2c, 0xfe, 0xd4, 0x7f, 0x56, 0xc1, 0x4c, 0x82, 0x9f, 0x44, 0xdc, - 0x3b, 0x89, 0x03, 0x58, 0xe6, 0xf2, 0x6a, 0xe0, 0x30, 0x84, 0x59, 0x3d, 0x0f, 0x72, 0xc4, 0x67, - 0x38, 0x87, 0xc4, 0xa3, 0x9d, 0x83, 0xe3, 0x6d, 0x93, 0x3e, 0x6f, 0xfb, 0x6f, 0x76, 0x36, 0xaf, - 0xba, 0x51, 0xc4, 0x4b, 0xd3, 0x42, 0xa3, 0x88, 0xb7, 0x2e, 0x21, 0xe0, 0xde, 0x7e, 0x16, 0x87, - 0x6a, 0x74, 0x5e, 0x16, 0xaa, 0xea, 0x19, 0x58, 0x74, 0xd7, 0xe2, 0xce, 0x8f, 0x7d, 0xf5, 0x65, - 0xf7, 0x01, 0x9f, 0x60, 0x64, 0x54, 0xbc, 0x02, 0xc5, 0x91, 0xac, 0x91, 0x9d, 0x42, 0xe1, 0xd4, - 0xff, 
0x7e, 0xf1, 0x47, 0x09, 0xd7, 0xab, 0x06, 0x52, 0xbb, 0x10, 0xcb, 0x7b, 0x03, 0x96, 0x54, - 0xdc, 0xd6, 0xd4, 0xaf, 0x6a, 0x78, 0x8b, 0x5c, 0xfa, 0x3b, 0xbb, 0x9b, 0xc1, 0xee, 0xfe, 0x98, - 0x87, 0xac, 0x84, 0x2d, 0x83, 0xa4, 0x74, 0x68, 0x13, 0x72, 0x78, 0xd8, 0xc6, 0x86, 0xed, 0x64, - 0xc1, 0xe1, 0xd5, 0x04, 0xe3, 0xae, 0x3b, 0x9c, 0xa4, 0x36, 0x76, 0xc5, 0xd0, 0x4d, 0x0e, 0x83, - 0x44, 0x23, 0x1a, 0x5c, 0xdc, 0x8f, 0x83, 0xbc, 0xe0, 0xe0, 0x20, 0x89, 0xc8, 0x52, 0x98, 0x49, - 0x8d, 0x00, 0x21, 0x37, 0x39, 0x10, 0x92, 0x9c, 0xf2, 0xb2, 0x00, 0x12, 0x52, 0x0b, 0x20, 0x21, - 0xa9, 0x29, 0xcb, 0x8c, 0x80, 0x42, 0x5e, 0x70, 0xa0, 0x90, 0xf4, 0x94, 0x19, 0x8f, 0x60, 0x21, - 0xaf, 0x07, 0xb1, 0x90, 0x4c, 0x44, 0x68, 0x73, 0xa4, 0x27, 0x82, 0x21, 0xaf, 0xf8, 0xc0, 0x90, - 0x6c, 0x24, 0x0a, 0xc1, 0x14, 0x85, 0xa0, 0x21, 0xaf, 0x05, 0xd0, 0x90, 0xdc, 0x94, 0x7d, 0x98, - 0x00, 0x87, 0x6c, 0xf9, 0xe1, 0x10, 0x88, 0x44, 0x55, 0xf8, 0xb9, 0x47, 0xe1, 0x21, 0x2f, 0xb9, - 0x78, 0x48, 0x3e, 0x12, 0xd8, 0xe1, 0x6b, 0x19, 0x05, 0x44, 0xf6, 0xc6, 0x00, 0x11, 0x06, 0x60, - 0x3c, 0x15, 0xa9, 0x62, 0x0a, 0x22, 0xb2, 0x37, 0x86, 0x88, 0x14, 0xa6, 0x28, 0x9c, 0x02, 0x89, - 0x7c, 0x2f, 0x1c, 0x12, 0x89, 0x06, 0x2d, 0xf8, 0x34, 0x67, 0xc3, 0x44, 0xe4, 0x08, 0x4c, 0xa4, - 0x14, 0x59, 0xbf, 0x33, 0xf5, 0x33, 0x83, 0x22, 0x87, 0x21, 0xa0, 0x08, 0x83, 0x2f, 0xae, 0x46, - 0x2a, 0x9f, 0x01, 0x15, 0x39, 0x0c, 0x41, 0x45, 0x16, 0xa7, 0xaa, 0x9d, 0x0a, 0x8b, 0xdc, 0x0d, - 0xc2, 0x22, 0x68, 0xca, 0x37, 0x16, 0x89, 0x8b, 0xb4, 0xa2, 0x70, 0x11, 0x86, 0x5d, 0x3c, 0x1b, - 0xa9, 0x71, 0x0e, 0x60, 0x64, 0x6f, 0x0c, 0x18, 0x59, 0x9e, 0x62, 0x69, 0xb3, 0x22, 0x23, 0xd7, - 0x48, 0x46, 0x31, 0xe2, 0xaa, 0x49, 0x72, 0x8f, 0x4d, 0x53, 0x37, 0x39, 0xc6, 0xc1, 0x06, 0xe2, - 0x55, 0x52, 0x29, 0x7b, 0x6e, 0x79, 0x02, 0x8a, 0x42, 0x8b, 0x28, 0x9f, 0x2b, 0x16, 0x7f, 0x1d, - 0xf7, 0x64, 0x69, 0x81, 0xe9, 0xaf, 0xb2, 0x73, 0xbc, 0xca, 0xf6, 0x61, 0x2b, 0x42, 0x10, 0x5b, - 0x59, 0x85, 0x3c, 0x29, 0x8e, 0x46, 0x60, 
0x13, 0xc5, 0x70, 0x61, 0x93, 0xeb, 0xb0, 0x48, 0x93, - 0x00, 0x86, 0xc0, 0xf0, 0xc8, 0x9a, 0xa4, 0x91, 0xb5, 0x44, 0x1e, 0xb0, 0x5d, 0x60, 0x21, 0xf6, - 0x39, 0x58, 0xf2, 0xf1, 0xba, 0x45, 0x17, 0xc3, 0x10, 0xca, 0x2e, 0xf7, 0x06, 0xaf, 0xbe, 0x7e, - 0x1b, 0xf7, 0x76, 0xc8, 0xc3, 0x5b, 0xc2, 0xa0, 0x91, 0xf8, 0xd7, 0x04, 0x8d, 0x08, 0x5f, 0x19, - 0x1a, 0xf1, 0x17, 0x91, 0x89, 0x60, 0x11, 0xf9, 0xf7, 0xb8, 0x77, 0x26, 0x2e, 0xd0, 0xd1, 0xd6, - 0x55, 0xcc, 0xcb, 0x3a, 0xfa, 0x9b, 0xa4, 0x59, 0x5d, 0xfd, 0x84, 0x17, 0x6f, 0xe4, 0x27, 0xe1, - 0x72, 0x63, 0x67, 0x8e, 0x87, 0x46, 0xb7, 0x22, 0x64, 0xb9, 0x0b, 0xaf, 0x08, 0xcb, 0x90, 0x78, - 0x88, 0x59, 0xa4, 0x5b, 0x90, 0xc8, 0x4f, 0xc2, 0x47, 0x8d, 0x8c, 0xe7, 0x20, 0x6c, 0x80, 0x6e, - 0x43, 0x8e, 0xb6, 0x6b, 0x64, 0xdd, 0xb0, 0x78, 0x40, 0x0a, 0xa4, 0x6b, 0xac, 0x2b, 0xb3, 0xb6, - 0x4f, 0x78, 0xf6, 0x0c, 0x4b, 0xca, 0x1a, 0xfc, 0x97, 0x2f, 0x69, 0xca, 0x05, 0x92, 0xa6, 0x0b, - 0x90, 0x23, 0xb3, 0xb7, 0x0c, 0xa5, 0x8d, 0x69, 0x64, 0xc9, 0x49, 0x1e, 0x41, 0x7c, 0x00, 0x68, - 0x3c, 0x4e, 0xa2, 0x06, 0xa4, 0xf1, 0x29, 0xee, 0xdb, 0x2c, 0xa7, 0xcc, 0xaf, 0x9f, 0x1b, 0xaf, - 0x1b, 0xc9, 0xe3, 0xcd, 0x0a, 0xd9, 0xe4, 0xbf, 0x7d, 0xbe, 0x5a, 0x66, 0xdc, 0xcf, 0xea, 0x3d, - 0xcd, 0xc6, 0x3d, 0xc3, 0x3e, 0x93, 0xb8, 0xbc, 0xf8, 0x67, 0x01, 0x4a, 0x23, 0xf1, 0x33, 0x74, - 0x6f, 0x1d, 0x93, 0x17, 0x7c, 0xc0, 0xd2, 0x6c, 0xfb, 0x7d, 0x11, 0xe0, 0x44, 0xb1, 0xe4, 0xf7, - 0x94, 0xbe, 0x8d, 0x55, 0xbe, 0xe9, 0xb9, 0x13, 0xc5, 0x7a, 0x93, 0x12, 0xc8, 0xa9, 0x93, 0xc7, - 0x03, 0x0b, 0xab, 0x1c, 0xe2, 0xca, 0x9c, 0x28, 0xd6, 0xa1, 0x85, 0x55, 0xdf, 0x2a, 0x33, 0x8f, - 0xb6, 0xca, 0xe0, 0x1e, 0x67, 0x47, 0xf6, 0xd8, 0x57, 0xf7, 0xe7, 0xfc, 0x75, 0x3f, 0xaa, 0x42, - 0xd6, 0x30, 0x35, 0xdd, 0xd4, 0xec, 0x33, 0x7a, 0x30, 0x09, 0xc9, 0x1d, 0xa3, 0xcb, 0x50, 0xe8, - 0xe1, 0x9e, 0xa1, 0xeb, 0x5d, 0x99, 0x39, 0x9b, 0x3c, 0x15, 0x5d, 0xe0, 0xc4, 0x3a, 0xf5, 0x39, - 0x1f, 0x0a, 0xde, 0xd7, 0xe7, 0xe1, 0x3b, 0x5f, 0xef, 0xf6, 0xae, 0x84, 0x6c, 
0xaf, 0x8f, 0x42, - 0x16, 0x31, 0xb2, 0xbf, 0xee, 0xf8, 0xdb, 0xda, 0x60, 0xf1, 0x87, 0x14, 0xf4, 0x0d, 0xe6, 0x46, - 0xe8, 0xc0, 0x5f, 0x99, 0x0d, 0xa8, 0x53, 0x70, 0xcc, 0x79, 0x56, 0xef, 0xe1, 0x55, 0x70, 0x8c, - 0x6c, 0xa1, 0xb7, 0xe0, 0xf1, 0x11, 0xcf, 0xe6, 0xaa, 0x16, 0x66, 0x75, 0x70, 0x8f, 0x05, 0x1d, - 0x9c, 0xa3, 0xda, 0xdb, 0xac, 0xc4, 0x23, 0x7e, 0x73, 0xdb, 0x50, 0x0c, 0xa6, 0x79, 0xa1, 0xc7, - 0x7f, 0x19, 0x0a, 0x26, 0xb6, 0x15, 0xad, 0x2f, 0x07, 0x6a, 0xd2, 0x05, 0x46, 0xe4, 0xf8, 0xef, - 0x3e, 0x3c, 0x16, 0x9a, 0xee, 0xa1, 0x17, 0x21, 0xe7, 0x65, 0x8a, 0x6c, 0x57, 0x27, 0x20, 0x79, - 0x1e, 0xaf, 0xf8, 0x9b, 0xb8, 0xa7, 0x32, 0x88, 0x0d, 0xd6, 0x21, 0x6d, 0x62, 0x6b, 0xd0, 0x65, - 0x68, 0x5d, 0x71, 0xfd, 0xb9, 0xd9, 0x12, 0x45, 0x42, 0x1d, 0x74, 0x6d, 0x89, 0x0b, 0x8b, 0x0f, - 0x20, 0xcd, 0x28, 0x28, 0x0f, 0x99, 0xc3, 0xdd, 0x7b, 0xbb, 0x7b, 0x6f, 0xee, 0x96, 0x63, 0x08, - 0x20, 0xbd, 0x51, 0xab, 0xd5, 0xf7, 0x9b, 0xe5, 0x38, 0xca, 0x41, 0x6a, 0x63, 0x73, 0x4f, 0x6a, - 0x96, 0x05, 0x42, 0x96, 0xea, 0xaf, 0xd7, 0x6b, 0xcd, 0x72, 0x02, 0x2d, 0x42, 0x81, 0xfd, 0x96, - 0xef, 0xee, 0x49, 0xf7, 0x37, 0x9a, 0xe5, 0xa4, 0x8f, 0x74, 0x50, 0xdf, 0xdd, 0xaa, 0x4b, 0xe5, - 0x94, 0xf8, 0x5f, 0x70, 0x3e, 0x32, 0xb5, 0xf4, 0x80, 0xbf, 0xb8, 0x0f, 0xf8, 0x13, 0x7f, 0x22, - 0x40, 0x35, 0x3a, 0x5f, 0x44, 0xaf, 0x8f, 0x2c, 0x7c, 0x7d, 0x8e, 0x64, 0x73, 0x64, 0xf5, 0xe8, - 0x0a, 0x14, 0x4d, 0x7c, 0x8c, 0xed, 0x76, 0x87, 0xe5, 0xaf, 0x2c, 0x60, 0x16, 0xa4, 0x02, 0xa7, - 0x52, 0x21, 0x8b, 0xb1, 0xbd, 0x83, 0xdb, 0xb6, 0xcc, 0x7c, 0x11, 0x33, 0xba, 0x1c, 0x61, 0x23, - 0xd4, 0x03, 0x46, 0x14, 0xdf, 0x9e, 0x6b, 0x2f, 0x73, 0x90, 0x92, 0xea, 0x4d, 0xe9, 0xad, 0x72, - 0x02, 0x21, 0x28, 0xd2, 0x9f, 0xf2, 0xc1, 0xee, 0xc6, 0xfe, 0x41, 0x63, 0x8f, 0xec, 0xe5, 0x12, - 0x94, 0x9c, 0xbd, 0x74, 0x88, 0x29, 0xf1, 0x0f, 0x02, 0x3c, 0x1e, 0x91, 0xed, 0xa2, 0xdb, 0x00, - 0xf6, 0x50, 0x36, 0x71, 0x5b, 0x37, 0xd5, 0x68, 0x23, 0x6b, 0x0e, 0x25, 0xca, 0x21, 0xe5, 0x6c, - 0xfe, 0xcb, 0x9a, 
0x80, 0x17, 0xa3, 0x97, 0xb9, 0x52, 0xb2, 0x2a, 0xe7, 0x53, 0xbb, 0x18, 0x02, - 0x8b, 0xe2, 0x36, 0x51, 0x4c, 0xf7, 0x96, 0x2a, 0xa6, 0xfc, 0xe8, 0x7e, 0x98, 0x53, 0x99, 0xb1, - 0x5b, 0x33, 0x9f, 0x3b, 0x49, 0x3d, 0x9a, 0x3b, 0x11, 0x7f, 0x9e, 0xf0, 0x6f, 0x6c, 0x30, 0xb9, - 0xdf, 0x83, 0xb4, 0x65, 0x2b, 0xf6, 0xc0, 0xe2, 0x06, 0xf7, 0xe2, 0xac, 0x95, 0xc2, 0x9a, 0xf3, - 0xe3, 0x80, 0x8a, 0x4b, 0x5c, 0xcd, 0x77, 0xfb, 0x6d, 0x89, 0xb7, 0xa0, 0x18, 0xdc, 0x9c, 0xe8, - 0x4f, 0xc6, 0xf3, 0x39, 0x82, 0x78, 0xc7, 0xcb, 0xbf, 0x7c, 0xa0, 0xe5, 0x38, 0x20, 0x18, 0x0f, - 0x03, 0x04, 0x7f, 0x11, 0x87, 0x27, 0x26, 0xd4, 0x4b, 0xe8, 0x8d, 0x91, 0x73, 0x7e, 0x69, 0x9e, - 0x6a, 0x6b, 0x8d, 0xd1, 0x82, 0x27, 0x2d, 0xde, 0x84, 0x05, 0x3f, 0x7d, 0xb6, 0x45, 0xfe, 0x5e, - 0xf0, 0x7c, 0x7e, 0x10, 0xb9, 0xfc, 0xda, 0x12, 0xcd, 0x11, 0x3b, 0x13, 0xe6, 0xb4, 0xb3, 0xd0, - 0x64, 0x21, 0xf1, 0xcd, 0x25, 0x0b, 0xc9, 0x47, 0xb4, 0xb6, 0xb7, 0x00, 0x7c, 0x0d, 0xc9, 0x65, - 0x48, 0x99, 0xfa, 0xa0, 0xaf, 0xd2, 0x63, 0x4e, 0x49, 0x6c, 0x80, 0x6e, 0x41, 0x8a, 0x98, 0x8b, - 0xb3, 0x19, 0xe3, 0x9e, 0x93, 0x1c, 0xb7, 0x0f, 0xf3, 0x65, 0xdc, 0xa2, 0x06, 0x68, 0xbc, 0x29, - 0x14, 0xf1, 0x8a, 0x57, 0x82, 0xaf, 0x78, 0x32, 0xb2, 0xbd, 0x14, 0xfe, 0xaa, 0xf7, 0x21, 0x45, - 0x8f, 0x97, 0xe4, 0x27, 0xb4, 0xb1, 0xc9, 0x0b, 0x5e, 0xf2, 0x1b, 0x7d, 0x1f, 0x40, 0xb1, 0x6d, - 0x53, 0x6b, 0x0d, 0xbc, 0x17, 0xac, 0x86, 0x9b, 0xc7, 0x86, 0xc3, 0xb7, 0x79, 0x81, 0xdb, 0xc9, - 0xb2, 0x27, 0xea, 0xb3, 0x15, 0x9f, 0x42, 0x71, 0x17, 0x8a, 0x41, 0x59, 0xa7, 0x44, 0x63, 0x73, - 0x08, 0x96, 0x68, 0xac, 0xe2, 0xe6, 0x25, 0x9a, 0x5b, 0xe0, 0x25, 0x58, 0x0f, 0x9b, 0x0e, 0xc4, - 0x7f, 0xc4, 0x61, 0xc1, 0x6f, 0x5d, 0xff, 0x69, 0x55, 0x8e, 0xf8, 0x61, 0x1c, 0xb2, 0xee, 0xe2, - 0x23, 0x1a, 0xc8, 0xde, 0xde, 0x09, 0xfe, 0x76, 0x29, 0xeb, 0x48, 0x27, 0xdc, 0x3e, 0xf7, 0x1d, - 0x37, 0x21, 0x8a, 0x02, 0xa5, 0xfd, 0x3b, 0xed, 0xb4, 0xfa, 0x79, 0xfe, 0xf7, 0x63, 0x3e, 0x0f, - 0x92, 0x09, 0xa0, 0xff, 0x81, 0xb4, 0xd2, 0x76, 0xa1, 
0xf8, 0x62, 0x08, 0x36, 0xeb, 0xb0, 0xae, - 0x35, 0x87, 0x1b, 0x94, 0x53, 0xe2, 0x12, 0x7c, 0x56, 0x82, 0xdb, 0x27, 0x7f, 0x95, 0xe8, 0x65, - 0x3c, 0x41, 0xb7, 0x57, 0x04, 0x38, 0xdc, 0xbd, 0xbf, 0xb7, 0xb5, 0x7d, 0x77, 0xbb, 0xbe, 0xc5, - 0x53, 0xa2, 0xad, 0xad, 0xfa, 0x56, 0x59, 0x20, 0x7c, 0x52, 0xfd, 0xfe, 0xde, 0x51, 0x7d, 0xab, - 0x9c, 0x10, 0xef, 0x40, 0xce, 0x75, 0x1d, 0xa8, 0x02, 0x19, 0xa7, 0xad, 0x10, 0xe7, 0x11, 0x93, - 0x77, 0x89, 0x96, 0x21, 0x65, 0xe8, 0xef, 0xf1, 0x2e, 0x71, 0x42, 0x62, 0x03, 0x51, 0x85, 0xd2, - 0x88, 0xdf, 0x41, 0x77, 0x20, 0x63, 0x0c, 0x5a, 0xb2, 0x63, 0xb4, 0x23, 0x4d, 0x18, 0x07, 0x29, - 0x18, 0xb4, 0xba, 0x5a, 0xfb, 0x1e, 0x3e, 0x73, 0xb6, 0xc9, 0x18, 0xb4, 0xee, 0x31, 0xdb, 0x66, - 0x6f, 0x11, 0xfc, 0x6f, 0x39, 0x85, 0xac, 0xf3, 0xa9, 0xa2, 0xff, 0x85, 0x9c, 0xeb, 0xd2, 0xdc, - 0xab, 0x33, 0x91, 0xbe, 0x90, 0xab, 0xf7, 0x44, 0xd0, 0x75, 0x58, 0xb4, 0xb4, 0x93, 0xbe, 0xd3, - 0x82, 0x62, 0xc8, 0x9c, 0x40, 0xbf, 0x99, 0x12, 0x7b, 0xb0, 0xe3, 0xc0, 0x49, 0x24, 0x92, 0x95, - 0x47, 0x7d, 0xc5, 0xb7, 0x39, 0x81, 0x90, 0x88, 0x9b, 0x08, 0x8b, 0xb8, 0x1f, 0x08, 0x90, 0xf7, - 0x35, 0xb6, 0xd0, 0x7f, 0xfb, 0x1c, 0x57, 0x31, 0x24, 0x54, 0xf8, 0x78, 0xbd, 0x5b, 0x19, 0xc1, - 0x85, 0x09, 0xf3, 0x2f, 0x2c, 0xaa, 0x8f, 0xe8, 0xf4, 0xc7, 0x92, 0x73, 0xf7, 0xc7, 0x9e, 0x05, - 0x64, 0xeb, 0xb6, 0xd2, 0x95, 0x4f, 0x75, 0x5b, 0xeb, 0x9f, 0xc8, 0xcc, 0x34, 0x98, 0x9b, 0x29, - 0xd3, 0x27, 0x47, 0xf4, 0xc1, 0x3e, 0xb5, 0x92, 0x1f, 0xc4, 0x21, 0xeb, 0x96, 0x6d, 0xf3, 0x5e, - 0xb2, 0x38, 0x07, 0x69, 0x5e, 0x99, 0xb0, 0x5b, 0x16, 0x7c, 0x14, 0xda, 0x08, 0xac, 0x42, 0xb6, - 0x87, 0x6d, 0x85, 0xfa, 0x4c, 0x06, 0x41, 0xba, 0xe3, 0xeb, 0x2f, 0x41, 0xde, 0x77, 0xdf, 0x85, - 0xb8, 0xd1, 0xdd, 0xfa, 0x9b, 0xe5, 0x58, 0x35, 0xf3, 0xd1, 0x27, 0x97, 0x12, 0xbb, 0xf8, 0x3d, - 0xf2, 0x85, 0x49, 0xf5, 0x5a, 0xa3, 0x5e, 0xbb, 0x57, 0x8e, 0x57, 0xf3, 0x1f, 0x7d, 0x72, 0x29, - 0x23, 0x61, 0xda, 0xb7, 0xb9, 0x7e, 0x0f, 0x4a, 0x23, 0x07, 0x13, 0xfc, 0xa0, 0x11, 0x14, 
0xb7, - 0x0e, 0xf7, 0x77, 0xb6, 0x6b, 0x1b, 0xcd, 0xba, 0x7c, 0xb4, 0xd7, 0xac, 0x97, 0xe3, 0xe8, 0x71, - 0x58, 0xda, 0xd9, 0x7e, 0xad, 0xd1, 0x94, 0x6b, 0x3b, 0xdb, 0xf5, 0xdd, 0xa6, 0xbc, 0xd1, 0x6c, - 0x6e, 0xd4, 0xee, 0x95, 0x85, 0xf5, 0x5f, 0xe6, 0xa1, 0xb4, 0xb1, 0x59, 0xdb, 0x26, 0xb5, 0x99, - 0xd6, 0x56, 0xa8, 0x7b, 0xa8, 0x41, 0x92, 0x82, 0xc0, 0x13, 0x6f, 0x30, 0x57, 0x27, 0x37, 0xf6, - 0xd0, 0x5d, 0x48, 0x51, 0x7c, 0x18, 0x4d, 0xbe, 0xd2, 0x5c, 0x9d, 0xd2, 0xe9, 0x23, 0x93, 0xa1, - 0x9f, 0xd3, 0xc4, 0x3b, 0xce, 0xd5, 0xc9, 0x8d, 0x3f, 0xb4, 0x03, 0x19, 0x07, 0xbe, 0x9b, 0x76, - 0x5b, 0xb8, 0x3a, 0xb5, 0x83, 0x46, 0x96, 0xc6, 0x60, 0xd6, 0xc9, 0xd7, 0x9f, 0xab, 0x53, 0x5a, - 0x82, 0x68, 0x1b, 0xd2, 0x1c, 0xe1, 0x98, 0x72, 0xf3, 0xb7, 0x3a, 0xad, 0x13, 0x86, 0x24, 0xc8, - 0x79, 0x00, 0xf6, 0xf4, 0x4b, 0xdd, 0xd5, 0x19, 0xba, 0x9d, 0xe8, 0x01, 0x14, 0x82, 0xa8, 0xc9, - 0x6c, 0xb7, 0x8b, 0xab, 0x33, 0xf6, 0xdc, 0x88, 0xfe, 0x20, 0x84, 0x32, 0xdb, 0x6d, 0xe3, 0xea, - 0x8c, 0x2d, 0x38, 0xf4, 0x0e, 0x2c, 0x8e, 0x43, 0x1c, 0xb3, 0x5f, 0x3e, 0xae, 0xce, 0xd1, 0x94, - 0x43, 0x3d, 0x40, 0x21, 0xd0, 0xc8, 0x1c, 0x77, 0x91, 0xab, 0xf3, 0xf4, 0xe8, 0x90, 0x0a, 0xa5, - 0x51, 0xb8, 0x61, 0xd6, 0xbb, 0xc9, 0xd5, 0x99, 0xfb, 0x75, 0xec, 0x2d, 0xc1, 0xda, 0x7b, 0xd6, - 0xbb, 0xca, 0xd5, 0x99, 0xdb, 0x77, 0xe8, 0x10, 0xc0, 0x57, 0x3b, 0xce, 0x70, 0x77, 0xb9, 0x3a, - 0x4b, 0x23, 0x0f, 0x19, 0xb0, 0x14, 0x56, 0x54, 0xce, 0x73, 0x95, 0xb9, 0x3a, 0x57, 0x7f, 0x8f, - 0xd8, 0x73, 0xb0, 0x3c, 0x9c, 0xed, 0x6a, 0x73, 0x75, 0xc6, 0x46, 0xdf, 0x66, 0xfd, 0xd3, 0x2f, - 0x56, 0xe2, 0x9f, 0x7d, 0xb1, 0x12, 0xff, 0xeb, 0x17, 0x2b, 0xf1, 0x8f, 0xbf, 0x5c, 0x89, 0x7d, - 0xf6, 0xe5, 0x4a, 0xec, 0x4f, 0x5f, 0xae, 0xc4, 0xfe, 0xef, 0x99, 0x13, 0xcd, 0xee, 0x0c, 0x5a, - 0x6b, 0x6d, 0xbd, 0x77, 0xc3, 0xff, 0x2f, 0x97, 0xb0, 0x7f, 0xde, 0xb4, 0xd2, 0x34, 0xa0, 0xde, - 0xfc, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1e, 0xe0, 0x41, 0x05, 0x99, 0x33, 0x00, 0x00, + // 3474 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcb, 0x73, 0x23, 0xd5, + 0xd5, 0xd7, 0xfb, 0x71, 0x64, 0x49, 0xed, 0x6b, 0x33, 0x68, 0xc4, 0x8c, 0x3d, 0xf4, 0x14, 0x30, + 0x33, 0x80, 0x87, 0xcf, 0xf3, 0x0d, 0x0c, 0xdf, 0xc0, 0x47, 0xd9, 0xb2, 0x06, 0x79, 0xc6, 0x63, + 0x9b, 0xb6, 0x6c, 0x8a, 0x3c, 0xa6, 0x69, 0x49, 0xd7, 0x56, 0x33, 0x92, 0xba, 0xe9, 0x6e, 0x19, + 0x99, 0x65, 0x28, 0x36, 0x54, 0xaa, 0xc2, 0x26, 0x95, 0xa4, 0x2a, 0xec, 0x92, 0xaa, 0xe4, 0x3f, + 0xc8, 0x2a, 0xab, 0x2c, 0x58, 0x64, 0xc1, 0x2a, 0xc9, 0x8a, 0xa4, 0x60, 0x91, 0xaa, 0xfc, 0x03, + 0xd9, 0x25, 0xa9, 0xfb, 0xe8, 0x97, 0xd4, 0x2d, 0xb5, 0x18, 0xa0, 0x2a, 0x55, 0xec, 0x74, 0x4f, + 0x9f, 0x73, 0xfa, 0x3e, 0xce, 0x3d, 0x8f, 0xdf, 0x69, 0xc1, 0x13, 0x16, 0x1e, 0x74, 0xb0, 0xd1, + 0x57, 0x07, 0xd6, 0x75, 0xa5, 0xd5, 0x56, 0xaf, 0x5b, 0x67, 0x3a, 0x36, 0xd7, 0x74, 0x43, 0xb3, + 0x34, 0x54, 0x76, 0x1f, 0xae, 0x91, 0x87, 0xd5, 0x8b, 0x1e, 0xee, 0xb6, 0x71, 0xa6, 0x5b, 0xda, + 0x75, 0xdd, 0xd0, 0xb4, 0x63, 0xc6, 0x5f, 0xbd, 0xe0, 0x79, 0x4c, 0xf5, 0x78, 0xb5, 0xf9, 0x9e, + 0x72, 0xe1, 0x87, 0xf8, 0xcc, 0x7e, 0x7a, 0x71, 0x42, 0x56, 0x57, 0x0c, 0xa5, 0x6f, 0x3f, 0x5e, + 0x3d, 0xd1, 0xb4, 0x93, 0x1e, 0xbe, 0x4e, 0x47, 0xad, 0xe1, 0xf1, 0x75, 0x4b, 0xed, 0x63, 0xd3, + 0x52, 0xfa, 0x3a, 0x67, 0x58, 0x3e, 0xd1, 0x4e, 0x34, 0xfa, 0xf3, 0x3a, 0xf9, 0xc5, 0xa8, 0xe2, + 0xbf, 0x01, 0xb2, 0x12, 0x7e, 0x77, 0x88, 0x4d, 0x0b, 0xad, 0x43, 0x0a, 0xb7, 0xbb, 0x5a, 0x25, + 0x7e, 0x29, 0x7e, 0xa5, 0xb0, 0x7e, 0x61, 0x6d, 0x6c, 0x71, 0x6b, 0x9c, 0xaf, 0xde, 0xee, 0x6a, + 0x8d, 0x98, 0x44, 0x79, 0xd1, 0x4d, 0x48, 0x1f, 0xf7, 0x86, 0x66, 0xb7, 0x92, 0xa0, 0x42, 0x17, + 0xc3, 0x84, 0xee, 0x10, 0xa6, 0x46, 0x4c, 0x62, 0xdc, 0xe4, 0x55, 0xea, 0xe0, 0x58, 0xab, 0x24, + 0xa7, 0xbf, 0x6a, 0x7b, 0x70, 0x4c, 0x5f, 0x45, 0x78, 0xd1, 0x26, 0x80, 0x3a, 0x50, 0x2d, 0xb9, + 0xdd, 0x55, 0xd4, 0x41, 0x25, 0x45, 0x25, 0x9f, 0x0c, 0x97, 0x54, 0xad, 0x1a, 0x61, 0x6c, 0xc4, + 0xa4, 0xbc, 0x6a, 
0x0f, 0xc8, 0x74, 0xdf, 0x1d, 0x62, 0xe3, 0xac, 0x92, 0x9e, 0x3e, 0xdd, 0x37, + 0x08, 0x13, 0x99, 0x2e, 0xe5, 0x46, 0xdb, 0x50, 0x68, 0xe1, 0x13, 0x75, 0x20, 0xb7, 0x7a, 0x5a, + 0xfb, 0x61, 0x25, 0x43, 0x85, 0xc5, 0x30, 0xe1, 0x4d, 0xc2, 0xba, 0x49, 0x38, 0x37, 0x13, 0x95, + 0x78, 0x23, 0x26, 0x41, 0xcb, 0xa1, 0xa0, 0x57, 0x20, 0xd7, 0xee, 0xe2, 0xf6, 0x43, 0xd9, 0x1a, + 0x55, 0xb2, 0x54, 0xcf, 0x6a, 0x98, 0x9e, 0x1a, 0xe1, 0x6b, 0x8e, 0x1a, 0x31, 0x29, 0xdb, 0x66, + 0x3f, 0xd1, 0x1d, 0x80, 0x0e, 0xee, 0xa9, 0xa7, 0xd8, 0x20, 0xf2, 0xb9, 0xe9, 0x7b, 0xb0, 0xc5, + 0x38, 0x9b, 0x23, 0x3e, 0x8d, 0x7c, 0xc7, 0x26, 0xa0, 0x1a, 0xe4, 0xf1, 0xa0, 0xc3, 0x97, 0x93, + 0xa7, 0x6a, 0x2e, 0x85, 0x9e, 0xf7, 0xa0, 0xe3, 0x5d, 0x4c, 0x0e, 0xf3, 0x31, 0xba, 0x05, 0x99, + 0xb6, 0xd6, 0xef, 0xab, 0x56, 0x05, 0xa8, 0x86, 0x95, 0xd0, 0x85, 0x50, 0xae, 0x46, 0x4c, 0xe2, + 0xfc, 0x68, 0x17, 0x4a, 0x3d, 0xd5, 0xb4, 0x64, 0x73, 0xa0, 0xe8, 0x66, 0x57, 0xb3, 0xcc, 0x4a, + 0x81, 0x6a, 0x78, 0x2a, 0x4c, 0xc3, 0x8e, 0x6a, 0x5a, 0x07, 0x36, 0x73, 0x23, 0x26, 0x15, 0x7b, + 0x5e, 0x02, 0xd1, 0xa7, 0x1d, 0x1f, 0x63, 0xc3, 0x51, 0x58, 0x59, 0x98, 0xae, 0x6f, 0x8f, 0x70, + 0xdb, 0xf2, 0x44, 0x9f, 0xe6, 0x25, 0xa0, 0xef, 0xc3, 0x52, 0x4f, 0x53, 0x3a, 0x8e, 0x3a, 0xb9, + 0xdd, 0x1d, 0x0e, 0x1e, 0x56, 0x8a, 0x54, 0xe9, 0xd5, 0xd0, 0x49, 0x6a, 0x4a, 0xc7, 0x56, 0x51, + 0x23, 0x02, 0x8d, 0x98, 0xb4, 0xd8, 0x1b, 0x27, 0xa2, 0x07, 0xb0, 0xac, 0xe8, 0x7a, 0xef, 0x6c, + 0x5c, 0x7b, 0x89, 0x6a, 0xbf, 0x16, 0xa6, 0x7d, 0x83, 0xc8, 0x8c, 0xab, 0x47, 0xca, 0x04, 0x15, + 0x35, 0x41, 0xd0, 0x0d, 0xac, 0x2b, 0x06, 0x96, 0x75, 0x43, 0xd3, 0x35, 0x53, 0xe9, 0x55, 0xca, + 0x54, 0xf7, 0x33, 0x61, 0xba, 0xf7, 0x19, 0xff, 0x3e, 0x67, 0x6f, 0xc4, 0xa4, 0xb2, 0xee, 0x27, + 0x31, 0xad, 0x5a, 0x1b, 0x9b, 0xa6, 0xab, 0x55, 0x98, 0xa5, 0x95, 0xf2, 0xfb, 0xb5, 0xfa, 0x48, + 0xa8, 0x0e, 0x05, 0x3c, 0x22, 0xe2, 0xf2, 0xa9, 0x66, 0xe1, 0xca, 0xe2, 0xf4, 0x8b, 0x55, 0xa7, + 0xac, 0x47, 0x9a, 0x85, 0xc9, 0xa5, 0xc2, 0xce, 0x08, 
0x29, 0xf0, 0xd8, 0x29, 0x36, 0xd4, 0xe3, + 0x33, 0xaa, 0x46, 0xa6, 0x4f, 0x4c, 0x55, 0x1b, 0x54, 0x10, 0x55, 0xf8, 0x6c, 0x98, 0xc2, 0x23, + 0x2a, 0x44, 0x54, 0xd4, 0x6d, 0x91, 0x46, 0x4c, 0x5a, 0x3a, 0x9d, 0x24, 0x13, 0x13, 0x3b, 0x56, + 0x07, 0x4a, 0x4f, 0x7d, 0x1f, 0xf3, 0x6b, 0xb3, 0x34, 0xdd, 0xc4, 0xee, 0x70, 0x6e, 0x7a, 0x57, + 0x88, 0x89, 0x1d, 0x7b, 0x09, 0x9b, 0x59, 0x48, 0x9f, 0x2a, 0xbd, 0x21, 0x16, 0x9f, 0x81, 0x82, + 0xc7, 0xb1, 0xa2, 0x0a, 0x64, 0xfb, 0xd8, 0x34, 0x95, 0x13, 0x4c, 0xfd, 0x70, 0x5e, 0xb2, 0x87, + 0x62, 0x09, 0x16, 0xbc, 0xce, 0x54, 0xfc, 0x38, 0xee, 0x48, 0x12, 0x3f, 0x49, 0x24, 0x4f, 0xb1, + 0x41, 0x97, 0xcd, 0x25, 0xf9, 0x10, 0x5d, 0x86, 0x22, 0x9d, 0xb2, 0x6c, 0x3f, 0x27, 0xce, 0x3a, + 0x25, 0x2d, 0x50, 0xe2, 0x11, 0x67, 0x5a, 0x85, 0x82, 0xbe, 0xae, 0x3b, 0x2c, 0x49, 0xca, 0x02, + 0xfa, 0xba, 0x6e, 0x33, 0x3c, 0x09, 0x0b, 0x64, 0x7d, 0x0e, 0x47, 0x8a, 0xbe, 0xa4, 0x40, 0x68, + 0x9c, 0x45, 0xfc, 0x63, 0x02, 0x84, 0x71, 0x07, 0x8c, 0x6e, 0x41, 0x8a, 0xc4, 0x22, 0x1e, 0x56, + 0xaa, 0x6b, 0x2c, 0x50, 0xad, 0xd9, 0x81, 0x6a, 0xad, 0x69, 0x07, 0xaa, 0xcd, 0xdc, 0xa7, 0x9f, + 0xaf, 0xc6, 0x3e, 0xfe, 0xeb, 0x6a, 0x5c, 0xa2, 0x12, 0xe8, 0x3c, 0xf1, 0x95, 0x8a, 0x3a, 0x90, + 0xd5, 0x0e, 0x9d, 0x72, 0x9e, 0x38, 0x42, 0x45, 0x1d, 0x6c, 0x77, 0xd0, 0x0e, 0x08, 0x6d, 0x6d, + 0x60, 0xe2, 0x81, 0x39, 0x34, 0x65, 0x16, 0x08, 0x79, 0x30, 0xf1, 0xb9, 0x43, 0x16, 0x5e, 0x6b, + 0x36, 0xe7, 0x3e, 0x65, 0x94, 0xca, 0x6d, 0x3f, 0x81, 0xb8, 0xd5, 0x53, 0xa5, 0xa7, 0x76, 0x14, + 0x4b, 0x33, 0xcc, 0x4a, 0xea, 0x52, 0x32, 0xd0, 0x1f, 0x1e, 0xd9, 0x2c, 0x87, 0x7a, 0x47, 0xb1, + 0xf0, 0x66, 0x8a, 0x4c, 0x57, 0xf2, 0x48, 0xa2, 0xa7, 0xa1, 0xac, 0xe8, 0xba, 0x6c, 0x5a, 0x8a, + 0x85, 0xe5, 0xd6, 0x99, 0x85, 0x4d, 0x1a, 0x68, 0x16, 0xa4, 0xa2, 0xa2, 0xeb, 0x07, 0x84, 0xba, + 0x49, 0x88, 0xe8, 0x29, 0x28, 0x91, 0x98, 0xa4, 0x2a, 0x3d, 0xb9, 0x8b, 0xd5, 0x93, 0xae, 0x45, + 0x43, 0x4a, 0x52, 0x2a, 0x72, 0x6a, 0x83, 0x12, 0xc5, 0x8e, 0x73, 0xe2, 0x34, 0x1e, 0x21, 
0x04, + 0xa9, 0x8e, 0x62, 0x29, 0x74, 0x27, 0x17, 0x24, 0xfa, 0x9b, 0xd0, 0x74, 0xc5, 0xea, 0xf2, 0xfd, + 0xa1, 0xbf, 0xd1, 0x39, 0xc8, 0x70, 0xb5, 0x49, 0xaa, 0x96, 0x8f, 0xd0, 0x32, 0xa4, 0x75, 0x43, + 0x3b, 0xc5, 0xf4, 0xe8, 0x72, 0x12, 0x1b, 0x88, 0x1f, 0x24, 0x60, 0x71, 0x22, 0x72, 0x11, 0xbd, + 0x5d, 0xc5, 0xec, 0xda, 0xef, 0x22, 0xbf, 0xd1, 0x8b, 0x44, 0xaf, 0xd2, 0xc1, 0x06, 0x8f, 0xf6, + 0x95, 0xc9, 0xad, 0x6e, 0xd0, 0xe7, 0x7c, 0x6b, 0x38, 0x37, 0xba, 0x07, 0x42, 0x4f, 0x31, 0x2d, + 0x99, 0x79, 0x7f, 0xd9, 0x13, 0xf9, 0x9f, 0x98, 0xd8, 0x64, 0x16, 0x2b, 0x88, 0x41, 0x73, 0x25, + 0x25, 0x22, 0xea, 0x52, 0xd1, 0x21, 0x2c, 0xb7, 0xce, 0xde, 0x57, 0x06, 0x96, 0x3a, 0xc0, 0xf2, + 0xc4, 0xa9, 0x4d, 0xa6, 0x12, 0xf7, 0x55, 0xb3, 0x85, 0xbb, 0xca, 0xa9, 0xaa, 0xd9, 0xd3, 0x5a, + 0x72, 0xe4, 0x9d, 0x13, 0x35, 0x45, 0x09, 0x4a, 0xfe, 0xb0, 0x8b, 0x4a, 0x90, 0xb0, 0x46, 0x7c, + 0xfd, 0x09, 0x6b, 0x84, 0x5e, 0x80, 0x14, 0x59, 0x23, 0x5d, 0x7b, 0x29, 0xe0, 0x45, 0x5c, 0xae, + 0x79, 0xa6, 0x63, 0x89, 0x72, 0x8a, 0xa2, 0x73, 0x1b, 0x9c, 0x50, 0x3c, 0xae, 0x55, 0xbc, 0x0a, + 0xe5, 0xb1, 0x38, 0xeb, 0x39, 0xbe, 0xb8, 0xf7, 0xf8, 0xc4, 0x32, 0x14, 0x7d, 0x01, 0x55, 0x3c, + 0x07, 0xcb, 0x41, 0xf1, 0x51, 0xec, 0x3a, 0x74, 0x5f, 0x9c, 0x43, 0x37, 0x21, 0xe7, 0x04, 0x48, + 0x76, 0x1b, 0xcf, 0x4f, 0xac, 0xc2, 0x66, 0x96, 0x1c, 0x56, 0x72, 0x0d, 0x89, 0x55, 0x53, 0x73, + 0x48, 0xd0, 0x89, 0x67, 0x15, 0x5d, 0x6f, 0x28, 0x66, 0x57, 0x7c, 0x1b, 0x2a, 0x61, 0xc1, 0x6f, + 0x6c, 0x19, 0x29, 0xc7, 0x0a, 0xcf, 0x41, 0xe6, 0x58, 0x33, 0xfa, 0x8a, 0x45, 0x95, 0x15, 0x25, + 0x3e, 0x22, 0xd6, 0xc9, 0x02, 0x61, 0x92, 0x92, 0xd9, 0x40, 0x94, 0xe1, 0x7c, 0x68, 0x00, 0x24, + 0x22, 0xea, 0xa0, 0x83, 0xd9, 0x7e, 0x16, 0x25, 0x36, 0x70, 0x15, 0xb1, 0xc9, 0xb2, 0x01, 0x79, + 0xad, 0x49, 0xd7, 0x4a, 0xf5, 0xe7, 0x25, 0x3e, 0x12, 0x7f, 0x9b, 0x84, 0x73, 0xc1, 0x61, 0x10, + 0x5d, 0x82, 0x85, 0xbe, 0x32, 0x92, 0xad, 0x11, 0xbf, 0xcb, 0xec, 0x38, 0xa0, 0xaf, 0x8c, 0x9a, + 0x23, 0x76, 0x91, 0x05, 0x48, 
0x5a, 0x23, 0xb3, 0x92, 0xb8, 0x94, 0xbc, 0xb2, 0x20, 0x91, 0x9f, + 0xe8, 0x10, 0x16, 0x7b, 0x5a, 0x5b, 0xe9, 0xc9, 0x1e, 0x8b, 0xe7, 0xc6, 0x7e, 0x79, 0x62, 0xb3, + 0x59, 0x40, 0xc3, 0x9d, 0x09, 0xa3, 0x2f, 0x53, 0x1d, 0x3b, 0x8e, 0xe5, 0x7f, 0x43, 0x56, 0xef, + 0x39, 0xa3, 0xb4, 0xcf, 0x53, 0xd8, 0x3e, 0x3b, 0x33, 0xb7, 0xcf, 0x7e, 0x01, 0x96, 0x07, 0x78, + 0x64, 0x79, 0xe6, 0xc8, 0x0c, 0x27, 0x4b, 0xcf, 0x02, 0x91, 0x67, 0xee, 0xfb, 0x89, 0x0d, 0xa1, + 0xab, 0x34, 0xb3, 0xd0, 0x35, 0x13, 0x1b, 0xb2, 0xd2, 0xe9, 0x18, 0xd8, 0x34, 0x69, 0x66, 0xbb, + 0x40, 0xd3, 0x05, 0x4a, 0xdf, 0x60, 0x64, 0xf1, 0x17, 0xde, 0xb3, 0xf2, 0x67, 0x12, 0xfc, 0x24, + 0xe2, 0xee, 0x49, 0x1c, 0xc0, 0x32, 0x97, 0xef, 0xf8, 0x0e, 0x23, 0x11, 0xd5, 0xf3, 0x20, 0x5b, + 0x3c, 0xc2, 0x39, 0x24, 0x1f, 0xed, 0x1c, 0x6c, 0x6f, 0x9b, 0xf2, 0x78, 0xdb, 0xff, 0xb2, 0xb3, + 0x79, 0xcd, 0x89, 0x22, 0x6e, 0x9a, 0x16, 0x18, 0x45, 0xdc, 0x75, 0x25, 0x7c, 0xee, 0xed, 0x97, + 0x71, 0xa8, 0x86, 0xe7, 0x65, 0x81, 0xaa, 0x9e, 0x85, 0x45, 0x67, 0x2d, 0xce, 0xfc, 0xd8, 0xad, + 0x17, 0x9c, 0x07, 0x7c, 0x82, 0xa1, 0x51, 0xf1, 0x29, 0x28, 0x8d, 0x65, 0x8d, 0xec, 0x14, 0x8a, + 0xa7, 0xde, 0xf7, 0x8b, 0x3f, 0x4d, 0x3a, 0x5e, 0xd5, 0x97, 0xda, 0x05, 0x58, 0xde, 0x1b, 0xb0, + 0xd4, 0xc1, 0x6d, 0xb5, 0xf3, 0x55, 0x0d, 0x6f, 0x91, 0x4b, 0x7f, 0x67, 0x77, 0x11, 0xec, 0xee, + 0xcf, 0x05, 0xc8, 0x49, 0xd8, 0xd4, 0x49, 0x4a, 0x87, 0x36, 0x21, 0x8f, 0x47, 0x6d, 0xac, 0x5b, + 0x76, 0x16, 0x1c, 0x5c, 0x4d, 0x30, 0xee, 0xba, 0xcd, 0x49, 0x6a, 0x63, 0x47, 0x0c, 0xdd, 0xe0, + 0x30, 0x48, 0x38, 0xa2, 0xc1, 0xc5, 0xbd, 0x38, 0xc8, 0x8b, 0x36, 0x0e, 0x92, 0x0c, 0x2d, 0x85, + 0x99, 0xd4, 0x18, 0x10, 0x72, 0x83, 0x03, 0x21, 0xa9, 0x19, 0x2f, 0xf3, 0x21, 0x21, 0x35, 0x1f, + 0x12, 0x92, 0x9e, 0xb1, 0xcc, 0x10, 0x28, 0xe4, 0x45, 0x1b, 0x0a, 0xc9, 0xcc, 0x98, 0xf1, 0x18, + 0x16, 0x72, 0xd7, 0x8f, 0x85, 0x64, 0x43, 0x42, 0x9b, 0x2d, 0x3d, 0x15, 0x0c, 0x79, 0xd5, 0x03, + 0x86, 0xe4, 0x42, 0x51, 0x08, 0xa6, 0x28, 0x00, 0x0d, 0x79, 0xdd, 
0x87, 0x86, 0xe4, 0x67, 0xec, + 0xc3, 0x14, 0x38, 0x64, 0xcb, 0x0b, 0x87, 0x40, 0x28, 0xaa, 0xc2, 0xcf, 0x3d, 0x0c, 0x0f, 0x79, + 0xd9, 0xc1, 0x43, 0x0a, 0xa1, 0xc0, 0x0e, 0x5f, 0xcb, 0x38, 0x20, 0xb2, 0x37, 0x01, 0x88, 0x30, + 0x00, 0xe3, 0xe9, 0x50, 0x15, 0x33, 0x10, 0x91, 0xbd, 0x09, 0x44, 0xa4, 0x38, 0x43, 0xe1, 0x0c, + 0x48, 0xe4, 0x07, 0xc1, 0x90, 0x48, 0x38, 0x68, 0xc1, 0xa7, 0x19, 0x0d, 0x13, 0x91, 0x43, 0x30, + 0x91, 0x72, 0x68, 0xfd, 0xce, 0xd4, 0x47, 0x06, 0x45, 0x0e, 0x03, 0x40, 0x11, 0x06, 0x5f, 0x5c, + 0x09, 0x55, 0x1e, 0x01, 0x15, 0x39, 0x0c, 0x40, 0x45, 0x16, 0x67, 0xaa, 0x9d, 0x09, 0x8b, 0xdc, + 0xf1, 0xc3, 0x22, 0x68, 0xc6, 0x1d, 0x0b, 0xc5, 0x45, 0x5a, 0x61, 0xb8, 0x08, 0xc3, 0x2e, 0x9e, + 0x0b, 0xd5, 0x38, 0x07, 0x30, 0xb2, 0x37, 0x01, 0x8c, 0x2c, 0xcf, 0xb0, 0xb4, 0xa8, 0xc8, 0xc8, + 0x55, 0x92, 0x51, 0x8c, 0xb9, 0x6a, 0x92, 0xdc, 0x63, 0xc3, 0xd0, 0x0c, 0x8e, 0x71, 0xb0, 0x81, + 0x78, 0x85, 0x54, 0xca, 0xae, 0x5b, 0x9e, 0x82, 0xa2, 0xd0, 0x22, 0xca, 0xe3, 0x8a, 0xc5, 0xdf, + 0xc5, 0x5d, 0x59, 0x5a, 0x60, 0x7a, 0xab, 0xec, 0x3c, 0xaf, 0xb2, 0x3d, 0xd8, 0x4a, 0xc2, 0x8f, + 0xad, 0xac, 0x42, 0x81, 0x14, 0x47, 0x63, 0xb0, 0x89, 0xa2, 0x3b, 0xb0, 0xc9, 0x35, 0x58, 0xa4, + 0x49, 0x00, 0x43, 0x60, 0x78, 0x64, 0x4d, 0xd1, 0xc8, 0x5a, 0x26, 0x0f, 0xd8, 0x2e, 0xb0, 0x10, + 0xfb, 0x3c, 0x2c, 0x79, 0x78, 0x9d, 0xa2, 0x8b, 0x61, 0x08, 0x82, 0xc3, 0xbd, 0xc1, 0xab, 0xaf, + 0x3f, 0xc4, 0xdd, 0x1d, 0x72, 0xf1, 0x96, 0x20, 0x68, 0x24, 0xfe, 0x35, 0x41, 0x23, 0x89, 0xaf, + 0x0c, 0x8d, 0x78, 0x8b, 0xc8, 0xa4, 0xbf, 0x88, 0xfc, 0x67, 0xdc, 0x3d, 0x13, 0x07, 0xe8, 0x68, + 0x6b, 0x1d, 0xcc, 0xcb, 0x3a, 0xfa, 0x9b, 0xa4, 0x59, 0x3d, 0xed, 0x84, 0x17, 0x6f, 0xe4, 0x27, + 0xe1, 0x72, 0x62, 0x67, 0x9e, 0x87, 0x46, 0xa7, 0x22, 0x64, 0xb9, 0x0b, 0xaf, 0x08, 0x05, 0x48, + 0x3e, 0xc4, 0x2c, 0xd2, 0x2d, 0x48, 0xe4, 0x27, 0xe1, 0xa3, 0x46, 0xc6, 0x73, 0x10, 0x36, 0x40, + 0xb7, 0x20, 0x4f, 0xdb, 0x35, 0xb2, 0xa6, 0x9b, 0x3c, 0x20, 0xf9, 0xd2, 0x35, 0xd6, 0x95, 0x59, + 0xdb, 
0x27, 0x3c, 0x7b, 0xba, 0x29, 0xe5, 0x74, 0xfe, 0xcb, 0x93, 0x34, 0xe5, 0x7d, 0x49, 0xd3, + 0x05, 0xc8, 0x93, 0xd9, 0x9b, 0xba, 0xd2, 0xc6, 0x34, 0xb2, 0xe4, 0x25, 0x97, 0x20, 0x3e, 0x00, + 0x34, 0x19, 0x27, 0x51, 0x03, 0x32, 0xf8, 0x14, 0x0f, 0x2c, 0x96, 0x53, 0x16, 0xd6, 0xcf, 0x4d, + 0xd6, 0x8d, 0xe4, 0xf1, 0x66, 0x85, 0x6c, 0xf2, 0x3f, 0x3e, 0x5f, 0x15, 0x18, 0xf7, 0x73, 0x5a, + 0x5f, 0xb5, 0x70, 0x5f, 0xb7, 0xce, 0x24, 0x2e, 0x2f, 0xfe, 0x3d, 0x0e, 0xe5, 0xb1, 0xf8, 0x19, + 0xb8, 0xb7, 0xb6, 0xc9, 0x27, 0x3c, 0xc0, 0xd2, 0x45, 0x80, 0x13, 0xc5, 0x94, 0xdf, 0x53, 0x06, + 0x16, 0xee, 0xf0, 0xed, 0xcc, 0x9f, 0x28, 0xe6, 0x9b, 0x94, 0xe0, 0x5f, 0x58, 0x6e, 0x6c, 0x61, + 0x9e, 0x62, 0x3b, 0xef, 0x2d, 0xb6, 0x51, 0x15, 0x72, 0xba, 0xa1, 0x6a, 0x86, 0x6a, 0x9d, 0xd1, + 0xdd, 0x48, 0x4a, 0xce, 0x18, 0x5d, 0x86, 0x62, 0x1f, 0xf7, 0x75, 0x4d, 0xeb, 0xc9, 0xec, 0x86, + 0x17, 0xa8, 0xe8, 0x02, 0x27, 0xd6, 0x09, 0xed, 0x6e, 0x2a, 0x97, 0x14, 0x52, 0x77, 0x53, 0xb9, + 0x94, 0x90, 0xbe, 0x9b, 0xca, 0x65, 0x84, 0xec, 0xdd, 0x54, 0x2e, 0x2b, 0xe4, 0xc4, 0x0f, 0x13, + 0xee, 0x55, 0x70, 0xc1, 0x96, 0xa8, 0x6b, 0x8d, 0x66, 0x5b, 0x2b, 0x01, 0x3b, 0xe2, 0xa1, 0x90, + 0xc5, 0x91, 0xd1, 0xd0, 0xc4, 0x1d, 0x8e, 0xe7, 0x39, 0x63, 0xcf, 0x99, 0x66, 0x1f, 0xed, 0x4c, + 0xa7, 0x6f, 0xbc, 0xf8, 0x63, 0x8a, 0xc0, 0xfa, 0x13, 0x15, 0x74, 0xe0, 0x2d, 0x93, 0x86, 0xf4, + 0x86, 0xda, 0xb6, 0x15, 0xf5, 0x2a, 0xbb, 0xe5, 0x14, 0x23, 0x9b, 0xe8, 0x2d, 0x78, 0x7c, 0xcc, + 0xcd, 0x38, 0xaa, 0x13, 0x51, 0xbd, 0xcd, 0x63, 0x7e, 0x6f, 0x63, 0xab, 0x76, 0x37, 0x2b, 0xf9, + 0x88, 0x17, 0x60, 0x1b, 0x4a, 0xfe, 0x9c, 0x2b, 0xf0, 0xf8, 0x2f, 0x43, 0xd1, 0xc0, 0x96, 0xa2, + 0x0e, 0x64, 0x5f, 0x81, 0xb8, 0xc0, 0x88, 0x1c, 0x8c, 0xdd, 0x87, 0xc7, 0x02, 0x73, 0x2f, 0xf4, + 0x12, 0xe4, 0xdd, 0xb4, 0x8d, 0xed, 0xea, 0x14, 0x58, 0xcd, 0xe5, 0x15, 0x7f, 0x1f, 0x77, 0x55, + 0xfa, 0x81, 0xba, 0x3a, 0x64, 0x0c, 0x6c, 0x0e, 0x7b, 0x0c, 0x3a, 0x2b, 0xad, 0x3f, 0x1f, 0x2d, + 0x6b, 0x23, 0xd4, 0x61, 0xcf, 0x92, 0xb8, 
0xb0, 0xf8, 0x00, 0x32, 0x8c, 0x82, 0x0a, 0x90, 0x3d, + 0xdc, 0xbd, 0xb7, 0xbb, 0xf7, 0xe6, 0xae, 0x10, 0x43, 0x00, 0x99, 0x8d, 0x5a, 0xad, 0xbe, 0xdf, + 0x14, 0xe2, 0x28, 0x0f, 0xe9, 0x8d, 0xcd, 0x3d, 0xa9, 0x29, 0x24, 0x08, 0x59, 0xaa, 0xdf, 0xad, + 0xd7, 0x9a, 0x42, 0x12, 0x2d, 0x42, 0x91, 0xfd, 0x96, 0xef, 0xec, 0x49, 0xf7, 0x37, 0x9a, 0x42, + 0xca, 0x43, 0x3a, 0xa8, 0xef, 0x6e, 0xd5, 0x25, 0x21, 0x2d, 0xfe, 0x0f, 0x9c, 0x0f, 0xcd, 0xf3, + 0x5c, 0x14, 0x2e, 0xee, 0x41, 0xe1, 0xc4, 0x9f, 0x27, 0x48, 0x91, 0x1f, 0x96, 0xbc, 0xa1, 0xbb, + 0x63, 0x0b, 0x5f, 0x9f, 0x23, 0xf3, 0x1b, 0x5b, 0x3d, 0xa9, 0xeb, 0x0d, 0x7c, 0x8c, 0xad, 0x76, + 0x97, 0x25, 0x93, 0x2c, 0x7a, 0x15, 0xa5, 0x22, 0xa7, 0x52, 0x21, 0x93, 0xb1, 0xbd, 0x83, 0xdb, + 0x96, 0xcc, 0x7c, 0x14, 0x33, 0xba, 0x3c, 0x61, 0x23, 0xd4, 0x03, 0x46, 0x14, 0xdf, 0x9e, 0x6b, + 0x2f, 0xf3, 0x90, 0x96, 0xea, 0x4d, 0xe9, 0x2d, 0x21, 0x89, 0x10, 0x94, 0xe8, 0x4f, 0xf9, 0x60, + 0x77, 0x63, 0xff, 0xa0, 0xb1, 0x47, 0xf6, 0x72, 0x09, 0xca, 0xf6, 0x5e, 0xda, 0xc4, 0xb4, 0xf8, + 0xa7, 0x04, 0x3c, 0x1e, 0x92, 0x7a, 0xa2, 0x5b, 0x00, 0xd6, 0x48, 0x36, 0x70, 0x5b, 0x33, 0x3a, + 0xe1, 0x46, 0xd6, 0x1c, 0x49, 0x94, 0x43, 0xca, 0x5b, 0xfc, 0x97, 0x39, 0x05, 0xbc, 0x45, 0xaf, + 0x70, 0xa5, 0x64, 0x55, 0xf6, 0x55, 0xbb, 0x18, 0x80, 0x51, 0xe2, 0x36, 0x51, 0x4c, 0xf7, 0x96, + 0x2a, 0xa6, 0xfc, 0xe8, 0x7e, 0x90, 0x53, 0x89, 0xd8, 0x3a, 0x99, 0xcf, 0x9d, 0xa4, 0x1f, 0xcd, + 0x9d, 0x88, 0xbf, 0x4a, 0x7a, 0x37, 0xd6, 0x9f, 0x69, 0xef, 0x41, 0xc6, 0xb4, 0x14, 0x6b, 0x68, + 0x72, 0x83, 0x7b, 0x29, 0x6a, 0xda, 0xbe, 0x66, 0xff, 0x38, 0xa0, 0xe2, 0x12, 0x57, 0xf3, 0xdd, + 0x7e, 0x9b, 0xe2, 0x4d, 0x28, 0xf9, 0x37, 0x27, 0xfc, 0xca, 0xb8, 0x3e, 0x27, 0x21, 0xde, 0x76, + 0x93, 0x21, 0x0f, 0x82, 0x38, 0x89, 0xce, 0xc5, 0x83, 0xd0, 0xb9, 0x5f, 0xc7, 0xe1, 0x89, 0x29, + 0xc5, 0x0b, 0x7a, 0x63, 0xec, 0x9c, 0x5f, 0x9e, 0xa7, 0xf4, 0x59, 0x63, 0x34, 0xff, 0x49, 0x8b, + 0x37, 0x60, 0xc1, 0x4b, 0x8f, 0xb6, 0xc8, 0x9f, 0x24, 0x5d, 0x9f, 0xef, 0x87, 
0x11, 0xbf, 0xb6, + 0xac, 0x6f, 0xcc, 0xce, 0x12, 0x73, 0xda, 0x59, 0x60, 0xb2, 0x90, 0xfc, 0xe6, 0x92, 0x85, 0xd4, + 0x23, 0x26, 0x0b, 0xde, 0x0b, 0x97, 0xf6, 0x5f, 0xb8, 0x89, 0xb8, 0x9e, 0x09, 0x88, 0xeb, 0x6f, + 0x01, 0x78, 0xba, 0x8b, 0xcb, 0x90, 0x36, 0xb4, 0xe1, 0xa0, 0x43, 0xcd, 0x24, 0x2d, 0xb1, 0x01, + 0xba, 0x09, 0x69, 0x62, 0x6e, 0xf6, 0x66, 0x4e, 0x7a, 0x5e, 0x62, 0x2e, 0x1e, 0x00, 0x97, 0x71, + 0x8b, 0x2a, 0xa0, 0xc9, 0x0e, 0x4f, 0xc8, 0x2b, 0x5e, 0xf5, 0xbf, 0xe2, 0xc9, 0xd0, 0x5e, 0x51, + 0xf0, 0xab, 0xde, 0x87, 0x34, 0x35, 0x0f, 0x92, 0xdf, 0xd0, 0x2e, 0x25, 0xaf, 0x5e, 0xc9, 0x6f, + 0xf4, 0x43, 0x00, 0xc5, 0xb2, 0x0c, 0xb5, 0x35, 0x74, 0x5f, 0xb0, 0x1a, 0x6c, 0x5e, 0x1b, 0x36, + 0xdf, 0xe6, 0x05, 0x6e, 0x67, 0xcb, 0xae, 0xa8, 0xc7, 0xd6, 0x3c, 0x0a, 0xc5, 0x5d, 0x28, 0xf9, + 0x65, 0xed, 0x7a, 0x8b, 0xcd, 0xc1, 0x5f, 0x6f, 0xb1, 0xf2, 0x99, 0xd7, 0x5b, 0x4e, 0xb5, 0x96, + 0x64, 0x0d, 0x69, 0x3a, 0x10, 0xff, 0x15, 0x87, 0x05, 0xaf, 0x75, 0x7e, 0xcd, 0x69, 0xfc, 0x8c, + 0xc2, 0xe6, 0xfc, 0x44, 0x16, 0x9f, 0x3d, 0x51, 0xcc, 0xc3, 0x6f, 0x33, 0x89, 0xff, 0x30, 0x0e, + 0x39, 0x67, 0xf1, 0x21, 0xdd, 0x60, 0x77, 0xef, 0x12, 0xde, 0xde, 0x27, 0x6b, 0x2f, 0x27, 0x9d, + 0xa6, 0xf5, 0x6d, 0x27, 0xa1, 0x0a, 0x43, 0x98, 0xbd, 0x3b, 0x6d, 0xf7, 0xed, 0x79, 0xfe, 0xf8, + 0x33, 0x3e, 0x0f, 0x92, 0x49, 0xa0, 0xff, 0x83, 0x8c, 0xd2, 0x76, 0x70, 0xf5, 0x52, 0x00, 0xd0, + 0x6a, 0xb3, 0xae, 0x35, 0x47, 0x1b, 0x94, 0x53, 0xe2, 0x12, 0x7c, 0x56, 0x09, 0xa7, 0xe9, 0xfd, + 0x1a, 0xd1, 0xcb, 0x78, 0xfc, 0x6e, 0xb3, 0x04, 0x70, 0xb8, 0x7b, 0x7f, 0x6f, 0x6b, 0xfb, 0xce, + 0x76, 0x7d, 0x8b, 0xa7, 0x54, 0x5b, 0x5b, 0xf5, 0x2d, 0x21, 0x41, 0xf8, 0xa4, 0xfa, 0xfd, 0xbd, + 0xa3, 0xfa, 0x96, 0x90, 0x14, 0x6f, 0x43, 0xde, 0x71, 0x3d, 0xa8, 0x02, 0x59, 0xbb, 0x47, 0x10, + 0xe7, 0x0e, 0x80, 0xb7, 0x7c, 0x96, 0x21, 0xad, 0x6b, 0xef, 0xf1, 0x96, 0x6f, 0x52, 0x62, 0x03, + 0xb1, 0x03, 0xe5, 0x31, 0xbf, 0x85, 0x6e, 0x43, 0x56, 0x1f, 0xb6, 0x64, 0xdb, 0x68, 0xc7, 0x3a, + 0x2a, 0x76, 0xd9, 
0x3f, 0x6c, 0xf5, 0xd4, 0xf6, 0x3d, 0x7c, 0x66, 0x6f, 0x93, 0x3e, 0x6c, 0xdd, + 0x63, 0xb6, 0xcd, 0xde, 0x92, 0xf0, 0xbe, 0xe5, 0x14, 0x72, 0xf6, 0x55, 0x45, 0xff, 0x0f, 0x79, + 0xc7, 0x25, 0x3a, 0xdf, 0xc1, 0x84, 0xfa, 0x52, 0xae, 0xde, 0x15, 0x41, 0xd7, 0x60, 0xd1, 0x54, + 0x4f, 0x06, 0x76, 0x3f, 0x89, 0xc1, 0x6c, 0x09, 0x7a, 0x67, 0xca, 0xec, 0xc1, 0x8e, 0x8d, 0x0d, + 0x91, 0x48, 0x28, 0x8c, 0xfb, 0x8a, 0x6f, 0x73, 0x02, 0x01, 0x11, 0x3b, 0x19, 0x14, 0xb1, 0x3f, + 0x48, 0x40, 0xc1, 0xd3, 0xa5, 0x42, 0xff, 0xeb, 0x71, 0x5c, 0xa5, 0x80, 0x50, 0xe3, 0xe1, 0x75, + 0x3f, 0xb1, 0xf0, 0x2f, 0x2c, 0x31, 0xff, 0xc2, 0xc2, 0x9a, 0x82, 0x76, 0xb3, 0x2b, 0x35, 0x77, + 0xb3, 0xeb, 0x39, 0x40, 0x96, 0x66, 0x29, 0x3d, 0xf9, 0x54, 0xb3, 0xd4, 0xc1, 0x89, 0xcc, 0x4c, + 0x83, 0xb9, 0x19, 0x81, 0x3e, 0x39, 0xa2, 0x0f, 0xf6, 0xa9, 0x95, 0xfc, 0x28, 0x0e, 0x39, 0xa7, + 0xec, 0x9b, 0xf7, 0x8b, 0x89, 0x73, 0x90, 0xe1, 0x95, 0x0d, 0xfb, 0x64, 0x82, 0x8f, 0x02, 0xbb, + 0x7a, 0x55, 0xc8, 0xf5, 0xb1, 0xa5, 0x50, 0x9f, 0xc9, 0xc2, 0xa4, 0x33, 0xbe, 0xf6, 0x32, 0x14, + 0x3c, 0x1f, 0xaf, 0x10, 0x37, 0xba, 0x5b, 0x7f, 0x53, 0x88, 0x55, 0xb3, 0x1f, 0x7d, 0x72, 0x29, + 0xb9, 0x8b, 0xdf, 0x23, 0x37, 0x4c, 0xaa, 0xd7, 0x1a, 0xf5, 0xda, 0x3d, 0x21, 0x5e, 0x2d, 0x7c, + 0xf4, 0xc9, 0xa5, 0xac, 0x84, 0x69, 0x13, 0xe6, 0xda, 0x3d, 0x28, 0x8f, 0x1d, 0x8c, 0xff, 0x42, + 0x23, 0x28, 0x6d, 0x1d, 0xee, 0xef, 0x6c, 0xd7, 0x36, 0x9a, 0x75, 0xf9, 0x68, 0xaf, 0x59, 0x17, + 0xe2, 0xe8, 0x71, 0x58, 0xda, 0xd9, 0x7e, 0xbd, 0xd1, 0x94, 0x6b, 0x3b, 0xdb, 0xf5, 0xdd, 0xa6, + 0xbc, 0xd1, 0x6c, 0x6e, 0xd4, 0xee, 0x09, 0x89, 0xf5, 0xdf, 0x14, 0xa0, 0xbc, 0xb1, 0x59, 0xdb, + 0x26, 0xb5, 0x9d, 0xda, 0x56, 0xa8, 0x7b, 0xa8, 0x41, 0x8a, 0x22, 0xba, 0x53, 0x3f, 0x47, 0xae, + 0x4e, 0xef, 0xd2, 0xa1, 0x3b, 0x90, 0xa6, 0x60, 0x2f, 0x9a, 0xfe, 0x7d, 0x72, 0x75, 0x46, 0xdb, + 0x8e, 0x4c, 0x86, 0x5e, 0xa7, 0xa9, 0x1f, 0x2c, 0x57, 0xa7, 0x77, 0xf1, 0xd0, 0x0e, 0x64, 0x6d, + 0x2c, 0x6e, 0xd6, 0xa7, 0xbf, 0xd5, 0x99, 0xed, 0x30, 
0xb2, 0x34, 0x86, 0x99, 0x4e, 0xff, 0x96, + 0xb9, 0x3a, 0xa3, 0xbf, 0x87, 0xb6, 0x21, 0xc3, 0x11, 0x92, 0x19, 0x9f, 0xf1, 0x56, 0x67, 0xb5, + 0xb5, 0x90, 0x04, 0x79, 0x17, 0x8d, 0x9e, 0xfd, 0x85, 0x76, 0x35, 0x42, 0xeb, 0x12, 0x3d, 0x80, + 0xa2, 0x1f, 0x75, 0x89, 0xf6, 0xa9, 0x70, 0x35, 0x62, 0x03, 0x8d, 0xe8, 0xf7, 0x43, 0x30, 0xd1, + 0x3e, 0x1d, 0xae, 0x46, 0xec, 0xa7, 0xa1, 0x77, 0x60, 0x71, 0x12, 0x22, 0x89, 0xfe, 0x25, 0x71, + 0x75, 0x8e, 0x0e, 0x1b, 0xea, 0x03, 0x0a, 0x80, 0x56, 0xe6, 0xf8, 0xb0, 0xb8, 0x3a, 0x4f, 0xc3, + 0x0d, 0x75, 0xa0, 0x3c, 0x0e, 0x57, 0x44, 0xfd, 0xd0, 0xb8, 0x1a, 0xb9, 0xf9, 0xc6, 0xde, 0xe2, + 0xaf, 0xdd, 0xa3, 0x7e, 0x78, 0x5c, 0x8d, 0xdc, 0x8b, 0x43, 0x87, 0x00, 0x9e, 0xda, 0x33, 0xc2, + 0x87, 0xc8, 0xd5, 0x28, 0x5d, 0x39, 0xa4, 0xc3, 0x52, 0x50, 0x51, 0x3a, 0xcf, 0x77, 0xc9, 0xd5, + 0xb9, 0x9a, 0x75, 0xc4, 0x9e, 0xfd, 0xe5, 0x65, 0xb4, 0xef, 0x94, 0xab, 0x11, 0xbb, 0x76, 0x9b, + 0xf5, 0x4f, 0xbf, 0x58, 0x89, 0x7f, 0xf6, 0xc5, 0x4a, 0xfc, 0x6f, 0x5f, 0xac, 0xc4, 0x3f, 0xfe, + 0x72, 0x25, 0xf6, 0xd9, 0x97, 0x2b, 0xb1, 0xbf, 0x7c, 0xb9, 0x12, 0xfb, 0xde, 0xb3, 0x27, 0xaa, + 0xd5, 0x1d, 0xb6, 0xd6, 0xda, 0x5a, 0xff, 0xba, 0xf7, 0x2f, 0x2b, 0x41, 0x7f, 0xa3, 0x69, 0x65, + 0x68, 0x40, 0xbd, 0xf1, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xce, 0x0a, 0x41, 0x94, 0x66, 0x33, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -7224,44 +7211,11 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x42 } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.GasUsed != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) - i-- - dAtA[i] = 0x30 - } if m.GasWanted != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) i-- dAtA[i] = 0x28 } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 - } - if len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) - i-- - dAtA[i] = 0x1a - } if len(m.Data) > 0 { i -= len(m.Data) copy(dAtA[i:], m.Data) @@ -7842,6 +7796,18 @@ func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RetainHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) + i-- + dAtA[i] = 0x30 + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x2a + } if m.ConsensusParamUpdates != nil { { size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) @@ -9567,26 +9533,9 @@ func (m *ResponseCheckTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.Log) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Info) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } if m.GasWanted != 0 { n += 1 + sovTypes(uint64(m.GasWanted)) } - if m.GasUsed != 0 { - n += 1 + sovTypes(uint64(m.GasUsed)) - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } l = len(m.Codespace) if l > 0 { n += 1 + l + 
sovTypes(uint64(l)) @@ -9871,6 +9820,13 @@ func (m *ResponseFinalizeBlock) Size() (n int) { l = m.ConsensusParamUpdates.Size() n += 1 + l + sovTypes(uint64(l)) } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.RetainHeight != 0 { + n += 1 + sovTypes(uint64(m.RetainHeight)) + } return n } @@ -15381,70 +15337,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { m.Data = []byte{} } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Log = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) @@ -15464,59 +15356,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { break } } - case 6: - if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) - } - m.GasUsed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GasUsed |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) @@ -17344,6 +17183,59 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) + } + m.RetainHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RetainHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index ac04e40275..747d95750a 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -295,7 +295,6 @@ func (app *CounterApplication) CheckTx(_ context.Context, req *abci.RequestCheck if txValue != uint64(app.mempoolTxCount) { return &abci.ResponseCheckTx{ Code: code.CodeTypeBadNonce, - Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue), }, nil } app.mempoolTxCount++ diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index 5fc2b9fcf7..ca649ec3ad 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -56,7 +56,6 @@ func (env *Environment) BroadcastTxSync(ctx context.Context, req *coretypes.Requ return &coretypes.ResultBroadcastTx{ Code: r.Code, Data: r.Data, - Log: r.Log, Codespace: r.Codespace, MempoolError: r.MempoolError, Hash: req.Tx.Hash(), diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index c16c9c2ed2..6e3579c8dd 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -166,7 +166,7 @@ message RequestFinalizeBlock { repeated bytes txs = 1; CommitInfo decided_last_commit = 2 [(gogoproto.nullable) = false]; repeated Misbehavior byzantine_validators = 3 [(gogoproto.nullable) = false]; - // hash is the merkle root hash of the fields of the decided block. 
+ // hash is the merkle root hash of the fields of the proposed block. bytes hash = 4; int64 height = 5; google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; @@ -251,11 +251,7 @@ message ResponseBeginBlock { message ResponseCheckTx { uint32 code = 1; bytes data = 2; - string log = 3; // nondeterministic - string info = 4; // nondeterministic int64 gas_wanted = 5; - int64 gas_used = 6; - repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; string codespace = 8; string sender = 9; int64 priority = 10; @@ -264,6 +260,8 @@ message ResponseCheckTx { // ABCI applications creating a ResponseCheckTX should not set mempool_error. string mempool_error = 11; + + reserved 3, 4, 6, 7; // see https://github.com/tendermint/tendermint/issues/8543 } message ResponseDeliverTx { @@ -368,6 +366,8 @@ message ResponseFinalizeBlock { repeated ExecTxResult tx_results = 2; repeated ValidatorUpdate validator_updates = 3 [(gogoproto.nullable) = false]; tendermint.types.ConsensusParams consensus_param_updates = 4; + bytes app_hash = 5; + int64 retain_height = 6; } //---------------------------------------- @@ -390,7 +390,7 @@ message ExtendedCommitInfo { } // Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// ResponseBeginBlock, ResponseEndBlock and ResponseDeliverTx. // Later, transactions may be queried using these events. message Event { string type = 1; diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index a6aebdb14b..2252275860 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -88,7 +88,6 @@ func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes. 
return &coretypes.ResultBroadcastTx{ Code: c.Code, Data: c.Data, - Log: c.Log, Codespace: c.Codespace, Hash: tx.Hash(), }, nil @@ -107,7 +106,6 @@ func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.R return &coretypes.ResultBroadcastTx{ Code: c.Code, Data: c.Data, - Log: c.Log, Codespace: c.Codespace, Hash: tx.Hash(), }, nil diff --git a/rpc/coretypes/responses.go b/rpc/coretypes/responses.go index 51d988f3ad..8fda326830 100644 --- a/rpc/coretypes/responses.go +++ b/rpc/coretypes/responses.go @@ -233,7 +233,6 @@ type ResultConsensusState struct { type ResultBroadcastTx struct { Code uint32 `json:"code"` Data bytes.HexBytes `json:"data"` - Log string `json:"log"` Codespace string `json:"codespace"` MempoolError string `json:"mempool_error"` Hash bytes.HexBytes `json:"hash"` diff --git a/spec/abci++/abci++_app_requirements_002_draft.md b/spec/abci++/abci++_app_requirements_002_draft.md index ff9df2c56a..3203f7a957 100644 --- a/spec/abci++/abci++_app_requirements_002_draft.md +++ b/spec/abci++/abci++_app_requirements_002_draft.md @@ -436,9 +436,6 @@ might have a different *CheckTxState* values when they receive it and check thei via `CheckTx`. Tendermint ignores this value in `ResponseCheckTx`. -`Events` include any events for the execution, though since the transaction has not -been committed yet, they are effectively ignored by Tendermint. - From v0.35.x on, there is a `Priority` field in `ResponseCheckTx` that can be used to explicitly prioritize transactions in the mempool for inclusion in a block proposal. 
diff --git a/spec/abci++/abci++_methods_002_draft.md b/spec/abci++/abci++_methods_002_draft.md index 4113a0c585..4eb1bb295e 100644 --- a/spec/abci++/abci++_methods_002_draft.md +++ b/spec/abci++/abci++_methods_002_draft.md @@ -136,11 +136,7 @@ title: Methods |------------|-------------------------------------------------------------|-----------------------------------------------------------------------|--------------| | code | uint32 | Response code. | 1 | | data | bytes | Result bytes, if any. | 2 | - | log | string | The output of the application's logger. **May be non-deterministic.** | 3 | - | info | string | Additional information. **May be non-deterministic.** | 4 | | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | - | gas_used | int64 | Amount of gas consumed by transaction. | 6 | - | events | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing transactions (eg. by account). | 7 | | codespace | string | Namespace for the `code`. | 8 | | sender | string | The transaction's sender (e.g. the signer) | 9 | | priority | int64 | The transaction's priority (for mempool ordering) | 10 | diff --git a/spec/abci/apps.md b/spec/abci/apps.md index d6ec198323..5ee93e613c 100644 --- a/spec/abci/apps.md +++ b/spec/abci/apps.md @@ -13,7 +13,7 @@ Here we cover the following components of ABCI applications: and the differences between `CheckTx` and `DeliverTx`. - [Transaction Results](#transaction-results) - rules around transaction results and validity -- [Validator Set Updates](#validator-updates) - how validator sets are +- [Validator Set Updates](#updating-the-validator-set) - how validator sets are changed during `InitChain` and `EndBlock` - [Query](#query) - standards for using the `Query` method and proofs about the application state @@ -204,9 +204,6 @@ not broadcasted to other peers and not included in a proposal block. `Data` contains the result of the CheckTx transaction execution, if any. 
It is semantically meaningless to Tendermint. -`Events` include any events for the execution, though since the transaction has not -been committed yet, they are effectively ignored by Tendermint. - ### DeliverTx DeliverTx is the workhorse of the blockchain. Tendermint sends the diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 64ccd2e28f..203a60f6aa 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -162,7 +162,6 @@ func (app *Application) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*a if err != nil { return &abci.ResponseCheckTx{ Code: code.CodeTypeEncodingError, - Log: err.Error(), }, nil } return &abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}, nil From b0ec8a0ea7d8fe7f4fcaebfe973a8d20feb5ee80 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Wed, 25 May 2022 23:57:23 +0200 Subject: [PATCH 062/203] mempool: migrate rechecktx to be a consensus parameter (#8514) --- CHANGELOG_PENDING.md | 3 +- UPGRADING.md | 13 ++ abci/types/types.pb.go | 270 +++++++++++++++++++----- config/config.go | 8 +- config/toml.go | 6 +- docs/nodes/running-in-production.md | 8 - docs/tendermint-core/mempool/config.md | 19 +- internal/blocksync/reactor_test.go | 1 + internal/consensus/replay_stubs.go | 1 + internal/mempool/mempool.go | 3 +- internal/mempool/mempool_test.go | 12 +- internal/mempool/mocks/mempool.go | 10 +- internal/mempool/reactor_test.go | 4 +- internal/mempool/types.go | 1 + internal/state/execution.go | 1 + internal/state/execution_test.go | 5 + internal/state/validation_test.go | 3 + proto/tendermint/blocksync/types.pb.go | 30 ++- proto/tendermint/consensus/types.pb.go | 50 ++++- proto/tendermint/consensus/wal.pb.go | 25 ++- proto/tendermint/crypto/keys.pb.go | 5 +- proto/tendermint/crypto/proof.pb.go | 25 ++- proto/tendermint/libs/bits/types.pb.go | 5 +- proto/tendermint/mempool/types.pb.go | 10 +- proto/tendermint/p2p/conn.pb.go | 25 ++- proto/tendermint/p2p/pex.pb.go | 20 +- proto/tendermint/p2p/types.pb.go | 25 ++- 
proto/tendermint/privval/types.pb.go | 55 ++++- proto/tendermint/state/types.pb.go | 25 ++- proto/tendermint/statesync/types.pb.go | 45 +++- proto/tendermint/types/block.pb.go | 5 +- proto/tendermint/types/canonical.pb.go | 25 ++- proto/tendermint/types/events.pb.go | 5 +- proto/tendermint/types/evidence.pb.go | 20 +- proto/tendermint/types/params.pb.go | 188 ++++++++++++----- proto/tendermint/types/params.proto | 4 + proto/tendermint/types/types.pb.go | 75 +++++-- proto/tendermint/types/validator.pb.go | 15 +- proto/tendermint/version/types.pb.go | 5 +- scripts/confix/plan.go | 6 + scripts/confix/testdata/diff-35-36.txt | 1 + scripts/confix/testdata/v36-config.toml | 6 +- spec/abci/apps.md | 14 +- spec/core/data_structures.md | 95 ++++----- types/params.go | 7 + types/params_test.go | 2 + 46 files changed, 882 insertions(+), 304 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index d31ff9bc7e..9350d70b76 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -59,13 +59,14 @@ Special thanks to external contributors on this release: - [rpc] [\#7701] Add `ApplicationInfo` to `status` rpc call which contains the application version. (@jonasbostoen) - [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of non-determinstic app hash or reverting an upgrade. - [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish) -- [consensus] \#7354 add a new `synchrony` field to the `ConsensusParameter` struct for controlling the parameters of the proposer-based timestamp algorithm. (@williambanfield) +- [consensus] \#7354 add a new `synchrony` field to the `ConsensusParams` struct for controlling the parameters of the proposer-based timestamp algorithm. 
(@williambanfield) - [consensus] \#7376 Update the proposal logic per the Propose-based timestamps specification so that the proposer will wait for the previous block time to occur before proposing the next block. (@williambanfield) - [consensus] \#7391 Use the proposed block timestamp as the proposal timestamp. Update the block validation logic to ensure that the proposed block's timestamp matches the timestamp in the proposal message. (@williambanfield) - [consensus] \#7415 Update proposal validation logic to Prevote nil if a proposal does not meet the conditions for Timelyness per the proposer-based timestamp specification. (@anca) - [consensus] \#7382 Update block validation to no longer require the block timestamp to be the median of the timestamps of the previous commit. (@anca) - [consensus] \#7711 Use the proposer timestamp for the first height instead of the genesis time. Chains will still start consensus at the genesis time. (@anca) - [cli] \#8281 Add a tool to update old config files to the latest version. (@creachadair) +- [consenus] \#8514 move `RecheckTx` from the local node mempool config to a global `ConsensusParams` field in `BlockParams` (@cmwaters) ### IMPROVEMENTS diff --git a/UPGRADING.md b/UPGRADING.md index 93cd6c20fe..324a891d5b 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -126,6 +126,19 @@ lays out the reasoning for the changes as well as [RFC 009](https://tinyurl.com/rfc009) for a discussion of the complexities of upgrading consensus parameters. +### RecheckTx Parameter Change + +`RecheckTx` was previously enabled by the `recheck` parameter in the mempool +section of the `config.toml`. +Setting it to true made Tendermint invoke another `CheckTx` ABCI call on +each transaction remaining in the mempool following the execution of a block. 
+Similar to the timeout parameter changes, this parameter makes more sense as a +network-wide coordinated variable so that applications can be written knowing +either all nodes agree on whether to run `RecheckTx`. + +Applications can turn on `RecheckTx` by altering the `ConsensusParams` in the +`FinalizeBlock` ABCI response. + ### CLI Changes The functionality around resetting a node has been extended to make it safer. The diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index c2a8fa4261..47e3462985 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -10798,7 +10798,10 @@ func (m *Request) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10880,7 +10883,10 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10930,7 +10936,10 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11082,7 +11091,10 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11320,7 +11332,10 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if 
(iNdEx + skippy) > l { @@ -11475,7 +11490,10 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11659,7 +11677,10 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11762,7 +11783,10 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11846,7 +11870,10 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11915,7 +11942,10 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11965,7 +11995,10 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12015,7 +12048,10 @@ func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) 
< 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12135,7 +12171,10 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12242,7 +12281,10 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12377,7 +12419,10 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12665,7 +12710,10 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12968,7 +13016,10 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13071,7 +13122,10 @@ func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13242,7 +13296,10 @@ func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13545,7 +13602,10 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14295,7 +14355,10 @@ func (m *Response) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14377,7 +14440,10 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14459,7 +14525,10 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14509,7 +14578,10 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14695,7 +14767,10 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14849,7 +14924,10 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { if err != nil { 
return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15156,7 +15234,10 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15240,7 +15321,10 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15477,7 +15561,10 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15748,7 +15835,10 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15902,7 +15992,10 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16005,7 +16098,10 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16089,7 +16185,10 @@ func (m *ResponseListSnapshots) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16158,7 +16257,10 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16242,7 +16344,10 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16419,7 +16524,10 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16641,7 +16749,10 @@ func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16848,7 +16959,10 @@ func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16932,7 +17046,10 @@ func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return 
ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17001,7 +17118,10 @@ func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17242,7 +17362,10 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17345,7 +17468,10 @@ func (m *CommitInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17448,7 +17574,10 @@ func (m *ExtendedCommitInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17564,7 +17693,10 @@ func (m *Event) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17698,7 +17830,10 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17969,7 +18104,10 @@ func (m *ExecTxResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes 
+ } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18124,7 +18262,10 @@ func (m *TxResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18227,7 +18368,10 @@ func (m *TxRecord) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18330,7 +18474,10 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18432,7 +18579,10 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18535,7 +18685,10 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18672,7 +18825,10 @@ func (m *ExtendedVoteInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18845,7 +19001,10 @@ func (m *Misbehavior) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return 
ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -19020,7 +19179,10 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/config/config.go b/config/config.go index c1fa4223a1..7d0a4915e6 100644 --- a/config/config.go +++ b/config/config.go @@ -734,9 +734,10 @@ func TestP2PConfig() *P2PConfig { // MempoolConfig defines the configuration options for the Tendermint mempool. type MempoolConfig struct { - RootDir string `mapstructure:"home"` - Recheck bool `mapstructure:"recheck"` - Broadcast bool `mapstructure:"broadcast"` + RootDir string `mapstructure:"home"` + + // Whether to broadcast transactions to other nodes + Broadcast bool `mapstructure:"broadcast"` // Maximum number of transactions in the mempool Size int `mapstructure:"size"` @@ -783,7 +784,6 @@ type MempoolConfig struct { // DefaultMempoolConfig returns a default configuration for the Tendermint mempool. func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ - Recheck: true, Broadcast: true, // Each signature verification takes .5ms, Size reduced until we implement // ABCI Recheck diff --git a/config/toml.go b/config/toml.go index 578718ca50..0fac73cdda 100644 --- a/config/toml.go +++ b/config/toml.go @@ -355,7 +355,11 @@ recv-rate = {{ .P2P.RecvRate }} ####################################################### [mempool] -recheck = {{ .Mempool.Recheck }} +# recheck has been moved from a config option to a global +# consensus param in v0.36 +# See https://github.com/tendermint/tendermint/issues/8244 for more information. 
+ +# Set true to broadcast transactions in the mempool to other nodes broadcast = {{ .Mempool.Broadcast }} # Maximum number of transactions in the mempool diff --git a/docs/nodes/running-in-production.md b/docs/nodes/running-in-production.md index d8d73689a5..40ad26b5ed 100644 --- a/docs/nodes/running-in-production.md +++ b/docs/nodes/running-in-production.md @@ -295,14 +295,6 @@ flush-throttle-timeout=10 max-packet-msg-payload-size=10240 # 10KB ``` -- `mempool.recheck` - -After every block, Tendermint rechecks every transaction left in the -mempool to see if transactions committed in that block affected the -application state, so some of the transactions left may become invalid. -If that does not apply to your application, you can disable it by -setting `mempool.recheck=false`. - - `mempool.broadcast` Setting this to false will stop the mempool from relaying transactions diff --git a/docs/tendermint-core/mempool/config.md b/docs/tendermint-core/mempool/config.md index 4e8a9ec73d..4a904ef253 100644 --- a/docs/tendermint-core/mempool/config.md +++ b/docs/tendermint-core/mempool/config.md @@ -14,9 +14,8 @@ Config: ```toml [mempool] -recheck = true +# Set true to broadcast transactions in the mempool to other nodes broadcast = true -wal-dir = "" # Maximum number of transactions in the mempool size = 5000 @@ -44,20 +43,6 @@ max-tx-bytes = 1048576 max-batch-bytes = 0 ``` - - -## Recheck - -Recheck determines if the mempool rechecks all pending -transactions after a block was committed. Once a block -is committed, the mempool removes all valid transactions -that were successfully included in the block. - -If `recheck` is true, then it will rerun CheckTx on -all remaining transactions with the new block state. 
- ## Broadcast Determines whether this node gossips any valid transactions @@ -92,7 +77,7 @@ Cache size determines the size of the cache holding transactions we have already ## Keep Invalid Transactions In Cache -Keep invalid transactions in cache determines wether a transaction in the cache, which is invalid, should be evicted. An invalid transaction here may mean that the transaction may rely on a different tx that has not been included in a block. +Keep invalid transactions in cache determines wether a transaction in the cache, which is invalid, should be evicted. An invalid transaction here may mean that the transaction may rely on a different tx that has not been included in a block. ## Max Transaction Bytes diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 0477eb45df..141eaf7ece 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -126,6 +126,7 @@ func makeReactor( mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) eventbus := eventbus.NewDefault(logger) diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 3cd5bdac03..407ec925e0 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -35,6 +35,7 @@ func (emptyMempool) Update( _ []*abci.ExecTxResult, _ mempool.PreCheckFunc, _ mempool.PostCheckFunc, + _ bool, ) error { return nil } diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index 629fa0bdae..ef85af0881 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -420,6 +420,7 @@ func (txmp *TxMempool) Update( execTxResult []*abci.ExecTxResult, newPreFn PreCheckFunc, newPostFn PostCheckFunc, + recheck bool, ) error { txmp.height = blockHeight txmp.notifiedTxsAvailable = false @@ -452,7 +453,7 @@ func (txmp *TxMempool) Update( // initiate re-CheckTx per remaining transaction or notify that remaining // transactions are left. 
if txmp.Size() > 0 { - if txmp.config.Recheck { + if recheck { txmp.logger.Debug( "executing re-CheckTx for all remaining transactions", "num_txs", txmp.Size(), diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index 946377b1cd..33b6dd8aae 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -173,7 +173,7 @@ func TestTxMempool_TxsAvailable(t *testing.T) { // commit half the transactions and ensure we fire an event txmp.Lock() - require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil, true)) txmp.Unlock() ensureTxFire() ensureNoTxFire() @@ -210,7 +210,7 @@ func TestTxMempool_Size(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil, true)) txmp.Unlock() require.Equal(t, len(rawTxs)/2, txmp.Size()) @@ -243,7 +243,7 @@ func TestTxMempool_Flush(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil, true)) txmp.Unlock() txmp.Flush() @@ -501,7 +501,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(ctx, height, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, height, reapedTxs, responses, nil, nil, true)) txmp.Unlock() height++ @@ -547,7 +547,7 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(ctx, txmp.height+1, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, txmp.height+1, reapedTxs, responses, nil, nil, true)) txmp.Unlock() require.Equal(t, 95, txmp.Size()) @@ -573,7 +573,7 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(ctx, txmp.height+10, reapedTxs, 
responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, txmp.height+10, reapedTxs, responses, nil, nil, true)) txmp.Unlock() require.GreaterOrEqual(t, txmp.Size(), 45) diff --git a/internal/mempool/mocks/mempool.go b/internal/mempool/mocks/mempool.go index 454ca602f1..e1f6994d28 100644 --- a/internal/mempool/mocks/mempool.go +++ b/internal/mempool/mocks/mempool.go @@ -157,13 +157,13 @@ func (_m *Mempool) Unlock() { _m.Called() } -// Update provides a mock function with given fields: ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn -func (_m *Mempool) Update(ctx context.Context, blockHeight int64, blockTxs types.Txs, txResults []*abcitypes.ExecTxResult, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc) error { - ret := _m.Called(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn) +// Update provides a mock function with given fields: ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn, recheck +func (_m *Mempool) Update(ctx context.Context, blockHeight int64, blockTxs types.Txs, txResults []*abcitypes.ExecTxResult, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc, recheck bool) error { + ret := _m.Called(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn, recheck) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, int64, types.Txs, []*abcitypes.ExecTxResult, mempool.PreCheckFunc, mempool.PostCheckFunc) error); ok { - r0 = rf(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn) + if rf, ok := ret.Get(0).(func(context.Context, int64, types.Txs, []*abcitypes.ExecTxResult, mempool.PreCheckFunc, mempool.PostCheckFunc, bool) error); ok { + r0 = rf(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn, recheck) } else { r0 = ret.Error(0) } diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index 351315bae7..034c5eaa26 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -253,7 +253,7 @@ func TestReactorConcurrency(t 
*testing.T) { deliverTxResponses[i] = &abci.ExecTxResult{Code: 0} } - require.NoError(t, mempool.Update(ctx, 1, convertTex(txs), deliverTxResponses, nil, nil)) + require.NoError(t, mempool.Update(ctx, 1, convertTex(txs), deliverTxResponses, nil, nil, true)) }() // 1. submit a bunch of txs @@ -267,7 +267,7 @@ func TestReactorConcurrency(t *testing.T) { mempool.Lock() defer mempool.Unlock() - err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ExecTxResult, 0), nil, nil) + err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ExecTxResult, 0), nil, nil, true) require.NoError(t, err) }() } diff --git a/internal/mempool/types.go b/internal/mempool/types.go index a51d286e28..481ced3fab 100644 --- a/internal/mempool/types.go +++ b/internal/mempool/types.go @@ -71,6 +71,7 @@ type Mempool interface { txResults []*abci.ExecTxResult, newPreFn PreCheckFunc, newPostFn PostCheckFunc, + recheck bool, ) error // FlushAppConn flushes the mempool connection to ensure async callback calls diff --git a/internal/state/execution.go b/internal/state/execution.go index 1d87104d41..cc3e63d7ae 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -373,6 +373,7 @@ func (blockExec *BlockExecutor) Commit( txResults, TxPreCheckForState(state), TxPostCheckForState(state), + state.ConsensusParams.ABCI.RecheckTx, ) return res.Data, res.RetainHeight, err diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index 5fb4dc297f..79557b7876 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -64,6 +64,7 @@ func TestApplyBlock(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mp, sm.EmptyEvidencePool{}, blockStore, eventBus, sm.NopMetrics()) @@ -125,6 +126,7 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, 
mock.Anything).Return(nil) eventBus := eventbus.NewDefault(logger) @@ -250,6 +252,7 @@ func TestFinalizeBlockByzantineValidators(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) eventBus := eventbus.NewDefault(logger) @@ -511,6 +514,7 @@ func TestFinalizeBlockValidatorUpdates(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{}) @@ -645,6 +649,7 @@ func TestEmptyPrepareProposal(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{}) diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index 0f43db5eb7..9e4cd1ec40 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -52,6 +52,7 @@ func TestValidateBlockHeader(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) blockStore := store.NewBlockStore(dbm.NewMemDB()) @@ -158,6 +159,7 @@ func TestValidateBlockCommit(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) blockStore := store.NewBlockStore(dbm.NewMemDB()) @@ -314,6 +316,7 @@ func TestValidateBlockEvidence(t *testing.T) { mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) state.ConsensusParams.Evidence.MaxBytes = 1000 diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index 8757f8ab3e..910ccea476 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -927,7 +927,10 @@ func (m *BlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 
0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -996,7 +999,10 @@ func (m *NoBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1118,7 +1124,10 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1168,7 +1177,10 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1256,7 +1268,10 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1481,7 +1496,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/types.pb.go b/proto/tendermint/consensus/types.pb.go index 4ae9abc9e1..d542d929e3 100644 --- a/proto/tendermint/consensus/types.pb.go +++ b/proto/tendermint/consensus/types.pb.go @@ -1935,7 +1935,10 @@ func (m *NewRoundStep) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ 
-2112,7 +2115,10 @@ func (m *NewValidBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2195,7 +2201,10 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2316,7 +2325,10 @@ func (m *ProposalPOL) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2437,7 +2449,10 @@ func (m *BlockPart) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2523,7 +2538,10 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2649,7 +2667,10 @@ func (m *HasVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2789,7 +2810,10 @@ func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2962,7 +2986,10 
@@ func (m *VoteSetBits) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3327,7 +3354,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/wal.pb.go b/proto/tendermint/consensus/wal.pb.go index fd80819cd0..86ff1be01f 100644 --- a/proto/tendermint/consensus/wal.pb.go +++ b/proto/tendermint/consensus/wal.pb.go @@ -921,7 +921,10 @@ func (m *MsgInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1061,7 +1064,10 @@ func (m *TimeoutInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1130,7 +1136,10 @@ func (m *EndHeight) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1320,7 +1329,10 @@ func (m *WALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1439,7 +1451,10 @@ func (m *TimedWALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) 
|| (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/keys.pb.go b/proto/tendermint/crypto/keys.pb.go index 24c6c1b1ba..8ff4c4a4fe 100644 --- a/proto/tendermint/crypto/keys.pb.go +++ b/proto/tendermint/crypto/keys.pb.go @@ -687,7 +687,10 @@ func (m *PublicKey) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthKeys + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthKeys } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/proof.pb.go b/proto/tendermint/crypto/proof.pb.go index 82fb943fcd..97350c64c7 100644 --- a/proto/tendermint/crypto/proof.pb.go +++ b/proto/tendermint/crypto/proof.pb.go @@ -820,7 +820,10 @@ func (m *Proof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -940,7 +943,10 @@ func (m *ValueOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1086,7 +1092,10 @@ func (m *DominoOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1236,7 +1245,10 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1320,7 +1332,10 @@ func (m 
*ProofOps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/libs/bits/types.pb.go b/proto/tendermint/libs/bits/types.pb.go index c0ebcb9760..ad87f854f4 100644 --- a/proto/tendermint/libs/bits/types.pb.go +++ b/proto/tendermint/libs/bits/types.pb.go @@ -307,7 +307,10 @@ func (m *BitArray) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/mempool/types.pb.go b/proto/tendermint/mempool/types.pb.go index 11e259551d..3487652bc8 100644 --- a/proto/tendermint/mempool/types.pb.go +++ b/proto/tendermint/mempool/types.pb.go @@ -370,7 +370,10 @@ func (m *Txs) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -455,7 +458,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/conn.pb.go b/proto/tendermint/p2p/conn.pb.go index 47a3bb0cd8..7c26d3fcd4 100644 --- a/proto/tendermint/p2p/conn.pb.go +++ b/proto/tendermint/p2p/conn.pb.go @@ -723,7 +723,10 @@ func (m *PacketPing) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -773,7 
+776,10 @@ func (m *PacketPong) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -896,7 +902,10 @@ func (m *PacketMsg) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1051,7 +1060,10 @@ func (m *Packet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1168,7 +1180,10 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go index 15ccce15e5..25d636e43d 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/proto/tendermint/p2p/pex.pb.go @@ -587,7 +587,10 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -637,7 +640,10 @@ func (m *PexRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -721,7 +727,10 @@ func (m *PexResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 
{ + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -841,7 +850,10 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go index bffa6884fe..a0e647ee7b 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -917,7 +917,10 @@ func (m *ProtocolVersion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1227,7 +1230,10 @@ func (m *NodeInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1341,7 +1347,10 @@ func (m *NodeInfoOther) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1493,7 +1502,10 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1666,7 +1678,10 @@ func (m *PeerAddressInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return 
ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/privval/types.pb.go b/proto/tendermint/privval/types.pb.go index 56b35e7271..da30f75270 100644 --- a/proto/tendermint/privval/types.pb.go +++ b/proto/tendermint/privval/types.pb.go @@ -1708,7 +1708,10 @@ func (m *RemoteSignerError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1790,7 +1793,10 @@ func (m *PubKeyRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1909,7 +1915,10 @@ func (m *PubKeyResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2027,7 +2036,10 @@ func (m *SignVoteRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2146,7 +2158,10 @@ func (m *SignedVoteResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2264,7 +2279,10 @@ func (m *SignProposalRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l 
{ @@ -2383,7 +2401,10 @@ func (m *SignedProposalResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2433,7 +2454,10 @@ func (m *PingRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2483,7 +2507,10 @@ func (m *PingResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2813,7 +2840,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2930,7 +2960,10 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index af5c64ecf8..8db184011b 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -944,7 +944,10 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1049,7 +1052,10 @@ func (m *ValidatorsInfo) Unmarshal(dAtA []byte) 
error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1151,7 +1157,10 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1266,7 +1275,10 @@ func (m *Version) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1732,7 +1744,10 @@ func (m *State) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go index 5541c28037..93e844730a 100644 --- a/proto/tendermint/statesync/types.pb.go +++ b/proto/tendermint/statesync/types.pb.go @@ -1740,7 +1740,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1790,7 +1793,10 @@ func (m *SnapshotsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1965,7 +1971,10 @@ func (m *SnapshotsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2072,7 +2081,10 @@ func (m *ChunkRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2233,7 +2245,10 @@ func (m *ChunkResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2302,7 +2317,10 @@ func (m *LightBlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2388,7 +2406,10 @@ func (m *LightBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2457,7 +2478,10 @@ func (m *ParamsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2559,7 +2583,10 @@ func (m *ParamsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/block.pb.go b/proto/tendermint/types/block.pb.go index 
f2077aad8b..aacb90fab7 100644 --- a/proto/tendermint/types/block.pb.go +++ b/proto/tendermint/types/block.pb.go @@ -389,7 +389,10 @@ func (m *Block) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthBlock + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthBlock } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/canonical.pb.go b/proto/tendermint/types/canonical.pb.go index 50c0c84fa2..e08342a460 100644 --- a/proto/tendermint/types/canonical.pb.go +++ b/proto/tendermint/types/canonical.pb.go @@ -920,7 +920,10 @@ func (m *CanonicalBlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1023,7 +1026,10 @@ func (m *CanonicalPartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1232,7 +1238,10 @@ func (m *CanonicalProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1422,7 +1431,10 @@ func (m *CanonicalVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1558,7 +1570,10 @@ func (m *CanonicalVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return 
ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/events.pb.go b/proto/tendermint/types/events.pb.go index a9aa26a799..1c49aef647 100644 --- a/proto/tendermint/types/events.pb.go +++ b/proto/tendermint/types/events.pb.go @@ -285,7 +285,10 @@ func (m *EventDataRoundState) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvents } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/evidence.pb.go b/proto/tendermint/types/evidence.pb.go index 052fb0e6b7..746d853130 100644 --- a/proto/tendermint/types/evidence.pb.go +++ b/proto/tendermint/types/evidence.pb.go @@ -827,7 +827,10 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1020,7 +1023,10 @@ func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1211,7 +1217,10 @@ func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1295,7 +1304,10 @@ func (m *EvidenceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if 
(iNdEx + skippy) > l { diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index 764d7b385a..a13ff77236 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -586,6 +586,9 @@ type ABCIParams struct { // passed to the application for validation in VerifyVoteExtension and given // to the application to use when proposing a block during PrepareProposal. VoteExtensionsEnableHeight int64 `protobuf:"varint,1,opt,name=vote_extensions_enable_height,json=voteExtensionsEnableHeight,proto3" json:"vote_extensions_enable_height,omitempty"` + // Indicates if CheckTx should be called on all the transactions + // remaining in the mempool after a block is executed. + RecheckTx bool `protobuf:"varint,2,opt,name=recheck_tx,json=recheckTx,proto3" json:"recheck_tx,omitempty"` } func (m *ABCIParams) Reset() { *m = ABCIParams{} } @@ -628,6 +631,13 @@ func (m *ABCIParams) GetVoteExtensionsEnableHeight() int64 { return 0 } +func (m *ABCIParams) GetRecheckTx() bool { + if m != nil { + return m.RecheckTx + } + return false +} + func init() { proto.RegisterType((*ConsensusParams)(nil), "tendermint.types.ConsensusParams") proto.RegisterType((*BlockParams)(nil), "tendermint.types.BlockParams") @@ -643,54 +653,55 @@ func init() { func init() { proto.RegisterFile("tendermint/types/params.proto", fileDescriptor_e12598271a686f57) } var fileDescriptor_e12598271a686f57 = []byte{ - // 741 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0x80, 0xe3, 0x26, 0x4d, 0x93, 0x93, 0xa6, 0xa9, 0xe6, 0xde, 0xab, 0x6b, 0x0a, 0x75, 0x8a, - 0x17, 0xa8, 0x12, 0x92, 0x53, 0xb5, 0x42, 0x15, 0x12, 0x3f, 0x6a, 0x92, 0x8a, 0x22, 0x54, 0x40, - 0xa1, 0xb0, 0xe8, 0xc6, 0x1a, 0x27, 0x83, 0x63, 0x35, 0xf6, 0x58, 0x9e, 0x71, 0x14, 0xbf, 0x05, - 0x2b, 0xc4, 0x23, 0xc0, 0x86, 0xe7, 0xe8, 0xb2, 0x4b, 0x56, 0x80, 0xd2, 0x37, 0xe0, 0x09, 0xd0, - 0x8c, 0xc7, 0x4d, 
0x93, 0x52, 0x9a, 0x55, 0x9c, 0x39, 0xdf, 0xe7, 0xe3, 0x39, 0xe7, 0xd8, 0x03, - 0xeb, 0x9c, 0x04, 0x3d, 0x12, 0xf9, 0x5e, 0xc0, 0x1b, 0x3c, 0x09, 0x09, 0x6b, 0x84, 0x38, 0xc2, - 0x3e, 0xb3, 0xc2, 0x88, 0x72, 0x8a, 0x56, 0x27, 0x61, 0x4b, 0x86, 0xd7, 0xfe, 0x75, 0xa9, 0x4b, - 0x65, 0xb0, 0x21, 0xae, 0x52, 0x6e, 0xcd, 0x70, 0x29, 0x75, 0x07, 0xa4, 0x21, 0xff, 0x39, 0xf1, - 0xfb, 0x46, 0x2f, 0x8e, 0x30, 0xf7, 0x68, 0x90, 0xc6, 0xcd, 0xaf, 0x79, 0xa8, 0xb5, 0x68, 0xc0, - 0x48, 0xc0, 0x62, 0xf6, 0x5a, 0x66, 0x40, 0x3b, 0xb0, 0xe8, 0x0c, 0x68, 0xf7, 0x44, 0xd7, 0x36, - 0xb4, 0xcd, 0xca, 0xf6, 0xba, 0x35, 0x9b, 0xcb, 0x6a, 0x8a, 0x70, 0x4a, 0x77, 0x52, 0x16, 0x3d, - 0x82, 0x12, 0x19, 0x7a, 0x3d, 0x12, 0x74, 0x89, 0xbe, 0x20, 0xbd, 0x8d, 0xab, 0xde, 0xbe, 0x22, - 0x94, 0x7a, 0x61, 0xa0, 0xa7, 0x50, 0x1e, 0xe2, 0x81, 0xd7, 0xc3, 0x9c, 0x46, 0x7a, 0x5e, 0xea, - 0x77, 0xaf, 0xea, 0xef, 0x32, 0x44, 0xf9, 0x13, 0x07, 0x3d, 0x84, 0xa5, 0x21, 0x89, 0x98, 0x47, - 0x03, 0xbd, 0x20, 0xf5, 0xfa, 0x1f, 0xf4, 0x14, 0x50, 0x72, 0xc6, 0x8b, 0xdc, 0x2c, 0x09, 0xba, - 0xfd, 0x88, 0x06, 0x89, 0xbe, 0x78, 0x5d, 0xee, 0x37, 0x19, 0x92, 0xe5, 0xbe, 0x70, 0x44, 0x6e, - 0xee, 0xf9, 0x84, 0xc6, 0x5c, 0x2f, 0x5e, 0x97, 0xfb, 0x28, 0x05, 0xb2, 0xdc, 0x8a, 0x47, 0x5b, - 0x50, 0xc0, 0x4e, 0xd7, 0xd3, 0x97, 0xa4, 0x77, 0xe7, 0xaa, 0xb7, 0xd7, 0x6c, 0x3d, 0x57, 0x92, - 0x24, 0xcd, 0x16, 0x54, 0x2e, 0x55, 0x1f, 0xdd, 0x86, 0xb2, 0x8f, 0x47, 0xb6, 0x93, 0x70, 0xc2, - 0x64, 0xbf, 0xf2, 0x9d, 0x92, 0x8f, 0x47, 0x4d, 0xf1, 0x1f, 0xfd, 0x0f, 0x4b, 0x22, 0xe8, 0x62, - 0x26, 0x5b, 0x92, 0xef, 0x14, 0x7d, 0x3c, 0x7a, 0x86, 0x99, 0xf9, 0x45, 0x83, 0x95, 0xe9, 0x5e, - 0xa0, 0xfb, 0x80, 0x04, 0x8b, 0x5d, 0x62, 0x07, 0xb1, 0x6f, 0xcb, 0xa6, 0x66, 0x77, 0xac, 0xf9, - 0x78, 0xb4, 0xe7, 0x92, 0x97, 0xb1, 0x2f, 0x53, 0x33, 0x74, 0x08, 0xab, 0x19, 0x9c, 0xcd, 0x93, - 0x6a, 0xfa, 0x2d, 0x2b, 0x1d, 0x38, 0x2b, 0x1b, 0x38, 0xab, 0xad, 0x80, 0x66, 0xe9, 0xf4, 0x7b, - 0x3d, 0xf7, 0xe9, 0x47, 0x5d, 0xeb, 0xac, 0xa4, 0xf7, 
0xcb, 0x22, 0xd3, 0x9b, 0xc8, 0x4f, 0x6f, - 0xc2, 0x7c, 0x00, 0xb5, 0x99, 0xbe, 0x23, 0x13, 0xaa, 0x61, 0xec, 0xd8, 0x27, 0x24, 0xb1, 0x65, - 0x95, 0x74, 0x6d, 0x23, 0xbf, 0x59, 0xee, 0x54, 0xc2, 0xd8, 0x79, 0x41, 0x92, 0x23, 0xb1, 0x64, - 0x6e, 0x41, 0x75, 0xaa, 0xdf, 0xa8, 0x0e, 0x15, 0x1c, 0x86, 0x76, 0x36, 0x25, 0x62, 0x67, 0x85, - 0x0e, 0xe0, 0x30, 0x54, 0x98, 0x79, 0x0c, 0xcb, 0x07, 0x98, 0xf5, 0x49, 0x4f, 0x09, 0xf7, 0xa0, - 0x26, 0xab, 0x60, 0xcf, 0x16, 0xb8, 0x2a, 0x97, 0x0f, 0xb3, 0x2a, 0x9b, 0x50, 0x9d, 0x70, 0x93, - 0x5a, 0x57, 0x32, 0x4a, 0x14, 0xfc, 0xa3, 0x06, 0xb5, 0x99, 0x09, 0x42, 0x6d, 0xa8, 0xfa, 0x84, - 0x31, 0x59, 0x44, 0x32, 0xc0, 0x89, 0x7a, 0xdd, 0xfe, 0x52, 0xc1, 0x82, 0xac, 0xde, 0xb2, 0xb2, - 0xda, 0x42, 0x42, 0x8f, 0xa1, 0x1c, 0x46, 0xa4, 0xeb, 0xb1, 0xb9, 0x7a, 0x90, 0xde, 0x61, 0x62, - 0x98, 0xbf, 0x16, 0xa0, 0x3a, 0x35, 0x9b, 0x62, 0x9a, 0xc3, 0x88, 0x86, 0x94, 0x91, 0x79, 0x1f, - 0x28, 0xe3, 0xc5, 0x8e, 0xd4, 0xa5, 0xd8, 0x11, 0xc7, 0xf3, 0x3e, 0xcf, 0xb2, 0xb2, 0xda, 0x42, - 0x42, 0x3b, 0x50, 0x18, 0x52, 0x4e, 0xd4, 0x67, 0xe0, 0x46, 0x59, 0xc2, 0xe8, 0x09, 0x80, 0xf8, - 0x55, 0x79, 0x0b, 0x73, 0xd6, 0x41, 0x28, 0x69, 0xd2, 0x5d, 0x28, 0x76, 0xa9, 0xef, 0x7b, 0x5c, - 0x7d, 0x01, 0x6e, 0x74, 0x15, 0x8e, 0xb6, 0xe1, 0x3f, 0x27, 0x09, 0x31, 0x63, 0x76, 0xba, 0x60, - 0x5f, 0xfe, 0x14, 0x94, 0x3a, 0xff, 0xa4, 0xc1, 0x96, 0x8c, 0xa9, 0x42, 0x9b, 0xaf, 0x00, 0x26, - 0xef, 0x35, 0xda, 0x83, 0x75, 0xf9, 0xe8, 0x64, 0xc4, 0x49, 0x20, 0x9a, 0xc2, 0x6c, 0x12, 0x60, - 0x67, 0x40, 0xec, 0x3e, 0xf1, 0xdc, 0x3e, 0x57, 0x53, 0xb7, 0x26, 0xa0, 0xfd, 0x0b, 0x66, 0x5f, - 0x22, 0x07, 0x92, 0x68, 0xbe, 0xfd, 0x3c, 0x36, 0xb4, 0xd3, 0xb1, 0xa1, 0x9d, 0x8d, 0x0d, 0xed, - 0xe7, 0xd8, 0xd0, 0x3e, 0x9c, 0x1b, 0xb9, 0xb3, 0x73, 0x23, 0xf7, 0xed, 0xdc, 0xc8, 0x1d, 0xef, - 0xba, 0x1e, 0xef, 0xc7, 0x8e, 0xd5, 0xa5, 0x7e, 0xe3, 0xf2, 0xa9, 0x32, 0xb9, 0x4c, 0x8f, 0x8d, - 0xd9, 0x13, 0xc7, 0x29, 0xca, 0xf5, 0x9d, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x28, 0x35, 
0x60, - 0x76, 0x8c, 0x06, 0x00, 0x00, + // 762 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xdf, 0x6e, 0xdb, 0x36, + 0x14, 0xc6, 0xad, 0xd8, 0x71, 0xec, 0xe3, 0x38, 0x0e, 0xb8, 0x0d, 0xd3, 0xb2, 0x59, 0xce, 0x74, + 0x31, 0x04, 0x18, 0x20, 0x07, 0x09, 0x86, 0x60, 0xc0, 0xfe, 0x20, 0xb6, 0x83, 0x65, 0x18, 0x32, + 0x0c, 0x5a, 0xda, 0x8b, 0xdc, 0x08, 0x94, 0xcc, 0xca, 0x42, 0x2c, 0x51, 0x10, 0x29, 0xc3, 0x7a, + 0x8b, 0x5e, 0x15, 0x7d, 0x84, 0xf6, 0xa6, 0xcf, 0x91, 0xcb, 0x5c, 0xf6, 0xaa, 0x2d, 0x9c, 0x37, + 0xe8, 0x13, 0x14, 0xa4, 0xa8, 0x38, 0x76, 0x9a, 0xc6, 0x57, 0xa6, 0x79, 0xbe, 0x1f, 0x0f, 0xf9, + 0x9d, 0x23, 0x12, 0xda, 0x9c, 0x44, 0x43, 0x92, 0x84, 0x41, 0xc4, 0xbb, 0x3c, 0x8b, 0x09, 0xeb, + 0xc6, 0x38, 0xc1, 0x21, 0xb3, 0xe2, 0x84, 0x72, 0x8a, 0xb6, 0xe7, 0x61, 0x4b, 0x86, 0x77, 0xbe, + 0xf6, 0xa9, 0x4f, 0x65, 0xb0, 0x2b, 0x46, 0xb9, 0x6e, 0xc7, 0xf0, 0x29, 0xf5, 0xc7, 0xa4, 0x2b, + 0xff, 0xb9, 0xe9, 0xb3, 0xee, 0x30, 0x4d, 0x30, 0x0f, 0x68, 0x94, 0xc7, 0xcd, 0x37, 0x65, 0x68, + 0xf5, 0x69, 0xc4, 0x48, 0xc4, 0x52, 0xf6, 0x9f, 0xcc, 0x80, 0x0e, 0x61, 0xdd, 0x1d, 0x53, 0xef, + 0x52, 0xd7, 0x76, 0xb5, 0xbd, 0xc6, 0x41, 0xdb, 0x5a, 0xce, 0x65, 0xf5, 0x44, 0x38, 0x57, 0xdb, + 0xb9, 0x16, 0xfd, 0x06, 0x35, 0x32, 0x09, 0x86, 0x24, 0xf2, 0x88, 0xbe, 0x26, 0xb9, 0xdd, 0xfb, + 0xdc, 0x89, 0x52, 0x28, 0xf4, 0x96, 0x40, 0x7f, 0x42, 0x7d, 0x82, 0xc7, 0xc1, 0x10, 0x73, 0x9a, + 0xe8, 0x65, 0x89, 0xff, 0x78, 0x1f, 0x7f, 0x5a, 0x48, 0x14, 0x3f, 0x67, 0xd0, 0xaf, 0xb0, 0x31, + 0x21, 0x09, 0x0b, 0x68, 0xa4, 0x57, 0x24, 0xde, 0xf9, 0x0c, 0x9e, 0x0b, 0x14, 0x5c, 0xe8, 0x45, + 0x6e, 0x96, 0x45, 0xde, 0x28, 0xa1, 0x51, 0xa6, 0xaf, 0x3f, 0x94, 0xfb, 0xff, 0x42, 0x52, 0xe4, + 0xbe, 0x65, 0x44, 0x6e, 0x1e, 0x84, 0x84, 0xa6, 0x5c, 0xaf, 0x3e, 0x94, 0xfb, 0x3c, 0x17, 0x14, + 0xb9, 0x95, 0x1e, 0xed, 0x43, 0x05, 0xbb, 0x5e, 0xa0, 0x6f, 0x48, 0xee, 0x87, 0xfb, 0xdc, 0x71, + 0xaf, 0xff, 0xb7, 0x82, 0xa4, 0xd2, 0xec, 0x43, 
0xe3, 0x8e, 0xfb, 0xe8, 0x7b, 0xa8, 0x87, 0x78, + 0xea, 0xb8, 0x19, 0x27, 0x4c, 0xd6, 0xab, 0x6c, 0xd7, 0x42, 0x3c, 0xed, 0x89, 0xff, 0xe8, 0x5b, + 0xd8, 0x10, 0x41, 0x1f, 0x33, 0x59, 0x92, 0xb2, 0x5d, 0x0d, 0xf1, 0xf4, 0x2f, 0xcc, 0xcc, 0xd7, + 0x1a, 0x6c, 0x2d, 0xd6, 0x02, 0xfd, 0x0c, 0x48, 0x68, 0xb1, 0x4f, 0x9c, 0x28, 0x0d, 0x1d, 0x59, + 0xd4, 0x62, 0xc5, 0x56, 0x88, 0xa7, 0xc7, 0x3e, 0xf9, 0x37, 0x0d, 0x65, 0x6a, 0x86, 0xce, 0x60, + 0xbb, 0x10, 0x17, 0xfd, 0xa4, 0x8a, 0xfe, 0x9d, 0x95, 0x37, 0x9c, 0x55, 0x34, 0x9c, 0x35, 0x50, + 0x82, 0x5e, 0xed, 0xea, 0x5d, 0xa7, 0xf4, 0xf2, 0x7d, 0x47, 0xb3, 0xb7, 0xf2, 0xf5, 0x8a, 0xc8, + 0xe2, 0x21, 0xca, 0x8b, 0x87, 0x30, 0x7f, 0x81, 0xd6, 0x52, 0xdd, 0x91, 0x09, 0xcd, 0x38, 0x75, + 0x9d, 0x4b, 0x92, 0x39, 0xd2, 0x25, 0x5d, 0xdb, 0x2d, 0xef, 0xd5, 0xed, 0x46, 0x9c, 0xba, 0xff, + 0x90, 0xec, 0x5c, 0x4c, 0x99, 0xfb, 0xd0, 0x5c, 0xa8, 0x37, 0xea, 0x40, 0x03, 0xc7, 0xb1, 0x53, + 0x74, 0x89, 0x38, 0x59, 0xc5, 0x06, 0x1c, 0xc7, 0x4a, 0x66, 0x5e, 0xc0, 0xe6, 0x29, 0x66, 0x23, + 0x32, 0x54, 0xc0, 0x4f, 0xd0, 0x92, 0x2e, 0x38, 0xcb, 0x06, 0x37, 0xe5, 0xf4, 0x59, 0xe1, 0xb2, + 0x09, 0xcd, 0xb9, 0x6e, 0xee, 0x75, 0xa3, 0x50, 0x09, 0xc3, 0x5f, 0x68, 0xd0, 0x5a, 0xea, 0x20, + 0x34, 0x80, 0x66, 0x48, 0x18, 0x93, 0x26, 0x92, 0x31, 0xce, 0xd4, 0xe7, 0xf6, 0x05, 0x07, 0x2b, + 0xd2, 0xbd, 0x4d, 0x45, 0x0d, 0x04, 0x84, 0x7e, 0x87, 0x7a, 0x9c, 0x10, 0x2f, 0x60, 0x2b, 0xd5, + 0x20, 0x5f, 0x61, 0x4e, 0x98, 0x1f, 0xd7, 0xa0, 0xb9, 0xd0, 0x9b, 0xa2, 0x9b, 0xe3, 0x84, 0xc6, + 0x94, 0x91, 0x55, 0x37, 0x54, 0xe8, 0xc5, 0x89, 0xd4, 0x50, 0x9c, 0x88, 0xe3, 0x55, 0xf7, 0xb3, + 0xa9, 0xa8, 0x81, 0x80, 0xd0, 0x21, 0x54, 0x26, 0x94, 0x13, 0x75, 0x0d, 0x3c, 0x0a, 0x4b, 0x31, + 0xfa, 0x03, 0x40, 0xfc, 0xaa, 0xbc, 0x95, 0x15, 0x7d, 0x10, 0x48, 0x9e, 0xf4, 0x08, 0xaa, 0x1e, + 0x0d, 0xc3, 0x80, 0xab, 0x1b, 0xe0, 0x51, 0x56, 0xc9, 0xd1, 0x01, 0x7c, 0xe3, 0x66, 0x31, 0x66, + 0xcc, 0xc9, 0x27, 0x9c, 0xbb, 0x57, 0x41, 0xcd, 0xfe, 0x2a, 0x0f, 0xf6, 0x65, 0x4c, 
0x19, 0x6d, + 0x46, 0x00, 0xf3, 0xef, 0x1a, 0x1d, 0x43, 0x5b, 0x6e, 0x9d, 0x4c, 0x39, 0x89, 0x44, 0x51, 0x98, + 0x43, 0x22, 0xec, 0x8e, 0x89, 0x33, 0x22, 0x81, 0x3f, 0xe2, 0xaa, 0xeb, 0x76, 0x84, 0xe8, 0xe4, + 0x56, 0x73, 0x22, 0x25, 0xa7, 0x52, 0x81, 0xda, 0x00, 0x09, 0xf1, 0x46, 0xc4, 0xbb, 0x74, 0xf8, + 0x54, 0xba, 0x5e, 0xb3, 0xeb, 0x6a, 0xe6, 0x7c, 0xda, 0x7b, 0xf2, 0x6a, 0x66, 0x68, 0x57, 0x33, + 0x43, 0xbb, 0x9e, 0x19, 0xda, 0x87, 0x99, 0xa1, 0x3d, 0xbf, 0x31, 0x4a, 0xd7, 0x37, 0x46, 0xe9, + 0xed, 0x8d, 0x51, 0xba, 0x38, 0xf2, 0x03, 0x3e, 0x4a, 0x5d, 0xcb, 0xa3, 0x61, 0xf7, 0xee, 0xa3, + 0x33, 0x1f, 0xe6, 0xaf, 0xca, 0xf2, 0x83, 0xe4, 0x56, 0xe5, 0xfc, 0xe1, 0xa7, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xf0, 0x06, 0x54, 0xd3, 0xab, 0x06, 0x00, 0x00, } func (this *ConsensusParams) Equal(that interface{}) bool { @@ -1002,6 +1013,9 @@ func (this *ABCIParams) Equal(that interface{}) bool { if this.VoteExtensionsEnableHeight != that1.VoteExtensionsEnableHeight { return false } + if this.RecheckTx != that1.RecheckTx { + return false + } return true } func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { @@ -1424,6 +1438,16 @@ func (m *ABCIParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RecheckTx { + i-- + if m.RecheckTx { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } if m.VoteExtensionsEnableHeight != 0 { i = encodeVarintParams(dAtA, i, uint64(m.VoteExtensionsEnableHeight)) i-- @@ -1612,6 +1636,9 @@ func (m *ABCIParams) Size() (n int) { if m.VoteExtensionsEnableHeight != 0 { n += 1 + sovParams(uint64(m.VoteExtensionsEnableHeight)) } + if m.RecheckTx { + n += 2 + } return n } @@ -1908,7 +1935,10 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1996,7 +2026,10 @@ func (m 
*BlockParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2117,7 +2150,10 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2199,7 +2235,10 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2268,7 +2307,10 @@ func (m *VersionParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2356,7 +2398,10 @@ func (m *HashedParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2478,7 +2523,10 @@ func (m *SynchronyParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2728,7 +2776,10 @@ func (m *TimeoutParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ 
-2791,13 +2842,36 @@ func (m *ABCIParams) Unmarshal(dAtA []byte) error { break } } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RecheckTx", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RecheckTx = bool(v != 0) default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto index 21bbd037d8..058e30155f 100644 --- a/proto/tendermint/types/params.proto +++ b/proto/tendermint/types/params.proto @@ -141,4 +141,8 @@ message ABCIParams { // passed to the application for validation in VerifyVoteExtension and given // to the application to use when proposing a block during PrepareProposal. int64 vote_extensions_enable_height = 1; + + // Indicates if CheckTx should be called on all the transactions + // remaining in the mempool after a block is executed. 
+ bool recheck_tx = 2; } diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index fcfbc01f54..f6f8a33f3f 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -2650,7 +2650,10 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2786,7 +2789,10 @@ func (m *Part) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2903,7 +2909,10 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3409,7 +3418,10 @@ func (m *Header) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3491,7 +3503,10 @@ func (m *Data) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3819,7 +3834,10 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3974,7 +3992,10 @@ func (m *Commit) Unmarshal(dAtA []byte) error { if err != nil { 
return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4144,7 +4165,10 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4299,7 +4323,10 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4537,7 +4564,10 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4763,7 +4793,10 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4885,7 +4918,10 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5007,7 +5043,10 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5161,7 +5200,10 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { if err != nil { 
return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5315,7 +5357,10 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/validator.pb.go b/proto/tendermint/types/validator.pb.go index 23b30ed3cb..2c3468b83f 100644 --- a/proto/tendermint/types/validator.pb.go +++ b/proto/tendermint/types/validator.pb.go @@ -583,7 +583,10 @@ func (m *ValidatorSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -738,7 +741,10 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -843,7 +849,10 @@ func (m *SimpleValidator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/version/types.pb.go b/proto/tendermint/version/types.pb.go index 76a94fd3c0..7aefd7747b 100644 --- a/proto/tendermint/version/types.pb.go +++ b/proto/tendermint/version/types.pb.go @@ -265,7 +265,10 @@ func (m *Consensus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 
0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/scripts/confix/plan.go b/scripts/confix/plan.go index 653bca9fd2..a0ceef9379 100644 --- a/scripts/confix/plan.go +++ b/scripts/confix/plan.go @@ -222,4 +222,10 @@ var plan = transform.Plan{ return fmt.Errorf("unrecognized value: %v", idx.KeyValue) }), }, + { + // Since https://github.com/tendermint/tendermint/pull/8514. + Desc: "Remove the recheck option from the [mempool] section", + T: transform.Remove(parser.Key{"mempool", "recheck"}), + ErrorOK: true, + }, } diff --git a/scripts/confix/testdata/diff-35-36.txt b/scripts/confix/testdata/diff-35-36.txt index 13fd268af2..76f541b28b 100644 --- a/scripts/confix/testdata/diff-35-36.txt +++ b/scripts/confix/testdata/diff-35-36.txt @@ -9,6 +9,7 @@ -M consensus.timeout-prevote-delta -M consensus.timeout-propose -M consensus.timeout-propose-delta +-M mempool.recheck -M mempool.version -M p2p.addr-book-file -M p2p.addr-book-strict diff --git a/scripts/confix/testdata/v36-config.toml b/scripts/confix/testdata/v36-config.toml index e49b97d890..0182ab14ca 100644 --- a/scripts/confix/testdata/v36-config.toml +++ b/scripts/confix/testdata/v36-config.toml @@ -281,7 +281,11 @@ recv-rate = 5120000 ####################################################### [mempool] -recheck = true +# recheck has been moved from a config option to a global +# consensus param in v0.36 +# See https://github.com/tendermint/tendermint/issues/8244 for more information. + +# Set true to broadcast transactions in the mempool to other nodes broadcast = true # Maximum number of transactions in the mempool diff --git a/spec/abci/apps.md b/spec/abci/apps.md index 5ee93e613c..0439f2c850 100644 --- a/spec/abci/apps.md +++ b/spec/abci/apps.md @@ -312,6 +312,18 @@ txs included in a proposed block. Must have `MaxGas >= -1`. If `MaxGas == -1`, no limit is enforced. 
+### BlockParams.RecheckTx + +This indicates whether all nodes in the network should perform a `CheckTx` on all +transactions remaining in the mempool directly *after* the execution of every block, +i.e. whenever a new application state is created. This is often useful for garbage +collection. + +The change will come into effect immediately after `FinalizeBlock` has been +called. + +This was previously a local mempool config parameter. + ### EvidenceParams.MaxAgeDuration This is the maximum age of evidence in time units. @@ -352,7 +364,7 @@ are expected to have clocks that differ by at most `Precision`. ### SynchronyParams.MessageDelay -`SynchronyParams.MessageDelay` is a parameter of the Proposer-Based Timestamps +`SynchronyParams.MessageDelay` is a parameter of the Proposer-Based Timestamps algorithm that configures the acceptable upper-bound for transmitting a `Proposal` message from the proposer to all of the validators on the network. diff --git a/spec/core/data_structures.md b/spec/core/data_structures.md index dde3ec3542..3e3cd08a47 100644 --- a/spec/core/data_structures.md +++ b/spec/core/data_structures.md @@ -5,40 +5,40 @@ Here we describe the data structures in the Tendermint blockchain and the rules The Tendermint blockchains consists of a short list of data types: - [Data Structures](#data-structures) - - [Block](#block) - - [Execution](#execution) - - [Header](#header) - - [Version](#version) - - [BlockID](#blockid) - - [PartSetHeader](#partsetheader) - - [Part](#part) - - [Time](#time) - - [Data](#data) - - [Commit](#commit) - - [CommitSig](#commitsig) - - [BlockIDFlag](#blockidflag) - - [Vote](#vote) - - [CanonicalVote](#canonicalvote) - - [Proposal](#proposal) - - [SignedMsgType](#signedmsgtype) - - [Signature](#signature) - - [EvidenceList](#evidencelist) - - [Evidence](#evidence) - - [DuplicateVoteEvidence](#duplicatevoteevidence) - - [LightClientAttackEvidence](#lightclientattackevidence) - - [LightBlock](#lightblock) - - 
[SignedHeader](#signedheader) - - [ValidatorSet](#validatorset) - - [Validator](#validator) - - [Address](#address) - - [ConsensusParams](#consensusparams) - - [BlockParams](#blockparams) - - [EvidenceParams](#evidenceparams) - - [ValidatorParams](#validatorparams) - - [VersionParams](#versionparams) - - [SynchronyParams](#synchronyparams) - - [TimeoutParams](#timeoutparams) - - [Proof](#proof) + - [Block](#block) + - [Execution](#execution) + - [Header](#header) + - [Version](#version) + - [BlockID](#blockid) + - [PartSetHeader](#partsetheader) + - [Part](#part) + - [Time](#time) + - [Data](#data) + - [Commit](#commit) + - [CommitSig](#commitsig) + - [BlockIDFlag](#blockidflag) + - [Vote](#vote) + - [CanonicalVote](#canonicalvote) + - [Proposal](#proposal) + - [SignedMsgType](#signedmsgtype) + - [Signature](#signature) + - [EvidenceList](#evidencelist) + - [Evidence](#evidence) + - [DuplicateVoteEvidence](#duplicatevoteevidence) + - [LightClientAttackEvidence](#lightclientattackevidence) + - [LightBlock](#lightblock) + - [SignedHeader](#signedheader) + - [ValidatorSet](#validatorset) + - [Validator](#validator) + - [Address](#address) + - [ConsensusParams](#consensusparams) + - [BlockParams](#blockparams) + - [EvidenceParams](#evidenceparams) + - [ValidatorParams](#validatorparams) + - [VersionParams](#versionparams) + - [SynchronyParams](#synchronyparams) + - [TimeoutParams](#timeoutparams) + - [Proof](#proof) ## Block @@ -49,7 +49,7 @@ and a list of evidence of malfeasance (ie. signing conflicting votes). |--------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| | Header | [Header](#header) | Header corresponding to the block. This field contains information used throughout consensus and other areas of the protocol. 
To find out what it contains, visit [header] (#header) | Must adhere to the validation rules of [header](#header) | | Data | [Data](#data) | Data contains a list of transactions. The contents of the transaction is unknown to Tendermint. | This field can be empty or populated, but no validation is performed. Applications can perform validation on individual transactions prior to block creation using [checkTx](../abci/abci.md#checktx). -| Evidence | [EvidenceList](#evidence_list) | Evidence contains a list of infractions committed by validators. | Can be empty, but when populated the validations rules from [evidenceList](#evidence_list) apply | +| Evidence | [EvidenceList](#evidencelist) | Evidence contains a list of infractions committed by validators. | Can be empty, but when populated the validations rules from [evidenceList](#evidencelist) apply | | LastCommit | [Commit](#commit) | `LastCommit` includes one vote for every validator. All votes must either be for the previous block, nil or absent. If a vote is for the previous block it must have a valid signature from the corresponding validator. The sum of the voting power of the validators that voted must be greater than 2/3 of the total voting power of the complete validator set. The number of votes in a commit is limited to 10000 (see `types.MaxVotesCount`). | Must be empty for the initial height and must adhere to the validation rules of [commit](#commit). | ## Execution @@ -152,7 +152,7 @@ The `BlockID` contains two distinct Merkle roots of the block. The `BlockID` inc | Name | Type | Description | Validation | |---------------|---------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------| | Hash | slice of bytes (`[]byte`) | MerkleRoot of all the fields in the header (ie. `MerkleRoot(header)`. 
| hash must be of length 32 | -| PartSetHeader | [PartSetHeader](#PartSetHeader) | Used for secure gossiping of the block during consensus, is the MerkleRoot of the complete serialized block cut into parts (ie. `MerkleRoot(MakeParts(block))`). | Must adhere to the validation rules of [PartSetHeader](#PartSetHeader) | +| PartSetHeader | [PartSetHeader](#partsetheader) | Used for secure gossiping of the block during consensus, is the MerkleRoot of the complete serialized block cut into parts (ie. `MerkleRoot(MakeParts(block))`). | Must adhere to the validation rules of [PartSetHeader](#partsetheader) | See [MerkleRoot](./encoding.md#MerkleRoot) for details. @@ -238,7 +238,7 @@ The vote extension is not part of the [`CanonicalVote`](#canonicalvote). | Height | uint64 | Height for which this vote was created. | Must be > 0 | | Round | int32 | Round that the commit corresponds to. | Must be > 0 | | BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) | -| Timestamp | [Time](#Time) | The time at which a validator signed. | [Time](#time) | +| Timestamp | [Time](#time) | The time at which a validator signed. | [Time](#time) | | ValidatorAddress | slice of bytes (`[]byte`) | Address of the validator | Length must be equal to 20 | | ValidatorIndex | int32 | Index at a specific block height that corresponds to the Index of the validator in the set. | must be > 0 | | Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated bock. | Length of signature must be > 0 and < 64 | @@ -295,7 +295,7 @@ is locked in POLRound. The message is signed by the validator private key. | Round | int32 | Round that the commit corresponds to. | Must be > 0 | | POLRound | int64 | Proof of lock | Must be > 0 | | BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) | -| Timestamp | [Time](#Time) | Timestamp represents the time at which a validator signed. 
| [Time](#time) | +| Timestamp | [Time](#time) | Timestamp represents the time at which a validator signed. | [Time](#time) | | Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated bock. | Length of signature must be > 0 and < 64 | ## SignedMsgType @@ -346,7 +346,7 @@ in the same round of the same height. Votes are lexicographically sorted on `Blo | VoteB | [Vote](#vote) | The second vote submitted by a validator when they equivocated | VoteB must adhere to [Vote](#vote) validation rules | | TotalVotingPower | int64 | The total power of the validator set at the height of equivocation | Must be equal to nodes own copy of the data | | ValidatorPower | int64 | Power of the equivocating validator at the height | Must be equal to the nodes own copy of the data | -| Timestamp | [Time](#Time) | Time of the block where the equivocation occurred | Must be equal to the nodes own copy of the data | +| Timestamp | [Time](#time) | Time of the block where the equivocation occurred | Must be equal to the nodes own copy of the data | ### LightClientAttackEvidence @@ -355,13 +355,13 @@ a light client such that a full node can verify, propose and commit the evidence punishment of the malicious validators. There are three forms of attacks: Lunatic, Equivocation and Amnesia. These attacks are exhaustive. 
You can find a more detailed overview of this [here](../light-client/accountability#the_misbehavior_of_faulty_validators) -| Name | Type | Description | Validation | -|----------------------|------------------------------------|----------------------------------------------------------------------|------------------------------------------------------------------| -| ConflictingBlock | [LightBlock](#LightBlock) | Read Below | Must adhere to the validation rules of [lightBlock](#lightblock) | -| CommonHeight | int64 | Read Below | must be > 0 | -| Byzantine Validators | Array of [Validators](#Validators) | validators that acted maliciously | Read Below | -| TotalVotingPower | int64 | The total power of the validator set at the height of the infraction | Must be equal to the nodes own copy of the data | -| Timestamp | [Time](#Time) | Time of the block where the infraction occurred | Must be equal to the nodes own copy of the data | +| Name | Type | Description | Validation | +|----------------------|----------------------------------|----------------------------------------------------------------------|------------------------------------------------------------------| +| ConflictingBlock | [LightBlock](#lightblock) | Read Below | Must adhere to the validation rules of [lightBlock](#lightblock) | +| CommonHeight | int64 | Read Below | must be > 0 | +| Byzantine Validators | Array of [Validator](#validator) | validators that acted maliciously | Read Below | +| TotalVotingPower | int64 | The total power of the validator set at the height of the infraction | Must be equal to the nodes own copy of the data | +| Timestamp | [Time](#time) | Time of the block where the infraction occurred | Must be equal to the nodes own copy of the data | ## LightBlock @@ -380,7 +380,7 @@ The SignedhHeader is the [header](#header) accompanied by the commit to prove it | Name | Type | Description | Validation | 
|--------|-------------------|-------------------|-----------------------------------------------------------------------------------| -| Header | [Header](#Header) | [Header](#header) | Header cannot be nil and must adhere to the [Header](#Header) validation criteria | +| Header | [Header](#header) | [Header](#header) | Header cannot be nil and must adhere to the [Header](#header) validation criteria | | Commit | [Commit](#commit) | [Commit](#commit) | Commit cannot be nil and must adhere to the [Commit](#commit) criteria | ## ValidatorSet @@ -429,6 +429,7 @@ func SumTruncated(bz []byte) []byte { |--------------|-------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| | max_bytes | int64 | Max size of a block, in bytes. | 1 | | max_gas | int64 | Max sum of `GasWanted` in a proposed block. NOTE: blocks that violate this may be committed if there are Byzantine proposers. It's the application's responsibility to handle this when processing a block! | 2 | +| recheck_tx | bool | Indicated whether to run `CheckTx` on all remaining transactions *after* every execution of a block | 3 | ### EvidenceParams diff --git a/types/params.go b/types/params.go index a2651b1861..28c969e461 100644 --- a/types/params.go +++ b/types/params.go @@ -101,6 +101,7 @@ type TimeoutParams struct { // Interface. type ABCIParams struct { VoteExtensionsEnableHeight int64 `json:"vote_extensions_enable_height"` + RecheckTx bool `json:"recheck_tx"` } // VoteExtensionsEnabled returns true if vote extensions are enabled at height h @@ -197,6 +198,8 @@ func DefaultABCIParams() ABCIParams { return ABCIParams{ // When set to 0, vote extensions are not required. VoteExtensionsEnableHeight: 0, + // When true, run CheckTx on each transaction in the mempool after each height. 
+ RecheckTx: true, } } @@ -378,6 +381,7 @@ func (params ConsensusParams) ValidateUpdate(updated *tmproto.ConsensusParams, h // Only the Block.MaxBytes and Block.MaxGas are included in the hash. // This allows the ConsensusParams to evolve more without breaking the block // protocol. No need for a Merkle tree here, just a small struct to hash. +// TODO: We should hash the other parameters as well func (params ConsensusParams) HashConsensusParams() []byte { hp := tmproto.HashedParams{ BlockMaxBytes: params.Block.MaxBytes, @@ -459,6 +463,7 @@ func (params ConsensusParams) UpdateConsensusParams(params2 *tmproto.ConsensusPa } if params2.Abci != nil { res.ABCI.VoteExtensionsEnableHeight = params2.Abci.GetVoteExtensionsEnableHeight() + res.ABCI.RecheckTx = params2.Abci.GetRecheckTx() } return res } @@ -494,6 +499,7 @@ func (params *ConsensusParams) ToProto() tmproto.ConsensusParams { }, Abci: &tmproto.ABCIParams{ VoteExtensionsEnableHeight: params.ABCI.VoteExtensionsEnableHeight, + RecheckTx: params.ABCI.RecheckTx, }, } } @@ -544,6 +550,7 @@ func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams } if pbParams.Abci != nil { c.ABCI.VoteExtensionsEnableHeight = pbParams.Abci.GetVoteExtensionsEnableHeight() + c.ABCI.RecheckTx = pbParams.Abci.GetRecheckTx() } return c } diff --git a/types/params_test.go b/types/params_test.go index e434e9534a..f5cf1b82ca 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -178,6 +178,7 @@ func TestConsensusParamsValidation(t *testing.T) { type makeParamsArgs struct { blockBytes int64 blockGas int64 + recheck bool evidenceAge int64 maxEvidenceBytes int64 pubkeyTypes []string @@ -240,6 +241,7 @@ func makeParams(args makeParamsArgs) ConsensusParams { }, ABCI: ABCIParams{ VoteExtensionsEnableHeight: args.abciExtensionHeight, + RecheckTx: args.recheck, }, } } From cb9722c2b09e0765bfeb493f5817240f169a1a92 Mon Sep 17 00:00:00 2001 From: Jasmina Malicevic Date: Thu, 26 May 2022 11:18:27 +0200 Subject: [PATCH 
063/203] abci: strip mempoolerror from responsechectx (#8620) * abci:mempoolError from ResponseCheckTx * responseCheckTx returns an error if Tendermint decides not to accept an app after CheckTx *updated spec, upgrading.md and changelog.md --- CHANGELOG_PENDING.md | 2 +- UPGRADING.md | 10 +- abci/types/types.pb.go | 759 +++++++++---------------- internal/mempool/mempool.go | 17 +- internal/mempool/mempool_test.go | 11 +- internal/rpc/core/mempool.go | 11 +- proto/tendermint/abci/types.proto | 7 +- proto/tendermint/blocksync/types.pb.go | 30 +- proto/tendermint/consensus/types.pb.go | 50 +- proto/tendermint/consensus/wal.pb.go | 25 +- proto/tendermint/crypto/keys.pb.go | 5 +- proto/tendermint/crypto/proof.pb.go | 25 +- proto/tendermint/libs/bits/types.pb.go | 5 +- proto/tendermint/mempool/types.pb.go | 10 +- proto/tendermint/p2p/conn.pb.go | 25 +- proto/tendermint/p2p/pex.pb.go | 20 +- proto/tendermint/p2p/types.pb.go | 25 +- proto/tendermint/privval/types.pb.go | 55 +- proto/tendermint/state/types.pb.go | 25 +- proto/tendermint/statesync/types.pb.go | 45 +- proto/tendermint/types/block.pb.go | 5 +- proto/tendermint/types/canonical.pb.go | 25 +- proto/tendermint/types/events.pb.go | 5 +- proto/tendermint/types/evidence.pb.go | 20 +- proto/tendermint/types/params.pb.go | 45 +- proto/tendermint/types/types.pb.go | 75 +-- proto/tendermint/types/validator.pb.go | 15 +- proto/tendermint/version/types.pb.go | 5 +- rpc/coretypes/responses.go | 9 +- 29 files changed, 418 insertions(+), 948 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 9350d70b76..f16a21aa4c 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -27,7 +27,7 @@ Special thanks to external contributors on this release: - [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec). - [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint. 
(@tychoish) - - [abci] \#8605 Remove info, log, events and gasUsed fields from ResponseCheckTx as they are not used by Tendermint. (@jmalicevic) + - [abci] \#8605 Remove info, log, events, gasUsed and mempoolError fields from ResponseCheckTx as they are not used by Tendermint. (@jmalicevic) - P2P Protocol diff --git a/UPGRADING.md b/UPGRADING.md index 324a891d5b..43caddb6b5 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -6,6 +6,14 @@ This guide provides instructions for upgrading to specific versions of Tendermin ### ABCI Changes +### ResponseCheckTx Parameter Change + +`ResponseCheckTx` had fields that are not used by Tendermint, they are now removed. +In 0.36, we removed the following fields, from `ResponseCheckTx`: `Log`, `Info`, `Events`, + `GasUsed` and `MempoolError`. +`MempoolError` was used to signal to operators that a transaction was rejected from the mempool +by Tendermint itself. Right now, we return a regular error when this happens. + #### ABCI++ Coming soon... @@ -137,7 +145,7 @@ network-wide coordinated variable so that applications can be written knowing either all nodes agree on whether to run `RecheckTx`. Applications can turn on `RecheckTx` by altering the `ConsensusParams` in the -`FinalizeBlock` ABCI response. +`FinalizeBlock` ABCI response. ### CLI Changes diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 47e3462985..5b43a74ced 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -2487,8 +2487,6 @@ type ResponseCheckTx struct { Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` - // ABCI applications creating a ResponseCheckTX should not set mempool_error. 
- MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` } func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } @@ -2566,13 +2564,6 @@ func (m *ResponseCheckTx) GetPriority() int64 { return 0 } -func (m *ResponseCheckTx) GetMempoolError() string { - if m != nil { - return m.MempoolError - } - return "" -} - type ResponseDeliverTx struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -4203,225 +4194,224 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 3474 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcb, 0x73, 0x23, 0xd5, - 0xd5, 0xd7, 0xfb, 0x71, 0x64, 0x49, 0xed, 0x6b, 0x33, 0x68, 0xc4, 0x8c, 0x3d, 0xf4, 0x14, 0x30, - 0x33, 0x80, 0x87, 0xcf, 0xf3, 0x0d, 0x0c, 0xdf, 0xc0, 0x47, 0xd9, 0xb2, 0x06, 0x79, 0xc6, 0x63, - 0x9b, 0xb6, 0x6c, 0x8a, 0x3c, 0xa6, 0x69, 0x49, 0xd7, 0x56, 0x33, 0x92, 0xba, 0xe9, 0x6e, 0x19, - 0x99, 0x65, 0x28, 0x36, 0x54, 0xaa, 0xc2, 0x26, 0x95, 0xa4, 0x2a, 0xec, 0x92, 0xaa, 0xe4, 0x3f, - 0xc8, 0x2a, 0xab, 0x2c, 0x58, 0x64, 0xc1, 0x2a, 0xc9, 0x8a, 0xa4, 0x60, 0x91, 0xaa, 0xfc, 0x03, - 0xd9, 0x25, 0xa9, 0xfb, 0xe8, 0x97, 0xd4, 0x2d, 0xb5, 0x18, 0xa0, 0x2a, 0x55, 0xec, 0x74, 0x4f, - 0x9f, 0x73, 0xfa, 0x3e, 0xce, 0x3d, 0x8f, 0xdf, 0x69, 0xc1, 0x13, 0x16, 0x1e, 0x74, 0xb0, 0xd1, - 0x57, 0x07, 0xd6, 0x75, 0xa5, 0xd5, 0x56, 0xaf, 0x5b, 0x67, 0x3a, 0x36, 0xd7, 0x74, 0x43, 0xb3, - 0x34, 0x54, 0x76, 0x1f, 0xae, 0x91, 0x87, 0xd5, 0x8b, 0x1e, 0xee, 0xb6, 0x71, 0xa6, 0x5b, 0xda, - 0x75, 0xdd, 0xd0, 0xb4, 0x63, 0xc6, 0x5f, 0xbd, 0xe0, 0x79, 0x4c, 0xf5, 0x78, 0xb5, 0xf9, 0x9e, - 0x72, 0xe1, 0x87, 0xf8, 0xcc, 0x7e, 0x7a, 0x71, 0x42, 0x56, 0x57, 0x0c, 0xa5, 0x6f, 0x3f, 0x5e, - 0x3d, 0xd1, 0xb4, 
0x93, 0x1e, 0xbe, 0x4e, 0x47, 0xad, 0xe1, 0xf1, 0x75, 0x4b, 0xed, 0x63, 0xd3, - 0x52, 0xfa, 0x3a, 0x67, 0x58, 0x3e, 0xd1, 0x4e, 0x34, 0xfa, 0xf3, 0x3a, 0xf9, 0xc5, 0xa8, 0xe2, - 0xbf, 0x01, 0xb2, 0x12, 0x7e, 0x77, 0x88, 0x4d, 0x0b, 0xad, 0x43, 0x0a, 0xb7, 0xbb, 0x5a, 0x25, - 0x7e, 0x29, 0x7e, 0xa5, 0xb0, 0x7e, 0x61, 0x6d, 0x6c, 0x71, 0x6b, 0x9c, 0xaf, 0xde, 0xee, 0x6a, - 0x8d, 0x98, 0x44, 0x79, 0xd1, 0x4d, 0x48, 0x1f, 0xf7, 0x86, 0x66, 0xb7, 0x92, 0xa0, 0x42, 0x17, - 0xc3, 0x84, 0xee, 0x10, 0xa6, 0x46, 0x4c, 0x62, 0xdc, 0xe4, 0x55, 0xea, 0xe0, 0x58, 0xab, 0x24, - 0xa7, 0xbf, 0x6a, 0x7b, 0x70, 0x4c, 0x5f, 0x45, 0x78, 0xd1, 0x26, 0x80, 0x3a, 0x50, 0x2d, 0xb9, - 0xdd, 0x55, 0xd4, 0x41, 0x25, 0x45, 0x25, 0x9f, 0x0c, 0x97, 0x54, 0xad, 0x1a, 0x61, 0x6c, 0xc4, - 0xa4, 0xbc, 0x6a, 0x0f, 0xc8, 0x74, 0xdf, 0x1d, 0x62, 0xe3, 0xac, 0x92, 0x9e, 0x3e, 0xdd, 0x37, - 0x08, 0x13, 0x99, 0x2e, 0xe5, 0x46, 0xdb, 0x50, 0x68, 0xe1, 0x13, 0x75, 0x20, 0xb7, 0x7a, 0x5a, - 0xfb, 0x61, 0x25, 0x43, 0x85, 0xc5, 0x30, 0xe1, 0x4d, 0xc2, 0xba, 0x49, 0x38, 0x37, 0x13, 0x95, - 0x78, 0x23, 0x26, 0x41, 0xcb, 0xa1, 0xa0, 0x57, 0x20, 0xd7, 0xee, 0xe2, 0xf6, 0x43, 0xd9, 0x1a, - 0x55, 0xb2, 0x54, 0xcf, 0x6a, 0x98, 0x9e, 0x1a, 0xe1, 0x6b, 0x8e, 0x1a, 0x31, 0x29, 0xdb, 0x66, - 0x3f, 0xd1, 0x1d, 0x80, 0x0e, 0xee, 0xa9, 0xa7, 0xd8, 0x20, 0xf2, 0xb9, 0xe9, 0x7b, 0xb0, 0xc5, - 0x38, 0x9b, 0x23, 0x3e, 0x8d, 0x7c, 0xc7, 0x26, 0xa0, 0x1a, 0xe4, 0xf1, 0xa0, 0xc3, 0x97, 0x93, - 0xa7, 0x6a, 0x2e, 0x85, 0x9e, 0xf7, 0xa0, 0xe3, 0x5d, 0x4c, 0x0e, 0xf3, 0x31, 0xba, 0x05, 0x99, - 0xb6, 0xd6, 0xef, 0xab, 0x56, 0x05, 0xa8, 0x86, 0x95, 0xd0, 0x85, 0x50, 0xae, 0x46, 0x4c, 0xe2, - 0xfc, 0x68, 0x17, 0x4a, 0x3d, 0xd5, 0xb4, 0x64, 0x73, 0xa0, 0xe8, 0x66, 0x57, 0xb3, 0xcc, 0x4a, - 0x81, 0x6a, 0x78, 0x2a, 0x4c, 0xc3, 0x8e, 0x6a, 0x5a, 0x07, 0x36, 0x73, 0x23, 0x26, 0x15, 0x7b, - 0x5e, 0x02, 0xd1, 0xa7, 0x1d, 0x1f, 0x63, 0xc3, 0x51, 0x58, 0x59, 0x98, 0xae, 0x6f, 0x8f, 0x70, - 0xdb, 0xf2, 0x44, 0x9f, 0xe6, 0x25, 0xa0, 0xef, 0xc3, 
0x52, 0x4f, 0x53, 0x3a, 0x8e, 0x3a, 0xb9, - 0xdd, 0x1d, 0x0e, 0x1e, 0x56, 0x8a, 0x54, 0xe9, 0xd5, 0xd0, 0x49, 0x6a, 0x4a, 0xc7, 0x56, 0x51, - 0x23, 0x02, 0x8d, 0x98, 0xb4, 0xd8, 0x1b, 0x27, 0xa2, 0x07, 0xb0, 0xac, 0xe8, 0x7a, 0xef, 0x6c, - 0x5c, 0x7b, 0x89, 0x6a, 0xbf, 0x16, 0xa6, 0x7d, 0x83, 0xc8, 0x8c, 0xab, 0x47, 0xca, 0x04, 0x15, - 0x35, 0x41, 0xd0, 0x0d, 0xac, 0x2b, 0x06, 0x96, 0x75, 0x43, 0xd3, 0x35, 0x53, 0xe9, 0x55, 0xca, - 0x54, 0xf7, 0x33, 0x61, 0xba, 0xf7, 0x19, 0xff, 0x3e, 0x67, 0x6f, 0xc4, 0xa4, 0xb2, 0xee, 0x27, - 0x31, 0xad, 0x5a, 0x1b, 0x9b, 0xa6, 0xab, 0x55, 0x98, 0xa5, 0x95, 0xf2, 0xfb, 0xb5, 0xfa, 0x48, - 0xa8, 0x0e, 0x05, 0x3c, 0x22, 0xe2, 0xf2, 0xa9, 0x66, 0xe1, 0xca, 0xe2, 0xf4, 0x8b, 0x55, 0xa7, - 0xac, 0x47, 0x9a, 0x85, 0xc9, 0xa5, 0xc2, 0xce, 0x08, 0x29, 0xf0, 0xd8, 0x29, 0x36, 0xd4, 0xe3, - 0x33, 0xaa, 0x46, 0xa6, 0x4f, 0x4c, 0x55, 0x1b, 0x54, 0x10, 0x55, 0xf8, 0x6c, 0x98, 0xc2, 0x23, - 0x2a, 0x44, 0x54, 0xd4, 0x6d, 0x91, 0x46, 0x4c, 0x5a, 0x3a, 0x9d, 0x24, 0x13, 0x13, 0x3b, 0x56, - 0x07, 0x4a, 0x4f, 0x7d, 0x1f, 0xf3, 0x6b, 0xb3, 0x34, 0xdd, 0xc4, 0xee, 0x70, 0x6e, 0x7a, 0x57, - 0x88, 0x89, 0x1d, 0x7b, 0x09, 0x9b, 0x59, 0x48, 0x9f, 0x2a, 0xbd, 0x21, 0x16, 0x9f, 0x81, 0x82, - 0xc7, 0xb1, 0xa2, 0x0a, 0x64, 0xfb, 0xd8, 0x34, 0x95, 0x13, 0x4c, 0xfd, 0x70, 0x5e, 0xb2, 0x87, - 0x62, 0x09, 0x16, 0xbc, 0xce, 0x54, 0xfc, 0x38, 0xee, 0x48, 0x12, 0x3f, 0x49, 0x24, 0x4f, 0xb1, - 0x41, 0x97, 0xcd, 0x25, 0xf9, 0x10, 0x5d, 0x86, 0x22, 0x9d, 0xb2, 0x6c, 0x3f, 0x27, 0xce, 0x3a, - 0x25, 0x2d, 0x50, 0xe2, 0x11, 0x67, 0x5a, 0x85, 0x82, 0xbe, 0xae, 0x3b, 0x2c, 0x49, 0xca, 0x02, - 0xfa, 0xba, 0x6e, 0x33, 0x3c, 0x09, 0x0b, 0x64, 0x7d, 0x0e, 0x47, 0x8a, 0xbe, 0xa4, 0x40, 0x68, - 0x9c, 0x45, 0xfc, 0x63, 0x02, 0x84, 0x71, 0x07, 0x8c, 0x6e, 0x41, 0x8a, 0xc4, 0x22, 0x1e, 0x56, - 0xaa, 0x6b, 0x2c, 0x50, 0xad, 0xd9, 0x81, 0x6a, 0xad, 0x69, 0x07, 0xaa, 0xcd, 0xdc, 0xa7, 0x9f, - 0xaf, 0xc6, 0x3e, 0xfe, 0xeb, 0x6a, 0x5c, 0xa2, 0x12, 0xe8, 0x3c, 0xf1, 0x95, 0x8a, 0x3a, 
0x90, - 0xd5, 0x0e, 0x9d, 0x72, 0x9e, 0x38, 0x42, 0x45, 0x1d, 0x6c, 0x77, 0xd0, 0x0e, 0x08, 0x6d, 0x6d, - 0x60, 0xe2, 0x81, 0x39, 0x34, 0x65, 0x16, 0x08, 0x79, 0x30, 0xf1, 0xb9, 0x43, 0x16, 0x5e, 0x6b, - 0x36, 0xe7, 0x3e, 0x65, 0x94, 0xca, 0x6d, 0x3f, 0x81, 0xb8, 0xd5, 0x53, 0xa5, 0xa7, 0x76, 0x14, - 0x4b, 0x33, 0xcc, 0x4a, 0xea, 0x52, 0x32, 0xd0, 0x1f, 0x1e, 0xd9, 0x2c, 0x87, 0x7a, 0x47, 0xb1, - 0xf0, 0x66, 0x8a, 0x4c, 0x57, 0xf2, 0x48, 0xa2, 0xa7, 0xa1, 0xac, 0xe8, 0xba, 0x6c, 0x5a, 0x8a, - 0x85, 0xe5, 0xd6, 0x99, 0x85, 0x4d, 0x1a, 0x68, 0x16, 0xa4, 0xa2, 0xa2, 0xeb, 0x07, 0x84, 0xba, - 0x49, 0x88, 0xe8, 0x29, 0x28, 0x91, 0x98, 0xa4, 0x2a, 0x3d, 0xb9, 0x8b, 0xd5, 0x93, 0xae, 0x45, - 0x43, 0x4a, 0x52, 0x2a, 0x72, 0x6a, 0x83, 0x12, 0xc5, 0x8e, 0x73, 0xe2, 0x34, 0x1e, 0x21, 0x04, - 0xa9, 0x8e, 0x62, 0x29, 0x74, 0x27, 0x17, 0x24, 0xfa, 0x9b, 0xd0, 0x74, 0xc5, 0xea, 0xf2, 0xfd, - 0xa1, 0xbf, 0xd1, 0x39, 0xc8, 0x70, 0xb5, 0x49, 0xaa, 0x96, 0x8f, 0xd0, 0x32, 0xa4, 0x75, 0x43, - 0x3b, 0xc5, 0xf4, 0xe8, 0x72, 0x12, 0x1b, 0x88, 0x1f, 0x24, 0x60, 0x71, 0x22, 0x72, 0x11, 0xbd, - 0x5d, 0xc5, 0xec, 0xda, 0xef, 0x22, 0xbf, 0xd1, 0x8b, 0x44, 0xaf, 0xd2, 0xc1, 0x06, 0x8f, 0xf6, - 0x95, 0xc9, 0xad, 0x6e, 0xd0, 0xe7, 0x7c, 0x6b, 0x38, 0x37, 0xba, 0x07, 0x42, 0x4f, 0x31, 0x2d, - 0x99, 0x79, 0x7f, 0xd9, 0x13, 0xf9, 0x9f, 0x98, 0xd8, 0x64, 0x16, 0x2b, 0x88, 0x41, 0x73, 0x25, - 0x25, 0x22, 0xea, 0x52, 0xd1, 0x21, 0x2c, 0xb7, 0xce, 0xde, 0x57, 0x06, 0x96, 0x3a, 0xc0, 0xf2, - 0xc4, 0xa9, 0x4d, 0xa6, 0x12, 0xf7, 0x55, 0xb3, 0x85, 0xbb, 0xca, 0xa9, 0xaa, 0xd9, 0xd3, 0x5a, - 0x72, 0xe4, 0x9d, 0x13, 0x35, 0x45, 0x09, 0x4a, 0xfe, 0xb0, 0x8b, 0x4a, 0x90, 0xb0, 0x46, 0x7c, - 0xfd, 0x09, 0x6b, 0x84, 0x5e, 0x80, 0x14, 0x59, 0x23, 0x5d, 0x7b, 0x29, 0xe0, 0x45, 0x5c, 0xae, - 0x79, 0xa6, 0x63, 0x89, 0x72, 0x8a, 0xa2, 0x73, 0x1b, 0x9c, 0x50, 0x3c, 0xae, 0x55, 0xbc, 0x0a, - 0xe5, 0xb1, 0x38, 0xeb, 0x39, 0xbe, 0xb8, 0xf7, 0xf8, 0xc4, 0x32, 0x14, 0x7d, 0x01, 0x55, 0x3c, - 0x07, 0xcb, 0x41, 0xf1, 0x51, 
0xec, 0x3a, 0x74, 0x5f, 0x9c, 0x43, 0x37, 0x21, 0xe7, 0x04, 0x48, - 0x76, 0x1b, 0xcf, 0x4f, 0xac, 0xc2, 0x66, 0x96, 0x1c, 0x56, 0x72, 0x0d, 0x89, 0x55, 0x53, 0x73, - 0x48, 0xd0, 0x89, 0x67, 0x15, 0x5d, 0x6f, 0x28, 0x66, 0x57, 0x7c, 0x1b, 0x2a, 0x61, 0xc1, 0x6f, - 0x6c, 0x19, 0x29, 0xc7, 0x0a, 0xcf, 0x41, 0xe6, 0x58, 0x33, 0xfa, 0x8a, 0x45, 0x95, 0x15, 0x25, - 0x3e, 0x22, 0xd6, 0xc9, 0x02, 0x61, 0x92, 0x92, 0xd9, 0x40, 0x94, 0xe1, 0x7c, 0x68, 0x00, 0x24, - 0x22, 0xea, 0xa0, 0x83, 0xd9, 0x7e, 0x16, 0x25, 0x36, 0x70, 0x15, 0xb1, 0xc9, 0xb2, 0x01, 0x79, - 0xad, 0x49, 0xd7, 0x4a, 0xf5, 0xe7, 0x25, 0x3e, 0x12, 0x7f, 0x9b, 0x84, 0x73, 0xc1, 0x61, 0x10, - 0x5d, 0x82, 0x85, 0xbe, 0x32, 0x92, 0xad, 0x11, 0xbf, 0xcb, 0xec, 0x38, 0xa0, 0xaf, 0x8c, 0x9a, - 0x23, 0x76, 0x91, 0x05, 0x48, 0x5a, 0x23, 0xb3, 0x92, 0xb8, 0x94, 0xbc, 0xb2, 0x20, 0x91, 0x9f, - 0xe8, 0x10, 0x16, 0x7b, 0x5a, 0x5b, 0xe9, 0xc9, 0x1e, 0x8b, 0xe7, 0xc6, 0x7e, 0x79, 0x62, 0xb3, - 0x59, 0x40, 0xc3, 0x9d, 0x09, 0xa3, 0x2f, 0x53, 0x1d, 0x3b, 0x8e, 0xe5, 0x7f, 0x43, 0x56, 0xef, - 0x39, 0xa3, 0xb4, 0xcf, 0x53, 0xd8, 0x3e, 0x3b, 0x33, 0xb7, 0xcf, 0x7e, 0x01, 0x96, 0x07, 0x78, - 0x64, 0x79, 0xe6, 0xc8, 0x0c, 0x27, 0x4b, 0xcf, 0x02, 0x91, 0x67, 0xee, 0xfb, 0x89, 0x0d, 0xa1, - 0xab, 0x34, 0xb3, 0xd0, 0x35, 0x13, 0x1b, 0xb2, 0xd2, 0xe9, 0x18, 0xd8, 0x34, 0x69, 0x66, 0xbb, - 0x40, 0xd3, 0x05, 0x4a, 0xdf, 0x60, 0x64, 0xf1, 0x17, 0xde, 0xb3, 0xf2, 0x67, 0x12, 0xfc, 0x24, - 0xe2, 0xee, 0x49, 0x1c, 0xc0, 0x32, 0x97, 0xef, 0xf8, 0x0e, 0x23, 0x11, 0xd5, 0xf3, 0x20, 0x5b, - 0x3c, 0xc2, 0x39, 0x24, 0x1f, 0xed, 0x1c, 0x6c, 0x6f, 0x9b, 0xf2, 0x78, 0xdb, 0xff, 0xb2, 0xb3, - 0x79, 0xcd, 0x89, 0x22, 0x6e, 0x9a, 0x16, 0x18, 0x45, 0xdc, 0x75, 0x25, 0x7c, 0xee, 0xed, 0x97, - 0x71, 0xa8, 0x86, 0xe7, 0x65, 0x81, 0xaa, 0x9e, 0x85, 0x45, 0x67, 0x2d, 0xce, 0xfc, 0xd8, 0xad, - 0x17, 0x9c, 0x07, 0x7c, 0x82, 0xa1, 0x51, 0xf1, 0x29, 0x28, 0x8d, 0x65, 0x8d, 0xec, 0x14, 0x8a, - 0xa7, 0xde, 0xf7, 0x8b, 0x3f, 0x4d, 0x3a, 0x5e, 0xd5, 0x97, 0xda, 
0x05, 0x58, 0xde, 0x1b, 0xb0, - 0xd4, 0xc1, 0x6d, 0xb5, 0xf3, 0x55, 0x0d, 0x6f, 0x91, 0x4b, 0x7f, 0x67, 0x77, 0x11, 0xec, 0xee, - 0xcf, 0x05, 0xc8, 0x49, 0xd8, 0xd4, 0x49, 0x4a, 0x87, 0x36, 0x21, 0x8f, 0x47, 0x6d, 0xac, 0x5b, - 0x76, 0x16, 0x1c, 0x5c, 0x4d, 0x30, 0xee, 0xba, 0xcd, 0x49, 0x6a, 0x63, 0x47, 0x0c, 0xdd, 0xe0, - 0x30, 0x48, 0x38, 0xa2, 0xc1, 0xc5, 0xbd, 0x38, 0xc8, 0x8b, 0x36, 0x0e, 0x92, 0x0c, 0x2d, 0x85, - 0x99, 0xd4, 0x18, 0x10, 0x72, 0x83, 0x03, 0x21, 0xa9, 0x19, 0x2f, 0xf3, 0x21, 0x21, 0x35, 0x1f, - 0x12, 0x92, 0x9e, 0xb1, 0xcc, 0x10, 0x28, 0xe4, 0x45, 0x1b, 0x0a, 0xc9, 0xcc, 0x98, 0xf1, 0x18, - 0x16, 0x72, 0xd7, 0x8f, 0x85, 0x64, 0x43, 0x42, 0x9b, 0x2d, 0x3d, 0x15, 0x0c, 0x79, 0xd5, 0x03, - 0x86, 0xe4, 0x42, 0x51, 0x08, 0xa6, 0x28, 0x00, 0x0d, 0x79, 0xdd, 0x87, 0x86, 0xe4, 0x67, 0xec, - 0xc3, 0x14, 0x38, 0x64, 0xcb, 0x0b, 0x87, 0x40, 0x28, 0xaa, 0xc2, 0xcf, 0x3d, 0x0c, 0x0f, 0x79, - 0xd9, 0xc1, 0x43, 0x0a, 0xa1, 0xc0, 0x0e, 0x5f, 0xcb, 0x38, 0x20, 0xb2, 0x37, 0x01, 0x88, 0x30, - 0x00, 0xe3, 0xe9, 0x50, 0x15, 0x33, 0x10, 0x91, 0xbd, 0x09, 0x44, 0xa4, 0x38, 0x43, 0xe1, 0x0c, - 0x48, 0xe4, 0x07, 0xc1, 0x90, 0x48, 0x38, 0x68, 0xc1, 0xa7, 0x19, 0x0d, 0x13, 0x91, 0x43, 0x30, - 0x91, 0x72, 0x68, 0xfd, 0xce, 0xd4, 0x47, 0x06, 0x45, 0x0e, 0x03, 0x40, 0x11, 0x06, 0x5f, 0x5c, - 0x09, 0x55, 0x1e, 0x01, 0x15, 0x39, 0x0c, 0x40, 0x45, 0x16, 0x67, 0xaa, 0x9d, 0x09, 0x8b, 0xdc, - 0xf1, 0xc3, 0x22, 0x68, 0xc6, 0x1d, 0x0b, 0xc5, 0x45, 0x5a, 0x61, 0xb8, 0x08, 0xc3, 0x2e, 0x9e, - 0x0b, 0xd5, 0x38, 0x07, 0x30, 0xb2, 0x37, 0x01, 0x8c, 0x2c, 0xcf, 0xb0, 0xb4, 0xa8, 0xc8, 0xc8, - 0x55, 0x92, 0x51, 0x8c, 0xb9, 0x6a, 0x92, 0xdc, 0x63, 0xc3, 0xd0, 0x0c, 0x8e, 0x71, 0xb0, 0x81, - 0x78, 0x85, 0x54, 0xca, 0xae, 0x5b, 0x9e, 0x82, 0xa2, 0xd0, 0x22, 0xca, 0xe3, 0x8a, 0xc5, 0xdf, - 0xc5, 0x5d, 0x59, 0x5a, 0x60, 0x7a, 0xab, 0xec, 0x3c, 0xaf, 0xb2, 0x3d, 0xd8, 0x4a, 0xc2, 0x8f, - 0xad, 0xac, 0x42, 0x81, 0x14, 0x47, 0x63, 0xb0, 0x89, 0xa2, 0x3b, 0xb0, 0xc9, 0x35, 0x58, 0xa4, - 0x49, 
0x00, 0x43, 0x60, 0x78, 0x64, 0x4d, 0xd1, 0xc8, 0x5a, 0x26, 0x0f, 0xd8, 0x2e, 0xb0, 0x10, - 0xfb, 0x3c, 0x2c, 0x79, 0x78, 0x9d, 0xa2, 0x8b, 0x61, 0x08, 0x82, 0xc3, 0xbd, 0xc1, 0xab, 0xaf, - 0x3f, 0xc4, 0xdd, 0x1d, 0x72, 0xf1, 0x96, 0x20, 0x68, 0x24, 0xfe, 0x35, 0x41, 0x23, 0x89, 0xaf, - 0x0c, 0x8d, 0x78, 0x8b, 0xc8, 0xa4, 0xbf, 0x88, 0xfc, 0x67, 0xdc, 0x3d, 0x13, 0x07, 0xe8, 0x68, - 0x6b, 0x1d, 0xcc, 0xcb, 0x3a, 0xfa, 0x9b, 0xa4, 0x59, 0x3d, 0xed, 0x84, 0x17, 0x6f, 0xe4, 0x27, - 0xe1, 0x72, 0x62, 0x67, 0x9e, 0x87, 0x46, 0xa7, 0x22, 0x64, 0xb9, 0x0b, 0xaf, 0x08, 0x05, 0x48, - 0x3e, 0xc4, 0x2c, 0xd2, 0x2d, 0x48, 0xe4, 0x27, 0xe1, 0xa3, 0x46, 0xc6, 0x73, 0x10, 0x36, 0x40, - 0xb7, 0x20, 0x4f, 0xdb, 0x35, 0xb2, 0xa6, 0x9b, 0x3c, 0x20, 0xf9, 0xd2, 0x35, 0xd6, 0x95, 0x59, - 0xdb, 0x27, 0x3c, 0x7b, 0xba, 0x29, 0xe5, 0x74, 0xfe, 0xcb, 0x93, 0x34, 0xe5, 0x7d, 0x49, 0xd3, - 0x05, 0xc8, 0x93, 0xd9, 0x9b, 0xba, 0xd2, 0xc6, 0x34, 0xb2, 0xe4, 0x25, 0x97, 0x20, 0x3e, 0x00, - 0x34, 0x19, 0x27, 0x51, 0x03, 0x32, 0xf8, 0x14, 0x0f, 0x2c, 0x96, 0x53, 0x16, 0xd6, 0xcf, 0x4d, - 0xd6, 0x8d, 0xe4, 0xf1, 0x66, 0x85, 0x6c, 0xf2, 0x3f, 0x3e, 0x5f, 0x15, 0x18, 0xf7, 0x73, 0x5a, - 0x5f, 0xb5, 0x70, 0x5f, 0xb7, 0xce, 0x24, 0x2e, 0x2f, 0xfe, 0x3d, 0x0e, 0xe5, 0xb1, 0xf8, 0x19, - 0xb8, 0xb7, 0xb6, 0xc9, 0x27, 0x3c, 0xc0, 0xd2, 0x45, 0x80, 0x13, 0xc5, 0x94, 0xdf, 0x53, 0x06, - 0x16, 0xee, 0xf0, 0xed, 0xcc, 0x9f, 0x28, 0xe6, 0x9b, 0x94, 0xe0, 0x5f, 0x58, 0x6e, 0x6c, 0x61, - 0x9e, 0x62, 0x3b, 0xef, 0x2d, 0xb6, 0x51, 0x15, 0x72, 0xba, 0xa1, 0x6a, 0x86, 0x6a, 0x9d, 0xd1, - 0xdd, 0x48, 0x4a, 0xce, 0x18, 0x5d, 0x86, 0x62, 0x1f, 0xf7, 0x75, 0x4d, 0xeb, 0xc9, 0xec, 0x86, - 0x17, 0xa8, 0xe8, 0x02, 0x27, 0xd6, 0x09, 0xed, 0x6e, 0x2a, 0x97, 0x14, 0x52, 0x77, 0x53, 0xb9, - 0x94, 0x90, 0xbe, 0x9b, 0xca, 0x65, 0x84, 0xec, 0xdd, 0x54, 0x2e, 0x2b, 0xe4, 0xc4, 0x0f, 0x13, - 0xee, 0x55, 0x70, 0xc1, 0x96, 0xa8, 0x6b, 0x8d, 0x66, 0x5b, 0x2b, 0x01, 0x3b, 0xe2, 0xa1, 0x90, - 0xc5, 0x91, 0xd1, 0xd0, 0xc4, 0x1d, 0x8e, 
0xe7, 0x39, 0x63, 0xcf, 0x99, 0x66, 0x1f, 0xed, 0x4c, - 0xa7, 0x6f, 0xbc, 0xf8, 0x63, 0x8a, 0xc0, 0xfa, 0x13, 0x15, 0x74, 0xe0, 0x2d, 0x93, 0x86, 0xf4, - 0x86, 0xda, 0xb6, 0x15, 0xf5, 0x2a, 0xbb, 0xe5, 0x14, 0x23, 0x9b, 0xe8, 0x2d, 0x78, 0x7c, 0xcc, - 0xcd, 0x38, 0xaa, 0x13, 0x51, 0xbd, 0xcd, 0x63, 0x7e, 0x6f, 0x63, 0xab, 0x76, 0x37, 0x2b, 0xf9, - 0x88, 0x17, 0x60, 0x1b, 0x4a, 0xfe, 0x9c, 0x2b, 0xf0, 0xf8, 0x2f, 0x43, 0xd1, 0xc0, 0x96, 0xa2, - 0x0e, 0x64, 0x5f, 0x81, 0xb8, 0xc0, 0x88, 0x1c, 0x8c, 0xdd, 0x87, 0xc7, 0x02, 0x73, 0x2f, 0xf4, - 0x12, 0xe4, 0xdd, 0xb4, 0x8d, 0xed, 0xea, 0x14, 0x58, 0xcd, 0xe5, 0x15, 0x7f, 0x1f, 0x77, 0x55, - 0xfa, 0x81, 0xba, 0x3a, 0x64, 0x0c, 0x6c, 0x0e, 0x7b, 0x0c, 0x3a, 0x2b, 0xad, 0x3f, 0x1f, 0x2d, - 0x6b, 0x23, 0xd4, 0x61, 0xcf, 0x92, 0xb8, 0xb0, 0xf8, 0x00, 0x32, 0x8c, 0x82, 0x0a, 0x90, 0x3d, - 0xdc, 0xbd, 0xb7, 0xbb, 0xf7, 0xe6, 0xae, 0x10, 0x43, 0x00, 0x99, 0x8d, 0x5a, 0xad, 0xbe, 0xdf, - 0x14, 0xe2, 0x28, 0x0f, 0xe9, 0x8d, 0xcd, 0x3d, 0xa9, 0x29, 0x24, 0x08, 0x59, 0xaa, 0xdf, 0xad, - 0xd7, 0x9a, 0x42, 0x12, 0x2d, 0x42, 0x91, 0xfd, 0x96, 0xef, 0xec, 0x49, 0xf7, 0x37, 0x9a, 0x42, - 0xca, 0x43, 0x3a, 0xa8, 0xef, 0x6e, 0xd5, 0x25, 0x21, 0x2d, 0xfe, 0x0f, 0x9c, 0x0f, 0xcd, 0xf3, - 0x5c, 0x14, 0x2e, 0xee, 0x41, 0xe1, 0xc4, 0x9f, 0x27, 0x48, 0x91, 0x1f, 0x96, 0xbc, 0xa1, 0xbb, - 0x63, 0x0b, 0x5f, 0x9f, 0x23, 0xf3, 0x1b, 0x5b, 0x3d, 0xa9, 0xeb, 0x0d, 0x7c, 0x8c, 0xad, 0x76, - 0x97, 0x25, 0x93, 0x2c, 0x7a, 0x15, 0xa5, 0x22, 0xa7, 0x52, 0x21, 0x93, 0xb1, 0xbd, 0x83, 0xdb, - 0x96, 0xcc, 0x7c, 0x14, 0x33, 0xba, 0x3c, 0x61, 0x23, 0xd4, 0x03, 0x46, 0x14, 0xdf, 0x9e, 0x6b, - 0x2f, 0xf3, 0x90, 0x96, 0xea, 0x4d, 0xe9, 0x2d, 0x21, 0x89, 0x10, 0x94, 0xe8, 0x4f, 0xf9, 0x60, - 0x77, 0x63, 0xff, 0xa0, 0xb1, 0x47, 0xf6, 0x72, 0x09, 0xca, 0xf6, 0x5e, 0xda, 0xc4, 0xb4, 0xf8, - 0xa7, 0x04, 0x3c, 0x1e, 0x92, 0x7a, 0xa2, 0x5b, 0x00, 0xd6, 0x48, 0x36, 0x70, 0x5b, 0x33, 0x3a, - 0xe1, 0x46, 0xd6, 0x1c, 0x49, 0x94, 0x43, 0xca, 0x5b, 0xfc, 0x97, 0x39, 0x05, 
0xbc, 0x45, 0xaf, - 0x70, 0xa5, 0x64, 0x55, 0xf6, 0x55, 0xbb, 0x18, 0x80, 0x51, 0xe2, 0x36, 0x51, 0x4c, 0xf7, 0x96, - 0x2a, 0xa6, 0xfc, 0xe8, 0x7e, 0x90, 0x53, 0x89, 0xd8, 0x3a, 0x99, 0xcf, 0x9d, 0xa4, 0x1f, 0xcd, - 0x9d, 0x88, 0xbf, 0x4a, 0x7a, 0x37, 0xd6, 0x9f, 0x69, 0xef, 0x41, 0xc6, 0xb4, 0x14, 0x6b, 0x68, - 0x72, 0x83, 0x7b, 0x29, 0x6a, 0xda, 0xbe, 0x66, 0xff, 0x38, 0xa0, 0xe2, 0x12, 0x57, 0xf3, 0xdd, - 0x7e, 0x9b, 0xe2, 0x4d, 0x28, 0xf9, 0x37, 0x27, 0xfc, 0xca, 0xb8, 0x3e, 0x27, 0x21, 0xde, 0x76, - 0x93, 0x21, 0x0f, 0x82, 0x38, 0x89, 0xce, 0xc5, 0x83, 0xd0, 0xb9, 0x5f, 0xc7, 0xe1, 0x89, 0x29, - 0xc5, 0x0b, 0x7a, 0x63, 0xec, 0x9c, 0x5f, 0x9e, 0xa7, 0xf4, 0x59, 0x63, 0x34, 0xff, 0x49, 0x8b, - 0x37, 0x60, 0xc1, 0x4b, 0x8f, 0xb6, 0xc8, 0x9f, 0x24, 0x5d, 0x9f, 0xef, 0x87, 0x11, 0xbf, 0xb6, - 0xac, 0x6f, 0xcc, 0xce, 0x12, 0x73, 0xda, 0x59, 0x60, 0xb2, 0x90, 0xfc, 0xe6, 0x92, 0x85, 0xd4, - 0x23, 0x26, 0x0b, 0xde, 0x0b, 0x97, 0xf6, 0x5f, 0xb8, 0x89, 0xb8, 0x9e, 0x09, 0x88, 0xeb, 0x6f, - 0x01, 0x78, 0xba, 0x8b, 0xcb, 0x90, 0x36, 0xb4, 0xe1, 0xa0, 0x43, 0xcd, 0x24, 0x2d, 0xb1, 0x01, - 0xba, 0x09, 0x69, 0x62, 0x6e, 0xf6, 0x66, 0x4e, 0x7a, 0x5e, 0x62, 0x2e, 0x1e, 0x00, 0x97, 0x71, - 0x8b, 0x2a, 0xa0, 0xc9, 0x0e, 0x4f, 0xc8, 0x2b, 0x5e, 0xf5, 0xbf, 0xe2, 0xc9, 0xd0, 0x5e, 0x51, - 0xf0, 0xab, 0xde, 0x87, 0x34, 0x35, 0x0f, 0x92, 0xdf, 0xd0, 0x2e, 0x25, 0xaf, 0x5e, 0xc9, 0x6f, - 0xf4, 0x43, 0x00, 0xc5, 0xb2, 0x0c, 0xb5, 0x35, 0x74, 0x5f, 0xb0, 0x1a, 0x6c, 0x5e, 0x1b, 0x36, - 0xdf, 0xe6, 0x05, 0x6e, 0x67, 0xcb, 0xae, 0xa8, 0xc7, 0xd6, 0x3c, 0x0a, 0xc5, 0x5d, 0x28, 0xf9, - 0x65, 0xed, 0x7a, 0x8b, 0xcd, 0xc1, 0x5f, 0x6f, 0xb1, 0xf2, 0x99, 0xd7, 0x5b, 0x4e, 0xb5, 0x96, - 0x64, 0x0d, 0x69, 0x3a, 0x10, 0xff, 0x15, 0x87, 0x05, 0xaf, 0x75, 0x7e, 0xcd, 0x69, 0xfc, 0x8c, - 0xc2, 0xe6, 0xfc, 0x44, 0x16, 0x9f, 0x3d, 0x51, 0xcc, 0xc3, 0x6f, 0x33, 0x89, 0xff, 0x30, 0x0e, - 0x39, 0x67, 0xf1, 0x21, 0xdd, 0x60, 0x77, 0xef, 0x12, 0xde, 0xde, 0x27, 0x6b, 0x2f, 0x27, 0x9d, - 0xa6, 0xf5, 0x6d, 
0x27, 0xa1, 0x0a, 0x43, 0x98, 0xbd, 0x3b, 0x6d, 0xf7, 0xed, 0x79, 0xfe, 0xf8, - 0x33, 0x3e, 0x0f, 0x92, 0x49, 0xa0, 0xff, 0x83, 0x8c, 0xd2, 0x76, 0x70, 0xf5, 0x52, 0x00, 0xd0, - 0x6a, 0xb3, 0xae, 0x35, 0x47, 0x1b, 0x94, 0x53, 0xe2, 0x12, 0x7c, 0x56, 0x09, 0xa7, 0xe9, 0xfd, - 0x1a, 0xd1, 0xcb, 0x78, 0xfc, 0x6e, 0xb3, 0x04, 0x70, 0xb8, 0x7b, 0x7f, 0x6f, 0x6b, 0xfb, 0xce, - 0x76, 0x7d, 0x8b, 0xa7, 0x54, 0x5b, 0x5b, 0xf5, 0x2d, 0x21, 0x41, 0xf8, 0xa4, 0xfa, 0xfd, 0xbd, - 0xa3, 0xfa, 0x96, 0x90, 0x14, 0x6f, 0x43, 0xde, 0x71, 0x3d, 0xa8, 0x02, 0x59, 0xbb, 0x47, 0x10, - 0xe7, 0x0e, 0x80, 0xb7, 0x7c, 0x96, 0x21, 0xad, 0x6b, 0xef, 0xf1, 0x96, 0x6f, 0x52, 0x62, 0x03, - 0xb1, 0x03, 0xe5, 0x31, 0xbf, 0x85, 0x6e, 0x43, 0x56, 0x1f, 0xb6, 0x64, 0xdb, 0x68, 0xc7, 0x3a, - 0x2a, 0x76, 0xd9, 0x3f, 0x6c, 0xf5, 0xd4, 0xf6, 0x3d, 0x7c, 0x66, 0x6f, 0x93, 0x3e, 0x6c, 0xdd, - 0x63, 0xb6, 0xcd, 0xde, 0x92, 0xf0, 0xbe, 0xe5, 0x14, 0x72, 0xf6, 0x55, 0x45, 0xff, 0x0f, 0x79, - 0xc7, 0x25, 0x3a, 0xdf, 0xc1, 0x84, 0xfa, 0x52, 0xae, 0xde, 0x15, 0x41, 0xd7, 0x60, 0xd1, 0x54, - 0x4f, 0x06, 0x76, 0x3f, 0x89, 0xc1, 0x6c, 0x09, 0x7a, 0x67, 0xca, 0xec, 0xc1, 0x8e, 0x8d, 0x0d, - 0x91, 0x48, 0x28, 0x8c, 0xfb, 0x8a, 0x6f, 0x73, 0x02, 0x01, 0x11, 0x3b, 0x19, 0x14, 0xb1, 0x3f, - 0x48, 0x40, 0xc1, 0xd3, 0xa5, 0x42, 0xff, 0xeb, 0x71, 0x5c, 0xa5, 0x80, 0x50, 0xe3, 0xe1, 0x75, - 0x3f, 0xb1, 0xf0, 0x2f, 0x2c, 0x31, 0xff, 0xc2, 0xc2, 0x9a, 0x82, 0x76, 0xb3, 0x2b, 0x35, 0x77, - 0xb3, 0xeb, 0x39, 0x40, 0x96, 0x66, 0x29, 0x3d, 0xf9, 0x54, 0xb3, 0xd4, 0xc1, 0x89, 0xcc, 0x4c, - 0x83, 0xb9, 0x19, 0x81, 0x3e, 0x39, 0xa2, 0x0f, 0xf6, 0xa9, 0x95, 0xfc, 0x28, 0x0e, 0x39, 0xa7, - 0xec, 0x9b, 0xf7, 0x8b, 0x89, 0x73, 0x90, 0xe1, 0x95, 0x0d, 0xfb, 0x64, 0x82, 0x8f, 0x02, 0xbb, - 0x7a, 0x55, 0xc8, 0xf5, 0xb1, 0xa5, 0x50, 0x9f, 0xc9, 0xc2, 0xa4, 0x33, 0xbe, 0xf6, 0x32, 0x14, - 0x3c, 0x1f, 0xaf, 0x10, 0x37, 0xba, 0x5b, 0x7f, 0x53, 0x88, 0x55, 0xb3, 0x1f, 0x7d, 0x72, 0x29, - 0xb9, 0x8b, 0xdf, 0x23, 0x37, 0x4c, 0xaa, 0xd7, 0x1a, 
0xf5, 0xda, 0x3d, 0x21, 0x5e, 0x2d, 0x7c, - 0xf4, 0xc9, 0xa5, 0xac, 0x84, 0x69, 0x13, 0xe6, 0xda, 0x3d, 0x28, 0x8f, 0x1d, 0x8c, 0xff, 0x42, - 0x23, 0x28, 0x6d, 0x1d, 0xee, 0xef, 0x6c, 0xd7, 0x36, 0x9a, 0x75, 0xf9, 0x68, 0xaf, 0x59, 0x17, - 0xe2, 0xe8, 0x71, 0x58, 0xda, 0xd9, 0x7e, 0xbd, 0xd1, 0x94, 0x6b, 0x3b, 0xdb, 0xf5, 0xdd, 0xa6, - 0xbc, 0xd1, 0x6c, 0x6e, 0xd4, 0xee, 0x09, 0x89, 0xf5, 0xdf, 0x14, 0xa0, 0xbc, 0xb1, 0x59, 0xdb, - 0x26, 0xb5, 0x9d, 0xda, 0x56, 0xa8, 0x7b, 0xa8, 0x41, 0x8a, 0x22, 0xba, 0x53, 0x3f, 0x47, 0xae, - 0x4e, 0xef, 0xd2, 0xa1, 0x3b, 0x90, 0xa6, 0x60, 0x2f, 0x9a, 0xfe, 0x7d, 0x72, 0x75, 0x46, 0xdb, - 0x8e, 0x4c, 0x86, 0x5e, 0xa7, 0xa9, 0x1f, 0x2c, 0x57, 0xa7, 0x77, 0xf1, 0xd0, 0x0e, 0x64, 0x6d, - 0x2c, 0x6e, 0xd6, 0xa7, 0xbf, 0xd5, 0x99, 0xed, 0x30, 0xb2, 0x34, 0x86, 0x99, 0x4e, 0xff, 0x96, - 0xb9, 0x3a, 0xa3, 0xbf, 0x87, 0xb6, 0x21, 0xc3, 0x11, 0x92, 0x19, 0x9f, 0xf1, 0x56, 0x67, 0xb5, - 0xb5, 0x90, 0x04, 0x79, 0x17, 0x8d, 0x9e, 0xfd, 0x85, 0x76, 0x35, 0x42, 0xeb, 0x12, 0x3d, 0x80, - 0xa2, 0x1f, 0x75, 0x89, 0xf6, 0xa9, 0x70, 0x35, 0x62, 0x03, 0x8d, 0xe8, 0xf7, 0x43, 0x30, 0xd1, - 0x3e, 0x1d, 0xae, 0x46, 0xec, 0xa7, 0xa1, 0x77, 0x60, 0x71, 0x12, 0x22, 0x89, 0xfe, 0x25, 0x71, - 0x75, 0x8e, 0x0e, 0x1b, 0xea, 0x03, 0x0a, 0x80, 0x56, 0xe6, 0xf8, 0xb0, 0xb8, 0x3a, 0x4f, 0xc3, - 0x0d, 0x75, 0xa0, 0x3c, 0x0e, 0x57, 0x44, 0xfd, 0xd0, 0xb8, 0x1a, 0xb9, 0xf9, 0xc6, 0xde, 0xe2, - 0xaf, 0xdd, 0xa3, 0x7e, 0x78, 0x5c, 0x8d, 0xdc, 0x8b, 0x43, 0x87, 0x00, 0x9e, 0xda, 0x33, 0xc2, - 0x87, 0xc8, 0xd5, 0x28, 0x5d, 0x39, 0xa4, 0xc3, 0x52, 0x50, 0x51, 0x3a, 0xcf, 0x77, 0xc9, 0xd5, - 0xb9, 0x9a, 0x75, 0xc4, 0x9e, 0xfd, 0xe5, 0x65, 0xb4, 0xef, 0x94, 0xab, 0x11, 0xbb, 0x76, 0x9b, - 0xf5, 0x4f, 0xbf, 0x58, 0x89, 0x7f, 0xf6, 0xc5, 0x4a, 0xfc, 0x6f, 0x5f, 0xac, 0xc4, 0x3f, 0xfe, - 0x72, 0x25, 0xf6, 0xd9, 0x97, 0x2b, 0xb1, 0xbf, 0x7c, 0xb9, 0x12, 0xfb, 0xde, 0xb3, 0x27, 0xaa, - 0xd5, 0x1d, 0xb6, 0xd6, 0xda, 0x5a, 0xff, 0xba, 0xf7, 0x2f, 0x2b, 0x41, 0x7f, 0xa3, 0x69, 
0x65, - 0x68, 0x40, 0xbd, 0xf1, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xce, 0x0a, 0x41, 0x94, 0x66, 0x33, - 0x00, 0x00, + // 3459 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcb, 0x73, 0x23, 0xe5, + 0xb5, 0xd7, 0xfb, 0x71, 0x64, 0x49, 0xed, 0xcf, 0x66, 0xd0, 0x88, 0x19, 0x7b, 0x68, 0x0a, 0x98, + 0x19, 0xc0, 0xc3, 0xf5, 0xdc, 0x81, 0xe1, 0x0e, 0x5c, 0xca, 0x96, 0x35, 0xc8, 0x1e, 0x8f, 0x6d, + 0xda, 0xb2, 0x29, 0xee, 0x4d, 0xa6, 0x69, 0x49, 0x9f, 0xad, 0x66, 0x24, 0x75, 0xd3, 0xdd, 0x32, + 0x32, 0xcb, 0x50, 0x6c, 0xa8, 0x54, 0x85, 0x4d, 0x2a, 0x49, 0x55, 0xd8, 0x25, 0x55, 0xc9, 0x7f, + 0x90, 0x55, 0x56, 0x59, 0xb0, 0xc8, 0x82, 0x55, 0x92, 0x15, 0x49, 0xc1, 0x2e, 0xff, 0x40, 0x76, + 0x49, 0xea, 0x7b, 0xf4, 0x4b, 0xea, 0x96, 0x5a, 0x0c, 0x50, 0x95, 0x2a, 0x76, 0xfa, 0x4e, 0x9f, + 0x73, 0xfa, 0x7b, 0x9c, 0xef, 0x3c, 0x7e, 0xa7, 0x05, 0x4f, 0x58, 0x78, 0xd0, 0xc1, 0x46, 0x5f, + 0x1d, 0x58, 0x37, 0x94, 0x56, 0x5b, 0xbd, 0x61, 0x9d, 0xeb, 0xd8, 0x5c, 0xd3, 0x0d, 0xcd, 0xd2, + 0x50, 0xd9, 0x7d, 0xb8, 0x46, 0x1e, 0x56, 0x2f, 0x7b, 0xb8, 0xdb, 0xc6, 0xb9, 0x6e, 0x69, 0x37, + 0x74, 0x43, 0xd3, 0x4e, 0x18, 0x7f, 0xf5, 0x92, 0xe7, 0x31, 0xd5, 0xe3, 0xd5, 0xe6, 0x7b, 0xca, + 0x85, 0x1f, 0xe2, 0x73, 0xfb, 0xe9, 0xe5, 0x09, 0x59, 0x5d, 0x31, 0x94, 0xbe, 0xfd, 0x78, 0xf5, + 0x54, 0xd3, 0x4e, 0x7b, 0xf8, 0x06, 0x1d, 0xb5, 0x86, 0x27, 0x37, 0x2c, 0xb5, 0x8f, 0x4d, 0x4b, + 0xe9, 0xeb, 0x9c, 0x61, 0xf9, 0x54, 0x3b, 0xd5, 0xe8, 0xcf, 0x1b, 0xe4, 0x17, 0xa3, 0x8a, 0xff, + 0x02, 0xc8, 0x4a, 0xf8, 0xbd, 0x21, 0x36, 0x2d, 0xb4, 0x0e, 0x29, 0xdc, 0xee, 0x6a, 0x95, 0xf8, + 0x95, 0xf8, 0xd5, 0xc2, 0xfa, 0xa5, 0xb5, 0xb1, 0xc5, 0xad, 0x71, 0xbe, 0x7a, 0xbb, 0xab, 0x35, + 0x62, 0x12, 0xe5, 0x45, 0xb7, 0x20, 0x7d, 0xd2, 0x1b, 0x9a, 0xdd, 0x4a, 0x82, 0x0a, 0x5d, 0x0e, + 0x13, 0xba, 0x4b, 0x98, 0x1a, 0x31, 0x89, 0x71, 0x93, 0x57, 0xa9, 0x83, 0x13, 0xad, 0x92, 0x9c, + 0xfe, 0xaa, 0xed, 0xc1, 0x09, 0x7d, 0x15, 0xe1, 0x45, 0x9b, 0x00, 
0xea, 0x40, 0xb5, 0xe4, 0x76, + 0x57, 0x51, 0x07, 0x95, 0x14, 0x95, 0x7c, 0x32, 0x5c, 0x52, 0xb5, 0x6a, 0x84, 0xb1, 0x11, 0x93, + 0xf2, 0xaa, 0x3d, 0x20, 0xd3, 0x7d, 0x6f, 0x88, 0x8d, 0xf3, 0x4a, 0x7a, 0xfa, 0x74, 0xdf, 0x24, + 0x4c, 0x64, 0xba, 0x94, 0x1b, 0x6d, 0x43, 0xa1, 0x85, 0x4f, 0xd5, 0x81, 0xdc, 0xea, 0x69, 0xed, + 0x87, 0x95, 0x0c, 0x15, 0x16, 0xc3, 0x84, 0x37, 0x09, 0xeb, 0x26, 0xe1, 0xdc, 0x4c, 0x54, 0xe2, + 0x8d, 0x98, 0x04, 0x2d, 0x87, 0x82, 0x5e, 0x85, 0x5c, 0xbb, 0x8b, 0xdb, 0x0f, 0x65, 0x6b, 0x54, + 0xc9, 0x52, 0x3d, 0xab, 0x61, 0x7a, 0x6a, 0x84, 0xaf, 0x39, 0x6a, 0xc4, 0xa4, 0x6c, 0x9b, 0xfd, + 0x44, 0x77, 0x01, 0x3a, 0xb8, 0xa7, 0x9e, 0x61, 0x83, 0xc8, 0xe7, 0xa6, 0xef, 0xc1, 0x16, 0xe3, + 0x6c, 0x8e, 0xf8, 0x34, 0xf2, 0x1d, 0x9b, 0x80, 0x6a, 0x90, 0xc7, 0x83, 0x0e, 0x5f, 0x4e, 0x9e, + 0xaa, 0xb9, 0x12, 0x7a, 0xde, 0x83, 0x8e, 0x77, 0x31, 0x39, 0xcc, 0xc7, 0xe8, 0x36, 0x64, 0xda, + 0x5a, 0xbf, 0xaf, 0x5a, 0x15, 0xa0, 0x1a, 0x56, 0x42, 0x17, 0x42, 0xb9, 0x1a, 0x31, 0x89, 0xf3, + 0xa3, 0x3d, 0x28, 0xf5, 0x54, 0xd3, 0x92, 0xcd, 0x81, 0xa2, 0x9b, 0x5d, 0xcd, 0x32, 0x2b, 0x05, + 0xaa, 0xe1, 0xe9, 0x30, 0x0d, 0xbb, 0xaa, 0x69, 0x1d, 0xda, 0xcc, 0x8d, 0x98, 0x54, 0xec, 0x79, + 0x09, 0x44, 0x9f, 0x76, 0x72, 0x82, 0x0d, 0x47, 0x61, 0x65, 0x61, 0xba, 0xbe, 0x7d, 0xc2, 0x6d, + 0xcb, 0x13, 0x7d, 0x9a, 0x97, 0x80, 0xfe, 0x1f, 0x96, 0x7a, 0x9a, 0xd2, 0x71, 0xd4, 0xc9, 0xed, + 0xee, 0x70, 0xf0, 0xb0, 0x52, 0xa4, 0x4a, 0xaf, 0x85, 0x4e, 0x52, 0x53, 0x3a, 0xb6, 0x8a, 0x1a, + 0x11, 0x68, 0xc4, 0xa4, 0xc5, 0xde, 0x38, 0x11, 0x3d, 0x80, 0x65, 0x45, 0xd7, 0x7b, 0xe7, 0xe3, + 0xda, 0x4b, 0x54, 0xfb, 0xf5, 0x30, 0xed, 0x1b, 0x44, 0x66, 0x5c, 0x3d, 0x52, 0x26, 0xa8, 0xa8, + 0x09, 0x82, 0x6e, 0x60, 0x5d, 0x31, 0xb0, 0xac, 0x1b, 0x9a, 0xae, 0x99, 0x4a, 0xaf, 0x52, 0xa6, + 0xba, 0x9f, 0x0d, 0xd3, 0x7d, 0xc0, 0xf8, 0x0f, 0x38, 0x7b, 0x23, 0x26, 0x95, 0x75, 0x3f, 0x89, + 0x69, 0xd5, 0xda, 0xd8, 0x34, 0x5d, 0xad, 0xc2, 0x2c, 0xad, 0x94, 0xdf, 0xaf, 0xd5, 0x47, 0x42, + 0x75, 
0x28, 0xe0, 0x11, 0x11, 0x97, 0xcf, 0x34, 0x0b, 0x57, 0x16, 0xa7, 0x5f, 0xac, 0x3a, 0x65, + 0x3d, 0xd6, 0x2c, 0x4c, 0x2e, 0x15, 0x76, 0x46, 0x48, 0x81, 0xc7, 0xce, 0xb0, 0xa1, 0x9e, 0x9c, + 0x53, 0x35, 0x32, 0x7d, 0x62, 0xaa, 0xda, 0xa0, 0x82, 0xa8, 0xc2, 0xe7, 0xc2, 0x14, 0x1e, 0x53, + 0x21, 0xa2, 0xa2, 0x6e, 0x8b, 0x34, 0x62, 0xd2, 0xd2, 0xd9, 0x24, 0x99, 0x98, 0xd8, 0x89, 0x3a, + 0x50, 0x7a, 0xea, 0x07, 0x98, 0x5f, 0x9b, 0xa5, 0xe9, 0x26, 0x76, 0x97, 0x73, 0xd3, 0xbb, 0x42, + 0x4c, 0xec, 0xc4, 0x4b, 0xd8, 0xcc, 0x42, 0xfa, 0x4c, 0xe9, 0x0d, 0xb1, 0xf8, 0x2c, 0x14, 0x3c, + 0x8e, 0x15, 0x55, 0x20, 0xdb, 0xc7, 0xa6, 0xa9, 0x9c, 0x62, 0xea, 0x87, 0xf3, 0x92, 0x3d, 0x14, + 0x4b, 0xb0, 0xe0, 0x75, 0xa6, 0xe2, 0x27, 0x71, 0x47, 0x92, 0xf8, 0x49, 0x22, 0x79, 0x86, 0x0d, + 0xba, 0x6c, 0x2e, 0xc9, 0x87, 0xe8, 0x29, 0x28, 0xd2, 0x29, 0xcb, 0xf6, 0x73, 0xe2, 0xac, 0x53, + 0xd2, 0x02, 0x25, 0x1e, 0x73, 0xa6, 0x55, 0x28, 0xe8, 0xeb, 0xba, 0xc3, 0x92, 0xa4, 0x2c, 0xa0, + 0xaf, 0xeb, 0x36, 0xc3, 0x93, 0xb0, 0x40, 0xd6, 0xe7, 0x70, 0xa4, 0xe8, 0x4b, 0x0a, 0x84, 0xc6, + 0x59, 0xc4, 0x3f, 0x26, 0x40, 0x18, 0x77, 0xc0, 0xe8, 0x36, 0xa4, 0x48, 0x2c, 0xe2, 0x61, 0xa5, + 0xba, 0xc6, 0x02, 0xd5, 0x9a, 0x1d, 0xa8, 0xd6, 0x9a, 0x76, 0xa0, 0xda, 0xcc, 0x7d, 0xf6, 0xc5, + 0x6a, 0xec, 0x93, 0xbf, 0xae, 0xc6, 0x25, 0x2a, 0x81, 0x2e, 0x12, 0x5f, 0xa9, 0xa8, 0x03, 0x59, + 0xed, 0xd0, 0x29, 0xe7, 0x89, 0x23, 0x54, 0xd4, 0xc1, 0x76, 0x07, 0xed, 0x82, 0xd0, 0xd6, 0x06, + 0x26, 0x1e, 0x98, 0x43, 0x53, 0x66, 0x81, 0x90, 0x07, 0x13, 0x9f, 0x3b, 0x64, 0xe1, 0xb5, 0x66, + 0x73, 0x1e, 0x50, 0x46, 0xa9, 0xdc, 0xf6, 0x13, 0x88, 0x5b, 0x3d, 0x53, 0x7a, 0x6a, 0x47, 0xb1, + 0x34, 0xc3, 0xac, 0xa4, 0xae, 0x24, 0x03, 0xfd, 0xe1, 0xb1, 0xcd, 0x72, 0xa4, 0x77, 0x14, 0x0b, + 0x6f, 0xa6, 0xc8, 0x74, 0x25, 0x8f, 0x24, 0x7a, 0x06, 0xca, 0x8a, 0xae, 0xcb, 0xa6, 0xa5, 0x58, + 0x58, 0x6e, 0x9d, 0x5b, 0xd8, 0xa4, 0x81, 0x66, 0x41, 0x2a, 0x2a, 0xba, 0x7e, 0x48, 0xa8, 0x9b, + 0x84, 0x88, 0x9e, 0x86, 0x12, 0x89, 0x49, 
0xaa, 0xd2, 0x93, 0xbb, 0x58, 0x3d, 0xed, 0x5a, 0x34, + 0xa4, 0x24, 0xa5, 0x22, 0xa7, 0x36, 0x28, 0x51, 0xec, 0x38, 0x27, 0x4e, 0xe3, 0x11, 0x42, 0x90, + 0xea, 0x28, 0x96, 0x42, 0x77, 0x72, 0x41, 0xa2, 0xbf, 0x09, 0x4d, 0x57, 0xac, 0x2e, 0xdf, 0x1f, + 0xfa, 0x1b, 0x5d, 0x80, 0x0c, 0x57, 0x9b, 0xa4, 0x6a, 0xf9, 0x08, 0x2d, 0x43, 0x5a, 0x37, 0xb4, + 0x33, 0x4c, 0x8f, 0x2e, 0x27, 0xb1, 0x81, 0xf8, 0x61, 0x02, 0x16, 0x27, 0x22, 0x17, 0xd1, 0xdb, + 0x55, 0xcc, 0xae, 0xfd, 0x2e, 0xf2, 0x1b, 0xbd, 0x44, 0xf4, 0x2a, 0x1d, 0x6c, 0xf0, 0x68, 0x5f, + 0x99, 0xdc, 0xea, 0x06, 0x7d, 0xce, 0xb7, 0x86, 0x73, 0xa3, 0x7b, 0x20, 0xf4, 0x14, 0xd3, 0x92, + 0x99, 0xf7, 0x97, 0x3d, 0x91, 0xff, 0x89, 0x89, 0x4d, 0x66, 0xb1, 0x82, 0x18, 0x34, 0x57, 0x52, + 0x22, 0xa2, 0x2e, 0x15, 0x1d, 0xc1, 0x72, 0xeb, 0xfc, 0x03, 0x65, 0x60, 0xa9, 0x03, 0x2c, 0x4f, + 0x9c, 0xda, 0x64, 0x2a, 0x71, 0x5f, 0x35, 0x5b, 0xb8, 0xab, 0x9c, 0xa9, 0x9a, 0x3d, 0xad, 0x25, + 0x47, 0xde, 0x39, 0x51, 0x53, 0x94, 0xa0, 0xe4, 0x0f, 0xbb, 0xa8, 0x04, 0x09, 0x6b, 0xc4, 0xd7, + 0x9f, 0xb0, 0x46, 0xe8, 0x45, 0x48, 0x91, 0x35, 0xd2, 0xb5, 0x97, 0x02, 0x5e, 0xc4, 0xe5, 0x9a, + 0xe7, 0x3a, 0x96, 0x28, 0xa7, 0x28, 0x3a, 0xb7, 0xc1, 0x09, 0xc5, 0xe3, 0x5a, 0xc5, 0x6b, 0x50, + 0x1e, 0x8b, 0xb3, 0x9e, 0xe3, 0x8b, 0x7b, 0x8f, 0x4f, 0x2c, 0x43, 0xd1, 0x17, 0x50, 0xc5, 0x0b, + 0xb0, 0x1c, 0x14, 0x1f, 0xc5, 0xae, 0x43, 0xf7, 0xc5, 0x39, 0x74, 0x0b, 0x72, 0x4e, 0x80, 0x64, + 0xb7, 0xf1, 0xe2, 0xc4, 0x2a, 0x6c, 0x66, 0xc9, 0x61, 0x25, 0xd7, 0x90, 0x58, 0x35, 0x35, 0x87, + 0x04, 0x9d, 0x78, 0x56, 0xd1, 0xf5, 0x86, 0x62, 0x76, 0xc5, 0x77, 0xa0, 0x12, 0x16, 0xfc, 0xc6, + 0x96, 0x91, 0x72, 0xac, 0xf0, 0x02, 0x64, 0x4e, 0x34, 0xa3, 0xaf, 0x58, 0x54, 0x59, 0x51, 0xe2, + 0x23, 0x62, 0x9d, 0x2c, 0x10, 0x26, 0x29, 0x99, 0x0d, 0x44, 0x19, 0x2e, 0x86, 0x06, 0x40, 0x22, + 0xa2, 0x0e, 0x3a, 0x98, 0xed, 0x67, 0x51, 0x62, 0x03, 0x57, 0x11, 0x9b, 0x2c, 0x1b, 0x90, 0xd7, + 0x9a, 0x74, 0xad, 0x54, 0x7f, 0x5e, 0xe2, 0x23, 0xf1, 0xb7, 0x49, 0xb8, 0x10, 
0x1c, 0x06, 0xd1, + 0x15, 0x58, 0xe8, 0x2b, 0x23, 0xd9, 0x1a, 0xf1, 0xbb, 0xcc, 0x8e, 0x03, 0xfa, 0xca, 0xa8, 0x39, + 0x62, 0x17, 0x59, 0x80, 0xa4, 0x35, 0x32, 0x2b, 0x89, 0x2b, 0xc9, 0xab, 0x0b, 0x12, 0xf9, 0x89, + 0x8e, 0x60, 0xb1, 0xa7, 0xb5, 0x95, 0x9e, 0xec, 0xb1, 0x78, 0x6e, 0xec, 0x4f, 0x4d, 0x6c, 0x36, + 0x0b, 0x68, 0xb8, 0x33, 0x61, 0xf4, 0x65, 0xaa, 0x63, 0xd7, 0xb1, 0xfc, 0x6f, 0xc9, 0xea, 0x3d, + 0x67, 0x94, 0xf6, 0x79, 0x0a, 0xdb, 0x67, 0x67, 0xe6, 0xf6, 0xd9, 0x2f, 0xc2, 0xf2, 0x00, 0x8f, + 0x2c, 0xcf, 0x1c, 0x99, 0xe1, 0x64, 0xe9, 0x59, 0x20, 0xf2, 0xcc, 0x7d, 0x3f, 0xb1, 0x21, 0x74, + 0x8d, 0x66, 0x16, 0xba, 0x66, 0x62, 0x43, 0x56, 0x3a, 0x1d, 0x03, 0x9b, 0x26, 0xcd, 0x6c, 0x17, + 0x68, 0xba, 0x40, 0xe9, 0x1b, 0x8c, 0x2c, 0xfe, 0xc2, 0x7b, 0x56, 0xfe, 0x4c, 0x82, 0x9f, 0x44, + 0xdc, 0x3d, 0x89, 0x43, 0x58, 0xe6, 0xf2, 0x1d, 0xdf, 0x61, 0x24, 0xa2, 0x7a, 0x1e, 0x64, 0x8b, + 0x47, 0x38, 0x87, 0xe4, 0xa3, 0x9d, 0x83, 0xed, 0x6d, 0x53, 0x1e, 0x6f, 0xfb, 0x1f, 0x76, 0x36, + 0xaf, 0x3b, 0x51, 0xc4, 0x4d, 0xd3, 0x02, 0xa3, 0x88, 0xbb, 0xae, 0x84, 0xcf, 0xbd, 0xfd, 0x32, + 0x0e, 0xd5, 0xf0, 0xbc, 0x2c, 0x50, 0xd5, 0x73, 0xb0, 0xe8, 0xac, 0xc5, 0x99, 0x1f, 0xbb, 0xf5, + 0x82, 0xf3, 0x80, 0x4f, 0x30, 0x34, 0x2a, 0x3e, 0x0d, 0xa5, 0xb1, 0xac, 0x91, 0x9d, 0x42, 0xf1, + 0xcc, 0xfb, 0x7e, 0xf1, 0xa7, 0x49, 0xc7, 0xab, 0xfa, 0x52, 0xbb, 0x00, 0xcb, 0x7b, 0x13, 0x96, + 0x3a, 0xb8, 0xad, 0x76, 0xbe, 0xae, 0xe1, 0x2d, 0x72, 0xe9, 0xef, 0xed, 0x2e, 0x82, 0xdd, 0xfd, + 0xb9, 0x00, 0x39, 0x09, 0x9b, 0x3a, 0x49, 0xe9, 0xd0, 0x26, 0xe4, 0xf1, 0xa8, 0x8d, 0x75, 0xcb, + 0xce, 0x82, 0x83, 0xab, 0x09, 0xc6, 0x5d, 0xb7, 0x39, 0x49, 0x6d, 0xec, 0x88, 0xa1, 0x9b, 0x1c, + 0x06, 0x09, 0x47, 0x34, 0xb8, 0xb8, 0x17, 0x07, 0x79, 0xc9, 0xc6, 0x41, 0x92, 0xa1, 0xa5, 0x30, + 0x93, 0x1a, 0x03, 0x42, 0x6e, 0x72, 0x20, 0x24, 0x35, 0xe3, 0x65, 0x3e, 0x24, 0xa4, 0xe6, 0x43, + 0x42, 0xd2, 0x33, 0x96, 0x19, 0x02, 0x85, 0xbc, 0x64, 0x43, 0x21, 0x99, 0x19, 0x33, 0x1e, 0xc3, + 0x42, 0x76, 0xfc, 
0x58, 0x48, 0x36, 0x24, 0xb4, 0xd9, 0xd2, 0x53, 0xc1, 0x90, 0xd7, 0x3c, 0x60, + 0x48, 0x2e, 0x14, 0x85, 0x60, 0x8a, 0x02, 0xd0, 0x90, 0x37, 0x7c, 0x68, 0x48, 0x7e, 0xc6, 0x3e, + 0x4c, 0x81, 0x43, 0xb6, 0xbc, 0x70, 0x08, 0x84, 0xa2, 0x2a, 0xfc, 0xdc, 0xc3, 0xf0, 0x90, 0x57, + 0x1c, 0x3c, 0xa4, 0x10, 0x0a, 0xec, 0xf0, 0xb5, 0x8c, 0x03, 0x22, 0xfb, 0x13, 0x80, 0x08, 0x03, + 0x30, 0x9e, 0x09, 0x55, 0x31, 0x03, 0x11, 0xd9, 0x9f, 0x40, 0x44, 0x8a, 0x33, 0x14, 0xce, 0x80, + 0x44, 0x7e, 0x10, 0x0c, 0x89, 0x84, 0x83, 0x16, 0x7c, 0x9a, 0xd1, 0x30, 0x11, 0x39, 0x04, 0x13, + 0x29, 0x87, 0xd6, 0xef, 0x4c, 0x7d, 0x64, 0x50, 0xe4, 0x28, 0x00, 0x14, 0x61, 0xf0, 0xc5, 0xd5, + 0x50, 0xe5, 0x11, 0x50, 0x91, 0xa3, 0x00, 0x54, 0x64, 0x71, 0xa6, 0xda, 0x99, 0xb0, 0xc8, 0x5d, + 0x3f, 0x2c, 0x82, 0x66, 0xdc, 0xb1, 0x50, 0x5c, 0xa4, 0x15, 0x86, 0x8b, 0x30, 0xec, 0xe2, 0xf9, + 0x50, 0x8d, 0x73, 0x00, 0x23, 0xfb, 0x13, 0xc0, 0xc8, 0xf2, 0x0c, 0x4b, 0x8b, 0x8a, 0x8c, 0x5c, + 0x23, 0x19, 0xc5, 0x98, 0xab, 0x26, 0xc9, 0x3d, 0x36, 0x0c, 0xcd, 0xe0, 0x18, 0x07, 0x1b, 0x88, + 0x57, 0x49, 0xa5, 0xec, 0xba, 0xe5, 0x29, 0x28, 0x0a, 0x2d, 0xa2, 0x3c, 0xae, 0x58, 0xfc, 0x5d, + 0xdc, 0x95, 0xa5, 0x05, 0xa6, 0xb7, 0xca, 0xce, 0xf3, 0x2a, 0xdb, 0x83, 0xad, 0x24, 0xfc, 0xd8, + 0xca, 0x2a, 0x14, 0x48, 0x71, 0x34, 0x06, 0x9b, 0x28, 0xba, 0x03, 0x9b, 0x5c, 0x87, 0x45, 0x9a, + 0x04, 0x30, 0x04, 0x86, 0x47, 0xd6, 0x14, 0x8d, 0xac, 0x65, 0xf2, 0x80, 0xed, 0x02, 0x0b, 0xb1, + 0x2f, 0xc0, 0x92, 0x87, 0xd7, 0x29, 0xba, 0x18, 0x86, 0x20, 0x38, 0xdc, 0x1b, 0xbc, 0xfa, 0xfa, + 0x43, 0xdc, 0xdd, 0x21, 0x17, 0x6f, 0x09, 0x82, 0x46, 0xe2, 0xdf, 0x10, 0x34, 0x92, 0xf8, 0xda, + 0xd0, 0x88, 0xb7, 0x88, 0x4c, 0xfa, 0x8b, 0xc8, 0x7f, 0xc4, 0xdd, 0x33, 0x71, 0x80, 0x8e, 0xb6, + 0xd6, 0xc1, 0xbc, 0xac, 0xa3, 0xbf, 0x49, 0x9a, 0xd5, 0xd3, 0x4e, 0x79, 0xf1, 0x46, 0x7e, 0x12, + 0x2e, 0x27, 0x76, 0xe6, 0x79, 0x68, 0x74, 0x2a, 0x42, 0x96, 0xbb, 0xf0, 0x8a, 0x50, 0x80, 0xe4, + 0x43, 0xcc, 0x22, 0xdd, 0x82, 0x44, 0x7e, 0x12, 0x3e, 
0x6a, 0x64, 0x3c, 0x07, 0x61, 0x03, 0x74, + 0x1b, 0xf2, 0xb4, 0x5d, 0x23, 0x6b, 0xba, 0xc9, 0x03, 0x92, 0x2f, 0x5d, 0x63, 0x5d, 0x99, 0xb5, + 0x03, 0xc2, 0xb3, 0xaf, 0x9b, 0x52, 0x4e, 0xe7, 0xbf, 0x3c, 0x49, 0x53, 0xde, 0x97, 0x34, 0x5d, + 0x82, 0x3c, 0x99, 0xbd, 0xa9, 0x2b, 0x6d, 0x4c, 0x23, 0x4b, 0x5e, 0x72, 0x09, 0xe2, 0x03, 0x40, + 0x93, 0x71, 0x12, 0x35, 0x20, 0x83, 0xcf, 0xf0, 0xc0, 0x62, 0x39, 0x65, 0x61, 0xfd, 0xc2, 0x64, + 0xdd, 0x48, 0x1e, 0x6f, 0x56, 0xc8, 0x26, 0xff, 0xfd, 0x8b, 0x55, 0x81, 0x71, 0x3f, 0xaf, 0xf5, + 0x55, 0x0b, 0xf7, 0x75, 0xeb, 0x5c, 0xe2, 0xf2, 0xe2, 0x67, 0x71, 0x28, 0x8f, 0xc5, 0xcf, 0xc0, + 0xbd, 0xb5, 0x4d, 0x3e, 0xe1, 0x01, 0x96, 0x2e, 0x03, 0x9c, 0x2a, 0xa6, 0xfc, 0xbe, 0x32, 0xb0, + 0x70, 0x87, 0x6f, 0x67, 0xfe, 0x54, 0x31, 0xdf, 0xa2, 0x04, 0xff, 0xc2, 0x72, 0x63, 0x0b, 0xf3, + 0x14, 0xdb, 0x79, 0x6f, 0xb1, 0x8d, 0xaa, 0x90, 0xd3, 0x0d, 0x55, 0x33, 0x54, 0xeb, 0x9c, 0xee, + 0x46, 0x52, 0x72, 0xc6, 0x3b, 0xa9, 0x5c, 0x52, 0x48, 0xed, 0xa4, 0x72, 0x29, 0x21, 0xbd, 0x93, + 0xca, 0x65, 0x84, 0xec, 0x4e, 0x2a, 0x97, 0x15, 0x72, 0x3b, 0xa9, 0x5c, 0x41, 0x58, 0x10, 0x3f, + 0x4a, 0xb8, 0xb6, 0xee, 0xa2, 0x29, 0x51, 0x17, 0x13, 0xcd, 0x78, 0x56, 0x02, 0x96, 0xec, 0xa1, + 0x90, 0xd9, 0x93, 0xd1, 0xd0, 0xc4, 0x1d, 0x0e, 0xd8, 0x39, 0x63, 0xcf, 0xa1, 0x65, 0x1f, 0xed, + 0xd0, 0xa6, 0xef, 0xac, 0xf8, 0x63, 0x0a, 0xb1, 0xfa, 0x33, 0x11, 0x74, 0xe8, 0xad, 0x83, 0x86, + 0xf4, 0x0a, 0xda, 0xc6, 0x13, 0xf5, 0xae, 0xba, 0xf5, 0x12, 0x23, 0x9b, 0xe8, 0x6d, 0x78, 0x7c, + 0xcc, 0x8f, 0x38, 0xaa, 0x13, 0x51, 0xdd, 0xc9, 0x63, 0x7e, 0x77, 0x62, 0xab, 0x76, 0x37, 0x2b, + 0xf9, 0x88, 0x16, 0xbe, 0x0d, 0x25, 0x7f, 0x52, 0x15, 0x78, 0xfc, 0x4f, 0x41, 0xd1, 0xc0, 0x96, + 0xa2, 0x0e, 0x64, 0x5f, 0x05, 0xb8, 0xc0, 0x88, 0x1c, 0x6d, 0x3d, 0x80, 0xc7, 0x02, 0x93, 0x2b, + 0xf4, 0x32, 0xe4, 0xdd, 0xbc, 0x8c, 0xed, 0xea, 0x14, 0xdc, 0xcc, 0xe5, 0x15, 0x7f, 0x1f, 0x77, + 0x55, 0xfa, 0x91, 0xb8, 0x3a, 0x64, 0x0c, 0x6c, 0x0e, 0x7b, 0x0c, 0x1b, 0x2b, 0xad, 0xbf, 
0x10, + 0x2d, 0x2d, 0x23, 0xd4, 0x61, 0xcf, 0x92, 0xb8, 0xb0, 0xf8, 0x00, 0x32, 0x8c, 0x82, 0x0a, 0x90, + 0x3d, 0xda, 0xbb, 0xb7, 0xb7, 0xff, 0xd6, 0x9e, 0x10, 0x43, 0x00, 0x99, 0x8d, 0x5a, 0xad, 0x7e, + 0xd0, 0x14, 0xe2, 0x28, 0x0f, 0xe9, 0x8d, 0xcd, 0x7d, 0xa9, 0x29, 0x24, 0x08, 0x59, 0xaa, 0xef, + 0xd4, 0x6b, 0x4d, 0x21, 0x89, 0x16, 0xa1, 0xc8, 0x7e, 0xcb, 0x77, 0xf7, 0xa5, 0xfb, 0x1b, 0x4d, + 0x21, 0xe5, 0x21, 0x1d, 0xd6, 0xf7, 0xb6, 0xea, 0x92, 0x90, 0x16, 0xff, 0x0b, 0x2e, 0x86, 0x26, + 0x72, 0x2e, 0xcc, 0x16, 0xf7, 0xc0, 0x6c, 0xe2, 0xcf, 0x13, 0xa4, 0x8a, 0x0f, 0xcb, 0xce, 0xd0, + 0xce, 0xd8, 0xc2, 0xd7, 0xe7, 0x48, 0xed, 0xc6, 0x56, 0x4f, 0x0a, 0x77, 0x03, 0x9f, 0x60, 0xab, + 0xdd, 0x65, 0xd9, 0x22, 0x0b, 0x4f, 0x45, 0xa9, 0xc8, 0xa9, 0x54, 0xc8, 0x64, 0x6c, 0xef, 0xe2, + 0xb6, 0x25, 0x33, 0x27, 0xc4, 0x8c, 0x2e, 0x4f, 0xd8, 0x08, 0xf5, 0x90, 0x11, 0xc5, 0x77, 0xe6, + 0xda, 0xcb, 0x3c, 0xa4, 0xa5, 0x7a, 0x53, 0x7a, 0x5b, 0x48, 0x22, 0x04, 0x25, 0xfa, 0x53, 0x3e, + 0xdc, 0xdb, 0x38, 0x38, 0x6c, 0xec, 0x93, 0xbd, 0x5c, 0x82, 0xb2, 0xbd, 0x97, 0x36, 0x31, 0x2d, + 0xfe, 0x29, 0x01, 0x8f, 0x87, 0xe4, 0x96, 0xe8, 0x36, 0x80, 0x35, 0x92, 0x0d, 0xdc, 0xd6, 0x8c, + 0x4e, 0xb8, 0x91, 0x35, 0x47, 0x12, 0xe5, 0x90, 0xf2, 0x16, 0xff, 0x65, 0x4e, 0x41, 0x67, 0xd1, + 0xab, 0x5c, 0x29, 0x59, 0x95, 0x7d, 0xd5, 0x2e, 0x07, 0x80, 0x90, 0xb8, 0x4d, 0x14, 0xd3, 0xbd, + 0xa5, 0x8a, 0x29, 0x3f, 0xba, 0x1f, 0xe4, 0x54, 0x22, 0xf6, 0x46, 0xe6, 0x73, 0x27, 0xe9, 0x47, + 0x73, 0x27, 0xe2, 0xaf, 0x92, 0xde, 0x8d, 0xf5, 0xa7, 0xd2, 0xfb, 0x90, 0x31, 0x2d, 0xc5, 0x1a, + 0x9a, 0xdc, 0xe0, 0x5e, 0x8e, 0x9a, 0x97, 0xaf, 0xd9, 0x3f, 0x0e, 0xa9, 0xb8, 0xc4, 0xd5, 0x7c, + 0xbf, 0xdf, 0xa6, 0x78, 0x0b, 0x4a, 0xfe, 0xcd, 0x09, 0xbf, 0x32, 0xae, 0xcf, 0x49, 0x88, 0x77, + 0xdc, 0x6c, 0xc7, 0x03, 0x11, 0x4e, 0xc2, 0x6f, 0xf1, 0x20, 0xf8, 0xed, 0xd7, 0x71, 0x78, 0x62, + 0x4a, 0x75, 0x82, 0xde, 0x1c, 0x3b, 0xe7, 0x57, 0xe6, 0xa9, 0x6d, 0xd6, 0x18, 0xcd, 0x7f, 0xd2, + 0xe2, 0x4d, 0x58, 0xf0, 0xd2, 
0xa3, 0x2d, 0xf2, 0x27, 0x49, 0xd7, 0xe7, 0xfb, 0x71, 0xc2, 0x6f, + 0x2c, 0xad, 0x1b, 0xb3, 0xb3, 0xc4, 0x9c, 0x76, 0x16, 0x98, 0x2c, 0x24, 0xbf, 0xbd, 0x64, 0x21, + 0xf5, 0x88, 0xc9, 0x82, 0xf7, 0xc2, 0xa5, 0xfd, 0x17, 0x6e, 0x22, 0xae, 0x67, 0x02, 0xe2, 0xfa, + 0xdb, 0x00, 0x9e, 0xf6, 0xe1, 0x32, 0xa4, 0x0d, 0x6d, 0x38, 0xe8, 0x50, 0x33, 0x49, 0x4b, 0x6c, + 0x80, 0x6e, 0x41, 0x9a, 0x98, 0x9b, 0xbd, 0x99, 0x93, 0x9e, 0x97, 0x98, 0x8b, 0x07, 0xa1, 0x65, + 0xdc, 0xa2, 0x0a, 0x68, 0xb2, 0x85, 0x13, 0xf2, 0x8a, 0xd7, 0xfc, 0xaf, 0x78, 0x32, 0xb4, 0x19, + 0x14, 0xfc, 0xaa, 0x0f, 0x20, 0x4d, 0xcd, 0x83, 0xe4, 0x37, 0xb4, 0x0d, 0xc9, 0xcb, 0x53, 0xf2, + 0x1b, 0xfd, 0x10, 0x40, 0xb1, 0x2c, 0x43, 0x6d, 0x0d, 0xdd, 0x17, 0xac, 0x06, 0x9b, 0xd7, 0x86, + 0xcd, 0xb7, 0x79, 0x89, 0xdb, 0xd9, 0xb2, 0x2b, 0xea, 0xb1, 0x35, 0x8f, 0x42, 0x71, 0x0f, 0x4a, + 0x7e, 0x59, 0xbb, 0xa0, 0x62, 0x73, 0xf0, 0x17, 0x54, 0xac, 0x3e, 0xe6, 0x05, 0x95, 0x53, 0x8e, + 0x25, 0x59, 0xc7, 0x99, 0x0e, 0xc4, 0x7f, 0xc6, 0x61, 0xc1, 0x6b, 0x9d, 0xdf, 0x70, 0x1a, 0x3f, + 0xa3, 0x72, 0xb9, 0x38, 0x91, 0xc5, 0x67, 0x4f, 0x15, 0xf3, 0xe8, 0xbb, 0x4c, 0xe2, 0x3f, 0x8a, + 0x43, 0xce, 0x59, 0x7c, 0x48, 0xbb, 0xd7, 0xdd, 0xbb, 0x84, 0xb7, 0xb9, 0xc9, 0xfa, 0xc7, 0x49, + 0xa7, 0x2b, 0x7d, 0xc7, 0x49, 0xa8, 0xc2, 0x20, 0x64, 0xef, 0x4e, 0xdb, 0x8d, 0x79, 0x9e, 0x3f, + 0xfe, 0x8c, 0xcf, 0x83, 0x64, 0x12, 0xe8, 0x7f, 0x20, 0xa3, 0xb4, 0x1d, 0xe0, 0xbc, 0x14, 0x80, + 0xa4, 0xda, 0xac, 0x6b, 0xcd, 0xd1, 0x06, 0xe5, 0x94, 0xb8, 0x04, 0x9f, 0x55, 0xc2, 0xe9, 0x6a, + 0xbf, 0x4e, 0xf4, 0x32, 0x1e, 0xbf, 0xdb, 0x2c, 0x01, 0x1c, 0xed, 0xdd, 0xdf, 0xdf, 0xda, 0xbe, + 0xbb, 0x5d, 0xdf, 0xe2, 0x29, 0xd5, 0xd6, 0x56, 0x7d, 0x4b, 0x48, 0x10, 0x3e, 0xa9, 0x7e, 0x7f, + 0xff, 0xb8, 0xbe, 0x25, 0x24, 0xc5, 0x3b, 0x90, 0x77, 0x5c, 0x0f, 0xaa, 0x40, 0xd6, 0x6e, 0x02, + 0xc4, 0xb9, 0x03, 0xe0, 0x3d, 0x9d, 0x65, 0x48, 0xeb, 0xda, 0xfb, 0xbc, 0xa7, 0x9b, 0x94, 0xd8, + 0x40, 0xec, 0x40, 0x79, 0xcc, 0x6f, 0xa1, 0x3b, 0x90, 0xd5, 0x87, 
0x2d, 0xd9, 0x36, 0xda, 0xb1, + 0x96, 0x89, 0x5d, 0xd7, 0x0f, 0x5b, 0x3d, 0xb5, 0x7d, 0x0f, 0x9f, 0xdb, 0xdb, 0xa4, 0x0f, 0x5b, + 0xf7, 0x98, 0x6d, 0xb3, 0xb7, 0x24, 0xbc, 0x6f, 0x39, 0x83, 0x9c, 0x7d, 0x55, 0xd1, 0xff, 0x42, + 0xde, 0x71, 0x89, 0xce, 0x87, 0x2e, 0xa1, 0xbe, 0x94, 0xab, 0x77, 0x45, 0xd0, 0x75, 0x58, 0x34, + 0xd5, 0xd3, 0x81, 0xdd, 0x30, 0x62, 0x38, 0x5a, 0x82, 0xde, 0x99, 0x32, 0x7b, 0xb0, 0x6b, 0x83, + 0x3f, 0x24, 0x12, 0x0a, 0xe3, 0xbe, 0xe2, 0xbb, 0x9c, 0x40, 0x40, 0xc4, 0x4e, 0x06, 0x45, 0xec, + 0x0f, 0x13, 0x50, 0xf0, 0xb4, 0xa1, 0xd0, 0x7f, 0x7b, 0x1c, 0x57, 0x29, 0x20, 0xd4, 0x78, 0x78, + 0xdd, 0x6f, 0x28, 0xfc, 0x0b, 0x4b, 0xcc, 0xbf, 0xb0, 0xb0, 0xae, 0x9f, 0xdd, 0xcd, 0x4a, 0xcd, + 0xdd, 0xcd, 0x7a, 0x1e, 0x90, 0xa5, 0x59, 0x4a, 0x4f, 0x3e, 0xd3, 0x2c, 0x75, 0x70, 0x2a, 0x33, + 0xd3, 0x60, 0x6e, 0x46, 0xa0, 0x4f, 0x8e, 0xe9, 0x83, 0x03, 0x6a, 0x25, 0x3f, 0x8a, 0x43, 0xce, + 0x29, 0xfb, 0xe6, 0xfd, 0x24, 0xe2, 0x02, 0x64, 0x78, 0x65, 0xc3, 0xbe, 0x89, 0xe0, 0xa3, 0xc0, + 0xb6, 0x5d, 0x15, 0x72, 0x7d, 0x6c, 0x29, 0xd4, 0x67, 0xb2, 0x30, 0xe9, 0x8c, 0xaf, 0xbf, 0x02, + 0x05, 0xcf, 0xd7, 0x29, 0xc4, 0x8d, 0xee, 0xd5, 0xdf, 0x12, 0x62, 0xd5, 0xec, 0xc7, 0x9f, 0x5e, + 0x49, 0xee, 0xe1, 0xf7, 0xc9, 0x0d, 0x93, 0xea, 0xb5, 0x46, 0xbd, 0x76, 0x4f, 0x88, 0x57, 0x0b, + 0x1f, 0x7f, 0x7a, 0x25, 0x2b, 0x61, 0xda, 0x65, 0xb9, 0x7e, 0x0f, 0xca, 0x63, 0x07, 0xe3, 0xbf, + 0xd0, 0x08, 0x4a, 0x5b, 0x47, 0x07, 0xbb, 0xdb, 0xb5, 0x8d, 0x66, 0x5d, 0x3e, 0xde, 0x6f, 0xd6, + 0x85, 0x38, 0x7a, 0x1c, 0x96, 0x76, 0xb7, 0xdf, 0x68, 0x34, 0xe5, 0xda, 0xee, 0x76, 0x7d, 0xaf, + 0x29, 0x6f, 0x34, 0x9b, 0x1b, 0xb5, 0x7b, 0x42, 0x62, 0xfd, 0x37, 0x05, 0x28, 0x6f, 0x6c, 0xd6, + 0xb6, 0x49, 0x6d, 0xa7, 0xb6, 0x15, 0xea, 0x1e, 0x6a, 0x90, 0xa2, 0x90, 0xed, 0xd4, 0xef, 0x8d, + 0xab, 0xd3, 0xdb, 0x70, 0xe8, 0x2e, 0xa4, 0x29, 0x9a, 0x8b, 0xa6, 0x7f, 0x80, 0x5c, 0x9d, 0xd1, + 0x97, 0x23, 0x93, 0xa1, 0xd7, 0x69, 0xea, 0x17, 0xc9, 0xd5, 0xe9, 0x6d, 0x3a, 0xb4, 0x0b, 0x59, + 0x1b, 
0x6c, 0x9b, 0xf5, 0x6d, 0x6f, 0x75, 0x66, 0xbf, 0x8b, 0x2c, 0x8d, 0x81, 0xa2, 0xd3, 0x3f, + 0x56, 0xae, 0xce, 0x68, 0xe0, 0xa1, 0x6d, 0xc8, 0x70, 0x84, 0x64, 0xc6, 0x77, 0xba, 0xd5, 0x59, + 0x7d, 0x2b, 0x24, 0x41, 0xde, 0x85, 0x9b, 0x67, 0x7f, 0x82, 0x5d, 0x8d, 0xd0, 0x9b, 0x44, 0x0f, + 0xa0, 0xe8, 0x47, 0x5d, 0xa2, 0x7d, 0x0b, 0x5c, 0x8d, 0xd8, 0x21, 0x23, 0xfa, 0xfd, 0x10, 0x4c, + 0xb4, 0x6f, 0x83, 0xab, 0x11, 0x1b, 0x66, 0xe8, 0x5d, 0x58, 0x9c, 0x84, 0x48, 0xa2, 0x7f, 0x2a, + 0x5c, 0x9d, 0xa3, 0x85, 0x86, 0xfa, 0x80, 0x02, 0xa0, 0x95, 0x39, 0xbe, 0x1c, 0xae, 0xce, 0xd3, + 0x51, 0x43, 0x1d, 0x28, 0x8f, 0xc3, 0x15, 0x51, 0xbf, 0x24, 0xae, 0x46, 0xee, 0xae, 0xb1, 0xb7, + 0xf8, 0x6b, 0xf7, 0xa8, 0x5f, 0x16, 0x57, 0x23, 0x37, 0xdb, 0xd0, 0x11, 0x80, 0xa7, 0xf6, 0x8c, + 0xf0, 0xa5, 0x71, 0x35, 0x4a, 0xdb, 0x0d, 0xe9, 0xb0, 0x14, 0x54, 0x94, 0xce, 0xf3, 0xe1, 0x71, + 0x75, 0xae, 0x6e, 0x1c, 0xb1, 0x67, 0x7f, 0x79, 0x19, 0xed, 0x43, 0xe4, 0x6a, 0xc4, 0xb6, 0xdc, + 0x66, 0xfd, 0xb3, 0x2f, 0x57, 0xe2, 0x9f, 0x7f, 0xb9, 0x12, 0xff, 0xdb, 0x97, 0x2b, 0xf1, 0x4f, + 0xbe, 0x5a, 0x89, 0x7d, 0xfe, 0xd5, 0x4a, 0xec, 0x2f, 0x5f, 0xad, 0xc4, 0xfe, 0xef, 0xb9, 0x53, + 0xd5, 0xea, 0x0e, 0x5b, 0x6b, 0x6d, 0xad, 0x7f, 0xc3, 0xfb, 0x9f, 0x94, 0xa0, 0xff, 0xc9, 0xb4, + 0x32, 0x34, 0xa0, 0xde, 0xfc, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x7e, 0xbe, 0x60, 0x47, + 0x33, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -7185,13 +7175,6 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.MempoolError) > 0 { - i -= len(m.MempoolError) - copy(dAtA[i:], m.MempoolError) - i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError))) - i-- - dAtA[i] = 0x5a - } if m.Priority != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Priority)) i-- @@ -9547,10 +9530,6 @@ func (m *ResponseCheckTx) Size() (n int) { if m.Priority != 0 { n += 1 + sovTypes(uint64(m.Priority)) } - l = len(m.MempoolError) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } return n } @@ -10798,10 +10777,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10883,10 +10859,7 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10936,10 +10909,7 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11091,10 +11061,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11332,10 +11299,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11490,10 +11454,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11677,10 +11638,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11783,10 +11741,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11870,10 +11825,7 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11942,10 +11894,7 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11995,10 +11944,7 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12048,10 +11994,7 @@ func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if 
(skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12171,10 +12114,7 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12281,10 +12221,7 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12419,10 +12356,7 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12710,10 +12644,7 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13016,10 +12947,7 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13122,10 +13050,7 @@ func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13296,10 +13221,7 @@ func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err 
} - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13602,10 +13524,7 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14355,10 +14274,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14440,10 +14356,7 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14525,10 +14438,7 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14578,10 +14488,7 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14767,10 +14674,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -14924,10 +14828,7 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) 
error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15234,10 +15135,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15321,10 +15219,7 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15523,48 +15418,13 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { break } } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MempoolError = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15835,10 +15695,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return 
ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -15992,10 +15849,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16098,10 +15952,7 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16185,10 +16036,7 @@ func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16257,10 +16105,7 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16344,10 +16189,7 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16524,10 +16366,7 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16749,10 +16588,7 @@ func (m *ResponsePrepareProposal) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -16959,10 +16795,7 @@ func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17046,10 +16879,7 @@ func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17118,10 +16948,7 @@ func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17362,10 +17189,7 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17468,10 +17292,7 @@ func (m *CommitInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17574,10 +17395,7 @@ func (m *ExtendedCommitInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + 
skippy) > l { @@ -17693,10 +17511,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -17830,10 +17645,7 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18104,10 +17916,7 @@ func (m *ExecTxResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18262,10 +18071,7 @@ func (m *TxResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18368,10 +18174,7 @@ func (m *TxRecord) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18474,10 +18277,7 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18579,10 +18379,7 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if 
(iNdEx + skippy) > l { @@ -18685,10 +18482,7 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -18825,10 +18619,7 @@ func (m *ExtendedVoteInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -19001,10 +18792,7 @@ func (m *Misbehavior) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -19179,10 +18967,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index ef85af0881..caee97ff8e 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -278,8 +278,11 @@ func (txmp *TxMempool) CheckTx( } txmp.defaultTxCallback(tx, res) - txmp.initTxCallback(wtx, res, txInfo) + err = txmp.initTxCallback(wtx, res, txInfo) + if err != nil { + return err + } if cb != nil { cb(res) } @@ -488,7 +491,7 @@ func (txmp *TxMempool) Update( // // NOTE: // - An explicit lock is NOT required. 
-func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, txInfo TxInfo) { +func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, txInfo TxInfo) error { var err error if txmp.postCheck != nil { err = txmp.postCheck(wtx.tx, res) @@ -510,10 +513,7 @@ func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(wtx.tx) } - if err != nil { - res.MempoolError = err.Error() - } - return + return err } sender := res.Sender @@ -527,7 +527,7 @@ func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, "sender", sender, ) txmp.metrics.RejectedTxs.Add(1) - return + return nil } } @@ -548,7 +548,7 @@ func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, "err", err.Error(), ) txmp.metrics.RejectedTxs.Add(1) - return + return nil } // evict an existing transaction(s) @@ -588,6 +588,7 @@ func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, "num_txs", txmp.Size(), ) txmp.notifyTxsAvailable() + return nil } // defaultTxCallback is the CheckTx application callback used when a diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index 33b6dd8aae..538cb3e1fc 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -622,10 +622,17 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { expectedErrString := "" if testCase.err != nil { expectedErrString = testCase.err.Error() + require.Equal(t, expectedErrString, txmp.postCheck(tx, res).Error()) + } else { + require.Equal(t, nil, txmp.postCheck(tx, res)) } - require.Equal(t, expectedErrString, res.MempoolError) } - require.NoError(t, txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0})) + if testCase.err == nil { + require.NoError(t, txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0})) + } else { + err = txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0}) + 
fmt.Print(err.Error()) + } }) } } diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index ca649ec3ad..195179584b 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -54,11 +54,10 @@ func (env *Environment) BroadcastTxSync(ctx context.Context, req *coretypes.Requ return nil, fmt.Errorf("broadcast confirmation not received: %w", ctx.Err()) case r := <-resCh: return &coretypes.ResultBroadcastTx{ - Code: r.Code, - Data: r.Data, - Codespace: r.Codespace, - MempoolError: r.MempoolError, - Hash: req.Tx.Hash(), + Code: r.Code, + Data: r.Data, + Codespace: r.Codespace, + Hash: req.Tx.Hash(), }, nil } } @@ -90,7 +89,7 @@ func (env *Environment) BroadcastTxCommit(ctx context.Context, req *coretypes.Re return &coretypes.ResultBroadcastTxCommit{ CheckTx: *r, Hash: req.Tx.Hash(), - }, fmt.Errorf("transaction encountered error (%s)", r.MempoolError) + }, fmt.Errorf("wrong ABCI CodeType, got (%d) instead of OK", r.Code) } if !indexer.KVSinkEnabled(env.EventSinks) { diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 6e3579c8dd..90632bf6ea 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -256,12 +256,7 @@ message ResponseCheckTx { string sender = 9; int64 priority = 10; - // mempool_error is set by Tendermint. - - // ABCI applications creating a ResponseCheckTX should not set mempool_error. 
- string mempool_error = 11; - - reserved 3, 4, 6, 7; // see https://github.com/tendermint/tendermint/issues/8543 + reserved 3, 4, 6, 7, 11; // see https://github.com/tendermint/tendermint/issues/8543 } message ResponseDeliverTx { diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index 910ccea476..8757f8ab3e 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -927,10 +927,7 @@ func (m *BlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -999,10 +996,7 @@ func (m *NoBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1124,10 +1118,7 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1177,10 +1168,7 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1268,10 +1256,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1496,10 +1481,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 
{ - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/types.pb.go b/proto/tendermint/consensus/types.pb.go index d542d929e3..4ae9abc9e1 100644 --- a/proto/tendermint/consensus/types.pb.go +++ b/proto/tendermint/consensus/types.pb.go @@ -1935,10 +1935,7 @@ func (m *NewRoundStep) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2115,10 +2112,7 @@ func (m *NewValidBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2201,10 +2195,7 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2325,10 +2316,7 @@ func (m *ProposalPOL) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2449,10 +2437,7 @@ func (m *BlockPart) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2538,10 +2523,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 
0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2667,10 +2649,7 @@ func (m *HasVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2810,10 +2789,7 @@ func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2986,10 +2962,7 @@ func (m *VoteSetBits) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3354,10 +3327,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/wal.pb.go b/proto/tendermint/consensus/wal.pb.go index 86ff1be01f..fd80819cd0 100644 --- a/proto/tendermint/consensus/wal.pb.go +++ b/proto/tendermint/consensus/wal.pb.go @@ -921,10 +921,7 @@ func (m *MsgInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1064,10 +1061,7 @@ func (m *TimeoutInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1136,10 
+1130,7 @@ func (m *EndHeight) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1329,10 +1320,7 @@ func (m *WALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1451,10 +1439,7 @@ func (m *TimedWALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/keys.pb.go b/proto/tendermint/crypto/keys.pb.go index 8ff4c4a4fe..24c6c1b1ba 100644 --- a/proto/tendermint/crypto/keys.pb.go +++ b/proto/tendermint/crypto/keys.pb.go @@ -687,10 +687,7 @@ func (m *PublicKey) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthKeys - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthKeys } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/proof.pb.go b/proto/tendermint/crypto/proof.pb.go index 97350c64c7..82fb943fcd 100644 --- a/proto/tendermint/crypto/proof.pb.go +++ b/proto/tendermint/crypto/proof.pb.go @@ -820,10 +820,7 @@ func (m *Proof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -943,10 +940,7 @@ func (m *ValueOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1092,10 +1086,7 @@ func (m *DominoOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1245,10 +1236,7 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1332,10 +1320,7 @@ func (m *ProofOps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/libs/bits/types.pb.go b/proto/tendermint/libs/bits/types.pb.go index ad87f854f4..c0ebcb9760 100644 --- a/proto/tendermint/libs/bits/types.pb.go +++ b/proto/tendermint/libs/bits/types.pb.go @@ -307,10 +307,7 @@ func (m *BitArray) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/mempool/types.pb.go b/proto/tendermint/mempool/types.pb.go index 3487652bc8..11e259551d 100644 --- a/proto/tendermint/mempool/types.pb.go +++ b/proto/tendermint/mempool/types.pb.go @@ -370,10 +370,7 @@ func (m *Txs) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -458,10 +455,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { 
return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/conn.pb.go b/proto/tendermint/p2p/conn.pb.go index 7c26d3fcd4..47a3bb0cd8 100644 --- a/proto/tendermint/p2p/conn.pb.go +++ b/proto/tendermint/p2p/conn.pb.go @@ -723,10 +723,7 @@ func (m *PacketPing) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -776,10 +773,7 @@ func (m *PacketPong) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -902,10 +896,7 @@ func (m *PacketMsg) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1060,10 +1051,7 @@ func (m *Packet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1180,10 +1168,7 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go index 25d636e43d..15ccce15e5 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/proto/tendermint/p2p/pex.pb.go @@ -587,10 +587,7 @@ func 
(m *PexAddress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -640,10 +637,7 @@ func (m *PexRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -727,10 +721,7 @@ func (m *PexResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -850,10 +841,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go index a0e647ee7b..bffa6884fe 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -917,10 +917,7 @@ func (m *ProtocolVersion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1230,10 +1227,7 @@ func (m *NodeInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1347,10 +1341,7 @@ func (m *NodeInfoOther) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return 
ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1502,10 +1493,7 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1678,10 +1666,7 @@ func (m *PeerAddressInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/privval/types.pb.go b/proto/tendermint/privval/types.pb.go index da30f75270..56b35e7271 100644 --- a/proto/tendermint/privval/types.pb.go +++ b/proto/tendermint/privval/types.pb.go @@ -1708,10 +1708,7 @@ func (m *RemoteSignerError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1793,10 +1790,7 @@ func (m *PubKeyRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1915,10 +1909,7 @@ func (m *PubKeyResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2036,10 +2027,7 @@ func (m *SignVoteRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if 
(skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2158,10 +2146,7 @@ func (m *SignedVoteResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2279,10 +2264,7 @@ func (m *SignProposalRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2401,10 +2383,7 @@ func (m *SignedProposalResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2454,10 +2433,7 @@ func (m *PingRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2507,10 +2483,7 @@ func (m *PingResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2840,10 +2813,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2960,10 +2930,7 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx 
+ skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index 8db184011b..af5c64ecf8 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -944,10 +944,7 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1052,10 +1049,7 @@ func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1157,10 +1151,7 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1275,10 +1266,7 @@ func (m *Version) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1744,10 +1732,7 @@ func (m *State) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go index 93e844730a..5541c28037 100644 --- a/proto/tendermint/statesync/types.pb.go +++ b/proto/tendermint/statesync/types.pb.go @@ -1740,10 +1740,7 @@ func (m 
*Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1793,10 +1790,7 @@ func (m *SnapshotsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1971,10 +1965,7 @@ func (m *SnapshotsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2081,10 +2072,7 @@ func (m *ChunkRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2245,10 +2233,7 @@ func (m *ChunkResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2317,10 +2302,7 @@ func (m *LightBlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2406,10 +2388,7 @@ func (m *LightBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2478,10 
+2457,7 @@ func (m *ParamsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2583,10 +2559,7 @@ func (m *ParamsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/block.pb.go b/proto/tendermint/types/block.pb.go index aacb90fab7..f2077aad8b 100644 --- a/proto/tendermint/types/block.pb.go +++ b/proto/tendermint/types/block.pb.go @@ -389,10 +389,7 @@ func (m *Block) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthBlock - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthBlock } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/canonical.pb.go b/proto/tendermint/types/canonical.pb.go index e08342a460..50c0c84fa2 100644 --- a/proto/tendermint/types/canonical.pb.go +++ b/proto/tendermint/types/canonical.pb.go @@ -920,10 +920,7 @@ func (m *CanonicalBlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1026,10 +1023,7 @@ func (m *CanonicalPartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1238,10 +1232,7 @@ func (m *CanonicalProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return 
ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1431,10 +1422,7 @@ func (m *CanonicalVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1570,10 +1558,7 @@ func (m *CanonicalVoteExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/events.pb.go b/proto/tendermint/types/events.pb.go index 1c49aef647..a9aa26a799 100644 --- a/proto/tendermint/types/events.pb.go +++ b/proto/tendermint/types/events.pb.go @@ -285,10 +285,7 @@ func (m *EventDataRoundState) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvents } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/evidence.pb.go b/proto/tendermint/types/evidence.pb.go index 746d853130..052fb0e6b7 100644 --- a/proto/tendermint/types/evidence.pb.go +++ b/proto/tendermint/types/evidence.pb.go @@ -827,10 +827,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1023,10 +1020,7 @@ func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 
{ return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1217,10 +1211,7 @@ func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1304,10 +1295,7 @@ func (m *EvidenceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index a13ff77236..89ca14f957 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -1935,10 +1935,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2026,10 +2023,7 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2150,10 +2144,7 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2235,10 +2226,7 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } 
if (iNdEx + skippy) > l { @@ -2307,10 +2295,7 @@ func (m *VersionParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2398,10 +2383,7 @@ func (m *HashedParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2523,10 +2505,7 @@ func (m *SynchronyParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2776,10 +2755,7 @@ func (m *TimeoutParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -2868,10 +2844,7 @@ func (m *ABCIParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index f6f8a33f3f..fcfbc01f54 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -2650,10 +2650,7 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2789,10 +2786,7 @@ func (m *Part) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2909,10 +2903,7 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3418,10 +3409,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3503,10 +3491,7 @@ func (m *Data) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3834,10 +3819,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3992,10 +3974,7 @@ func (m *Commit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4165,10 +4144,7 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4323,10 +4299,7 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { 
if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4564,10 +4537,7 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4793,10 +4763,7 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4918,10 +4885,7 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5043,10 +5007,7 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5200,10 +5161,7 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5357,10 +5315,7 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/validator.pb.go 
b/proto/tendermint/types/validator.pb.go index 2c3468b83f..23b30ed3cb 100644 --- a/proto/tendermint/types/validator.pb.go +++ b/proto/tendermint/types/validator.pb.go @@ -583,10 +583,7 @@ func (m *ValidatorSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthValidator - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -741,10 +738,7 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthValidator - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -849,10 +843,7 @@ func (m *SimpleValidator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthValidator - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/version/types.pb.go b/proto/tendermint/version/types.pb.go index 7aefd7747b..76a94fd3c0 100644 --- a/proto/tendermint/version/types.pb.go +++ b/proto/tendermint/version/types.pb.go @@ -265,10 +265,7 @@ func (m *Consensus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/rpc/coretypes/responses.go b/rpc/coretypes/responses.go index 8fda326830..ef740f89d8 100644 --- a/rpc/coretypes/responses.go +++ b/rpc/coretypes/responses.go @@ -231,11 +231,10 @@ type ResultConsensusState struct { // CheckTx result type ResultBroadcastTx struct { - Code uint32 `json:"code"` - Data bytes.HexBytes `json:"data"` - Codespace string `json:"codespace"` - MempoolError string `json:"mempool_error"` - Hash bytes.HexBytes `json:"hash"` + 
Code uint32 `json:"code"` + Data bytes.HexBytes `json:"data"` + Codespace string `json:"codespace"` + Hash bytes.HexBytes `json:"hash"` } // CheckTx and DeliverTx results From bc24ae4642b522094b967b7979afc651912f3cfd Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 26 May 2022 14:28:28 +0200 Subject: [PATCH 064/203] rpc: deprecate/updates to broadcast tx (#8624) --- CHANGELOG_PENDING.md | 1 + UPGRADING.md | 12 +++++++++ internal/rpc/core/mempool.go | 18 ++++++++------ internal/rpc/core/routes.go | 6 ++++- light/proxy/routes.go | 4 +++ light/rpc/client.go | 4 +++ rpc/client/http/http.go | 4 +++ rpc/client/interface.go | 2 ++ rpc/client/local/local.go | 4 +++ rpc/client/mock/abci.go | 23 +++++++++++++++++ rpc/client/mocks/client.go | 23 +++++++++++++++++ rpc/openapi/openapi.yaml | 48 +++++++++++++++++++++++++++++++++++- 12 files changed, 140 insertions(+), 9 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index f16a21aa4c..6f10f6bfdc 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -22,6 +22,7 @@ Special thanks to external contributors on this release: - [cli] \#8081 make the reset command safe to use by intoducing `reset-state` command. Fixed by \#8259. (@marbar3778, @cmwaters) - [config] \#8222 default indexer configuration to null. (@creachadair) - [rpc] \#8570 rework timeouts to be per-method instead of global. (@creachadair) + - [rpc] \#8624 deprecate `broadcast_tx_commit` and `braodcast_tx_sync` and `broadcast_tx_async` in favor of `braodcast_tx`. (@tychoish) - Apps diff --git a/UPGRADING.md b/UPGRADING.md index 43caddb6b5..91d237f325 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -96,6 +96,18 @@ callback. For more detailed information, see [ADR 075](https://tinyurl.com/adr075) which defines and describes the new API in detail. +#### BroadcastTx Methods + +All callers should use the new `broadcast_tx` method, which has the +same semantics as the legacy `broadcast_tx_sync` method. 
The +`broadcast_tx_sync` and `broadcast_tx_async` methods are now +deprecated and will be removed in 0.37. + +Additionally the `broadcast_tx_commit` method is *also* deprecated, +and will be removed in 0.37. Client code that submits a transaction +and needs to wait for it to be committed to the chain, should poll +the tendermint to observe the transaction in the committed state. + ### Timeout Parameter Changes Tendermint v0.36 updates how the Tendermint consensus timing parameters are diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index 195179584b..309412baa1 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -19,20 +19,24 @@ import ( // BroadcastTxAsync returns right away, with no response. Does not wait for // CheckTx nor DeliverTx results. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async +// More: +// https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async +// Deprecated and should be removed in 0.37 func (env *Environment) BroadcastTxAsync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { - err := env.Mempool.CheckTx(ctx, req.Tx, nil, mempool.TxInfo{}) - if err != nil { - return nil, err - } + go func() { _ = env.Mempool.CheckTx(ctx, req.Tx, nil, mempool.TxInfo{}) }() return &coretypes.ResultBroadcastTx{Hash: req.Tx.Hash()}, nil } -// BroadcastTxSync returns with the response from CheckTx. Does not wait for +// Deprecated and should be remove in 0.37 +func (env *Environment) BroadcastTxSync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { + return env.BroadcastTx(ctx, req) +} + +// BroadcastTx returns with the response from CheckTx. Does not wait for // DeliverTx result. 
// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync -func (env *Environment) BroadcastTxSync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { +func (env *Environment) BroadcastTx(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { resCh := make(chan *abci.ResponseCheckTx, 1) err := env.Mempool.CheckTx( ctx, diff --git a/internal/rpc/core/routes.go b/internal/rpc/core/routes.go index cafb92094a..107b0e226d 100644 --- a/internal/rpc/core/routes.go +++ b/internal/rpc/core/routes.go @@ -59,8 +59,11 @@ func NewRoutesMap(svc RPCService, opts *RouteOptions) RoutesMap { "num_unconfirmed_txs": rpc.NewRPCFunc(svc.NumUnconfirmedTxs), // tx broadcast API + "broadcast_tx": rpc.NewRPCFunc(svc.BroadcastTx), + // TODO remove after 0.36 + // deprecated broadcast tx methods: "broadcast_tx_commit": rpc.NewRPCFunc(svc.BroadcastTxCommit), - "broadcast_tx_sync": rpc.NewRPCFunc(svc.BroadcastTxSync), + "broadcast_tx_sync": rpc.NewRPCFunc(svc.BroadcastTx), "broadcast_tx_async": rpc.NewRPCFunc(svc.BroadcastTxAsync), // abci API @@ -87,6 +90,7 @@ type RPCService interface { BlockSearch(ctx context.Context, req *coretypes.RequestBlockSearch) (*coretypes.ResultBlockSearch, error) BlockchainInfo(ctx context.Context, req *coretypes.RequestBlockchainInfo) (*coretypes.ResultBlockchainInfo, error) BroadcastEvidence(ctx context.Context, req *coretypes.RequestBroadcastEvidence) (*coretypes.ResultBroadcastEvidence, error) + BroadcastTx(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) BroadcastTxAsync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) BroadcastTxCommit(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTxCommit, error) BroadcastTxSync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) diff --git 
a/light/proxy/routes.go b/light/proxy/routes.go index 8331723e73..329d33fe43 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -52,6 +52,10 @@ func (p proxyService) BroadcastTxAsync(ctx context.Context, req *coretypes.Reque return p.Client.BroadcastTxAsync(ctx, req.Tx) } +func (p proxyService) BroadcastTx(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { + return p.Client.BroadcastTx(ctx, req.Tx) +} + func (p proxyService) BroadcastTxCommit(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTxCommit, error) { return p.Client.BroadcastTxCommit(ctx, req.Tx) } diff --git a/light/rpc/client.go b/light/rpc/client.go index aedf15050b..1a57ee9f12 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -229,6 +229,10 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.R return c.next.BroadcastTxSync(ctx, tx) } +func (c *Client) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + return c.next.BroadcastTx(ctx, tx) +} + func (c *Client) UnconfirmedTxs(ctx context.Context, page, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { return c.next.UnconfirmedTxs(ctx, page, perPage) } diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 50d78d279b..8f0fe1d994 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -242,6 +242,10 @@ func (c *baseRPCClient) BroadcastTxSync(ctx context.Context, tx types.Tx) (*core return c.broadcastTX(ctx, "broadcast_tx_sync", tx) } +func (c *baseRPCClient) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + return c.broadcastTX(ctx, "broadcast_tx_sync", tx) +} + func (c *baseRPCClient) broadcastTX(ctx context.Context, route string, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { result := new(coretypes.ResultBroadcastTx) if err := c.caller.Call(ctx, route, &coretypes.RequestBroadcastTx{Tx: tx}, result); err != 
nil { diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 4b55d36e69..29e4f07070 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -62,6 +62,8 @@ type ABCIClient interface { opts ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) // Writing to abci app + BroadcastTx(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error) + // These methods are deprecated: BroadcastTxCommit(context.Context, types.Tx) (*coretypes.ResultBroadcastTxCommit, error) BroadcastTxAsync(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error) BroadcastTxSync(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error) diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 8718ee504b..c81f384d59 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -87,6 +87,10 @@ func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes. return c.env.BroadcastTxCommit(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) } +func (c *Local) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + return c.env.BroadcastTx(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) +} + func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) } diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 2252275860..142d64d19b 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -94,6 +94,10 @@ func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes. 
} func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + return a.BroadcastTx(ctx, tx) +} + +func (a ABCIApp) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { c, err := a.App.CheckTx(ctx, &abci.RequestCheckTx{Tx: tx}) if err != nil { return nil, err @@ -158,6 +162,14 @@ func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes return res.(*coretypes.ResultBroadcastTx), nil } +func (m ABCIMock) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + res, err := m.Broadcast.GetResponse(tx) + if err != nil { + return nil, err + } + return res.(*coretypes.ResultBroadcastTx), nil +} + func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { @@ -252,3 +264,14 @@ func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coret }) return res, err } + +func (r *ABCIRecorder) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + res, err := r.Client.BroadcastTx(ctx, tx) + r.addCall(Call{ + Name: "broadcast_tx", + Args: tx, + Response: res, + Error: err, + }) + return res, err +} diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index 0bc478fc3f..5e766a6d2d 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -227,6 +227,29 @@ func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*c return r0, r1 } +// BroadcastTx provides a mock function with given fields: _a0, _a1 +func (_m *Client) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // BroadcastTxAsync provides a mock function with given fields: _a0, _a1 func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 566419b854..7d258426ea 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -82,9 +82,50 @@ paths: /broadcast_tx_sync: get: summary: Returns with the response from CheckTx. Does not wait for DeliverTx result. + deprecated: true tags: - Tx operationId: broadcast_tx_sync + description: | + This method is deprecated in Tendermint v0.36, and will be + removed in v0.37. Use `broadcast_tx`, which has similar + semantics. + + This method blocks until CheckTx returns and reports its result, but + does not wait for the transaction to be included in a block. To know + when the transaction is included in a block, check for the transaction + event using JSON-RPC. See + https://docs.tendermint.com/master/app-dev/subscribing-to-events-via-websocket.html + + See https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting + for formatting/encoding rules. + parameters: + - in: query + name: tx + required: true + schema: + type: string + example: "456" + description: The transaction + responses: + "200": + description: Empty + content: + application/json: + schema: + $ref: "#/components/schemas/BroadcastTxResponse" + "500": + description: Error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + /broadcast_tx: + get: + summary: Returns with the response from CheckTx. Does not wait for DeliverTx result. 
+ tags: + - Tx + operationId: broadcast_tx description: | This method blocks until CheckTx returns and reports its result, but does not wait for the transaction to be included in a block. To know when the transaction is included in a block, check for the transaction event using JSON-RPC. See https://docs.tendermint.com/master/app-dev/subscribing-to-events-via-websocket.html See https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting for formatting/encoding rules. @@ -118,10 +159,14 @@ paths: /broadcast_tx_async: get: summary: Returns right away, with no response. Does not wait for CheckTx nor DeliverTx results. + deprecated: true tags: - Tx operationId: broadcast_tx_async description: | + This method is deprecated in Tendermint v0.36, and will be + removed in v0.37. Use `broadcast_tx`. + This method submits the transaction and returns immediately without waiting for the transaction to be checked (CheckTx) or committed. To know when the transaction is included in a block, you can check for the @@ -154,6 +199,7 @@ paths: /broadcast_tx_commit: get: summary: Returns with the responses from CheckTx and DeliverTx. + deprecated: true tags: - Tx operationId: broadcast_tx_commit @@ -165,7 +211,7 @@ succeed and report the failing (non-zero) ABCI result code. WARNING: Use this only for testing and development. For production use, - call broadcast_tx_sync or broadcast_tx_async. + call broadcast_tx. To know when a transaction is included in a block, check for the transaction event using JSON-RPC. See From a3a06cd1a61a15958ffc1a6272ae3c1ee847d7c9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 26 May 2022 15:24:21 +0000 Subject: [PATCH 065/203] build(deps): Bump github.com/bufbuild/buf from 1.3.1 to 1.4.0 (#8622) Bumps [github.com/bufbuild/buf](https://github.com/bufbuild/buf) from 1.3.1 to 1.4.0.
Release notes

Sourced from github.com/bufbuild/buf's releases.

v1.4.0

  • Fix issue where duplicate synthetic oneofs (such as with proto3 maps or optional fields) did not result in a properly formed error.
  • Add buf beta registry repository update command which supports updating repository visibility (public vs private). As with all beta commands, this is likely to change in the future.
Changelog

Sourced from github.com/bufbuild/buf's changelog.

[v1.4.0] - 2022-04-21

  • Fix issue where duplicate synthetic oneofs (such as with proto3 maps or optional fields) did not result in a properly formed error.
  • Add buf beta registry repository update command which supports updating repository visibility (public vs private). As with all beta commands, this is likely to change in the future.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/bufbuild/buf&package-manager=go_modules&previous-version=1.3.1&new-version=1.4.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 8 ++++---- go.sum | 18 ++++++++++++------ 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index bbaa94afc8..a6846b993b 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( ) require ( - github.com/bufbuild/buf v1.3.1 + github.com/bufbuild/buf v1.4.0 github.com/creachadair/atomicfile v0.2.6 github.com/creachadair/taskgroup v0.3.2 github.com/golangci/golangci-lint v1.46.0 @@ -54,7 +54,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f // indirect - github.com/jhump/protoreflect v1.11.1-0.20220213155251-0c2aedc66cf4 // indirect + github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753 // indirect github.com/klauspost/compress v1.15.1 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/lufeee/execinquery v1.0.0 // indirect @@ -228,11 +228,11 @@ require ( go.etcd.io/bbolt v1.3.6 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect - google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac // indirect + google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 1665d1ba65..49fa1a74c0 100644 --- a/go.sum +++ b/go.sum @@ -175,8 +175,8 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go 
v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/buf v1.3.1 h1:AelWcENnbNEjwxmQXIZaU51GHgnWQ8Mc94kZdDUKgRs= -github.com/bufbuild/buf v1.3.1/go.mod h1:CTRUb23N+zlm1U8ZIBKz0Sqluk++qQloB2i/MZNZHIs= +github.com/bufbuild/buf v1.4.0 h1:GqE3a8CMmcFvWPzuY3Mahf9Kf3S9XgZ/ORpfYFzO+90= +github.com/bufbuild/buf v1.4.0/go.mod h1:mwHG7klTHnX+rM/ym8LXGl7vYpVmnwT96xWoRB4H5QI= github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= @@ -614,11 +614,15 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= +github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= +github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f h1:BNuUg9k2EiJmlMwjoef3e8vZLHplbVw6DrjGFjLL+Yo= github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f/go.mod h1:qr2b5kx4HbFS7/g4uYO5qv9ei8303JMsC7ESbYiqr2Q= github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jhump/protoreflect v1.11.1-0.20220213155251-0c2aedc66cf4 h1:E2CdxLXYSn6Zrj2+u8DWrwMJW3YZLSWtM/7kIL8OL18= 
-github.com/jhump/protoreflect v1.11.1-0.20220213155251-0c2aedc66cf4/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= +github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= +github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753 h1:uFlcJKZPLQd7rmOY/RrvBuUaYmAFnlFHKLivhO6cOy8= +github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= @@ -1465,8 +1469,9 @@ golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16C golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 h1:EH1Deb8WZJ0xc0WK//leUHXcX9aLE5SymusoTmMZye8= +golang.org/x/term v0.0.0-20220411215600-e5f449aeb171/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1722,8 +1727,9 @@ google.golang.org/genproto 
v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac h1:qSNTkEN+L2mvWcLgJOR+8bdHX9rN/IdU3A1Ghpfb1Rg= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 h1:myaecH64R0bIEDjNORIel4iXubqzaHU1K2z8ajBwWcM= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= From eb3b4887610fd6cfe0c126bfe62905a5188ad356 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 26 May 2022 09:04:52 -0700 Subject: [PATCH 066/203] build: simplify the proto generated check in CI (#8617) Since we now use buf from a tools dependency, we can use the Go tool to install the version we expected without having to curl a tarball. 
--- .github/workflows/check-generated.yml | 6 +----- scripts/proto-gen.sh | 8 ++------ 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index 50d923376b..3bf63b79cf 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -57,11 +57,7 @@ jobs: export PATH="${PATH}:${tools}/bin" export GOBIN="${tools}/bin" - readonly base='https://github.com/bufbuild/buf/releases/latest/download' - readonly OS="$(uname -s)" ARCH="$(uname -m)" - curl -sSL "${base}/buf-${OS}-${ARCH}.tar.gz" \ - | tar -xzf - -C "$tools" --strip-components=1 - + go install github.com/bufbuild/buf/cmd/buf go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest make proto-gen diff --git a/scripts/proto-gen.sh b/scripts/proto-gen.sh index 10499dcd1f..06fa07dd95 100755 --- a/scripts/proto-gen.sh +++ b/scripts/proto-gen.sh @@ -11,13 +11,9 @@ cd "$(git rev-parse --show-toplevel)" # Run inside Docker to install the correct versions of the required tools # without polluting the local system. docker run --rm -i -v "$PWD":/w --workdir=/w golang:1.18-alpine sh <<"EOF" -apk add curl git make - -readonly buf_release='https://github.com/bufbuild/buf/releases/latest/download' -readonly OS="$(uname -s)" ARCH="$(uname -m)" -curl -sSL "${buf_release}/buf-${OS}-${ARCH}.tar.gz" \ - | tar -xzf - -C /usr/local --strip-components=1 +apk add git make +go install github.com/bufbuild/buf/cmd/buf go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest make proto-gen EOF From 9027401de4659b7552fb99052ed1c7b272674b1a Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Fri, 27 May 2022 02:17:19 -0700 Subject: [PATCH 067/203] Forward-port changelogs from v0.35.5 to master (#8627) --- CHANGELOG.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cfafd8c7d4..8b30abf027 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,18 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos). +## v0.35.5 + +May 26, 2022 + +### BUG FIXES + +- [p2p] [\#8371](https://github.com/tendermint/tendermint/pull/8371) fix setting in con-tracker (backport #8370) (@tychoish) +- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters) +- [statesync] [\#8494](https://github.com/tendermint/tendermint/pull/8494) avoid potential race (@tychoish) +- [keymigrate] [\#8467](https://github.com/tendermint/tendermint/pull/8467) improve filtering for legacy transaction hashes (backport #8466) (@creachadair) +- [rpc] [\#8594](https://github.com/tendermint/tendermint/pull/8594) fix encoding of block_results responses (@creachadair) + ## v0.35.4 April 18, 2022 @@ -2126,7 +2138,7 @@ handshake by authenticating the NetAddress.ID of the peer we're dialing. This release fixes yet another issue with the proposer selection algorithm. We hope it's the last one, but we won't be surprised if it's not. We plan to one day expose the selection algorithm more directly to -the application ([\#3285](https://github.com/tendermint/tendermint/issues/3285)), and even to support randomness ([\#763](https://github.com/tendermint/tendermint/issues/763)). +the application ([\#3285](https://github.com/tendermint/tendermint/issues/3285)), and even to support randomness ([\#763](https://github.com/tendermint/tendermint/issues/763)). For more, see issues marked [proposer-selection](https://github.com/tendermint/tendermint/labels/proposer-selection). 
From 844a5fd03cd3e3f744cf89fd6ecb503673a2c0b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 May 2022 22:18:13 -0700 Subject: [PATCH 068/203] build(deps): Bump github.com/spf13/viper from 1.11.0 to 1.12.0 (#8630) --- go.mod | 18 +++++++++--------- go.sum | 51 +++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 50 insertions(+), 19 deletions(-) diff --git a/go.mod b/go.mod index a6846b993b..174c9f9e10 100644 --- a/go.mod +++ b/go.mod @@ -26,11 +26,11 @@ require ( github.com/rs/zerolog v1.26.1 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.4.0 - github.com/spf13/viper v1.11.0 + github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.7.1 github.com/tendermint/tm-db v0.6.6 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 - golang.org/x/net v0.0.0-20220412020605-290c469a71a5 + golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.46.2 pgregory.net/rapid v0.4.7 @@ -58,7 +58,7 @@ require ( github.com/klauspost/compress v1.15.1 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/lufeee/execinquery v1.0.0 // indirect - github.com/pelletier/go-toml/v2 v2.0.0 // indirect + github.com/pelletier/go-toml/v2 v2.0.1 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/profile v1.6.0 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect @@ -205,12 +205,12 @@ require ( github.com/sonatard/noctx v0.0.1 // indirect github.com/sourcegraph/go-diff v0.6.1 // indirect github.com/spf13/afero v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stretchr/objx v0.1.1 // indirect - 
github.com/subosito/gotenv v1.2.0 // indirect + github.com/subosito/gotenv v1.3.0 // indirect github.com/sylvia7788/contextcheck v1.0.4 // indirect github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca // indirect github.com/tdakkota/asciicheck v0.1.1 // indirect @@ -227,16 +227,16 @@ require ( gitlab.com/bosi/decorder v0.2.1 // indirect go.etcd.io/bbolt v1.3.6 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect - golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect - google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 // indirect + golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect + google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.0 // indirect honnef.co/go/tools v0.3.1 // indirect mvdan.cc/gofumpt v0.3.1 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect diff --git a/go.sum b/go.sum index 49fa1a74c0..16dff839d7 100644 --- a/go.sum +++ b/go.sum @@ -44,6 +44,8 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute 
v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= @@ -315,8 +317,9 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns= github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= @@ -506,6 +509,7 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod 
h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -868,8 +872,9 @@ github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.0.0 h1:P7Bq0SaI8nsexyay5UAyDo+ICWy5MQPgEZ5+l8JQTKo= github.com/pelletier/go-toml/v2 v2.0.0/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= +github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= @@ -902,6 +907,7 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= 
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= @@ -974,6 +980,7 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA= +github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -1017,8 +1024,9 @@ github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfA github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1036,8 +1044,9 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= 
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= -github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44= github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= @@ -1058,8 +1067,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= +github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04= github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1139,13 +1149,17 @@ go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6y go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= 
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI= +go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -1320,8 +1334,10 @@ golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4= golang.org/x/net 
v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1355,6 +1371,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1465,8 +1483,10 @@ golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220403020550-483a9cbc67c0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1595,8 +1615,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= 
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -1639,6 +1660,9 @@ google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQ google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1728,8 +1752,13 @@ google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 h1:myaecH64R0bIEDjNORIel4iXubqzaHU1K2z8ajBwWcM= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= 
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1765,6 +1794,7 @@ google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5 google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -1816,8 +1846,9 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 571f26bca55f63b512807bb4ef4f8313fb08819e Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Mon, 30 May 2022 08:41:18 +0200 Subject: [PATCH 069/203] Remove obsolete abci methods, no longer called by ABCI++ Tendermint (#8633) * Remove ABCI methods marked as obsolete, but no longer called * Add links in ABCI++ section of 'UPGRADING.md' * make proto-gen * Ressurrect * make proto-gen2 * Fixed lint * Make proto-gen3 * Minor fix to comment * make proto-gen4 --- UPGRADING.md | 5 +- abci/types/types.pb.go | 2465 +++++------------------------ proto/tendermint/abci/types.proto | 36 +- 3 files changed, 412 insertions(+), 2094 deletions(-) diff --git a/UPGRADING.md b/UPGRADING.md index 91d237f325..60bd9a10f6 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -16,7 +16,10 @@ by Tendermint itself. Right now, we return a regular error when this happens. #### ABCI++ -Coming soon... +For information on how ABCI++ works, see the +[Specification](https://github.com/tendermint/tendermint/blob/master/spec/abci%2B%2B/README.md). +In particular, the simplest way to upgrade your application is described +[here](https://github.com/tendermint/tendermint/blob/master/spec/abci%2B%2B/abci++_tmint_expected_behavior_002_draft.md#adapting-existing-applications-that-use-abci). 
#### ABCI Mutex diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 5b43a74ced..77d515bbe7 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -120,7 +120,7 @@ func (x ResponseOfferSnapshot_Result) String() string { } func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33, 0} + return fileDescriptor_252557cfdd89a31a, []int{28, 0} } type ResponseApplySnapshotChunk_Result int32 @@ -157,7 +157,7 @@ func (x ResponseApplySnapshotChunk_Result) String() string { } func (ResponseApplySnapshotChunk_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{35, 0} + return fileDescriptor_252557cfdd89a31a, []int{30, 0} } type ResponseProcessProposal_ProposalStatus int32 @@ -185,7 +185,7 @@ func (x ResponseProcessProposal_ProposalStatus) String() string { } func (ResponseProcessProposal_ProposalStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37, 0} + return fileDescriptor_252557cfdd89a31a, []int{32, 0} } type ResponseVerifyVoteExtension_VerifyStatus int32 @@ -213,7 +213,7 @@ func (x ResponseVerifyVoteExtension_VerifyStatus) String() string { } func (ResponseVerifyVoteExtension_VerifyStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{39, 0} + return fileDescriptor_252557cfdd89a31a, []int{34, 0} } // TxAction contains App-provided information on what to do with a transaction that is part of a raw proposal @@ -245,7 +245,7 @@ func (x TxRecord_TxAction) String() string { } func (TxRecord_TxAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{47, 0} + return fileDescriptor_252557cfdd89a31a, []int{42, 0} } type Request struct { @@ -255,10 +255,7 @@ type Request struct { // *Request_Info // *Request_InitChain // *Request_Query - // *Request_BeginBlock // *Request_CheckTx - // *Request_DeliverTx - // *Request_EndBlock // *Request_Commit 
// *Request_ListSnapshots // *Request_OfferSnapshot @@ -326,18 +323,9 @@ type Request_InitChain struct { type Request_Query struct { Query *RequestQuery `protobuf:"bytes,5,opt,name=query,proto3,oneof" json:"query,omitempty"` } -type Request_BeginBlock struct { - BeginBlock *RequestBeginBlock `protobuf:"bytes,6,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` -} type Request_CheckTx struct { CheckTx *RequestCheckTx `protobuf:"bytes,7,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } -type Request_DeliverTx struct { - DeliverTx *RequestDeliverTx `protobuf:"bytes,8,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` -} -type Request_EndBlock struct { - EndBlock *RequestEndBlock `protobuf:"bytes,9,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` -} type Request_Commit struct { Commit *RequestCommit `protobuf:"bytes,10,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } @@ -374,10 +362,7 @@ func (*Request_Flush) isRequest_Value() {} func (*Request_Info) isRequest_Value() {} func (*Request_InitChain) isRequest_Value() {} func (*Request_Query) isRequest_Value() {} -func (*Request_BeginBlock) isRequest_Value() {} func (*Request_CheckTx) isRequest_Value() {} -func (*Request_DeliverTx) isRequest_Value() {} -func (*Request_EndBlock) isRequest_Value() {} func (*Request_Commit) isRequest_Value() {} func (*Request_ListSnapshots) isRequest_Value() {} func (*Request_OfferSnapshot) isRequest_Value() {} @@ -431,14 +416,6 @@ func (m *Request) GetQuery() *RequestQuery { return nil } -// Deprecated: Do not use. 
-func (m *Request) GetBeginBlock() *RequestBeginBlock { - if x, ok := m.GetValue().(*Request_BeginBlock); ok { - return x.BeginBlock - } - return nil -} - func (m *Request) GetCheckTx() *RequestCheckTx { if x, ok := m.GetValue().(*Request_CheckTx); ok { return x.CheckTx @@ -446,22 +423,6 @@ func (m *Request) GetCheckTx() *RequestCheckTx { return nil } -// Deprecated: Do not use. -func (m *Request) GetDeliverTx() *RequestDeliverTx { - if x, ok := m.GetValue().(*Request_DeliverTx); ok { - return x.DeliverTx - } - return nil -} - -// Deprecated: Do not use. -func (m *Request) GetEndBlock() *RequestEndBlock { - if x, ok := m.GetValue().(*Request_EndBlock); ok { - return x.EndBlock - } - return nil -} - func (m *Request) GetCommit() *RequestCommit { if x, ok := m.GetValue().(*Request_Commit); ok { return x.Commit @@ -540,10 +501,7 @@ func (*Request) XXX_OneofWrappers() []interface{} { (*Request_Info)(nil), (*Request_InitChain)(nil), (*Request_Query)(nil), - (*Request_BeginBlock)(nil), (*Request_CheckTx)(nil), - (*Request_DeliverTx)(nil), - (*Request_EndBlock)(nil), (*Request_Commit)(nil), (*Request_ListSnapshots)(nil), (*Request_OfferSnapshot)(nil), @@ -857,74 +815,6 @@ func (m *RequestQuery) GetProve() bool { return false } -type RequestBeginBlock struct { - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - Header types1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` - LastCommitInfo CommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` - ByzantineValidators []Misbehavior `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` -} - -func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } -func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } -func (*RequestBeginBlock) ProtoMessage() {} -func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return 
fileDescriptor_252557cfdd89a31a, []int{6} -} -func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestBeginBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestBeginBlock.Merge(m, src) -} -func (m *RequestBeginBlock) XXX_Size() int { - return m.Size() -} -func (m *RequestBeginBlock) XXX_DiscardUnknown() { - xxx_messageInfo_RequestBeginBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestBeginBlock proto.InternalMessageInfo - -func (m *RequestBeginBlock) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *RequestBeginBlock) GetHeader() types1.Header { - if m != nil { - return m.Header - } - return types1.Header{} -} - -func (m *RequestBeginBlock) GetLastCommitInfo() CommitInfo { - if m != nil { - return m.LastCommitInfo - } - return CommitInfo{} -} - -func (m *RequestBeginBlock) GetByzantineValidators() []Misbehavior { - if m != nil { - return m.ByzantineValidators - } - return nil -} - type RequestCheckTx struct { Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` Type CheckTxType `protobuf:"varint,2,opt,name=type,proto3,enum=tendermint.abci.CheckTxType" json:"type,omitempty"` @@ -934,7 +824,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{7} + return fileDescriptor_252557cfdd89a31a, []int{6} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -977,94 +867,6 @@ func (m 
*RequestCheckTx) GetType() CheckTxType { return CheckTxType_New } -type RequestDeliverTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` -} - -func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } -func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } -func (*RequestDeliverTx) ProtoMessage() {} -func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{8} -} -func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestDeliverTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestDeliverTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestDeliverTx.Merge(m, src) -} -func (m *RequestDeliverTx) XXX_Size() int { - return m.Size() -} -func (m *RequestDeliverTx) XXX_DiscardUnknown() { - xxx_messageInfo_RequestDeliverTx.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestDeliverTx proto.InternalMessageInfo - -func (m *RequestDeliverTx) GetTx() []byte { - if m != nil { - return m.Tx - } - return nil -} - -type RequestEndBlock struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` -} - -func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } -func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } -func (*RequestEndBlock) ProtoMessage() {} -func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{9} -} -func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestEndBlock.Marshal(b, m, 
deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestEndBlock.Merge(m, src) -} -func (m *RequestEndBlock) XXX_Size() int { - return m.Size() -} -func (m *RequestEndBlock) XXX_DiscardUnknown() { - xxx_messageInfo_RequestEndBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestEndBlock proto.InternalMessageInfo - -func (m *RequestEndBlock) GetHeight() int64 { - if m != nil { - return m.Height - } - return 0 -} - type RequestCommit struct { } @@ -1072,7 +874,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{10} + return fileDescriptor_252557cfdd89a31a, []int{7} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1109,7 +911,7 @@ func (m *RequestListSnapshots) Reset() { *m = RequestListSnapshots{} } func (m *RequestListSnapshots) String() string { return proto.CompactTextString(m) } func (*RequestListSnapshots) ProtoMessage() {} func (*RequestListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{11} + return fileDescriptor_252557cfdd89a31a, []int{8} } func (m *RequestListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1148,7 +950,7 @@ func (m *RequestOfferSnapshot) Reset() { *m = RequestOfferSnapshot{} } func (m *RequestOfferSnapshot) String() string { return proto.CompactTextString(m) } func (*RequestOfferSnapshot) ProtoMessage() {} func (*RequestOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{12} + return fileDescriptor_252557cfdd89a31a, []int{9} } func (m *RequestOfferSnapshot) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -1202,7 +1004,7 @@ func (m *RequestLoadSnapshotChunk) Reset() { *m = RequestLoadSnapshotChu func (m *RequestLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } func (*RequestLoadSnapshotChunk) ProtoMessage() {} func (*RequestLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{13} + return fileDescriptor_252557cfdd89a31a, []int{10} } func (m *RequestLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1263,7 +1065,7 @@ func (m *RequestApplySnapshotChunk) Reset() { *m = RequestApplySnapshotC func (m *RequestApplySnapshotChunk) String() string { return proto.CompactTextString(m) } func (*RequestApplySnapshotChunk) ProtoMessage() {} func (*RequestApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{14} + return fileDescriptor_252557cfdd89a31a, []int{11} } func (m *RequestApplySnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1332,7 +1134,7 @@ func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } func (*RequestPrepareProposal) ProtoMessage() {} func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{15} + return fileDescriptor_252557cfdd89a31a, []int{12} } func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1434,7 +1236,7 @@ func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} func (m *RequestProcessProposal) String() string { return proto.CompactTextString(m) } func (*RequestProcessProposal) ProtoMessage() {} func (*RequestProcessProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{16} + return fileDescriptor_252557cfdd89a31a, []int{13} } func (m *RequestProcessProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1529,7 +1331,7 
@@ func (m *RequestExtendVote) Reset() { *m = RequestExtendVote{} } func (m *RequestExtendVote) String() string { return proto.CompactTextString(m) } func (*RequestExtendVote) ProtoMessage() {} func (*RequestExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{17} + return fileDescriptor_252557cfdd89a31a, []int{14} } func (m *RequestExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1584,7 +1386,7 @@ func (m *RequestVerifyVoteExtension) Reset() { *m = RequestVerifyVoteExt func (m *RequestVerifyVoteExtension) String() string { return proto.CompactTextString(m) } func (*RequestVerifyVoteExtension) ProtoMessage() {} func (*RequestVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{18} + return fileDescriptor_252557cfdd89a31a, []int{15} } func (m *RequestVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1658,7 +1460,7 @@ func (m *RequestFinalizeBlock) Reset() { *m = RequestFinalizeBlock{} } func (m *RequestFinalizeBlock) String() string { return proto.CompactTextString(m) } func (*RequestFinalizeBlock) ProtoMessage() {} func (*RequestFinalizeBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{19} + return fileDescriptor_252557cfdd89a31a, []int{16} } func (m *RequestFinalizeBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1751,10 +1553,7 @@ type Response struct { // *Response_Info // *Response_InitChain // *Response_Query - // *Response_BeginBlock // *Response_CheckTx - // *Response_DeliverTx - // *Response_EndBlock // *Response_Commit // *Response_ListSnapshots // *Response_OfferSnapshot @@ -1772,7 +1571,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{20} + return 
fileDescriptor_252557cfdd89a31a, []int{17} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1825,18 +1624,9 @@ type Response_InitChain struct { type Response_Query struct { Query *ResponseQuery `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` } -type Response_BeginBlock struct { - BeginBlock *ResponseBeginBlock `protobuf:"bytes,7,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` -} type Response_CheckTx struct { CheckTx *ResponseCheckTx `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } -type Response_DeliverTx struct { - DeliverTx *ResponseDeliverTx `protobuf:"bytes,9,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` -} -type Response_EndBlock struct { - EndBlock *ResponseEndBlock `protobuf:"bytes,10,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` -} type Response_Commit struct { Commit *ResponseCommit `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } @@ -1874,10 +1664,7 @@ func (*Response_Flush) isResponse_Value() {} func (*Response_Info) isResponse_Value() {} func (*Response_InitChain) isResponse_Value() {} func (*Response_Query) isResponse_Value() {} -func (*Response_BeginBlock) isResponse_Value() {} func (*Response_CheckTx) isResponse_Value() {} -func (*Response_DeliverTx) isResponse_Value() {} -func (*Response_EndBlock) isResponse_Value() {} func (*Response_Commit) isResponse_Value() {} func (*Response_ListSnapshots) isResponse_Value() {} func (*Response_OfferSnapshot) isResponse_Value() {} @@ -1938,14 +1725,6 @@ func (m *Response) GetQuery() *ResponseQuery { return nil } -// Deprecated: Do not use. 
-func (m *Response) GetBeginBlock() *ResponseBeginBlock { - if x, ok := m.GetValue().(*Response_BeginBlock); ok { - return x.BeginBlock - } - return nil -} - func (m *Response) GetCheckTx() *ResponseCheckTx { if x, ok := m.GetValue().(*Response_CheckTx); ok { return x.CheckTx @@ -1953,22 +1732,6 @@ func (m *Response) GetCheckTx() *ResponseCheckTx { return nil } -// Deprecated: Do not use. -func (m *Response) GetDeliverTx() *ResponseDeliverTx { - if x, ok := m.GetValue().(*Response_DeliverTx); ok { - return x.DeliverTx - } - return nil -} - -// Deprecated: Do not use. -func (m *Response) GetEndBlock() *ResponseEndBlock { - if x, ok := m.GetValue().(*Response_EndBlock); ok { - return x.EndBlock - } - return nil -} - func (m *Response) GetCommit() *ResponseCommit { if x, ok := m.GetValue().(*Response_Commit); ok { return x.Commit @@ -2048,10 +1811,7 @@ func (*Response) XXX_OneofWrappers() []interface{} { (*Response_Info)(nil), (*Response_InitChain)(nil), (*Response_Query)(nil), - (*Response_BeginBlock)(nil), (*Response_CheckTx)(nil), - (*Response_DeliverTx)(nil), - (*Response_EndBlock)(nil), (*Response_Commit)(nil), (*Response_ListSnapshots)(nil), (*Response_OfferSnapshot)(nil), @@ -2074,7 +1834,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{21} + return fileDescriptor_252557cfdd89a31a, []int{18} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2118,7 +1878,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{22} + return fileDescriptor_252557cfdd89a31a, 
[]int{19} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2161,7 +1921,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{23} + return fileDescriptor_252557cfdd89a31a, []int{20} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2203,7 +1963,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{24} + return fileDescriptor_252557cfdd89a31a, []int{21} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2277,7 +2037,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{25} + return fileDescriptor_252557cfdd89a31a, []int{22} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2344,7 +2104,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{26} + return fileDescriptor_252557cfdd89a31a, []int{23} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2436,50 +2196,6 @@ func (m *ResponseQuery) GetCodespace() string { return "" } -type ResponseBeginBlock struct { - Events []Event 
`protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` -} - -func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } -func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseBeginBlock) ProtoMessage() {} -func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{27} -} -func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseBeginBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseBeginBlock.Merge(m, src) -} -func (m *ResponseBeginBlock) XXX_Size() int { - return m.Size() -} -func (m *ResponseBeginBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseBeginBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo - -func (m *ResponseBeginBlock) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - type ResponseCheckTx struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -2493,7 +2209,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{28} + return fileDescriptor_252557cfdd89a31a, []int{24} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2579,7 +2295,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m 
*ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{29} + return fileDescriptor_252557cfdd89a31a, []int{25} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2664,66 +2380,6 @@ func (m *ResponseDeliverTx) GetCodespace() string { return "" } -type ResponseEndBlock struct { - ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` - ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` -} - -func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } -func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseEndBlock) ProtoMessage() {} -func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{30} -} -func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseEndBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseEndBlock.Merge(m, src) -} -func (m *ResponseEndBlock) XXX_Size() int { - return m.Size() -} -func (m *ResponseEndBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseEndBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo - -func (m *ResponseEndBlock) GetValidatorUpdates() 
[]ValidatorUpdate { - if m != nil { - return m.ValidatorUpdates - } - return nil -} - -func (m *ResponseEndBlock) GetConsensusParamUpdates() *types1.ConsensusParams { - if m != nil { - return m.ConsensusParamUpdates - } - return nil -} - -func (m *ResponseEndBlock) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - type ResponseCommit struct { // reserve 1 Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -2734,7 +2390,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{31} + return fileDescriptor_252557cfdd89a31a, []int{26} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2785,7 +2441,7 @@ func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } func (m *ResponseListSnapshots) String() string { return proto.CompactTextString(m) } func (*ResponseListSnapshots) ProtoMessage() {} func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{32} + return fileDescriptor_252557cfdd89a31a, []int{27} } func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2829,7 +2485,7 @@ func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } func (*ResponseOfferSnapshot) ProtoMessage() {} func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33} + return fileDescriptor_252557cfdd89a31a, []int{28} } func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2873,7 +2529,7 @@ func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotC func (m *ResponseLoadSnapshotChunk) 
String() string { return proto.CompactTextString(m) } func (*ResponseLoadSnapshotChunk) ProtoMessage() {} func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{34} + return fileDescriptor_252557cfdd89a31a, []int{29} } func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2919,7 +2575,7 @@ func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapsho func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } func (*ResponseApplySnapshotChunk) ProtoMessage() {} func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{35} + return fileDescriptor_252557cfdd89a31a, []int{30} } func (m *ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2981,7 +2637,7 @@ func (m *ResponsePrepareProposal) Reset() { *m = ResponsePrepareProposal func (m *ResponsePrepareProposal) String() string { return proto.CompactTextString(m) } func (*ResponsePrepareProposal) ProtoMessage() {} func (*ResponsePrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{36} + return fileDescriptor_252557cfdd89a31a, []int{31} } func (m *ResponsePrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3057,7 +2713,7 @@ func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal func (m *ResponseProcessProposal) String() string { return proto.CompactTextString(m) } func (*ResponseProcessProposal) ProtoMessage() {} func (*ResponseProcessProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37} + return fileDescriptor_252557cfdd89a31a, []int{32} } func (m *ResponseProcessProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3129,7 +2785,7 @@ func (m *ResponseExtendVote) Reset() { *m = ResponseExtendVote{} } func (m *ResponseExtendVote) String() string { return 
proto.CompactTextString(m) } func (*ResponseExtendVote) ProtoMessage() {} func (*ResponseExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{38} + return fileDescriptor_252557cfdd89a31a, []int{33} } func (m *ResponseExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3173,7 +2829,7 @@ func (m *ResponseVerifyVoteExtension) Reset() { *m = ResponseVerifyVoteE func (m *ResponseVerifyVoteExtension) String() string { return proto.CompactTextString(m) } func (*ResponseVerifyVoteExtension) ProtoMessage() {} func (*ResponseVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{39} + return fileDescriptor_252557cfdd89a31a, []int{34} } func (m *ResponseVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3222,7 +2878,7 @@ func (m *ResponseFinalizeBlock) Reset() { *m = ResponseFinalizeBlock{} } func (m *ResponseFinalizeBlock) String() string { return proto.CompactTextString(m) } func (*ResponseFinalizeBlock) ProtoMessage() {} func (*ResponseFinalizeBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{40} + return fileDescriptor_252557cfdd89a31a, []int{35} } func (m *ResponseFinalizeBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3302,7 +2958,7 @@ func (m *CommitInfo) Reset() { *m = CommitInfo{} } func (m *CommitInfo) String() string { return proto.CompactTextString(m) } func (*CommitInfo) ProtoMessage() {} func (*CommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{41} + return fileDescriptor_252557cfdd89a31a, []int{36} } func (m *CommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3360,7 +3016,7 @@ func (m *ExtendedCommitInfo) Reset() { *m = ExtendedCommitInfo{} } func (m *ExtendedCommitInfo) String() string { return proto.CompactTextString(m) } func (*ExtendedCommitInfo) ProtoMessage() {} func (*ExtendedCommitInfo) Descriptor() ([]byte, []int) { 
- return fileDescriptor_252557cfdd89a31a, []int{42} + return fileDescriptor_252557cfdd89a31a, []int{37} } func (m *ExtendedCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3404,7 +3060,7 @@ func (m *ExtendedCommitInfo) GetVotes() []ExtendedVoteInfo { } // Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock and ResponseDeliverTx. +// ResponseFinalizeBlock, ResponseDeliverTx, ExecTxResult // Later, transactions may be queried using these events. type Event struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` @@ -3415,7 +3071,7 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{43} + return fileDescriptor_252557cfdd89a31a, []int{38} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3469,7 +3125,7 @@ func (m *EventAttribute) Reset() { *m = EventAttribute{} } func (m *EventAttribute) String() string { return proto.CompactTextString(m) } func (*EventAttribute) ProtoMessage() {} func (*EventAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{44} + return fileDescriptor_252557cfdd89a31a, []int{39} } func (m *EventAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3537,7 +3193,7 @@ func (m *ExecTxResult) Reset() { *m = ExecTxResult{} } func (m *ExecTxResult) String() string { return proto.CompactTextString(m) } func (*ExecTxResult) ProtoMessage() {} func (*ExecTxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{45} + return fileDescriptor_252557cfdd89a31a, []int{40} } func (m *ExecTxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3292,7 @@ func (m *TxResult) Reset() { *m = TxResult{} } func (m *TxResult) String() string { return 
proto.CompactTextString(m) } func (*TxResult) ProtoMessage() {} func (*TxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{46} + return fileDescriptor_252557cfdd89a31a, []int{41} } func (m *TxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3702,7 +3358,7 @@ func (m *TxRecord) Reset() { *m = TxRecord{} } func (m *TxRecord) String() string { return proto.CompactTextString(m) } func (*TxRecord) ProtoMessage() {} func (*TxRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{47} + return fileDescriptor_252557cfdd89a31a, []int{42} } func (m *TxRecord) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3756,7 +3412,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{48} + return fileDescriptor_252557cfdd89a31a, []int{43} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3809,7 +3465,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{49} + return fileDescriptor_252557cfdd89a31a, []int{44} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3862,7 +3518,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{50} + return fileDescriptor_252557cfdd89a31a, []int{45} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3919,7 +3575,7 @@ func (m 
*ExtendedVoteInfo) Reset() { *m = ExtendedVoteInfo{} } func (m *ExtendedVoteInfo) String() string { return proto.CompactTextString(m) } func (*ExtendedVoteInfo) ProtoMessage() {} func (*ExtendedVoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{51} + return fileDescriptor_252557cfdd89a31a, []int{46} } func (m *ExtendedVoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3987,7 +3643,7 @@ func (m *Misbehavior) Reset() { *m = Misbehavior{} } func (m *Misbehavior) String() string { return proto.CompactTextString(m) } func (*Misbehavior) ProtoMessage() {} func (*Misbehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{52} + return fileDescriptor_252557cfdd89a31a, []int{47} } func (m *Misbehavior) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4063,7 +3719,7 @@ func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{53} + return fileDescriptor_252557cfdd89a31a, []int{48} } func (m *Snapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4141,10 +3797,7 @@ func init() { proto.RegisterType((*RequestInfo)(nil), "tendermint.abci.RequestInfo") proto.RegisterType((*RequestInitChain)(nil), "tendermint.abci.RequestInitChain") proto.RegisterType((*RequestQuery)(nil), "tendermint.abci.RequestQuery") - proto.RegisterType((*RequestBeginBlock)(nil), "tendermint.abci.RequestBeginBlock") proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.RequestCheckTx") - proto.RegisterType((*RequestDeliverTx)(nil), "tendermint.abci.RequestDeliverTx") - proto.RegisterType((*RequestEndBlock)(nil), "tendermint.abci.RequestEndBlock") proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.RequestCommit") proto.RegisterType((*RequestListSnapshots)(nil), "tendermint.abci.RequestListSnapshots") 
proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.RequestOfferSnapshot") @@ -4162,10 +3815,8 @@ func init() { proto.RegisterType((*ResponseInfo)(nil), "tendermint.abci.ResponseInfo") proto.RegisterType((*ResponseInitChain)(nil), "tendermint.abci.ResponseInitChain") proto.RegisterType((*ResponseQuery)(nil), "tendermint.abci.ResponseQuery") - proto.RegisterType((*ResponseBeginBlock)(nil), "tendermint.abci.ResponseBeginBlock") proto.RegisterType((*ResponseCheckTx)(nil), "tendermint.abci.ResponseCheckTx") proto.RegisterType((*ResponseDeliverTx)(nil), "tendermint.abci.ResponseDeliverTx") - proto.RegisterType((*ResponseEndBlock)(nil), "tendermint.abci.ResponseEndBlock") proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.ResponseCommit") proto.RegisterType((*ResponseListSnapshots)(nil), "tendermint.abci.ResponseListSnapshots") proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.ResponseOfferSnapshot") @@ -4194,224 +3845,211 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 3459 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcb, 0x73, 0x23, 0xe5, - 0xb5, 0xd7, 0xfb, 0x71, 0x64, 0x49, 0xed, 0xcf, 0x66, 0xd0, 0x88, 0x19, 0x7b, 0x68, 0x0a, 0x98, - 0x19, 0xc0, 0xc3, 0xf5, 0xdc, 0x81, 0xe1, 0x0e, 0x5c, 0xca, 0x96, 0x35, 0xc8, 0x1e, 0x8f, 0x6d, - 0xda, 0xb2, 0x29, 0xee, 0x4d, 0xa6, 0x69, 0x49, 0x9f, 0xad, 0x66, 0x24, 0x75, 0xd3, 0xdd, 0x32, - 0x32, 0xcb, 0x50, 0x6c, 0xa8, 0x54, 0x85, 0x4d, 0x2a, 0x49, 0x55, 0xd8, 0x25, 0x55, 0xc9, 0x7f, - 0x90, 0x55, 0x56, 0x59, 0xb0, 0xc8, 0x82, 0x55, 0x92, 0x15, 0x49, 0xc1, 0x2e, 0xff, 0x40, 0x76, - 0x49, 0xea, 0x7b, 0xf4, 0x4b, 0xea, 0x96, 0x5a, 0x0c, 0x50, 0x95, 0x2a, 0x76, 0xfa, 0x4e, 0x9f, - 0x73, 0xfa, 0x7b, 0x9c, 0xef, 0x3c, 0x7e, 0xa7, 0x05, 0x4f, 0x58, 0x78, 0xd0, 0xc1, 0x46, 0x5f, - 0x1d, 0x58, 0x37, 0x94, 0x56, 
0x5b, 0xbd, 0x61, 0x9d, 0xeb, 0xd8, 0x5c, 0xd3, 0x0d, 0xcd, 0xd2, - 0x50, 0xd9, 0x7d, 0xb8, 0x46, 0x1e, 0x56, 0x2f, 0x7b, 0xb8, 0xdb, 0xc6, 0xb9, 0x6e, 0x69, 0x37, - 0x74, 0x43, 0xd3, 0x4e, 0x18, 0x7f, 0xf5, 0x92, 0xe7, 0x31, 0xd5, 0xe3, 0xd5, 0xe6, 0x7b, 0xca, - 0x85, 0x1f, 0xe2, 0x73, 0xfb, 0xe9, 0xe5, 0x09, 0x59, 0x5d, 0x31, 0x94, 0xbe, 0xfd, 0x78, 0xf5, - 0x54, 0xd3, 0x4e, 0x7b, 0xf8, 0x06, 0x1d, 0xb5, 0x86, 0x27, 0x37, 0x2c, 0xb5, 0x8f, 0x4d, 0x4b, - 0xe9, 0xeb, 0x9c, 0x61, 0xf9, 0x54, 0x3b, 0xd5, 0xe8, 0xcf, 0x1b, 0xe4, 0x17, 0xa3, 0x8a, 0xff, - 0x02, 0xc8, 0x4a, 0xf8, 0xbd, 0x21, 0x36, 0x2d, 0xb4, 0x0e, 0x29, 0xdc, 0xee, 0x6a, 0x95, 0xf8, - 0x95, 0xf8, 0xd5, 0xc2, 0xfa, 0xa5, 0xb5, 0xb1, 0xc5, 0xad, 0x71, 0xbe, 0x7a, 0xbb, 0xab, 0x35, - 0x62, 0x12, 0xe5, 0x45, 0xb7, 0x20, 0x7d, 0xd2, 0x1b, 0x9a, 0xdd, 0x4a, 0x82, 0x0a, 0x5d, 0x0e, - 0x13, 0xba, 0x4b, 0x98, 0x1a, 0x31, 0x89, 0x71, 0x93, 0x57, 0xa9, 0x83, 0x13, 0xad, 0x92, 0x9c, - 0xfe, 0xaa, 0xed, 0xc1, 0x09, 0x7d, 0x15, 0xe1, 0x45, 0x9b, 0x00, 0xea, 0x40, 0xb5, 0xe4, 0x76, - 0x57, 0x51, 0x07, 0x95, 0x14, 0x95, 0x7c, 0x32, 0x5c, 0x52, 0xb5, 0x6a, 0x84, 0xb1, 0x11, 0x93, - 0xf2, 0xaa, 0x3d, 0x20, 0xd3, 0x7d, 0x6f, 0x88, 0x8d, 0xf3, 0x4a, 0x7a, 0xfa, 0x74, 0xdf, 0x24, - 0x4c, 0x64, 0xba, 0x94, 0x1b, 0x6d, 0x43, 0xa1, 0x85, 0x4f, 0xd5, 0x81, 0xdc, 0xea, 0x69, 0xed, - 0x87, 0x95, 0x0c, 0x15, 0x16, 0xc3, 0x84, 0x37, 0x09, 0xeb, 0x26, 0xe1, 0xdc, 0x4c, 0x54, 0xe2, - 0x8d, 0x98, 0x04, 0x2d, 0x87, 0x82, 0x5e, 0x85, 0x5c, 0xbb, 0x8b, 0xdb, 0x0f, 0x65, 0x6b, 0x54, - 0xc9, 0x52, 0x3d, 0xab, 0x61, 0x7a, 0x6a, 0x84, 0xaf, 0x39, 0x6a, 0xc4, 0xa4, 0x6c, 0x9b, 0xfd, - 0x44, 0x77, 0x01, 0x3a, 0xb8, 0xa7, 0x9e, 0x61, 0x83, 0xc8, 0xe7, 0xa6, 0xef, 0xc1, 0x16, 0xe3, - 0x6c, 0x8e, 0xf8, 0x34, 0xf2, 0x1d, 0x9b, 0x80, 0x6a, 0x90, 0xc7, 0x83, 0x0e, 0x5f, 0x4e, 0x9e, - 0xaa, 0xb9, 0x12, 0x7a, 0xde, 0x83, 0x8e, 0x77, 0x31, 0x39, 0xcc, 0xc7, 0xe8, 0x36, 0x64, 0xda, - 0x5a, 0xbf, 0xaf, 0x5a, 0x15, 0xa0, 0x1a, 0x56, 0x42, 0x17, 0x42, 
0xb9, 0x1a, 0x31, 0x89, 0xf3, - 0xa3, 0x3d, 0x28, 0xf5, 0x54, 0xd3, 0x92, 0xcd, 0x81, 0xa2, 0x9b, 0x5d, 0xcd, 0x32, 0x2b, 0x05, - 0xaa, 0xe1, 0xe9, 0x30, 0x0d, 0xbb, 0xaa, 0x69, 0x1d, 0xda, 0xcc, 0x8d, 0x98, 0x54, 0xec, 0x79, - 0x09, 0x44, 0x9f, 0x76, 0x72, 0x82, 0x0d, 0x47, 0x61, 0x65, 0x61, 0xba, 0xbe, 0x7d, 0xc2, 0x6d, - 0xcb, 0x13, 0x7d, 0x9a, 0x97, 0x80, 0xfe, 0x1f, 0x96, 0x7a, 0x9a, 0xd2, 0x71, 0xd4, 0xc9, 0xed, - 0xee, 0x70, 0xf0, 0xb0, 0x52, 0xa4, 0x4a, 0xaf, 0x85, 0x4e, 0x52, 0x53, 0x3a, 0xb6, 0x8a, 0x1a, - 0x11, 0x68, 0xc4, 0xa4, 0xc5, 0xde, 0x38, 0x11, 0x3d, 0x80, 0x65, 0x45, 0xd7, 0x7b, 0xe7, 0xe3, - 0xda, 0x4b, 0x54, 0xfb, 0xf5, 0x30, 0xed, 0x1b, 0x44, 0x66, 0x5c, 0x3d, 0x52, 0x26, 0xa8, 0xa8, - 0x09, 0x82, 0x6e, 0x60, 0x5d, 0x31, 0xb0, 0xac, 0x1b, 0x9a, 0xae, 0x99, 0x4a, 0xaf, 0x52, 0xa6, - 0xba, 0x9f, 0x0d, 0xd3, 0x7d, 0xc0, 0xf8, 0x0f, 0x38, 0x7b, 0x23, 0x26, 0x95, 0x75, 0x3f, 0x89, - 0x69, 0xd5, 0xda, 0xd8, 0x34, 0x5d, 0xad, 0xc2, 0x2c, 0xad, 0x94, 0xdf, 0xaf, 0xd5, 0x47, 0x42, - 0x75, 0x28, 0xe0, 0x11, 0x11, 0x97, 0xcf, 0x34, 0x0b, 0x57, 0x16, 0xa7, 0x5f, 0xac, 0x3a, 0x65, - 0x3d, 0xd6, 0x2c, 0x4c, 0x2e, 0x15, 0x76, 0x46, 0x48, 0x81, 0xc7, 0xce, 0xb0, 0xa1, 0x9e, 0x9c, - 0x53, 0x35, 0x32, 0x7d, 0x62, 0xaa, 0xda, 0xa0, 0x82, 0xa8, 0xc2, 0xe7, 0xc2, 0x14, 0x1e, 0x53, - 0x21, 0xa2, 0xa2, 0x6e, 0x8b, 0x34, 0x62, 0xd2, 0xd2, 0xd9, 0x24, 0x99, 0x98, 0xd8, 0x89, 0x3a, - 0x50, 0x7a, 0xea, 0x07, 0x98, 0x5f, 0x9b, 0xa5, 0xe9, 0x26, 0x76, 0x97, 0x73, 0xd3, 0xbb, 0x42, - 0x4c, 0xec, 0xc4, 0x4b, 0xd8, 0xcc, 0x42, 0xfa, 0x4c, 0xe9, 0x0d, 0xb1, 0xf8, 0x2c, 0x14, 0x3c, - 0x8e, 0x15, 0x55, 0x20, 0xdb, 0xc7, 0xa6, 0xa9, 0x9c, 0x62, 0xea, 0x87, 0xf3, 0x92, 0x3d, 0x14, - 0x4b, 0xb0, 0xe0, 0x75, 0xa6, 0xe2, 0x27, 0x71, 0x47, 0x92, 0xf8, 0x49, 0x22, 0x79, 0x86, 0x0d, - 0xba, 0x6c, 0x2e, 0xc9, 0x87, 0xe8, 0x29, 0x28, 0xd2, 0x29, 0xcb, 0xf6, 0x73, 0xe2, 0xac, 0x53, - 0xd2, 0x02, 0x25, 0x1e, 0x73, 0xa6, 0x55, 0x28, 0xe8, 0xeb, 0xba, 0xc3, 0x92, 0xa4, 0x2c, 0xa0, - 0xaf, 
0xeb, 0x36, 0xc3, 0x93, 0xb0, 0x40, 0xd6, 0xe7, 0x70, 0xa4, 0xe8, 0x4b, 0x0a, 0x84, 0xc6, - 0x59, 0xc4, 0x3f, 0x26, 0x40, 0x18, 0x77, 0xc0, 0xe8, 0x36, 0xa4, 0x48, 0x2c, 0xe2, 0x61, 0xa5, - 0xba, 0xc6, 0x02, 0xd5, 0x9a, 0x1d, 0xa8, 0xd6, 0x9a, 0x76, 0xa0, 0xda, 0xcc, 0x7d, 0xf6, 0xc5, - 0x6a, 0xec, 0x93, 0xbf, 0xae, 0xc6, 0x25, 0x2a, 0x81, 0x2e, 0x12, 0x5f, 0xa9, 0xa8, 0x03, 0x59, - 0xed, 0xd0, 0x29, 0xe7, 0x89, 0x23, 0x54, 0xd4, 0xc1, 0x76, 0x07, 0xed, 0x82, 0xd0, 0xd6, 0x06, - 0x26, 0x1e, 0x98, 0x43, 0x53, 0x66, 0x81, 0x90, 0x07, 0x13, 0x9f, 0x3b, 0x64, 0xe1, 0xb5, 0x66, - 0x73, 0x1e, 0x50, 0x46, 0xa9, 0xdc, 0xf6, 0x13, 0x88, 0x5b, 0x3d, 0x53, 0x7a, 0x6a, 0x47, 0xb1, - 0x34, 0xc3, 0xac, 0xa4, 0xae, 0x24, 0x03, 0xfd, 0xe1, 0xb1, 0xcd, 0x72, 0xa4, 0x77, 0x14, 0x0b, - 0x6f, 0xa6, 0xc8, 0x74, 0x25, 0x8f, 0x24, 0x7a, 0x06, 0xca, 0x8a, 0xae, 0xcb, 0xa6, 0xa5, 0x58, - 0x58, 0x6e, 0x9d, 0x5b, 0xd8, 0xa4, 0x81, 0x66, 0x41, 0x2a, 0x2a, 0xba, 0x7e, 0x48, 0xa8, 0x9b, - 0x84, 0x88, 0x9e, 0x86, 0x12, 0x89, 0x49, 0xaa, 0xd2, 0x93, 0xbb, 0x58, 0x3d, 0xed, 0x5a, 0x34, - 0xa4, 0x24, 0xa5, 0x22, 0xa7, 0x36, 0x28, 0x51, 0xec, 0x38, 0x27, 0x4e, 0xe3, 0x11, 0x42, 0x90, - 0xea, 0x28, 0x96, 0x42, 0x77, 0x72, 0x41, 0xa2, 0xbf, 0x09, 0x4d, 0x57, 0xac, 0x2e, 0xdf, 0x1f, - 0xfa, 0x1b, 0x5d, 0x80, 0x0c, 0x57, 0x9b, 0xa4, 0x6a, 0xf9, 0x08, 0x2d, 0x43, 0x5a, 0x37, 0xb4, - 0x33, 0x4c, 0x8f, 0x2e, 0x27, 0xb1, 0x81, 0xf8, 0x61, 0x02, 0x16, 0x27, 0x22, 0x17, 0xd1, 0xdb, - 0x55, 0xcc, 0xae, 0xfd, 0x2e, 0xf2, 0x1b, 0xbd, 0x44, 0xf4, 0x2a, 0x1d, 0x6c, 0xf0, 0x68, 0x5f, - 0x99, 0xdc, 0xea, 0x06, 0x7d, 0xce, 0xb7, 0x86, 0x73, 0xa3, 0x7b, 0x20, 0xf4, 0x14, 0xd3, 0x92, - 0x99, 0xf7, 0x97, 0x3d, 0x91, 0xff, 0x89, 0x89, 0x4d, 0x66, 0xb1, 0x82, 0x18, 0x34, 0x57, 0x52, - 0x22, 0xa2, 0x2e, 0x15, 0x1d, 0xc1, 0x72, 0xeb, 0xfc, 0x03, 0x65, 0x60, 0xa9, 0x03, 0x2c, 0x4f, - 0x9c, 0xda, 0x64, 0x2a, 0x71, 0x5f, 0x35, 0x5b, 0xb8, 0xab, 0x9c, 0xa9, 0x9a, 0x3d, 0xad, 0x25, - 0x47, 0xde, 0x39, 0x51, 0x53, 0x94, 0xa0, 
0xe4, 0x0f, 0xbb, 0xa8, 0x04, 0x09, 0x6b, 0xc4, 0xd7, - 0x9f, 0xb0, 0x46, 0xe8, 0x45, 0x48, 0x91, 0x35, 0xd2, 0xb5, 0x97, 0x02, 0x5e, 0xc4, 0xe5, 0x9a, - 0xe7, 0x3a, 0x96, 0x28, 0xa7, 0x28, 0x3a, 0xb7, 0xc1, 0x09, 0xc5, 0xe3, 0x5a, 0xc5, 0x6b, 0x50, - 0x1e, 0x8b, 0xb3, 0x9e, 0xe3, 0x8b, 0x7b, 0x8f, 0x4f, 0x2c, 0x43, 0xd1, 0x17, 0x50, 0xc5, 0x0b, - 0xb0, 0x1c, 0x14, 0x1f, 0xc5, 0xae, 0x43, 0xf7, 0xc5, 0x39, 0x74, 0x0b, 0x72, 0x4e, 0x80, 0x64, - 0xb7, 0xf1, 0xe2, 0xc4, 0x2a, 0x6c, 0x66, 0xc9, 0x61, 0x25, 0xd7, 0x90, 0x58, 0x35, 0x35, 0x87, - 0x04, 0x9d, 0x78, 0x56, 0xd1, 0xf5, 0x86, 0x62, 0x76, 0xc5, 0x77, 0xa0, 0x12, 0x16, 0xfc, 0xc6, - 0x96, 0x91, 0x72, 0xac, 0xf0, 0x02, 0x64, 0x4e, 0x34, 0xa3, 0xaf, 0x58, 0x54, 0x59, 0x51, 0xe2, - 0x23, 0x62, 0x9d, 0x2c, 0x10, 0x26, 0x29, 0x99, 0x0d, 0x44, 0x19, 0x2e, 0x86, 0x06, 0x40, 0x22, - 0xa2, 0x0e, 0x3a, 0x98, 0xed, 0x67, 0x51, 0x62, 0x03, 0x57, 0x11, 0x9b, 0x2c, 0x1b, 0x90, 0xd7, - 0x9a, 0x74, 0xad, 0x54, 0x7f, 0x5e, 0xe2, 0x23, 0xf1, 0xb7, 0x49, 0xb8, 0x10, 0x1c, 0x06, 0xd1, - 0x15, 0x58, 0xe8, 0x2b, 0x23, 0xd9, 0x1a, 0xf1, 0xbb, 0xcc, 0x8e, 0x03, 0xfa, 0xca, 0xa8, 0x39, - 0x62, 0x17, 0x59, 0x80, 0xa4, 0x35, 0x32, 0x2b, 0x89, 0x2b, 0xc9, 0xab, 0x0b, 0x12, 0xf9, 0x89, - 0x8e, 0x60, 0xb1, 0xa7, 0xb5, 0x95, 0x9e, 0xec, 0xb1, 0x78, 0x6e, 0xec, 0x4f, 0x4d, 0x6c, 0x36, - 0x0b, 0x68, 0xb8, 0x33, 0x61, 0xf4, 0x65, 0xaa, 0x63, 0xd7, 0xb1, 0xfc, 0x6f, 0xc9, 0xea, 0x3d, - 0x67, 0x94, 0xf6, 0x79, 0x0a, 0xdb, 0x67, 0x67, 0xe6, 0xf6, 0xd9, 0x2f, 0xc2, 0xf2, 0x00, 0x8f, - 0x2c, 0xcf, 0x1c, 0x99, 0xe1, 0x64, 0xe9, 0x59, 0x20, 0xf2, 0xcc, 0x7d, 0x3f, 0xb1, 0x21, 0x74, - 0x8d, 0x66, 0x16, 0xba, 0x66, 0x62, 0x43, 0x56, 0x3a, 0x1d, 0x03, 0x9b, 0x26, 0xcd, 0x6c, 0x17, - 0x68, 0xba, 0x40, 0xe9, 0x1b, 0x8c, 0x2c, 0xfe, 0xc2, 0x7b, 0x56, 0xfe, 0x4c, 0x82, 0x9f, 0x44, - 0xdc, 0x3d, 0x89, 0x43, 0x58, 0xe6, 0xf2, 0x1d, 0xdf, 0x61, 0x24, 0xa2, 0x7a, 0x1e, 0x64, 0x8b, - 0x47, 0x38, 0x87, 0xe4, 0xa3, 0x9d, 0x83, 0xed, 0x6d, 0x53, 0x1e, 0x6f, 0xfb, 
0x1f, 0x76, 0x36, - 0xaf, 0x3b, 0x51, 0xc4, 0x4d, 0xd3, 0x02, 0xa3, 0x88, 0xbb, 0xae, 0x84, 0xcf, 0xbd, 0xfd, 0x32, - 0x0e, 0xd5, 0xf0, 0xbc, 0x2c, 0x50, 0xd5, 0x73, 0xb0, 0xe8, 0xac, 0xc5, 0x99, 0x1f, 0xbb, 0xf5, - 0x82, 0xf3, 0x80, 0x4f, 0x30, 0x34, 0x2a, 0x3e, 0x0d, 0xa5, 0xb1, 0xac, 0x91, 0x9d, 0x42, 0xf1, - 0xcc, 0xfb, 0x7e, 0xf1, 0xa7, 0x49, 0xc7, 0xab, 0xfa, 0x52, 0xbb, 0x00, 0xcb, 0x7b, 0x13, 0x96, - 0x3a, 0xb8, 0xad, 0x76, 0xbe, 0xae, 0xe1, 0x2d, 0x72, 0xe9, 0xef, 0xed, 0x2e, 0x82, 0xdd, 0xfd, - 0xb9, 0x00, 0x39, 0x09, 0x9b, 0x3a, 0x49, 0xe9, 0xd0, 0x26, 0xe4, 0xf1, 0xa8, 0x8d, 0x75, 0xcb, - 0xce, 0x82, 0x83, 0xab, 0x09, 0xc6, 0x5d, 0xb7, 0x39, 0x49, 0x6d, 0xec, 0x88, 0xa1, 0x9b, 0x1c, - 0x06, 0x09, 0x47, 0x34, 0xb8, 0xb8, 0x17, 0x07, 0x79, 0xc9, 0xc6, 0x41, 0x92, 0xa1, 0xa5, 0x30, - 0x93, 0x1a, 0x03, 0x42, 0x6e, 0x72, 0x20, 0x24, 0x35, 0xe3, 0x65, 0x3e, 0x24, 0xa4, 0xe6, 0x43, - 0x42, 0xd2, 0x33, 0x96, 0x19, 0x02, 0x85, 0xbc, 0x64, 0x43, 0x21, 0x99, 0x19, 0x33, 0x1e, 0xc3, - 0x42, 0x76, 0xfc, 0x58, 0x48, 0x36, 0x24, 0xb4, 0xd9, 0xd2, 0x53, 0xc1, 0x90, 0xd7, 0x3c, 0x60, - 0x48, 0x2e, 0x14, 0x85, 0x60, 0x8a, 0x02, 0xd0, 0x90, 0x37, 0x7c, 0x68, 0x48, 0x7e, 0xc6, 0x3e, - 0x4c, 0x81, 0x43, 0xb6, 0xbc, 0x70, 0x08, 0x84, 0xa2, 0x2a, 0xfc, 0xdc, 0xc3, 0xf0, 0x90, 0x57, - 0x1c, 0x3c, 0xa4, 0x10, 0x0a, 0xec, 0xf0, 0xb5, 0x8c, 0x03, 0x22, 0xfb, 0x13, 0x80, 0x08, 0x03, - 0x30, 0x9e, 0x09, 0x55, 0x31, 0x03, 0x11, 0xd9, 0x9f, 0x40, 0x44, 0x8a, 0x33, 0x14, 0xce, 0x80, - 0x44, 0x7e, 0x10, 0x0c, 0x89, 0x84, 0x83, 0x16, 0x7c, 0x9a, 0xd1, 0x30, 0x11, 0x39, 0x04, 0x13, - 0x29, 0x87, 0xd6, 0xef, 0x4c, 0x7d, 0x64, 0x50, 0xe4, 0x28, 0x00, 0x14, 0x61, 0xf0, 0xc5, 0xd5, - 0x50, 0xe5, 0x11, 0x50, 0x91, 0xa3, 0x00, 0x54, 0x64, 0x71, 0xa6, 0xda, 0x99, 0xb0, 0xc8, 0x5d, - 0x3f, 0x2c, 0x82, 0x66, 0xdc, 0xb1, 0x50, 0x5c, 0xa4, 0x15, 0x86, 0x8b, 0x30, 0xec, 0xe2, 0xf9, - 0x50, 0x8d, 0x73, 0x00, 0x23, 0xfb, 0x13, 0xc0, 0xc8, 0xf2, 0x0c, 0x4b, 0x8b, 0x8a, 0x8c, 0x5c, - 0x23, 0x19, 0xc5, 
0x98, 0xab, 0x26, 0xc9, 0x3d, 0x36, 0x0c, 0xcd, 0xe0, 0x18, 0x07, 0x1b, 0x88, - 0x57, 0x49, 0xa5, 0xec, 0xba, 0xe5, 0x29, 0x28, 0x0a, 0x2d, 0xa2, 0x3c, 0xae, 0x58, 0xfc, 0x5d, - 0xdc, 0x95, 0xa5, 0x05, 0xa6, 0xb7, 0xca, 0xce, 0xf3, 0x2a, 0xdb, 0x83, 0xad, 0x24, 0xfc, 0xd8, - 0xca, 0x2a, 0x14, 0x48, 0x71, 0x34, 0x06, 0x9b, 0x28, 0xba, 0x03, 0x9b, 0x5c, 0x87, 0x45, 0x9a, - 0x04, 0x30, 0x04, 0x86, 0x47, 0xd6, 0x14, 0x8d, 0xac, 0x65, 0xf2, 0x80, 0xed, 0x02, 0x0b, 0xb1, - 0x2f, 0xc0, 0x92, 0x87, 0xd7, 0x29, 0xba, 0x18, 0x86, 0x20, 0x38, 0xdc, 0x1b, 0xbc, 0xfa, 0xfa, - 0x43, 0xdc, 0xdd, 0x21, 0x17, 0x6f, 0x09, 0x82, 0x46, 0xe2, 0xdf, 0x10, 0x34, 0x92, 0xf8, 0xda, - 0xd0, 0x88, 0xb7, 0x88, 0x4c, 0xfa, 0x8b, 0xc8, 0x7f, 0xc4, 0xdd, 0x33, 0x71, 0x80, 0x8e, 0xb6, - 0xd6, 0xc1, 0xbc, 0xac, 0xa3, 0xbf, 0x49, 0x9a, 0xd5, 0xd3, 0x4e, 0x79, 0xf1, 0x46, 0x7e, 0x12, - 0x2e, 0x27, 0x76, 0xe6, 0x79, 0x68, 0x74, 0x2a, 0x42, 0x96, 0xbb, 0xf0, 0x8a, 0x50, 0x80, 0xe4, - 0x43, 0xcc, 0x22, 0xdd, 0x82, 0x44, 0x7e, 0x12, 0x3e, 0x6a, 0x64, 0x3c, 0x07, 0x61, 0x03, 0x74, - 0x1b, 0xf2, 0xb4, 0x5d, 0x23, 0x6b, 0xba, 0xc9, 0x03, 0x92, 0x2f, 0x5d, 0x63, 0x5d, 0x99, 0xb5, - 0x03, 0xc2, 0xb3, 0xaf, 0x9b, 0x52, 0x4e, 0xe7, 0xbf, 0x3c, 0x49, 0x53, 0xde, 0x97, 0x34, 0x5d, - 0x82, 0x3c, 0x99, 0xbd, 0xa9, 0x2b, 0x6d, 0x4c, 0x23, 0x4b, 0x5e, 0x72, 0x09, 0xe2, 0x03, 0x40, - 0x93, 0x71, 0x12, 0x35, 0x20, 0x83, 0xcf, 0xf0, 0xc0, 0x62, 0x39, 0x65, 0x61, 0xfd, 0xc2, 0x64, - 0xdd, 0x48, 0x1e, 0x6f, 0x56, 0xc8, 0x26, 0xff, 0xfd, 0x8b, 0x55, 0x81, 0x71, 0x3f, 0xaf, 0xf5, - 0x55, 0x0b, 0xf7, 0x75, 0xeb, 0x5c, 0xe2, 0xf2, 0xe2, 0x67, 0x71, 0x28, 0x8f, 0xc5, 0xcf, 0xc0, - 0xbd, 0xb5, 0x4d, 0x3e, 0xe1, 0x01, 0x96, 0x2e, 0x03, 0x9c, 0x2a, 0xa6, 0xfc, 0xbe, 0x32, 0xb0, - 0x70, 0x87, 0x6f, 0x67, 0xfe, 0x54, 0x31, 0xdf, 0xa2, 0x04, 0xff, 0xc2, 0x72, 0x63, 0x0b, 0xf3, - 0x14, 0xdb, 0x79, 0x6f, 0xb1, 0x8d, 0xaa, 0x90, 0xd3, 0x0d, 0x55, 0x33, 0x54, 0xeb, 0x9c, 0xee, - 0x46, 0x52, 0x72, 0xc6, 0x3b, 0xa9, 0x5c, 0x52, 0x48, 
0xed, 0xa4, 0x72, 0x29, 0x21, 0xbd, 0x93, - 0xca, 0x65, 0x84, 0xec, 0x4e, 0x2a, 0x97, 0x15, 0x72, 0x3b, 0xa9, 0x5c, 0x41, 0x58, 0x10, 0x3f, - 0x4a, 0xb8, 0xb6, 0xee, 0xa2, 0x29, 0x51, 0x17, 0x13, 0xcd, 0x78, 0x56, 0x02, 0x96, 0xec, 0xa1, - 0x90, 0xd9, 0x93, 0xd1, 0xd0, 0xc4, 0x1d, 0x0e, 0xd8, 0x39, 0x63, 0xcf, 0xa1, 0x65, 0x1f, 0xed, - 0xd0, 0xa6, 0xef, 0xac, 0xf8, 0x63, 0x0a, 0xb1, 0xfa, 0x33, 0x11, 0x74, 0xe8, 0xad, 0x83, 0x86, - 0xf4, 0x0a, 0xda, 0xc6, 0x13, 0xf5, 0xae, 0xba, 0xf5, 0x12, 0x23, 0x9b, 0xe8, 0x6d, 0x78, 0x7c, - 0xcc, 0x8f, 0x38, 0xaa, 0x13, 0x51, 0xdd, 0xc9, 0x63, 0x7e, 0x77, 0x62, 0xab, 0x76, 0x37, 0x2b, - 0xf9, 0x88, 0x16, 0xbe, 0x0d, 0x25, 0x7f, 0x52, 0x15, 0x78, 0xfc, 0x4f, 0x41, 0xd1, 0xc0, 0x96, - 0xa2, 0x0e, 0x64, 0x5f, 0x05, 0xb8, 0xc0, 0x88, 0x1c, 0x6d, 0x3d, 0x80, 0xc7, 0x02, 0x93, 0x2b, - 0xf4, 0x32, 0xe4, 0xdd, 0xbc, 0x8c, 0xed, 0xea, 0x14, 0xdc, 0xcc, 0xe5, 0x15, 0x7f, 0x1f, 0x77, - 0x55, 0xfa, 0x91, 0xb8, 0x3a, 0x64, 0x0c, 0x6c, 0x0e, 0x7b, 0x0c, 0x1b, 0x2b, 0xad, 0xbf, 0x10, - 0x2d, 0x2d, 0x23, 0xd4, 0x61, 0xcf, 0x92, 0xb8, 0xb0, 0xf8, 0x00, 0x32, 0x8c, 0x82, 0x0a, 0x90, - 0x3d, 0xda, 0xbb, 0xb7, 0xb7, 0xff, 0xd6, 0x9e, 0x10, 0x43, 0x00, 0x99, 0x8d, 0x5a, 0xad, 0x7e, - 0xd0, 0x14, 0xe2, 0x28, 0x0f, 0xe9, 0x8d, 0xcd, 0x7d, 0xa9, 0x29, 0x24, 0x08, 0x59, 0xaa, 0xef, - 0xd4, 0x6b, 0x4d, 0x21, 0x89, 0x16, 0xa1, 0xc8, 0x7e, 0xcb, 0x77, 0xf7, 0xa5, 0xfb, 0x1b, 0x4d, - 0x21, 0xe5, 0x21, 0x1d, 0xd6, 0xf7, 0xb6, 0xea, 0x92, 0x90, 0x16, 0xff, 0x0b, 0x2e, 0x86, 0x26, - 0x72, 0x2e, 0xcc, 0x16, 0xf7, 0xc0, 0x6c, 0xe2, 0xcf, 0x13, 0xa4, 0x8a, 0x0f, 0xcb, 0xce, 0xd0, - 0xce, 0xd8, 0xc2, 0xd7, 0xe7, 0x48, 0xed, 0xc6, 0x56, 0x4f, 0x0a, 0x77, 0x03, 0x9f, 0x60, 0xab, - 0xdd, 0x65, 0xd9, 0x22, 0x0b, 0x4f, 0x45, 0xa9, 0xc8, 0xa9, 0x54, 0xc8, 0x64, 0x6c, 0xef, 0xe2, - 0xb6, 0x25, 0x33, 0x27, 0xc4, 0x8c, 0x2e, 0x4f, 0xd8, 0x08, 0xf5, 0x90, 0x11, 0xc5, 0x77, 0xe6, - 0xda, 0xcb, 0x3c, 0xa4, 0xa5, 0x7a, 0x53, 0x7a, 0x5b, 0x48, 0x22, 0x04, 0x25, 0xfa, 0x53, 
0x3e, - 0xdc, 0xdb, 0x38, 0x38, 0x6c, 0xec, 0x93, 0xbd, 0x5c, 0x82, 0xb2, 0xbd, 0x97, 0x36, 0x31, 0x2d, - 0xfe, 0x29, 0x01, 0x8f, 0x87, 0xe4, 0x96, 0xe8, 0x36, 0x80, 0x35, 0x92, 0x0d, 0xdc, 0xd6, 0x8c, - 0x4e, 0xb8, 0x91, 0x35, 0x47, 0x12, 0xe5, 0x90, 0xf2, 0x16, 0xff, 0x65, 0x4e, 0x41, 0x67, 0xd1, - 0xab, 0x5c, 0x29, 0x59, 0x95, 0x7d, 0xd5, 0x2e, 0x07, 0x80, 0x90, 0xb8, 0x4d, 0x14, 0xd3, 0xbd, - 0xa5, 0x8a, 0x29, 0x3f, 0xba, 0x1f, 0xe4, 0x54, 0x22, 0xf6, 0x46, 0xe6, 0x73, 0x27, 0xe9, 0x47, - 0x73, 0x27, 0xe2, 0xaf, 0x92, 0xde, 0x8d, 0xf5, 0xa7, 0xd2, 0xfb, 0x90, 0x31, 0x2d, 0xc5, 0x1a, - 0x9a, 0xdc, 0xe0, 0x5e, 0x8e, 0x9a, 0x97, 0xaf, 0xd9, 0x3f, 0x0e, 0xa9, 0xb8, 0xc4, 0xd5, 0x7c, - 0xbf, 0xdf, 0xa6, 0x78, 0x0b, 0x4a, 0xfe, 0xcd, 0x09, 0xbf, 0x32, 0xae, 0xcf, 0x49, 0x88, 0x77, - 0xdc, 0x6c, 0xc7, 0x03, 0x11, 0x4e, 0xc2, 0x6f, 0xf1, 0x20, 0xf8, 0xed, 0xd7, 0x71, 0x78, 0x62, - 0x4a, 0x75, 0x82, 0xde, 0x1c, 0x3b, 0xe7, 0x57, 0xe6, 0xa9, 0x6d, 0xd6, 0x18, 0xcd, 0x7f, 0xd2, - 0xe2, 0x4d, 0x58, 0xf0, 0xd2, 0xa3, 0x2d, 0xf2, 0x27, 0x49, 0xd7, 0xe7, 0xfb, 0x71, 0xc2, 0x6f, - 0x2c, 0xad, 0x1b, 0xb3, 0xb3, 0xc4, 0x9c, 0x76, 0x16, 0x98, 0x2c, 0x24, 0xbf, 0xbd, 0x64, 0x21, - 0xf5, 0x88, 0xc9, 0x82, 0xf7, 0xc2, 0xa5, 0xfd, 0x17, 0x6e, 0x22, 0xae, 0x67, 0x02, 0xe2, 0xfa, - 0xdb, 0x00, 0x9e, 0xf6, 0xe1, 0x32, 0xa4, 0x0d, 0x6d, 0x38, 0xe8, 0x50, 0x33, 0x49, 0x4b, 0x6c, - 0x80, 0x6e, 0x41, 0x9a, 0x98, 0x9b, 0xbd, 0x99, 0x93, 0x9e, 0x97, 0x98, 0x8b, 0x07, 0xa1, 0x65, - 0xdc, 0xa2, 0x0a, 0x68, 0xb2, 0x85, 0x13, 0xf2, 0x8a, 0xd7, 0xfc, 0xaf, 0x78, 0x32, 0xb4, 0x19, - 0x14, 0xfc, 0xaa, 0x0f, 0x20, 0x4d, 0xcd, 0x83, 0xe4, 0x37, 0xb4, 0x0d, 0xc9, 0xcb, 0x53, 0xf2, - 0x1b, 0xfd, 0x10, 0x40, 0xb1, 0x2c, 0x43, 0x6d, 0x0d, 0xdd, 0x17, 0xac, 0x06, 0x9b, 0xd7, 0x86, - 0xcd, 0xb7, 0x79, 0x89, 0xdb, 0xd9, 0xb2, 0x2b, 0xea, 0xb1, 0x35, 0x8f, 0x42, 0x71, 0x0f, 0x4a, - 0x7e, 0x59, 0xbb, 0xa0, 0x62, 0x73, 0xf0, 0x17, 0x54, 0xac, 0x3e, 0xe6, 0x05, 0x95, 0x53, 0x8e, - 0x25, 0x59, 0xc7, 0x99, 0x0e, 
0xc4, 0x7f, 0xc6, 0x61, 0xc1, 0x6b, 0x9d, 0xdf, 0x70, 0x1a, 0x3f, - 0xa3, 0x72, 0xb9, 0x38, 0x91, 0xc5, 0x67, 0x4f, 0x15, 0xf3, 0xe8, 0xbb, 0x4c, 0xe2, 0x3f, 0x8a, - 0x43, 0xce, 0x59, 0x7c, 0x48, 0xbb, 0xd7, 0xdd, 0xbb, 0x84, 0xb7, 0xb9, 0xc9, 0xfa, 0xc7, 0x49, - 0xa7, 0x2b, 0x7d, 0xc7, 0x49, 0xa8, 0xc2, 0x20, 0x64, 0xef, 0x4e, 0xdb, 0x8d, 0x79, 0x9e, 0x3f, - 0xfe, 0x8c, 0xcf, 0x83, 0x64, 0x12, 0xe8, 0x7f, 0x20, 0xa3, 0xb4, 0x1d, 0xe0, 0xbc, 0x14, 0x80, - 0xa4, 0xda, 0xac, 0x6b, 0xcd, 0xd1, 0x06, 0xe5, 0x94, 0xb8, 0x04, 0x9f, 0x55, 0xc2, 0xe9, 0x6a, - 0xbf, 0x4e, 0xf4, 0x32, 0x1e, 0xbf, 0xdb, 0x2c, 0x01, 0x1c, 0xed, 0xdd, 0xdf, 0xdf, 0xda, 0xbe, - 0xbb, 0x5d, 0xdf, 0xe2, 0x29, 0xd5, 0xd6, 0x56, 0x7d, 0x4b, 0x48, 0x10, 0x3e, 0xa9, 0x7e, 0x7f, - 0xff, 0xb8, 0xbe, 0x25, 0x24, 0xc5, 0x3b, 0x90, 0x77, 0x5c, 0x0f, 0xaa, 0x40, 0xd6, 0x6e, 0x02, - 0xc4, 0xb9, 0x03, 0xe0, 0x3d, 0x9d, 0x65, 0x48, 0xeb, 0xda, 0xfb, 0xbc, 0xa7, 0x9b, 0x94, 0xd8, - 0x40, 0xec, 0x40, 0x79, 0xcc, 0x6f, 0xa1, 0x3b, 0x90, 0xd5, 0x87, 0x2d, 0xd9, 0x36, 0xda, 0xb1, - 0x96, 0x89, 0x5d, 0xd7, 0x0f, 0x5b, 0x3d, 0xb5, 0x7d, 0x0f, 0x9f, 0xdb, 0xdb, 0xa4, 0x0f, 0x5b, - 0xf7, 0x98, 0x6d, 0xb3, 0xb7, 0x24, 0xbc, 0x6f, 0x39, 0x83, 0x9c, 0x7d, 0x55, 0xd1, 0xff, 0x42, - 0xde, 0x71, 0x89, 0xce, 0x87, 0x2e, 0xa1, 0xbe, 0x94, 0xab, 0x77, 0x45, 0xd0, 0x75, 0x58, 0x34, - 0xd5, 0xd3, 0x81, 0xdd, 0x30, 0x62, 0x38, 0x5a, 0x82, 0xde, 0x99, 0x32, 0x7b, 0xb0, 0x6b, 0x83, - 0x3f, 0x24, 0x12, 0x0a, 0xe3, 0xbe, 0xe2, 0xbb, 0x9c, 0x40, 0x40, 0xc4, 0x4e, 0x06, 0x45, 0xec, - 0x0f, 0x13, 0x50, 0xf0, 0xb4, 0xa1, 0xd0, 0x7f, 0x7b, 0x1c, 0x57, 0x29, 0x20, 0xd4, 0x78, 0x78, - 0xdd, 0x6f, 0x28, 0xfc, 0x0b, 0x4b, 0xcc, 0xbf, 0xb0, 0xb0, 0xae, 0x9f, 0xdd, 0xcd, 0x4a, 0xcd, - 0xdd, 0xcd, 0x7a, 0x1e, 0x90, 0xa5, 0x59, 0x4a, 0x4f, 0x3e, 0xd3, 0x2c, 0x75, 0x70, 0x2a, 0x33, - 0xd3, 0x60, 0x6e, 0x46, 0xa0, 0x4f, 0x8e, 0xe9, 0x83, 0x03, 0x6a, 0x25, 0x3f, 0x8a, 0x43, 0xce, - 0x29, 0xfb, 0xe6, 0xfd, 0x24, 0xe2, 0x02, 0x64, 0x78, 0x65, 0xc3, 
0xbe, 0x89, 0xe0, 0xa3, 0xc0, - 0xb6, 0x5d, 0x15, 0x72, 0x7d, 0x6c, 0x29, 0xd4, 0x67, 0xb2, 0x30, 0xe9, 0x8c, 0xaf, 0xbf, 0x02, - 0x05, 0xcf, 0xd7, 0x29, 0xc4, 0x8d, 0xee, 0xd5, 0xdf, 0x12, 0x62, 0xd5, 0xec, 0xc7, 0x9f, 0x5e, - 0x49, 0xee, 0xe1, 0xf7, 0xc9, 0x0d, 0x93, 0xea, 0xb5, 0x46, 0xbd, 0x76, 0x4f, 0x88, 0x57, 0x0b, - 0x1f, 0x7f, 0x7a, 0x25, 0x2b, 0x61, 0xda, 0x65, 0xb9, 0x7e, 0x0f, 0xca, 0x63, 0x07, 0xe3, 0xbf, - 0xd0, 0x08, 0x4a, 0x5b, 0x47, 0x07, 0xbb, 0xdb, 0xb5, 0x8d, 0x66, 0x5d, 0x3e, 0xde, 0x6f, 0xd6, - 0x85, 0x38, 0x7a, 0x1c, 0x96, 0x76, 0xb7, 0xdf, 0x68, 0x34, 0xe5, 0xda, 0xee, 0x76, 0x7d, 0xaf, - 0x29, 0x6f, 0x34, 0x9b, 0x1b, 0xb5, 0x7b, 0x42, 0x62, 0xfd, 0x37, 0x05, 0x28, 0x6f, 0x6c, 0xd6, - 0xb6, 0x49, 0x6d, 0xa7, 0xb6, 0x15, 0xea, 0x1e, 0x6a, 0x90, 0xa2, 0x90, 0xed, 0xd4, 0xef, 0x8d, - 0xab, 0xd3, 0xdb, 0x70, 0xe8, 0x2e, 0xa4, 0x29, 0x9a, 0x8b, 0xa6, 0x7f, 0x80, 0x5c, 0x9d, 0xd1, - 0x97, 0x23, 0x93, 0xa1, 0xd7, 0x69, 0xea, 0x17, 0xc9, 0xd5, 0xe9, 0x6d, 0x3a, 0xb4, 0x0b, 0x59, - 0x1b, 0x6c, 0x9b, 0xf5, 0x6d, 0x6f, 0x75, 0x66, 0xbf, 0x8b, 0x2c, 0x8d, 0x81, 0xa2, 0xd3, 0x3f, - 0x56, 0xae, 0xce, 0x68, 0xe0, 0xa1, 0x6d, 0xc8, 0x70, 0x84, 0x64, 0xc6, 0x77, 0xba, 0xd5, 0x59, - 0x7d, 0x2b, 0x24, 0x41, 0xde, 0x85, 0x9b, 0x67, 0x7f, 0x82, 0x5d, 0x8d, 0xd0, 0x9b, 0x44, 0x0f, - 0xa0, 0xe8, 0x47, 0x5d, 0xa2, 0x7d, 0x0b, 0x5c, 0x8d, 0xd8, 0x21, 0x23, 0xfa, 0xfd, 0x10, 0x4c, - 0xb4, 0x6f, 0x83, 0xab, 0x11, 0x1b, 0x66, 0xe8, 0x5d, 0x58, 0x9c, 0x84, 0x48, 0xa2, 0x7f, 0x2a, - 0x5c, 0x9d, 0xa3, 0x85, 0x86, 0xfa, 0x80, 0x02, 0xa0, 0x95, 0x39, 0xbe, 0x1c, 0xae, 0xce, 0xd3, - 0x51, 0x43, 0x1d, 0x28, 0x8f, 0xc3, 0x15, 0x51, 0xbf, 0x24, 0xae, 0x46, 0xee, 0xae, 0xb1, 0xb7, - 0xf8, 0x6b, 0xf7, 0xa8, 0x5f, 0x16, 0x57, 0x23, 0x37, 0xdb, 0xd0, 0x11, 0x80, 0xa7, 0xf6, 0x8c, - 0xf0, 0xa5, 0x71, 0x35, 0x4a, 0xdb, 0x0d, 0xe9, 0xb0, 0x14, 0x54, 0x94, 0xce, 0xf3, 0xe1, 0x71, - 0x75, 0xae, 0x6e, 0x1c, 0xb1, 0x67, 0x7f, 0x79, 0x19, 0xed, 0x43, 0xe4, 0x6a, 0xc4, 0xb6, 0xdc, - 0x66, 
0xfd, 0xb3, 0x2f, 0x57, 0xe2, 0x9f, 0x7f, 0xb9, 0x12, 0xff, 0xdb, 0x97, 0x2b, 0xf1, 0x4f, - 0xbe, 0x5a, 0x89, 0x7d, 0xfe, 0xd5, 0x4a, 0xec, 0x2f, 0x5f, 0xad, 0xc4, 0xfe, 0xef, 0xb9, 0x53, - 0xd5, 0xea, 0x0e, 0x5b, 0x6b, 0x6d, 0xad, 0x7f, 0xc3, 0xfb, 0x9f, 0x94, 0xa0, 0xff, 0xc9, 0xb4, - 0x32, 0x34, 0xa0, 0xde, 0xfc, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x7e, 0xbe, 0x60, 0x47, - 0x33, 0x00, 0x00, + // 3263 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x73, 0x23, 0xd5, + 0x11, 0xd7, 0xe8, 0x5b, 0xad, 0xaf, 0xf1, 0xb3, 0x59, 0xb4, 0x62, 0xd7, 0x36, 0x43, 0x01, 0xcb, + 0x02, 0x36, 0xf1, 0x66, 0x61, 0xc9, 0x42, 0x28, 0x5b, 0xd6, 0x46, 0xf6, 0x7a, 0x6d, 0x33, 0x96, + 0x4d, 0x91, 0x0f, 0x86, 0xb1, 0xf4, 0x6c, 0x0d, 0x2b, 0x69, 0x86, 0x99, 0x91, 0x91, 0x39, 0x26, + 0xc5, 0x85, 0x43, 0xc2, 0x25, 0x95, 0xa4, 0x2a, 0xdc, 0x92, 0xaa, 0xe4, 0x3f, 0x48, 0x2e, 0x39, + 0xe5, 0xc0, 0x21, 0x07, 0x4e, 0xa9, 0x9c, 0x48, 0x0a, 0x6e, 0xf9, 0x07, 0x72, 0x4b, 0xa5, 0xde, + 0xc7, 0x7c, 0x49, 0x33, 0xfa, 0x00, 0x8a, 0xaa, 0x54, 0x71, 0x9b, 0xd7, 0xd3, 0xdd, 0xef, 0xab, + 0x5f, 0x77, 0xff, 0xfa, 0x3d, 0x78, 0xcc, 0xc6, 0xfd, 0x36, 0x36, 0x7b, 0x5a, 0xdf, 0x5e, 0x57, + 0x4f, 0x5b, 0xda, 0xba, 0x7d, 0x69, 0x60, 0x6b, 0xcd, 0x30, 0x75, 0x5b, 0x47, 0x65, 0xef, 0xe7, + 0x1a, 0xf9, 0x59, 0xbd, 0xee, 0xe3, 0x6e, 0x99, 0x97, 0x86, 0xad, 0xaf, 0x1b, 0xa6, 0xae, 0x9f, + 0x31, 0xfe, 0xea, 0xb5, 0xf1, 0xdf, 0x0f, 0xf1, 0x25, 0xd7, 0x16, 0x10, 0xa6, 0xbd, 0xac, 0x1b, + 0xaa, 0xa9, 0xf6, 0x9c, 0xdf, 0x2b, 0xe7, 0xba, 0x7e, 0xde, 0xc5, 0xeb, 0xb4, 0x75, 0x3a, 0x38, + 0x5b, 0xb7, 0xb5, 0x1e, 0xb6, 0x6c, 0xb5, 0x67, 0x70, 0x86, 0xa5, 0x73, 0xfd, 0x5c, 0xa7, 0x9f, + 0xeb, 0xe4, 0x8b, 0x51, 0xa5, 0x3f, 0xe7, 0x20, 0x23, 0xe3, 0x77, 0x07, 0xd8, 0xb2, 0xd1, 0x06, + 0x24, 0x71, 0xab, 0xa3, 0x57, 0x84, 0x55, 0xe1, 0x46, 0x7e, 0xe3, 0xda, 0xda, 0xc8, 0xf0, 0xd7, + 0x38, 0x5f, 0xbd, 0xd5, 0xd1, 0x1b, 0x31, 0x99, 0xf2, 0xa2, 0xdb, 0x90, 
0x3a, 0xeb, 0x0e, 0xac, + 0x4e, 0x25, 0x4e, 0x85, 0xae, 0x47, 0x09, 0xdd, 0x23, 0x4c, 0x8d, 0x98, 0xcc, 0xb8, 0x49, 0x57, + 0x5a, 0xff, 0x4c, 0xaf, 0x24, 0x26, 0x77, 0xb5, 0xd3, 0x3f, 0xa3, 0x5d, 0x11, 0x5e, 0xb4, 0x05, + 0xa0, 0xf5, 0x35, 0x5b, 0x69, 0x75, 0x54, 0xad, 0x5f, 0x49, 0x52, 0xc9, 0xc7, 0xa3, 0x25, 0x35, + 0xbb, 0x46, 0x18, 0x1b, 0x31, 0x39, 0xa7, 0x39, 0x0d, 0x32, 0xdc, 0x77, 0x07, 0xd8, 0xbc, 0xac, + 0xa4, 0x26, 0x0f, 0xf7, 0x75, 0xc2, 0x44, 0x86, 0x4b, 0xb9, 0xd1, 0x2b, 0x90, 0x6d, 0x75, 0x70, + 0xeb, 0xa1, 0x62, 0x0f, 0x2b, 0x19, 0x2a, 0xb9, 0x12, 0x25, 0x59, 0x23, 0x7c, 0xcd, 0x61, 0x23, + 0x26, 0x67, 0x5a, 0xec, 0x13, 0xdd, 0x81, 0x74, 0x4b, 0xef, 0xf5, 0x34, 0xbb, 0x02, 0x54, 0x76, + 0x39, 0x52, 0x96, 0x72, 0x35, 0x62, 0x32, 0xe7, 0x47, 0xfb, 0x50, 0xea, 0x6a, 0x96, 0xad, 0x58, + 0x7d, 0xd5, 0xb0, 0x3a, 0xba, 0x6d, 0x55, 0xf2, 0x54, 0xc3, 0x93, 0x51, 0x1a, 0xf6, 0x34, 0xcb, + 0x3e, 0x72, 0x98, 0x1b, 0x31, 0xb9, 0xd8, 0xf5, 0x13, 0x88, 0x3e, 0xfd, 0xec, 0x0c, 0x9b, 0xae, + 0xc2, 0x4a, 0x61, 0xb2, 0xbe, 0x03, 0xc2, 0xed, 0xc8, 0x13, 0x7d, 0xba, 0x9f, 0x80, 0x7e, 0x04, + 0x8b, 0x5d, 0x5d, 0x6d, 0xbb, 0xea, 0x94, 0x56, 0x67, 0xd0, 0x7f, 0x58, 0x29, 0x52, 0xa5, 0xcf, + 0x44, 0x0e, 0x52, 0x57, 0xdb, 0x8e, 0x8a, 0x1a, 0x11, 0x68, 0xc4, 0xe4, 0x85, 0xee, 0x28, 0x11, + 0xbd, 0x05, 0x4b, 0xaa, 0x61, 0x74, 0x2f, 0x47, 0xb5, 0x97, 0xa8, 0xf6, 0x9b, 0x51, 0xda, 0x37, + 0x89, 0xcc, 0xa8, 0x7a, 0xa4, 0x8e, 0x51, 0x51, 0x13, 0x44, 0xc3, 0xc4, 0x86, 0x6a, 0x62, 0xc5, + 0x30, 0x75, 0x43, 0xb7, 0xd4, 0x6e, 0xa5, 0x4c, 0x75, 0x3f, 0x1d, 0xa5, 0xfb, 0x90, 0xf1, 0x1f, + 0x72, 0xf6, 0x46, 0x4c, 0x2e, 0x1b, 0x41, 0x12, 0xd3, 0xaa, 0xb7, 0xb0, 0x65, 0x79, 0x5a, 0xc5, + 0x69, 0x5a, 0x29, 0x7f, 0x50, 0x6b, 0x80, 0x84, 0xea, 0x90, 0xc7, 0x43, 0x22, 0xae, 0x5c, 0xe8, + 0x36, 0xae, 0x2c, 0x50, 0x85, 0x52, 0xe4, 0x09, 0xa5, 0xac, 0x27, 0xba, 0x8d, 0x1b, 0x31, 0x19, + 0xb0, 0xdb, 0x42, 0x2a, 0x3c, 0x72, 0x81, 0x4d, 0xed, 0xec, 0x92, 0xaa, 0x51, 0xe8, 0x1f, 0x4b, + 0xd3, 0xfb, 
0x15, 0x44, 0x15, 0x3e, 0x1b, 0xa5, 0xf0, 0x84, 0x0a, 0x11, 0x15, 0x75, 0x47, 0xa4, + 0x11, 0x93, 0x17, 0x2f, 0xc6, 0xc9, 0xc4, 0xc4, 0xce, 0xb4, 0xbe, 0xda, 0xd5, 0xde, 0xc7, 0xca, + 0x69, 0x57, 0x6f, 0x3d, 0xac, 0x2c, 0x4e, 0x36, 0xb1, 0x7b, 0x9c, 0x7b, 0x8b, 0x30, 0x13, 0x13, + 0x3b, 0xf3, 0x13, 0xb6, 0x32, 0x90, 0xba, 0x50, 0xbb, 0x03, 0xbc, 0x9b, 0xcc, 0xa6, 0xc5, 0xcc, + 0x6e, 0x32, 0x9b, 0x15, 0x73, 0xbb, 0xc9, 0x6c, 0x4e, 0x04, 0xe9, 0x69, 0xc8, 0xfb, 0x5c, 0x12, + 0xaa, 0x40, 0xa6, 0x87, 0x2d, 0x4b, 0x3d, 0xc7, 0xd4, 0x83, 0xe5, 0x64, 0xa7, 0x29, 0x95, 0xa0, + 0xe0, 0x77, 0x43, 0xd2, 0x47, 0x82, 0x2b, 0x49, 0x3c, 0x0c, 0x91, 0xbc, 0xc0, 0x26, 0x5d, 0x08, + 0x2e, 0xc9, 0x9b, 0xe8, 0x09, 0x28, 0xd2, 0x49, 0x28, 0xce, 0x7f, 0xe2, 0xe6, 0x92, 0x72, 0x81, + 0x12, 0x4f, 0x38, 0xd3, 0x0a, 0xe4, 0x8d, 0x0d, 0xc3, 0x65, 0x49, 0x50, 0x16, 0x30, 0x36, 0x0c, + 0x87, 0xe1, 0x71, 0x28, 0x90, 0x19, 0xbb, 0x1c, 0x49, 0xda, 0x49, 0x9e, 0xd0, 0x38, 0x8b, 0xf4, + 0xb7, 0x38, 0x88, 0xa3, 0xae, 0x0b, 0xdd, 0x81, 0x24, 0xf1, 0xe2, 0xdc, 0x21, 0x57, 0xd7, 0x98, + 0x8b, 0x5f, 0x73, 0x5c, 0xfc, 0x5a, 0xd3, 0x71, 0xf1, 0x5b, 0xd9, 0x4f, 0x3e, 0x5b, 0x89, 0x7d, + 0xf4, 0xcf, 0x15, 0x41, 0xa6, 0x12, 0xe8, 0x2a, 0x71, 0x58, 0xaa, 0xd6, 0x57, 0xb4, 0x36, 0x1d, + 0x72, 0x8e, 0x78, 0x23, 0x55, 0xeb, 0xef, 0xb4, 0xd1, 0x1e, 0x88, 0x2d, 0xbd, 0x6f, 0xe1, 0xbe, + 0x35, 0xb0, 0x14, 0x16, 0x42, 0xb8, 0x1b, 0x0e, 0x38, 0x53, 0x16, 0xc8, 0x6a, 0x0e, 0xe7, 0x21, + 0x65, 0x94, 0xcb, 0xad, 0x20, 0x01, 0xdd, 0x03, 0xb8, 0x50, 0xbb, 0x5a, 0x5b, 0xb5, 0x75, 0xd3, + 0xaa, 0x24, 0x57, 0x13, 0x37, 0xf2, 0x1b, 0xab, 0x63, 0x5b, 0x7d, 0xe2, 0xb0, 0x1c, 0x1b, 0x6d, + 0xd5, 0xc6, 0x5b, 0x49, 0x32, 0x5c, 0xd9, 0x27, 0x89, 0x9e, 0x82, 0xb2, 0x6a, 0x18, 0x8a, 0x65, + 0xab, 0x36, 0x56, 0x4e, 0x2f, 0x6d, 0x6c, 0x51, 0x17, 0x5d, 0x90, 0x8b, 0xaa, 0x61, 0x1c, 0x11, + 0xea, 0x16, 0x21, 0xa2, 0x27, 0xa1, 0x44, 0xbc, 0xb9, 0xa6, 0x76, 0x95, 0x0e, 0xd6, 0xce, 0x3b, + 0x76, 0x25, 0xbd, 0x2a, 0xdc, 0x48, 0xc8, 0x45, 
0x4e, 0x6d, 0x50, 0xa2, 0xd4, 0x76, 0x77, 0x9c, + 0x7a, 0x72, 0x84, 0x20, 0xd9, 0x56, 0x6d, 0x95, 0xae, 0x64, 0x41, 0xa6, 0xdf, 0x84, 0x66, 0xa8, + 0x76, 0x87, 0xaf, 0x0f, 0xfd, 0x46, 0x57, 0x20, 0xcd, 0xd5, 0x26, 0xa8, 0x5a, 0xde, 0x42, 0x4b, + 0x90, 0x32, 0x4c, 0xfd, 0x02, 0xd3, 0xad, 0xcb, 0xca, 0xac, 0x21, 0xc9, 0x50, 0x0a, 0x7a, 0x7d, + 0x54, 0x82, 0xb8, 0x3d, 0xe4, 0xbd, 0xc4, 0xed, 0x21, 0x7a, 0x01, 0x92, 0x64, 0x21, 0x69, 0x1f, + 0xa5, 0x90, 0x38, 0xc7, 0xe5, 0x9a, 0x97, 0x06, 0x96, 0x29, 0xa7, 0x54, 0x86, 0x62, 0x20, 0x1a, + 0x48, 0x57, 0x60, 0x29, 0xcc, 0xb9, 0x4b, 0x1d, 0x97, 0x1e, 0x70, 0xd2, 0xe8, 0x36, 0x64, 0x5d, + 0xef, 0xce, 0x0c, 0xe7, 0xea, 0x58, 0xb7, 0x0e, 0xb3, 0xec, 0xb2, 0x12, 0x8b, 0x21, 0x1b, 0xd0, + 0x51, 0x79, 0x2c, 0x2f, 0xc8, 0x19, 0xd5, 0x30, 0x1a, 0xaa, 0xd5, 0x91, 0xde, 0x86, 0x4a, 0x94, + 0xe7, 0xf6, 0x2d, 0x98, 0x40, 0xcd, 0xde, 0x59, 0xb0, 0x2b, 0x90, 0x3e, 0xd3, 0xcd, 0x9e, 0x6a, + 0x53, 0x65, 0x45, 0x99, 0xb7, 0xc8, 0x42, 0x32, 0x2f, 0x9e, 0xa0, 0x64, 0xd6, 0x90, 0x14, 0xb8, + 0x1a, 0xe9, 0xbd, 0x89, 0x88, 0xd6, 0x6f, 0x63, 0xb6, 0xac, 0x45, 0x99, 0x35, 0x3c, 0x45, 0x6c, + 0xb0, 0xac, 0x41, 0xba, 0xb5, 0xe8, 0x5c, 0xa9, 0xfe, 0x9c, 0xcc, 0x5b, 0xd2, 0x1f, 0x13, 0x70, + 0x25, 0xdc, 0x87, 0xa3, 0x55, 0x28, 0xf4, 0xd4, 0xa1, 0x62, 0x0f, 0xb9, 0xd9, 0x09, 0x74, 0xe3, + 0xa1, 0xa7, 0x0e, 0x9b, 0x43, 0x66, 0x73, 0x22, 0x24, 0xec, 0xa1, 0x55, 0x89, 0xaf, 0x26, 0x6e, + 0x14, 0x64, 0xf2, 0x89, 0x8e, 0x61, 0xa1, 0xab, 0xb7, 0xd4, 0xae, 0xd2, 0x55, 0x2d, 0x5b, 0xe1, + 0xc1, 0x9d, 0x1d, 0xa2, 0x27, 0xc6, 0x16, 0x9b, 0x79, 0x63, 0xdc, 0x66, 0xfb, 0x49, 0x1c, 0x0e, + 0xb7, 0xff, 0x32, 0xd5, 0xb1, 0xa7, 0x3a, 0x5b, 0x8d, 0x8e, 0x61, 0xe9, 0xf4, 0xf2, 0x7d, 0xb5, + 0x6f, 0x6b, 0x7d, 0xac, 0x8c, 0x1d, 0xab, 0x71, 0xeb, 0x79, 0xa0, 0x59, 0xa7, 0xb8, 0xa3, 0x5e, + 0x68, 0xba, 0xc9, 0x55, 0x2e, 0xba, 0xf2, 0x27, 0xde, 0xd9, 0xf2, 0xf6, 0x28, 0x15, 0x30, 0x6a, + 0xc7, 0xbd, 0xa4, 0xe7, 0x76, 0x2f, 0x2f, 0xc0, 0x52, 0x1f, 0x0f, 0x6d, 0xdf, 0x18, 
0x99, 0xe1, + 0x64, 0xe8, 0x5e, 0x20, 0xf2, 0xcf, 0xeb, 0x9f, 0xd8, 0x10, 0x7a, 0x86, 0x86, 0x45, 0x43, 0xb7, + 0xb0, 0xa9, 0xa8, 0xed, 0xb6, 0x89, 0x2d, 0xab, 0x92, 0xa5, 0xdc, 0x65, 0x87, 0xbe, 0xc9, 0xc8, + 0xd2, 0x6f, 0xfc, 0x7b, 0x15, 0x0c, 0x83, 0x7c, 0x27, 0x04, 0x6f, 0x27, 0x8e, 0x60, 0x89, 0xcb, + 0xb7, 0x03, 0x9b, 0xc1, 0xd2, 0xd1, 0xc7, 0xc6, 0x0f, 0xdc, 0xe8, 0x26, 0x20, 0x47, 0x7c, 0x86, + 0x7d, 0x48, 0x7c, 0xb5, 0x7d, 0x40, 0x90, 0xa4, 0xab, 0x94, 0x64, 0x4e, 0x88, 0x7c, 0xff, 0xbf, + 0xed, 0xcd, 0x6b, 0xb0, 0x30, 0x96, 0x63, 0xb8, 0xf3, 0x12, 0x42, 0xe7, 0x15, 0xf7, 0xcf, 0x4b, + 0xfa, 0xad, 0x00, 0xd5, 0xe8, 0xa4, 0x22, 0x54, 0xd5, 0xb3, 0xb0, 0xe0, 0xce, 0xc5, 0x1d, 0x1f, + 0x3b, 0xf5, 0xa2, 0xfb, 0x83, 0x0f, 0x30, 0xd2, 0x81, 0x3f, 0x09, 0xa5, 0x91, 0x94, 0x87, 0xed, + 0x42, 0xf1, 0xc2, 0xdf, 0xbf, 0xf4, 0xcb, 0x84, 0xeb, 0x55, 0x03, 0x79, 0x49, 0x88, 0xe5, 0xbd, + 0x0e, 0x8b, 0x6d, 0xdc, 0xd2, 0xda, 0x5f, 0xd6, 0xf0, 0x16, 0xb8, 0xf4, 0xb7, 0x76, 0x37, 0x83, + 0xdd, 0xfd, 0x1c, 0x20, 0x2b, 0x63, 0xcb, 0x20, 0xd9, 0x07, 0xda, 0x82, 0x1c, 0x1e, 0xb6, 0xb0, + 0x61, 0x3b, 0x09, 0x5b, 0x78, 0x2a, 0xcc, 0xb8, 0xeb, 0x0e, 0x27, 0x01, 0x82, 0xae, 0x18, 0xba, + 0xc5, 0xb1, 0x6e, 0x34, 0x6c, 0xe5, 0xe2, 0x7e, 0xb0, 0xfb, 0xa2, 0x03, 0x76, 0x13, 0x91, 0x38, + 0x8e, 0x49, 0x8d, 0xa0, 0xdd, 0x5b, 0x1c, 0xed, 0x26, 0xa7, 0x74, 0x16, 0x80, 0xbb, 0xb5, 0x00, + 0xdc, 0x4d, 0x4d, 0x99, 0x66, 0x04, 0xde, 0x7d, 0xd1, 0xc1, 0xbb, 0xe9, 0x29, 0x23, 0x1e, 0x01, + 0xbc, 0xaf, 0xfa, 0x00, 0x6f, 0x96, 0x8a, 0xae, 0x46, 0x8a, 0x86, 0x20, 0xde, 0x97, 0x5d, 0xc4, + 0x9b, 0x8f, 0x44, 0xcb, 0x5c, 0x78, 0x14, 0xf2, 0x1e, 0x8c, 0x41, 0x5e, 0x06, 0x51, 0x9f, 0x8a, + 0x54, 0x31, 0x05, 0xf3, 0x1e, 0x8c, 0x61, 0xde, 0xe2, 0x14, 0x85, 0x53, 0x40, 0xef, 0x8f, 0xc3, + 0x41, 0x6f, 0x34, 0x2c, 0xe5, 0xc3, 0x9c, 0x0d, 0xf5, 0x2a, 0x11, 0xa8, 0xb7, 0x1c, 0x89, 0xd0, + 0x98, 0xfa, 0x99, 0x61, 0xef, 0x71, 0x08, 0xec, 0x65, 0x00, 0xf5, 0x46, 0xa4, 0xf2, 0x19, 0x70, + 0xef, 0x71, 0x08, 0xee, 
0x5d, 0x98, 0xaa, 0x76, 0x2a, 0xf0, 0xbd, 0x17, 0x04, 0xbe, 0x28, 0x22, + 0xc7, 0xf2, 0x4e, 0x7b, 0x04, 0xf2, 0x3d, 0x8d, 0x42, 0xbe, 0x0c, 0x9d, 0x3e, 0x17, 0xa9, 0x71, + 0x0e, 0xe8, 0x7b, 0x30, 0x06, 0x7d, 0x97, 0xa6, 0x58, 0xda, 0xec, 0xd8, 0x37, 0x23, 0x66, 0x19, + 0xea, 0xdd, 0x4d, 0x66, 0x41, 0xcc, 0x4b, 0xcf, 0x90, 0x40, 0x3c, 0xe2, 0xe1, 0x48, 0x4e, 0x8c, + 0x4d, 0x53, 0x37, 0x39, 0x8a, 0x65, 0x0d, 0xe9, 0x06, 0xc1, 0x42, 0x9e, 0x37, 0x9b, 0x80, 0x93, + 0x29, 0xf6, 0xf0, 0x79, 0x30, 0xe9, 0x4f, 0x82, 0x27, 0x4b, 0x91, 0xb2, 0x1f, 0x47, 0xe5, 0x38, + 0x8e, 0xf2, 0xa1, 0xe7, 0x78, 0x10, 0x3d, 0xaf, 0x40, 0x9e, 0x60, 0x8a, 0x11, 0x60, 0xac, 0x1a, + 0x2e, 0x30, 0xbe, 0x09, 0x0b, 0x34, 0x76, 0x32, 0x8c, 0xcd, 0x03, 0x52, 0x92, 0x06, 0xa4, 0x32, + 0xf9, 0xc1, 0xd6, 0x85, 0x45, 0xa6, 0xe7, 0x61, 0xd1, 0xc7, 0xeb, 0x62, 0x15, 0x86, 0x12, 0x45, + 0x97, 0x7b, 0x93, 0x83, 0x96, 0xbf, 0x0a, 0xde, 0x0a, 0x79, 0x88, 0x3a, 0x0c, 0xfc, 0x0a, 0x5f, + 0x13, 0xf8, 0x8d, 0x7f, 0x69, 0xf0, 0xeb, 0xc7, 0x5e, 0x89, 0x20, 0xf6, 0xfa, 0x8f, 0xe0, 0xed, + 0x89, 0x0b, 0x65, 0x5b, 0x7a, 0x1b, 0x73, 0x34, 0x44, 0xbf, 0x49, 0x76, 0xd2, 0xd5, 0xcf, 0x39, + 0xe6, 0x21, 0x9f, 0x84, 0xcb, 0x0d, 0x39, 0x39, 0x1e, 0x51, 0x5c, 0x20, 0xc5, 0x42, 0x3e, 0x07, + 0x52, 0x22, 0x24, 0x1e, 0x62, 0x16, 0x20, 0x0a, 0x32, 0xf9, 0x24, 0x7c, 0xd4, 0xec, 0x78, 0xe8, + 0x66, 0x0d, 0x74, 0x07, 0x72, 0xb4, 0x58, 0xad, 0xe8, 0x86, 0xc5, 0x63, 0x42, 0x20, 0xcb, 0x61, + 0x15, 0xeb, 0xb5, 0x43, 0xc2, 0x73, 0x60, 0x58, 0x72, 0xd6, 0xe0, 0x5f, 0xbe, 0x5c, 0x23, 0x17, + 0xc8, 0x35, 0xae, 0x41, 0x8e, 0x8c, 0xde, 0x32, 0xd4, 0x16, 0xa6, 0xa5, 0xd1, 0x9c, 0xec, 0x11, + 0xa4, 0x4f, 0x04, 0x28, 0x8f, 0x84, 0x98, 0xd0, 0xb9, 0x3b, 0x26, 0x19, 0xf7, 0x41, 0xfb, 0xeb, + 0x00, 0xe7, 0xaa, 0xa5, 0xbc, 0xa7, 0xf6, 0x6d, 0xdc, 0xe6, 0xd3, 0xcd, 0x9d, 0xab, 0xd6, 0x1b, + 0x94, 0x10, 0xec, 0x38, 0x3b, 0xd2, 0xb1, 0x0f, 0x43, 0xe6, 0xfc, 0x18, 0x12, 0x55, 0x21, 0x6b, + 0x98, 0x9a, 0x6e, 0x6a, 0xf6, 0x25, 0x1d, 0x6d, 0x42, 0x76, 
0xdb, 0xbb, 0xc9, 0x6c, 0x42, 0x4c, + 0xee, 0x26, 0xb3, 0x49, 0x31, 0xe5, 0x16, 0xaa, 0xd8, 0x91, 0xcd, 0x8b, 0x05, 0xe9, 0x83, 0xb8, + 0x67, 0x8b, 0xdb, 0xb8, 0xab, 0x5d, 0x60, 0x73, 0x8e, 0xc9, 0xcc, 0xb6, 0xb9, 0xcb, 0x21, 0x53, + 0xf6, 0x51, 0xc8, 0xe8, 0x49, 0x6b, 0x60, 0xe1, 0x36, 0x2f, 0x99, 0xb8, 0x6d, 0xd4, 0x80, 0x34, + 0xbe, 0xc0, 0x7d, 0xdb, 0xaa, 0x64, 0xa8, 0x0d, 0x5f, 0x19, 0xc7, 0xb0, 0xe4, 0xf7, 0x56, 0x85, + 0x58, 0xee, 0xbf, 0x3f, 0x5b, 0x11, 0x19, 0xf7, 0x73, 0x7a, 0x4f, 0xb3, 0x71, 0xcf, 0xb0, 0x2f, + 0x65, 0x2e, 0x3f, 0x79, 0x65, 0xa5, 0x1d, 0x28, 0x05, 0xe3, 0x7e, 0xe8, 0x7c, 0x9f, 0x80, 0xa2, + 0x89, 0x6d, 0x55, 0xeb, 0x2b, 0x81, 0x4c, 0xbe, 0xc0, 0x88, 0xbc, 0xc0, 0x73, 0x08, 0x8f, 0x84, + 0xc6, 0x7f, 0xf4, 0x12, 0xe4, 0xbc, 0xd4, 0x41, 0xa0, 0xd3, 0x99, 0x50, 0xff, 0xf0, 0x78, 0xa5, + 0xbf, 0x08, 0x9e, 0xca, 0x60, 0x45, 0xa5, 0x0e, 0x69, 0x13, 0x5b, 0x83, 0x2e, 0xab, 0x71, 0x94, + 0x36, 0x9e, 0x9f, 0x2d, 0x73, 0x20, 0xd4, 0x41, 0xd7, 0x96, 0xb9, 0xb0, 0xf4, 0x16, 0xa4, 0x19, + 0x05, 0xe5, 0x21, 0x73, 0xbc, 0x7f, 0x7f, 0xff, 0xe0, 0x8d, 0x7d, 0x31, 0x86, 0x00, 0xd2, 0x9b, + 0xb5, 0x5a, 0xfd, 0xb0, 0x29, 0x0a, 0x28, 0x07, 0xa9, 0xcd, 0xad, 0x03, 0xb9, 0x29, 0xc6, 0x09, + 0x59, 0xae, 0xef, 0xd6, 0x6b, 0x4d, 0x31, 0x81, 0x16, 0xa0, 0xc8, 0xbe, 0x95, 0x7b, 0x07, 0xf2, + 0x83, 0xcd, 0xa6, 0x98, 0xf4, 0x91, 0x8e, 0xea, 0xfb, 0xdb, 0x75, 0x59, 0x4c, 0x49, 0xdf, 0x81, + 0xab, 0x91, 0xb9, 0x86, 0x57, 0x2e, 0x11, 0x7c, 0xe5, 0x12, 0xe9, 0xd7, 0x71, 0x82, 0xc6, 0xa2, + 0x12, 0x08, 0xb4, 0x3b, 0x32, 0xf1, 0x8d, 0x39, 0xb2, 0x8f, 0x91, 0xd9, 0x13, 0x00, 0x66, 0xe2, + 0x33, 0x6c, 0xb7, 0x3a, 0x2c, 0xa1, 0x61, 0xfe, 0xb2, 0x28, 0x17, 0x39, 0x95, 0x0a, 0x59, 0x8c, + 0xed, 0x1d, 0xdc, 0xb2, 0x15, 0x76, 0xea, 0x18, 0xf8, 0xc9, 0x11, 0x36, 0x42, 0x3d, 0x62, 0x44, + 0xe9, 0xed, 0xb9, 0xd6, 0x32, 0x07, 0x29, 0xb9, 0xde, 0x94, 0xdf, 0x14, 0x13, 0x08, 0x41, 0x89, + 0x7e, 0x2a, 0x47, 0xfb, 0x9b, 0x87, 0x47, 0x8d, 0x03, 0xb2, 0x96, 0x8b, 0x50, 0x76, 0xd6, 0xd2, + 
0x21, 0xa6, 0xa4, 0xbf, 0xc7, 0xe1, 0xd1, 0x88, 0xf4, 0x07, 0xdd, 0x01, 0xb0, 0x87, 0x8a, 0x89, + 0x5b, 0xba, 0xd9, 0x8e, 0x36, 0xb2, 0xe6, 0x50, 0xa6, 0x1c, 0x72, 0xce, 0xe6, 0x5f, 0xd6, 0x84, + 0x2a, 0x1b, 0x7a, 0x85, 0x2b, 0x25, 0xb3, 0x72, 0x20, 0xdf, 0xf5, 0x90, 0x62, 0x12, 0x6e, 0x11, + 0xc5, 0x74, 0x6d, 0xa9, 0x62, 0xca, 0x8f, 0x1e, 0xf8, 0x41, 0xf2, 0x80, 0x06, 0x9a, 0x99, 0xcb, + 0xb1, 0x3e, 0x18, 0xcd, 0x08, 0x16, 0x7a, 0x13, 0x1e, 0x1d, 0x89, 0x93, 0xae, 0xd2, 0xd4, 0xac, + 0xe1, 0xf2, 0x91, 0x60, 0xb8, 0xe4, 0xaa, 0xa5, 0xdf, 0x25, 0xfc, 0x0b, 0x1b, 0xcc, 0xf6, 0x0e, + 0x20, 0x6d, 0xd9, 0xaa, 0x3d, 0xb0, 0xb8, 0xc1, 0xbd, 0x34, 0x6b, 0xea, 0xb8, 0xe6, 0x7c, 0x1c, + 0x51, 0x71, 0x99, 0xab, 0xf9, 0x76, 0xbd, 0x2d, 0xe9, 0x36, 0x94, 0x82, 0x8b, 0x13, 0x7d, 0x64, + 0x3c, 0x9f, 0x13, 0x97, 0xee, 0x02, 0x1a, 0x4f, 0xaa, 0x43, 0xca, 0x28, 0x42, 0x58, 0x19, 0xe5, + 0xf7, 0x02, 0x3c, 0x36, 0x21, 0x81, 0x46, 0xaf, 0x8f, 0xec, 0xf3, 0xcb, 0xf3, 0xa4, 0xdf, 0x6b, + 0x8c, 0x16, 0xdc, 0x69, 0xe9, 0x16, 0x14, 0xfc, 0xf4, 0xd9, 0x26, 0xf9, 0x8b, 0x84, 0xe7, 0xf3, + 0x83, 0xf5, 0x1e, 0x2f, 0x24, 0x0a, 0x5f, 0x31, 0x24, 0x06, 0xed, 0x2c, 0x3e, 0xa7, 0x9d, 0x1d, + 0x85, 0xd9, 0x59, 0x62, 0xae, 0x4c, 0x73, 0x2e, 0x6b, 0x4b, 0x7e, 0x35, 0x6b, 0x0b, 0x1c, 0xb8, + 0x54, 0xf0, 0xc0, 0x8d, 0xc5, 0xf5, 0x74, 0x48, 0x5c, 0x7f, 0x13, 0xc0, 0xab, 0x94, 0x91, 0xa8, + 0x65, 0xea, 0x83, 0x7e, 0x9b, 0x9a, 0x49, 0x4a, 0x66, 0x0d, 0x74, 0x1b, 0x52, 0xc4, 0xdc, 0x9c, + 0xc5, 0x1c, 0xf7, 0xbc, 0xc4, 0x5c, 0x7c, 0x95, 0x36, 0xc6, 0x2d, 0x69, 0x80, 0xc6, 0x4b, 0xf1, + 0x11, 0x5d, 0xbc, 0x1a, 0xec, 0xe2, 0xf1, 0xc8, 0xa2, 0x7e, 0x78, 0x57, 0xef, 0x43, 0x8a, 0x9a, + 0x07, 0xc9, 0x6f, 0xe8, 0xfd, 0x0f, 0xc7, 0x4b, 0xe4, 0x1b, 0xfd, 0x04, 0x40, 0xb5, 0x6d, 0x53, + 0x3b, 0x1d, 0x78, 0x1d, 0xac, 0x84, 0x9b, 0xd7, 0xa6, 0xc3, 0xb7, 0x75, 0x8d, 0xdb, 0xd9, 0x92, + 0x27, 0xea, 0xb3, 0x35, 0x9f, 0x42, 0x69, 0x1f, 0x4a, 0x41, 0x59, 0x27, 0xc3, 0x67, 0x63, 0x08, + 0x66, 0xf8, 0x0c, 0xb0, 0xf1, 0x0c, 
0xdf, 0xc5, 0x07, 0x09, 0x76, 0xc9, 0x45, 0x1b, 0xd2, 0x7f, + 0x05, 0x28, 0xf8, 0xad, 0xf3, 0x6b, 0xce, 0x5b, 0xa7, 0xa4, 0xea, 0x57, 0xc7, 0xd2, 0xd6, 0xcc, + 0xb9, 0x6a, 0x1d, 0x7f, 0x93, 0x59, 0xeb, 0x07, 0x02, 0x64, 0xdd, 0xc9, 0x07, 0xef, 0xbb, 0x02, + 0x17, 0x84, 0x6c, 0xed, 0xe2, 0xfe, 0x4b, 0x2a, 0x76, 0x1d, 0x98, 0x70, 0xaf, 0x03, 0xef, 0xba, + 0x09, 0x55, 0x54, 0x29, 0xd0, 0xbf, 0xd2, 0xdc, 0xa6, 0x9c, 0xfc, 0xf1, 0x57, 0x7c, 0x1c, 0x24, + 0x93, 0x40, 0xdf, 0x83, 0xb4, 0xda, 0x72, 0x0b, 0xa0, 0xa5, 0x90, 0xca, 0xa0, 0xc3, 0xba, 0xd6, + 0x1c, 0x6e, 0x52, 0x4e, 0x99, 0x4b, 0xf0, 0x51, 0xc5, 0x9d, 0x51, 0x49, 0xaf, 0x11, 0xbd, 0x8c, + 0x27, 0xe8, 0x36, 0x4b, 0x00, 0xc7, 0xfb, 0x0f, 0x0e, 0xb6, 0x77, 0xee, 0xed, 0xd4, 0xb7, 0x79, + 0x4a, 0xb5, 0xbd, 0x5d, 0xdf, 0x16, 0xe3, 0x84, 0x4f, 0xae, 0x3f, 0x38, 0x38, 0xa9, 0x6f, 0x8b, + 0x09, 0xe9, 0x2e, 0xe4, 0x5c, 0xd7, 0x83, 0x2a, 0x90, 0x71, 0x8a, 0xb9, 0x02, 0x77, 0x00, 0xbc, + 0x36, 0xbf, 0x04, 0x29, 0x43, 0x7f, 0x8f, 0xdf, 0xcd, 0x25, 0x64, 0xd6, 0x90, 0xda, 0x50, 0x1e, + 0xf1, 0x5b, 0xe8, 0x2e, 0x64, 0x8c, 0xc1, 0xa9, 0xe2, 0x18, 0xed, 0x48, 0xe9, 0xdb, 0x01, 0x9a, + 0x83, 0xd3, 0xae, 0xd6, 0xba, 0x8f, 0x2f, 0x9d, 0x65, 0x32, 0x06, 0xa7, 0xf7, 0x99, 0x6d, 0xb3, + 0x5e, 0xe2, 0xfe, 0x5e, 0x2e, 0x20, 0xeb, 0x1c, 0x55, 0xf4, 0x7d, 0xc8, 0xb9, 0x2e, 0xd1, 0xbd, + 0x5b, 0x8f, 0xf4, 0xa5, 0x5c, 0xbd, 0x27, 0x82, 0x6e, 0xc2, 0x82, 0xa5, 0x9d, 0xf7, 0x9d, 0xc2, + 0x3f, 0x2b, 0xf5, 0xc4, 0xe9, 0x99, 0x29, 0xb3, 0x1f, 0x7b, 0x4e, 0x35, 0x82, 0x44, 0x42, 0x71, + 0xd4, 0x57, 0x7c, 0x93, 0x03, 0x08, 0x89, 0xd8, 0x89, 0xb0, 0x88, 0xfd, 0xb3, 0x38, 0xe4, 0x7d, + 0xd7, 0x09, 0xe8, 0xbb, 0x3e, 0xc7, 0x55, 0x0a, 0x09, 0x35, 0x3e, 0x5e, 0xef, 0xf2, 0x3a, 0x38, + 0xb1, 0xf8, 0xfc, 0x13, 0x8b, 0xba, 0xbd, 0x71, 0x6e, 0x25, 0x92, 0x73, 0xdf, 0x4a, 0x3c, 0x07, + 0xc8, 0xd6, 0x6d, 0xb5, 0xab, 0x5c, 0xe8, 0xb6, 0xd6, 0x3f, 0x57, 0x98, 0x69, 0x30, 0x37, 0x23, + 0xd2, 0x3f, 0x27, 0xf4, 0xc7, 0x21, 0xb5, 0x92, 0x9f, 0x0a, 0x90, 0x75, 
0x61, 0xdf, 0xbc, 0x57, + 0xdb, 0x57, 0x20, 0xcd, 0x91, 0x0d, 0xbb, 0xdb, 0xe6, 0xad, 0xd0, 0xeb, 0x97, 0x2a, 0x64, 0x7b, + 0xd8, 0x56, 0xa9, 0xcf, 0x64, 0x61, 0xd2, 0x6d, 0xdf, 0x7c, 0x19, 0xf2, 0xbe, 0x67, 0x01, 0xc4, + 0x8d, 0xee, 0xd7, 0xdf, 0x10, 0x63, 0xd5, 0xcc, 0x87, 0x1f, 0xaf, 0x26, 0xf6, 0xf1, 0x7b, 0xe4, + 0x84, 0xc9, 0xf5, 0x5a, 0xa3, 0x5e, 0xbb, 0x2f, 0x0a, 0xd5, 0xfc, 0x87, 0x1f, 0xaf, 0x66, 0x64, + 0x4c, 0x2b, 0xef, 0x37, 0xef, 0x43, 0x79, 0x64, 0x63, 0x82, 0x07, 0x1a, 0x41, 0x69, 0xfb, 0xf8, + 0x70, 0x6f, 0xa7, 0xb6, 0xd9, 0xac, 0x2b, 0x27, 0x07, 0xcd, 0xba, 0x28, 0xa0, 0x47, 0x61, 0x71, + 0x6f, 0xe7, 0x07, 0x8d, 0xa6, 0x52, 0xdb, 0xdb, 0xa9, 0xef, 0x37, 0x95, 0xcd, 0x66, 0x73, 0xb3, + 0x76, 0x5f, 0x8c, 0x6f, 0xfc, 0x21, 0x0f, 0xe5, 0xcd, 0xad, 0xda, 0x0e, 0xc1, 0x76, 0x5a, 0x4b, + 0xa5, 0xee, 0xa1, 0x06, 0x49, 0x5a, 0x43, 0x9c, 0xf8, 0x38, 0xb0, 0x3a, 0xf9, 0x3a, 0x05, 0xdd, + 0x83, 0x14, 0x2d, 0x2f, 0xa2, 0xc9, 0xaf, 0x05, 0xab, 0x53, 0xee, 0x57, 0xc8, 0x60, 0xe8, 0x71, + 0x9a, 0xf8, 0x7c, 0xb0, 0x3a, 0xf9, 0xba, 0x05, 0xed, 0x41, 0xc6, 0xa9, 0x2e, 0x4d, 0x7b, 0xd3, + 0x57, 0x9d, 0x7a, 0x07, 0x42, 0xa6, 0xc6, 0xaa, 0x74, 0x93, 0x5f, 0x16, 0x56, 0xa7, 0x5c, 0xc4, + 0xa0, 0x1d, 0x48, 0xf3, 0x0a, 0xc9, 0x94, 0xc7, 0x82, 0xd5, 0x69, 0x57, 0x2b, 0x48, 0x86, 0x9c, + 0x57, 0xff, 0x9c, 0xfe, 0x5e, 0xb2, 0x3a, 0xc3, 0x1d, 0x13, 0x7a, 0x0b, 0x8a, 0xc1, 0xaa, 0xcb, + 0x6c, 0x0f, 0x12, 0xab, 0x33, 0x5e, 0xe2, 0x10, 0xfd, 0xc1, 0x12, 0xcc, 0x6c, 0x0f, 0x14, 0xab, + 0x33, 0xde, 0xe9, 0xa0, 0x77, 0x60, 0x61, 0xbc, 0x44, 0x32, 0xfb, 0x7b, 0xc5, 0xea, 0x1c, 0xb7, + 0x3c, 0xa8, 0x07, 0x28, 0xa4, 0xb4, 0x32, 0xc7, 0xf3, 0xc5, 0xea, 0x3c, 0x97, 0x3e, 0xa8, 0x0d, + 0xe5, 0xd1, 0x72, 0xc5, 0xac, 0xcf, 0x19, 0xab, 0x33, 0x5f, 0x00, 0xb1, 0x5e, 0x82, 0xd8, 0x7d, + 0xd6, 0xe7, 0x8d, 0xd5, 0x99, 0xef, 0x83, 0xd0, 0x31, 0x80, 0x0f, 0x7b, 0xce, 0xf0, 0xdc, 0xb1, + 0x3a, 0xcb, 0xcd, 0x10, 0x32, 0x60, 0x31, 0x0c, 0x94, 0xce, 0xf3, 0xfa, 0xb1, 0x3a, 0xd7, 0x85, + 0x11, 0xb1, 
0xe7, 0x20, 0xbc, 0x9c, 0xed, 0x35, 0x64, 0x75, 0xc6, 0x9b, 0xa3, 0xad, 0xfa, 0x27, + 0x9f, 0x2f, 0x0b, 0x9f, 0x7e, 0xbe, 0x2c, 0xfc, 0xeb, 0xf3, 0x65, 0xe1, 0xa3, 0x2f, 0x96, 0x63, + 0x9f, 0x7e, 0xb1, 0x1c, 0xfb, 0xc7, 0x17, 0xcb, 0xb1, 0x1f, 0x3e, 0x7b, 0xae, 0xd9, 0x9d, 0xc1, + 0xe9, 0x5a, 0x4b, 0xef, 0xad, 0xfb, 0x1f, 0x90, 0x87, 0x3d, 0x5b, 0x3f, 0x4d, 0xd3, 0x80, 0x7a, + 0xeb, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x14, 0x5f, 0x7b, 0xc4, 0xd6, 0x2e, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -5171,27 +4809,6 @@ func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.BeginBlock != nil { - { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -5213,48 +4830,6 @@ func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - return len(dAtA) - i, nil -} -func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - return len(dAtA) - i, nil -} func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -5638,12 +5213,12 @@ func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - n21, err21 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err21 != nil { - return 0, err21 + n18, err18 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err18 != nil { + return 0, err18 } - i -= n21 - i = encodeVarintTypes(dAtA, i, uint64(n21)) + i -= n18 + i = encodeVarintTypes(dAtA, i, uint64(n18)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -5701,70 +5276,6 @@ func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - { - size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5800,64 +5311,6 @@ func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestDeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestEndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l 
- if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func (m *RequestCommit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6060,12 +5513,12 @@ func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x3a } - n25, err25 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err25 != nil { - return 0, err25 + n20, err20 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err20 != nil { + return 0, err20 } - i -= n25 - i = encodeVarintTypes(dAtA, i, uint64(n25)) + i -= n20 + i = encodeVarintTypes(dAtA, i, uint64(n20)) i-- dAtA[i] = 0x32 if m.Height != 0 { @@ -6148,12 +5601,12 @@ func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x3a } - n27, err27 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err27 != nil { - return 0, err27 + n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err22 != nil { + return 0, err22 } - i -= n27 - i = encodeVarintTypes(dAtA, i, uint64(n27)) + i -= n22 + i = encodeVarintTypes(dAtA, i, uint64(n22)) i-- dAtA[i] = 0x32 if m.Height != 0 { @@ -6322,12 +5775,12 @@ func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x3a } - n29, err29 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err29 != nil { - return 0, err29 + n24, err24 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err24 != nil { + return 0, err24 } - i -= n29 - i = encodeVarintTypes(dAtA, 
i, uint64(n29)) + i -= n24 + i = encodeVarintTypes(dAtA, i, uint64(n24)) i-- dAtA[i] = 0x32 if m.Height != 0 { @@ -6536,27 +5989,6 @@ func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.BeginBlock != nil { - { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - return len(dAtA) - i, nil -} func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -6578,48 +6010,6 @@ func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - return len(dAtA) - i, nil -} -func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - return len(dAtA) - i, nil -} func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { 
size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -7118,7 +6508,7 @@ func (m *ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { +func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7128,57 +6518,20 @@ func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Priority != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Priority)) - i-- - dAtA[i] = 0x50 + if m.Priority != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x50 } if len(m.Sender) > 0 { i -= len(m.Sender) @@ -7294,69 +6647,6 @@ func (m *ResponseDeliverTx) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.ConsensusParamUpdates != nil { - { - size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ValidatorUpdates) > 0 { - for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7517,20 +6807,20 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } } if len(m.RefetchChunks) > 0 { - dAtA55 := make([]byte, len(m.RefetchChunks)*10) - var j54 int + dAtA46 := make([]byte, len(m.RefetchChunks)*10) + var j45 int for _, num := range m.RefetchChunks { for num >= 1<<7 { - dAtA55[j54] = uint8(uint64(num)&0x7f | 0x80) + dAtA46[j45] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j54++ + j45++ } - dAtA55[j54] = uint8(num) 
- j54++ + dAtA46[j45] = uint8(num) + j45++ } - i -= j54 - copy(dAtA[i:], dAtA55[:j54]) - i = encodeVarintTypes(dAtA, i, uint64(j54)) + i -= j45 + copy(dAtA[i:], dAtA46[:j45]) + i = encodeVarintTypes(dAtA, i, uint64(j45)) i-- dAtA[i] = 0x12 } @@ -8379,12 +7669,12 @@ func (m *Misbehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n63, err63 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err63 != nil { - return 0, err63 + n54, err54 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err54 != nil { + return 0, err54 } - i -= n63 - i = encodeVarintTypes(dAtA, i, uint64(n63)) + i -= n54 + i = encodeVarintTypes(dAtA, i, uint64(n54)) i-- dAtA[i] = 0x22 if m.Height != 0 { @@ -8545,18 +7835,6 @@ func (m *Request_Query) Size() (n int) { } return n } -func (m *Request_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BeginBlock != nil { - l = m.BeginBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Request_CheckTx) Size() (n int) { if m == nil { return 0 @@ -8569,30 +7847,6 @@ func (m *Request_CheckTx) Size() (n int) { } return n } -func (m *Request_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} -func (m *Request_EndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Request_Commit) Size() (n int) { if m == nil { return 0 @@ -8813,29 +8067,6 @@ func (m *RequestQuery) Size() (n int) { return n } -func (m *RequestBeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = 
m.Header.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.LastCommitInfo.Size() - n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - func (m *RequestCheckTx) Size() (n int) { if m == nil { return 0 @@ -8852,31 +8083,6 @@ func (m *RequestCheckTx) Size() (n int) { return n } -func (m *RequestDeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func (m *RequestEndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - return n -} - func (m *RequestCommit) Size() (n int) { if m == nil { return 0 @@ -9193,18 +8399,6 @@ func (m *Response_Query) Size() (n int) { } return n } -func (m *Response_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BeginBlock != nil { - l = m.BeginBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Response_CheckTx) Size() (n int) { if m == nil { return 0 @@ -9217,30 +8411,6 @@ func (m *Response_CheckTx) Size() (n int) { } return n } -func (m *Response_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} -func (m *Response_EndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Response_Commit) Size() (n int) { if m == nil { return 0 @@ -9488,21 +8658,6 @@ func (m *ResponseQuery) Size() (n int) { return n } -func (m *ResponseBeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + 
sovTypes(uint64(l)) - } - } - return n -} - func (m *ResponseCheckTx) Size() (n int) { if m == nil { return 0 @@ -9573,31 +8728,6 @@ func (m *ResponseDeliverTx) Size() (n int) { return n } -func (m *ResponseEndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ValidatorUpdates) > 0 { - for _, e := range m.ValidatorUpdates { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.ConsensusParamUpdates != nil { - l = m.ConsensusParamUpdates.Size() - n += 1 + l + sovTypes(uint64(l)) - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - func (m *ResponseCommit) Size() (n int) { if m == nil { return 0 @@ -10281,9 +9411,9 @@ func (m *Request) Unmarshal(dAtA []byte) error { } m.Value = &Request_Query{v} iNdEx = postIndex - case 6: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10310,15 +9440,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestBeginBlock{} + v := &RequestCheckTx{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_BeginBlock{v} + m.Value = &Request_CheckTx{v} iNdEx = postIndex - case 7: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10345,15 +9475,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestCheckTx{} + v := &RequestCommit{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_CheckTx{v} + m.Value = &Request_Commit{v} iNdEx = 
postIndex - case 8: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10380,15 +9510,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestDeliverTx{} + v := &RequestListSnapshots{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_DeliverTx{v} + m.Value = &Request_ListSnapshots{v} iNdEx = postIndex - case 9: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10415,15 +9545,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestEndBlock{} + v := &RequestOfferSnapshot{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_EndBlock{v} + m.Value = &Request_OfferSnapshot{v} iNdEx = postIndex - case 10: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10450,118 +9580,13 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestCommit{} + v := &RequestLoadSnapshotChunk{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_Commit{v} + m.Value = &Request_LoadSnapshotChunk{v} iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift 
>= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestListSnapshots{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_ListSnapshots{v} - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestOfferSnapshot{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_OfferSnapshot{v} - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestLoadSnapshotChunk{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = 
&Request_LoadSnapshotChunk{v} - iNdEx = postIndex - case 14: + case 14: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) } @@ -11276,333 +10301,9 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) - } - m.InitialHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InitialHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestQuery) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Prove = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { 
- return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
- if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) } - var msglen int + m.InitialHeight = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11612,26 +10313,11 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.InitialHeight |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } 
- postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11653,7 +10339,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { +func (m *RequestQuery) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11676,15 +10362,15 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -11711,16 +10397,48 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} } iNdEx = postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - m.Type = 0 + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11730,11 +10448,31 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= CheckTxType(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.Prove = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11756,7 +10494,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { +func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11779,10 +10517,10 @@ func (m *RequestDeliverTx) 
Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestDeliverTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -11819,61 +10557,11 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { m.Tx = []byte{} } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestEndBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Height = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11883,7 +10571,7 @@ func (m *RequestEndBlock) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Type |= CheckTxType(b&0x7F) << shift if b < 0x80 { break } @@ -13671,116 +12359,11 @@ func (m *Response) Unmarshal(dAtA []byte) error { if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Flush{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseInfo{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_Info{v} - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseInitChain{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_InitChain{v} - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseQuery{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_Query{v} + m.Value = &Response_Flush{v} iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13807,15 +12390,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseBeginBlock{} + v := &ResponseInfo{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_BeginBlock{v} + m.Value = &Response_Info{v} iNdEx = postIndex - case 8: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13842,15 +12425,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCheckTx{} + v := &ResponseInitChain{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_CheckTx{v} + m.Value = &Response_InitChain{v} iNdEx = postIndex - case 9: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13877,15 +12460,15 @@ func (m 
*Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseDeliverTx{} + v := &ResponseQuery{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_DeliverTx{v} + m.Value = &Response_Query{v} iNdEx = postIndex - case 10: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13912,11 +12495,11 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseEndBlock{} + v := &ResponseCheckTx{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_EndBlock{v} + m.Value = &Response_CheckTx{v} iNdEx = postIndex case 11: if wireType != 2 { @@ -15150,90 +13733,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseBeginBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ 
- msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -15710,160 +14209,6 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } 
- } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ValidatorUpdates = append(m.ValidatorUpdates, ValidatorUpdate{}) - if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &types1.ConsensusParams{} - } - if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != 
nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ResponseCommit) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 90632bf6ea..771c56a8f3 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -4,7 +4,6 @@ package tendermint.abci; option go_package = "github.com/tendermint/tendermint/abci/types"; import "tendermint/crypto/proof.proto"; -import "tendermint/types/types.proto"; import "tendermint/crypto/keys.proto"; import "tendermint/types/params.proto"; import "google/protobuf/timestamp.proto"; @@ -24,10 +23,7 @@ message Request { RequestInfo info = 3; RequestInitChain init_chain = 4; RequestQuery query = 5; - RequestBeginBlock begin_block = 6 [deprecated = true]; RequestCheckTx check_tx = 7; - RequestDeliverTx deliver_tx = 8 [deprecated = true]; - RequestEndBlock end_block = 9 [deprecated = true]; RequestCommit commit = 10; RequestListSnapshots list_snapshots = 11; RequestOfferSnapshot offer_snapshot = 12; @@ -39,6 +35,7 @@ message Request { RequestVerifyVoteExtension verify_vote_extension = 18; RequestFinalizeBlock finalize_block = 19; } + reserved 6, 8, 9; // RequestBeginBlock, RequestDeliverTx, RequestEndBlock } message RequestEcho { @@ -70,13 +67,6 @@ message RequestQuery { bool prove = 4; } -message RequestBeginBlock { - bytes hash = 1; - tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; - CommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; - repeated Misbehavior byzantine_validators = 4 [(gogoproto.nullable) = false]; -} - enum CheckTxType { NEW = 0 [(gogoproto.enumvalue_customname) = "New"]; RECHECK = 1 [(gogoproto.enumvalue_customname) = "Recheck"]; @@ -87,14 +77,6 @@ message 
RequestCheckTx { CheckTxType type = 2; } -message RequestDeliverTx { - bytes tx = 1; -} - -message RequestEndBlock { - int64 height = 1; -} - message RequestCommit {} // lists available snapshots @@ -186,10 +168,7 @@ message Response { ResponseInfo info = 4; ResponseInitChain init_chain = 5; ResponseQuery query = 6; - ResponseBeginBlock begin_block = 7 [deprecated = true]; ResponseCheckTx check_tx = 8; - ResponseDeliverTx deliver_tx = 9 [deprecated = true]; - ResponseEndBlock end_block = 10 [deprecated = true]; ResponseCommit commit = 11; ResponseListSnapshots list_snapshots = 12; ResponseOfferSnapshot offer_snapshot = 13; @@ -201,6 +180,7 @@ message Response { ResponseVerifyVoteExtension verify_vote_extension = 19; ResponseFinalizeBlock finalize_block = 20; } + reserved 7, 9, 10; // ResponseBeginBlock, ResponseDeliverTx, ResponseEndBlock } // nondeterministic @@ -244,10 +224,6 @@ message ResponseQuery { string codespace = 10; } -message ResponseBeginBlock { - repeated Event events = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; -} - message ResponseCheckTx { uint32 code = 1; bytes data = 2; @@ -271,12 +247,6 @@ message ResponseDeliverTx { string codespace = 8; } -message ResponseEndBlock { - repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; - tendermint.types.ConsensusParams consensus_param_updates = 2; - repeated Event events = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; -} - message ResponseCommit { // reserve 1 bytes data = 2; @@ -385,7 +355,7 @@ message ExtendedCommitInfo { } // Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock and ResponseDeliverTx. +// ResponseFinalizeBlock, ResponseDeliverTx, ExecTxResult // Later, transactions may be queried using these events. 
message Event { string type = 1; From 3dec4a474467612daee2403134f922386d61f043 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Mon, 30 May 2022 10:45:56 +0200 Subject: [PATCH 070/203] docs: add documentation for undocumented p2p metrics (#8640) Once merged will backport to v0.35 --- docs/nodes/metrics.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/nodes/metrics.md b/docs/nodes/metrics.md index 7b0622519b..46ab9d5fae 100644 --- a/docs/nodes/metrics.md +++ b/docs/nodes/metrics.md @@ -56,8 +56,11 @@ The following metrics are available: | p2p_peer_receive_bytes_total | Counter | peer_id, chID | number of bytes per channel received from a given peer | | p2p_peer_send_bytes_total | Counter | peer_id, chID | number of bytes per channel sent to a given peer | | p2p_peer_pending_send_bytes | Gauge | peer_id | number of pending bytes to be sent to a given peer | -| p2p_num_txs | Gauge | peer_id | number of transactions submitted by each peer_id | -| p2p_pending_send_bytes | Gauge | peer_id | amount of data pending to be sent to peer | +| p2p_router_peer_queue_recv | Histogram | | The time taken to read off of a peer's queue before sending on the connection | +| p2p_router_peer_queue_send | Histogram | | The time taken to send on a peer's queue which will later be sent on the connection | +| p2p_router_channel_queue_send | Histogram | | The time taken to send on a p2p channel's queue which will later be consumed by the corresponding service | +| p2p_router_channel_queue_dropped_msgs | Counter | ch_id | The number of messages dropped from a peer's queue for a specific p2p channel | +| p2p_peer_queue_msg_size | Gauge | ch_id | The size of messages sent over a peer's queue for a specific p2p channel | | mempool_size | Gauge | | Number of uncommitted transactions | | mempool_tx_size_bytes | Histogram | | transaction sizes in bytes | | mempool_failed_txs | Counter | | number of failed 
transactions | From fefce8dc3571f013e6c88e4036d30804222dbe20 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Tue, 31 May 2022 12:22:52 +0200 Subject: [PATCH 071/203] e2e: programmable ABCI method times (#8638) * e2e: programmable ABCI method times * fix linting error --- test/e2e/app/app.go | 38 +++++++++++++++++++++++ test/e2e/generator/generate.go | 46 ++++++++++++++++++++-------- test/e2e/networks/ci.toml | 5 +--- test/e2e/pkg/manifest.go | 10 ++++++- test/e2e/pkg/testnet.go | 55 ++++++++++++++++------------------ test/e2e/runner/setup.go | 38 +++++++++++++---------- 6 files changed, 131 insertions(+), 61 deletions(-) diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 203a60f6aa..788f296b7a 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -13,6 +13,7 @@ import ( "strconv" "strings" "sync" + "time" "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" @@ -80,6 +81,14 @@ type Config struct { // // height <-> pubkey <-> voting power ValidatorUpdates map[string]map[string]uint8 `toml:"validator_update"` + + // Add artificial delays to each of the main ABCI calls to mimic computation time + // of the application + PrepareProposalDelayMS uint64 `toml:"prepare_proposal_delay_ms"` + ProcessProposalDelayMS uint64 `toml:"process_proposal_delay_ms"` + CheckTxDelayMS uint64 `toml:"check_tx_delay_ms"` + VoteExtensionDelayMS uint64 `toml:"vote_extension_delay_ms"` + FinalizeBlockDelayMS uint64 `toml:"finalize_block_delay_ms"` } func DefaultConfig(dir string) *Config { @@ -164,6 +173,11 @@ func (app *Application) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*a Code: code.CodeTypeEncodingError, }, nil } + + if app.cfg.CheckTxDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.CheckTxDelayMS) * time.Millisecond) + } + return &abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}, nil } @@ -189,6 +203,10 @@ func (app *Application) FinalizeBlock(_ context.Context, req *abci.RequestFinali 
panic(err) } + if app.cfg.FinalizeBlockDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.FinalizeBlockDelayMS) * time.Millisecond) + } + return &abci.ResponseFinalizeBlock{ TxResults: txs, ValidatorUpdates: valUpdates, @@ -394,6 +412,11 @@ func (app *Application) PrepareProposal(_ context.Context, req *abci.RequestPrep Tx: tx, }) } + + if app.cfg.PrepareProposalDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.PrepareProposalDelayMS) * time.Millisecond) + } + return &abci.ResponsePrepareProposal{TxRecords: trs}, nil } @@ -415,6 +438,11 @@ func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProc } } } + + if app.cfg.ProcessProposalDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.ProcessProposalDelayMS) * time.Millisecond) + } + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil } @@ -442,6 +470,11 @@ func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVot // nolint:gosec // G404: Use of weak random number generator num := rand.Int63n(voteExtensionMaxVal) extLen := binary.PutVarint(ext, num) + + if app.cfg.VoteExtensionDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.VoteExtensionDelayMS) * time.Millisecond) + } + app.logger.Info("generated vote extension", "num", num, "ext", fmt.Sprintf("%x", ext[:extLen]), "state.Height", app.state.Height) return &abci.ResponseExtendVote{ VoteExtension: ext[:extLen], @@ -476,6 +509,11 @@ func (app *Application) VerifyVoteExtension(_ context.Context, req *abci.Request Status: abci.ResponseVerifyVoteExtension_REJECT, }, nil } + + if app.cfg.VoteExtensionDelayMS != 0 { + time.Sleep(time.Duration(app.cfg.VoteExtensionDelayMS) * time.Millisecond) + } + app.logger.Info("verified vote extension value", "req", req, "num", num) return &abci.ResponseVerifyVoteExtension{ Status: abci.ResponseVerifyVoteExtension_ACCEPT, diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 5f917d746e..0e84701115 100644 --- 
a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -15,13 +15,13 @@ var ( // separate testnet for each combination (Cartesian product) of options. testnetCombinations = map[string][]interface{}{ "topology": {"single", "quad", "large"}, - "queueType": {"priority"}, // "fifo" "initialHeight": {0, 1000}, "initialState": { map[string]string{}, map[string]string{"initial01": "a", "initial02": "b", "initial03": "c"}, }, "validators": {"genesis", "initchain"}, + "abci": {"builtin", "outofprocess"}, } // The following specify randomly chosen values for testnet nodes. @@ -32,11 +32,10 @@ var ( "rocksdb": 10, "cleveldb": 5, } - nodeABCIProtocols = weightedChoice{ - "builtin": 50, - "tcp": 20, - "grpc": 20, - "unix": 10, + ABCIProtocols = weightedChoice{ + "tcp": 20, + "grpc": 20, + "unix": 10, } nodePrivvalProtocols = weightedChoice{ "file": 50, @@ -62,10 +61,13 @@ var ( "kill": 0.1, "restart": 0.1, } - evidence = uniformChoice{0, 1, 10} - txSize = uniformChoice{1024, 4096} // either 1kb or 4kb - ipv6 = uniformChoice{false, true} - keyType = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1} + + // the following specify random chosen values for the entire testnet + evidence = uniformChoice{0, 1, 10} + txSize = uniformChoice{1024, 4096} // either 1kb or 4kb + ipv6 = uniformChoice{false, true} + keyType = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1} + abciDelays = uniformChoice{"none", "small", "large"} voteExtensionEnableHeightOffset = uniformChoice{int64(0), int64(10), int64(100)} voteExtensionEnabled = uniformChoice{true, false} @@ -107,7 +109,6 @@ type Options struct { func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) { manifest := e2e.Manifest{ IPv6: ipv6.Choose(r).(bool), - ABCIProtocol: nodeABCIProtocols.Choose(r), InitialHeight: int64(opt["initialHeight"].(int)), InitialState: opt["initialState"].(map[string]string), Validators: &map[string]int64{}, @@ -115,7 
+116,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er Nodes: map[string]*e2e.ManifestNode{}, KeyType: keyType.Choose(r).(string), Evidence: evidence.Choose(r).(int), - QueueType: opt["queueType"].(string), + QueueType: "priority", TxSize: txSize.Choose(r).(int), } @@ -123,6 +124,27 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er manifest.VoteExtensionsEnableHeight = manifest.InitialHeight + voteExtensionEnableHeightOffset.Choose(r).(int64) } + if opt["abci"] == "builtin" { + manifest.ABCIProtocol = string(e2e.ProtocolBuiltin) + } else { + manifest.ABCIProtocol = ABCIProtocols.Choose(r) + } + + switch abciDelays.Choose(r).(string) { + case "none": + case "small": + manifest.PrepareProposalDelayMS = 100 + manifest.ProcessProposalDelayMS = 100 + manifest.VoteExtensionDelayMS = 20 + manifest.FinalizeBlockDelayMS = 200 + case "large": + manifest.PrepareProposalDelayMS = 200 + manifest.ProcessProposalDelayMS = 200 + manifest.CheckTxDelayMS = 20 + manifest.VoteExtensionDelayMS = 100 + manifest.FinalizeBlockDelayMS = 500 + } + var numSeeds, numValidators, numFulls, numLightClients int switch opt["topology"].(string) { case "single": diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index e2f7376a25..eb74dd1119 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -5,6 +5,7 @@ evidence = 5 initial_height = 1000 initial_state = {initial01 = "a", initial02 = "b", initial03 = "c"} queue_type = "priority" +abci_protocol = "builtin" [validators] validator01 = 100 @@ -37,7 +38,6 @@ snapshot_interval = 5 block_sync = "v0" [node.validator02] -abci_protocol = "tcp" database = "cleveldb" persist_interval = 0 perturb = ["restart"] @@ -48,7 +48,6 @@ block_sync = "v0" [node.validator03] database = "badgerdb" seeds = ["seed01"] -abci_protocol = "grpc" persist_interval = 3 perturb = ["kill"] privval_protocol = "grpc" @@ -56,7 +55,6 @@ block_sync = "v0" retain_blocks = 10 
[node.validator04] -abci_protocol = "builtin" snapshot_interval = 5 database = "rocksdb" persistent_peers = ["validator01"] @@ -69,7 +67,6 @@ block_sync = "v0" database = "badgerdb" state_sync = "p2p" start_at = 1005 # Becomes part of the validator set at 1010 -abci_protocol = "builtin" perturb = ["pause", "disconnect", "restart"] [node.full01] diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index dd2ad02bac..68ea8ca1d1 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -64,7 +64,7 @@ type Manifest struct { QueueType string `toml:"queue_type"` // Number of bytes per tx. Default is 1kb (1024) - TxSize int + TxSize int `toml:"tx_size"` // VoteExtensionsEnableHeight configures the first height during which // the chain will use and require vote extension data to be present @@ -76,6 +76,14 @@ type Manifest struct { // builtin will build a complete Tendermint node into the application and // launch it instead of launching a separate Tendermint process. ABCIProtocol string `toml:"abci_protocol"` + + // Add artificial delays to each of the main ABCI calls to mimic computation time + // of the application + PrepareProposalDelayMS uint64 `toml:"prepare_proposal_delay_ms"` + ProcessProposalDelayMS uint64 `toml:"process_proposal_delay_ms"` + CheckTxDelayMS uint64 `toml:"check_tx_delay_ms"` + VoteExtensionDelayMS uint64 `toml:"vote_extension_delay_ms"` + FinalizeBlockDelayMS uint64 `toml:"finalize_block_delay_ms"` } // ManifestNode represents a node in a testnet manifest. 
diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index ad79c99c6d..0e87466b07 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -72,7 +72,12 @@ type Testnet struct { VoteExtensionsEnableHeight int64 LogLevel string TxSize int - ABCIProtocol string + ABCIProtocol Protocol + PrepareProposalDelayMS int + ProcessProposalDelayMS int + CheckTxDelayMS int + VoteExtensionDelayMS int + FinalizeBlockDelayMS int } // Node represents a Tendermint node in a testnet. @@ -88,7 +93,6 @@ type Node struct { Mempool string StateSync string Database string - ABCIProtocol Protocol PrivvalProtocol Protocol PersistInterval uint64 SnapshotInterval uint64 @@ -128,20 +132,25 @@ func LoadTestnet(file string) (*Testnet, error) { proxyPortGen := newPortGenerator(proxyPortFirst) testnet := &Testnet{ - Name: filepath.Base(dir), - File: file, - Dir: dir, - IP: ipGen.Network(), - InitialHeight: 1, - InitialState: manifest.InitialState, - Validators: map[*Node]int64{}, - ValidatorUpdates: map[int64]map[*Node]int64{}, - Nodes: []*Node{}, - Evidence: manifest.Evidence, - KeyType: "ed25519", - LogLevel: manifest.LogLevel, - TxSize: manifest.TxSize, - ABCIProtocol: manifest.ABCIProtocol, + Name: filepath.Base(dir), + File: file, + Dir: dir, + IP: ipGen.Network(), + InitialHeight: 1, + InitialState: manifest.InitialState, + Validators: map[*Node]int64{}, + ValidatorUpdates: map[int64]map[*Node]int64{}, + Nodes: []*Node{}, + Evidence: manifest.Evidence, + KeyType: "ed25519", + LogLevel: manifest.LogLevel, + TxSize: manifest.TxSize, + ABCIProtocol: Protocol(manifest.ABCIProtocol), + PrepareProposalDelayMS: int(manifest.PrepareProposalDelayMS), + ProcessProposalDelayMS: int(manifest.ProcessProposalDelayMS), + CheckTxDelayMS: int(manifest.CheckTxDelayMS), + VoteExtensionDelayMS: int(manifest.VoteExtensionDelayMS), + FinalizeBlockDelayMS: int(manifest.FinalizeBlockDelayMS), } if len(manifest.KeyType) != 0 { testnet.KeyType = manifest.KeyType @@ -153,7 +162,7 @@ func 
LoadTestnet(file string) (*Testnet, error) { testnet.InitialHeight = manifest.InitialHeight } if testnet.ABCIProtocol == "" { - testnet.ABCIProtocol = string(ProtocolBuiltin) + testnet.ABCIProtocol = ProtocolBuiltin } // Set up nodes, in alphabetical order (IPs and ports get same order). @@ -174,7 +183,6 @@ func LoadTestnet(file string) (*Testnet, error) { ProxyPort: proxyPortGen.Next(), Mode: ModeValidator, Database: "goleveldb", - ABCIProtocol: Protocol(testnet.ABCIProtocol), PrivvalProtocol: ProtocolFile, StartAt: nodeManifest.StartAt, Mempool: nodeManifest.Mempool, @@ -192,9 +200,6 @@ func LoadTestnet(file string) (*Testnet, error) { if nodeManifest.Mode != "" { node.Mode = Mode(nodeManifest.Mode) } - if node.Mode == ModeLight { - node.ABCIProtocol = ProtocolBuiltin - } if nodeManifest.Database != "" { node.Database = nodeManifest.Database } @@ -354,14 +359,6 @@ func (n Node) Validate(testnet Testnet) error { default: return fmt.Errorf("invalid database setting %q", n.Database) } - switch n.ABCIProtocol { - case ProtocolBuiltin, ProtocolUNIX, ProtocolTCP, ProtocolGRPC: - default: - return fmt.Errorf("invalid ABCI protocol setting %q", n.ABCIProtocol) - } - if n.Mode == ModeLight && n.ABCIProtocol != ProtocolBuiltin { - return errors.New("light client must use builtin protocol") - } switch n.PrivvalProtocol { case ProtocolFile, ProtocolTCP, ProtocolGRPC, ProtocolUNIX: default: diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 5f78a5b35a..5887f13efa 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -141,6 +141,9 @@ func MakeDockerCompose(testnet *e2e.Testnet) ([]byte, error) { "addUint32": func(x, y uint32) uint32 { return x + y }, + "isBuiltin": func(protocol e2e.Protocol, mode e2e.Mode) bool { + return mode == e2e.ModeLight || protocol == e2e.ProtocolBuiltin + }, }).Parse(`version: '2.4' networks: @@ -163,7 +166,7 @@ services: e2e: true container_name: {{ .Name }} image: tendermint/e2e-node -{{- if eq .ABCIProtocol 
"builtin" }} +{{- if isBuiltin $.ABCIProtocol .Mode }} entrypoint: /usr/bin/entrypoint-builtin {{- else if .LogLevel }} command: start --log-level {{ .LogLevel }} @@ -254,7 +257,7 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.Mode = string(node.Mode) } - switch node.ABCIProtocol { + switch node.Testnet.ABCIProtocol { case e2e.ProtocolUNIX: cfg.ProxyApp = AppAddressUNIX case e2e.ProtocolTCP: @@ -266,7 +269,7 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.ProxyApp = "" cfg.ABCI = "" default: - return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.ABCIProtocol) + return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.Testnet.ABCIProtocol) } // Tendermint errors if it does not have a privval key set up, regardless of whether @@ -343,18 +346,23 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { // MakeAppConfig generates an ABCI application config for a node. func MakeAppConfig(node *e2e.Node) ([]byte, error) { cfg := map[string]interface{}{ - "chain_id": node.Testnet.Name, - "dir": "data/app", - "listen": AppAddressUNIX, - "mode": node.Mode, - "proxy_port": node.ProxyPort, - "protocol": "socket", - "persist_interval": node.PersistInterval, - "snapshot_interval": node.SnapshotInterval, - "retain_blocks": node.RetainBlocks, - "key_type": node.PrivvalKey.Type(), + "chain_id": node.Testnet.Name, + "dir": "data/app", + "listen": AppAddressUNIX, + "mode": node.Mode, + "proxy_port": node.ProxyPort, + "protocol": "socket", + "persist_interval": node.PersistInterval, + "snapshot_interval": node.SnapshotInterval, + "retain_blocks": node.RetainBlocks, + "key_type": node.PrivvalKey.Type(), + "prepare_proposal_delay_ms": node.Testnet.PrepareProposalDelayMS, + "process_proposal_delay_ms": node.Testnet.ProcessProposalDelayMS, + "check_tx_delay_ms": node.Testnet.CheckTxDelayMS, + "vote_extension_delay_ms": node.Testnet.VoteExtensionDelayMS, + "finalize_block_delay_ms": node.Testnet.FinalizeBlockDelayMS, } - switch 
node.ABCIProtocol { + switch node.Testnet.ABCIProtocol { case e2e.ProtocolUNIX: cfg["listen"] = AppAddressUNIX case e2e.ProtocolTCP: @@ -366,7 +374,7 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { delete(cfg, "listen") cfg["protocol"] = "builtin" default: - return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.ABCIProtocol) + return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.Testnet.ABCIProtocol) } if node.Mode == e2e.ModeValidator { switch node.PrivvalProtocol { From 7422f7b7a04967461776b2d879bcde8b24c5f0dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 31 May 2022 14:26:27 +0000 Subject: [PATCH 072/203] build(deps): Bump github.com/bufbuild/buf from 1.4.0 to 1.5.0 (#8650) Bumps [github.com/bufbuild/buf](https://github.com/bufbuild/buf) from 1.4.0 to 1.5.0.
Release notes

Sourced from github.com/bufbuild/buf's releases.

v1.5.0

  • Upgrade to protoc 3.20.1 support.
  • Fix an issue where buf would fail if two or more roots contained a file with the same name, but with different file types (i.e. a regular file vs. a directory).
  • Fix check for PACKAGE_SERVICE_NO_DELETE to detect deleted services.
  • Remove buf beta registry track.
  • Remove buf beta registry branch.
Changelog

Sourced from github.com/bufbuild/buf's changelog.

[v1.5.0] - 2022-05-30

  • Upgrade to protoc 3.20.1 support.
  • Fix an issue where buf would fail if two or more roots contained a file with the same name, but with different file types (i.e. a regular file vs. a directory).
  • Fix check for PACKAGE_SERVICE_NO_DELETE to detect deleted services.
  • Remove buf beta registry track.
  • Remove buf beta registry branch.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/bufbuild/buf&package-manager=go_modules&previous-version=1.4.0&new-version=1.5.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 13 +++++++------ go.sum | 23 ++++++++++++++--------- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 174c9f9e10..a8c3a5ac81 100644 --- a/go.mod +++ b/go.mod @@ -30,14 +30,14 @@ require ( github.com/stretchr/testify v1.7.1 github.com/tendermint/tm-db v0.6.6 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 - golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 + golang.org/x/net v0.0.0-20220526153639-5463443f8c37 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.46.2 pgregory.net/rapid v0.4.7 ) require ( - github.com/bufbuild/buf v1.4.0 + github.com/bufbuild/buf v1.5.0 github.com/creachadair/atomicfile v0.2.6 github.com/creachadair/taskgroup v0.3.2 github.com/golangci/golangci-lint v1.46.0 @@ -48,14 +48,15 @@ require ( require ( github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect + github.com/bufbuild/connect-go v0.0.0-20220525141242-b79148bf7e44 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/firefart/nonamedreturns v1.0.1 // indirect github.com/gofrs/uuid v4.2.0+incompatible // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f // indirect github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753 // indirect - github.com/klauspost/compress v1.15.1 // indirect + github.com/klauspost/compress v1.15.5 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/lufeee/execinquery v1.0.0 // indirect github.com/pelletier/go-toml/v2 v2.0.1 // indirect @@ -228,11 +229,11 @@ require ( go.etcd.io/bbolt v1.3.6 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect - golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 
// indirect + golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect - google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect + google.golang.org/genproto v0.0.0-20220525015930-6ca3db687a9d // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 16dff839d7..4ca6a03896 100644 --- a/go.sum +++ b/go.sum @@ -177,8 +177,10 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/buf v1.4.0 h1:GqE3a8CMmcFvWPzuY3Mahf9Kf3S9XgZ/ORpfYFzO+90= -github.com/bufbuild/buf v1.4.0/go.mod h1:mwHG7klTHnX+rM/ym8LXGl7vYpVmnwT96xWoRB4H5QI= +github.com/bufbuild/buf v1.5.0 h1:JHcWhMGMUEYJxXhbS8lJnAxfbuDaRIzuPB0DkAgTb9E= +github.com/bufbuild/buf v1.5.0/go.mod h1:dzEhpYNhRG0AzL/E9LlpzRki72XvvZO8b6FurWUa8Gc= +github.com/bufbuild/connect-go v0.0.0-20220525141242-b79148bf7e44 h1:aBc5SwEZ+BGrKpCJSKwb3heqoPVEBUcNYhyAX5XfH5Q= +github.com/bufbuild/connect-go v0.0.0-20220525141242-b79148bf7e44/go.mod h1:BajZGyRXK+Oq6Ddkm7atQ1Tu4W92OMpam7vyhFIf0ww= github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= @@ -234,8 +236,9 @@ github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF github.com/cpuguy83/go-md2man v1.0.10/go.mod 
h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/atomicfile v0.2.6 h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg= github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc= github.com/creachadair/command v0.0.0-20220426235536-a748effdf6a1/go.mod h1:bAM+qFQb/KwWyCc9MLC4U1jvn3XyakqP5QRkds5T6cY= @@ -669,8 +672,8 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.5 h1:qyCLMz2JCrKADihKOh9FxnW3houKeNsp2h5OEz0QSEA= +github.com/klauspost/compress v1.15.5/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1336,8 +1339,9 @@ 
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220526153639-5463443f8c37 h1:lUkvobShwKsOesNfWWlCS5q7fnbG1MEliIzwu886fn8= +golang.org/x/net v0.0.0-20220526153639-5463443f8c37/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1490,8 +1494,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 h1:EH1Deb8WZJ0xc0WK//leUHXcX9aLE5SymusoTmMZye8= -golang.org/x/term v0.0.0-20220411215600-e5f449aeb171/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1757,8 +1761,9 @@ google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220525015930-6ca3db687a9d h1:8BnRR08DxAQ+e2pFx64Q3Ltg/AkrrxyG1LLa1WpomyA= +google.golang.org/genproto v0.0.0-20220525015930-6ca3db687a9d/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= From b7805e94eae6db742f6c04ebfc9f9348f88f6f67 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Jun 2022 04:34:12 -0400 Subject: [PATCH 073/203] build(deps): Bump eventsource from 1.1.0 to 1.1.1 in /docs (#8663) Bumps [eventsource](https://github.com/EventSource/eventsource) from 1.1.0 to 1.1.1. 
- [Release notes](https://github.com/EventSource/eventsource/releases) - [Changelog](https://github.com/EventSource/eventsource/blob/master/HISTORY.md) - [Commits](https://github.com/EventSource/eventsource/compare/v1.1.0...v1.1.1) --- updated-dependencies: - dependency-name: eventsource dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index 447c8c27d0..a67545ab3e 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -6210,9 +6210,9 @@ } }, "node_modules/eventsource": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.0.tgz", - "integrity": "sha512-VSJjT5oCNrFvCS6igjzPAt5hBzQ2qPBFIbJ03zLI9SE0mxwZpMw6BfJrbFHm1a141AavMEB8JHmBhWAd66PfCg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.1.tgz", + "integrity": "sha512-qV5ZC0h7jYIAOhArFJgSfdyz6rALJyb270714o7ZtNnw2WSJ+eexhKtE0O8LYPRsHZHf2osHKZBxGPvm3kPkCA==", "dependencies": { "original": "^1.0.0" }, @@ -19046,9 +19046,9 @@ "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==" }, "eventsource": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.0.tgz", - "integrity": "sha512-VSJjT5oCNrFvCS6igjzPAt5hBzQ2qPBFIbJ03zLI9SE0mxwZpMw6BfJrbFHm1a141AavMEB8JHmBhWAd66PfCg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.1.tgz", + "integrity": "sha512-qV5ZC0h7jYIAOhArFJgSfdyz6rALJyb270714o7ZtNnw2WSJ+eexhKtE0O8LYPRsHZHf2osHKZBxGPvm3kPkCA==", "requires": { "original": "^1.0.0" } From d2ca0b868d964d0dabb3ea99bc54d096a1a6def9 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 1 Jun 2022 06:17:17 -0400 Subject: [PATCH 074/203] consensus: 
gossip catchup sleeping protocol (#8652) --- internal/consensus/reactor.go | 28 +++------------------------- 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 18d5851a4e..c353e0c73d 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -509,10 +509,12 @@ OUTER_LOOP: return } + timer.Reset(r.state.config.PeerGossipSleepDuration) + select { case <-ctx.Done(): return - default: + case <-timer.C: } rs := r.getRoundState() @@ -560,13 +562,6 @@ OUTER_LOOP: "blockstoreBase", blockStoreBase, "blockstoreHeight", r.state.blockStore.Height(), ) - - timer.Reset(r.state.config.PeerGossipSleepDuration) - select { - case <-timer.C: - case <-ctx.Done(): - return - } } else { ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader) } @@ -582,12 +577,6 @@ OUTER_LOOP: // if height and round don't match, sleep if (rs.Height != prs.Height) || (rs.Round != prs.Round) { - timer.Reset(r.state.config.PeerGossipSleepDuration) - select { - case <-timer.C: - case <-ctx.Done(): - return - } continue OUTER_LOOP } @@ -637,18 +626,7 @@ OUTER_LOOP: return } } - - continue OUTER_LOOP - } - - // nothing to do -- sleep - timer.Reset(r.state.config.PeerGossipSleepDuration) - select { - case <-timer.C: - case <-ctx.Done(): - return } - continue OUTER_LOOP } } From 7ffc872dd78a67b95feb1e5f93f8732f5937e830 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Wed, 1 Jun 2022 12:35:31 +0200 Subject: [PATCH 075/203] config: complete removal of seed addresses in config (#8654) --- CHANGELOG_PENDING.md | 1 + UPGRADING.md | 5 +++++ cmd/tendermint/commands/run_node.go | 1 - config/config.go | 9 --------- config/toml.go | 7 ------- scripts/confix/plan.go | 6 ++++++ test/e2e/runner/setup.go | 8 -------- 7 files changed, 12 insertions(+), 25 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 6f10f6bfdc..797725bd7a 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -23,6 
+23,7 @@ Special thanks to external contributors on this release: - [config] \#8222 default indexer configuration to null. (@creachadair) - [rpc] \#8570 rework timeouts to be per-method instead of global. (@creachadair) - [rpc] \#8624 deprecate `broadcast_tx_commit` and `braodcast_tx_sync` and `broadcast_tx_async` in favor of `braodcast_tx`. (@tychoish) + - [config] \#8654 remove deprecated `seeds` field from config. Users should switch to `bootstrap-peers` instead. (@cmwaters) - Apps diff --git a/UPGRADING.md b/UPGRADING.md index 60bd9a10f6..44e5898882 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -62,6 +62,11 @@ applications remains correct. turned on are not affected. Operators who wish to enable indexing for a new node, however, must now edit the `config.toml` explicitly. +- The function of seed nodes was modified in the past release. Now, seed nodes + are treated identically to any other peer, however they only run the PEX + reactor. Because of this `seeds` has been removed from the config. Users + should add any seed nodes in the list of `bootstrap-peers`. + ### RPC Changes Tendermint v0.36 adds a new RPC event subscription API. The existing event diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 347a04034e..f4d49b91e6 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -63,7 +63,6 @@ func AddNodeFlags(cmd *cobra.Command, conf *cfg.Config) { "p2p.laddr", conf.P2P.ListenAddress, "node listen address. 
(0.0.0.0:0 means any interface, any port)") - cmd.Flags().String("p2p.seeds", conf.P2P.Seeds, "comma-delimited ID@host:port seed nodes") //nolint: staticcheck cmd.Flags().String("p2p.persistent-peers", conf.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") cmd.Flags().Bool("p2p.upnp", conf.P2P.UPNP, "enable/disable UPNP port forwarding") cmd.Flags().Bool("p2p.pex", conf.P2P.PexReactor, "enable/disable Peer-Exchange") diff --git a/config/config.go b/config/config.go index 7d0a4915e6..43b3fc10f3 100644 --- a/config/config.go +++ b/config/config.go @@ -612,15 +612,6 @@ type P2PConfig struct { //nolint: maligned // Address to advertise to peers for them to dial ExternalAddress string `mapstructure:"external-address"` - // Comma separated list of seed nodes to connect to - // We only use these if we can’t connect to peers in the addrbook - // - // Deprecated: This value is not used by the new PEX reactor. Use - // BootstrapPeers instead. - // - // TODO(#5670): Remove once the p2p refactor is complete. - Seeds string `mapstructure:"seeds"` - // Comma separated list of peers to be added to the peer store // on startup. Either BootstrapPeers or PersistentPeers are // needed for peer discovery diff --git a/config/toml.go b/config/toml.go index 0fac73cdda..4db4f4e65d 100644 --- a/config/toml.go +++ b/config/toml.go @@ -295,13 +295,6 @@ laddr = "{{ .P2P.ListenAddress }}" # example: 159.89.10.97:26656 external-address = "{{ .P2P.ExternalAddress }}" -# Comma separated list of seed nodes to connect to -# We only use these if we can’t connect to peers in the addrbook -# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead. -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 -seeds = "{{ .P2P.Seeds }}" - # Comma separated list of peers to be added to the peer store # on startup. 
Either BootstrapPeers or PersistentPeers are # needed for peer discovery diff --git a/scripts/confix/plan.go b/scripts/confix/plan.go index a0ceef9379..706343338f 100644 --- a/scripts/confix/plan.go +++ b/scripts/confix/plan.go @@ -228,4 +228,10 @@ var plan = transform.Plan{ T: transform.Remove(parser.Key{"mempool", "recheck"}), ErrorOK: true, }, + { + // Since https://github.com/tendermint/tendermint/pull/8654. + Desc: "Remove the seeds option from the [p2p] section", + T: transform.Remove(parser.Key{"p2p", "seeds"}), + ErrorOK: true, + }, } diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 5887f13efa..f3c1ddc0f6 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -322,14 +322,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { } } - cfg.P2P.Seeds = "" //nolint: staticcheck - for _, seed := range node.Seeds { - if len(cfg.P2P.Seeds) > 0 { //nolint: staticcheck - cfg.P2P.Seeds += "," //nolint: staticcheck - } - cfg.P2P.Seeds += seed.AddressP2P(true) //nolint: staticcheck - } - cfg.P2P.PersistentPeers = "" for _, peer := range node.PersistentPeers { if len(cfg.P2P.PersistentPeers) > 0 { From bf676827eb9e49ca1cf2d0e836bca940489e7886 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Wed, 1 Jun 2022 06:01:43 -0700 Subject: [PATCH 076/203] Revert buf tools upgrade requiring Go 1.18 (#8658) --- .github/workflows/build.yml | 6 +++--- .github/workflows/check-generated.yml | 4 ++-- .github/workflows/e2e-manual.yml | 2 +- .github/workflows/e2e-nightly-34x.yml | 2 +- .github/workflows/e2e-nightly-35x.yml | 2 +- .github/workflows/e2e-nightly-master.yml | 2 +- .github/workflows/e2e.yml | 2 +- .github/workflows/fuzz-nightly.yml | 2 +- .github/workflows/lint.yml | 2 +- .github/workflows/release.yml | 2 +- .github/workflows/tests.yml | 2 +- go.mod | 13 ++++++------- go.sum | 23 +++++++++-------------- 13 files changed, 29 insertions(+), 35 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ce6958eabc..bcfea1cba3 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,7 +22,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.17" + go-version: "1.18" - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: @@ -43,7 +43,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.17" + go-version: "1.18" - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: @@ -65,7 +65,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.17" + go-version: "1.18" - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index 3bf63b79cf..1d43b6fe5b 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -18,7 +18,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 @@ -41,7 +41,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 with: diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index 
bab3fcf62d..6da4c33428 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -17,7 +17,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index d7af7b347d..82265a2589 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -22,7 +22,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 with: diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml index c397ead9c0..d737f69c35 100644 --- a/.github/workflows/e2e-nightly-35x.yml +++ b/.github/workflows/e2e-nightly-35x.yml @@ -22,7 +22,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 with: diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index fc1fff1048..7a02ad143a 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -21,7 +21,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 6666999fca..9043efb03e 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -16,7 +16,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index 0fcab9ae5b..07941a734d 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -15,7 +15,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: 
actions/checkout@v3 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 863d5ab106..f8b493a2e0 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '^1.17' + go-version: '1.18' - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2e0cd548c5..4b3e22446c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - name: Build uses: goreleaser/goreleaser-action@v3 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6ce8a4d341..e9e47e25b4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -18,7 +18,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.17" + go-version: "1.18" - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: diff --git a/go.mod b/go.mod index a8c3a5ac81..174c9f9e10 100644 --- a/go.mod +++ b/go.mod @@ -30,14 +30,14 @@ require ( github.com/stretchr/testify v1.7.1 github.com/tendermint/tm-db v0.6.6 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 - golang.org/x/net v0.0.0-20220526153639-5463443f8c37 + golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.46.2 pgregory.net/rapid v0.4.7 ) require ( - github.com/bufbuild/buf v1.5.0 + github.com/bufbuild/buf v1.4.0 github.com/creachadair/atomicfile v0.2.6 github.com/creachadair/taskgroup v0.3.2 github.com/golangci/golangci-lint v1.46.0 @@ -48,15 +48,14 @@ require ( require ( github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect - github.com/bufbuild/connect-go v0.0.0-20220525141242-b79148bf7e44 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + 
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/firefart/nonamedreturns v1.0.1 // indirect github.com/gofrs/uuid v4.2.0+incompatible // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f // indirect github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753 // indirect - github.com/klauspost/compress v1.15.5 // indirect + github.com/klauspost/compress v1.15.1 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/lufeee/execinquery v1.0.0 // indirect github.com/pelletier/go-toml/v2 v2.0.1 // indirect @@ -229,11 +228,11 @@ require ( go.etcd.io/bbolt v1.3.6 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect - golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect + golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect - google.golang.org/genproto v0.0.0-20220525015930-6ca3db687a9d // indirect + google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 4ca6a03896..16dff839d7 100644 --- a/go.sum +++ b/go.sum @@ -177,10 +177,8 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= 
-github.com/bufbuild/buf v1.5.0 h1:JHcWhMGMUEYJxXhbS8lJnAxfbuDaRIzuPB0DkAgTb9E= -github.com/bufbuild/buf v1.5.0/go.mod h1:dzEhpYNhRG0AzL/E9LlpzRki72XvvZO8b6FurWUa8Gc= -github.com/bufbuild/connect-go v0.0.0-20220525141242-b79148bf7e44 h1:aBc5SwEZ+BGrKpCJSKwb3heqoPVEBUcNYhyAX5XfH5Q= -github.com/bufbuild/connect-go v0.0.0-20220525141242-b79148bf7e44/go.mod h1:BajZGyRXK+Oq6Ddkm7atQ1Tu4W92OMpam7vyhFIf0ww= +github.com/bufbuild/buf v1.4.0 h1:GqE3a8CMmcFvWPzuY3Mahf9Kf3S9XgZ/ORpfYFzO+90= +github.com/bufbuild/buf v1.4.0/go.mod h1:mwHG7klTHnX+rM/ym8LXGl7vYpVmnwT96xWoRB4H5QI= github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= @@ -236,9 +234,8 @@ github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/atomicfile v0.2.6 h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg= github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc= github.com/creachadair/command v0.0.0-20220426235536-a748effdf6a1/go.mod h1:bAM+qFQb/KwWyCc9MLC4U1jvn3XyakqP5QRkds5T6cY= @@ -672,8 +669,8 @@ github.com/kkdai/bstream 
v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.5 h1:qyCLMz2JCrKADihKOh9FxnW3houKeNsp2h5OEz0QSEA= -github.com/klauspost/compress v1.15.5/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1339,9 +1336,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220526153639-5463443f8c37 h1:lUkvobShwKsOesNfWWlCS5q7fnbG1MEliIzwu886fn8= -golang.org/x/net v0.0.0-20220526153639-5463443f8c37/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1494,8 +1490,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 h1:EH1Deb8WZJ0xc0WK//leUHXcX9aLE5SymusoTmMZye8= +golang.org/x/term v0.0.0-20220411215600-e5f449aeb171/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1761,9 +1757,8 @@ google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= 
-google.golang.org/genproto v0.0.0-20220525015930-6ca3db687a9d h1:8BnRR08DxAQ+e2pFx64Q3Ltg/AkrrxyG1LLa1WpomyA= -google.golang.org/genproto v0.0.0-20220525015930-6ca3db687a9d/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= From 9a8c334362fd8f55e4b7f640eaa81a6a9d24a915 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Jun 2022 13:14:50 +0000 Subject: [PATCH 077/203] build(deps): Bump google.golang.org/grpc from 1.46.2 to 1.47.0 (#8667) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.46.2 to 1.47.0.
Release notes

Sourced from google.golang.org/grpc's releases.

Release 1.47.0

New Features

  • xds: add support for RBAC metadata invert matchers (#5345)

Bug Fixes

  • client: fix a context leaked if a connection to an address is lost before it is fully established (#5337)
  • client: fix potential panic during RPC retries (#5323)
  • xds/client: fix a potential concurrent map read/write in load reporting (#5331)
  • client/SubConn: do not recreate addrConn if UpdateAddresses is called with the same addresses (#5373)
  • xds/eds: resources containing duplicate localities with the same priority will be rejected (#5303)
  • server: return Canceled or DeadlineExceeded status code when writing headers to a stream that is already closed (#5292)

Behavior Changes

  • xds/priority: start the init timer when a child switches to Connecting from non-failure states (#5334)
  • server: respond with HTTP Status 405 and gRPC status INTERNAL if the method sent to server is not POST (#5364)

Documentation

  • server: clarify documentation around setting and sending headers and ServerStream errors (#5302)
Commits
  • 5b509df Change version to 1.47.0 (#5377)
  • ed75225 Don't call cmp in non testing file (#5370)
  • 081c688 client: fix hctx leakage in addrConn.createTransport (#5337)
  • 30b9d59 client/SubConn: do not recreate addrConn if UpdateAddresses is called with th...
  • 459729d xds/priority: avoid sending duplicate updates to children (#5374)
  • 9f4b31a Added HTTP status and grpc status to POST check (#5364)
  • 333a441 xds/ringhash: update connectivity state aggregation, and make sure at least o...
  • e23132c Added support for metadata matcher invert (#5345)
  • d9b952b xds/resolver: use correct resource name in log message (#5357)
  • db79903 xds/priority: start the init timer when a child switch to Connecting from non...
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=google.golang.org/grpc&package-manager=go_modules&previous-version=1.46.2&new-version=1.47.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 174c9f9e10..fd2f15cb86 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/grpc v1.46.2 + google.golang.org/grpc v1.47.0 pgregory.net/rapid v0.4.7 ) diff --git a/go.sum b/go.sum index 16dff839d7..3d036cf0c4 100644 --- a/go.sum +++ b/go.sum @@ -1795,8 +1795,9 @@ google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5 google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= From 56fc80d66d5a1eb81f513574859530b5cc089dc7 Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Wed, 1 Jun 2022 18:53:10 +0200 Subject: [PATCH 078/203] abci: Move `app_hash` parameter from `Commit` to `FinalizeBlock` (#8664) * Removed from proto * make proto-gen * make build works * make some tests pass * Fix TestMempoolTxConcurrentWithCommit * Minor change * Update abci/types/types.go * Update 
internal/state/execution.go * Update test/e2e/app/state.go Co-authored-by: Callum Waters * Updated changelog and `UPGRADING.md` * Fixed abci-cli tests, and doc * Addressed @cmwaters' comments * Addressed @cmwaters' comments, part 2 Co-authored-by: Callum Waters --- CHANGELOG_PENDING.md | 3 + UPGRADING.md | 8 + abci/cmd/abci-cli/abci-cli.go | 87 +++++---- abci/example/kvstore/kvstore.go | 15 +- abci/tests/server/client.go | 18 +- abci/tests/test_cli/ex1.abci | 3 +- abci/tests/test_cli/ex1.abci.out | 19 +- abci/tests/test_cli/ex2.abci | 3 + abci/tests/test_cli/ex2.abci.out | 15 ++ abci/types/types.pb.go | 290 ++++++++++------------------- docs/app-dev/abci-cli.md | 29 +-- internal/consensus/mempool_test.go | 40 +++- internal/consensus/replay_stubs.go | 2 +- internal/consensus/replay_test.go | 8 +- internal/state/execution.go | 48 +++-- proto/tendermint/abci/types.proto | 2 - test/e2e/app/app.go | 4 +- test/e2e/app/state.go | 15 +- 18 files changed, 294 insertions(+), 315 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 797725bd7a..c3e191e7e2 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -30,6 +30,7 @@ Special thanks to external contributors on this release: - [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec). - [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint. (@tychoish) - [abci] \#8605 Remove info, log, events, gasUsed and mempoolError fields from ResponseCheckTx as they are not used by Tendermint. (@jmalicevic) + - [abci] \#8664 Move `app_hash` parameter from `Commit` to `FinalizeBlock`. (@sergio-mena) - P2P Protocol @@ -69,6 +70,8 @@ Special thanks to external contributors on this release: - [consensus] \#7711 Use the proposer timestamp for the first height instead of the genesis time. Chains will still start consensus at the genesis time. (@anca) - [cli] \#8281 Add a tool to update old config files to the latest version. 
(@creachadair) - [consenus] \#8514 move `RecheckTx` from the local node mempool config to a global `ConsensusParams` field in `BlockParams` (@cmwaters) +- [abci] ABCI++ [specified](https://github.com/tendermint/tendermint/tree/master/spec/abci%2B%2B). (@sergio-mena, @cmwaters, @josef-widder) +- [abci] ABCI++ [implemented](https://github.com/orgs/tendermint/projects/9). (@williambanfield, @thanethomson, @sergio-mena) ### IMPROVEMENTS diff --git a/UPGRADING.md b/UPGRADING.md index 44e5898882..13582e75b4 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -21,6 +21,14 @@ For information on how ABCI++ works, see the In particular, the simplest way to upgrade your application is described [here](https://github.com/tendermint/tendermint/blob/master/spec/abci%2B%2B/abci++_tmint_expected_behavior_002_draft.md#adapting-existing-applications-that-use-abci). +#### Moving the `app_hash` parameter + +The Application's hash (or any data representing the Application's current +state) is known by the time `FinalizeBlock` finishes its execution. +Accordingly, the `app_hash` parameter has been moved from `ResponseCommit` to +`ResponseFinalizeBlock`, since it makes sense for the Application to return +this value as soon as is it known. 
+ #### ABCI Mutex In previous versions of ABCI, Tendermint was prevented from making diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index b1b1b2c7ec..97a5e815be 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -298,23 +298,23 @@ func cmdTest(cmd *cobra.Command, args []string) error { return compose( []func() error{ func() error { return servertest.InitChain(ctx, client) }, - func() error { return servertest.Commit(ctx, client, nil) }, + func() error { return servertest.Commit(ctx, client) }, func() error { return servertest.FinalizeBlock(ctx, client, [][]byte{ []byte("abc"), }, []uint32{ code.CodeTypeBadNonce, - }, nil) + }, nil, nil) }, - func() error { return servertest.Commit(ctx, client, nil) }, + func() error { return servertest.Commit(ctx, client) }, func() error { return servertest.FinalizeBlock(ctx, client, [][]byte{ {0x00}, }, []uint32{ code.CodeTypeOK, - }, nil) + }, nil, []byte{0, 0, 0, 0, 0, 0, 0, 1}) }, - func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) }, + func() error { return servertest.Commit(ctx, client) }, func() error { return servertest.FinalizeBlock(ctx, client, [][]byte{ {0x00}, @@ -330,9 +330,9 @@ func cmdTest(cmd *cobra.Command, args []string) error { code.CodeTypeOK, code.CodeTypeOK, code.CodeTypeBadNonce, - }, nil) + }, nil, []byte{0, 0, 0, 0, 0, 0, 0, 5}) }, - func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) }, + func() error { return servertest.Commit(ctx, client) }, }) } @@ -494,7 +494,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error { const codeBad uint32 = 10 -// Append a new tx to application +// Append new txs to application func cmdFinalizeBlock(cmd *cobra.Command, args []string) error { if len(args) == 0 { printResponse(cmd, args, response{ @@ -515,14 +515,19 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error { if err != nil { return err } + resps := make([]response, 0, 
len(res.TxResults)+1) for _, tx := range res.TxResults { - printResponse(cmd, args, response{ + resps = append(resps, response{ Code: tx.Code, Data: tx.Data, Info: tx.Info, Log: tx.Log, }) } + resps = append(resps, response{ + Data: res.AppHash, + }) + printResponse(cmd, args, resps...) return nil } @@ -552,13 +557,11 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { // Get application Merkle root hash func cmdCommit(cmd *cobra.Command, args []string) error { - res, err := client.Commit(cmd.Context()) + _, err := client.Commit(cmd.Context()) if err != nil { return err } - printResponse(cmd, args, response{ - Data: res.Data, - }) + printResponse(cmd, args, response{}) return nil } @@ -632,44 +635,46 @@ func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error { //-------------------------------------------------------------------------------- -func printResponse(cmd *cobra.Command, args []string, rsp response) { +func printResponse(cmd *cobra.Command, args []string, rsps ...response) { if flagVerbose { fmt.Println(">", cmd.Use, strings.Join(args, " ")) } - // Always print the status code. - if rsp.Code == types.CodeTypeOK { - fmt.Printf("-> code: OK\n") - } else { - fmt.Printf("-> code: %d\n", rsp.Code) - - } + for _, rsp := range rsps { + // Always print the status code. 
+ if rsp.Code == types.CodeTypeOK { + fmt.Printf("-> code: OK\n") + } else { + fmt.Printf("-> code: %d\n", rsp.Code) - if len(rsp.Data) != 0 { - // Do no print this line when using the commit command - // because the string comes out as gibberish - if cmd.Use != "commit" { - fmt.Printf("-> data: %s\n", rsp.Data) } - fmt.Printf("-> data.hex: 0x%X\n", rsp.Data) - } - if rsp.Log != "" { - fmt.Printf("-> log: %s\n", rsp.Log) - } - if rsp.Query != nil { - fmt.Printf("-> height: %d\n", rsp.Query.Height) - if rsp.Query.Key != nil { - fmt.Printf("-> key: %s\n", rsp.Query.Key) - fmt.Printf("-> key.hex: %X\n", rsp.Query.Key) + if len(rsp.Data) != 0 { + // Do no print this line when using the finalize_block command + // because the string comes out as gibberish + if cmd.Use != "finalize_block" { + fmt.Printf("-> data: %s\n", rsp.Data) + } + fmt.Printf("-> data.hex: 0x%X\n", rsp.Data) } - if rsp.Query.Value != nil { - fmt.Printf("-> value: %s\n", rsp.Query.Value) - fmt.Printf("-> value.hex: %X\n", rsp.Query.Value) + if rsp.Log != "" { + fmt.Printf("-> log: %s\n", rsp.Log) } - if rsp.Query.ProofOps != nil { - fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps) + + if rsp.Query != nil { + fmt.Printf("-> height: %d\n", rsp.Query.Height) + if rsp.Query.Key != nil { + fmt.Printf("-> key: %s\n", rsp.Query.Key) + fmt.Printf("-> key.hex: %X\n", rsp.Query.Key) + } + if rsp.Query.Value != nil { + fmt.Printf("-> value: %s\n", rsp.Query.Value) + fmt.Printf("-> value.hex: %X\n", rsp.Query.Value) + } + if rsp.Query.ProofOps != nil { + fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps) + } } } } diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 61d1292398..bbb2fbe346 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -196,7 +196,13 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal respTxs[i] = app.handleTx(tx) } - return &types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: 
app.ValUpdates}, nil + // Using a memdb - just return the big endian size of the db + appHash := make([]byte, 8) + binary.PutVarint(appHash, app.state.Size) + app.state.AppHash = appHash + app.state.Height++ + + return &types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates, AppHash: appHash}, nil } func (*Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { @@ -207,14 +213,9 @@ func (app *Application) Commit(_ context.Context) (*types.ResponseCommit, error) app.mu.Lock() defer app.mu.Unlock() - // Using a memdb - just return the big endian size of the db - appHash := make([]byte, 8) - binary.PutVarint(appHash, app.state.Size) - app.state.AppHash = appHash - app.state.Height++ saveState(app.state) - resp := &types.ResponseCommit{Data: appHash} + resp := &types.ResponseCommit{} if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks { resp.RetainHeight = app.state.Height - app.RetainBlocks + 1 } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index eeae747460..cddb42ec0a 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -32,25 +32,20 @@ func InitChain(ctx context.Context, client abciclient.Client) error { return nil } -func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error { - res, err := client.Commit(ctx) - data := res.Data +func Commit(ctx context.Context, client abciclient.Client) error { + _, err := client.Commit(ctx) if err != nil { fmt.Println("Failed test: Commit") fmt.Printf("error while committing: %v\n", err) return err } - if !bytes.Equal(data, hashExp) { - fmt.Println("Failed test: Commit") - fmt.Printf("Commit hash was unexpected. 
Got %X expected %X\n", data, hashExp) - return errors.New("commitTx failed") - } fmt.Println("Passed test: Commit") return nil } -func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error { +func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte, hashExp []byte) error { res, _ := client.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: txBytes}) + appHash := res.AppHash for i, tx := range res.TxResults { code, data, log := tx.Code, tx.Data, tx.Log if code != codeExp[i] { @@ -66,6 +61,11 @@ func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]by return errors.New("FinalizeBlock error") } } + if !bytes.Equal(appHash, hashExp) { + fmt.Println("Failed test: FinalizeBlock") + fmt.Printf("Application hash was unexpected. Got %X expected %X\n", appHash, hashExp) + return errors.New("FinalizeBlock error") + } fmt.Println("Passed test: FinalizeBlock") return nil } diff --git a/abci/tests/test_cli/ex1.abci b/abci/tests/test_cli/ex1.abci index 09457189ed..56355dc945 100644 --- a/abci/tests/test_cli/ex1.abci +++ b/abci/tests/test_cli/ex1.abci @@ -1,9 +1,8 @@ echo hello info -commit finalize_block "abc" -info commit +info query "abc" finalize_block "def=xyz" "ghi=123" commit diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index c004ab0599..9a35290b01 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -8,11 +8,12 @@ -> data: {"size":0} -> data.hex: 0x7B2273697A65223A307D -> commit +> finalize_block "abc" -> code: OK --> data.hex: 0x0000000000000000 +-> code: OK +-> data.hex: 0x0200000000000000 -> finalize_block "abc" +> commit -> code: OK > info @@ -20,14 +21,10 @@ -> data: {"size":1} -> data.hex: 0x7B2273697A65223A317D -> commit --> code: OK --> data.hex: 0x0200000000000000 - > query "abc" -> code: OK -> log: exists --> height: 2 +-> height: 1 -> key: abc 
-> key.hex: 616263 -> value: abc @@ -35,17 +32,17 @@ > finalize_block "def=xyz" "ghi=123" -> code: OK -> finalize_block "def=xyz" "ghi=123" -> code: OK +-> code: OK +-> data.hex: 0x0600000000000000 > commit -> code: OK --> data.hex: 0x0600000000000000 > query "def" -> code: OK -> log: exists --> height: 3 +-> height: 2 -> key: def -> key.hex: 646566 -> value: xyz diff --git a/abci/tests/test_cli/ex2.abci b/abci/tests/test_cli/ex2.abci index 90e99c2f90..1cabba1512 100644 --- a/abci/tests/test_cli/ex2.abci +++ b/abci/tests/test_cli/ex2.abci @@ -1,7 +1,10 @@ check_tx 0x00 check_tx 0xff finalize_block 0x00 +commit check_tx 0x00 finalize_block 0x01 +commit finalize_block 0x04 +commit info diff --git a/abci/tests/test_cli/ex2.abci.out b/abci/tests/test_cli/ex2.abci.out index aab0b1966f..e29a353682 100644 --- a/abci/tests/test_cli/ex2.abci.out +++ b/abci/tests/test_cli/ex2.abci.out @@ -6,15 +6,30 @@ > finalize_block 0x00 -> code: OK +-> code: OK +-> data.hex: 0x0200000000000000 + +> commit +-> code: OK > check_tx 0x00 -> code: OK > finalize_block 0x01 -> code: OK +-> code: OK +-> data.hex: 0x0400000000000000 + +> commit +-> code: OK > finalize_block 0x04 -> code: OK +-> code: OK +-> data.hex: 0x0600000000000000 + +> commit +-> code: OK > info -> code: OK diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 77d515bbe7..946cfa6af5 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -2382,8 +2382,7 @@ func (m *ResponseDeliverTx) GetCodespace() string { type ResponseCommit struct { // reserve 1 - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` + RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` } func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } @@ -2419,13 +2418,6 @@ func (m *ResponseCommit) XXX_DiscardUnknown() { var 
xxx_messageInfo_ResponseCommit proto.InternalMessageInfo -func (m *ResponseCommit) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - func (m *ResponseCommit) GetRetainHeight() int64 { if m != nil { return m.RetainHeight @@ -2871,7 +2863,6 @@ type ResponseFinalizeBlock struct { ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,3,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,4,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` AppHash []byte `protobuf:"bytes,5,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - RetainHeight int64 `protobuf:"varint,6,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` } func (m *ResponseFinalizeBlock) Reset() { *m = ResponseFinalizeBlock{} } @@ -2942,13 +2933,6 @@ func (m *ResponseFinalizeBlock) GetAppHash() []byte { return nil } -func (m *ResponseFinalizeBlock) GetRetainHeight() int64 { - if m != nil { - return m.RetainHeight - } - return 0 -} - type CommitInfo struct { Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` @@ -3845,13 +3829,13 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 3263 bytes of a gzipped FileDescriptorProto + // 3253 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x73, 0x23, 0xd5, 0x11, 0xd7, 0xe8, 0x5b, 0xad, 0xaf, 0xf1, 0xb3, 0x59, 0xb4, 0x62, 0xd7, 0x36, 0x43, 0x01, 0xcb, 0x02, 0x36, 0xf1, 0x66, 0x61, 0xc9, 0x42, 0x28, 0x5b, 0xd6, 0x46, 0xf6, 0x7a, 0x6d, 0x33, 0x96, 0x4d, 0x91, 0x0f, 0x86, 0xb1, 0xf4, 0x6c, 0x0d, 0x2b, 0x69, 0x86, 0x99, 0x91, 0x91, 0x39, 0x26, - 0xc5, 0x85, 0x43, 0xc2, 0x25, 0x95, 
0xa4, 0x2a, 0xdc, 0x92, 0xaa, 0xe4, 0x3f, 0x48, 0x2e, 0x39, - 0xe5, 0xc0, 0x21, 0x07, 0x4e, 0xa9, 0x9c, 0x48, 0x0a, 0x6e, 0xf9, 0x07, 0x72, 0x4b, 0xa5, 0xde, + 0xc5, 0x85, 0x43, 0x8a, 0x4b, 0x2a, 0x49, 0x55, 0xb8, 0x25, 0x55, 0xc9, 0x7f, 0x90, 0x5c, 0x72, + 0xca, 0x81, 0x43, 0x0e, 0x9c, 0x52, 0x39, 0x91, 0x14, 0xdc, 0xf8, 0x07, 0x72, 0x4b, 0xa5, 0xde, 0xc7, 0x7c, 0x49, 0x33, 0xfa, 0x00, 0x8a, 0xaa, 0x54, 0x71, 0x9b, 0xd7, 0xd3, 0xdd, 0xef, 0xab, 0x5f, 0x77, 0xff, 0xfa, 0x3d, 0x78, 0xcc, 0xc6, 0xfd, 0x36, 0x36, 0x7b, 0x5a, 0xdf, 0x5e, 0x57, 0x4f, 0x5b, 0xda, 0xba, 0x7d, 0x69, 0x60, 0x6b, 0xcd, 0x30, 0x75, 0x5b, 0x47, 0x65, 0xef, 0xe7, @@ -3859,7 +3843,7 @@ var fileDescriptor_252557cfdd89a31a = []byte{ 0x31, 0xfe, 0xea, 0xb5, 0xf1, 0xdf, 0x0f, 0xf1, 0x25, 0xd7, 0x16, 0x10, 0xa6, 0xbd, 0xac, 0x1b, 0xaa, 0xa9, 0xf6, 0x9c, 0xdf, 0x2b, 0xe7, 0xba, 0x7e, 0xde, 0xc5, 0xeb, 0xb4, 0x75, 0x3a, 0x38, 0x5b, 0xb7, 0xb5, 0x1e, 0xb6, 0x6c, 0xb5, 0x67, 0x70, 0x86, 0xa5, 0x73, 0xfd, 0x5c, 0xa7, 0x9f, - 0xeb, 0xe4, 0x8b, 0x51, 0xa5, 0x3f, 0xe7, 0x20, 0x23, 0xe3, 0x77, 0x07, 0xd8, 0xb2, 0xd1, 0x06, + 0xeb, 0xe4, 0x8b, 0x51, 0xa5, 0xbf, 0xe4, 0x20, 0x23, 0xe3, 0x77, 0x07, 0xd8, 0xb2, 0xd1, 0x06, 0x24, 0x71, 0xab, 0xa3, 0x57, 0x84, 0x55, 0xe1, 0x46, 0x7e, 0xe3, 0xda, 0xda, 0xc8, 0xf0, 0xd7, 0x38, 0x5f, 0xbd, 0xd5, 0xd1, 0x1b, 0x31, 0x99, 0xf2, 0xa2, 0xdb, 0x90, 0x3a, 0xeb, 0x0e, 0xac, 0x4e, 0x25, 0x4e, 0x85, 0xae, 0x47, 0x09, 0xdd, 0x23, 0x4c, 0x8d, 0x98, 0xcc, 0xb8, 0x49, 0x57, @@ -3872,7 +3856,7 @@ var fileDescriptor_252557cfdd89a31a = []byte{ 0x39, 0x52, 0x96, 0x72, 0x35, 0x62, 0x32, 0xe7, 0x47, 0xfb, 0x50, 0xea, 0x6a, 0x96, 0xad, 0x58, 0x7d, 0xd5, 0xb0, 0x3a, 0xba, 0x6d, 0x55, 0xf2, 0x54, 0xc3, 0x93, 0x51, 0x1a, 0xf6, 0x34, 0xcb, 0x3e, 0x72, 0x98, 0x1b, 0x31, 0xb9, 0xd8, 0xf5, 0x13, 0x88, 0x3e, 0xfd, 0xec, 0x0c, 0x9b, 0xae, - 0xc2, 0x4a, 0x61, 0xb2, 0xbe, 0x03, 0xc2, 0xed, 0xc8, 0x13, 0x7d, 0xba, 0x9f, 0x80, 0x7e, 0x04, + 0xc2, 0x4a, 0x61, 0xb2, 0xbe, 0x03, 0xc2, 0xed, 0xc8, 0x13, 
0x7d, 0xba, 0x9f, 0x80, 0x7e, 0x02, 0x8b, 0x5d, 0x5d, 0x6d, 0xbb, 0xea, 0x94, 0x56, 0x67, 0xd0, 0x7f, 0x58, 0x29, 0x52, 0xa5, 0xcf, 0x44, 0x0e, 0x52, 0x57, 0xdb, 0x8e, 0x8a, 0x1a, 0x11, 0x68, 0xc4, 0xe4, 0x85, 0xee, 0x28, 0x11, 0xbd, 0x05, 0x4b, 0xaa, 0x61, 0x74, 0x2f, 0x47, 0xb5, 0x97, 0xa8, 0xf6, 0x9b, 0x51, 0xda, 0x37, @@ -3892,9 +3876,9 @@ var fileDescriptor_252557cfdd89a31a = []byte{ 0x2e, 0xc9, 0x9b, 0xe8, 0x09, 0x28, 0xd2, 0x49, 0x28, 0xce, 0x7f, 0xe2, 0xe6, 0x92, 0x72, 0x81, 0x12, 0x4f, 0x38, 0xd3, 0x0a, 0xe4, 0x8d, 0x0d, 0xc3, 0x65, 0x49, 0x50, 0x16, 0x30, 0x36, 0x0c, 0x87, 0xe1, 0x71, 0x28, 0x90, 0x19, 0xbb, 0x1c, 0x49, 0xda, 0x49, 0x9e, 0xd0, 0x38, 0x8b, 0xf4, - 0xb7, 0x38, 0x88, 0xa3, 0xae, 0x0b, 0xdd, 0x81, 0x24, 0xf1, 0xe2, 0xdc, 0x21, 0x57, 0xd7, 0x98, + 0xf7, 0x38, 0x88, 0xa3, 0xae, 0x0b, 0xdd, 0x81, 0x24, 0xf1, 0xe2, 0xdc, 0x21, 0x57, 0xd7, 0x98, 0x8b, 0x5f, 0x73, 0x5c, 0xfc, 0x5a, 0xd3, 0x71, 0xf1, 0x5b, 0xd9, 0x4f, 0x3e, 0x5b, 0x89, 0x7d, - 0xf4, 0xcf, 0x15, 0x41, 0xa6, 0x12, 0xe8, 0x2a, 0x71, 0x58, 0xaa, 0xd6, 0x57, 0xb4, 0x36, 0x1d, + 0xf4, 0xaf, 0x15, 0x41, 0xa6, 0x12, 0xe8, 0x2a, 0x71, 0x58, 0xaa, 0xd6, 0x57, 0xb4, 0x36, 0x1d, 0x72, 0x8e, 0x78, 0x23, 0x55, 0xeb, 0xef, 0xb4, 0xd1, 0x1e, 0x88, 0x2d, 0xbd, 0x6f, 0xe1, 0xbe, 0x35, 0xb0, 0x14, 0x16, 0x42, 0xb8, 0x1b, 0x0e, 0x38, 0x53, 0x16, 0xc8, 0x6a, 0x0e, 0xe7, 0x21, 0x65, 0x94, 0xcb, 0xad, 0x20, 0x01, 0xdd, 0x03, 0xb8, 0x50, 0xbb, 0x5a, 0x5b, 0xb5, 0x75, 0xd3, @@ -3914,7 +3898,7 @@ var fileDescriptor_252557cfdd89a31a = []byte{ 0xe7, 0xf6, 0x2d, 0x98, 0x40, 0xcd, 0xde, 0x59, 0xb0, 0x2b, 0x90, 0x3e, 0xd3, 0xcd, 0x9e, 0x6a, 0x53, 0x65, 0x45, 0x99, 0xb7, 0xc8, 0x42, 0x32, 0x2f, 0x9e, 0xa0, 0x64, 0xd6, 0x90, 0x14, 0xb8, 0x1a, 0xe9, 0xbd, 0x89, 0x88, 0xd6, 0x6f, 0x63, 0xb6, 0xac, 0x45, 0x99, 0x35, 0x3c, 0x45, 0x6c, - 0xb0, 0xac, 0x41, 0xba, 0xb5, 0xe8, 0x5c, 0xa9, 0xfe, 0x9c, 0xcc, 0x5b, 0xd2, 0x1f, 0x13, 0x70, + 0xb0, 0xac, 0x41, 0xba, 0xb5, 0xe8, 0x5c, 0xa9, 0xfe, 0x9c, 0xcc, 0x5b, 0xd2, 0x9f, 
0x12, 0x70, 0x25, 0xdc, 0x87, 0xa3, 0x55, 0x28, 0xf4, 0xd4, 0xa1, 0x62, 0x0f, 0xb9, 0xd9, 0x09, 0x74, 0xe3, 0xa1, 0xa7, 0x0e, 0x9b, 0x43, 0x66, 0x73, 0x22, 0x24, 0xec, 0xa1, 0x55, 0x89, 0xaf, 0x26, 0x6e, 0x14, 0x64, 0xf2, 0x89, 0x8e, 0x61, 0xa1, 0xab, 0xb7, 0xd4, 0xae, 0xd2, 0x55, 0x2d, 0x5b, 0xe1, @@ -3925,22 +3909,22 @@ var fileDescriptor_252557cfdd89a31a = []byte{ 0xc7, 0xbd, 0xa4, 0xe7, 0x76, 0x2f, 0x2f, 0xc0, 0x52, 0x1f, 0x0f, 0x6d, 0xdf, 0x18, 0x99, 0xe1, 0x64, 0xe8, 0x5e, 0x20, 0xf2, 0xcf, 0xeb, 0x9f, 0xd8, 0x10, 0x7a, 0x86, 0x86, 0x45, 0x43, 0xb7, 0xb0, 0xa9, 0xa8, 0xed, 0xb6, 0x89, 0x2d, 0xab, 0x92, 0xa5, 0xdc, 0x65, 0x87, 0xbe, 0xc9, 0xc8, - 0xd2, 0x6f, 0xfc, 0x7b, 0x15, 0x0c, 0x83, 0x7c, 0x27, 0x04, 0x6f, 0x27, 0x8e, 0x60, 0x89, 0xcb, + 0xd2, 0x6f, 0xfd, 0x7b, 0x15, 0x0c, 0x83, 0x7c, 0x27, 0x04, 0x6f, 0x27, 0x8e, 0x60, 0x89, 0xcb, 0xb7, 0x03, 0x9b, 0xc1, 0xd2, 0xd1, 0xc7, 0xc6, 0x0f, 0xdc, 0xe8, 0x26, 0x20, 0x47, 0x7c, 0x86, - 0x7d, 0x48, 0x7c, 0xb5, 0x7d, 0x40, 0x90, 0xa4, 0xab, 0x94, 0x64, 0x4e, 0x88, 0x7c, 0xff, 0xbf, + 0x7d, 0x48, 0x7c, 0xbd, 0x7d, 0x40, 0x90, 0xa4, 0xab, 0x94, 0x64, 0x4e, 0x88, 0x7c, 0xff, 0xbf, 0xed, 0xcd, 0x6b, 0xb0, 0x30, 0x96, 0x63, 0xb8, 0xf3, 0x12, 0x42, 0xe7, 0x15, 0xf7, 0xcf, 0x4b, - 0xfa, 0xad, 0x00, 0xd5, 0xe8, 0xa4, 0x22, 0x54, 0xd5, 0xb3, 0xb0, 0xe0, 0xce, 0xc5, 0x1d, 0x1f, + 0xfa, 0x9d, 0x00, 0xd5, 0xe8, 0xa4, 0x22, 0x54, 0xd5, 0xb3, 0xb0, 0xe0, 0xce, 0xc5, 0x1d, 0x1f, 0x3b, 0xf5, 0xa2, 0xfb, 0x83, 0x0f, 0x30, 0xd2, 0x81, 0x3f, 0x09, 0xa5, 0x91, 0x94, 0x87, 0xed, - 0x42, 0xf1, 0xc2, 0xdf, 0xbf, 0xf4, 0xcb, 0x84, 0xeb, 0x55, 0x03, 0x79, 0x49, 0x88, 0xe5, 0xbd, - 0x0e, 0x8b, 0x6d, 0xdc, 0xd2, 0xda, 0x5f, 0xd6, 0xf0, 0x16, 0xb8, 0xf4, 0xb7, 0x76, 0x37, 0x83, - 0xdd, 0xfd, 0x1c, 0x20, 0x2b, 0x63, 0xcb, 0x20, 0xd9, 0x07, 0xda, 0x82, 0x1c, 0x1e, 0xb6, 0xb0, + 0x42, 0xf1, 0xc2, 0xdf, 0xbf, 0xf4, 0xab, 0x84, 0xeb, 0x55, 0x03, 0x79, 0x49, 0x88, 0xe5, 0xbd, + 0x0e, 0x8b, 0x6d, 0xdc, 0xd2, 0xda, 0x5f, 0xd5, 0xf0, 0x16, 0xb8, 0xf4, 
0x77, 0x76, 0x37, 0x83, + 0xdd, 0xfd, 0x12, 0x20, 0x2b, 0x63, 0xcb, 0x20, 0xd9, 0x07, 0xda, 0x82, 0x1c, 0x1e, 0xb6, 0xb0, 0x61, 0x3b, 0x09, 0x5b, 0x78, 0x2a, 0xcc, 0xb8, 0xeb, 0x0e, 0x27, 0x01, 0x82, 0xae, 0x18, 0xba, 0xc5, 0xb1, 0x6e, 0x34, 0x6c, 0xe5, 0xe2, 0x7e, 0xb0, 0xfb, 0xa2, 0x03, 0x76, 0x13, 0x91, 0x38, 0x8e, 0x49, 0x8d, 0xa0, 0xdd, 0x5b, 0x1c, 0xed, 0x26, 0xa7, 0x74, 0x16, 0x80, 0xbb, 0xb5, 0x00, 0xdc, 0x4d, 0x4d, 0x99, 0x66, 0x04, 0xde, 0x7d, 0xd1, 0xc1, 0xbb, 0xe9, 0x29, 0x23, 0x1e, 0x01, 0xbc, 0xaf, 0xfa, 0x00, 0x6f, 0x96, 0x8a, 0xae, 0x46, 0x8a, 0x86, 0x20, 0xde, 0x97, 0x5d, 0xc4, 0x9b, 0x8f, 0x44, 0xcb, 0x5c, 0x78, 0x14, 0xf2, 0x1e, 0x8c, 0x41, 0x5e, 0x06, 0x51, 0x9f, 0x8a, - 0x54, 0x31, 0x05, 0xf3, 0x1e, 0x8c, 0x61, 0xde, 0xe2, 0x14, 0x85, 0x53, 0x40, 0xef, 0x8f, 0xc3, + 0x54, 0x31, 0x05, 0xf3, 0x1e, 0x8c, 0x61, 0xde, 0xe2, 0x14, 0x85, 0x53, 0x40, 0xef, 0x4f, 0xc3, 0x41, 0x6f, 0x34, 0x2c, 0xe5, 0xc3, 0x9c, 0x0d, 0xf5, 0x2a, 0x11, 0xa8, 0xb7, 0x1c, 0x89, 0xd0, 0x98, 0xfa, 0x99, 0x61, 0xef, 0x71, 0x08, 0xec, 0x65, 0x00, 0xf5, 0x46, 0xa4, 0xf2, 0x19, 0x70, 0xef, 0x71, 0x08, 0xee, 0x5d, 0x98, 0xaa, 0x76, 0x2a, 0xf0, 0xbd, 0x17, 0x04, 0xbe, 0x28, 0x22, @@ -3948,12 +3932,12 @@ var fileDescriptor_252557cfdd89a31a = []byte{ 0x0e, 0xe8, 0x7b, 0x30, 0x06, 0x7d, 0x97, 0xa6, 0x58, 0xda, 0xec, 0xd8, 0x37, 0x23, 0x66, 0x19, 0xea, 0xdd, 0x4d, 0x66, 0x41, 0xcc, 0x4b, 0xcf, 0x90, 0x40, 0x3c, 0xe2, 0xe1, 0x48, 0x4e, 0x8c, 0x4d, 0x53, 0x37, 0x39, 0x8a, 0x65, 0x0d, 0xe9, 0x06, 0xc1, 0x42, 0x9e, 0x37, 0x9b, 0x80, 0x93, - 0x29, 0xf6, 0xf0, 0x79, 0x30, 0xe9, 0x4f, 0x82, 0x27, 0x4b, 0x91, 0xb2, 0x1f, 0x47, 0xe5, 0x38, + 0x29, 0xf6, 0xf0, 0x79, 0x30, 0xe9, 0xcf, 0x82, 0x27, 0x4b, 0x91, 0xb2, 0x1f, 0x47, 0xe5, 0x38, 0x8e, 0xf2, 0xa1, 0xe7, 0x78, 0x10, 0x3d, 0xaf, 0x40, 0x9e, 0x60, 0x8a, 0x11, 0x60, 0xac, 0x1a, 0x2e, 0x30, 0xbe, 0x09, 0x0b, 0x34, 0x76, 0x32, 0x8c, 0xcd, 0x03, 0x52, 0x92, 0x06, 0xa4, 0x32, 0xf9, 0xc1, 0xd6, 0x85, 0x45, 0xa6, 0xe7, 0x61, 0xd1, 0xc7, 0xeb, 0x62, 
0x15, 0x86, 0x12, 0x45, - 0x97, 0x7b, 0x93, 0x83, 0x96, 0xbf, 0x0a, 0xde, 0x0a, 0x79, 0x88, 0x3a, 0x0c, 0xfc, 0x0a, 0x5f, - 0x13, 0xf8, 0x8d, 0x7f, 0x69, 0xf0, 0xeb, 0xc7, 0x5e, 0x89, 0x20, 0xf6, 0xfa, 0x8f, 0xe0, 0xed, + 0x97, 0x7b, 0x93, 0x83, 0x96, 0xbf, 0x09, 0xde, 0x0a, 0x79, 0x88, 0x3a, 0x0c, 0xfc, 0x0a, 0xdf, + 0x10, 0xf8, 0x8d, 0x7f, 0x65, 0xf0, 0xeb, 0xc7, 0x5e, 0x89, 0x20, 0xf6, 0xfa, 0x8f, 0xe0, 0xed, 0x89, 0x0b, 0x65, 0x5b, 0x7a, 0x1b, 0x73, 0x34, 0x44, 0xbf, 0x49, 0x76, 0xd2, 0xd5, 0xcf, 0x39, 0xe6, 0x21, 0x9f, 0x84, 0xcb, 0x0d, 0x39, 0x39, 0x1e, 0x51, 0x5c, 0x20, 0xc5, 0x42, 0x3e, 0x07, 0x52, 0x22, 0x24, 0x1e, 0x62, 0x16, 0x20, 0x0a, 0x32, 0xf9, 0x24, 0x7c, 0xd4, 0xec, 0x78, 0xe8, @@ -3968,88 +3952,88 @@ var fileDescriptor_252557cfdd89a31a = []byte{ 0x67, 0x8b, 0xdb, 0xb8, 0xab, 0x5d, 0x60, 0x73, 0x8e, 0xc9, 0xcc, 0xb6, 0xb9, 0xcb, 0x21, 0x53, 0xf6, 0x51, 0xc8, 0xe8, 0x49, 0x6b, 0x60, 0xe1, 0x36, 0x2f, 0x99, 0xb8, 0x6d, 0xd4, 0x80, 0x34, 0xbe, 0xc0, 0x7d, 0xdb, 0xaa, 0x64, 0xa8, 0x0d, 0x5f, 0x19, 0xc7, 0xb0, 0xe4, 0xf7, 0x56, 0x85, - 0x58, 0xee, 0xbf, 0x3f, 0x5b, 0x11, 0x19, 0xf7, 0x73, 0x7a, 0x4f, 0xb3, 0x71, 0xcf, 0xb0, 0x2f, - 0x65, 0x2e, 0x3f, 0x79, 0x65, 0xa5, 0x1d, 0x28, 0x05, 0xe3, 0x7e, 0xe8, 0x7c, 0x9f, 0x80, 0xa2, - 0x89, 0x6d, 0x55, 0xeb, 0x2b, 0x81, 0x4c, 0xbe, 0xc0, 0x88, 0xbc, 0xc0, 0x73, 0x08, 0x8f, 0x84, - 0xc6, 0x7f, 0xf4, 0x12, 0xe4, 0xbc, 0xd4, 0x41, 0xa0, 0xd3, 0x99, 0x50, 0xff, 0xf0, 0x78, 0xa5, - 0xbf, 0x08, 0x9e, 0xca, 0x60, 0x45, 0xa5, 0x0e, 0x69, 0x13, 0x5b, 0x83, 0x2e, 0xab, 0x71, 0x94, - 0x36, 0x9e, 0x9f, 0x2d, 0x73, 0x20, 0xd4, 0x41, 0xd7, 0x96, 0xb9, 0xb0, 0xf4, 0x16, 0xa4, 0x19, - 0x05, 0xe5, 0x21, 0x73, 0xbc, 0x7f, 0x7f, 0xff, 0xe0, 0x8d, 0x7d, 0x31, 0x86, 0x00, 0xd2, 0x9b, - 0xb5, 0x5a, 0xfd, 0xb0, 0x29, 0x0a, 0x28, 0x07, 0xa9, 0xcd, 0xad, 0x03, 0xb9, 0x29, 0xc6, 0x09, - 0x59, 0xae, 0xef, 0xd6, 0x6b, 0x4d, 0x31, 0x81, 0x16, 0xa0, 0xc8, 0xbe, 0x95, 0x7b, 0x07, 0xf2, - 0x83, 0xcd, 0xa6, 0x98, 0xf4, 0x91, 0x8e, 0xea, 0xfb, 
0xdb, 0x75, 0x59, 0x4c, 0x49, 0xdf, 0x81, - 0xab, 0x91, 0xb9, 0x86, 0x57, 0x2e, 0x11, 0x7c, 0xe5, 0x12, 0xe9, 0xd7, 0x71, 0x82, 0xc6, 0xa2, - 0x12, 0x08, 0xb4, 0x3b, 0x32, 0xf1, 0x8d, 0x39, 0xb2, 0x8f, 0x91, 0xd9, 0x13, 0x00, 0x66, 0xe2, - 0x33, 0x6c, 0xb7, 0x3a, 0x2c, 0xa1, 0x61, 0xfe, 0xb2, 0x28, 0x17, 0x39, 0x95, 0x0a, 0x59, 0x8c, - 0xed, 0x1d, 0xdc, 0xb2, 0x15, 0x76, 0xea, 0x18, 0xf8, 0xc9, 0x11, 0x36, 0x42, 0x3d, 0x62, 0x44, - 0xe9, 0xed, 0xb9, 0xd6, 0x32, 0x07, 0x29, 0xb9, 0xde, 0x94, 0xdf, 0x14, 0x13, 0x08, 0x41, 0x89, - 0x7e, 0x2a, 0x47, 0xfb, 0x9b, 0x87, 0x47, 0x8d, 0x03, 0xb2, 0x96, 0x8b, 0x50, 0x76, 0xd6, 0xd2, - 0x21, 0xa6, 0xa4, 0xbf, 0xc7, 0xe1, 0xd1, 0x88, 0xf4, 0x07, 0xdd, 0x01, 0xb0, 0x87, 0x8a, 0x89, - 0x5b, 0xba, 0xd9, 0x8e, 0x36, 0xb2, 0xe6, 0x50, 0xa6, 0x1c, 0x72, 0xce, 0xe6, 0x5f, 0xd6, 0x84, - 0x2a, 0x1b, 0x7a, 0x85, 0x2b, 0x25, 0xb3, 0x72, 0x20, 0xdf, 0xf5, 0x90, 0x62, 0x12, 0x6e, 0x11, - 0xc5, 0x74, 0x6d, 0xa9, 0x62, 0xca, 0x8f, 0x1e, 0xf8, 0x41, 0xf2, 0x80, 0x06, 0x9a, 0x99, 0xcb, - 0xb1, 0x3e, 0x18, 0xcd, 0x08, 0x16, 0x7a, 0x13, 0x1e, 0x1d, 0x89, 0x93, 0xae, 0xd2, 0xd4, 0xac, - 0xe1, 0xf2, 0x91, 0x60, 0xb8, 0xe4, 0xaa, 0xa5, 0xdf, 0x25, 0xfc, 0x0b, 0x1b, 0xcc, 0xf6, 0x0e, - 0x20, 0x6d, 0xd9, 0xaa, 0x3d, 0xb0, 0xb8, 0xc1, 0xbd, 0x34, 0x6b, 0xea, 0xb8, 0xe6, 0x7c, 0x1c, - 0x51, 0x71, 0x99, 0xab, 0xf9, 0x76, 0xbd, 0x2d, 0xe9, 0x36, 0x94, 0x82, 0x8b, 0x13, 0x7d, 0x64, - 0x3c, 0x9f, 0x13, 0x97, 0xee, 0x02, 0x1a, 0x4f, 0xaa, 0x43, 0xca, 0x28, 0x42, 0x58, 0x19, 0xe5, - 0xf7, 0x02, 0x3c, 0x36, 0x21, 0x81, 0x46, 0xaf, 0x8f, 0xec, 0xf3, 0xcb, 0xf3, 0xa4, 0xdf, 0x6b, - 0x8c, 0x16, 0xdc, 0x69, 0xe9, 0x16, 0x14, 0xfc, 0xf4, 0xd9, 0x26, 0xf9, 0x8b, 0x84, 0xe7, 0xf3, - 0x83, 0xf5, 0x1e, 0x2f, 0x24, 0x0a, 0x5f, 0x31, 0x24, 0x06, 0xed, 0x2c, 0x3e, 0xa7, 0x9d, 0x1d, - 0x85, 0xd9, 0x59, 0x62, 0xae, 0x4c, 0x73, 0x2e, 0x6b, 0x4b, 0x7e, 0x35, 0x6b, 0x0b, 0x1c, 0xb8, - 0x54, 0xf0, 0xc0, 0x8d, 0xc5, 0xf5, 0x74, 0x48, 0x5c, 0x7f, 0x13, 0xc0, 0xab, 0x94, 0x91, 
0xa8, - 0x65, 0xea, 0x83, 0x7e, 0x9b, 0x9a, 0x49, 0x4a, 0x66, 0x0d, 0x74, 0x1b, 0x52, 0xc4, 0xdc, 0x9c, - 0xc5, 0x1c, 0xf7, 0xbc, 0xc4, 0x5c, 0x7c, 0x95, 0x36, 0xc6, 0x2d, 0x69, 0x80, 0xc6, 0x4b, 0xf1, - 0x11, 0x5d, 0xbc, 0x1a, 0xec, 0xe2, 0xf1, 0xc8, 0xa2, 0x7e, 0x78, 0x57, 0xef, 0x43, 0x8a, 0x9a, - 0x07, 0xc9, 0x6f, 0xe8, 0xfd, 0x0f, 0xc7, 0x4b, 0xe4, 0x1b, 0xfd, 0x04, 0x40, 0xb5, 0x6d, 0x53, - 0x3b, 0x1d, 0x78, 0x1d, 0xac, 0x84, 0x9b, 0xd7, 0xa6, 0xc3, 0xb7, 0x75, 0x8d, 0xdb, 0xd9, 0x92, - 0x27, 0xea, 0xb3, 0x35, 0x9f, 0x42, 0x69, 0x1f, 0x4a, 0x41, 0x59, 0x27, 0xc3, 0x67, 0x63, 0x08, - 0x66, 0xf8, 0x0c, 0xb0, 0xf1, 0x0c, 0xdf, 0xc5, 0x07, 0x09, 0x76, 0xc9, 0x45, 0x1b, 0xd2, 0x7f, - 0x05, 0x28, 0xf8, 0xad, 0xf3, 0x6b, 0xce, 0x5b, 0xa7, 0xa4, 0xea, 0x57, 0xc7, 0xd2, 0xd6, 0xcc, - 0xb9, 0x6a, 0x1d, 0x7f, 0x93, 0x59, 0xeb, 0x07, 0x02, 0x64, 0xdd, 0xc9, 0x07, 0xef, 0xbb, 0x02, - 0x17, 0x84, 0x6c, 0xed, 0xe2, 0xfe, 0x4b, 0x2a, 0x76, 0x1d, 0x98, 0x70, 0xaf, 0x03, 0xef, 0xba, - 0x09, 0x55, 0x54, 0x29, 0xd0, 0xbf, 0xd2, 0xdc, 0xa6, 0x9c, 0xfc, 0xf1, 0x57, 0x7c, 0x1c, 0x24, - 0x93, 0x40, 0xdf, 0x83, 0xb4, 0xda, 0x72, 0x0b, 0xa0, 0xa5, 0x90, 0xca, 0xa0, 0xc3, 0xba, 0xd6, - 0x1c, 0x6e, 0x52, 0x4e, 0x99, 0x4b, 0xf0, 0x51, 0xc5, 0x9d, 0x51, 0x49, 0xaf, 0x11, 0xbd, 0x8c, - 0x27, 0xe8, 0x36, 0x4b, 0x00, 0xc7, 0xfb, 0x0f, 0x0e, 0xb6, 0x77, 0xee, 0xed, 0xd4, 0xb7, 0x79, - 0x4a, 0xb5, 0xbd, 0x5d, 0xdf, 0x16, 0xe3, 0x84, 0x4f, 0xae, 0x3f, 0x38, 0x38, 0xa9, 0x6f, 0x8b, - 0x09, 0xe9, 0x2e, 0xe4, 0x5c, 0xd7, 0x83, 0x2a, 0x90, 0x71, 0x8a, 0xb9, 0x02, 0x77, 0x00, 0xbc, - 0x36, 0xbf, 0x04, 0x29, 0x43, 0x7f, 0x8f, 0xdf, 0xcd, 0x25, 0x64, 0xd6, 0x90, 0xda, 0x50, 0x1e, - 0xf1, 0x5b, 0xe8, 0x2e, 0x64, 0x8c, 0xc1, 0xa9, 0xe2, 0x18, 0xed, 0x48, 0xe9, 0xdb, 0x01, 0x9a, - 0x83, 0xd3, 0xae, 0xd6, 0xba, 0x8f, 0x2f, 0x9d, 0x65, 0x32, 0x06, 0xa7, 0xf7, 0x99, 0x6d, 0xb3, - 0x5e, 0xe2, 0xfe, 0x5e, 0x2e, 0x20, 0xeb, 0x1c, 0x55, 0xf4, 0x7d, 0xc8, 0xb9, 0x2e, 0xd1, 0xbd, - 0x5b, 0x8f, 0xf4, 0xa5, 0x5c, 
0xbd, 0x27, 0x82, 0x6e, 0xc2, 0x82, 0xa5, 0x9d, 0xf7, 0x9d, 0xc2, - 0x3f, 0x2b, 0xf5, 0xc4, 0xe9, 0x99, 0x29, 0xb3, 0x1f, 0x7b, 0x4e, 0x35, 0x82, 0x44, 0x42, 0x71, - 0xd4, 0x57, 0x7c, 0x93, 0x03, 0x08, 0x89, 0xd8, 0x89, 0xb0, 0x88, 0xfd, 0xb3, 0x38, 0xe4, 0x7d, - 0xd7, 0x09, 0xe8, 0xbb, 0x3e, 0xc7, 0x55, 0x0a, 0x09, 0x35, 0x3e, 0x5e, 0xef, 0xf2, 0x3a, 0x38, - 0xb1, 0xf8, 0xfc, 0x13, 0x8b, 0xba, 0xbd, 0x71, 0x6e, 0x25, 0x92, 0x73, 0xdf, 0x4a, 0x3c, 0x07, - 0xc8, 0xd6, 0x6d, 0xb5, 0xab, 0x5c, 0xe8, 0xb6, 0xd6, 0x3f, 0x57, 0x98, 0x69, 0x30, 0x37, 0x23, - 0xd2, 0x3f, 0x27, 0xf4, 0xc7, 0x21, 0xb5, 0x92, 0x9f, 0x0a, 0x90, 0x75, 0x61, 0xdf, 0xbc, 0x57, - 0xdb, 0x57, 0x20, 0xcd, 0x91, 0x0d, 0xbb, 0xdb, 0xe6, 0xad, 0xd0, 0xeb, 0x97, 0x2a, 0x64, 0x7b, - 0xd8, 0x56, 0xa9, 0xcf, 0x64, 0x61, 0xd2, 0x6d, 0xdf, 0x7c, 0x19, 0xf2, 0xbe, 0x67, 0x01, 0xc4, - 0x8d, 0xee, 0xd7, 0xdf, 0x10, 0x63, 0xd5, 0xcc, 0x87, 0x1f, 0xaf, 0x26, 0xf6, 0xf1, 0x7b, 0xe4, - 0x84, 0xc9, 0xf5, 0x5a, 0xa3, 0x5e, 0xbb, 0x2f, 0x0a, 0xd5, 0xfc, 0x87, 0x1f, 0xaf, 0x66, 0x64, - 0x4c, 0x2b, 0xef, 0x37, 0xef, 0x43, 0x79, 0x64, 0x63, 0x82, 0x07, 0x1a, 0x41, 0x69, 0xfb, 0xf8, - 0x70, 0x6f, 0xa7, 0xb6, 0xd9, 0xac, 0x2b, 0x27, 0x07, 0xcd, 0xba, 0x28, 0xa0, 0x47, 0x61, 0x71, - 0x6f, 0xe7, 0x07, 0x8d, 0xa6, 0x52, 0xdb, 0xdb, 0xa9, 0xef, 0x37, 0x95, 0xcd, 0x66, 0x73, 0xb3, - 0x76, 0x5f, 0x8c, 0x6f, 0xfc, 0x21, 0x0f, 0xe5, 0xcd, 0xad, 0xda, 0x0e, 0xc1, 0x76, 0x5a, 0x4b, - 0xa5, 0xee, 0xa1, 0x06, 0x49, 0x5a, 0x43, 0x9c, 0xf8, 0x38, 0xb0, 0x3a, 0xf9, 0x3a, 0x05, 0xdd, - 0x83, 0x14, 0x2d, 0x2f, 0xa2, 0xc9, 0xaf, 0x05, 0xab, 0x53, 0xee, 0x57, 0xc8, 0x60, 0xe8, 0x71, - 0x9a, 0xf8, 0x7c, 0xb0, 0x3a, 0xf9, 0xba, 0x05, 0xed, 0x41, 0xc6, 0xa9, 0x2e, 0x4d, 0x7b, 0xd3, - 0x57, 0x9d, 0x7a, 0x07, 0x42, 0xa6, 0xc6, 0xaa, 0x74, 0x93, 0x5f, 0x16, 0x56, 0xa7, 0x5c, 0xc4, - 0xa0, 0x1d, 0x48, 0xf3, 0x0a, 0xc9, 0x94, 0xc7, 0x82, 0xd5, 0x69, 0x57, 0x2b, 0x48, 0x86, 0x9c, - 0x57, 0xff, 0x9c, 0xfe, 0x5e, 0xb2, 0x3a, 0xc3, 0x1d, 0x13, 0x7a, 
0x0b, 0x8a, 0xc1, 0xaa, 0xcb, - 0x6c, 0x0f, 0x12, 0xab, 0x33, 0x5e, 0xe2, 0x10, 0xfd, 0xc1, 0x12, 0xcc, 0x6c, 0x0f, 0x14, 0xab, - 0x33, 0xde, 0xe9, 0xa0, 0x77, 0x60, 0x61, 0xbc, 0x44, 0x32, 0xfb, 0x7b, 0xc5, 0xea, 0x1c, 0xb7, - 0x3c, 0xa8, 0x07, 0x28, 0xa4, 0xb4, 0x32, 0xc7, 0xf3, 0xc5, 0xea, 0x3c, 0x97, 0x3e, 0xa8, 0x0d, - 0xe5, 0xd1, 0x72, 0xc5, 0xac, 0xcf, 0x19, 0xab, 0x33, 0x5f, 0x00, 0xb1, 0x5e, 0x82, 0xd8, 0x7d, - 0xd6, 0xe7, 0x8d, 0xd5, 0x99, 0xef, 0x83, 0xd0, 0x31, 0x80, 0x0f, 0x7b, 0xce, 0xf0, 0xdc, 0xb1, - 0x3a, 0xcb, 0xcd, 0x10, 0x32, 0x60, 0x31, 0x0c, 0x94, 0xce, 0xf3, 0xfa, 0xb1, 0x3a, 0xd7, 0x85, - 0x11, 0xb1, 0xe7, 0x20, 0xbc, 0x9c, 0xed, 0x35, 0x64, 0x75, 0xc6, 0x9b, 0xa3, 0xad, 0xfa, 0x27, - 0x9f, 0x2f, 0x0b, 0x9f, 0x7e, 0xbe, 0x2c, 0xfc, 0xeb, 0xf3, 0x65, 0xe1, 0xa3, 0x2f, 0x96, 0x63, - 0x9f, 0x7e, 0xb1, 0x1c, 0xfb, 0xc7, 0x17, 0xcb, 0xb1, 0x1f, 0x3e, 0x7b, 0xae, 0xd9, 0x9d, 0xc1, - 0xe9, 0x5a, 0x4b, 0xef, 0xad, 0xfb, 0x1f, 0x90, 0x87, 0x3d, 0x5b, 0x3f, 0x4d, 0xd3, 0x80, 0x7a, - 0xeb, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x14, 0x5f, 0x7b, 0xc4, 0xd6, 0x2e, 0x00, 0x00, + 0x58, 0xee, 0x97, 0x9f, 0xad, 0x88, 0x8c, 0xfb, 0x39, 0xbd, 0xa7, 0xd9, 0xb8, 0x67, 0xd8, 0x97, + 0x32, 0x97, 0x9f, 0xbc, 0xb2, 0xd2, 0x6d, 0x28, 0x05, 0xe3, 0x3e, 0x7a, 0x02, 0x8a, 0x26, 0xb6, + 0x55, 0xad, 0xaf, 0x04, 0xb2, 0xf6, 0x02, 0x23, 0xf2, 0x62, 0xce, 0x21, 0x3c, 0x12, 0x1a, 0xeb, + 0xd1, 0x4b, 0x90, 0xf3, 0xd2, 0x04, 0x81, 0x0e, 0x7d, 0x42, 0xad, 0xc3, 0xe3, 0x95, 0xfe, 0x2a, + 0x78, 0x2a, 0x83, 0xd5, 0x93, 0x3a, 0xa4, 0x4d, 0x6c, 0x0d, 0xba, 0xac, 0x9e, 0x51, 0xda, 0x78, + 0x7e, 0xb6, 0x2c, 0x81, 0x50, 0x07, 0x5d, 0x5b, 0xe6, 0xc2, 0xd2, 0x5b, 0x90, 0x66, 0x14, 0x94, + 0x87, 0xcc, 0xf1, 0xfe, 0xfd, 0xfd, 0x83, 0x37, 0xf6, 0xc5, 0x18, 0x02, 0x48, 0x6f, 0xd6, 0x6a, + 0xf5, 0xc3, 0xa6, 0x28, 0xa0, 0x1c, 0xa4, 0x36, 0xb7, 0x0e, 0xe4, 0xa6, 0x18, 0x27, 0x64, 0xb9, + 0xbe, 0x5b, 0xaf, 0x35, 0xc5, 0x04, 0x5a, 0x80, 0x22, 0xfb, 0x56, 0xee, 0x1d, 0xc8, 0x0f, 0x36, + 0x9b, 0x62, 
0xd2, 0x47, 0x3a, 0xaa, 0xef, 0x6f, 0xd7, 0x65, 0x31, 0x25, 0x7d, 0x0f, 0xae, 0x46, + 0xe6, 0x15, 0x5e, 0x69, 0x44, 0xf0, 0x95, 0x46, 0xa4, 0xdf, 0xc4, 0x09, 0xf2, 0x8a, 0x4a, 0x16, + 0xd0, 0xee, 0xc8, 0xc4, 0x37, 0xe6, 0xc8, 0x34, 0x46, 0x66, 0x4f, 0xc0, 0x96, 0x89, 0xcf, 0xb0, + 0xdd, 0xea, 0xb0, 0xe4, 0x85, 0xf9, 0xc6, 0xa2, 0x5c, 0xe4, 0x54, 0x2a, 0x64, 0x31, 0xb6, 0x77, + 0x70, 0xcb, 0x56, 0xd8, 0x09, 0x63, 0x40, 0x27, 0x47, 0xd8, 0x08, 0xf5, 0x88, 0x11, 0xa5, 0xb7, + 0xe7, 0x5a, 0xcb, 0x1c, 0xa4, 0xe4, 0x7a, 0x53, 0x7e, 0x53, 0x4c, 0x20, 0x04, 0x25, 0xfa, 0xa9, + 0x1c, 0xed, 0x6f, 0x1e, 0x1e, 0x35, 0x0e, 0xc8, 0x5a, 0x2e, 0x42, 0xd9, 0x59, 0x4b, 0x87, 0x98, + 0x92, 0xfe, 0x11, 0x87, 0x47, 0x23, 0x52, 0x1d, 0x74, 0x07, 0xc0, 0x1e, 0x2a, 0x26, 0x6e, 0xe9, + 0x66, 0x3b, 0xda, 0xc8, 0x9a, 0x43, 0x99, 0x72, 0xc8, 0x39, 0x9b, 0x7f, 0x59, 0x13, 0x2a, 0x6a, + 0xe8, 0x15, 0xae, 0x94, 0xcc, 0xca, 0x81, 0x77, 0xd7, 0x43, 0x0a, 0x47, 0xb8, 0x45, 0x14, 0xd3, + 0xb5, 0xa5, 0x8a, 0x29, 0x3f, 0x7a, 0xe0, 0x07, 0xc4, 0x03, 0x1a, 0x54, 0x66, 0x2e, 0xbd, 0xfa, + 0x20, 0x33, 0x23, 0x58, 0xe8, 0x4d, 0x78, 0x74, 0x24, 0x26, 0xba, 0x4a, 0x53, 0xb3, 0x86, 0xc6, + 0x47, 0x82, 0xa1, 0x91, 0xab, 0x96, 0x7e, 0x9f, 0xf0, 0x2f, 0x6c, 0x30, 0xb3, 0x3b, 0x80, 0xb4, + 0x65, 0xab, 0xf6, 0xc0, 0xe2, 0x06, 0xf7, 0xd2, 0xac, 0x69, 0xe2, 0x9a, 0xf3, 0x71, 0x44, 0xc5, + 0x65, 0xae, 0xe6, 0xbb, 0xf5, 0xb6, 0x88, 0x83, 0x0d, 0x2e, 0x4e, 0xf4, 0x91, 0xf1, 0x7c, 0x4e, + 0x5c, 0xba, 0x0b, 0x68, 0x3c, 0x81, 0x0e, 0x29, 0x99, 0x08, 0x61, 0x25, 0x93, 0x3f, 0x08, 0xf0, + 0xd8, 0x84, 0x64, 0x19, 0xbd, 0x3e, 0xb2, 0xcf, 0x2f, 0xcf, 0x93, 0x6a, 0xaf, 0x31, 0x5a, 0x70, + 0xa7, 0xa5, 0x5b, 0x50, 0xf0, 0xd3, 0x67, 0x9b, 0xe4, 0x97, 0x71, 0xcf, 0xe7, 0x07, 0x6b, 0x3b, + 0x5e, 0xf8, 0x13, 0xbe, 0x66, 0xf8, 0x0b, 0xda, 0x59, 0x7c, 0x4e, 0x3b, 0x3b, 0x0a, 0xb3, 0xb3, + 0xc4, 0x5c, 0x59, 0xe5, 0x5c, 0xd6, 0x96, 0xfc, 0x7a, 0xd6, 0x16, 0x38, 0x70, 0xa9, 0x60, 0xda, + 0xfa, 0x26, 0x80, 0x57, 0xf0, 0x22, 0x01, 0xc9, 
0xd4, 0x07, 0xfd, 0x36, 0xb5, 0x80, 0x94, 0xcc, + 0x1a, 0xe8, 0x36, 0xa4, 0x88, 0x25, 0x39, 0xeb, 0x34, 0xee, 0x54, 0x89, 0x25, 0xf8, 0x0a, 0x66, + 0x8c, 0x5b, 0xd2, 0x00, 0x8d, 0x57, 0xd4, 0x23, 0xba, 0x78, 0x35, 0xd8, 0xc5, 0xe3, 0x91, 0xb5, + 0xf9, 0xf0, 0xae, 0xde, 0x87, 0x14, 0xdd, 0x79, 0x92, 0x70, 0xd1, 0x6b, 0x1c, 0x0e, 0x7b, 0xc8, + 0x37, 0xfa, 0x19, 0x80, 0x6a, 0xdb, 0xa6, 0x76, 0x3a, 0xf0, 0x3a, 0x58, 0x09, 0xb7, 0x9c, 0x4d, + 0x87, 0x6f, 0xeb, 0x1a, 0x37, 0xa1, 0x25, 0x4f, 0xd4, 0x67, 0x46, 0x3e, 0x85, 0xd2, 0x3e, 0x94, + 0x82, 0xb2, 0x4e, 0xa2, 0xce, 0xc6, 0x10, 0x4c, 0xd4, 0x19, 0xee, 0xe2, 0x89, 0xba, 0x9b, 0xe6, + 0x27, 0xd8, 0x5d, 0x15, 0x6d, 0x48, 0xff, 0x15, 0xa0, 0xe0, 0x37, 0xbc, 0x6f, 0x38, 0xfd, 0x9c, + 0x92, 0x71, 0x5f, 0x1d, 0xcb, 0x3e, 0x33, 0xe7, 0xaa, 0x75, 0xfc, 0x6d, 0x26, 0x9f, 0x1f, 0x08, + 0x90, 0x75, 0x27, 0x1f, 0xbc, 0xb6, 0x0a, 0xdc, 0xf3, 0xb1, 0xb5, 0x8b, 0xfb, 0xef, 0x9a, 0xd8, + 0xad, 0x5e, 0xc2, 0xbd, 0xd5, 0xbb, 0xeb, 0xe6, 0x4a, 0x51, 0x15, 0x3d, 0xff, 0x4a, 0x73, 0x9b, + 0x72, 0x52, 0xc3, 0x5f, 0xf3, 0x71, 0x90, 0x24, 0x01, 0xfd, 0x00, 0xd2, 0x6a, 0xcb, 0xad, 0x63, + 0x96, 0x42, 0x0a, 0x7c, 0x0e, 0xeb, 0x5a, 0x73, 0xb8, 0x49, 0x39, 0x65, 0x2e, 0xc1, 0x47, 0x15, + 0x77, 0x46, 0x25, 0xbd, 0x46, 0xf4, 0x32, 0x9e, 0xa0, 0x47, 0x2c, 0x01, 0x1c, 0xef, 0x3f, 0x38, + 0xd8, 0xde, 0xb9, 0xb7, 0x53, 0xdf, 0xe6, 0xd9, 0xd2, 0xf6, 0x76, 0x7d, 0x5b, 0x8c, 0x13, 0x3e, + 0xb9, 0xfe, 0xe0, 0xe0, 0xa4, 0xbe, 0x2d, 0x26, 0xa4, 0xbb, 0x90, 0x73, 0xbd, 0x0a, 0x41, 0xf5, + 0x4e, 0x4d, 0x56, 0xe0, 0x67, 0x9b, 0x97, 0xd8, 0x97, 0x20, 0x65, 0xe8, 0xef, 0xf1, 0x2b, 0xb6, + 0x84, 0xcc, 0x1a, 0x52, 0x1b, 0xca, 0x23, 0x2e, 0x09, 0xdd, 0x85, 0x8c, 0x31, 0x38, 0x55, 0x1c, + 0xa3, 0x1d, 0xa9, 0x60, 0x3b, 0x78, 0x71, 0x70, 0xda, 0xd5, 0x5a, 0xf7, 0xf1, 0xa5, 0xb3, 0x4c, + 0xc6, 0xe0, 0xf4, 0x3e, 0xb3, 0x6d, 0xd6, 0x4b, 0xdc, 0xdf, 0xcb, 0x05, 0x64, 0x9d, 0xa3, 0x8a, + 0x7e, 0x08, 0x39, 0xd7, 0xdb, 0xb9, 0x57, 0xe4, 0x91, 0x6e, 0x92, 0xab, 0xf7, 0x44, 
0xd0, 0x4d, + 0x58, 0xb0, 0xb4, 0xf3, 0xbe, 0x53, 0xbf, 0x67, 0x15, 0x9b, 0x38, 0x3d, 0x33, 0x65, 0xf6, 0x63, + 0xcf, 0x29, 0x2a, 0x90, 0x20, 0x27, 0x8e, 0xfa, 0x8a, 0x6f, 0x73, 0x00, 0x21, 0xc1, 0x38, 0x11, + 0x16, 0x8c, 0x7f, 0x11, 0x87, 0xbc, 0xef, 0x56, 0x00, 0x7d, 0xdf, 0xe7, 0xb8, 0x4a, 0x21, 0x51, + 0xc4, 0xc7, 0xeb, 0xdd, 0x41, 0x07, 0x27, 0x16, 0x9f, 0x7f, 0x62, 0x51, 0x97, 0x30, 0xce, 0xe5, + 0x42, 0x72, 0xee, 0xcb, 0x85, 0xe7, 0x00, 0xd9, 0xba, 0xad, 0x76, 0x95, 0x0b, 0xdd, 0xd6, 0xfa, + 0xe7, 0x0a, 0x33, 0x0d, 0xe6, 0x66, 0x44, 0xfa, 0xe7, 0x84, 0xfe, 0x38, 0xa4, 0x56, 0xf2, 0x73, + 0x01, 0xb2, 0x2e, 0xa2, 0x9b, 0xf7, 0x86, 0xfa, 0x0a, 0xa4, 0x39, 0x68, 0x61, 0x57, 0xd4, 0xbc, + 0x15, 0x7a, 0x8b, 0x52, 0x85, 0x6c, 0x0f, 0xdb, 0x2a, 0xf5, 0x99, 0x2c, 0x02, 0xba, 0xed, 0x9b, + 0x2f, 0x43, 0xde, 0x77, 0xbb, 0x4f, 0xdc, 0xe8, 0x7e, 0xfd, 0x0d, 0x31, 0x56, 0xcd, 0x7c, 0xf8, + 0xf1, 0x6a, 0x62, 0x1f, 0xbf, 0x47, 0x4e, 0x98, 0x5c, 0xaf, 0x35, 0xea, 0xb5, 0xfb, 0xa2, 0x50, + 0xcd, 0x7f, 0xf8, 0xf1, 0x6a, 0x46, 0xc6, 0xb4, 0x80, 0x7e, 0xf3, 0x3e, 0x94, 0x47, 0x36, 0x26, + 0x78, 0xa0, 0x11, 0x94, 0xb6, 0x8f, 0x0f, 0xf7, 0x76, 0x6a, 0x9b, 0xcd, 0xba, 0x72, 0x72, 0xd0, + 0xac, 0x8b, 0x02, 0x7a, 0x14, 0x16, 0xf7, 0x76, 0x7e, 0xd4, 0x68, 0x2a, 0xb5, 0xbd, 0x9d, 0xfa, + 0x7e, 0x53, 0xd9, 0x6c, 0x36, 0x37, 0x6b, 0xf7, 0xc5, 0xf8, 0xc6, 0x1f, 0xf3, 0x50, 0xde, 0xdc, + 0xaa, 0xed, 0x10, 0xd8, 0xa6, 0xb5, 0x54, 0xea, 0x1e, 0x6a, 0x90, 0xa4, 0xa5, 0xc0, 0x89, 0x6f, + 0xfc, 0xaa, 0x93, 0x6f, 0x45, 0xd0, 0x3d, 0x48, 0xd1, 0x2a, 0x21, 0x9a, 0xfc, 0xe8, 0xaf, 0x3a, + 0xe5, 0x9a, 0x84, 0x0c, 0x86, 0x1e, 0xa7, 0x89, 0xaf, 0x00, 0xab, 0x93, 0x6f, 0x4d, 0xd0, 0x1e, + 0x64, 0x9c, 0x22, 0xd1, 0xb4, 0xa7, 0x79, 0xd5, 0xa9, 0x57, 0x19, 0x64, 0x6a, 0xac, 0xd8, 0x36, + 0xf9, 0x81, 0x60, 0x75, 0xca, 0x7d, 0x0a, 0xda, 0x81, 0x34, 0x2f, 0x74, 0x4c, 0x79, 0xf3, 0x57, + 0x9d, 0x76, 0x43, 0x82, 0x64, 0xc8, 0x79, 0x65, 0xcc, 0xe9, 0xcf, 0x1e, 0xab, 0x33, 0x5c, 0x15, + 0xa1, 0xb7, 0xa0, 0x18, 
0x2c, 0xa8, 0xcc, 0xf6, 0xae, 0xb0, 0x3a, 0xe3, 0x5d, 0x0c, 0xd1, 0x1f, + 0xac, 0xae, 0xcc, 0xf6, 0xce, 0xb0, 0x3a, 0xe3, 0xd5, 0x0c, 0x7a, 0x07, 0x16, 0xc6, 0xab, 0x1f, + 0xb3, 0x3f, 0x3b, 0xac, 0xce, 0x71, 0x59, 0x83, 0x7a, 0x80, 0x42, 0xaa, 0x26, 0x73, 0xbc, 0x42, + 0xac, 0xce, 0x73, 0x77, 0x83, 0xda, 0x50, 0x1e, 0xad, 0x44, 0xcc, 0xfa, 0x2a, 0xb1, 0x3a, 0xf3, + 0x3d, 0x0e, 0xeb, 0x25, 0x08, 0xcb, 0x67, 0x7d, 0xa5, 0x58, 0x9d, 0xf9, 0x5a, 0x07, 0x1d, 0x03, + 0xf8, 0x60, 0xe5, 0x0c, 0xaf, 0x16, 0xab, 0xb3, 0x5c, 0xf0, 0x20, 0x03, 0x16, 0xc3, 0xf0, 0xe6, + 0x3c, 0x8f, 0x18, 0xab, 0x73, 0xdd, 0xfb, 0x10, 0x7b, 0x0e, 0x22, 0xc7, 0xd9, 0x1e, 0x35, 0x56, + 0x67, 0xbc, 0x00, 0xda, 0xaa, 0x7f, 0xf2, 0xf9, 0xb2, 0xf0, 0xe9, 0xe7, 0xcb, 0xc2, 0xbf, 0x3f, + 0x5f, 0x16, 0x3e, 0xfa, 0x62, 0x39, 0xf6, 0xe9, 0x17, 0xcb, 0xb1, 0x7f, 0x7e, 0xb1, 0x1c, 0xfb, + 0xf1, 0xb3, 0xe7, 0x9a, 0xdd, 0x19, 0x9c, 0xae, 0xb5, 0xf4, 0xde, 0xba, 0xff, 0x1d, 0x78, 0xd8, + 0xeb, 0xf3, 0xd3, 0x34, 0x0d, 0xa8, 0xb7, 0xfe, 0x17, 0x00, 0x00, 0xff, 0xff, 0x4c, 0xb4, 0x7f, + 0x53, 0x9d, 0x2e, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -6672,13 +6656,6 @@ func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x18 } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } return len(dAtA) - i, nil } @@ -7069,11 +7046,6 @@ func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.RetainHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) - i-- - dAtA[i] = 0x30 - } if len(m.AppHash) > 0 { i -= len(m.AppHash) copy(dAtA[i:], m.AppHash) @@ -8734,10 +8706,6 @@ func (m *ResponseCommit) Size() (n int) { } var l int _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } if m.RetainHeight != 0 { n += 1 + sovTypes(uint64(m.RetainHeight)) } @@ -8933,9 +8901,6 @@ func (m *ResponseFinalizeBlock) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.RetainHeight != 0 { - n += 1 + sovTypes(uint64(m.RetainHeight)) - } return n } @@ -14238,40 +14203,6 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: ResponseCommit: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) @@ -15509,25 +15440,6 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { m.AppHash = []byte{} } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) - } - m.RetainHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RetainHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 8f916427d2..109e3b9fb1 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -169,11 +169,12 @@ Try running these commands: -> data: {"size":0} -> data.hex: 0x7B2273697A65223A307D -> commit +> finalize_block "abc" -> code: OK --> data.hex: 0x0000000000000000 +-> code: OK +-> data.hex: 0x0200000000000000 -> finalize_block "abc" +> commit -> code: OK > info @@ -181,34 +182,36 @@ Try running these commands: -> data: {"size":1} -> data.hex: 0x7B2273697A65223A317D -> commit --> code: OK --> data.hex: 0x0200000000000000 - > query "abc" -> code: OK -> log: exists --> height: 2 +-> height: 1 +-> key: abc +-> key.hex: 616263 -> value: abc -> value.hex: 616263 -> finalize_block "def=xyz" +> finalize_block "def=xyz" "ghi=123" -> code: OK +-> code: OK +-> code: OK +-> data.hex: 0x0600000000000000 > commit -> code: OK --> data.hex: 0x0400000000000000 > query "def" -> code: OK -> log: exists --> height: 3 +-> height: 2 +-> key: def +-> key.hex: 646566 -> value: xyz -> value.hex: 78797A ``` -Note that if we do `finalize_block "abc"` it will store `(abc, abc)`, but if -we do `finalize_block "abc=efg"` it will store `(abc, efg)`. 
+Note that if we do `finalize_block "abc" ...` it will store `(abc, abc)`, but if +we do `finalize_block "abc=efg" ...` it will store `(abc, efg)`. Similarly, you could put the commands in a file and run `abci-cli --verbose batch < myfile`. diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 747d95750a..ed11f68409 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "fmt" "os" + "sync" "testing" "time" @@ -136,8 +137,10 @@ func checkTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) for i := start; i < end; i++ { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, nil, mempool.TxInfo{}) + var rCode uint32 + err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, func(r *abci.ResponseCheckTx) { rCode = r.Code }, mempool.TxInfo{}) require.NoError(t, err, "error after checkTx") + require.Equal(t, code.CodeTypeOK, rCode, "checkTx code is error, txBytes %X", txBytes) } } @@ -173,6 +176,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { case msg := <-newBlockHeaderCh: headerEvent := msg.Data().(types.EventDataNewBlockHeader) n += headerEvent.NumTxs + logger.Info("new transactions", "nTxs", headerEvent.NumTxs, "total", n) case <-time.After(30 * time.Second): t.Fatal("Timed out waiting 30s to commit blocks with transactions") } @@ -202,10 +206,10 @@ func TestMempoolRmBadTx(t *testing.T) { resFinalize, err := app.FinalizeBlock(ctx, &abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}}) require.NoError(t, err) assert.False(t, resFinalize.TxResults[0].IsErr(), fmt.Sprintf("expected no error. 
got %v", resFinalize)) + assert.True(t, len(resFinalize.AppHash) > 0) - resCommit, err := app.Commit(ctx) + _, err = app.Commit(ctx) require.NoError(t, err) - assert.True(t, len(resCommit.Data) > 0) emptyMempoolCh := make(chan struct{}) checkTxRespCh := make(chan struct{}) @@ -263,6 +267,7 @@ type CounterApplication struct { txCount int mempoolTxCount int + mu sync.Mutex } func NewCounterApplication() *CounterApplication { @@ -270,10 +275,16 @@ func NewCounterApplication() *CounterApplication { } func (app *CounterApplication) Info(_ context.Context, req *abci.RequestInfo) (*abci.ResponseInfo, error) { + app.mu.Lock() + defer app.mu.Unlock() + return &abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}, nil } func (app *CounterApplication) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { + app.mu.Lock() + defer app.mu.Unlock() + respTxs := make([]*abci.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { txValue := txAsUint64(tx) @@ -287,10 +298,21 @@ func (app *CounterApplication) FinalizeBlock(_ context.Context, req *abci.Reques app.txCount++ respTxs[i] = &abci.ExecTxResult{Code: code.CodeTypeOK} } - return &abci.ResponseFinalizeBlock{TxResults: respTxs}, nil + + res := &abci.ResponseFinalizeBlock{TxResults: respTxs} + + if app.txCount > 0 { + res.AppHash = make([]byte, 8) + binary.BigEndian.PutUint64(res.AppHash, uint64(app.txCount)) + } + + return res, nil } func (app *CounterApplication) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { + app.mu.Lock() + defer app.mu.Unlock() + txValue := txAsUint64(req.Tx) if txValue != uint64(app.mempoolTxCount) { return &abci.ResponseCheckTx{ @@ -308,13 +330,11 @@ func txAsUint64(tx []byte) uint64 { } func (app *CounterApplication) Commit(context.Context) (*abci.ResponseCommit, error) { + app.mu.Lock() + defer app.mu.Unlock() + app.mempoolTxCount = app.txCount - if app.txCount == 0 { - return &abci.ResponseCommit{}, nil - 
} - hash := make([]byte, 8) - binary.BigEndian.PutUint64(hash, uint64(app.txCount)) - return &abci.ResponseCommit{Data: hash}, nil + return &abci.ResponseCommit{}, nil } func (app *CounterApplication) PrepareProposal(_ context.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 407ec925e0..ec5f707736 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -86,5 +86,5 @@ func (mock *mockProxyApp) FinalizeBlock(_ context.Context, req *abci.RequestFina } func (mock *mockProxyApp) Commit(context.Context) (*abci.ResponseCommit, error) { - return &abci.ResponseCommit{Data: mock.appHash}, nil + return &abci.ResponseCommit{}, nil } diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 99d3c17a14..328dba040a 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -1017,15 +1017,15 @@ type badApp struct { onlyLastHashIsWrong bool } -func (app *badApp) Commit(context.Context) (*abci.ResponseCommit, error) { +func (app *badApp) FinalizeBlock(_ context.Context, _ *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { app.height++ if app.onlyLastHashIsWrong { if app.height == app.numBlocks { - return &abci.ResponseCommit{Data: tmrand.Bytes(8)}, nil + return &abci.ResponseFinalizeBlock{AppHash: tmrand.Bytes(8)}, nil } - return &abci.ResponseCommit{Data: []byte{app.height}}, nil + return &abci.ResponseFinalizeBlock{AppHash: []byte{app.height}}, nil } else if app.allHashesAreWrong { - return &abci.ResponseCommit{Data: tmrand.Bytes(8)}, nil + return &abci.ResponseFinalizeBlock{AppHash: tmrand.Bytes(8)}, nil } panic("either allHashesAreWrong or onlyLastHashIsWrong must be set") diff --git a/internal/state/execution.go b/internal/state/execution.go index cc3e63d7ae..5c46f43532 100644 --- a/internal/state/execution.go +++ 
b/internal/state/execution.go @@ -206,7 +206,7 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, ErrInvalidBlock(err) } startTime := time.Now().UnixNano() - finalizeBlockResponse, err := blockExec.appClient.FinalizeBlock( + fBlockRes, err := blockExec.appClient.FinalizeBlock( ctx, &abci.RequestFinalizeBlock{ Hash: block.Hash(), @@ -225,8 +225,16 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, ErrProxyAppConn(err) } + blockExec.logger.Info( + "finalized block", + "height", block.Height, + "num_txs_res", len(fBlockRes.TxResults), + "num_val_updates", len(fBlockRes.ValidatorUpdates), + "block_app_hash", fmt.Sprintf("%X", fBlockRes.AppHash), + ) + abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: finalizeBlockResponse, + FinalizeBlock: fBlockRes, } // Save the results before we commit. @@ -235,12 +243,12 @@ func (blockExec *BlockExecutor) ApplyBlock( } // validate the validator updates and convert to tendermint types - err = validateValidatorUpdates(finalizeBlockResponse.ValidatorUpdates, state.ConsensusParams.Validator) + err = validateValidatorUpdates(fBlockRes.ValidatorUpdates, state.ConsensusParams.Validator) if err != nil { return state, fmt.Errorf("error in validator updates: %w", err) } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(finalizeBlockResponse.ValidatorUpdates) + validatorUpdates, err := types.PB2TM.ValidatorUpdates(fBlockRes.ValidatorUpdates) if err != nil { return state, err } @@ -248,23 +256,23 @@ func (blockExec *BlockExecutor) ApplyBlock( blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) blockExec.metrics.ValidatorSetUpdates.Add(1) } - if finalizeBlockResponse.ConsensusParamUpdates != nil { + if fBlockRes.ConsensusParamUpdates != nil { blockExec.metrics.ConsensusParamUpdates.Add(1) } // Update the state with the block and responses. 
- rs, err := abci.MarshalTxResults(finalizeBlockResponse.TxResults) + rs, err := abci.MarshalTxResults(fBlockRes.TxResults) if err != nil { return state, fmt.Errorf("marshaling TxResults: %w", err) } h := merkle.HashFromByteSlices(rs) - state, err = state.Update(blockID, &block.Header, h, finalizeBlockResponse.ConsensusParamUpdates, validatorUpdates) + state, err = state.Update(blockID, &block.Header, h, fBlockRes.ConsensusParamUpdates, validatorUpdates) if err != nil { return state, fmt.Errorf("commit failed for application: %w", err) } // Lock mempool, commit app state, update mempoool. - appHash, retainHeight, err := blockExec.Commit(ctx, state, block, finalizeBlockResponse.TxResults) + retainHeight, err := blockExec.Commit(ctx, state, block, fBlockRes.TxResults) if err != nil { return state, fmt.Errorf("commit failed for application: %w", err) } @@ -273,7 +281,7 @@ func (blockExec *BlockExecutor) ApplyBlock( blockExec.evpool.Update(ctx, state, block.Evidence) // Update the app hash and save the state. - state.AppHash = appHash + state.AppHash = fBlockRes.AppHash if err := blockExec.store.Save(state); err != nil { return state, err } @@ -293,7 +301,7 @@ func (blockExec *BlockExecutor) ApplyBlock( // Events are fired after everything else. 
// NOTE: if we crash between Commit and Save, events wont be fired during replay - fireEvents(blockExec.logger, blockExec.eventBus, block, blockID, finalizeBlockResponse, validatorUpdates) + fireEvents(blockExec.logger, blockExec.eventBus, block, blockID, fBlockRes, validatorUpdates) return state, nil } @@ -338,7 +346,7 @@ func (blockExec *BlockExecutor) Commit( state State, block *types.Block, txResults []*abci.ExecTxResult, -) ([]byte, int64, error) { +) (int64, error) { blockExec.mempool.Lock() defer blockExec.mempool.Unlock() @@ -347,14 +355,14 @@ func (blockExec *BlockExecutor) Commit( err := blockExec.mempool.FlushAppConn(ctx) if err != nil { blockExec.logger.Error("client error during mempool.FlushAppConn", "err", err) - return nil, 0, err + return 0, err } // Commit block, get hash back res, err := blockExec.appClient.Commit(ctx) if err != nil { blockExec.logger.Error("client error during proxyAppConn.Commit", "err", err) - return nil, 0, err + return 0, err } // ResponseCommit has no error code - just data @@ -362,7 +370,7 @@ func (blockExec *BlockExecutor) Commit( "committed state", "height", block.Height, "num_txs", len(block.Txs), - "app_hash", fmt.Sprintf("%X", res.Data), + "block_app_hash", fmt.Sprintf("%X", block.AppHash), ) // Update mempool. 
@@ -376,7 +384,7 @@ func (blockExec *BlockExecutor) Commit( state.ConsensusParams.ABCI.RecheckTx, ) - return res.Data, res.RetainHeight, err + return res.RetainHeight, err } func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) abci.CommitInfo { @@ -708,15 +716,15 @@ func ExecCommitBlock( fireEvents(be.logger, be.eventBus, block, blockID, finalizeBlockResponse, validatorUpdates) } - // Commit block, get hash back - res, err := appConn.Commit(ctx) + // Commit block + _, err = appConn.Commit(ctx) if err != nil { - logger.Error("client error during proxyAppConn.Commit", "err", res) + logger.Error("client error during proxyAppConn.Commit", "err", err) return nil, err } - // ResponseCommit has no error or log, just data - return res.Data, nil + // ResponseCommit has no error or log + return finalizeBlockResponse.AppHash, nil } func (blockExec *BlockExecutor) pruneBlocks(retainHeight int64) (uint64, error) { diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 771c56a8f3..aa9e3fcbe1 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -249,7 +249,6 @@ message ResponseDeliverTx { message ResponseCommit { // reserve 1 - bytes data = 2; int64 retain_height = 3; } @@ -332,7 +331,6 @@ message ResponseFinalizeBlock { repeated ValidatorUpdate validator_updates = 3 [(gogoproto.nullable) = false]; tendermint.types.ConsensusParams consensus_param_updates = 4; bytes app_hash = 5; - int64 retain_height = 6; } //---------------------------------------- diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 788f296b7a..771b726438 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -210,6 +210,7 @@ func (app *Application) FinalizeBlock(_ context.Context, req *abci.RequestFinali return &abci.ResponseFinalizeBlock{ TxResults: txs, ValidatorUpdates: valUpdates, + AppHash: app.state.Finalize(), Events: []abci.Event{ { Type: "val_updates", @@ -233,7 +234,7 @@ func (app 
*Application) Commit(_ context.Context) (*abci.ResponseCommit, error) app.mu.Lock() defer app.mu.Unlock() - height, hash, err := app.state.Commit() + height, err := app.state.Commit() if err != nil { panic(err) } @@ -253,7 +254,6 @@ func (app *Application) Commit(_ context.Context) (*abci.ResponseCommit, error) retainHeight = int64(height - app.cfg.RetainBlocks + 1) } return &abci.ResponseCommit{ - Data: hash, RetainHeight: retainHeight, }, nil } diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index 17d8cd75ff..91b1a32d59 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ -137,8 +137,8 @@ func (s *State) Set(key, value string) { } } -// Commit commits the current state. -func (s *State) Commit() (uint64, []byte, error) { +// Finalize is called after applying a block, updating the height and returning the new app_hash +func (s *State) Finalize() []byte { s.Lock() defer s.Unlock() switch { @@ -150,13 +150,20 @@ func (s *State) Commit() (uint64, []byte, error) { s.Height = 1 } s.Hash = hashItems(s.Values, s.Height) + return s.Hash +} + +// Commit commits the current state. 
+func (s *State) Commit() (uint64, error) { + s.Lock() + defer s.Unlock() if s.persistInterval > 0 && s.Height%s.persistInterval == 0 { err := s.save() if err != nil { - return 0, nil, err + return 0, err } } - return s.Height, s.Hash, nil + return s.Height, nil } func (s *State) Rollback() error { From d5299882b01753f974c0792492d68ba47c2354bd Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 2 Jun 2022 03:14:58 -0400 Subject: [PATCH 079/203] migrate: provide function for database production (#8614) This builds on: #8614 --- cmd/tendermint/commands/key_migrate.go | 91 +++++++++++++------------- 1 file changed, 46 insertions(+), 45 deletions(-) diff --git a/cmd/tendermint/commands/key_migrate.go b/cmd/tendermint/commands/key_migrate.go index 6f8817fe15..723026da5a 100644 --- a/cmd/tendermint/commands/key_migrate.go +++ b/cmd/tendermint/commands/key_migrate.go @@ -6,70 +6,71 @@ import ( "github.com/spf13/cobra" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/scripts/keymigrate" "github.com/tendermint/tendermint/scripts/scmigrate" ) -func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command { +func MakeKeyMigrateCommand(conf *config.Config, logger log.Logger) *cobra.Command { cmd := &cobra.Command{ Use: "key-migrate", Short: "Run Database key migration", RunE: func(cmd *cobra.Command, args []string) error { - ctx, cancel := context.WithCancel(cmd.Context()) - defer cancel() + return RunDatabaseMigration(cmd.Context(), logger, conf) + }, + } - contexts := []string{ - // this is ordered to put - // the more ephemeral tables first to - // forclose the possiblity of the - // ephemeral data overwriting later data - "tx_index", - "peerstore", - "light", - "blockstore", - "state", - "evidence", - } + // allow database info to be overridden via cli + addDBFlags(cmd, conf) - for idx, dbctx := range contexts { - 
logger.Info("beginning a key migration", - "dbctx", dbctx, - "num", idx+1, - "total", len(contexts), - ) + return cmd +} - db, err := cfg.DefaultDBProvider(&cfg.DBContext{ - ID: dbctx, - Config: conf, - }) +func RunDatabaseMigration(ctx context.Context, logger log.Logger, conf *config.Config) error { + contexts := []string{ + // this is ordered to put + // the more ephemeral tables first to + // reduce the possibility of the + // ephemeral data overwriting later data + "tx_index", + "peerstore", + "light", + "blockstore", + "state", + "evidence", + } - if err != nil { - return fmt.Errorf("constructing database handle: %w", err) - } + for idx, dbctx := range contexts { + logger.Info("beginning a key migration", + "dbctx", dbctx, + "num", idx+1, + "total", len(contexts), + ) - if err = keymigrate.Migrate(ctx, db); err != nil { - return fmt.Errorf("running migration for context %q: %w", - dbctx, err) - } + db, err := config.DefaultDBProvider(&config.DBContext{ + ID: dbctx, + Config: conf, + }) - if dbctx == "blockstore" { - if err := scmigrate.Migrate(ctx, db); err != nil { - return fmt.Errorf("running seen commit migration: %w", err) + if err != nil { + return fmt.Errorf("constructing database handle: %w", err) + } - } - } - } + if err = keymigrate.Migrate(ctx, db); err != nil { + return fmt.Errorf("running migration for context %q: %w", + dbctx, err) + } - logger.Info("completed database migration successfully") + if dbctx == "blockstore" { + if err := scmigrate.Migrate(ctx, db); err != nil { + return fmt.Errorf("running seen commit migration: %w", err) - return nil - }, + } + } } - // allow database info to be overridden via cli - addDBFlags(cmd, conf) + logger.Info("completed database migration successfully") - return cmd + return nil } From 30bfe51ebeba61e64abdc9d00fa78f7389fe5073 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Thu, 2 Jun 2022 10:01:16 +0200 Subject: [PATCH 080/203] cmd: add tool for compaction of goleveldb (#8564) --- 
cmd/tendermint/commands/compact.go | 71 ++++++++++++++++++++++++++++++ cmd/tendermint/main.go | 1 + go.mod | 64 +++++++++++++-------------- go.sum | 9 ++-- 4 files changed, 108 insertions(+), 37 deletions(-) create mode 100644 cmd/tendermint/commands/compact.go diff --git a/cmd/tendermint/commands/compact.go b/cmd/tendermint/commands/compact.go new file mode 100644 index 0000000000..eadd828ae3 --- /dev/null +++ b/cmd/tendermint/commands/compact.go @@ -0,0 +1,71 @@ +package commands + +import ( + "errors" + "path/filepath" + "sync" + + "github.com/spf13/cobra" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" +) + +func MakeCompactDBCommand(cfg *config.Config, logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "experimental-compact-goleveldb", + Short: "force compacts the tendermint storage engine (only GoLevelDB supported)", + Long: ` +This is a temporary utility command that performs a force compaction on the state +and blockstores to reduce disk space for a pruning node. This should only be run +once the node has stopped. This command will likely be omitted in the future after +the planned refactor to the storage engine. + +Currently, only GoLevelDB is supported. 
+ `, + RunE: func(cmd *cobra.Command, args []string) error { + if cfg.DBBackend != "goleveldb" { + return errors.New("compaction is currently only supported with goleveldb") + } + + compactGoLevelDBs(cfg.RootDir, logger) + return nil + }, + } + + return cmd +} + +func compactGoLevelDBs(rootDir string, logger log.Logger) { + dbNames := []string{"state", "blockstore"} + o := &opt.Options{ + DisableSeeksCompaction: true, + } + wg := sync.WaitGroup{} + + for _, dbName := range dbNames { + dbName := dbName + wg.Add(1) + go func() { + defer wg.Done() + dbPath := filepath.Join(rootDir, "data", dbName+".db") + store, err := leveldb.OpenFile(dbPath, o) + if err != nil { + logger.Error("failed to initialize tendermint db", "path", dbPath, "err", err) + return + } + defer store.Close() + + logger.Info("starting compaction...", "db", dbPath) + + err = store.CompactRange(util.Range{Start: nil, Limit: nil}) + if err != nil { + logger.Error("failed to compact tendermint db", "path", dbPath, "err", err) + } + }() + } + wg.Wait() +} diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 715e8e1d47..1be614aeaf 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -44,6 +44,7 @@ func main() { commands.MakeKeyMigrateCommand(conf, logger), debug.GetDebugCommand(logger), commands.NewCompletionCmd(rcmd, true), + commands.MakeCompactDBCommand(conf, logger), ) // NOTE: diff --git a/go.mod b/go.mod index fd2f15cb86..69284bead4 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/tendermint/tm-db v0.6.6 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 google.golang.org/grpc v1.47.0 pgregory.net/rapid v0.4.7 ) @@ -46,31 +46,6 @@ require ( gotest.tools v2.2.0+incompatible ) -require ( - github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect - 
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect - github.com/firefart/nonamedreturns v1.0.1 // indirect - github.com/gofrs/uuid v4.2.0+incompatible // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect - github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f // indirect - github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753 // indirect - github.com/klauspost/compress v1.15.1 // indirect - github.com/klauspost/pgzip v1.2.5 // indirect - github.com/lufeee/execinquery v1.0.0 // indirect - github.com/pelletier/go-toml/v2 v2.0.1 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/pkg/profile v1.6.0 // indirect - github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect - go.opencensus.io v0.23.0 // indirect - go.uber.org/atomic v1.9.0 // indirect - go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.21.0 // indirect - golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect -) - require ( 4d63.com/gochecknoglobals v0.1.0 // indirect github.com/Antonboom/errname v0.1.6 // indirect @@ -78,6 +53,7 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/DataDog/zstd v1.4.1 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Microsoft/go-winio v0.5.1 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect @@ -98,7 +74,7 @@ require ( github.com/charithe/durationcheck v0.0.9 // indirect github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect github.com/containerd/continuity v0.2.1 // 
indirect - github.com/creachadair/tomledit v0.0.22 + github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/daixiang0/gci v0.3.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect @@ -110,11 +86,12 @@ require ( github.com/dustin/go-humanize v1.0.0 // indirect github.com/esimonov/ifshort v1.0.4 // indirect github.com/ettle/strcase v0.1.1 // indirect - github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect + github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect - github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect + github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect github.com/fatih/color v1.13.0 // indirect github.com/fatih/structtag v1.2.0 // indirect + github.com/firefart/nonamedreturns v1.0.1 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/fzipp/gocyclo v0.5.1 // indirect github.com/go-critic/go-critic v0.6.3 // indirect @@ -128,6 +105,8 @@ require ( github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/uuid v4.2.0+incompatible // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.3 // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect @@ -150,19 +129,25 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f // indirect + 
github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/julz/importas v0.1.0 // indirect github.com/kisielk/errcheck v1.6.0 // indirect github.com/kisielk/gotool v1.0.0 // indirect + github.com/klauspost/compress v1.15.1 // indirect + github.com/klauspost/pgzip v1.2.5 // indirect github.com/kulti/thelper v0.6.2 // indirect github.com/kunwardeep/paralleltest v1.0.3 // indirect github.com/kyoh86/exportloopref v0.1.8 // indirect github.com/ldez/gomoddirectives v0.2.3 // indirect github.com/ldez/tagliatelle v0.3.1 // indirect github.com/leonklingele/grouper v1.1.0 // indirect + github.com/lufeee/execinquery v1.0.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/maratori/testpackage v1.0.1 // indirect github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect @@ -184,16 +169,19 @@ require ( github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runc v1.0.3 // indirect github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.1 // indirect github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pkg/profile v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b // indirect - github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.34.0 github.com/prometheus/procfs v0.7.3 // indirect github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a // indirect github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect + 
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/ryancurrah/gomodguard v1.2.3 // indirect github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect @@ -209,10 +197,10 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect github.com/stretchr/objx v0.1.1 // indirect github.com/subosito/gotenv v1.3.0 // indirect github.com/sylvia7788/contextcheck v1.0.4 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca // indirect github.com/tdakkota/asciicheck v0.1.1 // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tetafro/godot v1.4.11 // indirect @@ -226,6 +214,11 @@ require ( github.com/yeya24/promlinter v0.2.0 // indirect gitlab.com/bosi/decorder v0.2.1 // indirect go.etcd.io/bbolt v1.3.6 // indirect + go.opencensus.io v0.23.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.21.0 // indirect + golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 // indirect @@ -243,3 +236,10 @@ require ( mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect ) + +require ( + github.com/creachadair/tomledit v0.0.22 + github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.34.0 + github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca +) diff --git a/go.sum b/go.sum index 3d036cf0c4..b107e95b50 100644 --- a/go.sum +++ b/go.sum @@ -295,12 +295,12 @@ 
github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStB github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= -github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= -github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y= -github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= @@ -1369,7 +1369,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4= golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 666d93338a70914aebde3ed402de811cf4f8630c Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 2 Jun 2022 11:14:25 -0400 Subject: [PATCH 081/203] p2p: shed peers from store from other networks (#8678) --- internal/p2p/router.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 267d55a962..8b77541ded 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -720,6 +720,15 @@ func (r *Router) handshakePeer( if err = peerInfo.Validate(); err != nil { return peerInfo, fmt.Errorf("invalid handshake NodeInfo: %w", err) } + + if peerInfo.Network != nodeInfo.Network { + if err := r.peerManager.store.Delete(peerInfo.NodeID); err != nil { + return peerInfo, fmt.Errorf("problem removing peer from store from incorrect network [%s]: %w", peerInfo.Network, err) + } + + return peerInfo, fmt.Errorf("connected to peer from wrong network, %q, removed from peer store", peerInfo.Network) + } + if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID { return peerInfo, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)", peerInfo.NodeID, types.NodeIDFromPubKey(peerKey)) @@ -728,6 +737,7 @@ func (r *Router) handshakePeer( return peerInfo, fmt.Errorf("expected to connect with peer %q, got %q", expectID, peerInfo.NodeID) } + if err := nodeInfo.CompatibleWith(peerInfo); err != nil { 
return peerInfo, ErrRejected{ err: err, From 08099ff669ac919802610f3ce6d5230a5bce6422 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 2 Jun 2022 10:20:00 -0700 Subject: [PATCH 082/203] privval: restrict listeners to TCP and Unix domain sockets (#8670) Front load the protocol type check so we do not wind up creating listeners of types that are not usable for this interface (for example, UDP). Fixes #8647. --- privval/utils.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/privval/utils.go b/privval/utils.go index 1d6681b452..a2cbbf5014 100644 --- a/privval/utils.go +++ b/privval/utils.go @@ -27,13 +27,17 @@ func IsConnTimeout(err error) bool { // NewSignerListener creates a new SignerListenerEndpoint using the corresponding listen address func NewSignerListener(listenAddr string, logger log.Logger) (*SignerListenerEndpoint, error) { - var listener net.Listener - protocol, address := tmnet.ProtocolAndAddress(listenAddr) + if protocol != "unix" && protocol != "tcp" { //nolint:goconst + return nil, fmt.Errorf("unsupported address family %q, want unix or tcp", protocol) + } + ln, err := net.Listen(protocol, address) if err != nil { return nil, err } + + var listener net.Listener switch protocol { case "unix": listener = NewUnixListener(ln) @@ -41,13 +45,8 @@ func NewSignerListener(listenAddr string, logger log.Logger) (*SignerListenerEnd // TODO: persist this key so external signer can actually authenticate us listener = NewTCPListener(ln, ed25519.GenPrivKey()) default: - return nil, fmt.Errorf( - "wrong listen address: expected either 'tcp' or 'unix' protocols, got %s", - protocol, - ) + panic("invalid protocol: " + protocol) // semantically unreachable } - pve := NewSignerListenerEndpoint(logger.With("module", "privval"), listener) - - return pve, nil + return NewSignerListenerEndpoint(logger.With("module", "privval"), listener), nil } From ce6485fa709c05aed51fedf492a822f1481ed74e Mon Sep 17 00:00:00 2001 From: Sergio 
Mena Date: Thu, 2 Jun 2022 21:13:08 +0200 Subject: [PATCH 083/203] Remove the abci responses type - prune legacy responses (#8673) Closes #8069 * Type `ABCIResponses` was just wrapping type `ResponseFinalizeBlock`. This patch removes the former. * Did some renaming to avoid confusion on the data structure we are working with. * We also remove any stale ABCIResponses we may have in the state store at the time of pruning **IMPORTANT**: There is an undesirable side-effect of the unwrapping. An empty `ResponseFinalizeBlock` yields a 0-length proto-buf serialized buffer. This was not the case with `ABCIResponses`. I have added an interim solution, but open for suggestions on more elegant ones. --- abci/cmd/abci-cli/abci-cli.go | 2 + cmd/tendermint/commands/reindex_event.go | 6 +- cmd/tendermint/commands/reindex_event_test.go | 15 +- internal/consensus/replay.go | 4 +- internal/consensus/replay_stubs.go | 17 +- internal/consensus/state_test.go | 9 +- internal/evidence/pool.go | 10 + internal/inspect/inspect_test.go | 11 +- internal/rpc/core/blocks.go | 12 +- internal/rpc/core/blocks_test.go | 23 +- internal/state/errors.go | 6 +- internal/state/execution.go | 14 +- internal/state/helpers_test.go | 37 +- internal/state/mocks/store.go | 33 +- internal/state/state_test.go | 65 ++-- internal/state/store.go | 90 +++-- internal/state/store_test.go | 24 +- internal/store/store.go | 12 +- light/store/db/db.go | 10 + proto/tendermint/state/types.pb.go | 346 ++++-------------- proto/tendermint/state/types.proto | 8 - 21 files changed, 293 insertions(+), 461 deletions(-) diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 97a5e815be..b09f3c9a7b 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -151,8 +151,10 @@ where example.file looks something like: check_tx 0x00 check_tx 0xff finalize_block 0x00 + commit check_tx 0x00 finalize_block 0x01 0x04 0xff + commit info `, Args: cobra.ExactArgs(0), diff --git 
a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index 6cec32738a..34d07fdd5b 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -193,7 +193,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { return fmt.Errorf("not able to load block at height %d from the blockstore", i) } - r, err := args.stateStore.LoadABCIResponses(i) + r, err := args.stateStore.LoadFinalizeBlockResponses(i) if err != nil { return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i) } @@ -201,7 +201,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { e := types.EventDataNewBlockHeader{ Header: b.Header, NumTxs: int64(len(b.Txs)), - ResultFinalizeBlock: *r.FinalizeBlock, + ResultFinalizeBlock: *r, } var batch *indexer.Batch @@ -213,7 +213,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { Height: b.Height, Index: uint32(i), Tx: b.Data.Txs[i], - Result: *(r.FinalizeBlock.TxResults[i]), + Result: *(r.TxResults[i]), } _ = batch.Add(&tr) diff --git a/cmd/tendermint/commands/reindex_event_test.go b/cmd/tendermint/commands/reindex_event_test.go index 826fa02335..971cf1c87c 100644 --- a/cmd/tendermint/commands/reindex_event_test.go +++ b/cmd/tendermint/commands/reindex_event_test.go @@ -16,7 +16,6 @@ import ( "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/libs/log" - prototmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" _ "github.com/lib/pq" // for the psql sink @@ -154,16 +153,14 @@ func TestReIndexEvent(t *testing.T) { On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil) dtx := abcitypes.ExecTxResult{} - abciResp := &prototmstate.ABCIResponses{ - FinalizeBlock: &abcitypes.ResponseFinalizeBlock{ - TxResults: []*abcitypes.ExecTxResult{&dtx}, - 
}, + abciResp := &abcitypes.ResponseFinalizeBlock{ + TxResults: []*abcitypes.ExecTxResult{&dtx}, } mockStateStore. - On("LoadABCIResponses", base).Return(nil, errors.New("")).Once(). - On("LoadABCIResponses", base).Return(abciResp, nil). - On("LoadABCIResponses", height).Return(abciResp, nil) + On("LoadFinalizeBlockResponses", base).Return(nil, errors.New("")).Once(). + On("LoadFinalizeBlockResponses", base).Return(abciResp, nil). + On("LoadFinalizeBlockResponses", height).Return(abciResp, nil) testCases := []struct { startHeight int64 @@ -171,7 +168,7 @@ func TestReIndexEvent(t *testing.T) { reIndexErr bool }{ {base, height, true}, // LoadBlock error - {base, height, true}, // LoadABCIResponses error + {base, height, true}, // LoadFinalizeBlockResponses error {base, height, true}, // index block event error {base, height, true}, // index tx event error {base, base, false}, diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index fac7c91ac3..71c7ddd71d 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -424,11 +424,11 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight == storeBlockHeight: // We ran Commit, but didn't save the state, so replayBlock with mock app. 
- abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight) + finalizeBlockResponses, err := h.stateStore.LoadFinalizeBlockResponses(storeBlockHeight) if err != nil { return nil, err } - mockApp, err := newMockProxyApp(h.logger, appHash, abciResponses) + mockApp, err := newMockProxyApp(h.logger, appHash, finalizeBlockResponses) if err != nil { return nil, err } diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index ec5f707736..60b96ae0c5 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -9,7 +9,6 @@ import ( "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -52,7 +51,7 @@ func (emptyMempool) InitWAL() error { return nil } func (emptyMempool) CloseWAL() {} //----------------------------------------------------------------------------- -// mockProxyApp uses ABCIResponses to give the right results. +// mockProxyApp uses Responses to FinalizeBlock to give the right results. // // Useful because we don't want to call Commit() twice for the same block on // the real app. 
@@ -60,24 +59,24 @@ func (emptyMempool) CloseWAL() {} func newMockProxyApp( logger log.Logger, appHash []byte, - abciResponses *tmstate.ABCIResponses, + finalizeBlockResponses *abci.ResponseFinalizeBlock, ) (abciclient.Client, error) { return proxy.New(abciclient.NewLocalClient(logger, &mockProxyApp{ - appHash: appHash, - abciResponses: abciResponses, + appHash: appHash, + finalizeBlockResponses: finalizeBlockResponses, }), logger, proxy.NopMetrics()), nil } type mockProxyApp struct { abci.BaseApplication - appHash []byte - txCount int - abciResponses *tmstate.ABCIResponses + appHash []byte + txCount int + finalizeBlockResponses *abci.ResponseFinalizeBlock } func (mock *mockProxyApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - r := mock.abciResponses.FinalizeBlock + r := mock.finalizeBlockResponses mock.txCount++ if r == nil { return &abci.ResponseFinalizeBlock{}, nil diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 797dcf59c4..06f670544c 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -1979,7 +1979,8 @@ func TestFinalizeBlockCalled(t *testing.T) { Status: abci.ResponseVerifyVoteExtension_ACCEPT, }, nil) } - m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() + r := &abci.ResponseFinalizeBlock{AppHash: []byte("the_hash")} + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(r, nil).Maybe() m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) @@ -2060,7 +2061,8 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { }, nil) } m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() - m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() + r := &abci.ResponseFinalizeBlock{AppHash: []byte("myHash")} + 
m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(r, nil).Maybe() c := factory.ConsensusParams() if !testCase.enabled { c.ABCI.VoteExtensionsEnableHeight = 0 @@ -2357,7 +2359,8 @@ func TestVoteExtensionEnableHeight(t *testing.T) { Status: abci.ResponseVerifyVoteExtension_ACCEPT, }, nil).Times(numValidators - 1) } - m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() + r := &abci.ResponseFinalizeBlock{AppHash: []byte("hashyHash")} + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(r, nil).Maybe() m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() c := factory.ConsensusParams() c.ABCI.VoteExtensionsEnableHeight = testCase.enableHeight diff --git a/internal/evidence/pool.go b/internal/evidence/pool.go index 132c61f238..9b3e891732 100644 --- a/internal/evidence/pool.go +++ b/internal/evidence/pool.go @@ -22,6 +22,16 @@ import ( "github.com/tendermint/tendermint/types" ) +// key prefixes +// NB: Before modifying these, cross-check them with those in +// * internal/store/store.go [0..4, 13] +// * internal/state/store.go [5..8, 14] +// * internal/evidence/pool.go [9..10] +// * light/store/db/db.go [11..12] +// TODO(sergio): Move all these to their own package. 
+// TODO: what about these (they already collide): +// * scripts/scmigrate/migrate.go [3] +// * internal/p2p/peermanager.go [1] const ( // prefixes are unique across all tm db's prefixCommitted = int64(9) diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index 511dbc8147..059e41e889 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -23,7 +23,6 @@ import ( indexermocks "github.com/tendermint/tendermint/internal/state/indexer/mocks" statemocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/proto/tendermint/state" httpclient "github.com/tendermint/tendermint/rpc/client/http" "github.com/tendermint/tendermint/types" ) @@ -263,12 +262,10 @@ func TestBlockResults(t *testing.T) { testGasUsed := int64(100) stateStoreMock := &statemocks.Store{} // tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{ - FinalizeBlock: &abcitypes.ResponseFinalizeBlock{ - TxResults: []*abcitypes.ExecTxResult{ - { - GasUsed: testGasUsed, - }, + stateStoreMock.On("LoadFinalizeBlockResponses", testHeight).Return(&abcitypes.ResponseFinalizeBlock{ + TxResults: []*abcitypes.ExecTxResult{ + { + GasUsed: testGasUsed, }, }, }, nil) diff --git a/internal/rpc/core/blocks.go b/internal/rpc/core/blocks.go index 2393440022..845eef7bbd 100644 --- a/internal/rpc/core/blocks.go +++ b/internal/rpc/core/blocks.go @@ -187,23 +187,23 @@ func (env *Environment) BlockResults(ctx context.Context, req *coretypes.Request return nil, err } - results, err := env.StateStore.LoadABCIResponses(height) + results, err := env.StateStore.LoadFinalizeBlockResponses(height) if err != nil { return nil, err } var totalGasUsed int64 - for _, res := range results.FinalizeBlock.GetTxResults() { + for _, res := range results.GetTxResults() { totalGasUsed += res.GetGasUsed() } return 
&coretypes.ResultBlockResults{ Height: height, - TxsResults: results.FinalizeBlock.TxResults, + TxsResults: results.TxResults, TotalGasUsed: totalGasUsed, - FinalizeBlockEvents: results.FinalizeBlock.Events, - ValidatorUpdates: results.FinalizeBlock.ValidatorUpdates, - ConsensusParamUpdates: results.FinalizeBlock.ConsensusParamUpdates, + FinalizeBlockEvents: results.Events, + ValidatorUpdates: results.ValidatorUpdates, + ConsensusParamUpdates: results.ConsensusParamUpdates, }, nil } diff --git a/internal/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go index d95338332d..6ec4000696 100644 --- a/internal/rpc/core/blocks_test.go +++ b/internal/rpc/core/blocks_test.go @@ -13,7 +13,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/mocks" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/rpc/coretypes" ) @@ -70,19 +69,17 @@ func TestBlockchainInfo(t *testing.T) { } func TestBlockResults(t *testing.T) { - results := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - TxResults: []*abci.ExecTxResult{ - {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10}, - {Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5}, - {Code: 1, Log: "not ok", GasUsed: 0}, - }, + results := &abci.ResponseFinalizeBlock{ + TxResults: []*abci.ExecTxResult{ + {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10}, + {Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5}, + {Code: 1, Log: "not ok", GasUsed: 0}, }, } env := &Environment{} env.StateStore = sm.NewStore(dbm.NewMemDB()) - err := env.StateStore.SaveABCIResponses(100, results) + err := env.StateStore.SaveFinalizeBlockResponses(100, results) require.NoError(t, err) mockstore := &mocks.BlockStore{} mockstore.On("Height").Return(int64(100)) @@ -99,11 +96,11 @@ func TestBlockResults(t *testing.T) { {101, true, nil}, {100, false, 
&coretypes.ResultBlockResults{ Height: 100, - TxsResults: results.FinalizeBlock.TxResults, + TxsResults: results.TxResults, TotalGasUsed: 15, - FinalizeBlockEvents: results.FinalizeBlock.Events, - ValidatorUpdates: results.FinalizeBlock.ValidatorUpdates, - ConsensusParamUpdates: results.FinalizeBlock.ConsensusParamUpdates, + FinalizeBlockEvents: results.Events, + ValidatorUpdates: results.ValidatorUpdates, + ConsensusParamUpdates: results.ConsensusParamUpdates, }}, } diff --git a/internal/state/errors.go b/internal/state/errors.go index e8ad776f46..516b20e5f5 100644 --- a/internal/state/errors.go +++ b/internal/state/errors.go @@ -46,7 +46,7 @@ type ( Height int64 } - ErrNoABCIResponsesForHeight struct { + ErrNoFinalizeBlockResponsesForHeight struct { Height int64 } ) @@ -102,6 +102,6 @@ func (e ErrNoConsensusParamsForHeight) Error() string { return fmt.Sprintf("could not find consensus params for height #%d", e.Height) } -func (e ErrNoABCIResponsesForHeight) Error() string { - return fmt.Sprintf("could not find results for height #%d", e.Height) +func (e ErrNoFinalizeBlockResponsesForHeight) Error() string { + return fmt.Sprintf("could not find FinalizeBlock responses for height #%d", e.Height) } diff --git a/internal/state/execution.go b/internal/state/execution.go index 5c46f43532..2710478e6a 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -14,7 +14,6 @@ import ( "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmtypes "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -233,12 +232,11 @@ func (blockExec *BlockExecutor) ApplyBlock( "block_app_hash", fmt.Sprintf("%X", fBlockRes.AppHash), ) - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: fBlockRes, - } - // Save the results before we commit. 
- if err := blockExec.store.SaveABCIResponses(block.Height, abciResponses); err != nil { + err = blockExec.store.SaveFinalizeBlockResponses(block.Height, fBlockRes) + if err != nil && !errors.Is(err, ErrNoFinalizeBlockResponsesForHeight{block.Height}) { + // It is correct to have an empty ResponseFinalizeBlock for ApplyBlock, + // but not for saving it to the state store return state, err } @@ -538,7 +536,7 @@ func (state State) Update( // and update s.LastValidators and s.Validators. nValSet := state.NextValidators.Copy() - // Update the validator set with the latest abciResponses. + // Update the validator set with the latest responses to FinalizeBlock. lastHeightValsChanged := state.LastHeightValidatorsChanged if len(validatorUpdates) > 0 { err := nValSet.UpdateWithChangeSet(validatorUpdates) @@ -552,7 +550,7 @@ func (state State) Update( // Update validator proposer priority and set state variables. nValSet.IncrementProposerPriority(1) - // Update the params with the latest abciResponses. + // Update the params with the latest responses to FinalizeBlock. 
nextParams := state.ConsensusParams lastHeightParamsChanged := state.LastHeightConsensusParamsChanged if consensusParamUpdates != nil { diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index dec5afc667..354a2874f3 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -19,7 +19,6 @@ import ( sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" tmtime "github.com/tendermint/tendermint/libs/time" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -148,10 +147,10 @@ func makeHeaderPartsResponsesValPubKeyChange( t *testing.T, state sm.State, pubkey crypto.PubKey, -) (types.Header, types.BlockID, *tmstate.ABCIResponses) { +) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - abciResponses := &tmstate.ABCIResponses{} + finalizeBlockResponses := &abci.ResponseFinalizeBlock{} // If the pubkey is new, remove the old and add the new. 
_, val := state.NextValidators.GetByIndex(0) if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { @@ -160,58 +159,50 @@ func makeHeaderPartsResponsesValPubKeyChange( pbPk, err := encoding.PubKeyToProto(pubkey) require.NoError(t, err) - abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{ - ValidatorUpdates: []abci.ValidatorUpdate{ - {PubKey: vPbPk, Power: 0}, - {PubKey: pbPk, Power: 10}, - }, + finalizeBlockResponses.ValidatorUpdates = []abci.ValidatorUpdate{ + {PubKey: vPbPk, Power: 0}, + {PubKey: pbPk, Power: 10}, } } - return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses + return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, finalizeBlockResponses } func makeHeaderPartsResponsesValPowerChange( t *testing.T, state sm.State, power int64, -) (types.Header, types.BlockID, *tmstate.ABCIResponses) { +) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { t.Helper() block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + finalizeBlockResponses := &abci.ResponseFinalizeBlock{} - abciResponses := &tmstate.ABCIResponses{} - - abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{} // If the pubkey is new, remove the old and add the new. 
_, val := state.NextValidators.GetByIndex(0) if val.VotingPower != power { vPbPk, err := encoding.PubKeyToProto(val.PubKey) require.NoError(t, err) - abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{ - ValidatorUpdates: []abci.ValidatorUpdate{ - {PubKey: vPbPk, Power: power}, - }, + finalizeBlockResponses.ValidatorUpdates = []abci.ValidatorUpdate{ + {PubKey: vPbPk, Power: power}, } } - return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses + return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, finalizeBlockResponses } func makeHeaderPartsResponsesParams( t *testing.T, state sm.State, params *types.ConsensusParams, -) (types.Header, types.BlockID, *tmstate.ABCIResponses) { +) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { t.Helper() block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) pbParams := params.ToProto() - abciResponses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ConsensusParamUpdates: &pbParams}, - } - return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses + finalizeBlockResponses := &abci.ResponseFinalizeBlock{ConsensusParamUpdates: &pbParams} + return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, finalizeBlockResponses } func randomGenesisDoc() *types.GenesisDoc { diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index d08ba4c9e4..94d5614e6c 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -4,8 +4,9 @@ package mocks import ( mock "github.com/stretchr/testify/mock" + abcitypes "github.com/tendermint/tendermint/abci/types" + state "github.com/tendermint/tendermint/internal/state" - tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" types "github.com/tendermint/tendermint/types" ) @@ -64,17 +65,15 @@ func (_m *Store) Load() 
(state.State, error) { return r0, r1 } -// LoadABCIResponses provides a mock function with given fields: _a0 -func (_m *Store) LoadABCIResponses(_a0 int64) (*tendermintstate.ABCIResponses, error) { +// LoadConsensusParams provides a mock function with given fields: _a0 +func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { ret := _m.Called(_a0) - var r0 *tendermintstate.ABCIResponses - if rf, ok := ret.Get(0).(func(int64) *tendermintstate.ABCIResponses); ok { + var r0 types.ConsensusParams + if rf, ok := ret.Get(0).(func(int64) types.ConsensusParams); ok { r0 = rf(_a0) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*tendermintstate.ABCIResponses) - } + r0 = ret.Get(0).(types.ConsensusParams) } var r1 error @@ -87,15 +86,17 @@ func (_m *Store) LoadABCIResponses(_a0 int64) (*tendermintstate.ABCIResponses, e return r0, r1 } -// LoadConsensusParams provides a mock function with given fields: _a0 -func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { +// LoadFinalizeBlockResponses provides a mock function with given fields: _a0 +func (_m *Store) LoadFinalizeBlockResponses(_a0 int64) (*abcitypes.ResponseFinalizeBlock, error) { ret := _m.Called(_a0) - var r0 types.ConsensusParams - if rf, ok := ret.Get(0).(func(int64) types.ConsensusParams); ok { + var r0 *abcitypes.ResponseFinalizeBlock + if rf, ok := ret.Get(0).(func(int64) *abcitypes.ResponseFinalizeBlock); ok { r0 = rf(_a0) } else { - r0 = ret.Get(0).(types.ConsensusParams) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcitypes.ResponseFinalizeBlock) + } } var r1 error @@ -159,12 +160,12 @@ func (_m *Store) Save(_a0 state.State) error { return r0 } -// SaveABCIResponses provides a mock function with given fields: _a0, _a1 -func (_m *Store) SaveABCIResponses(_a0 int64, _a1 *tendermintstate.ABCIResponses) error { +// SaveFinalizeBlockResponses provides a mock function with given fields: _a0, _a1 +func (_m *Store) SaveFinalizeBlockResponses(_a0 int64, _a1 
*abcitypes.ResponseFinalizeBlock) error { ret := _m.Called(_a0, _a1) var r0 error - if rf, ok := ret.Get(0).(func(int64, *tendermintstate.ABCIResponses) error); ok { + if rf, ok := ret.Get(0).(func(int64, *abcitypes.ResponseFinalizeBlock) error); ok { r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) diff --git a/internal/state/state_test.go b/internal/state/state_test.go index f38d37ea4a..de1164c5dd 100644 --- a/internal/state/state_test.go +++ b/internal/state/state_test.go @@ -21,7 +21,6 @@ import ( "github.com/tendermint/tendermint/crypto/merkle" sm "github.com/tendermint/tendermint/internal/state" statefactory "github.com/tendermint/tendermint/internal/state/test/factory" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -102,8 +101,8 @@ func TestStateSaveLoad(t *testing.T) { loadedState, state) } -// TestABCIResponsesSaveLoad tests saving and loading ABCIResponses. -func TestABCIResponsesSaveLoad1(t *testing.T) { +// TestFinalizeBlockResponsesSaveLoad1 tests saving and loading responses to FinalizeBlock. +func TestFinalizeBlockResponsesSaveLoad1(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) stateStore := sm.NewStore(stateDB) @@ -113,28 +112,27 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { // Build mock responses. 
block := statefactory.MakeBlock(state, 2, new(types.Commit)) - abciResponses := new(tmstate.ABCIResponses) dtxs := make([]*abci.ExecTxResult, 2) - abciResponses.FinalizeBlock = new(abci.ResponseFinalizeBlock) - abciResponses.FinalizeBlock.TxResults = dtxs + finalizeBlockResponses := new(abci.ResponseFinalizeBlock) + finalizeBlockResponses.TxResults = dtxs - abciResponses.FinalizeBlock.TxResults[0] = &abci.ExecTxResult{Data: []byte("foo"), Events: nil} - abciResponses.FinalizeBlock.TxResults[1] = &abci.ExecTxResult{Data: []byte("bar"), Log: "ok", Events: nil} + finalizeBlockResponses.TxResults[0] = &abci.ExecTxResult{Data: []byte("foo"), Events: nil} + finalizeBlockResponses.TxResults[1] = &abci.ExecTxResult{Data: []byte("bar"), Log: "ok", Events: nil} pbpk, err := encoding.PubKeyToProto(ed25519.GenPrivKey().PubKey()) require.NoError(t, err) - abciResponses.FinalizeBlock.ValidatorUpdates = []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}} + finalizeBlockResponses.ValidatorUpdates = []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}} - err = stateStore.SaveABCIResponses(block.Height, abciResponses) + err = stateStore.SaveFinalizeBlockResponses(block.Height, finalizeBlockResponses) require.NoError(t, err) - loadedABCIResponses, err := stateStore.LoadABCIResponses(block.Height) + loadedFinalizeBlockResponses, err := stateStore.LoadFinalizeBlockResponses(block.Height) require.NoError(t, err) - assert.Equal(t, abciResponses, loadedABCIResponses, - "ABCIResponses don't match:\ngot: %v\nexpected: %v\n", - loadedABCIResponses, abciResponses) + assert.Equal(t, finalizeBlockResponses, loadedFinalizeBlockResponses, + "FinalizeBlockResponses don't match:\ngot: %v\nexpected: %v\n", + loadedFinalizeBlockResponses, finalizeBlockResponses) } -// TestResultsSaveLoad tests saving and loading ABCI results. -func TestABCIResponsesSaveLoad2(t *testing.T) { +// TestFinalizeBlockResponsesSaveLoad2 tests saving and loading responses to FinalizeBlock. 
+func TestFinalizeBlockResponsesSaveLoad2(t *testing.T) { tearDown, stateDB, _ := setupTestCase(t) defer tearDown(t) @@ -190,32 +188,31 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { // Query all before, this should return error. for i := range cases { h := int64(i + 1) - res, err := stateStore.LoadABCIResponses(h) + res, err := stateStore.LoadFinalizeBlockResponses(h) assert.Error(t, err, "%d: %#v", i, res) } // Add all cases. for i, tc := range cases { h := int64(i + 1) // last block height, one below what we save - responses := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - TxResults: tc.added, - }, + responses := &abci.ResponseFinalizeBlock{ + TxResults: tc.added, + AppHash: []byte("a_hash"), } - err := stateStore.SaveABCIResponses(h, responses) + err := stateStore.SaveFinalizeBlockResponses(h, responses) require.NoError(t, err) } - // Query all before, should return expected value. + // Query all after, should return expected value. for i, tc := range cases { h := int64(i + 1) - res, err := stateStore.LoadABCIResponses(h) + res, err := stateStore.LoadFinalizeBlockResponses(h) if assert.NoError(t, err, "%d", i) { t.Log(res) e, err := abci.MarshalTxResults(tc.expected) require.NoError(t, err) he := merkle.HashFromByteSlices(e) - rs, err := abci.MarshalTxResults(res.FinalizeBlock.TxResults) + rs, err := abci.MarshalTxResults(res.TxResults) hrs := merkle.HashFromByteSlices(rs) require.NoError(t, err) assert.Equal(t, he, hrs, "%d", i) @@ -282,12 +279,12 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { power++ } header, blockID, responses := makeHeaderPartsResponsesValPowerChange(t, state, power) - validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.ValidatorUpdates) require.NoError(t, err) - rs, err := abci.MarshalTxResults(responses.FinalizeBlock.TxResults) + rs, err := abci.MarshalTxResults(responses.TxResults) 
require.NoError(t, err) h := merkle.HashFromByteSlices(rs) - state, err = state.Update(blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates) + state, err = state.Update(blockID, &header, h, responses.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) err = stateStore.Save(state) require.NoError(t, err) @@ -1024,12 +1021,12 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { // Save state etc. var validatorUpdates []*types.Validator - validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.ValidatorUpdates) require.NoError(t, err) - rs, err := abci.MarshalTxResults(responses.FinalizeBlock.TxResults) + rs, err := abci.MarshalTxResults(responses.TxResults) require.NoError(t, err) h := merkle.HashFromByteSlices(rs) - state, err = state.Update(blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates) + state, err = state.Update(blockID, &header, h, responses.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) nextHeight := state.LastBlockHeight + 1 err = stateStore.Save(state) @@ -1104,12 +1101,12 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { cp = params[changeIndex] } header, blockID, responses := makeHeaderPartsResponsesParams(t, state, &cp) - validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.ValidatorUpdates) require.NoError(t, err) - rs, err := abci.MarshalTxResults(responses.FinalizeBlock.TxResults) + rs, err := abci.MarshalTxResults(responses.TxResults) require.NoError(t, err) h := merkle.HashFromByteSlices(rs) - state, err = state.Update(blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates) + state, err = state.Update(blockID, &header, h, responses.ConsensusParamUpdates, validatorUpdates) require.NoError(t, err) err = 
stateStore.Save(state) diff --git a/internal/state/store.go b/internal/state/store.go index a41719c925..dda53f45eb 100644 --- a/internal/state/store.go +++ b/internal/state/store.go @@ -26,15 +26,23 @@ const ( //------------------------------------------------------------------------ +// key prefixes // NB: Before modifying these, cross-check them with those in -// internal/store/store.go -// TODO(thane): Move these and the ones in internal/store/store.go to their own package. +// * internal/store/store.go [0..4, 13] +// * internal/state/store.go [5..8, 14] +// * internal/evidence/pool.go [9..10] +// * light/store/db/db.go [11..12] +// TODO(thane): Move all these to their own package. +// TODO: what about these (they already collide): +// * scripts/scmigrate/migrate.go [3] +// * internal/p2p/peermanager.go [1] const ( // prefixes are unique across all tm db's - prefixValidators = int64(5) - prefixConsensusParams = int64(6) - prefixABCIResponses = int64(7) - prefixState = int64(8) + prefixValidators = int64(5) + prefixConsensusParams = int64(6) + prefixABCIResponses = int64(7) // deprecated in v0.36 + prefixState = int64(8) + prefixFinalizeBlockResponses = int64(14) ) func encodeKey(prefix int64, height int64) []byte { @@ -57,6 +65,10 @@ func abciResponsesKey(height int64) []byte { return encodeKey(prefixABCIResponses, height) } +func finalizeBlockResponsesKey(height int64) []byte { + return encodeKey(prefixFinalizeBlockResponses, height) +} + // stateKey should never change after being set in init() var stateKey []byte @@ -81,14 +93,14 @@ type Store interface { Load() (State, error) // LoadValidators loads the validator set at a given height LoadValidators(int64) (*types.ValidatorSet, error) - // LoadABCIResponses loads the abciResponse for a given height - LoadABCIResponses(int64) (*tmstate.ABCIResponses, error) + // LoadFinalizeBlockResponses loads the responses to FinalizeBlock for a given height + LoadFinalizeBlockResponses(int64) (*abci.ResponseFinalizeBlock, 
error) // LoadConsensusParams loads the consensus params for a given height LoadConsensusParams(int64) (types.ConsensusParams, error) // Save overwrites the previous state with the updated one Save(State) error - // SaveABCIResponses saves ABCIResponses for a given height - SaveABCIResponses(int64, *tmstate.ABCIResponses) error + // SaveFinalizeBlockResponses saves responses to FinalizeBlock for a given height + SaveFinalizeBlockResponses(int64, *abci.ResponseFinalizeBlock) error // SaveValidatorSet saves the validator set at a given height SaveValidatorSets(int64, int64, *types.ValidatorSet) error // Bootstrap is used for bootstrapping state when not starting from a initial height. @@ -247,7 +259,7 @@ func (store dbStore) PruneStates(retainHeight int64) error { return err } - if err := store.pruneABCIResponses(retainHeight); err != nil { + if err := store.pruneFinalizeBlockResponses(retainHeight); err != nil { return err } @@ -338,10 +350,15 @@ func (store dbStore) pruneConsensusParams(retainHeight int64) error { ) } -// pruneABCIResponses calls a reverse iterator from base height to retain height batch deleting -// all abci responses in between -func (store dbStore) pruneABCIResponses(height int64) error { - return store.pruneRange(abciResponsesKey(1), abciResponsesKey(height)) +// pruneFinalizeBlockResponses calls a reverse iterator from base height to retain height +// batch deleting all responses to FinalizeBlock, and legacy ABCI responses, in between +func (store dbStore) pruneFinalizeBlockResponses(height int64) error { + err := store.pruneRange(finalizeBlockResponsesKey(1), finalizeBlockResponsesKey(height)) + if err == nil { + // Remove any stale legacy ABCI responses + err = store.pruneRange(abciResponsesKey(1), abciResponsesKey(height)) + } + return err } // pruneRange is a generic function for deleting a range of keys in reverse order. 
@@ -408,60 +425,63 @@ func (store dbStore) reverseBatchDelete(batch dbm.Batch, start, end []byte) ([]b //------------------------------------------------------------------------ -// LoadABCIResponses loads the ABCIResponses for the given height from the -// database. If not found, ErrNoABCIResponsesForHeight is returned. +// LoadFinalizeBlockResponses loads the responses to FinalizeBlock for the +// given height from the database. If not found, +// ErrNoFinalizeBlockResponsesForHeight is returned. // -// This is useful for recovering from crashes where we called app.Commit and -// before we called s.Save(). It can also be used to produce Merkle proofs of -// the result of txs. -func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, error) { - buf, err := store.db.Get(abciResponsesKey(height)) +// This is useful for recovering from crashes where we called app.Commit +// and before we called s.Save(). It can also be used to produce Merkle +// proofs of the result of txs. +func (store dbStore) LoadFinalizeBlockResponses(height int64) (*abci.ResponseFinalizeBlock, error) { + buf, err := store.db.Get(finalizeBlockResponsesKey(height)) if err != nil { return nil, err } if len(buf) == 0 { - - return nil, ErrNoABCIResponsesForHeight{height} + return nil, ErrNoFinalizeBlockResponsesForHeight{height} } - abciResponses := new(tmstate.ABCIResponses) - err = abciResponses.Unmarshal(buf) + finalizeBlockResponses := new(abci.ResponseFinalizeBlock) + err = finalizeBlockResponses.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } // TODO: ensure that buf is completely read. - return abciResponses, nil + return finalizeBlockResponses, nil } -// SaveABCIResponses persists the ABCIResponses to the database. +// SaveFinalizeBlockResponses persists to the database the responses to FinalizeBlock. 
// This is useful in case we crash after app.Commit and before s.Save(). // Responses are indexed by height so they can also be loaded later to produce // Merkle proofs. // // Exposed for testing. -func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error { - return store.saveABCIResponses(height, abciResponses) +func (store dbStore) SaveFinalizeBlockResponses(height int64, finalizeBlockResponses *abci.ResponseFinalizeBlock) error { + return store.saveFinalizeBlockResponses(height, finalizeBlockResponses) } -func (store dbStore) saveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error { +func (store dbStore) saveFinalizeBlockResponses(height int64, finalizeBlockResponses *abci.ResponseFinalizeBlock) error { var dtxs []*abci.ExecTxResult // strip nil values, - for _, tx := range abciResponses.FinalizeBlock.TxResults { + for _, tx := range finalizeBlockResponses.TxResults { if tx != nil { dtxs = append(dtxs, tx) } } - abciResponses.FinalizeBlock.TxResults = dtxs + finalizeBlockResponses.TxResults = dtxs - bz, err := abciResponses.Marshal() + bz, err := finalizeBlockResponses.Marshal() if err != nil { return err } + if len(bz) == 0 { + return ErrNoFinalizeBlockResponsesForHeight{height} + } - return store.db.SetSync(abciResponsesKey(height), bz) + return store.db.SetSync(finalizeBlockResponsesKey(height), bz) } // SaveValidatorSets is used to save the validator set over multiple heights. 
diff --git a/internal/state/store_test.go b/internal/state/store_test.go index 59084fd100..f6982b5bdf 100644 --- a/internal/state/store_test.go +++ b/internal/state/store_test.go @@ -17,7 +17,6 @@ import ( sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -236,15 +235,14 @@ func TestPruneStates(t *testing.T) { err := stateStore.Save(state) require.NoError(t, err) - err = stateStore.SaveABCIResponses(h, &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ - TxResults: []*abci.ExecTxResult{ - {Data: []byte{1}}, - {Data: []byte{2}}, - {Data: []byte{3}}, - }, + err = stateStore.SaveFinalizeBlockResponses(h, &abci.ResponseFinalizeBlock{ + TxResults: []*abci.ExecTxResult{ + {Data: []byte{1}}, + {Data: []byte{2}}, + {Data: []byte{3}}, }, - }) + }, + ) require.NoError(t, err) } @@ -265,9 +263,9 @@ func TestPruneStates(t *testing.T) { require.NoError(t, err, h) require.NotNil(t, params, h) - abci, err := stateStore.LoadABCIResponses(h) + finRes, err := stateStore.LoadFinalizeBlockResponses(h) require.NoError(t, err, h) - require.NotNil(t, abci, h) + require.NotNil(t, finRes, h) } emptyParams := types.ConsensusParams{} @@ -291,9 +289,9 @@ func TestPruneStates(t *testing.T) { require.Equal(t, emptyParams, params, h) } - abci, err := stateStore.LoadABCIResponses(h) + finRes, err := stateStore.LoadFinalizeBlockResponses(h) require.Error(t, err, h) - require.Nil(t, abci, h) + require.Nil(t, finRes, h) } }) } diff --git a/internal/store/store.go b/internal/store/store.go index 1ba7e398db..eacbd73aa0 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -650,8 +650,14 @@ func (bs *BlockStore) Close() error { // key prefixes // NB: Before modifying these, cross-check them with those in -// internal/state/store.go -// 
TODO(thane): Move these and the ones in internal/state/store.go to their own package. +// * internal/store/store.go [0..4, 13] +// * internal/state/store.go [5..8, 14] +// * internal/evidence/pool.go [9..10] +// * light/store/db/db.go [11..12] +// TODO(thane): Move all these to their own package. +// TODO: what about these (they already collide): +// * scripts/scmigrate/migrate.go [3] --> Looks OK, as it is also called "SeenCommit" +// * internal/p2p/peermanager.go [1] const ( // prefixes are unique across all tm db's prefixBlockMeta = int64(0) @@ -659,7 +665,7 @@ const ( prefixBlockCommit = int64(2) prefixSeenCommit = int64(3) prefixBlockHash = int64(4) - prefixExtCommit = int64(9) // 5..8 are used by state/store + prefixExtCommit = int64(13) ) func blockMetaKey(height int64) []byte { diff --git a/light/store/db/db.go b/light/store/db/db.go index c364e17092..17ee6766d9 100644 --- a/light/store/db/db.go +++ b/light/store/db/db.go @@ -13,6 +13,16 @@ import ( "github.com/tendermint/tendermint/types" ) +// key prefixes +// NB: Before modifying these, cross-check them with those in +// * internal/store/store.go [0..4, 13] +// * internal/state/store.go [5..8, 14] +// * internal/evidence/pool.go [9..10] +// * light/store/db/db.go [11..12] +// TODO(sergio): Move all these to their own package. 
+// TODO: what about these (they already collide): +// * scripts/scmigrate/migrate.go [3] +// * internal/p2p/peermanager.go [1] const ( prefixLightBlock = int64(11) prefixSize = int64(12) diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index af5c64ecf8..7d86d936f2 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -9,8 +9,7 @@ import ( proto "github.com/gogo/protobuf/proto" _ "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/tendermint/tendermint/abci/types" - types1 "github.com/tendermint/tendermint/proto/tendermint/types" + types "github.com/tendermint/tendermint/proto/tendermint/types" version "github.com/tendermint/tendermint/proto/tendermint/version" io "io" math "math" @@ -30,64 +29,17 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// ABCIResponses retains the responses -// of the various ABCI calls during block processing. -// It is persisted to disk for each height before calling Commit. 
-type ABCIResponses struct { - FinalizeBlock *types.ResponseFinalizeBlock `protobuf:"bytes,2,opt,name=finalize_block,json=finalizeBlock,proto3" json:"finalize_block,omitempty"` -} - -func (m *ABCIResponses) Reset() { *m = ABCIResponses{} } -func (m *ABCIResponses) String() string { return proto.CompactTextString(m) } -func (*ABCIResponses) ProtoMessage() {} -func (*ABCIResponses) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{0} -} -func (m *ABCIResponses) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ABCIResponses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ABCIResponses.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ABCIResponses) XXX_Merge(src proto.Message) { - xxx_messageInfo_ABCIResponses.Merge(m, src) -} -func (m *ABCIResponses) XXX_Size() int { - return m.Size() -} -func (m *ABCIResponses) XXX_DiscardUnknown() { - xxx_messageInfo_ABCIResponses.DiscardUnknown(m) -} - -var xxx_messageInfo_ABCIResponses proto.InternalMessageInfo - -func (m *ABCIResponses) GetFinalizeBlock() *types.ResponseFinalizeBlock { - if m != nil { - return m.FinalizeBlock - } - return nil -} - // ValidatorsInfo represents the latest validator set, or the last height it changed type ValidatorsInfo struct { - ValidatorSet *types1.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` - LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` + ValidatorSet *types.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" 
json:"last_height_changed,omitempty"` } func (m *ValidatorsInfo) Reset() { *m = ValidatorsInfo{} } func (m *ValidatorsInfo) String() string { return proto.CompactTextString(m) } func (*ValidatorsInfo) ProtoMessage() {} func (*ValidatorsInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{1} + return fileDescriptor_ccfacf933f22bf93, []int{0} } func (m *ValidatorsInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -116,7 +68,7 @@ func (m *ValidatorsInfo) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatorsInfo proto.InternalMessageInfo -func (m *ValidatorsInfo) GetValidatorSet() *types1.ValidatorSet { +func (m *ValidatorsInfo) GetValidatorSet() *types.ValidatorSet { if m != nil { return m.ValidatorSet } @@ -132,15 +84,15 @@ func (m *ValidatorsInfo) GetLastHeightChanged() int64 { // ConsensusParamsInfo represents the latest consensus params, or the last height it changed type ConsensusParamsInfo struct { - ConsensusParams types1.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` - LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` + ConsensusParams types.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` } func (m *ConsensusParamsInfo) Reset() { *m = ConsensusParamsInfo{} } func (m *ConsensusParamsInfo) String() string { return proto.CompactTextString(m) } func (*ConsensusParamsInfo) ProtoMessage() {} func (*ConsensusParamsInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{2} + return fileDescriptor_ccfacf933f22bf93, []int{1} } func (m *ConsensusParamsInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -169,11 +121,11 @@ 
func (m *ConsensusParamsInfo) XXX_DiscardUnknown() { var xxx_messageInfo_ConsensusParamsInfo proto.InternalMessageInfo -func (m *ConsensusParamsInfo) GetConsensusParams() types1.ConsensusParams { +func (m *ConsensusParamsInfo) GetConsensusParams() types.ConsensusParams { if m != nil { return m.ConsensusParams } - return types1.ConsensusParams{} + return types.ConsensusParams{} } func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 { @@ -192,7 +144,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{3} + return fileDescriptor_ccfacf933f22bf93, []int{2} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -241,23 +193,23 @@ type State struct { ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` InitialHeight int64 `protobuf:"varint,14,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) - LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockID types1.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` - LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` + LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockID types.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` + LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` // LastValidators is used to validate block.LastCommit. 
// Validators are persisted to the database separately every time they change, // so we can query for historical validator sets. // Note that if s.LastBlockHeight causes a valset change, // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 // Extra +1 due to nextValSet delay. - NextValidators *types1.ValidatorSet `protobuf:"bytes,6,opt,name=next_validators,json=nextValidators,proto3" json:"next_validators,omitempty"` - Validators *types1.ValidatorSet `protobuf:"bytes,7,opt,name=validators,proto3" json:"validators,omitempty"` - LastValidators *types1.ValidatorSet `protobuf:"bytes,8,opt,name=last_validators,json=lastValidators,proto3" json:"last_validators,omitempty"` - LastHeightValidatorsChanged int64 `protobuf:"varint,9,opt,name=last_height_validators_changed,json=lastHeightValidatorsChanged,proto3" json:"last_height_validators_changed,omitempty"` + NextValidators *types.ValidatorSet `protobuf:"bytes,6,opt,name=next_validators,json=nextValidators,proto3" json:"next_validators,omitempty"` + Validators *types.ValidatorSet `protobuf:"bytes,7,opt,name=validators,proto3" json:"validators,omitempty"` + LastValidators *types.ValidatorSet `protobuf:"bytes,8,opt,name=last_validators,json=lastValidators,proto3" json:"last_validators,omitempty"` + LastHeightValidatorsChanged int64 `protobuf:"varint,9,opt,name=last_height_validators_changed,json=lastHeightValidatorsChanged,proto3" json:"last_height_validators_changed,omitempty"` // Consensus parameters used for validating blocks. // Changes returned by EndBlock and updated after Commit. 
- ConsensusParams types1.ConsensusParams `protobuf:"bytes,10,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` - LastHeightConsensusParamsChanged int64 `protobuf:"varint,11,opt,name=last_height_consensus_params_changed,json=lastHeightConsensusParamsChanged,proto3" json:"last_height_consensus_params_changed,omitempty"` + ConsensusParams types.ConsensusParams `protobuf:"bytes,10,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightConsensusParamsChanged int64 `protobuf:"varint,11,opt,name=last_height_consensus_params_changed,json=lastHeightConsensusParamsChanged,proto3" json:"last_height_consensus_params_changed,omitempty"` // Merkle root of the results from executing prev block LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` // the latest AppHash we've received from calling abci.Commit() @@ -268,7 +220,7 @@ func (m *State) Reset() { *m = State{} } func (m *State) String() string { return proto.CompactTextString(m) } func (*State) ProtoMessage() {} func (*State) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{4} + return fileDescriptor_ccfacf933f22bf93, []int{3} } func (m *State) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -325,11 +277,11 @@ func (m *State) GetLastBlockHeight() int64 { return 0 } -func (m *State) GetLastBlockID() types1.BlockID { +func (m *State) GetLastBlockID() types.BlockID { if m != nil { return m.LastBlockID } - return types1.BlockID{} + return types.BlockID{} } func (m *State) GetLastBlockTime() time.Time { @@ -339,21 +291,21 @@ func (m *State) GetLastBlockTime() time.Time { return time.Time{} } -func (m *State) GetNextValidators() *types1.ValidatorSet { +func (m *State) GetNextValidators() *types.ValidatorSet { if m != nil { return m.NextValidators } return nil } -func (m *State) GetValidators() *types1.ValidatorSet { +func (m *State) 
GetValidators() *types.ValidatorSet { if m != nil { return m.Validators } return nil } -func (m *State) GetLastValidators() *types1.ValidatorSet { +func (m *State) GetLastValidators() *types.ValidatorSet { if m != nil { return m.LastValidators } @@ -367,11 +319,11 @@ func (m *State) GetLastHeightValidatorsChanged() int64 { return 0 } -func (m *State) GetConsensusParams() types1.ConsensusParams { +func (m *State) GetConsensusParams() types.ConsensusParams { if m != nil { return m.ConsensusParams } - return types1.ConsensusParams{} + return types.ConsensusParams{} } func (m *State) GetLastHeightConsensusParamsChanged() int64 { @@ -396,7 +348,6 @@ func (m *State) GetAppHash() []byte { } func init() { - proto.RegisterType((*ABCIResponses)(nil), "tendermint.state.ABCIResponses") proto.RegisterType((*ValidatorsInfo)(nil), "tendermint.state.ValidatorsInfo") proto.RegisterType((*ConsensusParamsInfo)(nil), "tendermint.state.ConsensusParamsInfo") proto.RegisterType((*Version)(nil), "tendermint.state.Version") @@ -406,87 +357,49 @@ func init() { func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } var fileDescriptor_ccfacf933f22bf93 = []byte{ - // 717 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4d, 0x6f, 0xd3, 0x4a, - 0x14, 0x8d, 0x5f, 0x3f, 0x92, 0x4c, 0x9a, 0xa4, 0x6f, 0xfa, 0x16, 0x69, 0xfa, 0xea, 0xe4, 0x45, - 0x8f, 0xaa, 0x62, 0xe1, 0x48, 0xb0, 0x40, 0x6c, 0x90, 0x9a, 0x54, 0x50, 0x4b, 0x05, 0x81, 0x8b, - 0xba, 0x60, 0x81, 0x35, 0x71, 0x26, 0xf6, 0x08, 0xc7, 0xb6, 0x3c, 0x93, 0xf2, 0xb1, 0x67, 0xdf, - 0x2d, 0xff, 0xa8, 0xcb, 0x2e, 0x59, 0x15, 0x48, 0xff, 0x08, 0x9a, 0x0f, 0xdb, 0x93, 0x84, 0x45, - 0x11, 0xbb, 0xcc, 0x3d, 0xe7, 0x9e, 0x7b, 0xe6, 0xce, 0xbd, 0x31, 0xf8, 0x97, 0xe1, 0x68, 0x8c, - 0xd3, 0x29, 0x89, 0x58, 0x9f, 0x32, 0xc4, 0x70, 0x9f, 0x7d, 0x4c, 0x30, 0xb5, 0x92, 0x34, 0x66, - 0x31, 0xdc, 0x2e, 0x50, 0x4b, 0xa0, 0xed, 0x7f, 0xfc, 0xd8, 0x8f, 
0x05, 0xd8, 0xe7, 0xbf, 0x24, - 0xaf, 0xbd, 0xa7, 0xa9, 0xa0, 0x91, 0x47, 0x74, 0x91, 0xb6, 0x5e, 0x42, 0xc4, 0x17, 0xd0, 0xee, - 0x0a, 0x7a, 0x81, 0x42, 0x32, 0x46, 0x2c, 0x4e, 0x15, 0x63, 0x7f, 0x85, 0x91, 0xa0, 0x14, 0x4d, - 0x33, 0x01, 0x53, 0x83, 0x2f, 0x70, 0x4a, 0x49, 0x1c, 0x2d, 0x14, 0xe8, 0xf8, 0x71, 0xec, 0x87, - 0xb8, 0x2f, 0x4e, 0xa3, 0xd9, 0xa4, 0xcf, 0xc8, 0x14, 0x53, 0x86, 0xa6, 0x89, 0x24, 0xf4, 0xde, - 0x82, 0xfa, 0xd1, 0x60, 0x68, 0x3b, 0x98, 0x26, 0x71, 0x44, 0x31, 0x85, 0xcf, 0x41, 0x63, 0x42, - 0x22, 0x14, 0x92, 0x4f, 0xd8, 0x1d, 0x85, 0xb1, 0xf7, 0xae, 0xf5, 0x57, 0xd7, 0x38, 0xac, 0x3d, - 0x38, 0xb0, 0xb4, 0x76, 0xf0, 0x6b, 0x5a, 0x59, 0xce, 0x53, 0x45, 0x1f, 0x70, 0xb6, 0x53, 0x9f, - 0xe8, 0xc7, 0xde, 0x67, 0x03, 0x34, 0xce, 0xb3, 0x3b, 0x51, 0x3b, 0x9a, 0xc4, 0x70, 0x08, 0xea, - 0xf9, 0x2d, 0x5d, 0x8a, 0x59, 0xcb, 0x10, 0x05, 0x4c, 0xbd, 0x80, 0xbc, 0x43, 0x9e, 0x78, 0x86, - 0x99, 0xb3, 0x75, 0xa1, 0x9d, 0xa0, 0x05, 0x76, 0x42, 0x44, 0x99, 0x1b, 0x60, 0xe2, 0x07, 0xcc, - 0xf5, 0x02, 0x14, 0xf9, 0x78, 0x2c, 0xbc, 0xae, 0x39, 0x7f, 0x73, 0xe8, 0x44, 0x20, 0x43, 0x09, - 0xf4, 0xbe, 0x18, 0x60, 0x67, 0xc8, 0xdd, 0x46, 0x74, 0x46, 0x5f, 0x8a, 0x16, 0x0a, 0x33, 0x0e, - 0xd8, 0xf6, 0xb2, 0xb0, 0x2b, 0x5b, 0xab, 0xfc, 0xfc, 0xb7, 0xea, 0x67, 0x49, 0x60, 0xb0, 0x7e, - 0x75, 0xd3, 0x29, 0x39, 0x4d, 0x6f, 0x31, 0xfc, 0xdb, 0xde, 0x02, 0x50, 0x3e, 0x97, 0x6f, 0x07, - 0x8f, 0x40, 0x35, 0x57, 0x53, 0x3e, 0xf6, 0x75, 0x1f, 0xea, 0x8d, 0x0b, 0x27, 0xca, 0x43, 0x91, - 0x05, 0xdb, 0xa0, 0x42, 0xe3, 0x09, 0x7b, 0x8f, 0x52, 0x2c, 0x4a, 0x56, 0x9d, 0xfc, 0xdc, 0xfb, - 0xb1, 0x09, 0x36, 0xce, 0xf8, 0x28, 0xc3, 0xc7, 0xa0, 0xac, 0xb4, 0x54, 0x99, 0x5d, 0x6b, 0x79, - 0xdc, 0x2d, 0x65, 0x4a, 0x95, 0xc8, 0xf8, 0xf0, 0x00, 0x54, 0xbc, 0x00, 0x91, 0xc8, 0x25, 0xf2, - 0x4e, 0xd5, 0x41, 0x6d, 0x7e, 0xd3, 0x29, 0x0f, 0x79, 0xcc, 0x3e, 0x76, 0xca, 0x02, 0xb4, 0xc7, - 0xf0, 0x1e, 0x68, 0x90, 0x88, 0x30, 0x82, 0x42, 0xd5, 0x89, 0x56, 0x43, 0x74, 0xa0, 0xae, 0xa2, - 0xb2, 
0x09, 0xf0, 0x3e, 0x10, 0x2d, 0x91, 0xc3, 0x96, 0x31, 0xd7, 0x04, 0xb3, 0xc9, 0x01, 0x31, - 0x47, 0x8a, 0xeb, 0x80, 0xba, 0xc6, 0x25, 0xe3, 0xd6, 0xfa, 0xaa, 0x77, 0xf9, 0x54, 0x22, 0xcb, - 0x3e, 0x1e, 0xec, 0x70, 0xef, 0xf3, 0x9b, 0x4e, 0xed, 0x34, 0x93, 0xb2, 0x8f, 0x9d, 0x5a, 0xae, - 0x6b, 0x8f, 0xe1, 0x29, 0x68, 0x6a, 0x9a, 0x7c, 0x3f, 0x5a, 0x1b, 0x42, 0xb5, 0x6d, 0xc9, 0xe5, - 0xb1, 0xb2, 0xe5, 0xb1, 0x5e, 0x67, 0xcb, 0x33, 0xa8, 0x70, 0xd9, 0xcb, 0x6f, 0x1d, 0xc3, 0xa9, - 0xe7, 0x5a, 0x1c, 0x85, 0xcf, 0x40, 0x33, 0xc2, 0x1f, 0x98, 0x9b, 0x0f, 0x2b, 0x6d, 0x6d, 0xde, - 0x69, 0xbc, 0x1b, 0x3c, 0xad, 0xd8, 0x14, 0xf8, 0x04, 0x00, 0x4d, 0xa3, 0x7c, 0x27, 0x0d, 0x2d, - 0x83, 0x1b, 0x11, 0xd7, 0xd2, 0x44, 0x2a, 0x77, 0x33, 0xc2, 0xd3, 0x34, 0x23, 0x43, 0x60, 0xea, - 0xd3, 0x5c, 0xe8, 0xe5, 0x83, 0x5d, 0x15, 0x8f, 0xb5, 0x57, 0x0c, 0x76, 0x91, 0xad, 0x46, 0xfc, - 0x97, 0x6b, 0x06, 0xfe, 0x70, 0xcd, 0x5e, 0x80, 0xff, 0x17, 0xd6, 0x6c, 0x49, 0x3f, 0xb7, 0x57, - 0x13, 0xf6, 0xba, 0xda, 0xde, 0x2d, 0x0a, 0x65, 0x1e, 0xb3, 0x41, 0x4c, 0x31, 0x9d, 0x85, 0x8c, - 0xba, 0x01, 0xa2, 0x41, 0x6b, 0xab, 0x6b, 0x1c, 0x6e, 0xc9, 0x41, 0x74, 0x64, 0xfc, 0x04, 0xd1, - 0x00, 0xee, 0x82, 0x0a, 0x4a, 0x12, 0x49, 0xa9, 0x0b, 0x4a, 0x19, 0x25, 0x09, 0x87, 0x06, 0xaf, - 0xae, 0xe6, 0xa6, 0x71, 0x3d, 0x37, 0x8d, 0xef, 0x73, 0xd3, 0xb8, 0xbc, 0x35, 0x4b, 0xd7, 0xb7, - 0x66, 0xe9, 0xeb, 0xad, 0x59, 0x7a, 0xf3, 0xc8, 0x27, 0x2c, 0x98, 0x8d, 0x2c, 0x2f, 0x9e, 0xf6, - 0xf5, 0xbf, 0xf5, 0xe2, 0xa7, 0xfc, 0xb6, 0x2c, 0x7f, 0x95, 0x46, 0x9b, 0x22, 0xfe, 0xf0, 0x67, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x88, 0xe8, 0x4c, 0x4d, 0xb0, 0x06, 0x00, 0x00, -} - -func (m *ABCIResponses) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ABCIResponses) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} 
- -func (m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.FinalizeBlock != nil { - { - size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil + // 662 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x6f, 0xd3, 0x30, + 0x1c, 0x6d, 0xd8, 0x9f, 0xb6, 0xee, 0xda, 0x0e, 0x8f, 0x43, 0x56, 0x58, 0x5a, 0x26, 0x40, 0x13, + 0x87, 0x54, 0x82, 0x03, 0xe2, 0x82, 0x44, 0x3b, 0x89, 0x55, 0x9a, 0x10, 0x64, 0x68, 0x07, 0x2e, + 0x91, 0xdb, 0x78, 0x89, 0x45, 0x1a, 0x47, 0xb1, 0x3b, 0xe0, 0x03, 0x70, 0xdf, 0x95, 0x6f, 0xb4, + 0xe3, 0x8e, 0x9c, 0x06, 0x74, 0x5f, 0x04, 0xf9, 0x4f, 0x12, 0xb7, 0xe5, 0x30, 0xc4, 0xad, 0xfe, + 0xbd, 0xf7, 0x7b, 0xbf, 0x67, 0xfb, 0x39, 0x05, 0x0f, 0x38, 0x4e, 0x02, 0x9c, 0x4d, 0x49, 0xc2, + 0xfb, 0x8c, 0x23, 0x8e, 0xfb, 0xfc, 0x6b, 0x8a, 0x99, 0x9b, 0x66, 0x94, 0x53, 0xb8, 0x5d, 0xa2, + 0xae, 0x44, 0x3b, 0xf7, 0x42, 0x1a, 0x52, 0x09, 0xf6, 0xc5, 0x2f, 0xc5, 0xeb, 0x98, 0x2a, 0xb2, + 0xdf, 0x54, 0xe9, 0xf4, 0x56, 0xd0, 0x73, 0x14, 0x93, 0x00, 0x71, 0x9a, 0x69, 0xc6, 0xde, 0x0a, + 0x23, 0x45, 0x19, 0x9a, 0xe6, 0x02, 0x8e, 0x01, 0x9f, 0xe3, 0x8c, 0x11, 0x9a, 0x2c, 0x0c, 0xe8, + 0x86, 0x94, 0x86, 0x31, 0xee, 0xcb, 0xd5, 0x78, 0x76, 0xd6, 0xe7, 0x64, 0x8a, 0x19, 0x47, 0xd3, + 0x54, 0x11, 0xf6, 0xbf, 0x59, 0xa0, 0x75, 0x9a, 0xcf, 0x64, 0xa3, 0xe4, 0x8c, 0xc2, 0x21, 0x68, + 0x16, 0x2e, 0x7c, 0x86, 0xb9, 0x6d, 0xf5, 0xac, 0x83, 0xc6, 0x33, 0xc7, 0x35, 0xb6, 0xac, 0x66, + 0x14, 0x8d, 0x27, 0x98, 0x7b, 0x5b, 0xe7, 0xc6, 0x0a, 0xba, 0x60, 0x27, 0x46, 0x8c, 0xfb, 0x11, + 0x26, 0x61, 0xc4, 0xfd, 0x49, 0x84, 0x92, 0x10, 0x07, 0xf6, 0x9d, 0x9e, 0x75, 0xb0, 0xe6, 0xdd, + 0x15, 0xd0, 0x91, 0x44, 0x86, 0x0a, 0xd8, 0xff, 0x6e, 0x81, 0x9d, 0x21, 0x4d, 0x18, 0x4e, 0xd8, + 0x8c, 
0xbd, 0x93, 0x5b, 0x94, 0x66, 0x3c, 0xb0, 0x3d, 0xc9, 0xcb, 0xbe, 0xda, 0xba, 0xf6, 0xf3, + 0x70, 0xd5, 0xcf, 0x92, 0xc0, 0x60, 0xfd, 0xf2, 0xba, 0x5b, 0xf1, 0xda, 0x93, 0xc5, 0xf2, 0x3f, + 0x7b, 0x8b, 0x40, 0xf5, 0x54, 0x9d, 0x2d, 0x7c, 0x0d, 0xea, 0x85, 0x9a, 0xf6, 0xb1, 0x67, 0xfa, + 0xd0, 0x77, 0x50, 0x3a, 0xd1, 0x1e, 0xca, 0x2e, 0xd8, 0x01, 0x35, 0x46, 0xcf, 0xf8, 0x67, 0x94, + 0x61, 0x39, 0xb2, 0xee, 0x15, 0xeb, 0xfd, 0xdf, 0x9b, 0x60, 0xe3, 0x44, 0xa4, 0x09, 0xbe, 0x04, + 0x55, 0xad, 0xa5, 0xc7, 0xec, 0xba, 0xcb, 0x89, 0x73, 0xb5, 0x29, 0x3d, 0x22, 0xe7, 0xc3, 0x27, + 0xa0, 0x36, 0x89, 0x10, 0x49, 0x7c, 0xa2, 0xf6, 0x54, 0x1f, 0x34, 0xe6, 0xd7, 0xdd, 0xea, 0x50, + 0xd4, 0x46, 0x87, 0x5e, 0x55, 0x82, 0xa3, 0x00, 0x3e, 0x06, 0x2d, 0x92, 0x10, 0x4e, 0x50, 0xac, + 0x4f, 0xc2, 0x6e, 0xc9, 0x13, 0x68, 0xea, 0xaa, 0x3a, 0x04, 0xf8, 0x14, 0xc8, 0x23, 0xf1, 0xc7, + 0x31, 0x9d, 0x7c, 0xca, 0x99, 0x6b, 0x92, 0xd9, 0x16, 0xc0, 0x40, 0xd4, 0x35, 0xd7, 0x03, 0x4d, + 0x83, 0x4b, 0x02, 0x7b, 0x7d, 0xd5, 0xbb, 0xba, 0x2a, 0xd9, 0x35, 0x3a, 0x1c, 0xec, 0x08, 0xef, + 0xf3, 0xeb, 0x6e, 0xe3, 0x38, 0x97, 0x1a, 0x1d, 0x7a, 0x8d, 0x42, 0x77, 0x14, 0xc0, 0x63, 0xd0, + 0x36, 0x34, 0x45, 0x7e, 0xed, 0x0d, 0xa9, 0xda, 0x71, 0x55, 0xb8, 0xdd, 0x3c, 0xdc, 0xee, 0x87, + 0x3c, 0xdc, 0x83, 0x9a, 0x90, 0xbd, 0xf8, 0xd9, 0xb5, 0xbc, 0x66, 0xa1, 0x25, 0x50, 0xf8, 0x06, + 0xb4, 0x13, 0xfc, 0x85, 0xfb, 0x45, 0x58, 0x99, 0xbd, 0x79, 0xab, 0x78, 0xb7, 0x44, 0x5b, 0xf9, + 0x52, 0xe0, 0x2b, 0x00, 0x0c, 0x8d, 0xea, 0xad, 0x34, 0x8c, 0x0e, 0x61, 0x44, 0x6e, 0xcb, 0x10, + 0xa9, 0xdd, 0xce, 0x88, 0x68, 0x33, 0x8c, 0x0c, 0x81, 0x63, 0xa6, 0xb9, 0xd4, 0x2b, 0x82, 0x5d, + 0x97, 0x97, 0x75, 0xbf, 0x0c, 0x76, 0xd9, 0xad, 0x23, 0xfe, 0xd7, 0x67, 0x06, 0xfe, 0xf3, 0x99, + 0xbd, 0x05, 0x8f, 0x16, 0x9e, 0xd9, 0x92, 0x7e, 0x61, 0xaf, 0x21, 0xed, 0xf5, 0x8c, 0x77, 0xb7, + 0x28, 0x94, 0x7b, 0xcc, 0x83, 0x98, 0x61, 0x36, 0x8b, 0x39, 0xf3, 0x23, 0xc4, 0x22, 0x7b, 0xab, + 0x67, 0x1d, 0x6c, 0xa9, 0x20, 0x7a, 0xaa, 
0x7e, 0x84, 0x58, 0x04, 0x77, 0x41, 0x0d, 0xa5, 0xa9, + 0xa2, 0x34, 0x25, 0xa5, 0x8a, 0xd2, 0x54, 0x40, 0x83, 0xf7, 0x97, 0x73, 0xc7, 0xba, 0x9a, 0x3b, + 0xd6, 0xaf, 0xb9, 0x63, 0x5d, 0xdc, 0x38, 0x95, 0xab, 0x1b, 0xa7, 0xf2, 0xe3, 0xc6, 0xa9, 0x7c, + 0x7c, 0x11, 0x12, 0x1e, 0xcd, 0xc6, 0xee, 0x84, 0x4e, 0xfb, 0xe6, 0x67, 0xb7, 0xfc, 0xa9, 0x3e, + 0xef, 0xcb, 0x7f, 0x0c, 0xe3, 0x4d, 0x59, 0x7f, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x3f, 0xca, + 0x73, 0xb6, 0x33, 0x06, 0x00, 0x00, } func (m *ValidatorsInfo) Marshal() (dAtA []byte, err error) { @@ -702,12 +615,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) - if err9 != nil { - return 0, err9 + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) + if err8 != nil { + return 0, err8 } - i -= n9 - i = encodeVarintTypes(dAtA, i, uint64(n9)) + i -= n8 + i = encodeVarintTypes(dAtA, i, uint64(n8)) i-- dAtA[i] = 0x2a { @@ -756,19 +669,6 @@ func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *ABCIResponses) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FinalizeBlock != nil { - l = m.FinalizeBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - func (m *ValidatorsInfo) Size() (n int) { if m == nil { return 0 @@ -873,92 +773,6 @@ func sovTypes(x uint64) (n int) { func sozTypes(x uint64) (n int) { return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *ABCIResponses) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ABCIResponses: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ABCIResponses: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FinalizeBlock == nil { - m.FinalizeBlock = &types.ResponseFinalizeBlock{} - } - if err := m.FinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1018,7 +832,7 @@ func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ValidatorSet == nil { - m.ValidatorSet = &types1.ValidatorSet{} + m.ValidatorSet = &types.ValidatorSet{} } if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1490,7 +1304,7 @@ func (m *State) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.NextValidators == nil 
{ - m.NextValidators = &types1.ValidatorSet{} + m.NextValidators = &types.ValidatorSet{} } if err := m.NextValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1526,7 +1340,7 @@ func (m *State) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Validators == nil { - m.Validators = &types1.ValidatorSet{} + m.Validators = &types.ValidatorSet{} } if err := m.Validators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1562,7 +1376,7 @@ func (m *State) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastValidators == nil { - m.LastValidators = &types1.ValidatorSet{} + m.LastValidators = &types.ValidatorSet{} } if err := m.LastValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/proto/tendermint/state/types.proto b/proto/tendermint/state/types.proto index 35eab761de..5bcbfbf41a 100644 --- a/proto/tendermint/state/types.proto +++ b/proto/tendermint/state/types.proto @@ -4,20 +4,12 @@ package tendermint.state; option go_package = "github.com/tendermint/tendermint/proto/tendermint/state"; import "gogoproto/gogo.proto"; -import "tendermint/abci/types.proto"; import "tendermint/types/types.proto"; import "tendermint/types/validator.proto"; import "tendermint/types/params.proto"; import "tendermint/version/types.proto"; import "google/protobuf/timestamp.proto"; -// ABCIResponses retains the responses -// of the various ABCI calls during block processing. -// It is persisted to disk for each height before calling Commit. 
-message ABCIResponses { - tendermint.abci.ResponseFinalizeBlock finalize_block = 2; -} - // ValidatorsInfo represents the latest validator set, or the last height it changed message ValidatorsInfo { tendermint.types.ValidatorSet validator_set = 1; From a0f3107d28bd05695a05e0413be0ba25bc757abf Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Fri, 3 Jun 2022 10:50:06 +0200 Subject: [PATCH 084/203] e2e: fix initialization of light client (#8682) --- test/e2e/node/main.go | 50 +++++++++++++++++++++------------------- test/e2e/pkg/testnet.go | 6 +++++ test/e2e/runner/setup.go | 1 + 3 files changed, 33 insertions(+), 24 deletions(-) diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index 94c1af1abe..57e92aa9a7 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -68,35 +68,37 @@ func run(ctx context.Context, configFile string) error { return err } - // Start remote signer (must start before node if running builtin). - if cfg.PrivValServer != "" { - if err = startSigner(ctx, logger, cfg); err != nil { - logger.Error("starting signer", - "server", cfg.PrivValServer, - "err", err) - return err - } - if cfg.Protocol == "builtin" { - time.Sleep(1 * time.Second) + if cfg.Mode == string(e2e.ModeLight) { + err = startLightNode(ctx, logger, cfg) + } else { + // Start remote signer (must start before node if running builtin). + if cfg.PrivValServer != "" { + if err = startSigner(ctx, logger, cfg); err != nil { + logger.Error("starting signer", + "server", cfg.PrivValServer, + "err", err) + return err + } + if cfg.Protocol == "builtin" { + time.Sleep(1 * time.Second) + } } - } - // Start app server. - switch cfg.Protocol { - case "socket", "grpc": - err = startApp(ctx, logger, cfg) - case "builtin": - switch cfg.Mode { - case string(e2e.ModeLight): - err = startLightNode(ctx, logger, cfg) - case string(e2e.ModeSeed): - err = startSeedNode(ctx) + // Start app server. 
+ switch cfg.Protocol { + case "socket", "grpc": + err = startApp(ctx, logger, cfg) + case "builtin": + if cfg.Mode == string(e2e.ModeSeed) { + err = startSeedNode(ctx) + } else { + err = startNode(ctx, cfg) + } default: - err = startNode(ctx, cfg) + err = fmt.Errorf("invalid protocol %q", cfg.Protocol) } - default: - err = fmt.Errorf("invalid protocol %q", cfg.Protocol) } + if err != nil { logger.Error("starting node", "protocol", cfg.Protocol, diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 0e87466b07..a5c8dffd67 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -310,6 +310,12 @@ func (t Testnet) Validate() error { default: return errors.New("unsupported KeyType") } + switch t.ABCIProtocol { + case ProtocolBuiltin, ProtocolUNIX, ProtocolTCP, ProtocolGRPC: + default: + return fmt.Errorf("invalid ABCI protocol setting %q", t.ABCIProtocol) + } + for _, node := range t.Nodes { if err := node.Validate(t); err != nil { return fmt.Errorf("invalid node %q: %w", node.Name, err) diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index f3c1ddc0f6..9e6e5cc183 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -354,6 +354,7 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { "vote_extension_delay_ms": node.Testnet.VoteExtensionDelayMS, "finalize_block_delay_ms": node.Testnet.FinalizeBlockDelayMS, } + switch node.Testnet.ABCIProtocol { case e2e.ProtocolUNIX: cfg["listen"] = AppAddressUNIX From db168ca558da2575b5625b19fa9638f186bbca5d Mon Sep 17 00:00:00 2001 From: elias-orijtech <103319121+elias-orijtech@users.noreply.github.com> Date: Fri, 3 Jun 2022 15:02:59 +0200 Subject: [PATCH 085/203] test/fuzz: fix OSS-Fuzz build (#8669) Broken by https://github.com/AdamKorcz/go-118-fuzz-build/commit/7bed8f92ea820f668efab099ec774d548f1873a8. 
- note the purpose of test/fuzz/oss-fuzz-build.sh --- test/fuzz/oss-fuzz-build.sh | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/test/fuzz/oss-fuzz-build.sh b/test/fuzz/oss-fuzz-build.sh index 836253d4d1..fdfbfaf29c 100755 --- a/test/fuzz/oss-fuzz-build.sh +++ b/test/fuzz/oss-fuzz-build.sh @@ -1,19 +1,29 @@ #!/bin/bash - +# This script is invoked by OSS-Fuzz to run fuzz tests against Tendermint core. +# See https://github.com/google/oss-fuzz/blob/master/projects/tendermint/build.sh set -euo pipefail +# Upgrade to Go 1.18. Remove when it's the default. +apt-get update && apt-get install -y wget +wget https://go.dev/dl/go1.18.2.linux-amd64.tar.gz + +mkdir -p temp-go +rm -rf /root/.go/* +tar -C temp-go/ -xzf go1.18.2.linux-amd64.tar.gz +mv temp-go/go/* /root/.go/ + export FUZZ_ROOT="github.com/tendermint/tendermint" build_go_fuzzer() { local function="$1" local fuzzer="$2" - gotip run github.com/orijtech/otils/corpus2ossfuzz@latest -o "$OUT"/"$fuzzer"_seed_corpus.zip -corpus test/fuzz/tests/testdata/fuzz/"$function" + go run github.com/orijtech/otils/corpus2ossfuzz@latest -o "$OUT"/"$fuzzer"_seed_corpus.zip -corpus test/fuzz/tests/testdata/fuzz/"$function" compile_native_go_fuzzer "$FUZZ_ROOT"/test/fuzz/tests "$function" "$fuzzer" } -gotip get github.com/AdamKorcz/go-118-fuzz-build/utils -gotip get github.com/prometheus/common/expfmt@v0.32.1 +go get github.com/AdamKorcz/go-118-fuzz-build/utils +go get github.com/prometheus/common/expfmt@v0.32.1 build_go_fuzzer FuzzP2PSecretConnection fuzz_p2p_secretconnection From 30929cf1902ad4ba9cea76ea02187a4dcb3abec6 Mon Sep 17 00:00:00 2001 From: Evan Forbes <42654277+evan-forbes@users.noreply.github.com> Date: Fri, 3 Jun 2022 09:22:38 -0500 Subject: [PATCH 086/203] p2p: pass maxConns for MaxPeers during node setup (#8684) * pass maxConns for MaxPeers * add upgrade connections to max connections for max peers * change the formula to calculate max peers --- node/setup.go | 6 ++++-- 1 file 
changed, 4 insertions(+), 2 deletions(-) diff --git a/node/setup.go b/node/setup.go index 8089ea4665..51a0482494 100644 --- a/node/setup.go +++ b/node/setup.go @@ -223,11 +223,13 @@ func createPeerManager( maxConns = 64 } + maxUpgradeConns := uint16(4) + options := p2p.PeerManagerOptions{ SelfAddress: selfAddr, MaxConnected: maxConns, - MaxConnectedUpgrade: 4, - MaxPeers: 1000, + MaxConnectedUpgrade: maxUpgradeConns, + MaxPeers: maxUpgradeConns + 2*maxConns, MinRetryTime: 250 * time.Millisecond, MaxRetryTime: 30 * time.Minute, MaxRetryTimePersistent: 5 * time.Minute, From 75a12ea0c6ef166dbc38695fb7cc6b2a64f6a77a Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Fri, 3 Jun 2022 19:46:10 +0200 Subject: [PATCH 087/203] consensus: switch timeout message to be debug and clarify meaning (#8694) --- internal/consensus/ticker.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/consensus/ticker.go b/internal/consensus/ticker.go index 103c48efc7..efb3f8fba3 100644 --- a/internal/consensus/ticker.go +++ b/internal/consensus/ticker.go @@ -115,9 +115,9 @@ func (t *timeoutTicker) timeoutRoutine(ctx context.Context) { // NOTE time.Timer allows duration to be non-positive ti = newti t.timer.Reset(ti.Duration) - t.logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) + t.logger.Debug("Internal state machine timeout scheduled", "duration", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) case <-t.timer.C: - t.logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) + t.logger.Debug("Internal state machine timeout elapsed ", "duration", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) // go routine here guarantees timeoutRoutine doesn't block. // Determinism comes from playback in the receiveRoutine. 
// We can eliminate it by merging the timeoutRoutine into receiveRoutine From 48ec78e6720bcd9fb7824387e660e721f1aa57f8 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 3 Jun 2022 15:02:04 -0700 Subject: [PATCH 088/203] Update how Mockery mocks are checked for currency. (#8697) The use of "go install" is deprecated as a way of installing and running the Mockery binary. Update the runner script to depend on an ambient version of the tool and ensure that in CI it's installed. --- .github/workflows/check-generated.yml | 3 +++ scripts/mockery_generate.sh | 14 +++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index 1d43b6fe5b..d8c1a79f76 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -25,6 +25,9 @@ jobs: - name: "Check generated mocks" run: | set -euo pipefail + + readonly MOCKERY=2.12.3 # N.B. no leading "v" + curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" | tar -C /usr/local/bin -xzf - make mockery 2>/dev/null if ! git diff --stat --exit-code ; then diff --git a/scripts/mockery_generate.sh b/scripts/mockery_generate.sh index 382c277bbe..2d6f40e638 100755 --- a/scripts/mockery_generate.sh +++ b/scripts/mockery_generate.sh @@ -1,3 +1,15 @@ #!/bin/sh +# +# Invoke Mockery v2 to update generated mocks for the given type. +# +# This script runs a locally-installed "mockery" if available, otherwise it +# runs the published Docker container. This legerdemain is so that the CI build +# and a local build can work off the same script. +# +if ! 
which mockery ; then + mockery() { + docker run --rm -v "$PWD":/w --workdir=/w vektra/mockery:v2.12.3 + } +fi -go run github.com/vektra/mockery/v2 --disable-version-string --case underscore --name $* +mockery --disable-version-string --case underscore --name "$@" From 6b2a6ed402e6dd7284b082e02aa98cd9d99d0c01 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jun 2022 07:35:37 -0700 Subject: [PATCH 089/203] build(deps-dev): Bump watchpack from 2.3.1 to 2.4.0 in /docs (#8700) Bumps [watchpack](https://github.com/webpack/watchpack) from 2.3.1 to 2.4.0. - [Release notes](https://github.com/webpack/watchpack/releases) - [Commits](https://github.com/webpack/watchpack/compare/v2.3.1...v2.4.0) --- updated-dependencies: - dependency-name: watchpack dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/package-lock.json | 14 +++++++------- docs/package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index a67545ab3e..7240aaa2d0 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -13,7 +13,7 @@ }, "devDependencies": { "@vuepress/plugin-html-redirect": "^0.1.4", - "watchpack": "^2.3.1" + "watchpack": "^2.4.0" } }, "node_modules/@algolia/cache-browser-local-storage": { @@ -13471,9 +13471,9 @@ } }, "node_modules/watchpack": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.3.1.tgz", - "integrity": "sha512-x0t0JuydIo8qCNctdDrn1OzH/qDzk2+rdCOC3YzumZ42fiMqmQ7T3xQurykYMhYfHaPHTp4ZxAx2NfUo1K6QaA==", + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", + "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", "dev": true, "dependencies": { 
"glob-to-regexp": "^0.4.1", @@ -24873,9 +24873,9 @@ } }, "watchpack": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.3.1.tgz", - "integrity": "sha512-x0t0JuydIo8qCNctdDrn1OzH/qDzk2+rdCOC3YzumZ42fiMqmQ7T3xQurykYMhYfHaPHTp4ZxAx2NfUo1K6QaA==", + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", + "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", "dev": true, "requires": { "glob-to-regexp": "^0.4.1", diff --git a/docs/package.json b/docs/package.json index 3200a5222e..0563805c12 100644 --- a/docs/package.json +++ b/docs/package.json @@ -8,7 +8,7 @@ }, "devDependencies": { "@vuepress/plugin-html-redirect": "^0.1.4", - "watchpack": "^2.3.1" + "watchpack": "^2.4.0" }, "scripts": { "preserve": "./pre.sh", From 85d1946602448251ce6493d17aeb5ffc9f2b79af Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jun 2022 17:17:28 +0000 Subject: [PATCH 090/203] build(deps): Bump bufbuild/buf-setup-action from 1.4.0 to 1.5.0 (#8701) Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.4.0 to 1.5.0.
Release notes

Sourced from bufbuild/buf-setup-action's releases.

v1.5.0

  • Set the default buf version to v1.4.0
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=bufbuild/buf-setup-action&package-manager=github_actions&previous-version=1.4.0&new-version=1.5.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- .github/workflows/proto-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index b1fbeab9df..f8fff89cc0 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 5 steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.4.0 + - uses: bufbuild/buf-setup-action@v1.5.0 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' From 3e97479ab8e3a7641d734f57711a19465108a90a Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 7 Jun 2022 01:13:05 -0700 Subject: [PATCH 091/203] Fix a "broken" markdown link. (#8706) The link actually works in context because of the auto-redirect from GitHub, but the link checker doesn't follow them. Splice out the redirect. --- docs/roadmap/roadmap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/roadmap/roadmap.md b/docs/roadmap/roadmap.md index 90274ca1d9..b95b43a606 100644 --- a/docs/roadmap/roadmap.md +++ b/docs/roadmap/roadmap.md @@ -90,7 +90,7 @@ Has the same feature set as V0.37 but with a focus towards testing, protocol cor ## Post 1.0 Work -- Improved block propagation with erasure coding and/or compact blocks. [More](https://github.com/tendermint/spec/issues/347) +- Improved block propagation with erasure coding and/or compact blocks. [More](https://github.com/tendermint/tendermint/issues/7932) - Consensus engine refactor - Fork accountability protocol - Bidirectional ABCI From 618b841a4b43b05e3fe10a04631dae3df9eb356a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Jun 2022 04:59:00 -0400 Subject: [PATCH 092/203] build(deps): Bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#8708) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.1 to 1.7.2. 
- [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.7.1...v1.7.2) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 69284bead4..be5c524453 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.4.0 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.7.2 github.com/tendermint/tm-db v0.6.6 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 @@ -229,7 +229,7 @@ require ( google.golang.org/protobuf v1.28.0 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.3.1 // indirect mvdan.cc/gofumpt v0.3.1 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect diff --git a/go.sum b/go.sum index b107e95b50..f646c2d821 100644 --- a/go.sum +++ b/go.sum @@ -1065,8 +1065,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 
h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= @@ -1847,8 +1848,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 3bb68b49f5cac018f85229db759ac91810702717 Mon Sep 17 00:00:00 2001 From: Elias Naur <103319121+elias-orijtech@users.noreply.github.com> Date: Tue, 7 Jun 2022 15:11:06 +0200 Subject: [PATCH 093/203] ci(fuzz): remove Go 1.18 workaround for OSS-Fuzz (#8711) --- test/fuzz/oss-fuzz-build.sh | 9 --------- 1 file changed, 9 deletions(-) diff --git a/test/fuzz/oss-fuzz-build.sh b/test/fuzz/oss-fuzz-build.sh index fdfbfaf29c..528290c187 100755 --- a/test/fuzz/oss-fuzz-build.sh +++ b/test/fuzz/oss-fuzz-build.sh @@ -3,15 +3,6 @@ # See https://github.com/google/oss-fuzz/blob/master/projects/tendermint/build.sh set -euo pipefail -# 
Upgrade to Go 1.18. Remove when it's the default. -apt-get update && apt-get install -y wget -wget https://go.dev/dl/go1.18.2.linux-amd64.tar.gz - -mkdir -p temp-go -rm -rf /root/.go/* -tar -C temp-go/ -xzf go1.18.2.linux-amd64.tar.gz -mv temp-go/go/* /root/.go/ - export FUZZ_ROOT="github.com/tendermint/tendermint" build_go_fuzzer() { From 931c98f7add66b01b56f9c85ba4e329ab73d67c0 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 7 Jun 2022 12:40:22 -0400 Subject: [PATCH 094/203] rpc: always close http bodies (#8712) Closes #8686 --- rpc/jsonrpc/server/http_json_handler.go | 8 +++++++- rpc/jsonrpc/server/http_json_handler_test.go | 2 +- rpc/jsonrpc/server/rpc_func.go | 4 ++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index 2eeded2d72..4f9e28faa8 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -81,9 +81,15 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han } } +func ensureBodyClose(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + next(w, r) + } +} + func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - // Since the pattern "/" matches all paths not matched by other registered patterns, // we check whether the path is indeed "/", otherwise return a 404 error if r.URL.Path != "/" { http.NotFound(w, r) diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index 77c74ffbcf..dd4a9d8e23 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -223,7 +223,7 @@ func TestRPCNotificationInBatch(t *testing.T) { func TestUnknownRPCPath(t *testing.T) { mux := testMux() - req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", 
nil) + req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", strings.NewReader("")) rec := httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() diff --git a/rpc/jsonrpc/server/rpc_func.go b/rpc/jsonrpc/server/rpc_func.go index 1fff323d75..947f8be5b7 100644 --- a/rpc/jsonrpc/server/rpc_func.go +++ b/rpc/jsonrpc/server/rpc_func.go @@ -26,11 +26,11 @@ func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger lo if fn.ws { continue // skip websocket endpoints, not usable via GET calls } - mux.HandleFunc("/"+name, makeHTTPHandler(fn, logger)) + mux.HandleFunc("/"+name, ensureBodyClose(makeHTTPHandler(fn, logger))) } // Endpoints for POST. - mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger))) + mux.HandleFunc("/", ensureBodyClose(handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger)))) } // Function introspection From e84ca617cfe47b37dc6a779006a55c2514e831a0 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 7 Jun 2022 12:25:16 -0700 Subject: [PATCH 095/203] Set up automation settings for the v0.36.x backport branch. (#8714) - Add Mergify settings for backport labels. - Add e2e nightly workflow for v0.36.x. - Update documentation on e2e workflows. - Add v0.36.x to the Dependabot configs. 
--- .github/dependabot.yml | 23 +++++++- .github/mergify.yml | 8 +++ .github/workflows/e2e-nightly-34x.yml | 7 +-- .github/workflows/e2e-nightly-35x.yml | 6 +- .github/workflows/e2e-nightly-36x.yml | 74 ++++++++++++++++++++++++ .github/workflows/e2e-nightly-master.yml | 5 +- 6 files changed, 113 insertions(+), 10 deletions(-) create mode 100644 .github/workflows/e2e-nightly-36x.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 0108f040d7..23e73e7458 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -11,6 +11,17 @@ updates: - T:dependencies - S:automerge + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: weekly + day: monday + target-branch: "v0.34.x" + open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge + - package-ecosystem: github-actions directory: "/" schedule: @@ -27,7 +38,7 @@ updates: schedule: interval: weekly day: monday - target-branch: "v0.34.x" + target-branch: "v0.36.x" open-pull-requests-limit: 10 labels: - T:dependencies @@ -73,3 +84,13 @@ updates: labels: - T:dependencies - S:automerge + + - package-ecosystem: gomod + directory: "/" + schedule: + interval: daily + target-branch: "v0.36.x" + open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge diff --git a/.github/mergify.yml b/.github/mergify.yml index d13fb851f9..6c703b8ce6 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -33,3 +33,11 @@ pull_request_rules: backport: branches: - v0.35.x + - name: backport patches to v0.36.x branch + conditions: + - base=master + - label=S:backport-to-v0.36.x + actions: + backport: + branches: + - v0.36.x diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 82265a2589..db4268dda4 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -1,8 +1,7 @@ -# Runs randomly generated E2E testnets nightly -# on the 0.34.x release branch +# Runs randomly generated E2E 
testnets nightly on the 0.34.x branch. -# !! If you change something in this file, you probably want -# to update the e2e-nightly-master workflow as well! +# !! This file should be kept in sync with the e2e-nightly-master.yml file, +# modulo changes to the version labels. name: e2e-nightly-34x on: diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml index d737f69c35..04cd6c524c 100644 --- a/.github/workflows/e2e-nightly-35x.yml +++ b/.github/workflows/e2e-nightly-35x.yml @@ -1,7 +1,7 @@ -# Runs randomly generated E2E testnets nightly on v0.35.x. +# Runs randomly generated E2E testnets nightly on the v0.35.x branch. -# !! If you change something in this file, you probably want -# to update the e2e-nightly-master workflow as well! +# !! This file should be kept in sync with the e2e-nightly-master.yml file, +# modulo changes to the version labels. name: e2e-nightly-35x on: diff --git a/.github/workflows/e2e-nightly-36x.yml b/.github/workflows/e2e-nightly-36x.yml new file mode 100644 index 0000000000..2067602e7d --- /dev/null +++ b/.github/workflows/e2e-nightly-36x.yml @@ -0,0 +1,74 @@ +# Runs randomly generated E2E testnets nightly on the v0.36.x branch. + +# !! This file should be kept in sync with the e2e-nightly-master.yml file, +# modulo changes to the version labels. + +name: e2e-nightly-36x +on: + schedule: + - cron: '0 2 * * *' + +jobs: + e2e-nightly-test: + # Run parallel jobs for the listed testnet groups (must match the + # ./build/generator -g flag) + strategy: + fail-fast: false + matrix: + group: ['00', '01', '02', '03'] + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/setup-go@v3 + with: + go-version: '1.18' + + - uses: actions/checkout@v3 + with: + ref: 'v0.36.x' + + - name: Build + working-directory: test/e2e + # Run make jobs in parallel, since we can't run steps in parallel. 
+ run: make -j2 docker generator runner tests + + - name: Generate testnets + working-directory: test/e2e + # When changing -g, also change the matrix groups above + run: ./build/generator -g 4 -d networks/nightly + + - name: Run testnets in group ${{ matrix.group }} + working-directory: test/e2e + run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + + e2e-nightly-fail-2: + needs: e2e-nightly-test + if: ${{ failure() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on failure + uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly E2E Tests + SLACK_ICON_EMOJI: ':skull:' + SLACK_COLOR: danger + SLACK_MESSAGE: Nightly E2E tests failed on v0.36.x + SLACK_FOOTER: '' + + e2e-nightly-success: # may turn this off once they seem to pass consistently + needs: e2e-nightly-test + if: ${{ success() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on success + uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly E2E Tests + SLACK_ICON_EMOJI: ':white_check_mark:' + SLACK_COLOR: good + SLACK_MESSAGE: Nightly E2E tests passed on v0.36.x + SLACK_FOOTER: '' diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index 7a02ad143a..7fac05f2db 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -1,7 +1,8 @@ # Runs randomly generated E2E testnets nightly on master -# !! If you change something in this file, you probably want -# to update the e2e-nightly-34x workflow as well! +# !! Relevant changes to this file should be propagated to the e2e-nightly-x +# files for the supported backport branches, when appropriate, modulo version +# markers. 
name: e2e-nightly-master on: From bb0737ee3c1d1988bdb7b220fb8c4f34aedcf6b9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Jun 2022 13:09:16 +0000 Subject: [PATCH 096/203] build(deps): Bump github.com/rs/zerolog from 1.26.1 to 1.27.0 (#8724) Bumps [github.com/rs/zerolog](https://github.com/rs/zerolog) from 1.26.1 to 1.27.0.
Commits
  • e9344a8 docs: add an example for Lshortfile-like implementation of CallerMarshalFunc ...
  • 263b0bd #411 Add FieldsExclude parameter to console writer (#418)
  • 588a61c ctx: Modify WithContext to use a non-pointer receiver (#409)
  • 361cdf6 Remove extra space in console when there is no message (#413)
  • fc26014 MsgFunc function added to Event (#406)
  • 025f9f1 journald: don't call Enabled before each write (#407)
  • 3efdd82 call done function when logger is disabled (#393)
  • c0c2e11 Consistent casing, redundancy, and spelling/grammar (#391)
  • 665519c Fix ConsoleWriter color on Windows (#390)
  • 0c8d3c0 move the lint command to its own package (#389)
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/rs/zerolog&package-manager=go_modules&previous-version=1.26.1&new-version=1.27.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index be5c524453..5d005836f9 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/ory/dockertest v3.3.5+incompatible github.com/prometheus/client_golang v1.12.2 github.com/rs/cors v1.8.2 - github.com/rs/zerolog v1.26.1 + github.com/rs/zerolog v1.27.0 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.4.0 github.com/spf13/viper v1.12.0 diff --git a/go.sum b/go.sum index f646c2d821..bf2c4f0674 100644 --- a/go.sum +++ b/go.sum @@ -228,6 +228,7 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= @@ -965,8 +966,9 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc= github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc= +github.com/rs/zerolog v1.27.0 
h1:1T7qCieN22GVc8S4Q2yuexzBb1EqjbgjSH9RohbMjKs= +github.com/rs/zerolog v1.27.0/go.mod h1:7frBqO0oezxmnO7GF86FY++uy8I0Tk/If5ni1G9Qc0U= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= From 69838858350df1dca9048c95610ef15689f15d6d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 8 Jun 2022 13:34:14 -0700 Subject: [PATCH 097/203] Update config migration test data for v0.36.x (#8727) This is a trivial update, but also a convenient way to make sure the mergify settings are working. --- scripts/confix/testdata/diff-35-36.txt | 1 + scripts/confix/testdata/v36-config.toml | 9 +-------- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/scripts/confix/testdata/diff-35-36.txt b/scripts/confix/testdata/diff-35-36.txt index 76f541b28b..298c53056a 100644 --- a/scripts/confix/testdata/diff-35-36.txt +++ b/scripts/confix/testdata/diff-35-36.txt @@ -16,6 +16,7 @@ -M p2p.max-num-inbound-peers -M p2p.max-num-outbound-peers -M p2p.persistent-peers-max-dial-period +-M p2p.seeds -M p2p.unconditional-peer-ids -M p2p.use-legacy +M rpc.event-log-max-items diff --git a/scripts/confix/testdata/v36-config.toml b/scripts/confix/testdata/v36-config.toml index 0182ab14ca..612f46ece6 100644 --- a/scripts/confix/testdata/v36-config.toml +++ b/scripts/confix/testdata/v36-config.toml @@ -157,7 +157,7 @@ experimental-disable-websocket = false # the latest (up to EventLogMaxItems) will be available for subscribers to # fetch via the /events method. If 0 (the default) the event log and the # /events RPC method are disabled. -event-log-window-size = "0s" +event-log-window-size = "30s" # The maxiumum number of events that may be retained by the event log. If # this value is 0, no upper limit is set. 
Otherwise, items in excess of @@ -221,13 +221,6 @@ laddr = "tcp://0.0.0.0:26656" # example: 159.89.10.97:26656 external-address = "" -# Comma separated list of seed nodes to connect to -# We only use these if we can’t connect to peers in the addrbook -# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead. -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 -seeds = "" - # Comma separated list of peers to be added to the peer store # on startup. Either BootstrapPeers or PersistentPeers are # needed for peer discovery From 8d9d19a11353adf52a59a17e01165d7680b6d8f6 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 9 Jun 2022 18:22:47 -0400 Subject: [PATCH 098/203] e2e/generator: enlarge nightly test suite (#8731) --- test/e2e/generator/generate.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 0e84701115..01e04a4183 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -14,14 +14,14 @@ var ( // testnetCombinations defines global testnet options, where we generate a // separate testnet for each combination (Cartesian product) of options. testnetCombinations = map[string][]interface{}{ - "topology": {"single", "quad", "large"}, - "initialHeight": {0, 1000}, + "topology": {"single", "quad", "large"}, "initialState": { map[string]string{}, map[string]string{"initial01": "a", "initial02": "b", "initial03": "c"}, }, "validators": {"genesis", "initchain"}, "abci": {"builtin", "outofprocess"}, + "txSize": {1024, 2048, 4096, 8192}, } // The following specify randomly chosen values for testnet nodes. 
@@ -63,11 +63,11 @@ var ( } // the following specify random chosen values for the entire testnet - evidence = uniformChoice{0, 1, 10} - txSize = uniformChoice{1024, 4096} // either 1kb or 4kb - ipv6 = uniformChoice{false, true} - keyType = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1} - abciDelays = uniformChoice{"none", "small", "large"} + initialHeight = uniformChoice{0, 1000} + evidence = uniformChoice{0, 1, 10} + ipv6 = uniformChoice{false, true} + keyType = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1} + abciDelays = uniformChoice{"none", "small", "large"} voteExtensionEnableHeightOffset = uniformChoice{int64(0), int64(10), int64(100)} voteExtensionEnabled = uniformChoice{true, false} @@ -109,7 +109,6 @@ type Options struct { func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) { manifest := e2e.Manifest{ IPv6: ipv6.Choose(r).(bool), - InitialHeight: int64(opt["initialHeight"].(int)), InitialState: opt["initialState"].(map[string]string), Validators: &map[string]int64{}, ValidatorUpdates: map[string]map[string]int64{}, @@ -117,9 +116,11 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er KeyType: keyType.Choose(r).(string), Evidence: evidence.Choose(r).(int), QueueType: "priority", - TxSize: txSize.Choose(r).(int), + TxSize: opt["txSize"].(int), } + manifest.InitialHeight = int64(initialHeight.Choose(r).(int)) + if voteExtensionEnabled.Choose(r).(bool) { manifest.VoteExtensionsEnableHeight = manifest.InitialHeight + voteExtensionEnableHeightOffset.Choose(r).(int64) } From b0e48ca5f3019256ac87708e7065bd4c10c40caa Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Fri, 10 Jun 2022 17:17:33 +0200 Subject: [PATCH 099/203] remove unused config variables (#8738) --- cmd/tendermint/commands/testnet.go | 1 - config/config.go | 10 ---------- config/toml.go | 3 --- 3 files changed, 14 deletions(-) diff --git a/cmd/tendermint/commands/testnet.go 
b/cmd/tendermint/commands/testnet.go index 82c8cc6f9f..5ba76621c3 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -239,7 +239,6 @@ Example: for i := 0; i < nValidators+nNonValidators; i++ { nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) config.SetRoot(nodeDir) - config.P2P.AllowDuplicateIP = true if populatePersistentPeers { persistentPeersWithoutSelf := make([]string, 0) for j := 0; j < len(persistentPeers); j++ { diff --git a/config/config.go b/config/config.go index 43b3fc10f3..d875b8569f 100644 --- a/config/config.go +++ b/config/config.go @@ -638,9 +638,6 @@ type P2PConfig struct { //nolint: maligned // other peers) PrivatePeerIDs string `mapstructure:"private-peer-ids"` - // Toggle to disable guard against peers connecting from the same ip. - AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"` - // Time to wait before flushing messages out on the connection FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"` @@ -657,10 +654,6 @@ type P2PConfig struct { //nolint: maligned HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"` DialTimeout time.Duration `mapstructure:"dial-timeout"` - // Testing params. - // Force dial to fail - TestDialFail bool `mapstructure:"test-dial-fail"` - // Makes it possible to configure which queue backend the p2p // layer uses. Options are: "fifo" and "priority", // with the default being "priority". 
@@ -685,10 +678,8 @@ func DefaultP2PConfig() *P2PConfig { SendRate: 5120000, // 5 mB/s RecvRate: 5120000, // 5 mB/s PexReactor: true, - AllowDuplicateIP: false, HandshakeTimeout: 20 * time.Second, DialTimeout: 3 * time.Second, - TestDialFail: false, QueueType: "priority", } } @@ -715,7 +706,6 @@ func (cfg *P2PConfig) ValidateBasic() error { func TestP2PConfig() *P2PConfig { cfg := DefaultP2PConfig() cfg.ListenAddress = "tcp://127.0.0.1:36656" - cfg.AllowDuplicateIP = true cfg.FlushThrottleTimeout = 10 * time.Millisecond return cfg } diff --git a/config/toml.go b/config/toml.go index 4db4f4e65d..cf6635d45e 100644 --- a/config/toml.go +++ b/config/toml.go @@ -319,9 +319,6 @@ pex = {{ .P2P.PexReactor }} # Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055 private-peer-ids = "{{ .P2P.PrivatePeerIDs }}" -# Toggle to disable guard against peers connecting from the same ip. -allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }} - # Peer connection configuration. 
handshake-timeout = "{{ .P2P.HandshakeTimeout }}" dial-timeout = "{{ .P2P.DialTimeout }}" From 7172862786cabaeb8ac06a6d646955a2faa6da31 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Sat, 11 Jun 2022 08:21:55 -0400 Subject: [PATCH 100/203] e2e: split test cases to avoid hitting timeouts (#8741) --- .github/workflows/e2e-manual.yml | 4 ++-- .github/workflows/e2e-nightly-master.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index 6da4c33428..56a406b7a3 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -11,7 +11,7 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03'] + group: ['00', '01', '02', '03', '04'] runs-on: ubuntu-latest timeout-minutes: 60 steps: @@ -29,7 +29,7 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 4 -d networks/nightly/ + run: ./build/generator -g 5 -d networks/nightly/ - name: Run ${{ matrix.p2p }} p2p testnets working-directory: test/e2e diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index 7fac05f2db..367364efb6 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03'] + group: ['00', '01', '02', '03', "04"] runs-on: ubuntu-latest timeout-minutes: 60 steps: @@ -34,7 +34,7 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 4 -d networks/nightly/ + run: ./build/generator -g 5 -d networks/nightly/ - name: Run ${{ matrix.p2p }} p2p testnets working-directory: test/e2e From 06175129ed7927e58ab070c9e33699d8b0e78e1c Mon Sep 17 00:00:00 2001 From: Jasmina Malicevic Date: Mon, 13 Jun 2022 14:09:57 +0200 Subject: [PATCH 
101/203] abci-cli: added `PrepareProposal` command to cli (#8656) * Prepare prosal cli --- CHANGELOG_PENDING.md | 3 +- abci/cmd/abci-cli/abci-cli.go | 80 +++++++++++++++++++++++++++++++- abci/tests/server/client.go | 13 ++++++ abci/tests/test_cli/ex1.abci | 2 + abci/tests/test_cli/ex1.abci.out | 10 ++++ abci/tests/test_cli/test.sh | 2 + 6 files changed, 107 insertions(+), 3 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index c3e191e7e2..f331984034 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -31,7 +31,8 @@ Special thanks to external contributors on this release: - [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint. (@tychoish) - [abci] \#8605 Remove info, log, events, gasUsed and mempoolError fields from ResponseCheckTx as they are not used by Tendermint. (@jmalicevic) - [abci] \#8664 Move `app_hash` parameter from `Commit` to `FinalizeBlock`. (@sergio-mena) - + - [abci] \#8656 Added cli command for `PrepareProposal`. (@jmalicevic) + - P2P Protocol - [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. 
(@tychoish) diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index b09f3c9a7b..237493e0ed 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -2,6 +2,7 @@ package main import ( "bufio" + "bytes" "encoding/hex" "errors" "fmt" @@ -130,6 +131,7 @@ func addCommands(cmd *cobra.Command, logger log.Logger) { cmd.AddCommand(commitCmd) cmd.AddCommand(versionCmd) cmd.AddCommand(testCmd) + cmd.AddCommand(prepareProposalCmd) cmd.AddCommand(getQueryCmd()) // examples @@ -170,7 +172,7 @@ This command opens an interactive console for running any of the other commands without opening a new connection each time `, Args: cobra.ExactArgs(0), - ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "commit", "query"}, + ValidArgs: []string{"echo", "info", "query", "check_tx", "prepare_proposal", "finalize_block", "commit"}, RunE: cmdConsole, } @@ -224,6 +226,14 @@ var versionCmd = &cobra.Command{ }, } +var prepareProposalCmd = &cobra.Command{ + Use: "prepare_proposal", + Short: "prepare proposal", + Long: "prepare proposal", + Args: cobra.MinimumNArgs(1), + RunE: cmdPrepareProposal, +} + func getQueryCmd() *cobra.Command { cmd := &cobra.Command{ Use: "query", @@ -335,6 +345,13 @@ func cmdTest(cmd *cobra.Command, args []string) error { }, nil, []byte{0, 0, 0, 0, 0, 0, 0, 5}) }, func() error { return servertest.Commit(ctx, client) }, + func() error { + return servertest.PrepareProposal(ctx, client, [][]byte{ + {0x01}, + }, []types.TxRecord_TxAction{ + types.TxRecord_UNMODIFIED, + }, nil) + }, }) } @@ -435,6 +452,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error { return cmdInfo(cmd, actualArgs) case "query": return cmdQuery(cmd, actualArgs) + case "prepare_proposal": + return cmdPrepareProposal(cmd, actualArgs) default: return cmdUnimplemented(cmd, pArgs) } @@ -605,6 +624,64 @@ func cmdQuery(cmd *cobra.Command, args []string) error { return nil } +func inTxArray(txByteArray [][]byte, tx []byte) bool { + 
for _, txTmp := range txByteArray { + if bytes.Equal(txTmp, tx) { + return true + } + + } + return false +} +func cmdPrepareProposal(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + printResponse(cmd, args, response{ + Code: codeBad, + Info: "Must provide at least one transaction", + Log: "Must provide at least one transaction", + }) + return nil + } + txsBytesArray := make([][]byte, len(args)) + + for i, arg := range args { + txBytes, err := stringOrHexToBytes(arg) + if err != nil { + return err + } + txsBytesArray[i] = txBytes + } + + res, err := client.PrepareProposal(cmd.Context(), &types.RequestPrepareProposal{ + Txs: txsBytesArray, + // kvstore has to have this parameter in order not to reject a tx as the default value is 0 + MaxTxBytes: 65536, + }) + if err != nil { + return err + } + resps := make([]response, 0, len(res.TxResults)+1) + for _, tx := range res.TxRecords { + existingTx := inTxArray(txsBytesArray, tx.Tx) + if tx.Action == types.TxRecord_UNKNOWN || + (existingTx && tx.Action == types.TxRecord_ADDED) || + (!existingTx && (tx.Action == types.TxRecord_UNMODIFIED || tx.Action == types.TxRecord_REMOVED)) { + resps = append(resps, response{ + Code: codeBad, + Log: "Failed. Tx: " + string(tx.GetTx()) + " action: " + tx.Action.String(), + }) + } else { + resps = append(resps, response{ + Code: code.CodeTypeOK, + Log: "Succeeded. Tx: " + string(tx.Tx) + " action: " + tx.Action.String(), + }) + } + } + + printResponse(cmd, args, resps...) 
+ return nil +} + func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error { return func(cmd *cobra.Command, args []string) error { // Create the application - in memory or persisted to disk @@ -649,7 +726,6 @@ func printResponse(cmd *cobra.Command, args []string, rsps ...response) { fmt.Printf("-> code: OK\n") } else { fmt.Printf("-> code: %d\n", rsp.Code) - } if len(rsp.Data) != 0 { diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index cddb42ec0a..7762c8d033 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -70,6 +70,19 @@ func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]by return nil } +func PrepareProposal(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []types.TxRecord_TxAction, dataExp []byte) error { + res, _ := client.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: txBytes}) + for i, tx := range res.TxRecords { + if tx.Action != codeExp[i] { + fmt.Println("Failed test: PrepareProposal") + fmt.Printf("PrepareProposal response code was unexpected. 
Got %v expected %v.", + tx.Action, codeExp) + return errors.New("PrepareProposal error") + } + } + fmt.Println("Passed test: PrepareProposal") + return nil +} func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes}) code, data := res.Code, res.Data diff --git a/abci/tests/test_cli/ex1.abci b/abci/tests/test_cli/ex1.abci index 56355dc945..dc9e213ecb 100644 --- a/abci/tests/test_cli/ex1.abci +++ b/abci/tests/test_cli/ex1.abci @@ -1,5 +1,6 @@ echo hello info +prepare_proposal "abc" finalize_block "abc" commit info @@ -7,3 +8,4 @@ query "abc" finalize_block "def=xyz" "ghi=123" commit query "def" +prepare_proposal "preparedef" \ No newline at end of file diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index 9a35290b01..f4f342dfb4 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -8,6 +8,10 @@ -> data: {"size":0} -> data.hex: 0x7B2273697A65223A307D +> prepare_proposal "abc" +-> code: OK +-> log: Succeeded. Tx: abc action: UNMODIFIED + > finalize_block "abc" -> code: OK -> code: OK @@ -48,3 +52,9 @@ -> value: xyz -> value.hex: 78797A +> prepare_proposal "preparedef" +-> code: OK +-> log: Succeeded. Tx: def action: ADDED +-> code: OK +-> log: Succeeded. 
Tx: preparedef action: REMOVED + diff --git a/abci/tests/test_cli/test.sh b/abci/tests/test_cli/test.sh index 9c02ce6f54..d160d59c9e 100755 --- a/abci/tests/test_cli/test.sh +++ b/abci/tests/test_cli/test.sh @@ -30,6 +30,8 @@ function testExample() { cat "${INPUT}.out.new" echo "Expected:" cat "${INPUT}.out" + echo "Diff:" + diff "${INPUT}.out" "${INPUT}.out.new" exit 1 fi From 82907c84fa53501696b21931c9e97c7ee99d2f75 Mon Sep 17 00:00:00 2001 From: Marko Date: Mon, 13 Jun 2022 19:20:54 +0200 Subject: [PATCH 102/203] sink/psql: json marshal instead of proto (#8637) Storing transaction records as JSON makes it simpler for clients of the index. --- CHANGELOG_PENDING.md | 3 ++- internal/state/indexer/sink/psql/psql.go | 9 ++++++--- internal/state/indexer/sink/psql/psql_test.go | 12 ++++++++---- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index f331984034..608abc06d2 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -32,7 +32,8 @@ Special thanks to external contributors on this release: - [abci] \#8605 Remove info, log, events, gasUsed and mempoolError fields from ResponseCheckTx as they are not used by Tendermint. (@jmalicevic) - [abci] \#8664 Move `app_hash` parameter from `Commit` to `FinalizeBlock`. (@sergio-mena) - [abci] \#8656 Added cli command for `PrepareProposal`. (@jmalicevic) - + - [sink/psql] \#8637 tx_results emitted from psql sink are now json encoded, previously they were protobuf encoded + - P2P Protocol - [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. 
(@tychoish) diff --git a/internal/state/indexer/sink/psql/psql.go b/internal/state/indexer/sink/psql/psql.go index c063832640..57f5e5c3d6 100644 --- a/internal/state/indexer/sink/psql/psql.go +++ b/internal/state/indexer/sink/psql/psql.go @@ -9,8 +9,7 @@ import ( "strings" "time" - "github.com/gogo/protobuf/proto" - + "github.com/gogo/protobuf/jsonpb" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" @@ -177,12 +176,16 @@ INSERT INTO `+tableBlocks+` (height, chain_id, created_at) }) } +var ( + jsonpbMarshaller = jsonpb.Marshaler{} +) + func (es *EventSink) IndexTxEvents(txrs []*abci.TxResult) error { ts := time.Now().UTC() for _, txr := range txrs { // Encode the result message in protobuf wire format for indexing. - resultData, err := proto.Marshal(txr) + resultData, err := jsonpbMarshaller.MarshalToString(txr) if err != nil { return fmt.Errorf("marshaling tx_result: %w", err) } diff --git a/internal/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go index 72d14b5d89..2625d72451 100644 --- a/internal/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -1,6 +1,7 @@ package psql import ( + "bytes" "context" "database/sql" "flag" @@ -12,7 +13,7 @@ import ( "time" "github.com/adlio/schema" - "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/jsonpb" "github.com/ory/dockertest" "github.com/ory/dockertest/docker" "github.com/stretchr/testify/assert" @@ -151,6 +152,8 @@ func TestType(t *testing.T) { assert.Equal(t, indexer.PSQL, psqlSink.Type()) } +var jsonpbUnmarshaller = jsonpb.Unmarshaler{} + func TestIndexing(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -278,13 +281,14 @@ func loadTxResult(hash []byte) (*abci.TxResult, error) { hashString := fmt.Sprintf("%X", hash) var resultData []byte if err := testDB().QueryRow(` -SELECT 
tx_result FROM `+tableTxResults+` WHERE tx_hash = $1; -`, hashString).Scan(&resultData); err != nil { + SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1; + `, hashString).Scan(&resultData); err != nil { return nil, fmt.Errorf("lookup transaction for hash %q failed: %v", hashString, err) } + reader := bytes.NewBuffer(resultData) txr := new(abci.TxResult) - if err := proto.Unmarshal(resultData, txr); err != nil { + if err := jsonpbUnmarshaller.Unmarshal(reader, txr); err != nil { return nil, fmt.Errorf("unmarshaling txr: %w", err) } From 21bbbe3e2a1ad133241abdefdaf6874f72fbbc4d Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Tue, 14 Jun 2022 16:50:55 +0800 Subject: [PATCH 103/203] mempool: fix typos in test (#8746) --- internal/mempool/priority_queue_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/mempool/priority_queue_test.go b/internal/mempool/priority_queue_test.go index ddc84806da..90f6111625 100644 --- a/internal/mempool/priority_queue_test.go +++ b/internal/mempool/priority_queue_test.go @@ -90,7 +90,7 @@ func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) { expectedLen int }{ { - name: "larest priority; single tx", + name: "largest priority; single tx", priority: int64(max + 1), txSize: 5, totalSize: totalSize, @@ -98,7 +98,7 @@ func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) { expectedLen: 1, }, { - name: "larest priority; multi tx", + name: "largest priority; multi tx", priority: int64(max + 1), txSize: 17, totalSize: totalSize, @@ -106,7 +106,7 @@ func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) { expectedLen: 4, }, { - name: "larest priority; out of capacity", + name: "largest priority; out of capacity", priority: int64(max + 1), txSize: totalSize + 1, totalSize: totalSize, From a4cf8939b88b70072f40a87e87516496ee3cc347 Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Tue, 14 Jun 2022 18:54:35 +0800 Subject: [PATCH 104/203] mempool: fix error message check in test (#8750) --- 
internal/mempool/mempool_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index 538cb3e1fc..42fb13bdce 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -631,7 +631,7 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { require.NoError(t, txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0})) } else { err = txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0}) - fmt.Print(err.Error()) + require.EqualError(t, err, "test error") } }) } From a2908c29d5aa6d9437edb052d52bedb254e2c963 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Jun 2022 11:06:36 +0000 Subject: [PATCH 105/203] build(deps): Bump github.com/vektra/mockery/v2 from 2.12.3 to 2.13.0 (#8748) Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.12.3 to 2.13.0.
Release notes

Sourced from github.com/vektra/mockery/v2's releases.

v2.13.0

Changelog

  • f9f6d38 Merge pull request #477 from LandonTClipp/generics_release
  • 6498bd6 Updating dependencies

v2.13.0-beta.1

Changelog

  • 9dba1fd Merge pull request #454 from Gevrai/gejo-move-expecter-test
  • cde38d9 Move generated ExpecterTest to mocks directory

v2.13.0-beta.0

Changelog

  • dc5539e Add support for generating mocks for generic interfaces
  • 33c4bf3 Generics mocking fixes
  • a727d70 Genrics support: rename and comment methods
  • 4c0f6c8 Merge conflict resolution
  • 46c61f0 Merge pull request #456 from cruickshankpg/mock-generic-interfaces
  • ed55a47 Update x/tools to pick up fix for golang/go#51629
Commits
  • f9f6d38 Merge pull request #477 from LandonTClipp/generics_release
  • 6498bd6 Updating dependencies
  • 9dba1fd Merge pull request #454 from Gevrai/gejo-move-expecter-test
  • cde38d9 Move generated ExpecterTest to mocks directory
  • 46c61f0 Merge pull request #456 from cruickshankpg/mock-generic-interfaces
  • 4c0f6c8 Merge conflict resolution
  • ed55a47 Update x/tools to pick up fix for golang/go#51629
  • a727d70 Genrics support: rename and comment methods
  • 33c4bf3 Generics mocking fixes
  • dc5539e Add support for generating mocks for generic interfaces
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/vektra/mockery/v2&package-manager=go_modules&previous-version=2.12.3&new-version=2.13.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 19 +++++++++---------- go.sum | 37 ++++++++++++++++++------------------- 2 files changed, 27 insertions(+), 29 deletions(-) diff --git a/go.mod b/go.mod index 5d005836f9..7a5a1a9070 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.7.2 github.com/tendermint/tm-db v0.6.6 - golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 + golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 google.golang.org/grpc v1.47.0 @@ -42,7 +42,7 @@ require ( github.com/creachadair/taskgroup v0.3.2 github.com/golangci/golangci-lint v1.46.0 github.com/google/go-cmp v0.5.8 - github.com/vektra/mockery/v2 v2.12.3 + github.com/vektra/mockery/v2 v2.13.0 gotest.tools v2.2.0+incompatible ) @@ -169,7 +169,7 @@ require ( github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runc v1.0.3 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.1 // indirect + github.com/pelletier/go-toml/v2 v2.0.2 // indirect github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 // indirect @@ -199,7 +199,7 @@ require ( github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect github.com/stretchr/objx v0.1.1 // indirect - github.com/subosito/gotenv v1.3.0 // indirect + github.com/subosito/gotenv v1.4.0 // indirect github.com/sylvia7788/contextcheck v1.0.4 // indirect github.com/tdakkota/asciicheck v0.1.1 // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect @@ -219,15 +219,14 @@ require ( go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect - golang.org/x/mod 
v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect - golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d // indirect + golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect - golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect + golang.org/x/tools v0.1.11 // indirect google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect google.golang.org/protobuf v1.28.0 // indirect - gopkg.in/ini.v1 v1.66.4 // indirect + gopkg.in/ini.v1 v1.66.6 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.3.1 // indirect diff --git a/go.sum b/go.sum index bf2c4f0674..297b27a74f 100644 --- a/go.sum +++ b/go.sum @@ -874,8 +874,9 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pelletier/go-toml/v2 v2.0.0/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= +github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/checkstyle 
v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= @@ -966,7 +967,6 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc= github.com/rs/zerolog v1.27.0 h1:1T7qCieN22GVc8S4Q2yuexzBb1EqjbgjSH9RohbMjKs= github.com/rs/zerolog v1.27.0/go.mod h1:7frBqO0oezxmnO7GF86FY++uy8I0Tk/If5ni1G9Qc0U= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= @@ -980,7 +980,6 @@ github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8 github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= -github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA= github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= @@ -1022,7 +1021,6 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= 
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -1045,7 +1043,6 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= -github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= @@ -1071,8 +1068,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= +github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= +github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04= github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1118,8 +1116,8 @@ 
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/vektra/mockery/v2 v2.12.3 h1:74h0R+p75tdr3QNwiNz3MXeCwSP/I5bYUbZY6oT4t20= -github.com/vektra/mockery/v2 v2.12.3/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U= +github.com/vektra/mockery/v2 v2.13.0 h1:jzHQuiWMbLK52usAz/3wyIf07gZnACOsTJ8/AcHA/2s= +github.com/vektra/mockery/v2 v2.13.0/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= @@ -1221,11 +1219,10 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1271,8 +1268,9 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1487,13 +1485,15 @@ golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d h1:Zu/JngovGLVi6t2J3nmAf3AoTDwuzw85YZ3b9o4yU7s= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 h1:EH1Deb8WZJ0xc0WK//leUHXcX9aLE5SymusoTmMZye8= golang.org/x/term v0.0.0-20220411215600-e5f449aeb171/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1611,14 +1611,14 @@ golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= 
-golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a h1:ofrrl6c6NG5/IOSx/R1cyiQxxjqlur0h/TvbUhkH0II= golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= @@ -1793,7 +1793,6 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= @@ -1830,9 +1829,9 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= +gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= From 7971f4a2fca1ee3054fbc996e2bc041de63d3b89 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 14 Jun 2022 12:45:05 -0400 Subject: [PATCH 106/203] p2p: self-add node should not error (#8753) --- internal/p2p/peermanager.go | 12 +++++++++++- internal/p2p/peermanager_test.go | 14 ++++++++------ node/node.go | 2 +- node/seed.go | 2 +- node/setup.go | 2 ++ 5 files changed, 23 insertions(+), 9 deletions(-) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 7391de4ea7..65741f63ff 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -15,6 +15,7 @@ import ( dbm "github.com/tendermint/tm-db" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -145,6 +146,8 @@ type PeerManagerOptions struct { // persistentPeers 
provides fast PersistentPeers lookups. It is built // by optimize(). persistentPeers map[types.NodeID]bool + + Logger log.Logger } // Validate validates the options. @@ -264,6 +267,7 @@ type PeerManager struct { rand *rand.Rand dialWaker *tmsync.Waker // wakes up DialNext() on relevant peer changes evictWaker *tmsync.Waker // wakes up EvictNext() on relevant peer changes + logger log.Logger mtx sync.Mutex store *peerStore @@ -298,6 +302,7 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec dialWaker: tmsync.NewWaker(), evictWaker: tmsync.NewWaker(), + logger: log.NewNopLogger(), store: store, dialing: map[types.NodeID]bool{}, @@ -308,6 +313,11 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio evicting: map[types.NodeID]bool{}, subscriptions: map[*PeerUpdates]*PeerUpdates{}, } + + if options.Logger != nil { + peerManager.logger = options.Logger + } + if err = peerManager.configurePeers(); err != nil { return nil, err } @@ -390,7 +400,7 @@ func (m *PeerManager) Add(address NodeAddress) (bool, error) { return false, err } if address.NodeID == m.selfID { - return false, fmt.Errorf("can't add self (%v) to peer store", m.selfID) + return false, nil } m.mtx.Lock() diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index 47e8462a42..bb79fe7712 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -265,8 +265,9 @@ func TestPeerManager_Add(t *testing.T) { require.Error(t, err) // Adding self should error - _, err = peerManager.Add(p2p.NodeAddress{Protocol: "memory", NodeID: selfID}) - require.Error(t, err) + ok, err := peerManager.Add(p2p.NodeAddress{Protocol: "memory", NodeID: selfID}) + require.False(t, ok) + require.NoError(t, err) } func TestPeerManager_DialNext(t *testing.T) { @@ -841,13 +842,14 @@ func TestPeerManager_Dialed_Connected(t *testing.T) { require.Error(t, 
peerManager.Dialed(b)) } -func TestPeerManager_Dialed_Self(t *testing.T) { +func TestPeerManager_Adding_Self(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - // Dialing self should error. - _, err = peerManager.Add(p2p.NodeAddress{Protocol: "memory", NodeID: selfID}) - require.Error(t, err) + // Ingesting self should not error. + ok, err := peerManager.Add(p2p.NodeAddress{Protocol: "memory", NodeID: selfID}) + require.False(t, ok) + require.NoError(t, err) } func TestPeerManager_Dialed_MaxConnected(t *testing.T) { diff --git a/node/node.go b/node/node.go index 1bda1f0f74..f2c4cd6a85 100644 --- a/node/node.go +++ b/node/node.go @@ -203,7 +203,7 @@ func makeNode( } } - peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID) + peerManager, peerCloser, err := createPeerManager(logger, cfg, dbProvider, nodeKey.ID) closers = append(closers, peerCloser) if err != nil { return nil, combineCloseError( diff --git a/node/seed.go b/node/seed.go index a0b71e411f..3ba4d86d4d 100644 --- a/node/seed.go +++ b/node/seed.go @@ -67,7 +67,7 @@ func makeSeedNode( // Setup Transport and Switch. 
p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID) - peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID) + peerManager, closer, err := createPeerManager(logger, cfg, dbProvider, nodeKey.ID) if err != nil { return nil, combineCloseError( fmt.Errorf("failed to create peer manager: %w", err), diff --git a/node/setup.go b/node/setup.go index 51a0482494..3240ba0e7e 100644 --- a/node/setup.go +++ b/node/setup.go @@ -199,6 +199,7 @@ func createEvidenceReactor( } func createPeerManager( + logger log.Logger, cfg *config.Config, dbProvider config.DBProvider, nodeID types.NodeID, @@ -226,6 +227,7 @@ func createPeerManager( maxUpgradeConns := uint16(4) options := p2p.PeerManagerOptions{ + Logger: logger.With("module", "peermanager"), SelfAddress: selfAddr, MaxConnected: maxConns, MaxConnectedUpgrade: maxUpgradeConns, From bf1cb89bb7c34d75e9aade09f7304fd376ce084b Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 14 Jun 2022 16:55:10 -0400 Subject: [PATCH 107/203] Revert "p2p: self-add node should not error (tendermint#8753)" (#8757) --- internal/p2p/peermanager.go | 12 +----------- internal/p2p/peermanager_test.go | 14 ++++++-------- node/node.go | 2 +- node/seed.go | 2 +- node/setup.go | 2 -- 5 files changed, 9 insertions(+), 23 deletions(-) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 65741f63ff..7391de4ea7 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -15,7 +15,6 @@ import ( dbm "github.com/tendermint/tm-db" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/log" p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -146,8 +145,6 @@ type PeerManagerOptions struct { // persistentPeers provides fast PersistentPeers lookups. It is built // by optimize(). 
persistentPeers map[types.NodeID]bool - - Logger log.Logger } // Validate validates the options. @@ -267,7 +264,6 @@ type PeerManager struct { rand *rand.Rand dialWaker *tmsync.Waker // wakes up DialNext() on relevant peer changes evictWaker *tmsync.Waker // wakes up EvictNext() on relevant peer changes - logger log.Logger mtx sync.Mutex store *peerStore @@ -302,7 +298,6 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec dialWaker: tmsync.NewWaker(), evictWaker: tmsync.NewWaker(), - logger: log.NewNopLogger(), store: store, dialing: map[types.NodeID]bool{}, @@ -313,11 +308,6 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio evicting: map[types.NodeID]bool{}, subscriptions: map[*PeerUpdates]*PeerUpdates{}, } - - if options.Logger != nil { - peerManager.logger = options.Logger - } - if err = peerManager.configurePeers(); err != nil { return nil, err } @@ -400,7 +390,7 @@ func (m *PeerManager) Add(address NodeAddress) (bool, error) { return false, err } if address.NodeID == m.selfID { - return false, nil + return false, fmt.Errorf("can't add self (%v) to peer store", m.selfID) } m.mtx.Lock() diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index bb79fe7712..47e8462a42 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -265,9 +265,8 @@ func TestPeerManager_Add(t *testing.T) { require.Error(t, err) // Adding self should error - ok, err := peerManager.Add(p2p.NodeAddress{Protocol: "memory", NodeID: selfID}) - require.False(t, ok) - require.NoError(t, err) + _, err = peerManager.Add(p2p.NodeAddress{Protocol: "memory", NodeID: selfID}) + require.Error(t, err) } func TestPeerManager_DialNext(t *testing.T) { @@ -842,14 +841,13 @@ func TestPeerManager_Dialed_Connected(t *testing.T) { require.Error(t, peerManager.Dialed(b)) } -func TestPeerManager_Adding_Self(t *testing.T) 
{ +func TestPeerManager_Dialed_Self(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - // Ingesting self should not error. - ok, err := peerManager.Add(p2p.NodeAddress{Protocol: "memory", NodeID: selfID}) - require.False(t, ok) - require.NoError(t, err) + // Dialing self should error. + _, err = peerManager.Add(p2p.NodeAddress{Protocol: "memory", NodeID: selfID}) + require.Error(t, err) } func TestPeerManager_Dialed_MaxConnected(t *testing.T) { diff --git a/node/node.go b/node/node.go index f2c4cd6a85..1bda1f0f74 100644 --- a/node/node.go +++ b/node/node.go @@ -203,7 +203,7 @@ func makeNode( } } - peerManager, peerCloser, err := createPeerManager(logger, cfg, dbProvider, nodeKey.ID) + peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID) closers = append(closers, peerCloser) if err != nil { return nil, combineCloseError( diff --git a/node/seed.go b/node/seed.go index 3ba4d86d4d..a0b71e411f 100644 --- a/node/seed.go +++ b/node/seed.go @@ -67,7 +67,7 @@ func makeSeedNode( // Setup Transport and Switch. 
p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID) - peerManager, closer, err := createPeerManager(logger, cfg, dbProvider, nodeKey.ID) + peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID) if err != nil { return nil, combineCloseError( fmt.Errorf("failed to create peer manager: %w", err), diff --git a/node/setup.go b/node/setup.go index 3240ba0e7e..51a0482494 100644 --- a/node/setup.go +++ b/node/setup.go @@ -199,7 +199,6 @@ func createEvidenceReactor( } func createPeerManager( - logger log.Logger, cfg *config.Config, dbProvider config.DBProvider, nodeID types.NodeID, @@ -227,7 +226,6 @@ func createPeerManager( maxUpgradeConns := uint16(4) options := p2p.PeerManagerOptions{ - Logger: logger.With("module", "peermanager"), SelfAddress: selfAddr, MaxConnected: maxConns, MaxConnectedUpgrade: maxUpgradeConns, From 979a6a1b13331ae90efb1c03b9dc69bc227d1909 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 14 Jun 2022 19:12:53 -0400 Subject: [PATCH 108/203] p2p: accept should not abort on first error (#8759) --- internal/p2p/router.go | 17 ++++--- internal/p2p/router_test.go | 96 +++++++++++++------------------------ 2 files changed, 43 insertions(+), 70 deletions(-) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 8b77541ded..511fa0fb92 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -470,14 +470,17 @@ func (r *Router) dialSleep(ctx context.Context) { func (r *Router) acceptPeers(ctx context.Context, transport Transport) { for { conn, err := transport.Accept(ctx) - switch err { - case nil: - case io.EOF: - r.logger.Debug("stopping accept routine", "transport", transport) + switch { + case errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded): + r.logger.Debug("stopping accept routine", "transport", transport, "err", "context canceled") return - default: - r.logger.Error("failed to accept connection", "transport", transport, "err", err) + case 
errors.Is(err, io.EOF): + r.logger.Debug("stopping accept routine", "transport", transport, "err", "EOF") return + case err != nil: + // in this case we got an error from the net.Listener. + r.logger.Error("failed to accept connection", "transport", transport, "err", err) + continue } incomingIP := conn.RemoteEndpoint().IP @@ -489,7 +492,7 @@ func (r *Router) acceptPeers(ctx context.Context, transport Transport) { "close_err", closeErr, ) - return + continue } // Spawn a goroutine for the handshake, to avoid head-of-line blocking. diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 663e6b81c9..e0910fb218 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -442,78 +442,48 @@ func TestRouter_AcceptPeers(t *testing.T) { } } -func TestRouter_AcceptPeers_Error(t *testing.T) { - t.Cleanup(leaktest.Check(t)) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Set up a mock transport that returns an error, which should prevent - // the router from calling Accept again. - mockTransport := &mocks.Transport{} - mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Accept", mock.Anything).Once().Return(nil, errors.New("boom")) - mockTransport.On("Close").Return(nil) - mockTransport.On("Listen", mock.Anything).Return(nil) - - // Set up and start the router. 
- peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) - require.NoError(t, err) +func TestRouter_AcceptPeers_Errors(t *testing.T) { - router, err := p2p.NewRouter( - log.NewNopLogger(), - p2p.NopMetrics(), - selfKey, - peerManager, - func() *types.NodeInfo { return &selfInfo }, - mockTransport, - nil, - p2p.RouterOptions{}, - ) - require.NoError(t, err) - - require.NoError(t, router.Start(ctx)) - time.Sleep(time.Second) - router.Stop() + for _, err := range []error{io.EOF, context.Canceled, context.DeadlineExceeded} { + t.Run(err.Error(), func(t *testing.T) { + t.Cleanup(leaktest.Check(t)) - mockTransport.AssertExpectations(t) -} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() -func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) { - t.Cleanup(leaktest.Check(t)) + // Set up a mock transport that returns io.EOF once, which should prevent + // the router from calling Accept again. + mockTransport := &mocks.Transport{} + mockTransport.On("String").Maybe().Return("mock") + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) + mockTransport.On("Close").Return(nil) + mockTransport.On("Listen", mock.Anything).Return(nil) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + // Set up and start the router. + peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) + require.NoError(t, err) - // Set up a mock transport that returns io.EOF once, which should prevent - // the router from calling Accept again. 
- mockTransport := &mocks.Transport{} - mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) - mockTransport.On("Close").Return(nil) - mockTransport.On("Listen", mock.Anything).Return(nil) + router, err := p2p.NewRouter( + log.NewNopLogger(), + p2p.NopMetrics(), + selfKey, + peerManager, + func() *types.NodeInfo { return &selfInfo }, + mockTransport, + nil, + p2p.RouterOptions{}, + ) + require.NoError(t, err) - // Set up and start the router. - peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) - require.NoError(t, err) + require.NoError(t, router.Start(ctx)) + time.Sleep(time.Second) + router.Stop() - router, err := p2p.NewRouter( - log.NewNopLogger(), - p2p.NopMetrics(), - selfKey, - peerManager, - func() *types.NodeInfo { return &selfInfo }, - mockTransport, - nil, - p2p.RouterOptions{}, - ) - require.NoError(t, err) + mockTransport.AssertExpectations(t) - require.NoError(t, router.Start(ctx)) - time.Sleep(time.Second) - router.Stop() + }) - mockTransport.AssertExpectations(t) + } } func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { From 51b3f111dceb2ffb9ee0e756283303aba608579c Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 14 Jun 2022 19:48:48 -0400 Subject: [PATCH 109/203] p2p: fix mconn transport accept test (#8762) Fix minor test incongruency missed earlier. --- internal/p2p/router_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index e0910fb218..86af193859 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -455,7 +455,7 @@ func TestRouter_AcceptPeers_Errors(t *testing.T) { // the router from calling Accept again. 
mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, err) mockTransport.On("Close").Return(nil) mockTransport.On("Listen", mock.Anything).Return(nil) From f0b0f34f3f0c1e59145cb07f25a1b84f3c82d86e Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 15 Jun 2022 06:32:22 -0400 Subject: [PATCH 110/203] refactor: improve string representation for a vote against the proposal (#8745) --- types/vote.go | 19 ++++++++++----- types/vote_set.go | 4 ++-- types/vote_test.go | 60 +++++++++++++++++++++++++++++++++++++++------- 3 files changed, 67 insertions(+), 16 deletions(-) diff --git a/types/vote.go b/types/vote.go index f7006b8cde..821a630188 100644 --- a/types/vote.go +++ b/types/vote.go @@ -13,7 +13,8 @@ import ( ) const ( - nilVoteStr string = "nil-Vote" + absentVoteStr string = "Vote{absent}" + nilVoteStr string = "nil" // The maximum supported number of bytes in a vote extension. MaxVoteExtensionSize int = 1024 * 1024 @@ -189,7 +190,7 @@ func (vote *Vote) Copy() *Vote { // 10. 
timestamp func (vote *Vote) String() string { if vote == nil { - return nilVoteStr + return absentVoteStr } var typeString string @@ -202,16 +203,22 @@ func (vote *Vote) String() string { panic("Unknown vote type") } - return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X %X @ %s}", + var blockHashString string + if len(vote.BlockID.Hash) > 0 { + blockHashString = fmt.Sprintf("%X", tmbytes.Fingerprint(vote.BlockID.Hash)) + } else { + blockHashString = nilVoteStr + } + + return fmt.Sprintf("Vote{%v:%X %v/%d %s %s %X %d @ %s}", vote.ValidatorIndex, tmbytes.Fingerprint(vote.ValidatorAddress), vote.Height, vote.Round, - vote.Type, typeString, - tmbytes.Fingerprint(vote.BlockID.Hash), + blockHashString, tmbytes.Fingerprint(vote.Signature), - tmbytes.Fingerprint(vote.Extension), + len(vote.Extension), CanonicalTime(vote.Timestamp), ) } diff --git a/types/vote_set.go b/types/vote_set.go index 6d83ac85dd..7ca69f3025 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -505,7 +505,7 @@ func (voteSet *VoteSet) StringIndented(indent string) string { voteStrings := make([]string, len(voteSet.votes)) for i, vote := range voteSet.votes { if vote == nil { - voteStrings[i] = nilVoteStr + voteStrings[i] = absentVoteStr } else { voteStrings[i] = vote.String() } @@ -570,7 +570,7 @@ func (voteSet *VoteSet) voteStrings() []string { voteStrings := make([]string, len(voteSet.votes)) for i, vote := range voteSet.votes { if vote == nil { - voteStrings[i] = nilVoteStr + voteStrings[i] = absentVoteStr } else { voteStrings[i] = vote.String() } diff --git a/types/vote_test.go b/types/vote_test.go index d0819d7c4c..917de2e4bb 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -2,6 +2,7 @@ package types import ( "context" + "fmt" "testing" "time" @@ -16,6 +17,22 @@ import ( tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) +const ( + //nolint: lll + preCommitTestStr = `Vote{56789:6AF1F4111082 12345/2 Precommit 8B01023386C3 000000000000 0 @ 2017-12-25T03:00:01.234Z}` 
+ //nolint: lll + preVoteTestStr = `Vote{56789:6AF1F4111082 12345/2 Prevote 8B01023386C3 000000000000 0 @ 2017-12-25T03:00:01.234Z}` +) + +var ( + // nolint: lll + nilVoteTestStr = fmt.Sprintf(`Vote{56789:6AF1F4111082 12345/2 Precommit %s 000000000000 0 @ 2017-12-25T03:00:01.234Z}`, nilVoteStr) + formatNonEmptyVoteExtensionFn = func(voteExtensionLength int) string { + // nolint: lll + return fmt.Sprintf(`Vote{56789:6AF1F4111082 12345/2 Precommit 8B01023386C3 000000000000 %d @ 2017-12-25T03:00:01.234Z}`, voteExtensionLength) + } +) + func examplePrevote(t *testing.T) *Vote { t.Helper() return exampleVote(t, byte(tmproto.PrevoteType)) @@ -321,16 +338,43 @@ func TestVoteVerify(t *testing.T) { } func TestVoteString(t *testing.T) { - str := examplePrecommit(t).String() - expected := `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests - if str != expected { - t.Errorf("got unexpected string for Vote. 
Expected:\n%v\nGot:\n%v", expected, str) + testcases := map[string]struct { + vote *Vote + expectedResult string + }{ + "pre-commit": { + vote: examplePrecommit(t), + expectedResult: preCommitTestStr, + }, + "pre-vote": { + vote: examplePrevote(t), + expectedResult: preVoteTestStr, + }, + "absent vote": { + expectedResult: absentVoteStr, + }, + "nil vote": { + vote: func() *Vote { + v := examplePrecommit(t) + v.BlockID.Hash = nil + return v + }(), + expectedResult: nilVoteTestStr, + }, + "non-empty vote extension": { + vote: func() *Vote { + v := examplePrecommit(t) + v.Extension = []byte{1, 2} + return v + }(), + expectedResult: formatNonEmptyVoteExtensionFn(2), + }, } - str2 := examplePrevote(t).String() - expected = `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PREVOTE(Prevote) 8B01023386C3 000000000000 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests - if str2 != expected { - t.Errorf("got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str2) + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + require.Equal(t, tc.expectedResult, tc.vote.String()) + }) } } From 134bfefbe52d40dcbe5637bf48242e6584cdbb27 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jun 2022 10:46:37 +0000 Subject: [PATCH 111/203] build(deps): Bump github.com/vektra/mockery/v2 from 2.13.0 to 2.13.1 (#8766) Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.13.0 to 2.13.1.
Release notes

Sourced from github.com/vektra/mockery/v2's releases.

v2.13.1

Changelog

  • f04b040 Fix infinity mocking caused interface in mock
  • 9d7c819 Merge pull request #472 from grongor/fix-infinite-mocking
Commits
  • 9d7c819 Merge pull request #472 from grongor/fix-infinite-mocking
  • f04b040 Fix infinity mocking caused interface in mock
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/vektra/mockery/v2&package-manager=go_modules&previous-version=2.13.0&new-version=2.13.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7a5a1a9070..2ecb3ea483 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/creachadair/taskgroup v0.3.2 github.com/golangci/golangci-lint v1.46.0 github.com/google/go-cmp v0.5.8 - github.com/vektra/mockery/v2 v2.13.0 + github.com/vektra/mockery/v2 v2.13.1 gotest.tools v2.2.0+incompatible ) diff --git a/go.sum b/go.sum index 297b27a74f..5f391f3940 100644 --- a/go.sum +++ b/go.sum @@ -1116,8 +1116,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/vektra/mockery/v2 v2.13.0 h1:jzHQuiWMbLK52usAz/3wyIf07gZnACOsTJ8/AcHA/2s= -github.com/vektra/mockery/v2 v2.13.0/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M= +github.com/vektra/mockery/v2 v2.13.1 h1:Lqs7aZiC7TwZO76fJ/4Zsb3NaO4F7cuuz0mZLYeNwtQ= +github.com/vektra/mockery/v2 v2.13.1/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= From 1062ae73d63d49d37ce83177d8de6a09018b36cc Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 15 Jun 2022 09:04:13 -0400 Subject: [PATCH 112/203] e2e/ci: add extra split to 0.36 (#8770) --- .github/workflows/e2e-nightly-36x.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/e2e-nightly-36x.yml b/.github/workflows/e2e-nightly-36x.yml index 
2067602e7d..9ac7971467 100644 --- a/.github/workflows/e2e-nightly-36x.yml +++ b/.github/workflows/e2e-nightly-36x.yml @@ -15,7 +15,7 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03'] + group: ['00', '01', '02', '03', '04'] runs-on: ubuntu-latest timeout-minutes: 60 steps: @@ -35,7 +35,7 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 4 -d networks/nightly + run: ./build/generator -g 5 -d networks/nightly - name: Run testnets in group ${{ matrix.group }} working-directory: test/e2e From 56e329aa9e1fa19f8947288f047688abad652526 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Wed, 15 Jun 2022 21:46:50 -0600 Subject: [PATCH 113/203] cmd/tendermint/commands/debug: guard against PID int overflows (#8764) --- cmd/tendermint/commands/debug/kill.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/cmd/tendermint/commands/debug/kill.go b/cmd/tendermint/commands/debug/kill.go index a6c1ac7d86..7755817a63 100644 --- a/cmd/tendermint/commands/debug/kill.go +++ b/cmd/tendermint/commands/debug/kill.go @@ -33,10 +33,14 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`, Args: cobra.ExactArgs(2), RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - pid, err := strconv.ParseInt(args[0], 10, 64) + // Using Atoi so that the size of an integer can be automatically inferred. 
+ pid, err := strconv.Atoi(args[0]) if err != nil { return err } + if pid <= 0 { + return fmt.Errorf("PID value must be > 0; given value %q, got %d", args[0], pid) + } outFile := args[1] if outFile == "" { @@ -95,7 +99,7 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`, } logger.Info("killing Tendermint process") - if err := killProc(int(pid), tmpDir); err != nil { + if err := killProc(pid, tmpDir); err != nil { return err } @@ -113,6 +117,9 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`, // if the output file cannot be created or the tail command cannot be started. // An error is not returned if any subsequent syscall fails. func killProc(pid int, dir string) error { + if pid <= 0 { + return fmt.Errorf("PID must be > 0, got %d", pid) + } // pipe STDERR output from tailing the Tendermint process to a file // // NOTE: This will only work on UNIX systems. From 8854ce4e68ad719a4b231795e8252552360e7e2e Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Thu, 16 Jun 2022 18:04:10 +0200 Subject: [PATCH 114/203] e2e: reactivate network test (#8635) --- test/e2e/runner/test.go | 2 +- test/e2e/tests/net_test.go | 29 ++++++++++++++++++----------- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/test/e2e/runner/test.go b/test/e2e/runner/test.go index 2237588a11..da7a4a50ff 100644 --- a/test/e2e/runner/test.go +++ b/test/e2e/runner/test.go @@ -13,5 +13,5 @@ func Test(testnet *e2e.Testnet) error { return err } - return execVerbose("./build/tests", "-test.count=1", "-test.v") + return execVerbose("./build/tests", "-test.count=1") } diff --git a/test/e2e/tests/net_test.go b/test/e2e/tests/net_test.go index 71a9584122..2d8f285496 100644 --- a/test/e2e/tests/net_test.go +++ b/test/e2e/tests/net_test.go @@ -7,33 +7,40 @@ import ( "github.com/stretchr/testify/require" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/types" ) // Tests that all nodes have peered with each other, regardless of discovery method. 
func TestNet_Peers(t *testing.T) { - // FIXME Skip test since nodes aren't always able to fully mesh - t.SkipNow() - testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) netInfo, err := client.NetInfo(ctx) require.NoError(t, err) - require.Equal(t, len(node.Testnet.Nodes)-1, netInfo.NPeers, - "node is not fully meshed with peers") - + expectedPeers := len(node.Testnet.Nodes) + peers := make(map[string]*e2e.Node, 0) seen := map[string]bool{} for _, n := range node.Testnet.Nodes { - seen[n.Name] = (n.Name == node.Name) // we've clearly seen ourself + // we never save light client addresses as they use RPC or ourselves + if n.Mode == e2e.ModeLight || n.Name == node.Name { + expectedPeers-- + continue + } + peers[string(types.NodeIDFromPubKey(n.NodeKey.PubKey()))] = n + seen[n.Name] = false } + + require.Equal(t, expectedPeers, netInfo.NPeers, + "node is not fully meshed with peers") + for _, peerInfo := range netInfo.Peers { - id := peerInfo.ID - peer := node.Testnet.LookupNode(string(id)) - require.NotNil(t, peer, "unknown node %v", id) + id := string(peerInfo.ID) + peer, ok := peers[id] + require.True(t, ok, "unknown node %v", id) require.Contains(t, peerInfo.URL, peer.IP.String(), "unexpected IP address for peer %v", id) - seen[string(id)] = true + seen[peer.Name] = true } for name := range seen { From 7cf09399bb834373002d06af83bb5190e7db06f2 Mon Sep 17 00:00:00 2001 From: Adolfo Olivera Date: Thu, 16 Jun 2022 15:36:58 -0300 Subject: [PATCH 115/203] Fix typo in Using Tendermint section (#8780) Modify using-tendermint.md to replace unsafe_reset_all to unsafe-reset-all . Closes #8779 . 
--- docs/tendermint-core/using-tendermint.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md index 9501c5e662..b249d26192 100644 --- a/docs/tendermint-core/using-tendermint.md +++ b/docs/tendermint-core/using-tendermint.md @@ -254,7 +254,7 @@ afford to lose all blockchain data! To reset a blockchain, stop the node and run: ```sh -tendermint unsafe_reset_all +tendermint unsafe-reset-all ``` This command will remove the data directory and reset private validator and From a4f29bfd4483d928aa9571affcf9149820d8a051 Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Thu, 16 Jun 2022 21:31:17 +0200 Subject: [PATCH 116/203] Don't check PBTS-timeliness when in replay mode (#8774) Closes #8781 Temporary fix to this issue, so that e2e tests don't fail and potentially leave other problems uncovered. --- internal/consensus/state.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 3af775bb62..a7a0b8fed6 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -1530,7 +1530,8 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32 } sp := cs.state.ConsensusParams.Synchrony.SynchronyParamsOrDefaults() - if cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() { + //TODO: Remove this temporary fix when the complete solution is ready. 
See #8739 + if !cs.replayMode && cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() { logger.Debug("prevote step: Proposal is not timely; prevoting nil", "proposed", tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano), From 9e5b13725d8b1061354eb99acbd2c3c1cefe18c9 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 17 Jun 2022 08:02:10 -0400 Subject: [PATCH 117/203] p2p: peer store and dialing changes (#8737) --- CHANGELOG_PENDING.md | 4 + config/config.go | 8 + config/toml.go | 4 + internal/p2p/metrics.gen.go | 50 +++- internal/p2p/metrics.go | 20 +- internal/p2p/p2ptest/network.go | 1 + internal/p2p/peermanager.go | 344 ++++++++++++++++++++--- internal/p2p/peermanager_scoring_test.go | 172 +++++++++++- internal/p2p/peermanager_test.go | 74 +++-- internal/p2p/router.go | 31 +- node/node.go | 2 +- node/seed.go | 2 +- node/setup.go | 13 +- proto/tendermint/p2p/types.pb.go | 121 +++++--- proto/tendermint/p2p/types.proto | 1 + 15 files changed, 706 insertions(+), 141 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 608abc06d2..07e7555011 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -39,6 +39,10 @@ Special thanks to external contributors on this release: - [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish) - [p2p] \#7265 Peer manager reduces peer score for each failed dial attempts for peers that have not successfully dialed. (@tychoish) - [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) always advertise self, to enable mutual address discovery. (@altergui) + - [p2p] \#8737 Introduce "inactive" peer label to avoid re-dialing incompatible peers. (@tychoish) + - [p2p] \#8737 Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish) + - [p2p] \#8737 Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. 
(@tychoish) + - [p2p] \#8737 Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. (@tychoish) - Go API diff --git a/config/config.go b/config/config.go index d875b8569f..804c5fc872 100644 --- a/config/config.go +++ b/config/config.go @@ -627,6 +627,10 @@ type P2PConfig struct { //nolint: maligned // outbound). MaxConnections uint16 `mapstructure:"max-connections"` + // MaxOutgoingConnections defines the maximum number of connected peers (inbound and + // outbound). + MaxOutgoingConnections uint16 `mapstructure:"max-outgoing-connections"` + // MaxIncomingConnectionAttempts rate limits the number of incoming connection // attempts per IP address. MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"` @@ -667,6 +671,7 @@ func DefaultP2PConfig() *P2PConfig { ExternalAddress: "", UPNP: false, MaxConnections: 64, + MaxOutgoingConnections: 32, MaxIncomingConnectionAttempts: 100, FlushThrottleTimeout: 100 * time.Millisecond, // The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes. @@ -699,6 +704,9 @@ func (cfg *P2PConfig) ValidateBasic() error { if cfg.RecvRate < 0 { return errors.New("recv-rate can't be negative") } + if cfg.MaxOutgoingConnections > cfg.MaxConnections { + return errors.New("max-outgoing-connections cannot be larger than max-connections") + } return nil } diff --git a/config/toml.go b/config/toml.go index cf6635d45e..0aecbc1a3c 100644 --- a/config/toml.go +++ b/config/toml.go @@ -309,6 +309,10 @@ upnp = {{ .P2P.UPNP }} # Maximum number of connections (inbound and outbound). max-connections = {{ .P2P.MaxConnections }} +# Maximum number of connections reserved for outgoing +# connections. Must be less than max-connections +max-outgoing-connections = {{ .P2P.MaxOutgoingConnections }} + # Rate limits the number of incoming connection attempts per IP address. 
max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }} diff --git a/internal/p2p/metrics.gen.go b/internal/p2p/metrics.gen.go index cbfba29d94..cb215f2b68 100644 --- a/internal/p2p/metrics.gen.go +++ b/internal/p2p/metrics.gen.go @@ -14,11 +14,23 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { labels = append(labels, labelsAndValues[i]) } return &Metrics{ - Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + PeersConnected: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "peers", - Help: "Number of peers.", + Name: "peers_connected", + Help: "Number of peers connected.", + }, labels).With(labelsAndValues...), + PeersStored: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_stored", + Help: "Number of peers in the peer store database.", + }, labels).With(labelsAndValues...), + PeersInactivated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_inactivated", + Help: "Number of inactive peers stored.", }, labels).With(labelsAndValues...), PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, @@ -38,6 +50,30 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "peer_pending_send_bytes", Help: "Number of bytes pending being sent to a given peer.", }, append(labels, "peer_id")).With(labelsAndValues...), + PeersConnectedSuccess: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_connected_success", + Help: "Number of successful connection attempts", + }, labels).With(labelsAndValues...), + PeersConnectedFailure: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_connected_failure", + Help: "Number 
of failed connection attempts", + }, labels).With(labelsAndValues...), + PeersConnectedIncoming: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_connected_incoming", + Help: "Number of peers connected as a result of the peer dialing this node.", + }, labels).With(labelsAndValues...), + PeersConnectedOutgoing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_connected_outgoing", + Help: "Number of peers connected as a result of dialing the peer.", + }, labels).With(labelsAndValues...), RouterPeerQueueRecv: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -73,10 +109,16 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { func NopMetrics() *Metrics { return &Metrics{ - Peers: discard.NewGauge(), + PeersConnected: discard.NewGauge(), + PeersStored: discard.NewGauge(), + PeersInactivated: discard.NewGauge(), PeerReceiveBytesTotal: discard.NewCounter(), PeerSendBytesTotal: discard.NewCounter(), PeerPendingSendBytes: discard.NewGauge(), + PeersConnectedSuccess: discard.NewCounter(), + PeersConnectedFailure: discard.NewCounter(), + PeersConnectedIncoming: discard.NewGauge(), + PeersConnectedOutgoing: discard.NewGauge(), RouterPeerQueueRecv: discard.NewHistogram(), RouterPeerQueueSend: discard.NewHistogram(), RouterChannelQueueSend: discard.NewHistogram(), diff --git a/internal/p2p/metrics.go b/internal/p2p/metrics.go index b45f128e5a..a88aaa3f2e 100644 --- a/internal/p2p/metrics.go +++ b/internal/p2p/metrics.go @@ -26,8 +26,12 @@ var ( // Metrics contains metrics exposed by this package. type Metrics struct { - // Number of peers. - Peers metrics.Gauge + // Number of peers connected. + PeersConnected metrics.Gauge + // Number of peers in the peer store database. + PeersStored metrics.Gauge + // Number of inactive peers stored. 
+ PeersInactivated metrics.Gauge // Number of bytes per channel received from a given peer. PeerReceiveBytesTotal metrics.Counter `metrics_labels:"peer_id, chID, message_type"` // Number of bytes per channel sent to a given peer. @@ -35,6 +39,18 @@ type Metrics struct { // Number of bytes pending being sent to a given peer. PeerPendingSendBytes metrics.Gauge `metrics_labels:"peer_id"` + // Number of successful connection attempts + PeersConnectedSuccess metrics.Counter + // Number of failed connection attempts + PeersConnectedFailure metrics.Counter + + // Number of peers connected as a result of the peer dialing + // this node. + PeersConnectedIncoming metrics.Gauge + // Number of peers connected as a result of dialing the + // peer. + PeersConnectedOutgoing metrics.Gauge + // RouterPeerQueueRecv defines the time taken to read off of a peer's queue // before sending on the connection. //metrics:The time taken to read off of a peer's queue before sending on the connection. diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 85df029d83..5c1b0a2187 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -257,6 +257,7 @@ func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions) RetryTimeJitter: time.Millisecond, MaxPeers: opts.MaxPeers, MaxConnected: opts.MaxConnected, + Metrics: p2p.NopMetrics(), }) require.NoError(t, err) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 7391de4ea7..ef4011093d 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -38,11 +38,18 @@ const ( PeerStatusBad PeerStatus = "bad" // peer observed as bad ) +type peerConnectionDirection int + +const ( + peerConnectionIncoming peerConnectionDirection = iota + 1 + peerConnectionOutgoing +) + // PeerScore is a numeric score assigned to a peer (higher is better). 
-type PeerScore uint8 +type PeerScore int16 const ( - PeerScorePersistent PeerScore = math.MaxUint8 // persistent peers + PeerScorePersistent PeerScore = math.MaxInt16 // persistent peers MaxPeerScoreNotPersistent PeerScore = PeerScorePersistent - 1 ) @@ -101,6 +108,13 @@ type PeerManagerOptions struct { // outbound). 0 means no limit. MaxConnected uint16 + // MaxOutgoingConnections specifies how many outgoing + // connections a node will maintain. It must be lower than MaxConnected. If it is + // 0, then all connections can be outgoing. Once this limit is + // reached, the node will not dial peers, allowing the + // remaining peer connections to be used by incoming connections. + MaxOutgoingConnections uint16 + // MaxConnectedUpgrade is the maximum number of additional connections to // use for probing any better-scored peers to upgrade to when all connection // slots are full. 0 disables peer upgrading. @@ -145,6 +159,9 @@ type PeerManagerOptions struct { // persistentPeers provides fast PersistentPeers lookups. It is built // by optimize(). persistentPeers map[types.NodeID]bool + + // Peer Metrics + Metrics *Metrics } // Validate validates the options. 
@@ -193,6 +210,10 @@ func (o *PeerManagerOptions) Validate() error { } } + if o.MaxOutgoingConnections > 0 && o.MaxConnected < o.MaxOutgoingConnections { + return errors.New("cannot set MaxOutgoingConnections to a value larger than MaxConnected") + } + return nil } @@ -261,19 +282,20 @@ func (o *PeerManagerOptions) optimize() { type PeerManager struct { selfID types.NodeID options PeerManagerOptions + metrics *Metrics rand *rand.Rand dialWaker *tmsync.Waker // wakes up DialNext() on relevant peer changes evictWaker *tmsync.Waker // wakes up EvictNext() on relevant peer changes mtx sync.Mutex store *peerStore - subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address) - dialing map[types.NodeID]bool // peers being dialed (DialNext → Dialed/DialFail) - upgrading map[types.NodeID]types.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail) - connected map[types.NodeID]bool // connected peers (Dialed/Accepted → Disconnected) - ready map[types.NodeID]bool // ready peers (Ready → Disconnected) - evict map[types.NodeID]bool // peers scheduled for eviction (Connected → EvictNext) - evicting map[types.NodeID]bool // peers being evicted (EvictNext → Disconnected) + subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address) + dialing map[types.NodeID]bool // peers being dialed (DialNext → Dialed/DialFail) + upgrading map[types.NodeID]types.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail) + connected map[types.NodeID]peerConnectionDirection // connected peers (Dialed/Accepted → Disconnected) + ready map[types.NodeID]bool // ready peers (Ready → Disconnected) + evict map[types.NodeID]bool // peers scheduled for eviction (Connected → EvictNext) + evicting map[types.NodeID]bool // peers being evicted (EvictNext → Disconnected) } // NewPeerManager creates a new peer manager. 
@@ -298,16 +320,22 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec dialWaker: tmsync.NewWaker(), evictWaker: tmsync.NewWaker(), + metrics: NopMetrics(), store: store, dialing: map[types.NodeID]bool{}, upgrading: map[types.NodeID]types.NodeID{}, - connected: map[types.NodeID]bool{}, + connected: map[types.NodeID]peerConnectionDirection{}, ready: map[types.NodeID]bool{}, evict: map[types.NodeID]bool{}, evicting: map[types.NodeID]bool{}, subscriptions: map[*PeerUpdates]*PeerUpdates{}, } + + if options.Metrics != nil { + peerManager.metrics = options.Metrics + } + if err = peerManager.configurePeers(); err != nil { return nil, err } @@ -368,20 +396,45 @@ func (m *PeerManager) prunePeers() error { ranked := m.store.Ranked() for i := len(ranked) - 1; i >= 0; i-- { peerID := ranked[i].ID + switch { case m.store.Size() <= int(m.options.MaxPeers): return nil case m.dialing[peerID]: - case m.connected[peerID]: + case m.isConnected(peerID): default: if err := m.store.Delete(peerID); err != nil { return err } + m.metrics.PeersStored.Add(-1) } } return nil } +func (m *PeerManager) isConnected(peerID types.NodeID) bool { + _, ok := m.connected[peerID] + return ok +} + +type connectionStats struct { + incoming uint16 + outgoing uint16 +} + +func (m *PeerManager) getConnectedInfo() connectionStats { + out := connectionStats{} + for _, direction := range m.connected { + switch direction { + case peerConnectionIncoming: + out.incoming++ + case peerConnectionOutgoing: + out.outgoing++ + } + } + return out +} + // Add adds a peer to the manager, given as an address. If the peer already // exists, the address is added to it if it isn't already present. This will push // low scoring peers out of the address book if it exceeds the maximum size. 
@@ -405,12 +458,17 @@ func (m *PeerManager) Add(address NodeAddress) (bool, error) { if ok { return false, nil } + if peer.Inactive { + return false, nil + } // else add the new address peer.AddressInfo[address] = &peerAddressInfo{Address: address} if err := m.store.Set(peer); err != nil { return false, err } + + m.metrics.PeersStored.Add(1) if err := m.prunePeers(); err != nil { return true, err } @@ -464,13 +522,17 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) { // We allow dialing MaxConnected+MaxConnectedUpgrade peers. Including // MaxConnectedUpgrade allows us to probe additional peers that have a // higher score than any other peers, and if successful evict it. - if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >= - int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { + if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { + return NodeAddress{}, nil + } + + cinfo := m.getConnectedInfo() + if m.options.MaxOutgoingConnections > 0 && cinfo.outgoing >= m.options.MaxOutgoingConnections { return NodeAddress{}, nil } for _, peer := range m.store.Ranked() { - if m.dialing[peer.ID] || m.connected[peer.ID] { + if m.dialing[peer.ID] || m.isConnected(peer.ID) { continue } @@ -503,11 +565,10 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) { // DialFailed reports a failed dial attempt. This will make the peer available // for dialing again when appropriate (possibly after a retry timeout). -// -// FIXME: This should probably delete or mark bad addresses/peers after some time. 
func (m *PeerManager) DialFailed(ctx context.Context, address NodeAddress) error { m.mtx.Lock() defer m.mtx.Unlock() + m.metrics.PeersConnectedFailure.Add(1) delete(m.dialing, address.NodeID) for from, to := range m.upgrading { @@ -527,6 +588,7 @@ func (m *PeerManager) DialFailed(ctx context.Context, address NodeAddress) error addressInfo.LastDialFailure = time.Now().UTC() addressInfo.DialFailures++ + if err := m.store.Set(peer); err != nil { return err } @@ -560,6 +622,8 @@ func (m *PeerManager) Dialed(address NodeAddress) error { m.mtx.Lock() defer m.mtx.Unlock() + m.metrics.PeersConnectedSuccess.Add(1) + delete(m.dialing, address.NodeID) var upgradeFromPeer types.NodeID @@ -574,12 +638,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error { if address.NodeID == m.selfID { return fmt.Errorf("rejecting connection to self (%v)", address.NodeID) } - if m.connected[address.NodeID] { + if m.isConnected(address.NodeID) { return fmt.Errorf("peer %v is already connected", address.NodeID) } if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) { - if upgradeFromPeer == "" || len(m.connected) >= - int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { + if upgradeFromPeer == "" || len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { return fmt.Errorf("already connected to maximum number of peers") } } @@ -589,6 +652,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error { return fmt.Errorf("peer %q was removed while dialing", address.NodeID) } now := time.Now().UTC() + if peer.Inactive { + m.metrics.PeersInactivated.Add(-1) + } + peer.Inactive = false + peer.LastConnected = now if addressInfo, ok := peer.AddressInfo[address]; ok { addressInfo.DialFailures = 0 @@ -611,7 +679,9 @@ func (m *PeerManager) Dialed(address NodeAddress) error { } m.evict[upgradeFromPeer] = true } - m.connected[peer.ID] = true + + m.metrics.PeersConnectedOutgoing.Add(1) + m.connected[peer.ID] = peerConnectionOutgoing 
m.evictWaker.Wake() return nil @@ -641,11 +711,10 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error { if peerID == m.selfID { return fmt.Errorf("rejecting connection from self (%v)", peerID) } - if m.connected[peerID] { + if m.isConnected(peerID) { return fmt.Errorf("peer %q is already connected", peerID) } - if m.options.MaxConnected > 0 && - len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { + if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { return fmt.Errorf("already connected to maximum number of peers") } @@ -670,12 +739,17 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error { } } + if peer.Inactive { + m.metrics.PeersInactivated.Add(-1) + } + peer.Inactive = false peer.LastConnected = time.Now().UTC() if err := m.store.Set(peer); err != nil { return err } - m.connected[peerID] = true + m.metrics.PeersConnectedIncoming.Add(1) + m.connected[peerID] = peerConnectionIncoming if upgradeFromPeer != "" { m.evict[upgradeFromPeer] = true } @@ -694,7 +768,7 @@ func (m *PeerManager) Ready(ctx context.Context, peerID types.NodeID, channels C m.mtx.Lock() defer m.mtx.Unlock() - if m.connected[peerID] { + if m.isConnected(peerID) { m.ready[peerID] = true m.broadcast(ctx, PeerUpdate{ NodeID: peerID, @@ -730,7 +804,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) { // random one. 
for peerID := range m.evict { delete(m.evict, peerID) - if m.connected[peerID] && !m.evicting[peerID] { + if m.isConnected(peerID) && !m.evicting[peerID] { m.evicting[peerID] = true return peerID, nil } @@ -747,7 +821,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) { ranked := m.store.Ranked() for i := len(ranked) - 1; i >= 0; i-- { peer := ranked[i] - if m.connected[peer.ID] && !m.evicting[peer.ID] { + if m.isConnected(peer.ID) && !m.evicting[peer.ID] { m.evicting[peer.ID] = true return peer.ID, nil } @@ -762,6 +836,13 @@ func (m *PeerManager) Disconnected(ctx context.Context, peerID types.NodeID) { m.mtx.Lock() defer m.mtx.Unlock() + switch m.connected[peerID] { + case peerConnectionIncoming: + m.metrics.PeersConnectedIncoming.Add(-1) + case peerConnectionOutgoing: + m.metrics.PeersConnectedOutgoing.Add(-1) + } + ready := m.ready[peerID] delete(m.connected, peerID) @@ -792,17 +873,34 @@ func (m *PeerManager) Errored(peerID types.NodeID, err error) { m.mtx.Lock() defer m.mtx.Unlock() - if m.connected[peerID] { + if m.isConnected(peerID) { m.evict[peerID] = true } m.evictWaker.Wake() } +// Inactivate marks a peer as inactive which means we won't attempt to +// dial this peer again. A peer can be reactivated by successfully +// dialing and connecting to the node. +func (m *PeerManager) Inactivate(peerID types.NodeID) error { + m.mtx.Lock() + defer m.mtx.Unlock() + + peer, ok := m.store.peers[peerID] + if !ok { + return nil + } + + peer.Inactive = true + m.metrics.PeersInactivated.Add(1) + return m.store.Set(*peer) +} + // Advertise returns a list of peer addresses to advertise to a peer. // -// FIXME: This is fairly naïve and only returns the addresses of the -// highest-ranked peers. +// It sorts all peers in the peer store, and assembles a list of peers +// that is most likely to include the highest priority of peers. 
func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress { m.mtx.Lock() defer m.mtx.Unlock() @@ -815,19 +913,92 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress addresses = append(addresses, m.options.SelfAddress) } - for _, peer := range m.store.Ranked() { + var numAddresses int + var totalScore int + ranked := m.store.Ranked() + seenAddresses := map[NodeAddress]struct{}{} + scores := map[types.NodeID]int{} + + // get the total number of possible addresses + for _, peer := range ranked { if peer.ID == peerID { continue } + score := int(peer.Score()) + + totalScore += score + scores[peer.ID] = score + for addr := range peer.AddressInfo { + if _, ok := m.options.PrivatePeers[addr.NodeID]; !ok { + numAddresses++ + } + } + } + + var attempts uint16 + var addedLastIteration bool + + // if the number of addresses is less than the number of peers + // to advertise, adjust the limit downwards + if numAddresses < int(limit) { + limit = uint16(numAddresses) + } + + // collect addresses until we have the number requested + // (limit), or we've added all known addresses, or we've tried + // at least 256 times and the last time we iterated over + // remaining addresses we added no new candidates. 
+ for len(addresses) < int(limit) && (attempts < (limit*2) || !addedLastIteration) { + attempts++ + addedLastIteration = false + + for idx, peer := range ranked { + if peer.ID == peerID { + continue + } - for nodeAddr, addressInfo := range peer.AddressInfo { if len(addresses) >= int(limit) { - return addresses + break } - // only add non-private NodeIDs - if _, ok := m.options.PrivatePeers[nodeAddr.NodeID]; !ok { - addresses = append(addresses, addressInfo.Address) + for nodeAddr, addressInfo := range peer.AddressInfo { + if len(addresses) >= int(limit) { + break + } + + // only look at each address once, by + // tracking a set of addresses seen + if _, ok := seenAddresses[addressInfo.Address]; ok { + continue + } + + // only add non-private NodeIDs + if _, ok := m.options.PrivatePeers[nodeAddr.NodeID]; !ok { + // add the peer if the total number of ranked addresses is + // will fit within the limit, or otherwise adding + // addresses based on a coin flip. + + // the coinflip is based on the score, commonly, but + // 10% of the time we'll randomly insert a "loosing" + // peer. + + // nolint:gosec // G404: Use of weak random number generator + if numAddresses <= int(limit) || rand.Intn(totalScore+1) <= scores[peer.ID]+1 || rand.Intn((idx+1)*10) <= idx+1 { + addresses = append(addresses, addressInfo.Address) + addedLastIteration = true + seenAddresses[addressInfo.Address] = struct{}{} + } + } else { + seenAddresses[addressInfo.Address] = struct{}{} + // if the number of addresses + // is the same as the limit, + // we should remove private + // addresses from the limit so + // we can still return early. 
+ if numAddresses == int(limit) { + limit-- + } + } } } } @@ -901,8 +1072,14 @@ func (m *PeerManager) processPeerEvent(ctx context.Context, pu PeerUpdate) { switch pu.Status { case PeerStatusBad: + if m.store.peers[pu.NodeID].MutableScore == math.MinInt16 { + return + } m.store.peers[pu.NodeID].MutableScore-- case PeerStatusGood: + if m.store.peers[pu.NodeID].MutableScore == math.MaxInt16 { + return + } m.store.peers[pu.NodeID].MutableScore++ } } @@ -993,9 +1170,11 @@ func (m *PeerManager) findUpgradeCandidate(id types.NodeID, score PeerScore) typ for i := len(ranked) - 1; i >= 0; i-- { candidate := ranked[i] switch { + case candidate.ID == id: + continue case candidate.Score() >= score: return "" // no further peers can be scored lower, due to sorting - case !m.connected[candidate.ID]: + case !m.isConnected(candidate.ID): case m.evict[candidate.ID]: case m.evicting[candidate.ID]: case m.upgrading[candidate.ID] != "": @@ -1175,9 +1354,48 @@ func (s *peerStore) Ranked() []*peerInfo { s.ranked = append(s.ranked, peer) } sort.Slice(s.ranked, func(i, j int) bool { - // FIXME: If necessary, consider precomputing scores before sorting, - // to reduce the number of Score() calls. return s.ranked[i].Score() > s.ranked[j].Score() + // TODO: reevaluate more wholistic sorting, perhaps as follows: + + // // sort inactive peers after active peers + // if s.ranked[i].Inactive && !s.ranked[j].Inactive { + // return false + // } else if !s.ranked[i].Inactive && s.ranked[j].Inactive { + // return true + // } + + // iLastDialed, iLastDialSuccess := s.ranked[i].LastDialed() + // jLastDialed, jLastDialSuccess := s.ranked[j].LastDialed() + + // // sort peers who our most recent dialing attempt was + // // successful ahead of peers with recent dialing + // // failures + // switch { + // case iLastDialSuccess && jLastDialSuccess: + // // if both peers were (are?) 
successfully + // // connected, convey their score, but give the + // // one we dialed successfully most recently a bonus + + // iScore := s.ranked[i].Score() + // jScore := s.ranked[j].Score() + // if jLastDialed.Before(iLastDialed) { + // jScore++ + // } else { + // iScore++ + // } + + // return iScore > jScore + // case iLastDialSuccess: + // return true + // case jLastDialSuccess: + // return false + // default: + // // if both peers were not successful in their + // // most recent dialing attempt, fall back to + // // peer score. + + // return s.ranked[i].Score() > s.ranked[j].Score() + // } }) return s.ranked } @@ -1195,11 +1413,11 @@ type peerInfo struct { // These fields are ephemeral, i.e. not persisted to the database. Persistent bool - Seed bool Height int64 FixedScore PeerScore // mainly for tests MutableScore int64 // updated by router + Inactive bool } // peerInfoFromProto converts a Protobuf PeerInfo message to a peerInfo, @@ -1208,6 +1426,7 @@ func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) { p := &peerInfo{ ID: types.NodeID(msg.ID), AddressInfo: map[NodeAddress]*peerAddressInfo{}, + Inactive: msg.Inactive, } if msg.LastConnected != nil { p.LastConnected = *msg.LastConnected @@ -1231,6 +1450,7 @@ func (p *peerInfo) ToProto() *p2pproto.PeerInfo { msg := &p2pproto.PeerInfo{ ID: string(p.ID), LastConnected: &p.LastConnected, + Inactive: p.Inactive, } for _, addressInfo := range p.AddressInfo { msg.AddressInfo = append(msg.AddressInfo, addressInfo.ToProto()) @@ -1254,6 +1474,46 @@ func (p *peerInfo) Copy() peerInfo { return c } +// LastDialed returns when the peer was last dialed, and if that dial +// attempt was successful. If the peer was never dialed the time stamp +// is zero time. 
+func (p *peerInfo) LastDialed() (time.Time, bool) { + var ( + last time.Time + success bool + ) + last = last.Add(-1) // so it's after the epoch + + for _, addr := range p.AddressInfo { + if addr.LastDialFailure.Equal(addr.LastDialSuccess) { + if addr.LastDialFailure.IsZero() { + continue + } + if last.After(addr.LastDialSuccess) { + continue + } + success = true + last = addr.LastDialSuccess + } + if addr.LastDialFailure.After(last) { + success = false + last = addr.LastDialFailure + } + if addr.LastDialSuccess.After(last) || last.Equal(addr.LastDialSuccess) { + success = true + last = addr.LastDialSuccess + } + } + + // if we never modified last, then we should return it to the + // zero value + if last.Add(1).IsZero() { + return time.Time{}, success + } + + return last, success +} + // Score calculates a score for the peer. Higher-scored peers will be // preferred over lower scores. func (p *peerInfo) Score() PeerScore { @@ -1275,10 +1535,6 @@ func (p *peerInfo) Score() PeerScore { score -= int64(addr.DialFailures) } - if score <= 0 { - return 0 - } - return PeerScore(score) } diff --git a/internal/p2p/peermanager_scoring_test.go b/internal/p2p/peermanager_scoring_test.go index a45df0b728..b454da151f 100644 --- a/internal/p2p/peermanager_scoring_test.go +++ b/internal/p2p/peermanager_scoring_test.go @@ -34,7 +34,7 @@ func TestPeerScoring(t *testing.T) { t.Run("Synchronous", func(t *testing.T) { // update the manager and make sure it's correct - require.EqualValues(t, 0, peerManager.Scores()[id]) + require.Zero(t, peerManager.Scores()[id]) // add a bunch of good status updates and watch things increase. 
for i := 1; i < 10; i++ { @@ -97,3 +97,173 @@ func TestPeerScoring(t *testing.T) { } }) } + +func makeMockPeerStore(t *testing.T, peers ...peerInfo) *peerStore { + t.Helper() + s, err := newPeerStore(dbm.NewMemDB()) + if err != nil { + t.Fatal(err) + } + for idx := range peers { + if err := s.Set(peers[idx]); err != nil { + t.Fatal(err) + } + } + return s +} + +func TestPeerRanking(t *testing.T) { + t.Run("InactiveSecond", func(t *testing.T) { + t.Skip("inactive status is not currently factored into peer rank.") + + store := makeMockPeerStore(t, + peerInfo{ID: "second", Inactive: true}, + peerInfo{ID: "first", Inactive: false}, + ) + + ranked := store.Ranked() + if len(ranked) != 2 { + t.Fatal("missing peer in ranked output") + } + if ranked[0].ID != "first" { + t.Error("inactive peer is first") + } + if ranked[1].ID != "second" { + t.Error("active peer is second") + } + }) + t.Run("ScoreOrder", func(t *testing.T) { + for _, test := range []struct { + Name string + First int64 + Second int64 + }{ + { + Name: "Mirror", + First: 100, + Second: -100, + }, + { + Name: "VeryLow", + First: 0, + Second: -100, + }, + { + Name: "High", + First: 300, + Second: 256, + }, + } { + t.Run(test.Name, func(t *testing.T) { + store := makeMockPeerStore(t, + peerInfo{ + ID: "second", + MutableScore: test.Second, + }, + peerInfo{ + ID: "first", + MutableScore: test.First, + }) + + ranked := store.Ranked() + if len(ranked) != 2 { + t.Fatal("missing peer in ranked output") + } + if ranked[0].ID != "first" { + t.Error("higher peer is first") + } + if ranked[1].ID != "second" { + t.Error("higher peer is second") + } + }) + } + }) +} + +func TestLastDialed(t *testing.T) { + t.Run("Zero", func(t *testing.T) { + p := &peerInfo{} + ts, ok := p.LastDialed() + if !ts.IsZero() { + t.Error("timestamp should be zero:", ts) + } + if ok { + t.Error("peer reported success, despite none") + } + }) + t.Run("NeverDialed", func(t *testing.T) { + p := &peerInfo{ + AddressInfo: 
map[NodeAddress]*peerAddressInfo{ + {NodeID: "kip"}: {}, + {NodeID: "merlin"}: {}, + }, + } + ts, ok := p.LastDialed() + if !ts.IsZero() { + t.Error("timestamp should be zero:", ts) + } + if ok { + t.Error("peer reported success, despite none") + } + }) + t.Run("Ordered", func(t *testing.T) { + base := time.Now() + for _, test := range []struct { + Name string + SuccessTime time.Time + FailTime time.Time + ExpectedSuccess bool + }{ + { + Name: "Zero", + }, + { + Name: "Success", + SuccessTime: base.Add(time.Hour), + FailTime: base, + ExpectedSuccess: true, + }, + { + Name: "Equal", + SuccessTime: base, + FailTime: base, + ExpectedSuccess: true, + }, + { + Name: "Failure", + SuccessTime: base, + FailTime: base.Add(time.Hour), + ExpectedSuccess: false, + }, + } { + t.Run(test.Name, func(t *testing.T) { + p := &peerInfo{ + AddressInfo: map[NodeAddress]*peerAddressInfo{ + {NodeID: "kip"}: {LastDialSuccess: test.SuccessTime}, + {NodeID: "merlin"}: {LastDialFailure: test.FailTime}, + }, + } + ts, ok := p.LastDialed() + if test.ExpectedSuccess && !ts.Equal(test.SuccessTime) { + if !ts.Equal(test.FailTime) { + t.Fatal("got unexpected timestamp:", ts) + } + + t.Error("last dialed time reported incorrect value:", ts) + } + if !test.ExpectedSuccess && !ts.Equal(test.FailTime) { + if !ts.Equal(test.SuccessTime) { + t.Fatal("got unexpected timestamp:", ts) + } + + t.Error("last dialed time reported incorrect value:", ts) + } + if test.ExpectedSuccess != ok { + t.Error("test reported incorrect outcome for last dialed type") + } + }) + } + + }) + +} diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index 47e8462a42..5e9c3a8a49 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -524,11 +524,11 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ PeerScores: map[types.NodeID]p2p.PeerScore{ - a.NodeID: 0, - 
b.NodeID: 1, - c.NodeID: 2, - d.NodeID: 3, - e.NodeID: 0, + a.NodeID: p2p.PeerScore(0), + b.NodeID: p2p.PeerScore(1), + c.NodeID: p2p.PeerScore(2), + d.NodeID: p2p.PeerScore(3), + e.NodeID: p2p.PeerScore(0), }, PersistentPeers: []types.NodeID{c.NodeID, d.NodeID}, MaxConnected: 2, @@ -581,10 +581,8 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { // Now, if we disconnect a, we should be allowed to dial d because we have a // free upgrade slot. + require.Error(t, peerManager.Dialed(d)) peerManager.Disconnected(ctx, a.NodeID) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) - require.Equal(t, d, dial) require.NoError(t, peerManager.Dialed(d)) // However, if we disconnect b (such that only c and d are connected), we @@ -605,7 +603,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) { c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, + PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1), c.NodeID: 1}, MaxConnected: 1, MaxConnectedUpgrade: 2, }) @@ -771,7 +769,10 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, + PeerScores: map[types.NodeID]p2p.PeerScore{ + b.NodeID: p2p.PeerScore(1), + c.NodeID: p2p.PeerScore(2), + }, MaxConnected: 1, MaxConnectedUpgrade: 2, }) @@ -887,7 +888,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 2, MaxConnectedUpgrade: 1, - PeerScores: map[types.NodeID]p2p.PeerScore{c.NodeID: 1, d.NodeID: 1}, + 
PeerScores: map[types.NodeID]p2p.PeerScore{c.NodeID: p2p.PeerScore(1), d.NodeID: 1}, }) require.NoError(t, err) @@ -937,7 +938,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, MaxConnectedUpgrade: 2, - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, + PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1), c.NodeID: 1}, }) require.NoError(t, err) @@ -984,10 +985,10 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { MaxConnected: 2, MaxConnectedUpgrade: 1, PeerScores: map[types.NodeID]p2p.PeerScore{ - a.NodeID: 3, - b.NodeID: 2, - c.NodeID: 10, - d.NodeID: 1, + a.NodeID: p2p.PeerScore(3), + b.NodeID: p2p.PeerScore(2), + c.NodeID: p2p.PeerScore(10), + d.NodeID: p2p.PeerScore(1), }, }) require.NoError(t, err) @@ -1040,9 +1041,9 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { MaxConnected: 2, MaxConnectedUpgrade: 1, PeerScores: map[types.NodeID]p2p.PeerScore{ - a.NodeID: 1, - b.NodeID: 2, - c.NodeID: 3, + a.NodeID: p2p.PeerScore(1), + b.NodeID: p2p.PeerScore(2), + c.NodeID: p2p.PeerScore(3), }, }) require.NoError(t, err) @@ -1161,8 +1162,8 @@ func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ PeerScores: map[types.NodeID]p2p.PeerScore{ - c.NodeID: 1, - d.NodeID: 2, + c.NodeID: p2p.PeerScore(1), + d.NodeID: p2p.PeerScore(2), }, MaxConnected: 1, MaxConnectedUpgrade: 1, @@ -1209,8 +1210,8 @@ func TestPeerManager_Accepted_Upgrade(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ PeerScores: map[types.NodeID]p2p.PeerScore{ - b.NodeID: 1, - c.NodeID: 1, + b.NodeID: p2p.PeerScore(1), + c.NodeID: p2p.PeerScore(1), }, MaxConnected: 1, MaxConnectedUpgrade: 2, @@ -1252,8 +1253,8 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) { peerManager, 
err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ PeerScores: map[types.NodeID]p2p.PeerScore{ - b.NodeID: 1, - c.NodeID: 1, + b.NodeID: p2p.PeerScore(1), + c.NodeID: p2p.PeerScore(1), }, MaxConnected: 1, MaxConnectedUpgrade: 2, @@ -1428,7 +1429,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, MaxConnectedUpgrade: 1, - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1}, + PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: p2p.PeerScore(1)}, }) require.NoError(t, err) @@ -1469,7 +1470,9 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, MaxConnectedUpgrade: 1, - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1}, + PeerScores: map[types.NodeID]p2p.PeerScore{ + b.NodeID: p2p.PeerScore(1), + }, }) require.NoError(t, err) @@ -1833,6 +1836,7 @@ func TestPeerManager_Advertise(t *testing.T) { require.NoError(t, err) require.True(t, added) + require.Len(t, peerManager.Advertise(dID, 100), 6) // d should get all addresses. require.ElementsMatch(t, []p2p.NodeAddress{ aTCP, aMem, bTCP, bMem, cTCP, cMem, @@ -1846,10 +1850,18 @@ func TestPeerManager_Advertise(t *testing.T) { // Asking for 0 addresses should return, well, 0. require.Empty(t, peerManager.Advertise(aID, 0)) - // Asking for 2 addresses should get the highest-rated ones, i.e. a. 
- require.ElementsMatch(t, []p2p.NodeAddress{ - aTCP, aMem, - }, peerManager.Advertise(dID, 2)) + // Asking for 2 addresses should get two addresses + // the content of the list when there are two + addrs := peerManager.Advertise(dID, 2) + require.Len(t, addrs, 2) + for _, addr := range addrs { + if dID == addr.NodeID { + t.Fatal("never advertise self") + } + if cID == addr.NodeID { + t.Fatal("should not have returned the lowest ranked peer") + } + } } func TestPeerManager_Advertise_Self(t *testing.T) { diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 511fa0fb92..d7236d472f 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -443,9 +443,12 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error { func (r *Router) dialSleep(ctx context.Context) { if r.options.DialSleep == nil { + // the connTracker (on the other side) only rate + // limits peers for dialing more than once every 10ms, + // so these numbers are safe. const ( - maxDialerInterval = 3000 - minDialerInterval = 250 + maxDialerInterval = 500 // ms + minDialerInterval = 100 // ms ) // nolint:gosec // G404: Use of weak random number generator @@ -611,7 +614,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { case errors.Is(err, context.Canceled): return case err != nil: - r.logger.Error("failed to dial peer", "peer", address, "err", err) + r.logger.Debug("failed to dial peer", "peer", address, "err", err) if err = r.peerManager.DialFailed(ctx, address); err != nil { r.logger.Error("failed to report dial failure", "peer", address, "err", err) } @@ -633,8 +636,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { } if err := r.runWithPeerMutex(func() error { return r.peerManager.Dialed(address) }); err != nil { - r.logger.Error("failed to dial peer", - "op", "outgoing/dialing", "peer", address.NodeID, "err", err) + r.logger.Error("failed to dial peer", "op", "outgoing/dialing", "peer", address.NodeID, 
"err", err) conn.Close() return } @@ -692,12 +694,13 @@ func (r *Router) dialPeer(ctx context.Context, address NodeAddress) (Connection, // Internet can't and needs a different public address. conn, err := r.transport.Dial(dialCtx, endpoint) if err != nil { - r.logger.Error("failed to dial endpoint", "peer", address.NodeID, "endpoint", endpoint, "err", err) + r.logger.Debug("failed to dial endpoint", "peer", address.NodeID, "endpoint", endpoint, "err", err) } else { r.logger.Debug("dialed peer", "peer", address.NodeID, "endpoint", endpoint) return conn, nil } } + return nil, errors.New("all endpoints failed") } @@ -724,14 +727,6 @@ func (r *Router) handshakePeer( return peerInfo, fmt.Errorf("invalid handshake NodeInfo: %w", err) } - if peerInfo.Network != nodeInfo.Network { - if err := r.peerManager.store.Delete(peerInfo.NodeID); err != nil { - return peerInfo, fmt.Errorf("problem removing peer from store from incorrect network [%s]: %w", peerInfo.Network, err) - } - - return peerInfo, fmt.Errorf("connected to peer from wrong network, %q, removed from peer store", peerInfo.Network) - } - if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID { return peerInfo, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)", peerInfo.NodeID, types.NodeIDFromPubKey(peerKey)) @@ -742,6 +737,10 @@ func (r *Router) handshakePeer( } if err := nodeInfo.CompatibleWith(peerInfo); err != nil { + if err := r.peerManager.Inactivate(peerInfo.NodeID); err != nil { + return peerInfo, fmt.Errorf("problem inactivating peer %q: %w", peerInfo.ID(), err) + } + return peerInfo, ErrRejected{ err: err, id: peerInfo.ID(), @@ -761,7 +760,7 @@ func (r *Router) runWithPeerMutex(fn func() error) error { // channels. It will close the given connection and send queue when done, or if // they are closed elsewhere it will cause this method to shut down and return. 
func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connection, channels ChannelIDSet) { - r.metrics.Peers.Add(1) + r.metrics.PeersConnected.Add(1) r.peerManager.Ready(ctx, peerID, channels) sendQueue := r.getOrMakeQueue(peerID, channels) @@ -774,7 +773,7 @@ func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connec sendQueue.close() r.peerManager.Disconnected(ctx, peerID) - r.metrics.Peers.Add(-1) + r.metrics.PeersConnected.Add(-1) }() r.logger.Info("peer connected", "peer", peerID, "endpoint", conn) diff --git a/node/node.go b/node/node.go index 1bda1f0f74..77773044b0 100644 --- a/node/node.go +++ b/node/node.go @@ -203,7 +203,7 @@ func makeNode( } } - peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID) + peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID, nodeMetrics.p2p) closers = append(closers, peerCloser) if err != nil { return nil, combineCloseError( diff --git a/node/seed.go b/node/seed.go index a0b71e411f..92d55230f2 100644 --- a/node/seed.go +++ b/node/seed.go @@ -67,7 +67,7 @@ func makeSeedNode( // Setup Transport and Switch. 
p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID) - peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID) + peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID, p2pMetrics) if err != nil { return nil, combineCloseError( fmt.Errorf("failed to create peer manager: %w", err), diff --git a/node/setup.go b/node/setup.go index 51a0482494..345489f8d6 100644 --- a/node/setup.go +++ b/node/setup.go @@ -202,6 +202,7 @@ func createPeerManager( cfg *config.Config, dbProvider config.DBProvider, nodeID types.NodeID, + metrics *p2p.Metrics, ) (*p2p.PeerManager, closer, error) { selfAddr, err := p2p.ParseNodeAddress(nodeID.AddressString(cfg.P2P.ExternalAddress)) @@ -223,18 +224,28 @@ func createPeerManager( maxConns = 64 } + var maxOutgoingConns uint16 + switch { + case cfg.P2P.MaxOutgoingConnections > 0: + maxOutgoingConns = cfg.P2P.MaxOutgoingConnections + default: + maxOutgoingConns = maxConns / 2 + } + maxUpgradeConns := uint16(4) options := p2p.PeerManagerOptions{ SelfAddress: selfAddr, MaxConnected: maxConns, + MaxOutgoingConnections: maxOutgoingConns, MaxConnectedUpgrade: maxUpgradeConns, - MaxPeers: maxUpgradeConns + 2*maxConns, + MaxPeers: maxUpgradeConns + 4*maxConns, MinRetryTime: 250 * time.Millisecond, MaxRetryTime: 30 * time.Minute, MaxRetryTimePersistent: 5 * time.Minute, RetryTimeJitter: 5 * time.Second, PrivatePeers: privatePeerIDs, + Metrics: metrics, } peers := []p2p.NodeAddress{} diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go index bffa6884fe..7965b668bb 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -243,6 +243,7 @@ type PeerInfo struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` AddressInfo []*PeerAddressInfo `protobuf:"bytes,2,rep,name=address_info,json=addressInfo,proto3" json:"address_info,omitempty"` LastConnected *time.Time 
`protobuf:"bytes,3,opt,name=last_connected,json=lastConnected,proto3,stdtime" json:"last_connected,omitempty"` + Inactive bool `protobuf:"varint,4,opt,name=inactive,proto3" json:"inactive,omitempty"` } func (m *PeerInfo) Reset() { *m = PeerInfo{} } @@ -299,6 +300,13 @@ func (m *PeerInfo) GetLastConnected() *time.Time { return nil } +func (m *PeerInfo) GetInactive() bool { + if m != nil { + return m.Inactive + } + return false +} + type PeerAddressInfo struct { Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` LastDialSuccess *time.Time `protobuf:"bytes,2,opt,name=last_dial_success,json=lastDialSuccess,proto3,stdtime" json:"last_dial_success,omitempty"` @@ -378,46 +386,46 @@ func init() { func init() { proto.RegisterFile("tendermint/p2p/types.proto", fileDescriptor_c8a29e659aeca578) } var fileDescriptor_c8a29e659aeca578 = []byte{ - // 610 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x4e, 0x1b, 0x3d, - 0x14, 0xcd, 0x24, 0x21, 0x09, 0x37, 0x84, 0xf0, 0x59, 0xe8, 0xd3, 0x10, 0xa9, 0x19, 0x14, 0x36, - 0xac, 0x26, 0x52, 0xaa, 0x2e, 0xba, 0x64, 0x40, 0xad, 0x22, 0x55, 0x25, 0x9a, 0xa2, 0x2e, 0xda, - 0xc5, 0x68, 0x32, 0x76, 0x82, 0xc5, 0xc4, 0xb6, 0x3c, 0x4e, 0x4b, 0xdf, 0x82, 0x37, 0xe9, 0x63, - 0x94, 0x25, 0xcb, 0xae, 0xd2, 0x6a, 0xd8, 0xf6, 0x21, 0x2a, 0xdb, 0x33, 0x40, 0xa2, 0x2e, 0xd8, - 0xf9, 0xdc, 0xe3, 0x73, 0xee, 0x8f, 0xad, 0x0b, 0x3d, 0x45, 0x18, 0x26, 0x72, 0x41, 0x99, 0x1a, - 0x8a, 0x91, 0x18, 0xaa, 0x6f, 0x82, 0x64, 0xbe, 0x90, 0x5c, 0x71, 0xb4, 0xfb, 0xc8, 0xf9, 0x62, - 0x24, 0x7a, 0xfb, 0x73, 0x3e, 0xe7, 0x86, 0x1a, 0xea, 0x93, 0xbd, 0xd5, 0xf3, 0xe6, 0x9c, 0xcf, - 0x53, 0x32, 0x34, 0x68, 0xba, 0x9c, 0x0d, 0x15, 0x5d, 0x90, 0x4c, 0xc5, 0x0b, 0x61, 0x2f, 0x0c, - 0x2e, 0xa0, 0x3b, 0xd1, 0x87, 0x84, 0xa7, 0x1f, 0x89, 0xcc, 0x28, 0x67, 0xe8, 0x00, 0x6a, 0x62, - 0x24, 0x5c, 0xe7, 0xd0, 0x39, 0xae, 0x07, 0xcd, 0x7c, 0xe5, 0xd5, 0x26, 0xa3, 0x49, 0xa8, 
0x63, - 0x68, 0x1f, 0xb6, 0xa6, 0x29, 0x4f, 0xae, 0xdc, 0xaa, 0x26, 0x43, 0x0b, 0xd0, 0x1e, 0xd4, 0x62, - 0x21, 0xdc, 0x9a, 0x89, 0xe9, 0xe3, 0xe0, 0x47, 0x15, 0x5a, 0xef, 0x39, 0x26, 0x63, 0x36, 0xe3, - 0x68, 0x02, 0x7b, 0xa2, 0x48, 0x11, 0x7d, 0xb1, 0x39, 0x8c, 0x79, 0x7b, 0xe4, 0xf9, 0xeb, 0x4d, - 0xf8, 0x1b, 0xa5, 0x04, 0xf5, 0xdb, 0x95, 0x57, 0x09, 0xbb, 0x62, 0xa3, 0xc2, 0x23, 0x68, 0x32, - 0x8e, 0x49, 0x44, 0xb1, 0x29, 0x64, 0x3b, 0x80, 0x7c, 0xe5, 0x35, 0x4c, 0xc2, 0xb3, 0xb0, 0xa1, - 0xa9, 0x31, 0x46, 0x1e, 0xb4, 0x53, 0x9a, 0x29, 0xc2, 0xa2, 0x18, 0x63, 0x69, 0xaa, 0xdb, 0x0e, - 0xc1, 0x86, 0x4e, 0x30, 0x96, 0xc8, 0x85, 0x26, 0x23, 0xea, 0x2b, 0x97, 0x57, 0x6e, 0xdd, 0x90, - 0x25, 0xd4, 0x4c, 0x59, 0xe8, 0x96, 0x65, 0x0a, 0x88, 0x7a, 0xd0, 0x4a, 0x2e, 0x63, 0xc6, 0x48, - 0x9a, 0xb9, 0x8d, 0x43, 0xe7, 0x78, 0x27, 0x7c, 0xc0, 0x5a, 0xb5, 0xe0, 0x8c, 0x5e, 0x11, 0xe9, - 0x36, 0xad, 0xaa, 0x80, 0xe8, 0x35, 0x6c, 0x71, 0x75, 0x49, 0xa4, 0xdb, 0x32, 0x6d, 0xbf, 0xd8, - 0x6c, 0xbb, 0x1c, 0xd5, 0xb9, 0xbe, 0x54, 0x34, 0x6d, 0x15, 0x83, 0xcf, 0xd0, 0x59, 0x63, 0xd1, - 0x01, 0xb4, 0xd4, 0x75, 0x44, 0x19, 0x26, 0xd7, 0x66, 0x8a, 0xdb, 0x61, 0x53, 0x5d, 0x8f, 0x35, - 0x44, 0x43, 0x68, 0x4b, 0x91, 0x98, 0x76, 0x49, 0x96, 0x15, 0xa3, 0xd9, 0xcd, 0x57, 0x1e, 0x84, - 0x93, 0xd3, 0x13, 0x1b, 0x0d, 0x41, 0x8a, 0xa4, 0x38, 0x0f, 0xbe, 0x3b, 0xd0, 0x9a, 0x10, 0x22, - 0xcd, 0x33, 0xfd, 0x0f, 0x55, 0x8a, 0xad, 0x65, 0xd0, 0xc8, 0x57, 0x5e, 0x75, 0x7c, 0x16, 0x56, - 0x29, 0x46, 0x01, 0xec, 0x14, 0x8e, 0x11, 0x65, 0x33, 0xee, 0x56, 0x0f, 0x6b, 0xff, 0x7c, 0x3a, - 0x42, 0x64, 0xe1, 0xab, 0xed, 0xc2, 0x76, 0xfc, 0x08, 0xd0, 0x5b, 0xd8, 0x4d, 0xe3, 0x4c, 0x45, - 0x09, 0x67, 0x8c, 0x24, 0x8a, 0x60, 0xf3, 0x1c, 0xed, 0x51, 0xcf, 0xb7, 0xff, 0xd3, 0x2f, 0xff, - 0xa7, 0x7f, 0x51, 0xfe, 0xcf, 0xa0, 0x7e, 0xf3, 0xcb, 0x73, 0xc2, 0x8e, 0xd6, 0x9d, 0x96, 0xb2, - 0xc1, 0x1f, 0x07, 0xba, 0x1b, 0x99, 0xf4, 0xdc, 0xcb, 0x96, 0x8b, 0x81, 0x14, 0x10, 0xbd, 0x83, - 0xff, 0x4c, 0x5a, 0x4c, 0xe3, 
0x34, 0xca, 0x96, 0x49, 0x52, 0x8e, 0xe5, 0x39, 0x99, 0xbb, 0x5a, - 0x7a, 0x46, 0xe3, 0xf4, 0x83, 0x15, 0xae, 0xbb, 0xcd, 0x62, 0x9a, 0x2e, 0x25, 0x79, 0x76, 0x1f, - 0x0f, 0x6e, 0x6f, 0xac, 0x10, 0x1d, 0x41, 0xe7, 0xa9, 0x51, 0x66, 0xfe, 0x60, 0x27, 0xdc, 0xc1, - 0x8f, 0x77, 0xb2, 0xe0, 0xfc, 0x36, 0xef, 0x3b, 0x77, 0x79, 0xdf, 0xf9, 0x9d, 0xf7, 0x9d, 0x9b, - 0xfb, 0x7e, 0xe5, 0xee, 0xbe, 0x5f, 0xf9, 0x79, 0xdf, 0xaf, 0x7c, 0x7a, 0x35, 0xa7, 0xea, 0x72, - 0x39, 0xf5, 0x13, 0xbe, 0x18, 0x3e, 0xd9, 0x12, 0x4f, 0x17, 0x86, 0xd9, 0x05, 0xeb, 0x1b, 0x64, - 0xda, 0x30, 0xd1, 0x97, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x0b, 0xe9, 0x56, 0xd3, 0x5a, 0x04, - 0x00, 0x00, + // 621 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x41, 0x4f, 0xdb, 0x30, + 0x14, 0x6e, 0xda, 0xd2, 0x96, 0x57, 0x4a, 0x99, 0x85, 0xa6, 0x50, 0x69, 0x0d, 0x2a, 0x17, 0x4e, + 0x89, 0xd4, 0x69, 0x87, 0x1d, 0x09, 0x68, 0x53, 0xa5, 0x69, 0x54, 0x1e, 0xda, 0x61, 0x3b, 0x44, + 0x69, 0xec, 0x16, 0x8b, 0xd4, 0xb6, 0x12, 0x97, 0xb1, 0x7f, 0xc1, 0xbf, 0x1a, 0xd2, 0x2e, 0x1c, + 0x77, 0xea, 0xa6, 0x70, 0xdd, 0x8f, 0x98, 0xec, 0x24, 0xd0, 0x56, 0x3b, 0x70, 0xf3, 0xf7, 0x9e, + 0xbf, 0xcf, 0xdf, 0x7b, 0xcf, 0x7a, 0xd0, 0x53, 0x94, 0x13, 0x9a, 0xcc, 0x19, 0x57, 0x9e, 0x1c, + 0x4a, 0x4f, 0x7d, 0x97, 0x34, 0x75, 0x65, 0x22, 0x94, 0x40, 0xbb, 0x4f, 0x39, 0x57, 0x0e, 0x65, + 0x6f, 0x7f, 0x26, 0x66, 0xc2, 0xa4, 0x3c, 0x7d, 0xca, 0x6f, 0xf5, 0x9c, 0x99, 0x10, 0xb3, 0x98, + 0x7a, 0x06, 0x4d, 0x16, 0x53, 0x4f, 0xb1, 0x39, 0x4d, 0x55, 0x38, 0x97, 0xf9, 0x85, 0xc1, 0x05, + 0x74, 0xc7, 0xfa, 0x10, 0x89, 0xf8, 0x33, 0x4d, 0x52, 0x26, 0x38, 0x3a, 0x80, 0x9a, 0x1c, 0x4a, + 0xdb, 0x3a, 0xb4, 0x8e, 0xeb, 0x7e, 0x33, 0x5b, 0x3a, 0xb5, 0xf1, 0x70, 0x8c, 0x75, 0x0c, 0xed, + 0xc3, 0xd6, 0x24, 0x16, 0xd1, 0x95, 0x5d, 0xd5, 0x49, 0x9c, 0x03, 0xb4, 0x07, 0xb5, 0x50, 0x4a, + 0xbb, 0x66, 0x62, 0xfa, 0x38, 0xf8, 0x51, 0x85, 0xd6, 0x47, 0x41, 0xe8, 0x88, 0x4f, 0x05, 0x1a, + 0xc3, 
0x9e, 0x2c, 0x9e, 0x08, 0xae, 0xf3, 0x37, 0x8c, 0x78, 0x7b, 0xe8, 0xb8, 0xeb, 0x45, 0xb8, + 0x1b, 0x56, 0xfc, 0xfa, 0xdd, 0xd2, 0xa9, 0xe0, 0xae, 0xdc, 0x70, 0x78, 0x04, 0x4d, 0x2e, 0x08, + 0x0d, 0x18, 0x31, 0x46, 0xb6, 0x7d, 0xc8, 0x96, 0x4e, 0xc3, 0x3c, 0x78, 0x86, 0x1b, 0x3a, 0x35, + 0x22, 0xc8, 0x81, 0x76, 0xcc, 0x52, 0x45, 0x79, 0x10, 0x12, 0x92, 0x18, 0x77, 0xdb, 0x18, 0xf2, + 0xd0, 0x09, 0x21, 0x09, 0xb2, 0xa1, 0xc9, 0xa9, 0xfa, 0x26, 0x92, 0x2b, 0xbb, 0x6e, 0x92, 0x25, + 0xd4, 0x99, 0xd2, 0xe8, 0x56, 0x9e, 0x29, 0x20, 0xea, 0x41, 0x2b, 0xba, 0x0c, 0x39, 0xa7, 0x71, + 0x6a, 0x37, 0x0e, 0xad, 0xe3, 0x1d, 0xfc, 0x88, 0x35, 0x6b, 0x2e, 0x38, 0xbb, 0xa2, 0x89, 0xdd, + 0xcc, 0x59, 0x05, 0x44, 0x6f, 0x61, 0x4b, 0xa8, 0x4b, 0x9a, 0xd8, 0x2d, 0x53, 0xf6, 0xab, 0xcd, + 0xb2, 0xcb, 0x56, 0x9d, 0xeb, 0x4b, 0x45, 0xd1, 0x39, 0x63, 0xf0, 0x15, 0x3a, 0x6b, 0x59, 0x74, + 0x00, 0x2d, 0x75, 0x13, 0x30, 0x4e, 0xe8, 0x8d, 0xe9, 0xe2, 0x36, 0x6e, 0xaa, 0x9b, 0x91, 0x86, + 0xc8, 0x83, 0x76, 0x22, 0x23, 0x53, 0x2e, 0x4d, 0xd3, 0xa2, 0x35, 0xbb, 0xd9, 0xd2, 0x01, 0x3c, + 0x3e, 0x3d, 0xc9, 0xa3, 0x18, 0x12, 0x19, 0x15, 0xe7, 0xc1, 0x4f, 0x0b, 0x5a, 0x63, 0x4a, 0x13, + 0x33, 0xa6, 0x97, 0x50, 0x65, 0x24, 0x97, 0xf4, 0x1b, 0xd9, 0xd2, 0xa9, 0x8e, 0xce, 0x70, 0x95, + 0x11, 0xe4, 0xc3, 0x4e, 0xa1, 0x18, 0x30, 0x3e, 0x15, 0x76, 0xf5, 0xb0, 0xf6, 0xdf, 0xd1, 0x51, + 0x9a, 0x14, 0xba, 0x5a, 0x0e, 0xb7, 0xc3, 0x27, 0x80, 0xde, 0xc3, 0x6e, 0x1c, 0xa6, 0x2a, 0x88, + 0x04, 0xe7, 0x34, 0x52, 0x94, 0x98, 0x71, 0xb4, 0x87, 0x3d, 0x37, 0xff, 0x9f, 0x6e, 0xf9, 0x3f, + 0xdd, 0x8b, 0xf2, 0x7f, 0xfa, 0xf5, 0xdb, 0xdf, 0x8e, 0x85, 0x3b, 0x9a, 0x77, 0x5a, 0xd2, 0x74, + 0xff, 0x19, 0x0f, 0x23, 0xc5, 0xae, 0xa9, 0x19, 0x5a, 0x0b, 0x3f, 0xe2, 0xc1, 0x5f, 0x0b, 0xba, + 0x1b, 0x2e, 0xf4, 0x4c, 0xca, 0x76, 0x14, 0xcd, 0x2a, 0x20, 0xfa, 0x00, 0x2f, 0x8c, 0x25, 0xc2, + 0xc2, 0x38, 0x48, 0x17, 0x51, 0x54, 0xb6, 0xec, 0x39, 0xae, 0xba, 0x9a, 0x7a, 0xc6, 0xc2, 0xf8, + 0x53, 0x4e, 0x5c, 0x57, 0x9b, 0x86, 0x2c, 
0x5e, 0x24, 0xf4, 0xd9, 0x35, 0x3e, 0xaa, 0xbd, 0xcb, + 0x89, 0xe8, 0x08, 0x3a, 0xab, 0x42, 0xa9, 0x29, 0xb5, 0x83, 0x77, 0xc8, 0xd3, 0x9d, 0xd4, 0x3f, + 0xbf, 0xcb, 0xfa, 0xd6, 0x7d, 0xd6, 0xb7, 0xfe, 0x64, 0x7d, 0xeb, 0xf6, 0xa1, 0x5f, 0xb9, 0x7f, + 0xe8, 0x57, 0x7e, 0x3d, 0xf4, 0x2b, 0x5f, 0xde, 0xcc, 0x98, 0xba, 0x5c, 0x4c, 0xdc, 0x48, 0xcc, + 0xbd, 0x95, 0x0d, 0xb2, 0xba, 0x4c, 0xcc, 0x9e, 0x58, 0xdf, 0x2e, 0x93, 0x86, 0x89, 0xbe, 0xfe, + 0x17, 0x00, 0x00, 0xff, 0xff, 0x42, 0xcb, 0x37, 0x26, 0x76, 0x04, 0x00, 0x00, } func (m *ProtocolVersion) Marshal() (dAtA []byte, err error) { @@ -600,6 +608,16 @@ func (m *PeerInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Inactive { + i-- + if m.Inactive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } if m.LastConnected != nil { n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastConnected, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected):]) if err3 != nil { @@ -792,6 +810,9 @@ func (m *PeerInfo) Size() (n int) { l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected) n += 1 + l + sovTypes(uint64(l)) } + if m.Inactive { + n += 2 + } return n } @@ -1487,6 +1508,26 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Inactive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Inactive = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/proto/tendermint/p2p/types.proto b/proto/tendermint/p2p/types.proto index faccd59d2b..a2ed6c90fc 100644 --- a/proto/tendermint/p2p/types.proto +++ b/proto/tendermint/p2p/types.proto @@ -32,6 +32,7 @@ message PeerInfo { 
string id = 1 [(gogoproto.customname) = "ID"]; repeated PeerAddressInfo address_info = 2; google.protobuf.Timestamp last_connected = 3 [(gogoproto.stdtime) = true]; + bool inactive = 4; } message PeerAddressInfo { From 0ac03468d84147df8c23061c8cfe2f14c5695e12 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 17 Jun 2022 14:10:10 -0400 Subject: [PATCH 118/203] p2p: track peers stored on startup (#8787) --- internal/p2p/peermanager.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index ef4011093d..10b95798b3 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -367,6 +367,9 @@ func (m *PeerManager) configurePeers() error { } } } + + m.metrics.PeersStored.Add(float64(m.store.Size())) + return nil } From 82c1372f9e5f17a2de2eeb72aa29f092b615724e Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 17 Jun 2022 17:22:40 -0400 Subject: [PATCH 119/203] abci+test/e2e/app: add mutex for new methods (#8577) These methods should be protected by a mutex. --- test/e2e/app/app.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 771b726438..1f66e95cce 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -342,6 +342,9 @@ func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestA // total number of transaction bytes to exceed `req.MaxTxBytes`, we will not // append our special vote extension transaction. func (app *Application) PrepareProposal(_ context.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { + app.mu.Lock() + defer app.mu.Unlock() + var sum int64 var extCount int for _, vote := range req.LocalLastCommit.Votes { @@ -423,6 +426,9 @@ func (app *Application) PrepareProposal(_ context.Context, req *abci.RequestPrep // ProcessProposal implements part of the Application interface. // It accepts any proposal that does not contain a malformed transaction. 
func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { + app.mu.Lock() + defer app.mu.Unlock() + for _, tx := range req.Txs { k, v, err := parseTx(tx) if err != nil { @@ -454,6 +460,9 @@ func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProc // key/value store ("extensionSum") with the sum of all of the numbers collected // from the vote extensions. func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) { + app.mu.Lock() + defer app.mu.Unlock() + // We ignore any requests for vote extensions that don't match our expected // next height. if req.Height != int64(app.state.Height)+1 { @@ -485,6 +494,9 @@ func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVot // without doing anything about them. In this case, it just makes sure that the // vote extension is a well-formed integer value. func (app *Application) VerifyVoteExtension(_ context.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) { + app.mu.Lock() + defer app.mu.Unlock() + // We allow vote extensions to be optional if len(req.VoteExtension) == 0 { return &abci.ResponseVerifyVoteExtension{ From 4d820ff4f5c93cf00e7618b2d3086ad23e1bb5de Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 17 Jun 2022 18:27:38 -0400 Subject: [PATCH 120/203] p2p: peer score should not wrap around (#8790) --- internal/p2p/peermanager.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 10b95798b3..a5438b9eca 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -1538,6 +1538,10 @@ func (p *peerInfo) Score() PeerScore { score -= int64(addr.DialFailures) } + if score < math.MinInt16 { + score = math.MinInt16 + } + return PeerScore(score) } From e3e162ff10a169c0a16f9c5723580fa51f2668b1 Mon Sep 17 00:00:00 2001 From: 
Ian Jungyong Um <31336310+code0xff@users.noreply.github.com> Date: Mon, 20 Jun 2022 00:26:25 +0900 Subject: [PATCH 121/203] p2p: fix typo (#8793) Fix the typo in router --- internal/p2p/router.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index d7236d472f..55dc737205 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -417,7 +417,7 @@ func (r *Router) routeChannel( } } -func (r *Router) numConccurentDials() int { +func (r *Router) numConcurrentDials() int { if r.options.NumConcurrentDials == nil { return runtime.NumCPU() } @@ -564,7 +564,7 @@ func (r *Router) dialPeers(ctx context.Context) { // able to add peers at a reasonable pace, though the number // is somewhat arbitrary. The action is further throttled by a // sleep after sending to the addresses channel. - for i := 0; i < r.numConccurentDials(); i++ { + for i := 0; i < r.numConcurrentDials(); i++ { wg.Add(1) go func() { defer wg.Done() From 2382b5c364e3ab4915d07fbec2bb4769fa3c4688 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jun 2022 08:54:09 +0000 Subject: [PATCH 122/203] build(deps): Bump github.com/adlio/schema from 1.3.0 to 1.3.3 (#8798) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/adlio/schema](https://github.com/adlio/schema) from 1.3.0 to 1.3.3.
Release notes

Sourced from github.com/adlio/schema's releases.

v1.3.3

Full Changelog: https://github.com/adlio/schema/compare/v1.3.1...v1.3.3

v1.3.1

What's Changed

Full Changelog: https://github.com/adlio/schema/compare/v1.3.0...v1.3.1

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/adlio/schema&package-manager=go_modules&previous-version=1.3.0&new-version=1.3.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 13 ++++---- go.sum | 102 +++++++++++++++++++++++++++++++++------------------------ 2 files changed, 66 insertions(+), 49 deletions(-) diff --git a/go.mod b/go.mod index 2ecb3ea483..227e0f5cbb 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.17 require ( github.com/BurntSushi/toml v1.1.0 - github.com/adlio/schema v1.3.0 + github.com/adlio/schema v1.3.3 github.com/btcsuite/btcd v0.22.1 github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/fortytw2/leaktest v1.3.0 @@ -30,7 +30,7 @@ require ( github.com/stretchr/testify v1.7.2 github.com/tendermint/tm-db v0.6.6 golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e - golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 + golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 google.golang.org/grpc v1.47.0 pgregory.net/rapid v0.4.7 @@ -55,7 +55,7 @@ require ( github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Microsoft/go-winio v0.5.1 // indirect + github.com/Microsoft/go-winio v0.5.2 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/OpenPeeDeeP/depguard v1.1.0 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect @@ -73,7 +73,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/charithe/durationcheck v0.0.9 // indirect github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect - github.com/containerd/continuity v0.2.1 // indirect + github.com/containerd/continuity v0.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/daixiang0/gci v0.3.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -123,6 +123,7 @@ require ( github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect 
github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-version v1.4.0 // indirect @@ -167,7 +168,7 @@ require ( github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect - github.com/opencontainers/runc v1.0.3 // indirect + github.com/opencontainers/runc v1.1.3 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.2 // indirect github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect @@ -220,7 +221,7 @@ require ( go.uber.org/zap v1.21.0 // indirect golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d // indirect + golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c // indirect golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/tools v0.1.11 // indirect diff --git a/go.sum b/go.sum index 5f391f3940..8ee6ee3111 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,5 @@ 4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0= 4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -70,6 +69,7 @@ github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZ 
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -95,8 +95,8 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= -github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -107,8 +107,8 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod 
h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/adlio/schema v1.3.0 h1:eSVYLxYWbm/6ReZBCkLw4Fz7uqC+ZNoPvA39bOwi52A= -github.com/adlio/schema v1.3.0/go.mod h1:51QzxkpeFs6lRY11kPye26IaFPOV+HqEj01t5aXXKfs= +github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I= +github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -150,7 +150,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= @@ -185,6 +184,8 @@ github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRt github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= 
+github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -196,11 +197,11 @@ github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 h1:tFXjAxje9thrTF4h57Ckik+scJjTWdwAtZqZPtOT48M= github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4/go.mod h1:W8EnPSQ8Nv4fUjc/v1/8tHFqhuOJXnRub0dTfuAQktU= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= @@ -216,16 +217,14 @@ github.com/cncf/xds/go 
v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/continuity v0.2.1 h1:/EeEo2EtN3umhbbgCveyjifoMYg0pS+nMMEemaYw634= -github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod 
h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -246,7 +245,8 @@ github.com/creachadair/tomledit v0.0.22 h1:lRtepmrwhzDq+g1gv5ftVn5itgo7CjYbm6abK github.com/creachadair/tomledit v0.0.22/go.mod h1:cIu/4x5L855oSRejIqr+WRFh+mv9g4fWLiUFaApYn/Y= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/daixiang0/gci v0.3.3 h1:55xJKH7Gl9Vk6oQ1cMkwrDWjAkT1D+D1G9kNmRcAIY4= github.com/daixiang0/gci v0.3.3/go.mod h1:1Xr2bxnQbDxCqqulUOv8qpGqkgRw9RSCGGjEC2LjF8o= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -266,8 +266,13 @@ github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KP github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/cli v20.10.14+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= +github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v20.10.7+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= +github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -376,6 +381,7 @@ github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= @@ -394,7 +400,6 @@ github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -497,6 +502,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -522,7 +529,6 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -546,14 +552,12 @@ github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Rep github.com/gotestyourself/gotestyourself 
v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -610,6 +614,9 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo 
v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= @@ -702,6 +709,7 @@ github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3 github.com/leonklingele/grouper v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg= github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= +github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -771,13 +779,17 @@ github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= 
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -819,7 +831,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b h1:MKwruh+HeCSKWphkxuzvRzU4QzDkg7yiPkDVV0cDFgI= github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b/go.mod h1:TLJifjWF6eotcfzDjKZsDqWJ+73Uvj/N85MvVyrvynM= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid/v2 v2.0.2/go.mod h1:mtBL0Qe/0HAx6/a4Z30qxVIAL1eQDweXq5lxOEiwQ68= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ 
-850,15 +861,18 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k= -github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= +github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= github.com/otiai10/copy v1.2.0/go.mod 
h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= @@ -904,7 +918,6 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -918,8 +931,6 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= @@ -929,14 +940,12 @@ github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common 
v0.34.0 h1:RBmGO9d/FVjqHT0yUGQwBJhkwKV+wPCn7KGpvfab0uE= github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= @@ -985,7 +994,8 @@ github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5A github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= 
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/securego/gosec/v2 v2.11.0 h1:+PDkpzR41OI2jrw1q6AdXZCbsNGNGT7pQjal0H0cArI= github.com/securego/gosec/v2 v2.11.0/go.mod h1:SX8bptShuG8reGC0XS09+a4H2BoWSJi+fscA+Pulbpo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -1029,7 +1039,6 @@ github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= @@ -1041,7 +1050,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= @@ -1101,8 +1109,6 @@ github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoi github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s= github.com/tommy-muehle/go-mnd/v2 
v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= @@ -1121,6 +1127,13 @@ github.com/vektra/mockery/v2 v2.13.1/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu1 github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod 
h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -1141,7 +1154,6 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= gitlab.com/bosi/decorder v0.2.1 h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w= gitlab.com/bosi/decorder v0.2.1/go.mod h1:6C/nhLSbF6qZbYD8bRmISBwc6vcWdNsiIBkRvjJFrH0= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= @@ -1284,7 +1296,6 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1331,14 +1342,14 @@ golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1406,7 +1417,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1429,8 +1439,8 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1450,7 +1460,6 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1463,12 +1472,15 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1481,13 +1493,15 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220403020550-483a9cbc67c0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d h1:Zu/JngovGLVi6t2J3nmAf3AoTDwuzw85YZ3b9o4yU7s= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c h1:aFV+BgZ4svzjfabn8ERpuB4JI4N6/rdy1iusx77G3oU= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1531,6 +1545,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1764,7 +1779,6 @@ google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -1854,6 +1868,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod 
h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From acf97128f32c91bd8c2b8bc2b47bd5bb1cfa1c2c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jun 2022 12:04:19 +0000 Subject: [PATCH 123/203] build(deps): Bump github.com/prometheus/common from 0.34.0 to 0.35.0 (#8800) Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.34.0 to 0.35.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/prometheus/common&package-manager=go_modules&previous-version=0.34.0&new-version=0.35.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 227e0f5cbb..545a866555 100644 --- a/go.mod +++ b/go.mod @@ -240,6 +240,6 @@ require ( require ( github.com/creachadair/tomledit v0.0.22 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.34.0 + github.com/prometheus/common v0.35.0 github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca ) diff --git a/go.sum b/go.sum index 8ee6ee3111..aa6b126337 100644 --- a/go.sum +++ b/go.sum @@ -937,8 +937,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.34.0 h1:RBmGO9d/FVjqHT0yUGQwBJhkwKV+wPCn7KGpvfab0uE= -github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= +github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE= +github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= From 28d323995891080484617c076649b63a504c5bf4 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 20 Jun 2022 11:47:56 -0400 Subject: [PATCH 124/203] p2p: wake dialing thread after sleep (#8803) --- internal/p2p/peermanager.go | 9 +++++++++ internal/p2p/router.go | 5 +++++ 2 files changed, 14 insertions(+) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index a5438b9eca..210c34e2ab 100644 --- 
a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -498,6 +498,15 @@ func (m *PeerManager) HasMaxPeerCapacity() bool { return len(m.connected) >= int(m.options.MaxConnected) } +func (m *PeerManager) HasDialedMaxPeers() bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + stats := m.getConnectedInfo() + + return stats.outgoing >= m.options.MaxOutgoingConnections +} + // DialNext finds an appropriate peer address to dial, and marks it as dialing. // If no peer is found, or all connection slots are full, it blocks until one // becomes available. The caller must call Dialed() or DialFailed() for the diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 55dc737205..ec225f8e36 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -466,6 +466,11 @@ func (r *Router) dialSleep(ctx context.Context) { } r.options.DialSleep(ctx) + + if !r.peerManager.HasDialedMaxPeers() { + r.peerManager.dialWaker.Wake() + } + } // acceptPeers accepts inbound connections from peers on the given transport, From 6f168df7e46228458a43840053a99c4fa7d1da97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Jun 2022 17:46:01 +0000 Subject: [PATCH 125/203] build(deps): Bump github.com/spf13/cobra from 1.4.0 to 1.5.0 (#8812) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.4.0 to 1.5.0.
Release notes

Sourced from github.com/spf13/cobra's releases.

v1.5.0

Spring 2022 Release 🌥️

Hello everyone! Welcome to another release of cobra. Completions continue to get better and better. This release adds a few really cool new features. We also continue to patch versions of our dependencies as they become available via dependabot. Happy coding!

Active help 👐🏼

Shout out to @​marckhouzam for a big value add: Active Help spf13/cobra#1482. With active help, a program can provide some inline warnings or hints for users as they hit tab. Now, your CLIs can be even more intuitive to use!

Currently active help is only supported for bash V2 and zsh. Marc wrote a whole guide on how to do this, so make sure to give it a good read to learn how you can add this to your cobra code! https://github.com/spf13/cobra/blob/master/active_help.md

Group flags 🧑🏼‍🤝‍🧑🏼

Cobra now has the ability to mark flags as required or exclusive as a group. Shout out to our newest maintainer @​johnSchnake for this! spf13/cobra#1654 Let's say you have a username flag that MUST be partnered with a password flag. Well, now, you can enforce those as being required together:

rootCmd.Flags().StringVarP(&u, "username", "u", "", "Username (required if password is set)")
rootCmd.Flags().StringVarP(&pw, "password", "p", "", "Password (required if username is set)")
rootCmd.MarkFlagsRequiredTogether("username", "password")

Flags may also be marked as "mutally exclusive" with the MarkFlagsMutuallyExclusive(string, string ... ) command API. Refer to our user guide documentation for further info!

Completions 👀

Documentation 📝

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/spf13/cobra&package-manager=go_modules&previous-version=1.4.0&new-version=1.5.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 4 ++-- go.sum | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 545a866555..8dd4c92884 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/rs/cors v1.8.2 github.com/rs/zerolog v1.27.0 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.4.0 + github.com/spf13/cobra v1.5.0 github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.7.2 github.com/tendermint/tm-db v0.6.6 @@ -74,7 +74,7 @@ require ( github.com/charithe/durationcheck v0.0.9 // indirect github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect github.com/containerd/continuity v0.3.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/daixiang0/gci v0.3.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect diff --git a/go.sum b/go.sum index aa6b126337..bdb93f91eb 100644 --- a/go.sum +++ b/go.sum @@ -234,8 +234,9 @@ github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/atomicfile v0.2.6 h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg= github.com/creachadair/atomicfile v0.2.6/go.mod 
h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc= github.com/creachadair/command v0.0.0-20220426235536-a748effdf6a1/go.mod h1:bAM+qFQb/KwWyCc9MLC4U1jvn3XyakqP5QRkds5T6cY= @@ -1040,8 +1041,9 @@ github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= -github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= From cfd13825e2c4ea5b6099ae05f73524a97a1eba82 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 21 Jun 2022 16:44:14 -0400 Subject: [PATCH 126/203] p2p: add eviction metrics and cleanup dialing error handling (#8819) --- internal/p2p/metrics.gen.go | 7 ++ internal/p2p/metrics.go | 3 + internal/p2p/peermanager.go | 24 +++--- internal/p2p/peermanager_test.go | 132 +++++++++++-------------------- internal/p2p/router.go | 39 ++++----- 5 files changed, 84 insertions(+), 121 deletions(-) diff --git a/internal/p2p/metrics.gen.go b/internal/p2p/metrics.gen.go index cb215f2b68..9cffbc46b6 100644 --- a/internal/p2p/metrics.gen.go +++ b/internal/p2p/metrics.gen.go @@ -74,6 +74,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "peers_connected_outgoing", Help: "Number of peers connected as a result of the peer 
dialing this node.", }, labels).With(labelsAndValues...), + PeersEvicted: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peers_evicted", + Help: "Number of peers evicted by this node.", + }, labels).With(labelsAndValues...), RouterPeerQueueRecv: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -119,6 +125,7 @@ func NopMetrics() *Metrics { PeersConnectedFailure: discard.NewCounter(), PeersConnectedIncoming: discard.NewGauge(), PeersConnectedOutgoing: discard.NewGauge(), + PeersEvicted: discard.NewCounter(), RouterPeerQueueRecv: discard.NewHistogram(), RouterPeerQueueSend: discard.NewHistogram(), RouterChannelQueueSend: discard.NewHistogram(), diff --git a/internal/p2p/metrics.go b/internal/p2p/metrics.go index a88aaa3f2e..bc233f691f 100644 --- a/internal/p2p/metrics.go +++ b/internal/p2p/metrics.go @@ -51,6 +51,9 @@ type Metrics struct { // this node. PeersConnectedOutgoing metrics.Gauge + // Number of peers evicted by this node. + PeersEvicted metrics.Counter + // RouterPeerQueueRecv defines the time taken to read off of a peer's queue // before sending on the connection. //metrics:The time taken to read off of a peer's queue before sending on the connection. diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 210c34e2ab..f2f74007b2 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -513,12 +513,13 @@ func (m *PeerManager) HasDialedMaxPeers() bool { // returned peer. 
func (m *PeerManager) DialNext(ctx context.Context) (NodeAddress, error) { for { - address, err := m.TryDialNext() - if err != nil || (address != NodeAddress{}) { - return address, err + if address := m.TryDialNext(); (address != NodeAddress{}) { + return address, nil } + select { case <-m.dialWaker.Sleep(): + continue case <-ctx.Done(): return NodeAddress{}, ctx.Err() } @@ -527,7 +528,7 @@ func (m *PeerManager) DialNext(ctx context.Context) (NodeAddress, error) { // TryDialNext is equivalent to DialNext(), but immediately returns an empty // address if no peers or connection slots are available. -func (m *PeerManager) TryDialNext() (NodeAddress, error) { +func (m *PeerManager) TryDialNext() NodeAddress { m.mtx.Lock() defer m.mtx.Unlock() @@ -535,12 +536,12 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) { // MaxConnectedUpgrade allows us to probe additional peers that have a // higher score than any other peers, and if successful evict it. if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { - return NodeAddress{}, nil + return NodeAddress{} } cinfo := m.getConnectedInfo() if m.options.MaxOutgoingConnections > 0 && cinfo.outgoing >= m.options.MaxOutgoingConnections { - return NodeAddress{}, nil + return NodeAddress{} } for _, peer := range m.store.Ranked() { @@ -563,16 +564,16 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) { if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) { upgradeFromPeer := m.findUpgradeCandidate(peer.ID, peer.Score()) if upgradeFromPeer == "" { - return NodeAddress{}, nil + return NodeAddress{} } m.upgrading[upgradeFromPeer] = peer.ID } m.dialing[peer.ID] = true - return addressInfo.Address, nil + return addressInfo.Address } } - return NodeAddress{}, nil + return NodeAddress{} } // DialFailed reports a failed dial attempt. 
This will make the peer available @@ -680,8 +681,7 @@ func (m *PeerManager) Dialed(address NodeAddress) error { return err } - if upgradeFromPeer != "" && m.options.MaxConnected > 0 && - len(m.connected) >= int(m.options.MaxConnected) { + if upgradeFromPeer != "" && m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) { // Look for an even lower-scored peer that may have appeared since we // started the upgrade. if p, ok := m.store.Get(upgradeFromPeer); ok { @@ -690,11 +690,11 @@ func (m *PeerManager) Dialed(address NodeAddress) error { } } m.evict[upgradeFromPeer] = true + m.evictWaker.Wake() } m.metrics.PeersConnectedOutgoing.Add(1) m.connected[peer.ID] = peerConnectionOutgoing - m.evictWaker.Wake() return nil } diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index 5e9c3a8a49..a1543bf18d 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -384,16 +384,14 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) // Add b. We shouldn't be able to dial it, due to MaxConnected. added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) // Spawn a goroutine to fail a's dial attempt. 
@@ -427,8 +425,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.DialFailed(ctx, dial)) failed := time.Now() @@ -458,8 +455,7 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { err = peerManager.Accepted(a.NodeID) require.NoError(t, err) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Zero(t, dial) dctx, dcancel := context.WithTimeout(ctx, 300*time.Millisecond) @@ -490,8 +486,7 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.Dialed(a)) @@ -499,16 +494,14 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, b, dial) // At this point, adding c will not allow dialing it. 
added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) } @@ -540,7 +533,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() + dial := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, a, dial) require.NoError(t, peerManager.Dialed(a)) @@ -549,8 +542,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, b, dial) // Even though we are at capacity, we should be allowed to dial c for an @@ -558,8 +550,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, c, dial) // However, since we're using all upgrade slots now, we can't add and dial @@ -567,16 +558,14 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { added, err = peerManager.Add(d) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) // We go through with c's upgrade. require.NoError(t, peerManager.Dialed(c)) // Still can't dial d. 
- dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) // Now, if we disconnect a, we should be allowed to dial d because we have a @@ -592,8 +581,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { added, err = peerManager.Add(e) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) } @@ -613,8 +601,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.Dialed(a)) @@ -622,8 +609,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, b, dial) // Adding c and dialing it will fail, because a is the only connected @@ -631,8 +617,7 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Empty(t, dial) } @@ -653,22 +638,19 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) // Adding a's TCP address will not dispense a, since it's already dialing. 
added, err = peerManager.Add(aTCP) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) // Marking a as dialed will still not dispense it. require.NoError(t, peerManager.Dialed(a)) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) // Adding b and accepting a connection from it will not dispense it either. @@ -676,8 +658,7 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(bID)) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) } @@ -706,16 +687,14 @@ func TestPeerManager_TryDialNext_Multiple(t *testing.T) { // All addresses should be dispensed as long as dialing them has failed. dial := []p2p.NodeAddress{} for range addresses { - address, err := peerManager.TryDialNext() - require.NoError(t, err) + address := peerManager.TryDialNext() require.NotZero(t, address) require.NoError(t, peerManager.DialFailed(ctx, address)) dial = append(dial, address) } require.ElementsMatch(t, dial, addresses) - address, err := peerManager.TryDialNext() - require.NoError(t, err) + address := peerManager.TryDialNext() require.Zero(t, address) } @@ -740,15 +719,14 @@ func TestPeerManager_DialFailed(t *testing.T) { // Dialing and then calling DialFailed with a different address (same // NodeID) should unmark as dialing and allow us to dial the other address // again, but not register the failed address. 
- dial, err := peerManager.TryDialNext() + dial := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, a, dial) require.NoError(t, peerManager.DialFailed(ctx, p2p.NodeAddress{ Protocol: "tcp", NodeID: aID, Hostname: "localhost"})) require.Equal(t, []p2p.NodeAddress{a}, peerManager.Addresses(aID)) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, a, dial) // Calling DialFailed on same address twice should be fine. @@ -782,8 +760,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.Dialed(a)) @@ -791,8 +768,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, b, dial) // Adding c and dialing it will fail, even though it could upgrade a and we @@ -801,14 +777,12 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Empty(t, dial) // Failing b's dial will now make c available for dialing. 
require.NoError(t, peerManager.DialFailed(ctx, b)) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, c, dial) } @@ -823,8 +797,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.Dialed(a)) @@ -834,8 +807,7 @@ func TestPeerManager_Dialed_Connected(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, b, dial) require.NoError(t, peerManager.Accepted(b.NodeID)) @@ -864,8 +836,7 @@ func TestPeerManager_Dialed_MaxConnected(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) // Marking b as dialed in the meanwhile (even without TryDialNext) @@ -907,8 +878,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, c, dial) require.NoError(t, peerManager.Dialed(c)) @@ -952,8 +922,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, b, dial) require.NoError(t, peerManager.Dialed(b)) @@ -962,8 +931,7 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - 
dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Empty(t, dial) // a should now be evicted. @@ -1009,8 +977,7 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, c, dial) // In the meanwhile, a disconnects and d connects. d is even lower-scored @@ -1063,7 +1030,7 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() + dial := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, c, dial) @@ -1109,8 +1076,7 @@ func TestPeerManager_Accepted(t *testing.T) { added, err = peerManager.Add(c) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, c, dial) require.NoError(t, peerManager.Accepted(c.NodeID)) require.Error(t, peerManager.Dialed(c)) @@ -1119,8 +1085,7 @@ func TestPeerManager_Accepted(t *testing.T) { added, err = peerManager.Add(d) require.NoError(t, err) require.True(t, added) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, d, dial) require.NoError(t, peerManager.Dialed(d)) require.Error(t, peerManager.Accepted(d.NodeID)) @@ -1271,8 +1236,7 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, b, dial) // a has already been claimed as an upgrade of a, so accepting @@ -1446,8 +1410,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { added, err := 
peerManager.Add(b) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, b, dial) require.NoError(t, peerManager.Dialed(b)) }() @@ -1581,13 +1544,11 @@ func TestPeerManager_Disconnected(t *testing.T) { // Disconnecting a dialing peer does not unmark it as dialing, to avoid // dialing it multiple times in parallel. - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) peerManager.Disconnected(ctx, a.NodeID) - dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Zero(t, dial) } @@ -1660,8 +1621,7 @@ func TestPeerManager_Subscribe(t *testing.T) { require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates()) // Outbound connection with peer error and eviction. - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.Empty(t, sub.Updates()) @@ -1684,8 +1644,7 @@ func TestPeerManager_Subscribe(t *testing.T) { require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates()) // Outbound connection with dial failure. 
- dial, err = peerManager.TryDialNext() - require.NoError(t, err) + dial = peerManager.TryDialNext() require.Equal(t, a, dial) require.Empty(t, sub.Updates()) @@ -1790,8 +1749,7 @@ func TestPeerManager_Close(t *testing.T) { added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) - dial, err := peerManager.TryDialNext() - require.NoError(t, err) + dial := peerManager.TryDialNext() require.Equal(t, a, dial) require.NoError(t, peerManager.DialFailed(ctx, a)) } diff --git a/internal/p2p/router.go b/internal/p2p/router.go index ec225f8e36..ff90e8c21c 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -310,11 +310,7 @@ func (r *Router) routeChannel( ) { for { select { - case envelope, ok := <-outCh: - if !ok { - return - } - + case envelope := <-outCh: // Mark the envelope with the channel ID to allow sendPeer() to pass // it on to Transport.SendMessage(). envelope.ChannelID = chID @@ -391,20 +387,22 @@ func (r *Router) routeChannel( } } - case peerError, ok := <-errCh: - if !ok { - return - } - - shouldEvict := peerError.Fatal || r.peerManager.HasMaxPeerCapacity() + case peerError := <-errCh: + maxPeerCapacity := r.peerManager.HasMaxPeerCapacity() r.logger.Error("peer error", "peer", peerError.NodeID, "err", peerError.Err, - "evicting", shouldEvict, + "disconnecting", peerError.Fatal || maxPeerCapacity, ) - if shouldEvict { + + if peerError.Fatal || maxPeerCapacity { + // if the error is fatal or all peer + // slots are in use, we can error + // (disconnect) from the peer. r.peerManager.Errored(peerError.NodeID, peerError.Err) } else { + // this just decrements the peer + // score. 
r.peerManager.processPeerEvent(ctx, PeerUpdate{ NodeID: peerError.NodeID, Status: PeerStatusBad, @@ -466,11 +464,6 @@ func (r *Router) dialSleep(ctx context.Context) { } r.options.DialSleep(ctx) - - if !r.peerManager.HasDialedMaxPeers() { - r.peerManager.dialWaker.Wake() - } - } // acceptPeers accepts inbound connections from peers on the given transport, @@ -591,9 +584,8 @@ LOOP: switch { case errors.Is(err, context.Canceled): break LOOP - case err != nil: - r.logger.Error("failed to find next peer to dial", "err", err) - break LOOP + case address == NodeAddress{}: + continue LOOP } select { @@ -603,7 +595,7 @@ LOOP: // create connections too quickly. r.dialSleep(ctx) - continue + continue LOOP case <-ctx.Done(): close(addresses) break LOOP @@ -642,6 +634,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { if err := r.runWithPeerMutex(func() error { return r.peerManager.Dialed(address) }); err != nil { r.logger.Error("failed to dial peer", "op", "outgoing/dialing", "peer", address.NodeID, "err", err) + r.peerManager.dialWaker.Wake() conn.Close() return } @@ -937,6 +930,8 @@ func (r *Router) evictPeers(ctx context.Context) { queue, ok := r.peerQueues[peerID] r.peerMtx.RUnlock() + r.metrics.PeersEvicted.Add(1) + if ok { queue.close() } From 8860e027a888e67936c096c7c5dc81806f6665bf Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Tue, 21 Jun 2022 20:51:09 -0400 Subject: [PATCH 127/203] p2p: more dial routines (#8827) The dial routines perform network i/o, which is a blocking call into the kernel. These routines are completely unable to do anything else while the dial occurs, so for most of their lifecycle they are sitting idle waiting for the tcp stack to hand them data. We should increase this value by _a lot_ to enable more concurrent dials. This is unlikely to cause CPU starvation because these routines sit idle most of the time. 
The current value causes dials to occur _way_ too slowly. Below is a graph demonstrating the before and after of this change in a testnetwork with many dead peers. You can observe that the rate that we connect to new, valid peers, is _much_ higher than previously. Change was deployed around the 31 minute mark on the graph. ![image](https://user-images.githubusercontent.com/4561443/174919007-50e4453a-edd8-41d0-97ee-dea8853d57f7.png) --- internal/p2p/router.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index ff90e8c21c..7ad5529fbb 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -417,7 +417,7 @@ func (r *Router) routeChannel( func (r *Router) numConcurrentDials() int { if r.options.NumConcurrentDials == nil { - return runtime.NumCPU() + return runtime.NumCPU() * 32 } return r.options.NumConcurrentDials() From 2e11760fbe96dacdf8c4e37b7a179ed777a9ebfa Mon Sep 17 00:00:00 2001 From: Ian Jungyong Um <31336310+code0xff@users.noreply.github.com> Date: Wed, 22 Jun 2022 16:30:11 +0900 Subject: [PATCH 128/203] p2p: fix typo (#8836) --- internal/p2p/router.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 7ad5529fbb..e3adc77eea 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -68,7 +68,7 @@ type RouterOptions struct { // seconds between submitting each peer to be dialed. DialSleep func(context.Context) - // NumConcrruentDials controls how many parallel go routines + // NumConcurrentDials controls how many parallel go routines // are used to dial peers. This defaults to the value of // runtime.NumCPU. 
NumConcurrentDials func() int From 52b2efb8274879468af9d032afd844b300b40f6e Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 23 Jun 2022 08:40:36 -0400 Subject: [PATCH 129/203] e2e: report peer heights in error message (#8843) --- test/e2e/runner/rpc.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index 5f5c22149c..c08805f406 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -23,7 +23,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty clients = map[string]*rpchttp.HTTP{} lastHeight int64 lastIncrease = time.Now() - nodesAtHeight = map[string]struct{}{} + nodesAtHeight = map[string]int64{} numRunningNodes int ) if height == 0 { @@ -85,7 +85,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty // add this node to the set of target // height nodes - nodesAtHeight[node.Name] = struct{}{} + nodesAtHeight[node.Name] = result.SyncInfo.LatestBlockHeight // if not all of the nodes that we // have clients for have reached the From 436a38f8768f32c2abf3668e586019dcc3defb04 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 23 Jun 2022 10:03:10 -0400 Subject: [PATCH 130/203] p2p: track peers by address (#8841) --- internal/p2p/peermanager.go | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index f2f74007b2..b30c5dbfbf 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -554,6 +554,10 @@ func (m *PeerManager) TryDialNext() NodeAddress { continue } + if id, ok := m.store.Resolve(addressInfo.Address); ok && (m.isConnected(id) || m.dialing[id]) { + continue + } + // We now have an eligible address to dial. If we're full but have // upgrade capacity (as checked above), we find a lower-scored peer // we can replace and mark it as upgrading so noone else claims it. 
@@ -1235,6 +1239,7 @@ func (m *PeerManager) retryDelay(failures uint32, persistent bool) time.Duration type peerStore struct { db dbm.DB peers map[types.NodeID]*peerInfo + index map[NodeAddress]types.NodeID ranked []*peerInfo // cache for Ranked(), nil invalidates cache } @@ -1254,6 +1259,7 @@ func newPeerStore(db dbm.DB) (*peerStore, error) { // loadPeers loads all peers from the database into memory. func (s *peerStore) loadPeers() error { peers := map[types.NodeID]*peerInfo{} + addrs := map[NodeAddress]types.NodeID{} start, end := keyPeerInfoRange() iter, err := s.db.Iterator(start, end) @@ -1273,11 +1279,18 @@ func (s *peerStore) loadPeers() error { return fmt.Errorf("invalid peer data: %w", err) } peers[peer.ID] = peer + for addr := range peer.AddressInfo { + // TODO maybe check to see if we've seen this + // addr before for a different peer, there + // could be duplicates. + addrs[addr] = peer.ID + } } if iter.Error() != nil { return iter.Error() } s.peers = peers + s.index = addrs s.ranked = nil // invalidate cache if populated return nil } @@ -1289,6 +1302,12 @@ func (s *peerStore) Get(id types.NodeID) (peerInfo, bool) { return peer.Copy(), ok } +// Resolve returns the peer ID for a given node address if known. +func (s *peerStore) Resolve(addr NodeAddress) (types.NodeID, bool) { + id, ok := s.index[addr] + return id, ok +} + // Set stores peer data. The input data will be copied, and can safely be reused // by the caller. func (s *peerStore) Set(peer peerInfo) error { @@ -1317,20 +1336,29 @@ func (s *peerStore) Set(peer peerInfo) error { // update the existing pointer address. *current = peer } + for addr := range peer.AddressInfo { + s.index[addr] = peer.ID + } return nil } // Delete deletes a peer, or does nothing if it does not exist. 
func (s *peerStore) Delete(id types.NodeID) error { - if _, ok := s.peers[id]; !ok { + peer, ok := s.peers[id] + if !ok { return nil } - if err := s.db.Delete(keyPeerInfo(id)); err != nil { - return err + for _, addr := range peer.AddressInfo { + delete(s.index, addr.Address) } delete(s.peers, id) s.ranked = nil + + if err := s.db.Delete(keyPeerInfo(id)); err != nil { + return err + } + return nil } From fb209136f85083e1d491ac2589fff41dd80518cb Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Thu, 23 Jun 2022 19:11:21 +0200 Subject: [PATCH 131/203] e2e: add tolerance to peer discovery test (#8849) --- test/e2e/tests/net_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/e2e/tests/net_test.go b/test/e2e/tests/net_test.go index 2d8f285496..939c136c77 100644 --- a/test/e2e/tests/net_test.go +++ b/test/e2e/tests/net_test.go @@ -18,7 +18,9 @@ func TestNet_Peers(t *testing.T) { netInfo, err := client.NetInfo(ctx) require.NoError(t, err) - expectedPeers := len(node.Testnet.Nodes) + // FIXME: https://github.com/tendermint/tendermint/issues/8848 + // We should be able to assert that we can discover all peers in a network + expectedPeers := len(node.Testnet.Nodes) - 1 // includes extra tolerance peers := make(map[string]*e2e.Node, 0) seen := map[string]bool{} for _, n := range node.Testnet.Nodes { @@ -31,7 +33,7 @@ func TestNet_Peers(t *testing.T) { seen[n.Name] = false } - require.Equal(t, expectedPeers, netInfo.NPeers, + require.GreaterOrEqual(t, netInfo.NPeers, expectedPeers, "node is not fully meshed with peers") for _, peerInfo := range netInfo.Peers { From 5f5e74798bcc6045bb454a082405145a3230ab1a Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Thu, 23 Jun 2022 18:33:21 -0400 Subject: [PATCH 132/203] p2p: set empty timeouts to small values. (#8847) These timeouts default to 'do not time out' if they are not set. This ties up resources, potentially indefinitely.
If node on the other side of the handshake is up but unresponsive, the [handshake call](https://github.com/tendermint/tendermint/blob/edec79448aa1d62b84683b1b22e12e145dbdda7c/internal/p2p/router.go#L720) will _never_ return. These are proposed values that have not been validated. I intend to validate them in a production setting. --- internal/p2p/mocks/connection.go | 20 +++++++++++--------- internal/p2p/router.go | 8 +------- internal/p2p/router_test.go | 14 +++++++------- internal/p2p/transport.go | 3 ++- internal/p2p/transport_mconn.go | 18 +++++++++++++++--- internal/p2p/transport_memory.go | 8 ++++++++ internal/p2p/transport_test.go | 12 ++++++------ node/node.go | 4 +++- 8 files changed, 53 insertions(+), 34 deletions(-) diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 766bbf6576..20727f8a61 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -13,6 +13,8 @@ import ( p2p "github.com/tendermint/tendermint/internal/p2p" + time "time" + types "github.com/tendermint/tendermint/types" ) @@ -35,20 +37,20 @@ func (_m *Connection) Close() error { return r0 } -// Handshake provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) { - ret := _m.Called(_a0, _a1, _a2) +// Handshake provides a mock function with given fields: _a0, _a1, _a2, _a3 +func (_m *Connection) Handshake(_a0 context.Context, _a1 time.Duration, _a2 types.NodeInfo, _a3 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) { + ret := _m.Called(_a0, _a1, _a2, _a3) var r0 types.NodeInfo - if rf, ok := ret.Get(0).(func(context.Context, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok { - r0 = rf(_a0, _a1, _a2) + if rf, ok := ret.Get(0).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok { + r0 = rf(_a0, _a1, _a2, _a3) } else { r0 =
ret.Get(0).(types.NodeInfo) } var r1 crypto.PubKey - if rf, ok := ret.Get(1).(func(context.Context, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok { - r1 = rf(_a0, _a1, _a2) + if rf, ok := ret.Get(1).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok { + r1 = rf(_a0, _a1, _a2, _a3) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(crypto.PubKey) @@ -56,8 +58,8 @@ func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 cry } var r2 error - if rf, ok := ret.Get(2).(func(context.Context, types.NodeInfo, crypto.PrivKey) error); ok { - r2 = rf(_a0, _a1, _a2) + if rf, ok := ret.Get(2).(func(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) error); ok { + r2 = rf(_a0, _a1, _a2, _a3) } else { r2 = ret.Error(2) } diff --git a/internal/p2p/router.go b/internal/p2p/router.go index e3adc77eea..d5edd42be9 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -710,14 +710,8 @@ func (r *Router) handshakePeer( expectID types.NodeID, ) (types.NodeInfo, error) { - if r.options.HandshakeTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, r.options.HandshakeTimeout) - defer cancel() - } - nodeInfo := r.nodeInfoProducer() - peerInfo, peerKey, err := conn.Handshake(ctx, *nodeInfo, r.privKey) + peerInfo, peerKey, err := conn.Handshake(ctx, r.options.HandshakeTimeout, *nodeInfo, r.privKey) if err != nil { return peerInfo, err } diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 86af193859..8a58146fde 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -385,7 +385,7 @@ func TestRouter_AcceptPeers(t *testing.T) { connCtx, connCancel := context.WithCancel(context.Background()) mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). 
Return(tc.peerInfo, tc.peerKey, nil) mockConnection.On("Close").Run(func(_ mock.Arguments) { connCancel() }).Return(nil).Maybe() mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) @@ -500,7 +500,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF) mockConnection.On("Close").Return(nil) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) @@ -588,7 +588,7 @@ func TestRouter_DialPeers(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") if tc.dialErr == nil { - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). Return(tc.peerInfo, tc.peerKey, nil) mockConnection.On("Close").Run(func(_ mock.Arguments) { connCancel() }).Return(nil).Maybe() } @@ -674,7 +674,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF) mockConnection.On("Close").Return(nil) @@ -757,7 +757,7 @@ func TestRouter_EvictPeers(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). 
Return(peerInfo, peerKey.PubKey(), nil) mockConnection.On("ReceiveMessage", mock.Anything).WaitUntil(closeCh).Return(chID, nil, io.EOF) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) @@ -826,7 +826,7 @@ func TestRouter_ChannelCompatability(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). Return(incompatiblePeer, peerKey.PubKey(), nil) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) mockConnection.On("Close").Return(nil) @@ -877,7 +877,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") - mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). + mockConnection.On("Handshake", mock.Anything, mock.Anything, selfInfo, selfKey). Return(peer, peerKey.PubKey(), nil) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) mockConnection.On("Close").Return(nil) diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index 7a965260ac..e644a11ae2 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net" + "time" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/types" @@ -81,7 +82,7 @@ type Connection interface { // FIXME: The handshake should really be the Router's responsibility, but // that requires the connection interface to be byte-oriented rather than // message-oriented (see comment above). - Handshake(context.Context, types.NodeInfo, crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) + Handshake(context.Context, time.Duration, types.NodeInfo, crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) // ReceiveMessage returns the next message received on the connection, // blocking until one is available. Returns io.EOF if closed. 
diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index 7bf17d1a06..13a65b9737 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -9,6 +9,7 @@ import ( "net" "strconv" "sync" + "time" "golang.org/x/net/netutil" @@ -274,6 +275,7 @@ func newMConnConnection( // Handshake implements Connection. func (c *mConnConnection) Handshake( ctx context.Context, + timeout time.Duration, nodeInfo types.NodeInfo, privKey crypto.PrivKey, ) (types.NodeInfo, crypto.PubKey, error) { @@ -283,6 +285,12 @@ func (c *mConnConnection) Handshake( peerKey crypto.PubKey errCh = make(chan error, 1) ) + handshakeCtx := ctx + if timeout > 0 { + var cancel context.CancelFunc + handshakeCtx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } // To handle context cancellation, we need to do the handshake in a // goroutine and abort the blocking network calls by closing the connection // when the context is canceled. @@ -295,17 +303,17 @@ func (c *mConnConnection) Handshake( } }() var err error - mconn, peerInfo, peerKey, err = c.handshake(ctx, nodeInfo, privKey) + mconn, peerInfo, peerKey, err = c.handshake(handshakeCtx, nodeInfo, privKey) select { case errCh <- err: - case <-ctx.Done(): + case <-handshakeCtx.Done(): } }() select { - case <-ctx.Done(): + case <-handshakeCtx.Done(): _ = c.Close() return types.NodeInfo{}, nil, ctx.Err() @@ -314,6 +322,10 @@ func (c *mConnConnection) Handshake( return types.NodeInfo{}, nil, err } c.mconn = mconn + // Start must not use the handshakeCtx. The handshakeCtx may have a + // timeout set that is intended to terminate only the handshake procedure. + // The context passed to Start controls the entire lifecycle of the + // mconn. 
if err = c.mconn.Start(ctx); err != nil { return types.NodeInfo{}, nil, err } diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go index 3eb4c5b515..c321bc174d 100644 --- a/internal/p2p/transport_memory.go +++ b/internal/p2p/transport_memory.go @@ -7,6 +7,7 @@ import ( "io" "net" "sync" + "time" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/log" @@ -273,9 +274,16 @@ func (c *MemoryConnection) RemoteEndpoint() Endpoint { // Handshake implements Connection. func (c *MemoryConnection) Handshake( ctx context.Context, + timeout time.Duration, nodeInfo types.NodeInfo, privKey crypto.PrivKey, ) (types.NodeInfo, crypto.PubKey, error) { + if timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + select { case c.sendCh <- memoryMessage{nodeInfo: &nodeInfo, pubKey: privKey.PubKey()}: c.logger.Debug("sent handshake", "nodeInfo", nodeInfo) diff --git a/internal/p2p/transport_test.go b/internal/p2p/transport_test.go index b4edf9bc95..d58c23955b 100644 --- a/internal/p2p/transport_test.go +++ b/internal/p2p/transport_test.go @@ -296,7 +296,7 @@ func TestConnection_Handshake(t *testing.T) { errCh := make(chan error, 1) go func() { // Must use assert due to goroutine. 
- peerInfo, peerKey, err := ba.Handshake(ctx, bInfo, bKey) + peerInfo, peerKey, err := ba.Handshake(ctx, 0, bInfo, bKey) if err == nil { assert.Equal(t, aInfo, peerInfo) assert.Equal(t, aKey.PubKey(), peerKey) @@ -307,7 +307,7 @@ func TestConnection_Handshake(t *testing.T) { } }() - peerInfo, peerKey, err := ab.Handshake(ctx, aInfo, aKey) + peerInfo, peerKey, err := ab.Handshake(ctx, 0, aInfo, aKey) require.NoError(t, err) require.Equal(t, bInfo, peerInfo) require.Equal(t, bKey.PubKey(), peerKey) @@ -328,7 +328,7 @@ func TestConnection_HandshakeCancel(t *testing.T) { ab, ba := dialAccept(ctx, t, a, b) timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute) cancel() - _, _, err := ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) + _, _, err := ab.Handshake(timeoutCtx, 0, types.NodeInfo{}, ed25519.GenPrivKey()) require.Error(t, err) require.Equal(t, context.Canceled, err) _ = ab.Close() @@ -338,7 +338,7 @@ func TestConnection_HandshakeCancel(t *testing.T) { ab, ba = dialAccept(ctx, t, a, b) timeoutCtx, cancel = context.WithTimeout(ctx, 200*time.Millisecond) defer cancel() - _, _, err = ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) + _, _, err = ab.Handshake(timeoutCtx, 0, types.NodeInfo{}, ed25519.GenPrivKey()) require.Error(t, err) require.Equal(t, context.DeadlineExceeded, err) _ = ab.Close() @@ -642,13 +642,13 @@ func dialAcceptHandshake(ctx context.Context, t *testing.T, a, b p2p.Transport) go func() { privKey := ed25519.GenPrivKey() nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())} - _, _, err := ba.Handshake(ctx, nodeInfo, privKey) + _, _, err := ba.Handshake(ctx, 0, nodeInfo, privKey) errCh <- err }() privKey := ed25519.GenPrivKey() nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())} - _, _, err := ab.Handshake(ctx, nodeInfo, privKey) + _, _, err := ab.Handshake(ctx, 0, nodeInfo, privKey) require.NoError(t, err) timer := time.NewTimer(2 * time.Second) diff --git 
a/node/node.go b/node/node.go index 77773044b0..187a7a1d65 100644 --- a/node/node.go +++ b/node/node.go @@ -715,7 +715,9 @@ func loadStateFromDBOrGenesisDocProvider(stateStore sm.Store, genDoc *types.Gene func getRouterConfig(conf *config.Config, appClient abciclient.Client) p2p.RouterOptions { opts := p2p.RouterOptions{ - QueueType: conf.P2P.QueueType, + QueueType: conf.P2P.QueueType, + HandshakeTimeout: conf.P2P.HandshakeTimeout, + DialTimeout: conf.P2P.DialTimeout, } if conf.FilterPeers && appClient != nil { From 6b5053046ac2390d513dc8c8b832cf92b6b3d8f4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Jun 2022 13:12:17 +0000 Subject: [PATCH 133/203] build(deps): Bump github.com/stretchr/testify from 1.7.2 to 1.7.5 (#8864) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.2 to 1.7.5.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/stretchr/testify&package-manager=go_modules&previous-version=1.7.2&new-version=1.7.5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 4 ++-- go.sum | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 8dd4c92884..fde7586ade 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.5.0 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.7.5 github.com/tendermint/tm-db v0.6.6 golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 @@ -199,7 +199,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect - github.com/stretchr/objx v0.1.1 // indirect + github.com/stretchr/objx v0.4.0 // indirect github.com/subosito/gotenv v1.4.0 // indirect github.com/sylvia7788/contextcheck v1.0.4 // indirect github.com/tdakkota/asciicheck v0.1.1 // indirect diff --git a/go.sum b/go.sum index bdb93f91eb..e055c300e5 100644 --- a/go.sum +++ b/go.sum @@ -1064,8 +1064,9 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3 github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -1075,8 +1076,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.7.5 h1:s5PTfem8p8EbKQOctVV53k6jCJt3UX4IEJzwh+C324Q= +github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= From 409e057d73a854ea207ebb459733c2c7779b407a Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Fri, 24 Jun 2022 12:10:27 -0400 Subject: [PATCH 134/203] fix light client select statement (#8871) --- light/client.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/light/client.go b/light/client.go index dfb4fccf7a..f38e9d59de 100644 --- a/light/client.go +++ b/light/client.go @@ -1034,7 +1034,12 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool) // process all the responses as they come in for i := 0; i < cap(witnessResponsesC); i++ { - response := <-witnessResponsesC + var response witnessResponse + select { + case response = <-witnessResponsesC: + case <-ctx.Done(): + return nil, ctx.Err() + } switch response.err { // success! 
We have found a new primary case nil: @@ -1063,10 +1068,6 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool) // return the light block that new primary responded with return response.lb, nil - // catch canceled contexts or deadlines - case context.Canceled, context.DeadlineExceeded: - return nil, response.err - // process benign errors by logging them only case provider.ErrNoResponse, provider.ErrLightBlockNotFound, provider.ErrHeightTooHigh: lastError = response.err From c4d24eed7d0b8902c41d790e1de446baa0256e6b Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Fri, 24 Jun 2022 18:31:30 +0200 Subject: [PATCH 135/203] e2e: disable another network test (#8862) Follow up on: https://github.com/tendermint/tendermint/pull/8849 --- test/e2e/tests/net_test.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/test/e2e/tests/net_test.go b/test/e2e/tests/net_test.go index 939c136c77..43cab77e8a 100644 --- a/test/e2e/tests/net_test.go +++ b/test/e2e/tests/net_test.go @@ -20,7 +20,7 @@ func TestNet_Peers(t *testing.T) { // FIXME: https://github.com/tendermint/tendermint/issues/8848 // We should be able to assert that we can discover all peers in a network - expectedPeers := len(node.Testnet.Nodes) - 1 // includes extra tolerance + expectedPeers := len(node.Testnet.Nodes) peers := make(map[string]*e2e.Node, 0) seen := map[string]bool{} for _, n := range node.Testnet.Nodes { @@ -33,7 +33,7 @@ func TestNet_Peers(t *testing.T) { seen[n.Name] = false } - require.GreaterOrEqual(t, netInfo.NPeers, expectedPeers, + require.GreaterOrEqual(t, netInfo.NPeers, expectedPeers-1, "node is not fully meshed with peers") for _, peerInfo := range netInfo.Peers { @@ -45,8 +45,10 @@ func TestNet_Peers(t *testing.T) { seen[peer.Name] = true } - for name := range seen { - require.True(t, seen[name], "node %v not peered with %v", node.Name, name) - } + // FIXME: https://github.com/tendermint/tendermint/issues/8848 + // We should be able to 
assert that we can discover all peers in a network + // for name := range seen { + // require.True(t, seen[name], "node %v not peered with %v", node.Name, name) + // } }) } From 52b6dc19badf50938ae2b2a1d2e22813614e5ad5 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 24 Jun 2022 13:57:49 -0400 Subject: [PATCH 136/203] p2p: remove dial sleep and provide disconnect cooldown (#8839) Alternative proposal for #8826 --- internal/p2p/p2ptest/network.go | 2 +- internal/p2p/peermanager.go | 34 +++++++++++++++++++++++---- internal/p2p/router.go | 41 +-------------------------------- internal/p2p/router_test.go | 1 - node/setup.go | 23 +++++++++--------- 5 files changed, 44 insertions(+), 57 deletions(-) diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 5c1b0a2187..c040a08a88 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -269,7 +269,7 @@ func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions) func() *types.NodeInfo { return &nodeInfo }, transport, ep, - p2p.RouterOptions{DialSleep: func(_ context.Context) {}}, + p2p.RouterOptions{}, ) require.NoError(t, err) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index b30c5dbfbf..ac2e28225c 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -144,6 +144,10 @@ type PeerManagerOptions struct { // retry times, to avoid thundering herds. 0 disables jitter. RetryTimeJitter time.Duration + // DisconnectCooldownPeriod is the amount of time after we + // disconnect from a peer before we'll consider dialing a new peer + DisconnectCooldownPeriod time.Duration + // PeerScores sets fixed scores for specific peers. It is mainly used // for testing. A score of 0 is ignored. 
PeerScores map[types.NodeID]PeerScore @@ -549,6 +553,10 @@ func (m *PeerManager) TryDialNext() NodeAddress { continue } + if !peer.LastDisconnected.IsZero() && time.Since(peer.LastDisconnected) < m.options.DisconnectCooldownPeriod { + continue + } + for _, addressInfo := range peer.AddressInfo { if time.Since(addressInfo.LastDialFailure) < m.retryDelay(addressInfo.DialFailures, peer.Persistent) { continue @@ -867,6 +875,22 @@ func (m *PeerManager) Disconnected(ctx context.Context, peerID types.NodeID) { delete(m.evicting, peerID) delete(m.ready, peerID) + if peer, ok := m.store.Get(peerID); ok { + peer.LastDisconnected = time.Now() + _ = m.store.Set(peer) + // launch a thread to ping the dialWaker when the + // disconnected peer can be dialed again. + go func() { + timer := time.NewTimer(m.options.DisconnectCooldownPeriod) + defer timer.Stop() + select { + case <-timer.C: + m.dialWaker.Wake() + case <-ctx.Done(): + } + }() + } + if ready { m.broadcast(ctx, PeerUpdate{ NodeID: peerID, @@ -1447,9 +1471,10 @@ func (s *peerStore) Size() int { // peerInfo contains peer information stored in a peerStore. type peerInfo struct { - ID types.NodeID - AddressInfo map[NodeAddress]*peerAddressInfo - LastConnected time.Time + ID types.NodeID + AddressInfo map[NodeAddress]*peerAddressInfo + LastConnected time.Time + LastDisconnected time.Time // These fields are ephemeral, i.e. not persisted to the database. 
Persistent bool @@ -1489,8 +1514,8 @@ func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) { func (p *peerInfo) ToProto() *p2pproto.PeerInfo { msg := &p2pproto.PeerInfo{ ID: string(p.ID), - LastConnected: &p.LastConnected, Inactive: p.Inactive, + LastConnected: &p.LastConnected, } for _, addressInfo := range p.AddressInfo { msg.AddressInfo = append(msg.AddressInfo, addressInfo.ToProto()) @@ -1498,6 +1523,7 @@ func (p *peerInfo) ToProto() *p2pproto.PeerInfo { if msg.LastConnected.IsZero() { msg.LastConnected = nil } + return msg } diff --git a/internal/p2p/router.go b/internal/p2p/router.go index d5edd42be9..8b7db9a034 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "math/rand" "net" "runtime" "sync" @@ -62,13 +61,7 @@ type RouterOptions struct { // return an error to reject the peer. FilterPeerByID func(context.Context, types.NodeID) error - // DialSleep controls the amount of time that the router - // sleeps between dialing peers. If not set, a default value - // is used that sleeps for a (random) amount of time up to 3 - // seconds between submitting each peer to be dialed. - DialSleep func(context.Context) - - // NumConcurrentDials controls how many parallel go routines + // NumConcrruentDials controls how many parallel go routines // are used to dial peers. This defaults to the value of // runtime.NumCPU. NumConcurrentDials func() int @@ -439,33 +432,6 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error { return r.options.FilterPeerByID(ctx, id) } -func (r *Router) dialSleep(ctx context.Context) { - if r.options.DialSleep == nil { - // the connTracker (on the other side) only rate - // limits peers for dialing more than once every 10ms, - // so these numbers are safe. 
- const ( - maxDialerInterval = 500 // ms - minDialerInterval = 100 // ms - ) - - // nolint:gosec // G404: Use of weak random number generator - dur := time.Duration(rand.Int63n(maxDialerInterval-minDialerInterval+1) + minDialerInterval) - - timer := time.NewTimer(dur * time.Millisecond) - defer timer.Stop() - - select { - case <-ctx.Done(): - case <-timer.C: - } - - return - } - - r.options.DialSleep(ctx) -} - // acceptPeers accepts inbound connections from peers on the given transport, // and spawns goroutines that route messages to/from them. func (r *Router) acceptPeers(ctx context.Context, transport Transport) { @@ -590,11 +556,6 @@ LOOP: select { case addresses <- address: - // this jitters the frequency that we call - // DialNext and prevents us from attempting to - // create connections too quickly. - - r.dialSleep(ctx) continue LOOP case <-ctx.Done(): close(addresses) diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 8a58146fde..92f56f768f 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -715,7 +715,6 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { mockTransport, nil, p2p.RouterOptions{ - DialSleep: func(_ context.Context) {}, NumConcurrentDials: func() int { ncpu := runtime.NumCPU() if ncpu <= 3 { diff --git a/node/setup.go b/node/setup.go index 345489f8d6..df9d19195a 100644 --- a/node/setup.go +++ b/node/setup.go @@ -235,17 +235,18 @@ func createPeerManager( maxUpgradeConns := uint16(4) options := p2p.PeerManagerOptions{ - SelfAddress: selfAddr, - MaxConnected: maxConns, - MaxOutgoingConnections: maxOutgoingConns, - MaxConnectedUpgrade: maxUpgradeConns, - MaxPeers: maxUpgradeConns + 4*maxConns, - MinRetryTime: 250 * time.Millisecond, - MaxRetryTime: 30 * time.Minute, - MaxRetryTimePersistent: 5 * time.Minute, - RetryTimeJitter: 5 * time.Second, - PrivatePeers: privatePeerIDs, - Metrics: metrics, + SelfAddress: selfAddr, + MaxConnected: maxConns, + MaxOutgoingConnections: maxOutgoingConns, + 
MaxConnectedUpgrade: maxUpgradeConns, + DisconnectCooldownPeriod: 2 * time.Second, + MaxPeers: maxUpgradeConns + 4*maxConns, + MinRetryTime: 250 * time.Millisecond, + MaxRetryTime: 30 * time.Minute, + MaxRetryTimePersistent: 5 * time.Minute, + RetryTimeJitter: 5 * time.Second, + PrivatePeers: privatePeerIDs, + Metrics: metrics, } peers := []p2p.NodeAddress{} From 27ff2f46b8b5d54c986228a84edd053b2809c7aa Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Fri, 24 Jun 2022 20:53:31 +0200 Subject: [PATCH 137/203] Add @sergio-mena and @jmalicevic to list of spec reviewers (#8870) Co-authored-by: Callum Waters --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 35a27cfe78..950ed1dc24 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -10,4 +10,4 @@ * @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @ancazamfir # Spec related changes can be approved by the protocol design team -/spec @josef-widder @milosevic @cason +/spec @josef-widder @milosevic @cason @sergio-mena @jmalicevic From 463cff456b36ba7a64b6dd269163bbb661eb21da Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jun 2022 09:13:25 -0400 Subject: [PATCH 138/203] build(deps): Bump bufbuild/buf-setup-action from 1.5.0 to 1.6.0 (#8883) Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.5.0 to 1.6.0. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/v1.5.0...v1.6.0) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/proto-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index f8fff89cc0..377e43ca63 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 5 steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.5.0 + - uses: bufbuild/buf-setup-action@v1.6.0 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' From 373b262f35cc6cf3ea3e7d7e51d5412f960d2177 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jun 2022 13:15:29 +0000 Subject: [PATCH 139/203] build(deps): Bump styfle/cancel-workflow-action from 0.9.1 to 0.10.0 (#8881) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [styfle/cancel-workflow-action](https://github.com/styfle/cancel-workflow-action) from 0.9.1 to 0.10.0.
Release notes

Sourced from styfle/cancel-workflow-action's releases.

0.10.0

Changes

  • Feat(all):support for considering all workflows with one term: #165
  • Chore: rebuild: 74a81dc1a9321342ebc12fa8670cc91600c8c494
  • Chore: update main.yml: #78
  • Bump @​vercel/ncc from 0.28.6 to 0.29.1: #106
  • Bump @​vercel/ncc from 0.29.1 to 0.29.2: #109
  • Bump @​vercel/ncc from 0.29.2 to 0.30.0: #112
  • Bump husky from 7.0.1 to 7.0.2: #110
  • Bump prettier from 2.3.2 to 2.4.0: #116
  • Bump @​vercel/ncc from 0.30.0 to 0.31.1: #115
  • Bump typescript from 4.3.5 to 4.4.3: #114
  • Bump prettier from 2.4.0 to 2.4.1: #117
  • Bump @​actions/github from 4.0.0 to 5.0.0: #89
  • Bump @​actions/core from 1.3.0 to 1.6.0: #118
  • Bump typescript from 4.4.3 to 4.4.4: #119
  • Bump husky from 7.0.2 to 7.0.4: #120
  • Bump typescript from 4.4.4 to 4.5.2: #124
  • Bump @​vercel/ncc from 0.31.1 to 0.32.0: #123
  • Bump prettier from 2.4.1 to 2.5.0: #125
  • Bump prettier from 2.5.0 to 2.5.1: #126
  • Bump @​vercel/ncc from 0.32.0 to 0.33.0: #127
  • Bump typescript from 4.5.2 to 4.5.3: #128
  • Bump @​vercel/ncc from 0.33.0 to 0.33.1: #130
  • Bump typescript from 4.5.3 to 4.5.4: #129
  • Bump typescript from 4.5.4 to 4.5.5: #131
  • Bump node-fetch from 2.6.5 to 2.6.7: #132
  • Bump @​vercel/ncc from 0.33.1 to 0.33.3: #138
  • Bump actions/setup-node from 2 to 3.0.0: #140
  • Bump actions/checkout from 2 to 3: #141
  • Bump typescript from 4.5.5 to 4.6.2: #142
  • Bump prettier from 2.5.1 to 2.6.0: #143
  • Bump prettier from 2.6.0 to 2.6.1: #145
  • Bump actions/setup-node from 3.0.0 to 3.1.0: #146
  • Bump typescript from 4.6.2 to 4.6.3: #144
  • Bump prettier from 2.6.1 to 2.6.2: #147
  • Bump @​actions/github from 5.0.0 to 5.0.1: #148
  • Bump actions/setup-node from 3.1.0 to 3.1.1: #149
  • Bump @​vercel/ncc from 0.33.3 to 0.33.4: #151
  • Bump @​actions/core from 1.6.0 to 1.7.0: #153
  • Bump typescript from 4.6.3 to 4.6.4: #154
  • Bump husky from 7.0.4 to 8.0.1: #155
  • Bump @​actions/core from 1.7.0 to 1.8.0: #156
  • Bump actions/setup-node from 3.1.1 to 3.2.0: #159
  • Bump @​actions/github from 5.0.1 to 5.0.3: #157
  • Bump @​actions/core from 1.8.0 to 1.8.2: #158
  • Bump typescript from 4.6.4 to 4.7.2: #160
  • Bump @​vercel/ncc from 0.33.4 to 0.34.0: #161
  • Bump typescript from 4.7.2 to 4.7.3: #163

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=styfle/cancel-workflow-action&package-manager=github_actions&previous-version=0.9.1&new-version=0.10.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- .github/workflows/janitor.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/janitor.yml b/.github/workflows/janitor.yml index e6bc45ec19..ceb21941d1 100644 --- a/.github/workflows/janitor.yml +++ b/.github/workflows/janitor.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 3 steps: - - uses: styfle/cancel-workflow-action@0.9.1 + - uses: styfle/cancel-workflow-action@0.10.0 with: workflow_id: 1041851,1401230,2837803 access_token: ${{ github.token }} From 013b46a6c37b53efffdad27ed1426329972d1166 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 27 Jun 2022 17:44:50 -0400 Subject: [PATCH 140/203] libs/strings: move to internal (#8890) I think we were leaving this library public because the SDK depended upon it, but the function the SDK was using was one that we'd removed because *we* weren't using it any more, and I made a PR against the SDK to clean that up. ref: https://github.com/cosmos/cosmos-sdk/pull/12368 --- internal/inspect/inspect.go | 2 +- {libs => internal/libs}/strings/string.go | 0 {libs => internal/libs}/strings/string_test.go | 0 internal/rpc/core/env.go | 2 +- node/setup.go | 2 +- types/node_info.go | 2 +- types/params.go | 2 +- 7 files changed, 5 insertions(+), 5 deletions(-) rename {libs => internal/libs}/strings/string.go (100%) rename {libs => internal/libs}/strings/string_test.go (100%) diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go index 6381ea888a..573b63f406 100644 --- a/internal/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -10,13 +10,13 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/inspect/rpc" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" 
"github.com/tendermint/tendermint/internal/state/indexer/sink" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" - tmstrings "github.com/tendermint/tendermint/libs/strings" "github.com/tendermint/tendermint/types" "golang.org/x/sync/errgroup" diff --git a/libs/strings/string.go b/internal/libs/strings/string.go similarity index 100% rename from libs/strings/string.go rename to internal/libs/strings/string.go diff --git a/libs/strings/string_test.go b/internal/libs/strings/string_test.go similarity index 100% rename from libs/strings/string_test.go rename to internal/libs/strings/string_test.go diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go index 124525f26f..f1ba58ee32 100644 --- a/internal/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -18,6 +18,7 @@ import ( "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/eventlog" + "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" tmpubsub "github.com/tendermint/tendermint/internal/pubsub" @@ -26,7 +27,6 @@ import ( "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/statesync" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/strings" "github.com/tendermint/tendermint/rpc/coretypes" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" "github.com/tendermint/tendermint/types" diff --git a/node/setup.go b/node/setup.go index df9d19195a..38ad585187 100644 --- a/node/setup.go +++ b/node/setup.go @@ -17,6 +17,7 @@ import ( "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" 
"github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/conn" @@ -28,7 +29,6 @@ import ( "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" - tmstrings "github.com/tendermint/tendermint/libs/strings" "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" "github.com/tendermint/tendermint/types" diff --git a/types/node_info.go b/types/node_info.go index fd47816e29..bc614f55e0 100644 --- a/types/node_info.go +++ b/types/node_info.go @@ -7,8 +7,8 @@ import ( "strconv" "strings" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/libs/bytes" - tmstrings "github.com/tendermint/tendermint/libs/strings" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) diff --git a/types/params.go b/types/params.go index 28c969e461..754b50322d 100644 --- a/types/params.go +++ b/types/params.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/crypto/sr25519" - tmstrings "github.com/tendermint/tendermint/libs/strings" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) From 37f9d59969b03d49dc1ff4191b4a5ddac2bc8d13 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 28 Jun 2022 10:40:16 -0400 Subject: [PATCH 141/203] log: do not pre-process log results (#8895) I was digging around in the zero log functions, and the following functions using the `Fields()` method directly in zerolog, - https://github.com/rs/zerolog/blob/v1.27.0/event.go#L161 - https://github.com/rs/zerolog/blob/e9344a8c507b5f25a4962ff022526be0ddab8e72/fields.go#L15 Have meaningfully equivalent semantics and our pre-processing of values is 
not getting us much (except forcing zerolog to always sort our keys, and nooping in the case when you miss the last field.) With this change also, we can pass maps (or, practically a single map) to the logger, which might be a less wacky seeming interface. --- libs/log/default.go | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/libs/log/default.go b/libs/log/default.go index 7069776599..557ba6551b 100644 --- a/libs/log/default.go +++ b/libs/log/default.go @@ -61,20 +61,20 @@ func NewDefaultLogger(format, level string) (Logger, error) { } func (l defaultLogger) Info(msg string, keyVals ...interface{}) { - l.Logger.Info().Fields(getLogFields(keyVals...)).Msg(msg) + l.Logger.Info().Fields(keyVals).Msg(msg) } func (l defaultLogger) Error(msg string, keyVals ...interface{}) { - l.Logger.Error().Fields(getLogFields(keyVals...)).Msg(msg) + l.Logger.Error().Fields(keyVals).Msg(msg) } func (l defaultLogger) Debug(msg string, keyVals ...interface{}) { - l.Logger.Debug().Fields(getLogFields(keyVals...)).Msg(msg) + l.Logger.Debug().Fields(keyVals).Msg(msg) } func (l defaultLogger) With(keyVals ...interface{}) Logger { return &defaultLogger{ - Logger: l.Logger.With().Fields(getLogFields(keyVals...)).Logger(), + Logger: l.Logger.With().Fields(keyVals).Logger(), } } @@ -99,16 +99,3 @@ func OverrideWithNewLogger(logger Logger, format, level string) error { ol.Logger = nl.Logger return nil } - -func getLogFields(keyVals ...interface{}) map[string]interface{} { - if len(keyVals)%2 != 0 { - return nil - } - - fields := make(map[string]interface{}, len(keyVals)) - for i := 0; i < len(keyVals); i += 2 { - fields[fmt.Sprint(keyVals[i])] = keyVals[i+1] - } - - return fields -} From 3bec1668c61ee8d0afcb870836e34905463c1399 Mon Sep 17 00:00:00 2001 From: Thane Thomson Date: Wed, 29 Jun 2022 08:02:05 -0400 Subject: [PATCH 142/203] e2e: Extract Docker-specific functionality (#8754) * e2e: Extract Docker-specific functionality Extract Docker-specific 
functionality and put it behind an interface that should hopefully, without too much modification, allow us to implement a Digital Ocean-based infrastructure provider. Signed-off-by: Thane Thomson * Thread contexts through all potentially long-running functions Signed-off-by: Thane Thomson * Drop the "API" from interface/struct/var naming Signed-off-by: Thane Thomson * Simplify function returns Signed-off-by: Thane Thomson * Rename GenerateConfig to Setup to make it more generic Signed-off-by: Thane Thomson * Consolidate all infra functions into a single interface Signed-off-by: Thane Thomson * Localize linter directives Signed-off-by: Thane Thomson * Look up and use complete node in ShowNodeLogs and TailNodeLogs calls Signed-off-by: Thane Thomson * Restructure infra provider API into a separate package Signed-off-by: Thane Thomson * Rename interface again Signed-off-by: Thane Thomson * Rename exec functions for readability Signed-off-by: Thane Thomson * Relocate staticcheck lint directive Signed-off-by: Thane Thomson * Remove staticcheck lint directive Signed-off-by: Thane Thomson * Make testnet infra struct private Signed-off-by: Thane Thomson * Only pass testnetDir to Cleanup function Signed-off-by: Thane Thomson --- test/e2e/generator/main.go | 3 +- test/e2e/pkg/exec/exec.go | 34 +++++++ test/e2e/pkg/infra/docker/compose.go | 69 +++++++++++++ test/e2e/pkg/infra/docker/exec.go | 27 ++++++ test/e2e/pkg/infra/docker/infra.go | 140 +++++++++++++++++++++++++++ test/e2e/pkg/infra/infra.go | 84 ++++++++++++++++ test/e2e/pkg/testnet.go | 5 +- test/e2e/runner/cleanup.go | 58 ++--------- test/e2e/runner/exec.go | 50 ---------- test/e2e/runner/main.go | 75 +++++++++----- test/e2e/runner/perturb.go | 24 ++--- test/e2e/runner/setup.go | 84 +++------------- test/e2e/runner/start.go | 13 +-- test/e2e/runner/test.go | 6 +- 14 files changed, 454 insertions(+), 218 deletions(-) create mode 100644 test/e2e/pkg/exec/exec.go create mode 100644 test/e2e/pkg/infra/docker/compose.go 
create mode 100644 test/e2e/pkg/infra/docker/exec.go create mode 100644 test/e2e/pkg/infra/docker/infra.go create mode 100644 test/e2e/pkg/infra/infra.go delete mode 100644 test/e2e/runner/exec.go diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index bec78d89c4..da32b2831d 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -1,4 +1,3 @@ -//nolint: gosec package main import ( @@ -77,6 +76,8 @@ func (cli *CLI) generate() error { return err } + // nolint: gosec + // G404: Use of weak random number generator (math/rand instead of crypto/rand) manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), cli.opts) if err != nil { return err diff --git a/test/e2e/pkg/exec/exec.go b/test/e2e/pkg/exec/exec.go new file mode 100644 index 0000000000..9dcd793844 --- /dev/null +++ b/test/e2e/pkg/exec/exec.go @@ -0,0 +1,34 @@ +package exec + +import ( + "context" + "fmt" + "os" + osexec "os/exec" +) + +// Command executes a shell command. +func Command(ctx context.Context, args ...string) error { + // nolint: gosec + // G204: Subprocess launched with a potential tainted input or cmd arguments + cmd := osexec.CommandContext(ctx, args[0], args[1:]...) + out, err := cmd.CombinedOutput() + switch err := err.(type) { + case nil: + return nil + case *osexec.ExitError: + return fmt.Errorf("failed to run %q:\n%v", args, string(out)) + default: + return err + } +} + +// CommandVerbose executes a shell command while displaying its output. +func CommandVerbose(ctx context.Context, args ...string) error { + // nolint: gosec + // G204: Subprocess launched with a potential tainted input or cmd arguments + cmd := osexec.CommandContext(ctx, args[0], args[1:]...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} diff --git a/test/e2e/pkg/infra/docker/compose.go b/test/e2e/pkg/infra/docker/compose.go new file mode 100644 index 0000000000..3ea5845ee7 --- /dev/null +++ b/test/e2e/pkg/infra/docker/compose.go @@ -0,0 +1,69 @@ +package docker + +import ( + "bytes" + "text/template" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +// makeDockerCompose generates a Docker Compose config for a testnet. +func makeDockerCompose(testnet *e2e.Testnet) ([]byte, error) { + // Must use version 2 Docker Compose format, to support IPv6. + tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{ + "addUint32": func(x, y uint32) uint32 { + return x + y + }, + "isBuiltin": func(protocol e2e.Protocol, mode e2e.Mode) bool { + return mode == e2e.ModeLight || protocol == e2e.ProtocolBuiltin + }, + }).Parse(`version: '2.4' + +networks: + {{ .Name }}: + labels: + e2e: true + driver: bridge +{{- if .IPv6 }} + enable_ipv6: true +{{- end }} + ipam: + driver: default + config: + - subnet: {{ .IP }} + +services: +{{- range .Nodes }} + {{ .Name }}: + labels: + e2e: true + container_name: {{ .Name }} + image: tendermint/e2e-node +{{- if isBuiltin $.ABCIProtocol .Mode }} + entrypoint: /usr/bin/entrypoint-builtin +{{- else if .LogLevel }} + command: start --log-level {{ .LogLevel }} +{{- end }} + init: true + ports: + - 26656 + - {{ if .ProxyPort }}{{ addUint32 .ProxyPort 1000 }}:{{ end }}26660 + - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657 + - 6060 + volumes: + - ./{{ .Name }}:/tendermint + networks: + {{ $.Name }}: + ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }} + +{{end}}`) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = tmpl.Execute(&buf, testnet) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/test/e2e/pkg/infra/docker/exec.go b/test/e2e/pkg/infra/docker/exec.go new file mode 100644 index 0000000000..de0033e325 --- /dev/null +++ 
b/test/e2e/pkg/infra/docker/exec.go @@ -0,0 +1,27 @@ +package docker + +import ( + "context" + "path/filepath" + + "github.com/tendermint/tendermint/test/e2e/pkg/exec" +) + +// execCompose runs a Docker Compose command for a testnet. +func execCompose(ctx context.Context, dir string, args ...string) error { + return exec.Command(ctx, append( + []string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")}, + args...)...) +} + +// execComposeVerbose runs a Docker Compose command for a testnet and displays its output. +func execComposeVerbose(ctx context.Context, dir string, args ...string) error { + return exec.CommandVerbose(ctx, append( + []string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")}, + args...)...) +} + +// execDocker runs a Docker command. +func execDocker(ctx context.Context, args ...string) error { + return exec.Command(ctx, append([]string{"docker"}, args...)...) +} diff --git a/test/e2e/pkg/infra/docker/infra.go b/test/e2e/pkg/infra/docker/infra.go new file mode 100644 index 0000000000..9827be2414 --- /dev/null +++ b/test/e2e/pkg/infra/docker/infra.go @@ -0,0 +1,140 @@ +package docker + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/tendermint/tendermint/libs/log" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/exec" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" +) + +// testnetInfra provides an API for provisioning and manipulating +// infrastructure for a Docker-based testnet. +type testnetInfra struct { + logger log.Logger + testnet *e2e.Testnet +} + +var _ infra.TestnetInfra = &testnetInfra{} + +// NewTestnetInfra constructs an infrastructure provider that allows for Docker-based +// testnet infrastructure. 
+func NewTestnetInfra(logger log.Logger, testnet *e2e.Testnet) infra.TestnetInfra { + return &testnetInfra{ + logger: logger, + testnet: testnet, + } +} + +func (ti *testnetInfra) Setup(ctx context.Context) error { + compose, err := makeDockerCompose(ti.testnet) + if err != nil { + return err + } + // nolint: gosec + // G306: Expect WriteFile permissions to be 0600 or less + err = os.WriteFile(filepath.Join(ti.testnet.Dir, "docker-compose.yml"), compose, 0644) + if err != nil { + return err + } + return nil +} + +func (ti *testnetInfra) StartNode(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "up", "-d", node.Name) +} + +func (ti *testnetInfra) DisconnectNode(ctx context.Context, node *e2e.Node) error { + return execDocker(ctx, "network", "disconnect", ti.testnet.Name+"_"+ti.testnet.Name, node.Name) +} + +func (ti *testnetInfra) ConnectNode(ctx context.Context, node *e2e.Node) error { + return execDocker(ctx, "network", "connect", ti.testnet.Name+"_"+ti.testnet.Name, node.Name) +} + +func (ti *testnetInfra) KillNodeProcess(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "kill", "-s", "SIGKILL", node.Name) +} + +func (ti *testnetInfra) StartNodeProcess(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "start", node.Name) +} + +func (ti *testnetInfra) PauseNodeProcess(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "pause", node.Name) +} + +func (ti *testnetInfra) UnpauseNodeProcess(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "unpause", node.Name) +} + +func (ti *testnetInfra) TerminateNodeProcess(ctx context.Context, node *e2e.Node) error { + return execCompose(ctx, ti.testnet.Dir, "kill", "-s", "SIGTERM", node.Name) +} + +func (ti *testnetInfra) Stop(ctx context.Context) error { + return execCompose(ctx, ti.testnet.Dir, "down") +} + +func (ti *testnetInfra) Pause(ctx 
context.Context) error { + return execCompose(ctx, ti.testnet.Dir, "pause") +} + +func (ti *testnetInfra) Unpause(ctx context.Context) error { + return execCompose(ctx, ti.testnet.Dir, "unpause") +} + +func (ti *testnetInfra) ShowLogs(ctx context.Context) error { + return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--no-color") +} + +func (ti *testnetInfra) ShowNodeLogs(ctx context.Context, node *e2e.Node) error { + return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--no-color", node.Name) +} + +func (ti *testnetInfra) TailLogs(ctx context.Context) error { + return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--follow") +} + +func (ti *testnetInfra) TailNodeLogs(ctx context.Context, node *e2e.Node) error { + return execComposeVerbose(ctx, ti.testnet.Dir, "logs", "--follow", node.Name) +} + +func (ti *testnetInfra) Cleanup(ctx context.Context) error { + ti.logger.Info("Removing Docker containers and networks") + + // GNU xargs requires the -r flag to not run when input is empty, macOS + // does this by default. Ugly, but works. + xargsR := `$(if [[ $OSTYPE == "linux-gnu"* ]]; then echo -n "-r"; fi)` + + err := exec.Command(ctx, "bash", "-c", fmt.Sprintf( + "docker container ls -qa --filter label=e2e | xargs %v docker container rm -f", xargsR)) + if err != nil { + return err + } + + err = exec.Command(ctx, "bash", "-c", fmt.Sprintf( + "docker network ls -q --filter label=e2e | xargs %v docker network rm", xargsR)) + if err != nil { + return err + } + + // On Linux, some local files in the volume will be owned by root since Tendermint + // runs as root inside the container, so we need to clean them up from within a + // container running as root too. 
+ absDir, err := filepath.Abs(ti.testnet.Dir) + if err != nil { + return err + } + err = execDocker(ctx, "run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir), + "tendermint/e2e-node", "sh", "-c", "rm -rf /network/*/") + if err != nil { + return err + } + + return nil +} diff --git a/test/e2e/pkg/infra/infra.go b/test/e2e/pkg/infra/infra.go new file mode 100644 index 0000000000..2fa7c5ad97 --- /dev/null +++ b/test/e2e/pkg/infra/infra.go @@ -0,0 +1,84 @@ +package infra + +import ( + "context" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +// TestnetInfra provides an API for manipulating the infrastructure of a +// specific testnet. +type TestnetInfra interface { + // + // Overarching testnet infrastructure management. + // + + // Setup generates any necessary configuration for the infrastructure + // provider during testnet setup. + Setup(ctx context.Context) error + + // Stop will stop all running processes throughout the testnet without + // destroying any infrastructure. + Stop(ctx context.Context) error + + // Pause will pause all processes in the testnet. + Pause(ctx context.Context) error + + // Unpause will resume a paused testnet. + Unpause(ctx context.Context) error + + // ShowLogs prints all logs for the whole testnet to stdout. + ShowLogs(ctx context.Context) error + + // TailLogs tails the logs for all nodes in the testnet, if this is + // supported by the infrastructure provider. + TailLogs(ctx context.Context) error + + // Cleanup stops and destroys all running testnet infrastructure and + // deletes any generated files. + Cleanup(ctx context.Context) error + + // + // Node management, including node infrastructure. + // + + // StartNode provisions infrastructure for the given node and starts it. 
+ StartNode(ctx context.Context, node *e2e.Node) error + + // DisconnectNode modifies the specified node's network configuration such + // that it becomes bidirectionally disconnected from the network (it cannot + // see other nodes, and other nodes cannot see it). + DisconnectNode(ctx context.Context, node *e2e.Node) error + + // ConnectNode modifies the specified node's network configuration such + // that it can become bidirectionally connected. + ConnectNode(ctx context.Context, node *e2e.Node) error + + // ShowNodeLogs prints all logs for the node with the given ID to stdout. + ShowNodeLogs(ctx context.Context, node *e2e.Node) error + + // TailNodeLogs tails the logs for a single node, if this is supported by + // the infrastructure provider. + TailNodeLogs(ctx context.Context, node *e2e.Node) error + + // + // Node process management. + // + + // KillNodeProcess sends SIGKILL to a node's process. + KillNodeProcess(ctx context.Context, node *e2e.Node) error + + // StartNodeProcess will start a stopped node's process. Assumes that the + // node's infrastructure has previously been provisioned using + // ProvisionNode. + StartNodeProcess(ctx context.Context, node *e2e.Node) error + + // PauseNodeProcess sends a signal to the node's process to pause it. + PauseNodeProcess(ctx context.Context, node *e2e.Node) error + + // UnpauseNodeProcess resumes a paused node's process. + UnpauseNodeProcess(ctx context.Context, node *e2e.Node) error + + // TerminateNodeProcess sends SIGTERM to a node's process. + TerminateNodeProcess(ctx context.Context, node *e2e.Node) error +} diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index a5c8dffd67..2041796c4d 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -1,4 +1,3 @@ -//nolint: gosec package e2e import ( @@ -467,7 +466,7 @@ func (n Node) AddressRPC() string { // Client returns an RPC client for a node. 
func (n Node) Client() (*rpchttp.HTTP, error) { - return rpchttp.New(fmt.Sprintf("http://127.0.0.1:%v", n.ProxyPort)) + return rpchttp.New(fmt.Sprintf("http://%s", n.AddressRPC())) } // Stateless returns true if the node is either a seed node or a light node @@ -481,6 +480,8 @@ type keyGenerator struct { } func newKeyGenerator(seed int64) *keyGenerator { + // nolint: gosec + // G404: Use of weak random number generator (math/rand instead of crypto/rand) return &keyGenerator{ random: rand.New(rand.NewSource(seed)), } diff --git a/test/e2e/runner/cleanup.go b/test/e2e/runner/cleanup.go index b08e39f6da..25a1008e6e 100644 --- a/test/e2e/runner/cleanup.go +++ b/test/e2e/runner/cleanup.go @@ -1,70 +1,32 @@ package main import ( + "context" "errors" "fmt" "os" - "path/filepath" "github.com/tendermint/tendermint/libs/log" - e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" ) -// Cleanup removes the Docker Compose containers and testnet directory. -func Cleanup(logger log.Logger, testnet *e2e.Testnet) error { - err := cleanupDocker(logger) - if err != nil { - return err +// Cleanup destroys all infrastructure and removes all generated testnet files. +func Cleanup(ctx context.Context, logger log.Logger, testnetDir string, ti infra.TestnetInfra) error { + if testnetDir == "" { + return errors.New("no testnet directory set") } - return cleanupDir(logger, testnet.Dir) -} - -// cleanupDocker removes all E2E resources (with label e2e=True), regardless -// of testnet. -func cleanupDocker(logger log.Logger) error { - logger.Info("Removing Docker containers and networks") - - // GNU xargs requires the -r flag to not run when input is empty, macOS - // does this by default. Ugly, but works. 
- xargsR := `$(if [[ $OSTYPE == "linux-gnu"* ]]; then echo -n "-r"; fi)` - err := exec("bash", "-c", fmt.Sprintf( - "docker container ls -qa --filter label=e2e | xargs %v docker container rm -f", xargsR)) - if err != nil { + if err := ti.Cleanup(ctx); err != nil { return err } - return exec("bash", "-c", fmt.Sprintf( - "docker network ls -q --filter label=e2e | xargs %v docker network rm", xargsR)) -} - -// cleanupDir cleans up a testnet directory -func cleanupDir(logger log.Logger, dir string) error { - if dir == "" { - return errors.New("no directory set") - } - - _, err := os.Stat(dir) + _, err := os.Stat(testnetDir) if os.IsNotExist(err) { return nil } else if err != nil { return err } - logger.Info(fmt.Sprintf("Removing testnet directory %q", dir)) - - // On Linux, some local files in the volume will be owned by root since Tendermint - // runs as root inside the container, so we need to clean them up from within a - // container running as root too. - absDir, err := filepath.Abs(dir) - if err != nil { - return err - } - err = execDocker("run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir), - "tendermint/e2e-node", "sh", "-c", "rm -rf /network/*/") - if err != nil { - return err - } - - return os.RemoveAll(dir) + logger.Info(fmt.Sprintf("Removing testnet directory %q", testnetDir)) + return os.RemoveAll(testnetDir) } diff --git a/test/e2e/runner/exec.go b/test/e2e/runner/exec.go deleted file mode 100644 index f2bc5163c0..0000000000 --- a/test/e2e/runner/exec.go +++ /dev/null @@ -1,50 +0,0 @@ -//nolint: gosec -package main - -import ( - "fmt" - "os" - osexec "os/exec" - "path/filepath" -) - -// execute executes a shell command. -func exec(args ...string) error { - cmd := osexec.Command(args[0], args[1:]...) 
- out, err := cmd.CombinedOutput() - switch err := err.(type) { - case nil: - return nil - case *osexec.ExitError: - return fmt.Errorf("failed to run %q:\n%v", args, string(out)) - default: - return err - } -} - -// execVerbose executes a shell command while displaying its output. -func execVerbose(args ...string) error { - cmd := osexec.Command(args[0], args[1:]...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - return cmd.Run() -} - -// execCompose runs a Docker Compose command for a testnet. -func execCompose(dir string, args ...string) error { - return exec(append( - []string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")}, - args...)...) -} - -// execComposeVerbose runs a Docker Compose command for a testnet and displays its output. -func execComposeVerbose(dir string, args ...string) error { - return execVerbose(append( - []string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")}, - args...)...) -} - -// execDocker runs a Docker command. -func execDocker(args ...string) error { - return exec(append([]string{"docker"}, args...)...) 
-} diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index c4a73d33f9..9a24f11417 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -13,6 +13,8 @@ import ( "github.com/tendermint/tendermint/libs/log" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" + "github.com/tendermint/tendermint/test/e2e/pkg/infra/docker" ) const randomSeed = 2308084734268 @@ -33,6 +35,7 @@ func main() { type CLI struct { root *cobra.Command testnet *e2e.Testnet + infra infra.TestnetInfra preserve bool } @@ -53,12 +56,23 @@ func NewCLI(logger log.Logger) *CLI { if err != nil { return err } + providerID, err := cmd.Flags().GetString("provider") + if err != nil { + return err + } + switch providerID { + case "docker": + cli.infra = docker.NewTestnetInfra(logger, testnet) + logger.Info("Using Docker-based infrastructure provider") + default: + return fmt.Errorf("unrecognized infrastructure provider ID: %s", providerID) + } cli.testnet = testnet return nil }, RunE: func(cmd *cobra.Command, args []string) (err error) { - if err = Cleanup(logger, cli.testnet); err != nil { + if err = Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil { return err } defer func() { @@ -67,11 +81,11 @@ func NewCLI(logger log.Logger) *CLI { } else if err != nil { logger.Info("Preserving testnet that encountered error", "err", err) - } else if err := Cleanup(logger, cli.testnet); err != nil { + } else if err := Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil { logger.Error("error cleaning up testnet contents", "err", err) } }() - if err = Setup(logger, cli.testnet); err != nil { + if err = Setup(cmd.Context(), logger, cli.testnet, cli.infra); err != nil { return err } @@ -87,7 +101,7 @@ func NewCLI(logger log.Logger) *CLI { chLoadResult <- Load(lctx, logger, r, cli.testnet) }() startAt := time.Now() - if err = Start(ctx, logger, cli.testnet); err != nil { + if err = Start(ctx, logger, 
cli.testnet, cli.infra); err != nil { return err } @@ -96,7 +110,7 @@ func NewCLI(logger log.Logger) *CLI { } if cli.testnet.HasPerturbations() { - if err = Perturb(ctx, logger, cli.testnet); err != nil { + if err = Perturb(ctx, logger, cli.testnet, cli.infra); err != nil { return err } if err = Wait(ctx, logger, cli.testnet, 5); err != nil { // allow some txs to go through @@ -134,7 +148,7 @@ func NewCLI(logger log.Logger) *CLI { if err = Wait(ctx, logger, cli.testnet, 5); err != nil { // wait for network to settle before tests return err } - if err := Test(cli.testnet); err != nil { + if err := Test(ctx, cli.testnet); err != nil { return err } return nil @@ -144,6 +158,8 @@ func NewCLI(logger log.Logger) *CLI { cli.root.PersistentFlags().StringP("file", "f", "", "Testnet TOML manifest") _ = cli.root.MarkPersistentFlagRequired("file") + cli.root.PersistentFlags().String("provider", "docker", "Which infrastructure provider to use") + cli.root.Flags().BoolVarP(&cli.preserve, "preserve", "p", false, "Preserves the running of the test net after tests are completed") @@ -156,7 +172,7 @@ func NewCLI(logger log.Logger) *CLI { Use: "setup", Short: "Generates the testnet directory and configuration", RunE: func(cmd *cobra.Command, args []string) error { - return Setup(logger, cli.testnet) + return Setup(cmd.Context(), logger, cli.testnet, cli.infra) }, }) @@ -166,12 +182,12 @@ func NewCLI(logger log.Logger) *CLI { RunE: func(cmd *cobra.Command, args []string) error { _, err := os.Stat(cli.testnet.Dir) if os.IsNotExist(err) { - err = Setup(logger, cli.testnet) + err = Setup(cmd.Context(), logger, cli.testnet, cli.infra) } if err != nil { return err } - return Start(cmd.Context(), logger, cli.testnet) + return Start(cmd.Context(), logger, cli.testnet, cli.infra) }, }) @@ -179,7 +195,7 @@ func NewCLI(logger log.Logger) *CLI { Use: "perturb", Short: "Perturbs the Docker testnet, e.g. 
by restarting or disconnecting nodes", RunE: func(cmd *cobra.Command, args []string) error { - return Perturb(cmd.Context(), logger, cli.testnet) + return Perturb(cmd.Context(), logger, cli.testnet, cli.infra) }, }) @@ -196,7 +212,7 @@ func NewCLI(logger log.Logger) *CLI { Short: "Stops the Docker testnet", RunE: func(cmd *cobra.Command, args []string) error { logger.Info("Stopping testnet") - return execCompose(cli.testnet.Dir, "down") + return cli.infra.Stop(cmd.Context()) }, }) @@ -205,7 +221,7 @@ func NewCLI(logger log.Logger) *CLI { Short: "Pauses the Docker testnet", RunE: func(cmd *cobra.Command, args []string) error { logger.Info("Pausing testnet") - return execCompose(cli.testnet.Dir, "pause") + return cli.infra.Pause(cmd.Context()) }, }) @@ -214,7 +230,7 @@ func NewCLI(logger log.Logger) *CLI { Short: "Resumes the Docker testnet", RunE: func(cmd *cobra.Command, args []string) error { logger.Info("Resuming testnet") - return execCompose(cli.testnet.Dir, "unpause") + return cli.infra.Unpause(cmd.Context()) }, }) @@ -259,7 +275,7 @@ func NewCLI(logger log.Logger) *CLI { Use: "test", Short: "Runs test cases against a running testnet", RunE: func(cmd *cobra.Command, args []string) error { - return Test(cli.testnet) + return Test(cmd.Context(), cli.testnet) }, }) @@ -267,17 +283,24 @@ func NewCLI(logger log.Logger) *CLI { Use: "cleanup", Short: "Removes the testnet directory", RunE: func(cmd *cobra.Command, args []string) error { - return Cleanup(logger, cli.testnet) + return Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra) }, }) cli.root.AddCommand(&cobra.Command{ Use: "logs [node]", - Short: "Shows the testnet or a specefic node's logs", + Short: "Shows the testnet or a specific node's logs", Example: "runner logs validator03", Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - return execComposeVerbose(cli.testnet.Dir, append([]string{"logs", "--no-color"}, args...)...) 
+ if len(args) > 0 { + node := cli.testnet.LookupNode(args[0]) + if node == nil { + return fmt.Errorf("no such node: %s", args[0]) + } + return cli.infra.ShowNodeLogs(cmd.Context(), node) + } + return cli.infra.ShowLogs(cmd.Context()) }, }) @@ -287,9 +310,13 @@ func NewCLI(logger log.Logger) *CLI { Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 1 { - return execComposeVerbose(cli.testnet.Dir, "logs", "--follow", args[0]) + node := cli.testnet.LookupNode(args[0]) + if node == nil { + return fmt.Errorf("no such node: %s", args[0]) + } + return cli.infra.TailNodeLogs(cmd.Context(), node) } - return execComposeVerbose(cli.testnet.Dir, "logs", "--follow") + return cli.infra.TailLogs(cmd.Context()) }, }) @@ -302,20 +329,20 @@ func NewCLI(logger log.Logger) *CLI { Min Block Interval Max Block Interval over a 100 block sampling period. - + Does not run any perbutations. `, RunE: func(cmd *cobra.Command, args []string) error { - if err := Cleanup(logger, cli.testnet); err != nil { + if err := Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil { return err } defer func() { - if err := Cleanup(logger, cli.testnet); err != nil { + if err := Cleanup(cmd.Context(), logger, cli.testnet.Dir, cli.infra); err != nil { logger.Error("error cleaning up testnet contents", "err", err) } }() - if err := Setup(logger, cli.testnet); err != nil { + if err := Setup(cmd.Context(), logger, cli.testnet, cli.infra); err != nil { return err } @@ -331,7 +358,7 @@ Does not run any perbutations. 
chLoadResult <- Load(lctx, logger, r, cli.testnet) }() - if err := Start(ctx, logger, cli.testnet); err != nil { + if err := Start(ctx, logger, cli.testnet, cli.infra); err != nil { return err } diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go index acabf7f342..76a209ea24 100644 --- a/test/e2e/runner/perturb.go +++ b/test/e2e/runner/perturb.go @@ -8,10 +8,11 @@ import ( "github.com/tendermint/tendermint/libs/log" rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" ) // Perturbs a running testnet. -func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { +func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, ti infra.TestnetInfra) error { timer := time.NewTimer(0) // first tick fires immediately; reset below defer timer.Stop() @@ -21,7 +22,7 @@ func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error case <-ctx.Done(): return ctx.Err() case <-timer.C: - _, err := PerturbNode(ctx, logger, node, perturbation) + _, err := PerturbNode(ctx, logger, node, perturbation, ti) if err != nil { return err } @@ -36,46 +37,45 @@ func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error // PerturbNode perturbs a node with a given perturbation, returning its status // after recovering. 
-func PerturbNode(ctx context.Context, logger log.Logger, node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.ResultStatus, error) { - testnet := node.Testnet +func PerturbNode(ctx context.Context, logger log.Logger, node *e2e.Node, perturbation e2e.Perturbation, ti infra.TestnetInfra) (*rpctypes.ResultStatus, error) { switch perturbation { case e2e.PerturbationDisconnect: logger.Info(fmt.Sprintf("Disconnecting node %v...", node.Name)) - if err := execDocker("network", "disconnect", testnet.Name+"_"+testnet.Name, node.Name); err != nil { + if err := ti.DisconnectNode(ctx, node); err != nil { return nil, err } time.Sleep(10 * time.Second) - if err := execDocker("network", "connect", testnet.Name+"_"+testnet.Name, node.Name); err != nil { + if err := ti.ConnectNode(ctx, node); err != nil { return nil, err } case e2e.PerturbationKill: logger.Info(fmt.Sprintf("Killing node %v...", node.Name)) - if err := execCompose(testnet.Dir, "kill", "-s", "SIGKILL", node.Name); err != nil { + if err := ti.KillNodeProcess(ctx, node); err != nil { return nil, err } time.Sleep(10 * time.Second) - if err := execCompose(testnet.Dir, "start", node.Name); err != nil { + if err := ti.StartNodeProcess(ctx, node); err != nil { return nil, err } case e2e.PerturbationPause: logger.Info(fmt.Sprintf("Pausing node %v...", node.Name)) - if err := execCompose(testnet.Dir, "pause", node.Name); err != nil { + if err := ti.PauseNodeProcess(ctx, node); err != nil { return nil, err } time.Sleep(10 * time.Second) - if err := execCompose(testnet.Dir, "unpause", node.Name); err != nil { + if err := ti.UnpauseNodeProcess(ctx, node); err != nil { return nil, err } case e2e.PerturbationRestart: logger.Info(fmt.Sprintf("Restarting node %v...", node.Name)) - if err := execCompose(testnet.Dir, "kill", "-s", "SIGTERM", node.Name); err != nil { + if err := ti.TerminateNodeProcess(ctx, node); err != nil { return nil, err } time.Sleep(10 * time.Second) - if err := execCompose(testnet.Dir, "start", node.Name); 
err != nil { + if err := ti.StartNodeProcess(ctx, node); err != nil { return nil, err } diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 9e6e5cc183..24477c9e01 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -1,8 +1,8 @@ -// nolint: gosec package main import ( "bytes" + "context" "encoding/base64" "encoding/json" "errors" @@ -12,7 +12,6 @@ import ( "regexp" "sort" "strings" - "text/template" "time" "github.com/BurntSushi/toml" @@ -22,6 +21,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" "github.com/tendermint/tendermint/types" ) @@ -39,7 +39,7 @@ const ( ) // Setup sets up the testnet configuration. -func Setup(logger log.Logger, testnet *e2e.Testnet) error { +func Setup(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, ti infra.TestnetInfra) error { logger.Info(fmt.Sprintf("Generating testnet files in %q", testnet.Dir)) err := os.MkdirAll(testnet.Dir, os.ModePerm) @@ -47,15 +47,6 @@ func Setup(logger log.Logger, testnet *e2e.Testnet) error { return err } - compose, err := MakeDockerCompose(testnet) - if err != nil { - return err - } - err = os.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644) - if err != nil { - return err - } - genesis, err := MakeGenesis(testnet) if err != nil { return err @@ -92,6 +83,8 @@ func Setup(logger log.Logger, testnet *e2e.Testnet) error { if err != nil { return err } + // nolint: gosec + // G306: Expect WriteFile permissions to be 0600 or less err = os.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) if err != nil { return err @@ -131,68 +124,11 @@ func Setup(logger log.Logger, testnet *e2e.Testnet) error { } } - return nil -} - -// MakeDockerCompose generates a Docker Compose config for a testnet. 
-func MakeDockerCompose(testnet *e2e.Testnet) ([]byte, error) { - // Must use version 2 Docker Compose format, to support IPv6. - tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{ - "addUint32": func(x, y uint32) uint32 { - return x + y - }, - "isBuiltin": func(protocol e2e.Protocol, mode e2e.Mode) bool { - return mode == e2e.ModeLight || protocol == e2e.ProtocolBuiltin - }, - }).Parse(`version: '2.4' - -networks: - {{ .Name }}: - labels: - e2e: true - driver: bridge -{{- if .IPv6 }} - enable_ipv6: true -{{- end }} - ipam: - driver: default - config: - - subnet: {{ .IP }} - -services: -{{- range .Nodes }} - {{ .Name }}: - labels: - e2e: true - container_name: {{ .Name }} - image: tendermint/e2e-node -{{- if isBuiltin $.ABCIProtocol .Mode }} - entrypoint: /usr/bin/entrypoint-builtin -{{- else if .LogLevel }} - command: start --log-level {{ .LogLevel }} -{{- end }} - init: true - ports: - - 26656 - - {{ if .ProxyPort }}{{ addUint32 .ProxyPort 1000 }}:{{ end }}26660 - - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657 - - 6060 - volumes: - - ./{{ .Name }}:/tendermint - networks: - {{ $.Name }}: - ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }} - -{{end}}`) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = tmpl.Execute(&buf, testnet) - if err != nil { - return nil, err + if err := ti.Setup(ctx); err != nil { + return err } - return buf.Bytes(), nil + + return nil } // MakeGenesis generates a genesis document. 
@@ -421,5 +357,7 @@ func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error { } bz = regexp.MustCompile(`(?m)^trust-height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-height = %v`, height))) bz = regexp.MustCompile(`(?m)^trust-hash =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-hash = "%X"`, hash))) + // nolint: gosec + // G306: Expect WriteFile permissions to be 0600 or less return os.WriteFile(cfgPath, bz, 0644) } diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go index be9661df3a..5d5c2e7a96 100644 --- a/test/e2e/runner/start.go +++ b/test/e2e/runner/start.go @@ -8,9 +8,10 @@ import ( "github.com/tendermint/tendermint/libs/log" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" ) -func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { +func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, ti infra.TestnetInfra) error { if len(testnet.Nodes) == 0 { return fmt.Errorf("no nodes in testnet") } @@ -44,7 +45,7 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { for len(nodeQueue) > 0 && nodeQueue[0].StartAt == 0 { node := nodeQueue[0] nodeQueue = nodeQueue[1:] - if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil { + if err := ti.StartNode(ctx, node); err != nil { return err } @@ -58,7 +59,7 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { return err } node.HasStarted = true - logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v", node.Name, node.ProxyPort)) + logger.Info(fmt.Sprintf("Node %v up on http://%v:%v", node.IP, node.Name, node.ProxyPort)) } networkHeight := testnet.InitialHeight @@ -106,7 +107,7 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { } } - if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil { + if err := ti.StartNode(ctx, node); err != nil { return err } @@ -128,8 
+129,8 @@ func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { } else { lastNodeHeight = status.SyncInfo.LatestBlockHeight } - logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v at height %v", - node.Name, node.ProxyPort, lastNodeHeight)) + logger.Info(fmt.Sprintf("Node %v up on http://%v:%v at height %v", + node.IP, node.Name, node.ProxyPort, lastNodeHeight)) } return nil diff --git a/test/e2e/runner/test.go b/test/e2e/runner/test.go index da7a4a50ff..7766d6c8d9 100644 --- a/test/e2e/runner/test.go +++ b/test/e2e/runner/test.go @@ -1,17 +1,19 @@ package main import ( + "context" "os" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/exec" ) // Test runs test cases under tests/ -func Test(testnet *e2e.Testnet) error { +func Test(ctx context.Context, testnet *e2e.Testnet) error { err := os.Setenv("E2E_MANIFEST", testnet.File) if err != nil { return err } - return execVerbose("./build/tests", "-test.count=1") + return exec.CommandVerbose(ctx, "./build/tests", "-test.count=1") } From 60881f1d06cf69bfc1fb2a27c18d0e4061fc19d7 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 30 Jun 2022 09:01:02 -0400 Subject: [PATCH 143/203] p2p: stop mconn channel sends without timeout (#8906) --- internal/p2p/conn/connection.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index c8fc211888..8f8453e71a 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -100,7 +100,8 @@ type MConnection struct { // used to ensure FlushStop and OnStop // are safe to call concurrently. 
- stopMtx sync.Mutex + stopMtx sync.Mutex + stopSignal <-chan struct{} cancel context.CancelFunc @@ -207,6 +208,7 @@ func (c *MConnection) OnStart(ctx context.Context) error { c.quitSendRoutine = make(chan struct{}) c.doneSendRoutine = make(chan struct{}) c.quitRecvRoutine = make(chan struct{}) + c.stopSignal = ctx.Done() c.setRecvLastMsgAt(time.Now()) go c.sendRoutine(ctx) go c.recvRoutine(ctx) @@ -681,6 +683,8 @@ func (ch *channel) sendBytes(bytes []byte) bool { return true case <-time.After(defaultSendTimeout): return false + case <-ch.conn.stopSignal: + return false } } From 5c26db733b0b09e35990e70645e093cab0a3ce45 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Jun 2022 16:34:43 +0000 Subject: [PATCH 144/203] build(deps): Bump github.com/stretchr/testify from 1.7.5 to 1.8.0 (#8907) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.5 to 1.8.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/stretchr/testify&package-manager=go_modules&previous-version=1.7.5&new-version=1.8.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fde7586ade..bae573a4ab 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.5.0 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.7.5 + github.com/stretchr/testify v1.8.0 github.com/tendermint/tm-db v0.6.6 golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 diff --git a/go.sum b/go.sum index e055c300e5..f7e52298e3 100644 --- a/go.sum +++ b/go.sum @@ -1077,8 +1077,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.7.5 h1:s5PTfem8p8EbKQOctVV53k6jCJt3UX4IEJzwh+C324Q= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= From 47cb30fc1d647183d500b0ecc31248df5dfc678c Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Thu, 30 Jun 2022 16:51:16 -0400 Subject: [PATCH 145/203] p2p: set outgoing connections to around 20% of total connections (#8913) --- config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/config.go 
b/config/config.go index 804c5fc872..230fed98cb 100644 --- a/config/config.go +++ b/config/config.go @@ -671,7 +671,7 @@ func DefaultP2PConfig() *P2PConfig { ExternalAddress: "", UPNP: false, MaxConnections: 64, - MaxOutgoingConnections: 32, + MaxOutgoingConnections: 12, MaxIncomingConnectionAttempts: 100, FlushThrottleTimeout: 100 * time.Millisecond, // The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes. From 5274f80de437795c4782b52f73dcec76616eefe2 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Thu, 30 Jun 2022 17:48:10 -0400 Subject: [PATCH 146/203] p2p: fix flakey test due to disconnect cooldown (#8917) This test was made flakey by #8839. The cooldown period means that the node in the test will not try to reconnect as quickly as the test expects. This change makes the cooldown shorter in the test so that the node quickly reconnects. --- internal/p2p/p2ptest/network.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index c040a08a88..8133449152 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -252,12 +252,13 @@ func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions) require.NotNil(t, ep, "transport not listening an endpoint") peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - MinRetryTime: 10 * time.Millisecond, - MaxRetryTime: 100 * time.Millisecond, - RetryTimeJitter: time.Millisecond, - MaxPeers: opts.MaxPeers, - MaxConnected: opts.MaxConnected, - Metrics: p2p.NopMetrics(), + MinRetryTime: 10 * time.Millisecond, + DisconnectCooldownPeriod: 10 * time.Millisecond, + MaxRetryTime: 100 * time.Millisecond, + RetryTimeJitter: time.Millisecond, + MaxPeers: opts.MaxPeers, + MaxConnected: opts.MaxConnected, + Metrics: p2p.NopMetrics(), }) require.NoError(t, err) From 
921530c352d64229d0e78f6e1e14fe186d00b0db Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Thu, 30 Jun 2022 18:02:59 -0400 Subject: [PATCH 147/203] p2p: use correct context error (#8916) handshakeCtx is the internal context carrying the timeout. Its error should be used for the error return. --- internal/p2p/transport_mconn.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index 13a65b9737..523fbf89fe 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -315,7 +315,7 @@ func (c *mConnConnection) Handshake( select { case <-handshakeCtx.Done(): _ = c.Close() - return types.NodeInfo{}, nil, ctx.Err() + return types.NodeInfo{}, nil, handshakeCtx.Err() case err := <-errCh: if err != nil { From 1d96faa35a216553fcc8d1be7d85a90450ca5be0 Mon Sep 17 00:00:00 2001 From: Ian Jungyong Um <31336310+code0xff@users.noreply.github.com> Date: Fri, 1 Jul 2022 21:44:39 +0900 Subject: [PATCH 148/203] p2p: fix typo (#8922) Fix minor typos in peermanager --- internal/p2p/peermanager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index ac2e28225c..8230b262eb 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -221,11 +221,11 @@ func (o *PeerManagerOptions) Validate() error { return nil } -// isPersistentPeer checks if a peer is in PersistentPeers. It will panic +// isPersistent checks if a peer is in PersistentPeers. It will panic // if called before optimize(). 
func (o *PeerManagerOptions) isPersistent(id types.NodeID) bool { if o.persistentPeers == nil { - panic("isPersistentPeer() called before optimize()") + panic("isPersistent() called before optimize()") } return o.persistentPeers[id] } From 6d8079559baaf7428e16e504134ceb4e1e9d37db Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Jul 2022 14:17:45 +0000 Subject: [PATCH 149/203] build(deps): Bump github.com/vektra/mockery/v2 from 2.13.1 to 2.14.0 (#8924) Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.13.1 to 2.14.0.
Release notes

Sourced from github.com/vektra/mockery/v2's releases.

v2.14.0

Changelog

  • 8582bd6 Add test for getLocalizedPath
  • 686b90c Apply PR comments
  • de0cade Merge pull request #474 from RSid/add-flag-documentation
  • 1fa7d2f Merge pull request #480 from vektra/LandonTClipp-patch-1
  • 4d1f925 Merge pull request #484 from LouisBrunner/fix_generics_with_expecter
  • 519a84f Merge pull request #486 from abhinavnair/replace-ioutil
  • 2ca0b83 Merge pull request #488 from vektra/getLocalizedPath_test
  • cc82d49 Replace deprecated ioutil pkg with os & io
  • a420307 Update README.md
  • 4e4a96b Update issue template
  • fa182fe add documentation to readme
  • e4954a2 fix: add support for with-expecter when using generics
  • ca9ddd4 update issue template
Commits
  • 4d1f925 Merge pull request #484 from LouisBrunner/fix_generics_with_expecter
  • de0cade Merge pull request #474 from RSid/add-flag-documentation
  • 2ca0b83 Merge pull request #488 from vektra/getLocalizedPath_test
  • 8582bd6 Add test for getLocalizedPath
  • 519a84f Merge pull request #486 from abhinavnair/replace-ioutil
  • cc82d49 Replace deprecated ioutil pkg with os & io
  • e4954a2 fix: add support for with-expecter when using generics
  • ca9ddd4 update issue template
  • 686b90c Apply PR comments
  • 1fa7d2f Merge pull request #480 from vektra/LandonTClipp-patch-1
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/vektra/mockery/v2&package-manager=go_modules&previous-version=2.13.1&new-version=2.14.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index bae573a4ab..691efb09f6 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/creachadair/taskgroup v0.3.2 github.com/golangci/golangci-lint v1.46.0 github.com/google/go-cmp v0.5.8 - github.com/vektra/mockery/v2 v2.13.1 + github.com/vektra/mockery/v2 v2.14.0 gotest.tools v2.2.0+incompatible ) diff --git a/go.sum b/go.sum index f7e52298e3..ca88c3e8f4 100644 --- a/go.sum +++ b/go.sum @@ -1126,8 +1126,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/vektra/mockery/v2 v2.13.1 h1:Lqs7aZiC7TwZO76fJ/4Zsb3NaO4F7cuuz0mZLYeNwtQ= -github.com/vektra/mockery/v2 v2.13.1/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M= +github.com/vektra/mockery/v2 v2.14.0 h1:KZ1p5Hrn8tiY+LErRMr14HHle6khxo+JKOXLBW/yfqs= +github.com/vektra/mockery/v2 v2.14.0/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= From 331860c2a8c406fa89575165322b963925fb38c5 Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Mon, 4 Jul 2022 16:25:48 +0200 Subject: [PATCH 150/203] Restore `Commit` to the ABCI++ spec, and other late modifications (#8796) * Added VoteExtensionsEnableHeight * Fix reference to `modified` * Removed old pseudo-code, now included in spec * Removed markdown warnings in abci++_basic_concepts_002_draft.md * 
Restored `Commit` in the Methods section * Addressed remaining markdown warnings * Revisited intro and basic concepts section * Extra pass at all spec sections to recover Commit, and other ABCI++ spec modifications * Fixed links * make proto-gen * Remove _primes_ from spec notation * Update proto/tendermint/abci/types.proto Co-authored-by: Callum Waters * Update spec/abci++/abci++_tmint_expected_behavior_002_draft.md Co-authored-by: Callum Waters * Addressed @cmwaters' comments * Addressed @angbrav's and @mpoke's comments on spec * make proto-gen * Fix MD anchor reference * Clarify throughout the spec when `ProcessProposal` and `VerifyVoteExtension` are called * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: M. J. Fromberger * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: M. J. Fromberger * Update spec/abci++/abci++_app_requirements_002_draft.md Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com> * Update spec/abci++/abci++_basic_concepts_002_draft.md Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com> * Update spec/abci++/abci++_basic_concepts_002_draft.md Co-authored-by: M. J. Fromberger * Update spec/abci++/abci++_basic_concepts_002_draft.md Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com> * Update spec/abci++/abci++_methods_002_draft.md Co-authored-by: M. J. 
Fromberger * Update spec/abci++/abci++_tmint_expected_behavior_002_draft.md Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com> * Addresed comments * Renamed 'draft' files * Adatped links to new filenames * Fixed links and minor cosmetic changes * Renamed 'byzantine_validators' to 'misbehavior' in ABCI++ spec and protobufs * make proto-gen * Renamed 'byzantine_validators' to 'misbehavior' in the code * Fixed link * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update spec/abci++/abci++_basic_concepts.md Co-authored-by: Daniel * Update 
spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Update spec/abci++/abci++_methods.md Co-authored-by: Daniel * Addressed @cason's comments * Clarified conditions for `ProcessProposal` call at proposer Co-authored-by: Callum Waters Co-authored-by: M. J. 
Fromberger Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com> Co-authored-by: Daniel --- UPGRADING.md | 4 +- abci/example/kvstore/kvstore.go | 2 +- abci/types/types.pb.go | 495 +++++++++--------- docs/rfc/rfc-013-abci++.md | 2 +- .../proposer-based-timestamps-runbook.md | 2 +- internal/state/execution.go | 60 +-- internal/state/execution_test.go | 16 +- internal/state/helpers_test.go | 8 +- proto/tendermint/abci/types.proto | 10 +- spec/abci++/README.md | 8 +- ...02_draft.md => abci++_app_requirements.md} | 89 ++-- spec/abci++/abci++_basic_concepts.md | 468 +++++++++++++++++ .../abci++/abci++_basic_concepts_002_draft.md | 404 -------------- ...r_002_draft.md => abci++_client_server.md} | 10 +- ...methods_002_draft.md => abci++_methods.md} | 476 ++++++++++------- ...t.md => abci++_tmint_expected_behavior.md} | 107 ++-- spec/abci++/v0.md | 156 ------ spec/abci++/v1.md | 162 ------ spec/abci++/v2.md | 180 ------- spec/abci++/v3.md | 201 ------- spec/abci++/v4.md | 199 ------- 21 files changed, 1164 insertions(+), 1895 deletions(-) rename spec/abci++/{abci++_app_requirements_002_draft.md => abci++_app_requirements.md} (93%) create mode 100644 spec/abci++/abci++_basic_concepts.md delete mode 100644 spec/abci++/abci++_basic_concepts_002_draft.md rename spec/abci++/{abci++_client_server_002_draft.md => abci++_client_server.md} (91%) rename spec/abci++/{abci++_methods_002_draft.md => abci++_methods.md} (71%) rename spec/abci++/{abci++_tmint_expected_behavior_002_draft.md => abci++_tmint_expected_behavior.md} (66%) delete mode 100644 spec/abci++/v0.md delete mode 100644 spec/abci++/v1.md delete mode 100644 spec/abci++/v2.md delete mode 100644 spec/abci++/v3.md delete mode 100644 spec/abci++/v4.md diff --git a/UPGRADING.md b/UPGRADING.md index 13582e75b4..f9122a5ded 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -17,9 +17,9 @@ by Tendermint itself. Right now, we return a regular error when this happens. 
#### ABCI++ For information on how ABCI++ works, see the -[Specification](https://github.com/tendermint/tendermint/blob/master/spec/abci%2B%2B/README.md). +[Specification](spec/abci%2B%2B/README.md). In particular, the simplest way to upgrade your application is described -[here](https://github.com/tendermint/tendermint/blob/master/spec/abci%2B%2B/abci++_tmint_expected_behavior_002_draft.md#adapting-existing-applications-that-use-abci). +[here](spec/abci%2B%2B/abci++_tmint_expected_behavior.md#adapting-existing-applications-that-use-abci). #### Moving the `app_hash` parameter diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index bbb2fbe346..c9a2a148c6 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -175,7 +175,7 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal app.ValUpdates = make([]types.ValidatorUpdate, 0) // Punish validators who committed equivocation. - for _, ev := range req.ByzantineValidators { + for _, ev := range req.Misbehavior { if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE { addr := string(ev.Validator.Address) if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok { diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 946cfa6af5..9ce88f0452 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1120,12 +1120,12 @@ type RequestPrepareProposal struct { MaxTxBytes int64 `protobuf:"varint,1,opt,name=max_tx_bytes,json=maxTxBytes,proto3" json:"max_tx_bytes,omitempty"` // txs is an array of transactions that will be included in a block, // sent to the app for possible modifications. 
- Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` - LocalLastCommit ExtendedCommitInfo `protobuf:"bytes,3,opt,name=local_last_commit,json=localLastCommit,proto3" json:"local_last_commit"` - ByzantineValidators []Misbehavior `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` - Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` - NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` + LocalLastCommit ExtendedCommitInfo `protobuf:"bytes,3,opt,name=local_last_commit,json=localLastCommit,proto3" json:"local_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,4,rep,name=misbehavior,proto3" json:"misbehavior"` + Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` // address of the public key of the validator proposing the block. 
ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` } @@ -1184,9 +1184,9 @@ func (m *RequestPrepareProposal) GetLocalLastCommit() ExtendedCommitInfo { return ExtendedCommitInfo{} } -func (m *RequestPrepareProposal) GetByzantineValidators() []Misbehavior { +func (m *RequestPrepareProposal) GetMisbehavior() []Misbehavior { if m != nil { - return m.ByzantineValidators + return m.Misbehavior } return nil } @@ -1220,9 +1220,9 @@ func (m *RequestPrepareProposal) GetProposerAddress() []byte { } type RequestProcessProposal struct { - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` - ProposedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` - ByzantineValidators []Misbehavior `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + ProposedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,3,rep,name=misbehavior,proto3" json:"misbehavior"` // hash is the merkle root hash of the fields of the proposed block. 
Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` @@ -1279,9 +1279,9 @@ func (m *RequestProcessProposal) GetProposedLastCommit() CommitInfo { return CommitInfo{} } -func (m *RequestProcessProposal) GetByzantineValidators() []Misbehavior { +func (m *RequestProcessProposal) GetMisbehavior() []Misbehavior { if m != nil { - return m.ByzantineValidators + return m.Misbehavior } return nil } @@ -1444,10 +1444,10 @@ func (m *RequestVerifyVoteExtension) GetVoteExtension() []byte { } type RequestFinalizeBlock struct { - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` - DecidedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` - ByzantineValidators []Misbehavior `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` - // hash is the merkle root hash of the fields of the proposed block. + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + DecidedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,3,rep,name=misbehavior,proto3" json:"misbehavior"` + // hash is the merkle root hash of the fields of the decided block. 
Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` @@ -1503,9 +1503,9 @@ func (m *RequestFinalizeBlock) GetDecidedLastCommit() CommitInfo { return CommitInfo{} } -func (m *RequestFinalizeBlock) GetByzantineValidators() []Misbehavior { +func (m *RequestFinalizeBlock) GetMisbehavior() []Misbehavior { if m != nil { - return m.ByzantineValidators + return m.Misbehavior } return nil } @@ -2381,7 +2381,6 @@ func (m *ResponseDeliverTx) GetCodespace() string { } type ResponseCommit struct { - // reserve 1 RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` } @@ -3829,211 +3828,211 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 3253 bytes of a gzipped FileDescriptorProto + // 3257 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x73, 0x23, 0xd5, - 0x11, 0xd7, 0xe8, 0x5b, 0xad, 0xaf, 0xf1, 0xb3, 0x59, 0xb4, 0x62, 0xd7, 0x36, 0x43, 0x01, 0xcb, - 0x02, 0x36, 0xf1, 0x66, 0x61, 0xc9, 0x42, 0x28, 0x5b, 0xd6, 0x46, 0xf6, 0x7a, 0x6d, 0x33, 0x96, - 0x4d, 0x91, 0x0f, 0x86, 0xb1, 0xf4, 0x6c, 0x0d, 0x2b, 0x69, 0x86, 0x99, 0x91, 0x91, 0x39, 0x26, - 0xc5, 0x85, 0x43, 0x8a, 0x4b, 0x2a, 0x49, 0x55, 0xb8, 0x25, 0x55, 0xc9, 0x7f, 0x90, 0x5c, 0x72, - 0xca, 0x81, 0x43, 0x0e, 0x9c, 0x52, 0x39, 0x91, 0x14, 0xdc, 0xf8, 0x07, 0x72, 0x4b, 0xa5, 0xde, - 0xc7, 0x7c, 0x49, 0x33, 0xfa, 0x00, 0x8a, 0xaa, 0x54, 0x71, 0x9b, 0xd7, 0xd3, 0xdd, 0xef, 0xab, - 0x5f, 0x77, 0xff, 0xfa, 0x3d, 0x78, 0xcc, 0xc6, 0xfd, 0x36, 0x36, 0x7b, 0x5a, 0xdf, 0x5e, 0x57, - 0x4f, 0x5b, 0xda, 0xba, 0x7d, 0x69, 0x60, 0x6b, 0xcd, 0x30, 0x75, 0x5b, 0x47, 0x65, 0xef, 0xe7, - 0x1a, 0xf9, 0x59, 0xbd, 
0xee, 0xe3, 0x6e, 0x99, 0x97, 0x86, 0xad, 0xaf, 0x1b, 0xa6, 0xae, 0x9f, - 0x31, 0xfe, 0xea, 0xb5, 0xf1, 0xdf, 0x0f, 0xf1, 0x25, 0xd7, 0x16, 0x10, 0xa6, 0xbd, 0xac, 0x1b, - 0xaa, 0xa9, 0xf6, 0x9c, 0xdf, 0x2b, 0xe7, 0xba, 0x7e, 0xde, 0xc5, 0xeb, 0xb4, 0x75, 0x3a, 0x38, - 0x5b, 0xb7, 0xb5, 0x1e, 0xb6, 0x6c, 0xb5, 0x67, 0x70, 0x86, 0xa5, 0x73, 0xfd, 0x5c, 0xa7, 0x9f, - 0xeb, 0xe4, 0x8b, 0x51, 0xa5, 0xbf, 0xe4, 0x20, 0x23, 0xe3, 0x77, 0x07, 0xd8, 0xb2, 0xd1, 0x06, - 0x24, 0x71, 0xab, 0xa3, 0x57, 0x84, 0x55, 0xe1, 0x46, 0x7e, 0xe3, 0xda, 0xda, 0xc8, 0xf0, 0xd7, - 0x38, 0x5f, 0xbd, 0xd5, 0xd1, 0x1b, 0x31, 0x99, 0xf2, 0xa2, 0xdb, 0x90, 0x3a, 0xeb, 0x0e, 0xac, - 0x4e, 0x25, 0x4e, 0x85, 0xae, 0x47, 0x09, 0xdd, 0x23, 0x4c, 0x8d, 0x98, 0xcc, 0xb8, 0x49, 0x57, - 0x5a, 0xff, 0x4c, 0xaf, 0x24, 0x26, 0x77, 0xb5, 0xd3, 0x3f, 0xa3, 0x5d, 0x11, 0x5e, 0xb4, 0x05, - 0xa0, 0xf5, 0x35, 0x5b, 0x69, 0x75, 0x54, 0xad, 0x5f, 0x49, 0x52, 0xc9, 0xc7, 0xa3, 0x25, 0x35, - 0xbb, 0x46, 0x18, 0x1b, 0x31, 0x39, 0xa7, 0x39, 0x0d, 0x32, 0xdc, 0x77, 0x07, 0xd8, 0xbc, 0xac, - 0xa4, 0x26, 0x0f, 0xf7, 0x75, 0xc2, 0x44, 0x86, 0x4b, 0xb9, 0xd1, 0x2b, 0x90, 0x6d, 0x75, 0x70, - 0xeb, 0xa1, 0x62, 0x0f, 0x2b, 0x19, 0x2a, 0xb9, 0x12, 0x25, 0x59, 0x23, 0x7c, 0xcd, 0x61, 0x23, - 0x26, 0x67, 0x5a, 0xec, 0x13, 0xdd, 0x81, 0x74, 0x4b, 0xef, 0xf5, 0x34, 0xbb, 0x02, 0x54, 0x76, - 0x39, 0x52, 0x96, 0x72, 0x35, 0x62, 0x32, 0xe7, 0x47, 0xfb, 0x50, 0xea, 0x6a, 0x96, 0xad, 0x58, - 0x7d, 0xd5, 0xb0, 0x3a, 0xba, 0x6d, 0x55, 0xf2, 0x54, 0xc3, 0x93, 0x51, 0x1a, 0xf6, 0x34, 0xcb, - 0x3e, 0x72, 0x98, 0x1b, 0x31, 0xb9, 0xd8, 0xf5, 0x13, 0x88, 0x3e, 0xfd, 0xec, 0x0c, 0x9b, 0xae, - 0xc2, 0x4a, 0x61, 0xb2, 0xbe, 0x03, 0xc2, 0xed, 0xc8, 0x13, 0x7d, 0xba, 0x9f, 0x80, 0x7e, 0x02, - 0x8b, 0x5d, 0x5d, 0x6d, 0xbb, 0xea, 0x94, 0x56, 0x67, 0xd0, 0x7f, 0x58, 0x29, 0x52, 0xa5, 0xcf, - 0x44, 0x0e, 0x52, 0x57, 0xdb, 0x8e, 0x8a, 0x1a, 0x11, 0x68, 0xc4, 0xe4, 0x85, 0xee, 0x28, 0x11, - 0xbd, 0x05, 0x4b, 0xaa, 0x61, 0x74, 0x2f, 0x47, 0xb5, 0x97, 
0xa8, 0xf6, 0x9b, 0x51, 0xda, 0x37, - 0x89, 0xcc, 0xa8, 0x7a, 0xa4, 0x8e, 0x51, 0x51, 0x13, 0x44, 0xc3, 0xc4, 0x86, 0x6a, 0x62, 0xc5, - 0x30, 0x75, 0x43, 0xb7, 0xd4, 0x6e, 0xa5, 0x4c, 0x75, 0x3f, 0x1d, 0xa5, 0xfb, 0x90, 0xf1, 0x1f, - 0x72, 0xf6, 0x46, 0x4c, 0x2e, 0x1b, 0x41, 0x12, 0xd3, 0xaa, 0xb7, 0xb0, 0x65, 0x79, 0x5a, 0xc5, - 0x69, 0x5a, 0x29, 0x7f, 0x50, 0x6b, 0x80, 0x84, 0xea, 0x90, 0xc7, 0x43, 0x22, 0xae, 0x5c, 0xe8, - 0x36, 0xae, 0x2c, 0x50, 0x85, 0x52, 0xe4, 0x09, 0xa5, 0xac, 0x27, 0xba, 0x8d, 0x1b, 0x31, 0x19, - 0xb0, 0xdb, 0x42, 0x2a, 0x3c, 0x72, 0x81, 0x4d, 0xed, 0xec, 0x92, 0xaa, 0x51, 0xe8, 0x1f, 0x4b, - 0xd3, 0xfb, 0x15, 0x44, 0x15, 0x3e, 0x1b, 0xa5, 0xf0, 0x84, 0x0a, 0x11, 0x15, 0x75, 0x47, 0xa4, - 0x11, 0x93, 0x17, 0x2f, 0xc6, 0xc9, 0xc4, 0xc4, 0xce, 0xb4, 0xbe, 0xda, 0xd5, 0xde, 0xc7, 0xca, - 0x69, 0x57, 0x6f, 0x3d, 0xac, 0x2c, 0x4e, 0x36, 0xb1, 0x7b, 0x9c, 0x7b, 0x8b, 0x30, 0x13, 0x13, - 0x3b, 0xf3, 0x13, 0xb6, 0x32, 0x90, 0xba, 0x50, 0xbb, 0x03, 0xbc, 0x9b, 0xcc, 0xa6, 0xc5, 0xcc, - 0x6e, 0x32, 0x9b, 0x15, 0x73, 0xbb, 0xc9, 0x6c, 0x4e, 0x04, 0xe9, 0x69, 0xc8, 0xfb, 0x5c, 0x12, - 0xaa, 0x40, 0xa6, 0x87, 0x2d, 0x4b, 0x3d, 0xc7, 0xd4, 0x83, 0xe5, 0x64, 0xa7, 0x29, 0x95, 0xa0, - 0xe0, 0x77, 0x43, 0xd2, 0x47, 0x82, 0x2b, 0x49, 0x3c, 0x0c, 0x91, 0xbc, 0xc0, 0x26, 0x5d, 0x08, - 0x2e, 0xc9, 0x9b, 0xe8, 0x09, 0x28, 0xd2, 0x49, 0x28, 0xce, 0x7f, 0xe2, 0xe6, 0x92, 0x72, 0x81, - 0x12, 0x4f, 0x38, 0xd3, 0x0a, 0xe4, 0x8d, 0x0d, 0xc3, 0x65, 0x49, 0x50, 0x16, 0x30, 0x36, 0x0c, - 0x87, 0xe1, 0x71, 0x28, 0x90, 0x19, 0xbb, 0x1c, 0x49, 0xda, 0x49, 0x9e, 0xd0, 0x38, 0x8b, 0xf4, - 0xf7, 0x38, 0x88, 0xa3, 0xae, 0x0b, 0xdd, 0x81, 0x24, 0xf1, 0xe2, 0xdc, 0x21, 0x57, 0xd7, 0x98, - 0x8b, 0x5f, 0x73, 0x5c, 0xfc, 0x5a, 0xd3, 0x71, 0xf1, 0x5b, 0xd9, 0x4f, 0x3e, 0x5b, 0x89, 0x7d, - 0xf4, 0xaf, 0x15, 0x41, 0xa6, 0x12, 0xe8, 0x2a, 0x71, 0x58, 0xaa, 0xd6, 0x57, 0xb4, 0x36, 0x1d, - 0x72, 0x8e, 0x78, 0x23, 0x55, 0xeb, 0xef, 0xb4, 0xd1, 0x1e, 0x88, 0x2d, 0xbd, 0x6f, 0xe1, 0xbe, - 
0x35, 0xb0, 0x14, 0x16, 0x42, 0xb8, 0x1b, 0x0e, 0x38, 0x53, 0x16, 0xc8, 0x6a, 0x0e, 0xe7, 0x21, - 0x65, 0x94, 0xcb, 0xad, 0x20, 0x01, 0xdd, 0x03, 0xb8, 0x50, 0xbb, 0x5a, 0x5b, 0xb5, 0x75, 0xd3, - 0xaa, 0x24, 0x57, 0x13, 0x37, 0xf2, 0x1b, 0xab, 0x63, 0x5b, 0x7d, 0xe2, 0xb0, 0x1c, 0x1b, 0x6d, - 0xd5, 0xc6, 0x5b, 0x49, 0x32, 0x5c, 0xd9, 0x27, 0x89, 0x9e, 0x82, 0xb2, 0x6a, 0x18, 0x8a, 0x65, - 0xab, 0x36, 0x56, 0x4e, 0x2f, 0x6d, 0x6c, 0x51, 0x17, 0x5d, 0x90, 0x8b, 0xaa, 0x61, 0x1c, 0x11, - 0xea, 0x16, 0x21, 0xa2, 0x27, 0xa1, 0x44, 0xbc, 0xb9, 0xa6, 0x76, 0x95, 0x0e, 0xd6, 0xce, 0x3b, - 0x76, 0x25, 0xbd, 0x2a, 0xdc, 0x48, 0xc8, 0x45, 0x4e, 0x6d, 0x50, 0xa2, 0xd4, 0x76, 0x77, 0x9c, - 0x7a, 0x72, 0x84, 0x20, 0xd9, 0x56, 0x6d, 0x95, 0xae, 0x64, 0x41, 0xa6, 0xdf, 0x84, 0x66, 0xa8, - 0x76, 0x87, 0xaf, 0x0f, 0xfd, 0x46, 0x57, 0x20, 0xcd, 0xd5, 0x26, 0xa8, 0x5a, 0xde, 0x42, 0x4b, - 0x90, 0x32, 0x4c, 0xfd, 0x02, 0xd3, 0xad, 0xcb, 0xca, 0xac, 0x21, 0xc9, 0x50, 0x0a, 0x7a, 0x7d, - 0x54, 0x82, 0xb8, 0x3d, 0xe4, 0xbd, 0xc4, 0xed, 0x21, 0x7a, 0x01, 0x92, 0x64, 0x21, 0x69, 0x1f, - 0xa5, 0x90, 0x38, 0xc7, 0xe5, 0x9a, 0x97, 0x06, 0x96, 0x29, 0xa7, 0x54, 0x86, 0x62, 0x20, 0x1a, - 0x48, 0x57, 0x60, 0x29, 0xcc, 0xb9, 0x4b, 0x1d, 0x97, 0x1e, 0x70, 0xd2, 0xe8, 0x36, 0x64, 0x5d, - 0xef, 0xce, 0x0c, 0xe7, 0xea, 0x58, 0xb7, 0x0e, 0xb3, 0xec, 0xb2, 0x12, 0x8b, 0x21, 0x1b, 0xd0, - 0x51, 0x79, 0x2c, 0x2f, 0xc8, 0x19, 0xd5, 0x30, 0x1a, 0xaa, 0xd5, 0x91, 0xde, 0x86, 0x4a, 0x94, - 0xe7, 0xf6, 0x2d, 0x98, 0x40, 0xcd, 0xde, 0x59, 0xb0, 0x2b, 0x90, 0x3e, 0xd3, 0xcd, 0x9e, 0x6a, - 0x53, 0x65, 0x45, 0x99, 0xb7, 0xc8, 0x42, 0x32, 0x2f, 0x9e, 0xa0, 0x64, 0xd6, 0x90, 0x14, 0xb8, - 0x1a, 0xe9, 0xbd, 0x89, 0x88, 0xd6, 0x6f, 0x63, 0xb6, 0xac, 0x45, 0x99, 0x35, 0x3c, 0x45, 0x6c, - 0xb0, 0xac, 0x41, 0xba, 0xb5, 0xe8, 0x5c, 0xa9, 0xfe, 0x9c, 0xcc, 0x5b, 0xd2, 0x9f, 0x12, 0x70, - 0x25, 0xdc, 0x87, 0xa3, 0x55, 0x28, 0xf4, 0xd4, 0xa1, 0x62, 0x0f, 0xb9, 0xd9, 0x09, 0x74, 0xe3, - 0xa1, 0xa7, 0x0e, 0x9b, 0x43, 0x66, 
0x73, 0x22, 0x24, 0xec, 0xa1, 0x55, 0x89, 0xaf, 0x26, 0x6e, - 0x14, 0x64, 0xf2, 0x89, 0x8e, 0x61, 0xa1, 0xab, 0xb7, 0xd4, 0xae, 0xd2, 0x55, 0x2d, 0x5b, 0xe1, - 0xc1, 0x9d, 0x1d, 0xa2, 0x27, 0xc6, 0x16, 0x9b, 0x79, 0x63, 0xdc, 0x66, 0xfb, 0x49, 0x1c, 0x0e, - 0xb7, 0xff, 0x32, 0xd5, 0xb1, 0xa7, 0x3a, 0x5b, 0x8d, 0x8e, 0x61, 0xe9, 0xf4, 0xf2, 0x7d, 0xb5, - 0x6f, 0x6b, 0x7d, 0xac, 0x8c, 0x1d, 0xab, 0x71, 0xeb, 0x79, 0xa0, 0x59, 0xa7, 0xb8, 0xa3, 0x5e, - 0x68, 0xba, 0xc9, 0x55, 0x2e, 0xba, 0xf2, 0x27, 0xde, 0xd9, 0xf2, 0xf6, 0x28, 0x15, 0x30, 0x6a, - 0xc7, 0xbd, 0xa4, 0xe7, 0x76, 0x2f, 0x2f, 0xc0, 0x52, 0x1f, 0x0f, 0x6d, 0xdf, 0x18, 0x99, 0xe1, - 0x64, 0xe8, 0x5e, 0x20, 0xf2, 0xcf, 0xeb, 0x9f, 0xd8, 0x10, 0x7a, 0x86, 0x86, 0x45, 0x43, 0xb7, - 0xb0, 0xa9, 0xa8, 0xed, 0xb6, 0x89, 0x2d, 0xab, 0x92, 0xa5, 0xdc, 0x65, 0x87, 0xbe, 0xc9, 0xc8, - 0xd2, 0x6f, 0xfd, 0x7b, 0x15, 0x0c, 0x83, 0x7c, 0x27, 0x04, 0x6f, 0x27, 0x8e, 0x60, 0x89, 0xcb, - 0xb7, 0x03, 0x9b, 0xc1, 0xd2, 0xd1, 0xc7, 0xc6, 0x0f, 0xdc, 0xe8, 0x26, 0x20, 0x47, 0x7c, 0x86, - 0x7d, 0x48, 0x7c, 0xbd, 0x7d, 0x40, 0x90, 0xa4, 0xab, 0x94, 0x64, 0x4e, 0x88, 0x7c, 0xff, 0xbf, - 0xed, 0xcd, 0x6b, 0xb0, 0x30, 0x96, 0x63, 0xb8, 0xf3, 0x12, 0x42, 0xe7, 0x15, 0xf7, 0xcf, 0x4b, - 0xfa, 0x9d, 0x00, 0xd5, 0xe8, 0xa4, 0x22, 0x54, 0xd5, 0xb3, 0xb0, 0xe0, 0xce, 0xc5, 0x1d, 0x1f, - 0x3b, 0xf5, 0xa2, 0xfb, 0x83, 0x0f, 0x30, 0xd2, 0x81, 0x3f, 0x09, 0xa5, 0x91, 0x94, 0x87, 0xed, - 0x42, 0xf1, 0xc2, 0xdf, 0xbf, 0xf4, 0xab, 0x84, 0xeb, 0x55, 0x03, 0x79, 0x49, 0x88, 0xe5, 0xbd, - 0x0e, 0x8b, 0x6d, 0xdc, 0xd2, 0xda, 0x5f, 0xd5, 0xf0, 0x16, 0xb8, 0xf4, 0x77, 0x76, 0x37, 0x83, - 0xdd, 0xfd, 0x12, 0x20, 0x2b, 0x63, 0xcb, 0x20, 0xd9, 0x07, 0xda, 0x82, 0x1c, 0x1e, 0xb6, 0xb0, - 0x61, 0x3b, 0x09, 0x5b, 0x78, 0x2a, 0xcc, 0xb8, 0xeb, 0x0e, 0x27, 0x01, 0x82, 0xae, 0x18, 0xba, - 0xc5, 0xb1, 0x6e, 0x34, 0x6c, 0xe5, 0xe2, 0x7e, 0xb0, 0xfb, 0xa2, 0x03, 0x76, 0x13, 0x91, 0x38, - 0x8e, 0x49, 0x8d, 0xa0, 0xdd, 0x5b, 0x1c, 0xed, 0x26, 0xa7, 0x74, 0x16, 
0x80, 0xbb, 0xb5, 0x00, - 0xdc, 0x4d, 0x4d, 0x99, 0x66, 0x04, 0xde, 0x7d, 0xd1, 0xc1, 0xbb, 0xe9, 0x29, 0x23, 0x1e, 0x01, - 0xbc, 0xaf, 0xfa, 0x00, 0x6f, 0x96, 0x8a, 0xae, 0x46, 0x8a, 0x86, 0x20, 0xde, 0x97, 0x5d, 0xc4, - 0x9b, 0x8f, 0x44, 0xcb, 0x5c, 0x78, 0x14, 0xf2, 0x1e, 0x8c, 0x41, 0x5e, 0x06, 0x51, 0x9f, 0x8a, - 0x54, 0x31, 0x05, 0xf3, 0x1e, 0x8c, 0x61, 0xde, 0xe2, 0x14, 0x85, 0x53, 0x40, 0xef, 0x4f, 0xc3, - 0x41, 0x6f, 0x34, 0x2c, 0xe5, 0xc3, 0x9c, 0x0d, 0xf5, 0x2a, 0x11, 0xa8, 0xb7, 0x1c, 0x89, 0xd0, - 0x98, 0xfa, 0x99, 0x61, 0xef, 0x71, 0x08, 0xec, 0x65, 0x00, 0xf5, 0x46, 0xa4, 0xf2, 0x19, 0x70, - 0xef, 0x71, 0x08, 0xee, 0x5d, 0x98, 0xaa, 0x76, 0x2a, 0xf0, 0xbd, 0x17, 0x04, 0xbe, 0x28, 0x22, - 0xc7, 0xf2, 0x4e, 0x7b, 0x04, 0xf2, 0x3d, 0x8d, 0x42, 0xbe, 0x0c, 0x9d, 0x3e, 0x17, 0xa9, 0x71, - 0x0e, 0xe8, 0x7b, 0x30, 0x06, 0x7d, 0x97, 0xa6, 0x58, 0xda, 0xec, 0xd8, 0x37, 0x23, 0x66, 0x19, - 0xea, 0xdd, 0x4d, 0x66, 0x41, 0xcc, 0x4b, 0xcf, 0x90, 0x40, 0x3c, 0xe2, 0xe1, 0x48, 0x4e, 0x8c, - 0x4d, 0x53, 0x37, 0x39, 0x8a, 0x65, 0x0d, 0xe9, 0x06, 0xc1, 0x42, 0x9e, 0x37, 0x9b, 0x80, 0x93, - 0x29, 0xf6, 0xf0, 0x79, 0x30, 0xe9, 0xcf, 0x82, 0x27, 0x4b, 0x91, 0xb2, 0x1f, 0x47, 0xe5, 0x38, - 0x8e, 0xf2, 0xa1, 0xe7, 0x78, 0x10, 0x3d, 0xaf, 0x40, 0x9e, 0x60, 0x8a, 0x11, 0x60, 0xac, 0x1a, - 0x2e, 0x30, 0xbe, 0x09, 0x0b, 0x34, 0x76, 0x32, 0x8c, 0xcd, 0x03, 0x52, 0x92, 0x06, 0xa4, 0x32, - 0xf9, 0xc1, 0xd6, 0x85, 0x45, 0xa6, 0xe7, 0x61, 0xd1, 0xc7, 0xeb, 0x62, 0x15, 0x86, 0x12, 0x45, - 0x97, 0x7b, 0x93, 0x83, 0x96, 0xbf, 0x09, 0xde, 0x0a, 0x79, 0x88, 0x3a, 0x0c, 0xfc, 0x0a, 0xdf, - 0x10, 0xf8, 0x8d, 0x7f, 0x65, 0xf0, 0xeb, 0xc7, 0x5e, 0x89, 0x20, 0xf6, 0xfa, 0x8f, 0xe0, 0xed, - 0x89, 0x0b, 0x65, 0x5b, 0x7a, 0x1b, 0x73, 0x34, 0x44, 0xbf, 0x49, 0x76, 0xd2, 0xd5, 0xcf, 0x39, - 0xe6, 0x21, 0x9f, 0x84, 0xcb, 0x0d, 0x39, 0x39, 0x1e, 0x51, 0x5c, 0x20, 0xc5, 0x42, 0x3e, 0x07, - 0x52, 0x22, 0x24, 0x1e, 0x62, 0x16, 0x20, 0x0a, 0x32, 0xf9, 0x24, 0x7c, 0xd4, 0xec, 0x78, 0xe8, - 0x66, 0x0d, 
0x74, 0x07, 0x72, 0xb4, 0x58, 0xad, 0xe8, 0x86, 0xc5, 0x63, 0x42, 0x20, 0xcb, 0x61, - 0x15, 0xeb, 0xb5, 0x43, 0xc2, 0x73, 0x60, 0x58, 0x72, 0xd6, 0xe0, 0x5f, 0xbe, 0x5c, 0x23, 0x17, - 0xc8, 0x35, 0xae, 0x41, 0x8e, 0x8c, 0xde, 0x32, 0xd4, 0x16, 0xa6, 0xa5, 0xd1, 0x9c, 0xec, 0x11, - 0xa4, 0x4f, 0x04, 0x28, 0x8f, 0x84, 0x98, 0xd0, 0xb9, 0x3b, 0x26, 0x19, 0xf7, 0x41, 0xfb, 0xeb, - 0x00, 0xe7, 0xaa, 0xa5, 0xbc, 0xa7, 0xf6, 0x6d, 0xdc, 0xe6, 0xd3, 0xcd, 0x9d, 0xab, 0xd6, 0x1b, - 0x94, 0x10, 0xec, 0x38, 0x3b, 0xd2, 0xb1, 0x0f, 0x43, 0xe6, 0xfc, 0x18, 0x12, 0x55, 0x21, 0x6b, - 0x98, 0x9a, 0x6e, 0x6a, 0xf6, 0x25, 0x1d, 0x6d, 0x42, 0x76, 0xdb, 0xbb, 0xc9, 0x6c, 0x42, 0x4c, - 0xee, 0x26, 0xb3, 0x49, 0x31, 0xe5, 0x16, 0xaa, 0xd8, 0x91, 0xcd, 0x8b, 0x05, 0xe9, 0x83, 0xb8, - 0x67, 0x8b, 0xdb, 0xb8, 0xab, 0x5d, 0x60, 0x73, 0x8e, 0xc9, 0xcc, 0xb6, 0xb9, 0xcb, 0x21, 0x53, - 0xf6, 0x51, 0xc8, 0xe8, 0x49, 0x6b, 0x60, 0xe1, 0x36, 0x2f, 0x99, 0xb8, 0x6d, 0xd4, 0x80, 0x34, - 0xbe, 0xc0, 0x7d, 0xdb, 0xaa, 0x64, 0xa8, 0x0d, 0x5f, 0x19, 0xc7, 0xb0, 0xe4, 0xf7, 0x56, 0x85, - 0x58, 0xee, 0x97, 0x9f, 0xad, 0x88, 0x8c, 0xfb, 0x39, 0xbd, 0xa7, 0xd9, 0xb8, 0x67, 0xd8, 0x97, - 0x32, 0x97, 0x9f, 0xbc, 0xb2, 0xd2, 0x6d, 0x28, 0x05, 0xe3, 0x3e, 0x7a, 0x02, 0x8a, 0x26, 0xb6, - 0x55, 0xad, 0xaf, 0x04, 0xb2, 0xf6, 0x02, 0x23, 0xf2, 0x62, 0xce, 0x21, 0x3c, 0x12, 0x1a, 0xeb, - 0xd1, 0x4b, 0x90, 0xf3, 0xd2, 0x04, 0x81, 0x0e, 0x7d, 0x42, 0xad, 0xc3, 0xe3, 0x95, 0xfe, 0x2a, - 0x78, 0x2a, 0x83, 0xd5, 0x93, 0x3a, 0xa4, 0x4d, 0x6c, 0x0d, 0xba, 0xac, 0x9e, 0x51, 0xda, 0x78, - 0x7e, 0xb6, 0x2c, 0x81, 0x50, 0x07, 0x5d, 0x5b, 0xe6, 0xc2, 0xd2, 0x5b, 0x90, 0x66, 0x14, 0x94, - 0x87, 0xcc, 0xf1, 0xfe, 0xfd, 0xfd, 0x83, 0x37, 0xf6, 0xc5, 0x18, 0x02, 0x48, 0x6f, 0xd6, 0x6a, - 0xf5, 0xc3, 0xa6, 0x28, 0xa0, 0x1c, 0xa4, 0x36, 0xb7, 0x0e, 0xe4, 0xa6, 0x18, 0x27, 0x64, 0xb9, - 0xbe, 0x5b, 0xaf, 0x35, 0xc5, 0x04, 0x5a, 0x80, 0x22, 0xfb, 0x56, 0xee, 0x1d, 0xc8, 0x0f, 0x36, - 0x9b, 0x62, 0xd2, 0x47, 0x3a, 0xaa, 0xef, 0x6f, 
0xd7, 0x65, 0x31, 0x25, 0x7d, 0x0f, 0xae, 0x46, - 0xe6, 0x15, 0x5e, 0x69, 0x44, 0xf0, 0x95, 0x46, 0xa4, 0xdf, 0xc4, 0x09, 0xf2, 0x8a, 0x4a, 0x16, - 0xd0, 0xee, 0xc8, 0xc4, 0x37, 0xe6, 0xc8, 0x34, 0x46, 0x66, 0x4f, 0xc0, 0x96, 0x89, 0xcf, 0xb0, - 0xdd, 0xea, 0xb0, 0xe4, 0x85, 0xf9, 0xc6, 0xa2, 0x5c, 0xe4, 0x54, 0x2a, 0x64, 0x31, 0xb6, 0x77, - 0x70, 0xcb, 0x56, 0xd8, 0x09, 0x63, 0x40, 0x27, 0x47, 0xd8, 0x08, 0xf5, 0x88, 0x11, 0xa5, 0xb7, - 0xe7, 0x5a, 0xcb, 0x1c, 0xa4, 0xe4, 0x7a, 0x53, 0x7e, 0x53, 0x4c, 0x20, 0x04, 0x25, 0xfa, 0xa9, - 0x1c, 0xed, 0x6f, 0x1e, 0x1e, 0x35, 0x0e, 0xc8, 0x5a, 0x2e, 0x42, 0xd9, 0x59, 0x4b, 0x87, 0x98, - 0x92, 0xfe, 0x11, 0x87, 0x47, 0x23, 0x52, 0x1d, 0x74, 0x07, 0xc0, 0x1e, 0x2a, 0x26, 0x6e, 0xe9, - 0x66, 0x3b, 0xda, 0xc8, 0x9a, 0x43, 0x99, 0x72, 0xc8, 0x39, 0x9b, 0x7f, 0x59, 0x13, 0x2a, 0x6a, - 0xe8, 0x15, 0xae, 0x94, 0xcc, 0xca, 0x81, 0x77, 0xd7, 0x43, 0x0a, 0x47, 0xb8, 0x45, 0x14, 0xd3, - 0xb5, 0xa5, 0x8a, 0x29, 0x3f, 0x7a, 0xe0, 0x07, 0xc4, 0x03, 0x1a, 0x54, 0x66, 0x2e, 0xbd, 0xfa, - 0x20, 0x33, 0x23, 0x58, 0xe8, 0x4d, 0x78, 0x74, 0x24, 0x26, 0xba, 0x4a, 0x53, 0xb3, 0x86, 0xc6, - 0x47, 0x82, 0xa1, 0x91, 0xab, 0x96, 0x7e, 0x9f, 0xf0, 0x2f, 0x6c, 0x30, 0xb3, 0x3b, 0x80, 0xb4, - 0x65, 0xab, 0xf6, 0xc0, 0xe2, 0x06, 0xf7, 0xd2, 0xac, 0x69, 0xe2, 0x9a, 0xf3, 0x71, 0x44, 0xc5, - 0x65, 0xae, 0xe6, 0xbb, 0xf5, 0xb6, 0x88, 0x83, 0x0d, 0x2e, 0x4e, 0xf4, 0x91, 0xf1, 0x7c, 0x4e, - 0x5c, 0xba, 0x0b, 0x68, 0x3c, 0x81, 0x0e, 0x29, 0x99, 0x08, 0x61, 0x25, 0x93, 0x3f, 0x08, 0xf0, - 0xd8, 0x84, 0x64, 0x19, 0xbd, 0x3e, 0xb2, 0xcf, 0x2f, 0xcf, 0x93, 0x6a, 0xaf, 0x31, 0x5a, 0x70, - 0xa7, 0xa5, 0x5b, 0x50, 0xf0, 0xd3, 0x67, 0x9b, 0xe4, 0x97, 0x71, 0xcf, 0xe7, 0x07, 0x6b, 0x3b, - 0x5e, 0xf8, 0x13, 0xbe, 0x66, 0xf8, 0x0b, 0xda, 0x59, 0x7c, 0x4e, 0x3b, 0x3b, 0x0a, 0xb3, 0xb3, - 0xc4, 0x5c, 0x59, 0xe5, 0x5c, 0xd6, 0x96, 0xfc, 0x7a, 0xd6, 0x16, 0x38, 0x70, 0xa9, 0x60, 0xda, - 0xfa, 0x26, 0x80, 0x57, 0xf0, 0x22, 0x01, 0xc9, 0xd4, 0x07, 0xfd, 0x36, 0xb5, 0x80, 
0x94, 0xcc, - 0x1a, 0xe8, 0x36, 0xa4, 0x88, 0x25, 0x39, 0xeb, 0x34, 0xee, 0x54, 0x89, 0x25, 0xf8, 0x0a, 0x66, - 0x8c, 0x5b, 0xd2, 0x00, 0x8d, 0x57, 0xd4, 0x23, 0xba, 0x78, 0x35, 0xd8, 0xc5, 0xe3, 0x91, 0xb5, - 0xf9, 0xf0, 0xae, 0xde, 0x87, 0x14, 0xdd, 0x79, 0x92, 0x70, 0xd1, 0x6b, 0x1c, 0x0e, 0x7b, 0xc8, - 0x37, 0xfa, 0x19, 0x80, 0x6a, 0xdb, 0xa6, 0x76, 0x3a, 0xf0, 0x3a, 0x58, 0x09, 0xb7, 0x9c, 0x4d, - 0x87, 0x6f, 0xeb, 0x1a, 0x37, 0xa1, 0x25, 0x4f, 0xd4, 0x67, 0x46, 0x3e, 0x85, 0xd2, 0x3e, 0x94, - 0x82, 0xb2, 0x4e, 0xa2, 0xce, 0xc6, 0x10, 0x4c, 0xd4, 0x19, 0xee, 0xe2, 0x89, 0xba, 0x9b, 0xe6, - 0x27, 0xd8, 0x5d, 0x15, 0x6d, 0x48, 0xff, 0x15, 0xa0, 0xe0, 0x37, 0xbc, 0x6f, 0x38, 0xfd, 0x9c, - 0x92, 0x71, 0x5f, 0x1d, 0xcb, 0x3e, 0x33, 0xe7, 0xaa, 0x75, 0xfc, 0x6d, 0x26, 0x9f, 0x1f, 0x08, - 0x90, 0x75, 0x27, 0x1f, 0xbc, 0xb6, 0x0a, 0xdc, 0xf3, 0xb1, 0xb5, 0x8b, 0xfb, 0xef, 0x9a, 0xd8, - 0xad, 0x5e, 0xc2, 0xbd, 0xd5, 0xbb, 0xeb, 0xe6, 0x4a, 0x51, 0x15, 0x3d, 0xff, 0x4a, 0x73, 0x9b, - 0x72, 0x52, 0xc3, 0x5f, 0xf3, 0x71, 0x90, 0x24, 0x01, 0xfd, 0x00, 0xd2, 0x6a, 0xcb, 0xad, 0x63, - 0x96, 0x42, 0x0a, 0x7c, 0x0e, 0xeb, 0x5a, 0x73, 0xb8, 0x49, 0x39, 0x65, 0x2e, 0xc1, 0x47, 0x15, - 0x77, 0x46, 0x25, 0xbd, 0x46, 0xf4, 0x32, 0x9e, 0xa0, 0x47, 0x2c, 0x01, 0x1c, 0xef, 0x3f, 0x38, - 0xd8, 0xde, 0xb9, 0xb7, 0x53, 0xdf, 0xe6, 0xd9, 0xd2, 0xf6, 0x76, 0x7d, 0x5b, 0x8c, 0x13, 0x3e, - 0xb9, 0xfe, 0xe0, 0xe0, 0xa4, 0xbe, 0x2d, 0x26, 0xa4, 0xbb, 0x90, 0x73, 0xbd, 0x0a, 0x41, 0xf5, - 0x4e, 0x4d, 0x56, 0xe0, 0x67, 0x9b, 0x97, 0xd8, 0x97, 0x20, 0x65, 0xe8, 0xef, 0xf1, 0x2b, 0xb6, - 0x84, 0xcc, 0x1a, 0x52, 0x1b, 0xca, 0x23, 0x2e, 0x09, 0xdd, 0x85, 0x8c, 0x31, 0x38, 0x55, 0x1c, - 0xa3, 0x1d, 0xa9, 0x60, 0x3b, 0x78, 0x71, 0x70, 0xda, 0xd5, 0x5a, 0xf7, 0xf1, 0xa5, 0xb3, 0x4c, - 0xc6, 0xe0, 0xf4, 0x3e, 0xb3, 0x6d, 0xd6, 0x4b, 0xdc, 0xdf, 0xcb, 0x05, 0x64, 0x9d, 0xa3, 0x8a, - 0x7e, 0x08, 0x39, 0xd7, 0xdb, 0xb9, 0x57, 0xe4, 0x91, 0x6e, 0x92, 0xab, 0xf7, 0x44, 0xd0, 0x4d, - 0x58, 0xb0, 0xb4, 0xf3, 
0xbe, 0x53, 0xbf, 0x67, 0x15, 0x9b, 0x38, 0x3d, 0x33, 0x65, 0xf6, 0x63, - 0xcf, 0x29, 0x2a, 0x90, 0x20, 0x27, 0x8e, 0xfa, 0x8a, 0x6f, 0x73, 0x00, 0x21, 0xc1, 0x38, 0x11, - 0x16, 0x8c, 0x7f, 0x11, 0x87, 0xbc, 0xef, 0x56, 0x00, 0x7d, 0xdf, 0xe7, 0xb8, 0x4a, 0x21, 0x51, - 0xc4, 0xc7, 0xeb, 0xdd, 0x41, 0x07, 0x27, 0x16, 0x9f, 0x7f, 0x62, 0x51, 0x97, 0x30, 0xce, 0xe5, - 0x42, 0x72, 0xee, 0xcb, 0x85, 0xe7, 0x00, 0xd9, 0xba, 0xad, 0x76, 0x95, 0x0b, 0xdd, 0xd6, 0xfa, - 0xe7, 0x0a, 0x33, 0x0d, 0xe6, 0x66, 0x44, 0xfa, 0xe7, 0x84, 0xfe, 0x38, 0xa4, 0x56, 0xf2, 0x73, - 0x01, 0xb2, 0x2e, 0xa2, 0x9b, 0xf7, 0x86, 0xfa, 0x0a, 0xa4, 0x39, 0x68, 0x61, 0x57, 0xd4, 0xbc, - 0x15, 0x7a, 0x8b, 0x52, 0x85, 0x6c, 0x0f, 0xdb, 0x2a, 0xf5, 0x99, 0x2c, 0x02, 0xba, 0xed, 0x9b, - 0x2f, 0x43, 0xde, 0x77, 0xbb, 0x4f, 0xdc, 0xe8, 0x7e, 0xfd, 0x0d, 0x31, 0x56, 0xcd, 0x7c, 0xf8, - 0xf1, 0x6a, 0x62, 0x1f, 0xbf, 0x47, 0x4e, 0x98, 0x5c, 0xaf, 0x35, 0xea, 0xb5, 0xfb, 0xa2, 0x50, - 0xcd, 0x7f, 0xf8, 0xf1, 0x6a, 0x46, 0xc6, 0xb4, 0x80, 0x7e, 0xf3, 0x3e, 0x94, 0x47, 0x36, 0x26, - 0x78, 0xa0, 0x11, 0x94, 0xb6, 0x8f, 0x0f, 0xf7, 0x76, 0x6a, 0x9b, 0xcd, 0xba, 0x72, 0x72, 0xd0, - 0xac, 0x8b, 0x02, 0x7a, 0x14, 0x16, 0xf7, 0x76, 0x7e, 0xd4, 0x68, 0x2a, 0xb5, 0xbd, 0x9d, 0xfa, - 0x7e, 0x53, 0xd9, 0x6c, 0x36, 0x37, 0x6b, 0xf7, 0xc5, 0xf8, 0xc6, 0x1f, 0xf3, 0x50, 0xde, 0xdc, - 0xaa, 0xed, 0x10, 0xd8, 0xa6, 0xb5, 0x54, 0xea, 0x1e, 0x6a, 0x90, 0xa4, 0xa5, 0xc0, 0x89, 0x6f, - 0xfc, 0xaa, 0x93, 0x6f, 0x45, 0xd0, 0x3d, 0x48, 0xd1, 0x2a, 0x21, 0x9a, 0xfc, 0xe8, 0xaf, 0x3a, - 0xe5, 0x9a, 0x84, 0x0c, 0x86, 0x1e, 0xa7, 0x89, 0xaf, 0x00, 0xab, 0x93, 0x6f, 0x4d, 0xd0, 0x1e, - 0x64, 0x9c, 0x22, 0xd1, 0xb4, 0xa7, 0x79, 0xd5, 0xa9, 0x57, 0x19, 0x64, 0x6a, 0xac, 0xd8, 0x36, - 0xf9, 0x81, 0x60, 0x75, 0xca, 0x7d, 0x0a, 0xda, 0x81, 0x34, 0x2f, 0x74, 0x4c, 0x79, 0xf3, 0x57, - 0x9d, 0x76, 0x43, 0x82, 0x64, 0xc8, 0x79, 0x65, 0xcc, 0xe9, 0xcf, 0x1e, 0xab, 0x33, 0x5c, 0x15, - 0xa1, 0xb7, 0xa0, 0x18, 0x2c, 0xa8, 0xcc, 0xf6, 0xae, 0xb0, 
0x3a, 0xe3, 0x5d, 0x0c, 0xd1, 0x1f, - 0xac, 0xae, 0xcc, 0xf6, 0xce, 0xb0, 0x3a, 0xe3, 0xd5, 0x0c, 0x7a, 0x07, 0x16, 0xc6, 0xab, 0x1f, - 0xb3, 0x3f, 0x3b, 0xac, 0xce, 0x71, 0x59, 0x83, 0x7a, 0x80, 0x42, 0xaa, 0x26, 0x73, 0xbc, 0x42, - 0xac, 0xce, 0x73, 0x77, 0x83, 0xda, 0x50, 0x1e, 0xad, 0x44, 0xcc, 0xfa, 0x2a, 0xb1, 0x3a, 0xf3, - 0x3d, 0x0e, 0xeb, 0x25, 0x08, 0xcb, 0x67, 0x7d, 0xa5, 0x58, 0x9d, 0xf9, 0x5a, 0x07, 0x1d, 0x03, - 0xf8, 0x60, 0xe5, 0x0c, 0xaf, 0x16, 0xab, 0xb3, 0x5c, 0xf0, 0x20, 0x03, 0x16, 0xc3, 0xf0, 0xe6, - 0x3c, 0x8f, 0x18, 0xab, 0x73, 0xdd, 0xfb, 0x10, 0x7b, 0x0e, 0x22, 0xc7, 0xd9, 0x1e, 0x35, 0x56, - 0x67, 0xbc, 0x00, 0xda, 0xaa, 0x7f, 0xf2, 0xf9, 0xb2, 0xf0, 0xe9, 0xe7, 0xcb, 0xc2, 0xbf, 0x3f, - 0x5f, 0x16, 0x3e, 0xfa, 0x62, 0x39, 0xf6, 0xe9, 0x17, 0xcb, 0xb1, 0x7f, 0x7e, 0xb1, 0x1c, 0xfb, - 0xf1, 0xb3, 0xe7, 0x9a, 0xdd, 0x19, 0x9c, 0xae, 0xb5, 0xf4, 0xde, 0xba, 0xff, 0x1d, 0x78, 0xd8, - 0xeb, 0xf3, 0xd3, 0x34, 0x0d, 0xa8, 0xb7, 0xfe, 0x17, 0x00, 0x00, 0xff, 0xff, 0x4c, 0xb4, 0x7f, - 0x53, 0x9d, 0x2e, 0x00, 0x00, + 0x11, 0xd7, 0xe8, 0x5b, 0x2d, 0x4b, 0x1a, 0x3f, 0x9b, 0x45, 0x2b, 0x76, 0x6d, 0x33, 0x14, 0xb0, + 0x2c, 0x60, 0x13, 0x6f, 0x16, 0x96, 0x2c, 0x84, 0x92, 0x65, 0x6d, 0x64, 0xaf, 0xd7, 0x36, 0x63, + 0xd9, 0x14, 0xf9, 0x60, 0x18, 0x4b, 0xcf, 0xd6, 0xb0, 0x92, 0x66, 0x98, 0x19, 0x19, 0x99, 0x63, + 0x12, 0xaa, 0x52, 0x1c, 0x52, 0xdc, 0xc2, 0x21, 0xdc, 0x92, 0xaa, 0xfc, 0x09, 0xc9, 0x25, 0xa7, + 0x1c, 0x38, 0xe4, 0xc0, 0x29, 0x95, 0x13, 0x49, 0xc1, 0x8d, 0x7f, 0x20, 0xb7, 0x54, 0xea, 0x7d, + 0xcc, 0x97, 0x34, 0xa3, 0x0f, 0xa0, 0xa8, 0x4a, 0x15, 0xb7, 0x79, 0x3d, 0xdd, 0xfd, 0xbe, 0xfa, + 0x75, 0xf7, 0xaf, 0xdf, 0x83, 0xc7, 0x6c, 0xdc, 0x6f, 0x63, 0xb3, 0xa7, 0xf5, 0xed, 0x0d, 0xf5, + 0xb4, 0xa5, 0x6d, 0xd8, 0x97, 0x06, 0xb6, 0xd6, 0x0d, 0x53, 0xb7, 0x75, 0x54, 0xf2, 0x7e, 0xae, + 0x93, 0x9f, 0x95, 0xeb, 0x3e, 0xee, 0x96, 0x79, 0x69, 0xd8, 0xfa, 0x86, 0x61, 0xea, 0xfa, 0x19, + 0xe3, 0xaf, 0x5c, 0x1b, 0xff, 0xfd, 0x10, 0x5f, 0x72, 0x6d, 0x01, 
0x61, 0xda, 0xcb, 0x86, 0xa1, + 0x9a, 0x6a, 0xcf, 0xf9, 0xbd, 0x7a, 0xae, 0xeb, 0xe7, 0x5d, 0xbc, 0x41, 0x5b, 0xa7, 0x83, 0xb3, + 0x0d, 0x5b, 0xeb, 0x61, 0xcb, 0x56, 0x7b, 0x06, 0x67, 0x58, 0x3e, 0xd7, 0xcf, 0x75, 0xfa, 0xb9, + 0x41, 0xbe, 0x18, 0x55, 0xfa, 0x4b, 0x0e, 0x32, 0x32, 0x7e, 0x77, 0x80, 0x2d, 0x1b, 0x6d, 0x42, + 0x12, 0xb7, 0x3a, 0x7a, 0x59, 0x58, 0x13, 0x6e, 0xe4, 0x37, 0xaf, 0xad, 0x8f, 0x0c, 0x7f, 0x9d, + 0xf3, 0xd5, 0x5b, 0x1d, 0xbd, 0x11, 0x93, 0x29, 0x2f, 0xba, 0x0d, 0xa9, 0xb3, 0xee, 0xc0, 0xea, + 0x94, 0xe3, 0x54, 0xe8, 0x7a, 0x94, 0xd0, 0x3d, 0xc2, 0xd4, 0x88, 0xc9, 0x8c, 0x9b, 0x74, 0xa5, + 0xf5, 0xcf, 0xf4, 0x72, 0x62, 0x72, 0x57, 0x3b, 0xfd, 0x33, 0xda, 0x15, 0xe1, 0x45, 0x5b, 0x00, + 0x5a, 0x5f, 0xb3, 0x95, 0x56, 0x47, 0xd5, 0xfa, 0xe5, 0x24, 0x95, 0x7c, 0x3c, 0x5a, 0x52, 0xb3, + 0x6b, 0x84, 0xb1, 0x11, 0x93, 0x73, 0x9a, 0xd3, 0x20, 0xc3, 0x7d, 0x77, 0x80, 0xcd, 0xcb, 0x72, + 0x6a, 0xf2, 0x70, 0x5f, 0x27, 0x4c, 0x64, 0xb8, 0x94, 0x1b, 0xbd, 0x02, 0xd9, 0x56, 0x07, 0xb7, + 0x1e, 0x2a, 0xf6, 0xb0, 0x9c, 0xa1, 0x92, 0xab, 0x51, 0x92, 0x35, 0xc2, 0xd7, 0x1c, 0x36, 0x62, + 0x72, 0xa6, 0xc5, 0x3e, 0xd1, 0x1d, 0x48, 0xb7, 0xf4, 0x5e, 0x4f, 0xb3, 0xcb, 0x40, 0x65, 0x57, + 0x22, 0x65, 0x29, 0x57, 0x23, 0x26, 0x73, 0x7e, 0xb4, 0x0f, 0xc5, 0xae, 0x66, 0xd9, 0x8a, 0xd5, + 0x57, 0x0d, 0xab, 0xa3, 0xdb, 0x56, 0x39, 0x4f, 0x35, 0x3c, 0x19, 0xa5, 0x61, 0x4f, 0xb3, 0xec, + 0x23, 0x87, 0xb9, 0x11, 0x93, 0x0b, 0x5d, 0x3f, 0x81, 0xe8, 0xd3, 0xcf, 0xce, 0xb0, 0xe9, 0x2a, + 0x2c, 0x2f, 0x4c, 0xd6, 0x77, 0x40, 0xb8, 0x1d, 0x79, 0xa2, 0x4f, 0xf7, 0x13, 0xd0, 0xcf, 0x60, + 0xa9, 0xab, 0xab, 0x6d, 0x57, 0x9d, 0xd2, 0xea, 0x0c, 0xfa, 0x0f, 0xcb, 0x05, 0xaa, 0xf4, 0x99, + 0xc8, 0x41, 0xea, 0x6a, 0xdb, 0x51, 0x51, 0x23, 0x02, 0x8d, 0x98, 0xbc, 0xd8, 0x1d, 0x25, 0xa2, + 0xb7, 0x60, 0x59, 0x35, 0x8c, 0xee, 0xe5, 0xa8, 0xf6, 0x22, 0xd5, 0x7e, 0x33, 0x4a, 0x7b, 0x95, + 0xc8, 0x8c, 0xaa, 0x47, 0xea, 0x18, 0x15, 0x35, 0x41, 0x34, 0x4c, 0x6c, 0xa8, 0x26, 0x56, 0x0c, + 0x53, 
0x37, 0x74, 0x4b, 0xed, 0x96, 0x4b, 0x54, 0xf7, 0xd3, 0x51, 0xba, 0x0f, 0x19, 0xff, 0x21, + 0x67, 0x6f, 0xc4, 0xe4, 0x92, 0x11, 0x24, 0x31, 0xad, 0x7a, 0x0b, 0x5b, 0x96, 0xa7, 0x55, 0x9c, + 0xa6, 0x95, 0xf2, 0x07, 0xb5, 0x06, 0x48, 0xa8, 0x0e, 0x79, 0x3c, 0x24, 0xe2, 0xca, 0x85, 0x6e, + 0xe3, 0xf2, 0x22, 0x55, 0x28, 0x45, 0x9e, 0x50, 0xca, 0x7a, 0xa2, 0xdb, 0xb8, 0x11, 0x93, 0x01, + 0xbb, 0x2d, 0xa4, 0xc2, 0x23, 0x17, 0xd8, 0xd4, 0xce, 0x2e, 0xa9, 0x1a, 0x85, 0xfe, 0xb1, 0x34, + 0xbd, 0x5f, 0x46, 0x54, 0xe1, 0xb3, 0x51, 0x0a, 0x4f, 0xa8, 0x10, 0x51, 0x51, 0x77, 0x44, 0x1a, + 0x31, 0x79, 0xe9, 0x62, 0x9c, 0x4c, 0x4c, 0xec, 0x4c, 0xeb, 0xab, 0x5d, 0xed, 0x7d, 0xac, 0x9c, + 0x76, 0xf5, 0xd6, 0xc3, 0xf2, 0xd2, 0x64, 0x13, 0xbb, 0xc7, 0xb9, 0xb7, 0x08, 0x33, 0x31, 0xb1, + 0x33, 0x3f, 0x61, 0x2b, 0x03, 0xa9, 0x0b, 0xb5, 0x3b, 0xc0, 0xbb, 0xc9, 0x6c, 0x5a, 0xcc, 0xec, + 0x26, 0xb3, 0x59, 0x31, 0xb7, 0x9b, 0xcc, 0xe6, 0x44, 0x90, 0x9e, 0x86, 0xbc, 0xcf, 0x25, 0xa1, + 0x32, 0x64, 0x7a, 0xd8, 0xb2, 0xd4, 0x73, 0x4c, 0x3d, 0x58, 0x4e, 0x76, 0x9a, 0x52, 0x11, 0x16, + 0xfc, 0x6e, 0x48, 0xfa, 0x48, 0x70, 0x25, 0x89, 0x87, 0x21, 0x92, 0x17, 0xd8, 0xa4, 0x0b, 0xc1, + 0x25, 0x79, 0x13, 0x3d, 0x01, 0x05, 0x3a, 0x09, 0xc5, 0xf9, 0x4f, 0xdc, 0x5c, 0x52, 0x5e, 0xa0, + 0xc4, 0x13, 0xce, 0xb4, 0x0a, 0x79, 0x63, 0xd3, 0x70, 0x59, 0x12, 0x94, 0x05, 0x8c, 0x4d, 0xc3, + 0x61, 0x78, 0x1c, 0x16, 0xc8, 0x8c, 0x5d, 0x8e, 0x24, 0xed, 0x24, 0x4f, 0x68, 0x9c, 0x45, 0xfa, + 0x7b, 0x1c, 0xc4, 0x51, 0xd7, 0x85, 0xee, 0x40, 0x92, 0x78, 0x71, 0xee, 0x90, 0x2b, 0xeb, 0xcc, + 0xc5, 0xaf, 0x3b, 0x2e, 0x7e, 0xbd, 0xe9, 0xb8, 0xf8, 0xad, 0xec, 0xa7, 0x9f, 0xaf, 0xc6, 0x3e, + 0xfa, 0xd7, 0xaa, 0x20, 0x53, 0x09, 0x74, 0x95, 0x38, 0x2c, 0x55, 0xeb, 0x2b, 0x5a, 0x9b, 0x0e, + 0x39, 0x47, 0xbc, 0x91, 0xaa, 0xf5, 0x77, 0xda, 0x68, 0x0f, 0xc4, 0x96, 0xde, 0xb7, 0x70, 0xdf, + 0x1a, 0x58, 0x0a, 0x0b, 0x21, 0xdc, 0x0d, 0x07, 0x9c, 0x29, 0x0b, 0x64, 0x35, 0x87, 0xf3, 0x90, + 0x32, 0xca, 0xa5, 0x56, 0x90, 0x80, 0xee, 
0x01, 0x5c, 0xa8, 0x5d, 0xad, 0xad, 0xda, 0xba, 0x69, + 0x95, 0x93, 0x6b, 0x89, 0x1b, 0xf9, 0xcd, 0xb5, 0xb1, 0xad, 0x3e, 0x71, 0x58, 0x8e, 0x8d, 0xb6, + 0x6a, 0xe3, 0xad, 0x24, 0x19, 0xae, 0xec, 0x93, 0x44, 0x4f, 0x41, 0x49, 0x35, 0x0c, 0xc5, 0xb2, + 0x55, 0x1b, 0x2b, 0xa7, 0x97, 0x36, 0xb6, 0xa8, 0x8b, 0x5e, 0x90, 0x0b, 0xaa, 0x61, 0x1c, 0x11, + 0xea, 0x16, 0x21, 0xa2, 0x27, 0xa1, 0x48, 0xbc, 0xb9, 0xa6, 0x76, 0x95, 0x0e, 0xd6, 0xce, 0x3b, + 0x76, 0x39, 0xbd, 0x26, 0xdc, 0x48, 0xc8, 0x05, 0x4e, 0x6d, 0x50, 0xa2, 0xd4, 0x76, 0x77, 0x9c, + 0x7a, 0x72, 0x84, 0x20, 0xd9, 0x56, 0x6d, 0x95, 0xae, 0xe4, 0x82, 0x4c, 0xbf, 0x09, 0xcd, 0x50, + 0xed, 0x0e, 0x5f, 0x1f, 0xfa, 0x8d, 0xae, 0x40, 0x9a, 0xab, 0x4d, 0x50, 0xb5, 0xbc, 0x85, 0x96, + 0x21, 0x65, 0x98, 0xfa, 0x05, 0xa6, 0x5b, 0x97, 0x95, 0x59, 0x43, 0x92, 0xa1, 0x18, 0xf4, 0xfa, + 0xa8, 0x08, 0x71, 0x7b, 0xc8, 0x7b, 0x89, 0xdb, 0x43, 0xf4, 0x02, 0x24, 0xc9, 0x42, 0xd2, 0x3e, + 0x8a, 0x21, 0x71, 0x8e, 0xcb, 0x35, 0x2f, 0x0d, 0x2c, 0x53, 0x4e, 0xa9, 0x04, 0x85, 0x40, 0x34, + 0x90, 0xae, 0xc0, 0x72, 0x98, 0x73, 0x97, 0x3a, 0x2e, 0x3d, 0xe0, 0xa4, 0xd1, 0x6d, 0xc8, 0xba, + 0xde, 0x9d, 0x19, 0xce, 0xd5, 0xb1, 0x6e, 0x1d, 0x66, 0xd9, 0x65, 0x25, 0x16, 0x43, 0x36, 0xa0, + 0xa3, 0xf2, 0x58, 0xbe, 0x20, 0x67, 0x54, 0xc3, 0x68, 0xa8, 0x56, 0x47, 0x7a, 0x1b, 0xca, 0x51, + 0x9e, 0xdb, 0xb7, 0x60, 0x02, 0x35, 0x7b, 0x67, 0xc1, 0xae, 0x40, 0xfa, 0x4c, 0x37, 0x7b, 0xaa, + 0x4d, 0x95, 0x15, 0x64, 0xde, 0x22, 0x0b, 0xc9, 0xbc, 0x78, 0x82, 0x92, 0x59, 0x43, 0x52, 0xe0, + 0x6a, 0xa4, 0xf7, 0x26, 0x22, 0x5a, 0xbf, 0x8d, 0xd9, 0xb2, 0x16, 0x64, 0xd6, 0xf0, 0x14, 0xb1, + 0xc1, 0xb2, 0x06, 0xe9, 0xd6, 0xa2, 0x73, 0xa5, 0xfa, 0x73, 0x32, 0x6f, 0x49, 0x1f, 0x27, 0xe0, + 0x4a, 0xb8, 0x0f, 0x47, 0x6b, 0xb0, 0xd0, 0x53, 0x87, 0x8a, 0x3d, 0xe4, 0x66, 0x27, 0xd0, 0x8d, + 0x87, 0x9e, 0x3a, 0x6c, 0x0e, 0x99, 0xcd, 0x89, 0x90, 0xb0, 0x87, 0x56, 0x39, 0xbe, 0x96, 0xb8, + 0xb1, 0x20, 0x93, 0x4f, 0x74, 0x0c, 0x8b, 0x5d, 0xbd, 0xa5, 0x76, 0x95, 0xae, 
0x6a, 0xd9, 0x0a, + 0x0f, 0xee, 0xec, 0x10, 0x3d, 0x31, 0xb6, 0xd8, 0xcc, 0x1b, 0xe3, 0x36, 0xdb, 0x4f, 0xe2, 0x70, + 0xb8, 0xfd, 0x97, 0xa8, 0x8e, 0x3d, 0xd5, 0xd9, 0x6a, 0xb4, 0x0d, 0xf9, 0x9e, 0x66, 0x9d, 0xe2, + 0x8e, 0x7a, 0xa1, 0xe9, 0x26, 0x3f, 0x4d, 0xe3, 0x46, 0xf3, 0xc0, 0xe3, 0xe1, 0x9a, 0xfc, 0x62, + 0xbe, 0x2d, 0x49, 0x05, 0x6c, 0xd8, 0xf1, 0x26, 0xe9, 0xb9, 0xbd, 0xc9, 0x0b, 0xb0, 0xdc, 0xc7, + 0x43, 0x5b, 0xf1, 0xce, 0x2b, 0xb3, 0x93, 0x0c, 0x5d, 0x7a, 0x44, 0xfe, 0xb9, 0x27, 0xdc, 0x22, + 0x26, 0x83, 0x9e, 0xa1, 0x51, 0xd0, 0xd0, 0x2d, 0x6c, 0x2a, 0x6a, 0xbb, 0x6d, 0x62, 0xcb, 0x2a, + 0x67, 0x29, 0x77, 0xc9, 0xa1, 0x57, 0x19, 0x59, 0xfa, 0x8d, 0x7f, 0x6b, 0x82, 0x51, 0x8f, 0x2f, + 0xbc, 0xe0, 0x2d, 0xfc, 0x11, 0x2c, 0x73, 0xf9, 0x76, 0x60, 0xed, 0x59, 0xf6, 0xf9, 0xd8, 0xf8, + 0xf9, 0x1a, 0x5d, 0x73, 0xe4, 0x88, 0x47, 0x2f, 0x7b, 0xe2, 0xeb, 0x2d, 0x3b, 0x82, 0x24, 0x5d, + 0x94, 0x24, 0x73, 0x31, 0xe4, 0xfb, 0xff, 0x6d, 0x2b, 0x5e, 0x83, 0xc5, 0xb1, 0x0c, 0xc2, 0x9d, + 0x97, 0x10, 0x3a, 0xaf, 0xb8, 0x7f, 0x5e, 0xd2, 0xef, 0x05, 0xa8, 0x44, 0xa7, 0x0c, 0xa1, 0xaa, + 0x9e, 0x85, 0x45, 0x77, 0x2e, 0xee, 0xf8, 0xd8, 0x99, 0x16, 0xdd, 0x1f, 0x7c, 0x80, 0x91, 0xee, + 0xf9, 0x49, 0x28, 0x8e, 0x24, 0x34, 0x6c, 0x17, 0x0a, 0x17, 0xfe, 0xfe, 0xa5, 0x5f, 0x27, 0x5c, + 0x9f, 0x19, 0xc8, 0x3a, 0x42, 0x0c, 0xed, 0x75, 0x58, 0x6a, 0xe3, 0x96, 0xd6, 0xfe, 0xba, 0x76, + 0xb6, 0xc8, 0xa5, 0xbf, 0x37, 0xb3, 0x71, 0x33, 0xfb, 0x2d, 0x40, 0x56, 0xc6, 0x96, 0x41, 0x52, + 0x09, 0xb4, 0x05, 0x39, 0x3c, 0x6c, 0x61, 0xc3, 0x76, 0xb2, 0xaf, 0xf0, 0xbc, 0x96, 0x71, 0xd7, + 0x1d, 0x4e, 0x82, 0xea, 0x5c, 0x31, 0x74, 0x8b, 0x03, 0xd7, 0x68, 0x0c, 0xca, 0xc5, 0xfd, 0xc8, + 0xf5, 0x45, 0x07, 0xb9, 0x26, 0x22, 0x41, 0x19, 0x93, 0x1a, 0x81, 0xae, 0xb7, 0x38, 0x74, 0x4d, + 0x4e, 0xe9, 0x2c, 0x80, 0x5d, 0x6b, 0x01, 0xec, 0x9a, 0x9a, 0x32, 0xcd, 0x08, 0xf0, 0xfa, 0xa2, + 0x03, 0x5e, 0xd3, 0x53, 0x46, 0x3c, 0x82, 0x5e, 0x5f, 0xf5, 0xa1, 0xd7, 0x2c, 0x15, 0x5d, 0x8b, + 0x14, 0x0d, 0x81, 
0xaf, 0x2f, 0xbb, 0xf0, 0x35, 0x1f, 0x09, 0x7d, 0xb9, 0xf0, 0x28, 0x7e, 0x3d, + 0x18, 0xc3, 0xaf, 0x0c, 0x6f, 0x3e, 0x15, 0xa9, 0x62, 0x0a, 0x80, 0x3d, 0x18, 0x03, 0xb0, 0x85, + 0x29, 0x0a, 0xa7, 0x20, 0xd8, 0x9f, 0x87, 0x23, 0xd8, 0x68, 0x8c, 0xc9, 0x87, 0x39, 0x1b, 0x84, + 0x55, 0x22, 0x20, 0x6c, 0x29, 0x12, 0x6e, 0x31, 0xf5, 0x33, 0x63, 0xd8, 0xe3, 0x10, 0x0c, 0xcb, + 0xd0, 0xe6, 0x8d, 0x48, 0xe5, 0x33, 0x80, 0xd8, 0xe3, 0x10, 0x10, 0xbb, 0x38, 0x55, 0xed, 0x54, + 0x14, 0x7b, 0x2f, 0x88, 0x62, 0x51, 0x44, 0xc2, 0xe4, 0x9d, 0xf6, 0x08, 0x18, 0x7b, 0x1a, 0x05, + 0x63, 0x19, 0xd4, 0x7c, 0x2e, 0x52, 0xe3, 0x1c, 0x38, 0xf6, 0x60, 0x0c, 0xc7, 0x2e, 0x4f, 0xb1, + 0xb4, 0xd9, 0x81, 0x6c, 0x46, 0xcc, 0x32, 0x08, 0xbb, 0x9b, 0xcc, 0x82, 0x98, 0x97, 0x9e, 0x21, + 0x71, 0x77, 0xc4, 0xc3, 0x91, 0x04, 0x17, 0x9b, 0xa6, 0x6e, 0x72, 0x48, 0xca, 0x1a, 0xd2, 0x0d, + 0x02, 0x6c, 0x3c, 0x6f, 0x36, 0x01, 0xf4, 0x52, 0x20, 0xe1, 0xf3, 0x60, 0xd2, 0x9f, 0x05, 0x4f, + 0x96, 0xc2, 0x5e, 0x3f, 0x28, 0xca, 0x71, 0x50, 0xe4, 0x83, 0xc2, 0xf1, 0x20, 0x14, 0x5e, 0x85, + 0x3c, 0x01, 0x08, 0x23, 0x28, 0x57, 0x35, 0x5c, 0x94, 0x7b, 0x13, 0x16, 0x69, 0xa8, 0x64, 0x80, + 0x99, 0x07, 0xa4, 0x24, 0x0d, 0x48, 0x25, 0xf2, 0x83, 0xad, 0x0b, 0x8b, 0x4c, 0xcf, 0xc3, 0x92, + 0x8f, 0xd7, 0x05, 0x1e, 0x0c, 0xf2, 0x89, 0x2e, 0x77, 0x95, 0x23, 0x90, 0xbf, 0x09, 0xde, 0x0a, + 0x79, 0xf0, 0x38, 0x0c, 0xc9, 0x0a, 0xdf, 0x12, 0x92, 0x8d, 0x7f, 0x6d, 0x24, 0xeb, 0x07, 0x52, + 0x89, 0x20, 0x90, 0xfa, 0x8f, 0xe0, 0xed, 0x89, 0x8b, 0x4b, 0x5b, 0x7a, 0x1b, 0x73, 0x68, 0x43, + 0xbf, 0x49, 0x32, 0xd2, 0xd5, 0xcf, 0x39, 0x80, 0x21, 0x9f, 0x84, 0xcb, 0x0d, 0x39, 0x39, 0x1e, + 0x51, 0x5c, 0x54, 0xc4, 0x42, 0x3e, 0x47, 0x45, 0x22, 0x24, 0x1e, 0x62, 0x16, 0x20, 0x16, 0x64, + 0xf2, 0x49, 0xf8, 0xa8, 0xd9, 0xf1, 0xd0, 0xcd, 0x1a, 0xe8, 0x0e, 0xe4, 0x68, 0xe5, 0x59, 0xd1, + 0x0d, 0x8b, 0xc7, 0x84, 0x40, 0x52, 0xc3, 0xca, 0xcf, 0xeb, 0x87, 0x84, 0xe7, 0xc0, 0xb0, 0xe4, + 0xac, 0xc1, 0xbf, 0x7c, 0xb9, 0x46, 0x2e, 0x90, 0x6b, 
0x5c, 0x83, 0x1c, 0x19, 0xbd, 0x65, 0xa8, + 0x2d, 0x4c, 0xeb, 0x9c, 0x39, 0xd9, 0x23, 0x48, 0x9f, 0x0a, 0x50, 0x1a, 0x09, 0x31, 0xa1, 0x73, + 0x77, 0x4c, 0x32, 0xee, 0xc3, 0xe9, 0xd7, 0x01, 0xce, 0x55, 0x4b, 0x79, 0x4f, 0xed, 0xdb, 0xb8, + 0xcd, 0xa7, 0x9b, 0x3b, 0x57, 0xad, 0x37, 0x28, 0x21, 0xd8, 0x71, 0x76, 0xa4, 0x63, 0x1f, 0x20, + 0xcc, 0xf9, 0x01, 0x21, 0xaa, 0x40, 0xd6, 0x30, 0x35, 0xdd, 0xd4, 0xec, 0x4b, 0x3a, 0xda, 0x84, + 0xec, 0xb6, 0x77, 0x93, 0xd9, 0x84, 0x98, 0xdc, 0x4d, 0x66, 0x93, 0x62, 0xca, 0xad, 0x3a, 0xb1, + 0x23, 0x9b, 0x17, 0x17, 0xa4, 0x0f, 0xe2, 0x9e, 0x2d, 0x6e, 0xe3, 0xae, 0x76, 0x81, 0xcd, 0x39, + 0x26, 0x33, 0xdb, 0xe6, 0xae, 0x84, 0x4c, 0xd9, 0x47, 0x21, 0xa3, 0x27, 0xad, 0x81, 0x85, 0xdb, + 0xbc, 0xfe, 0xe1, 0xb6, 0x51, 0x03, 0xd2, 0xf8, 0x02, 0xf7, 0x6d, 0xab, 0x9c, 0xa1, 0x36, 0x7c, + 0x65, 0x1c, 0x90, 0x92, 0xdf, 0x5b, 0x65, 0x62, 0xb9, 0x5f, 0x7d, 0xbe, 0x2a, 0x32, 0xee, 0xe7, + 0xf4, 0x9e, 0x66, 0xe3, 0x9e, 0x61, 0x5f, 0xca, 0x5c, 0x7e, 0xf2, 0xca, 0x4a, 0x55, 0x28, 0x06, + 0xe3, 0x3e, 0x7a, 0x02, 0x0a, 0x26, 0xb6, 0x55, 0xad, 0xaf, 0x04, 0x92, 0xf4, 0x05, 0x46, 0x64, + 0x27, 0x7f, 0x37, 0x99, 0x15, 0xc4, 0xf8, 0x6e, 0x32, 0x1b, 0x17, 0x13, 0xd2, 0x21, 0x3c, 0x12, + 0x1a, 0xf7, 0xd1, 0x4b, 0x90, 0xf3, 0x52, 0x06, 0x81, 0x4e, 0x63, 0x42, 0x11, 0xc3, 0xe3, 0x95, + 0xfe, 0x2a, 0x78, 0x2a, 0x83, 0x65, 0x91, 0x3a, 0xa4, 0x4d, 0x6c, 0x0d, 0xba, 0xac, 0x50, 0x51, + 0xdc, 0x7c, 0x7e, 0xb6, 0x8c, 0x81, 0x50, 0x07, 0x5d, 0x5b, 0xe6, 0xc2, 0xd2, 0x5b, 0x90, 0x66, + 0x14, 0x94, 0x87, 0xcc, 0xf1, 0xfe, 0xfd, 0xfd, 0x83, 0x37, 0xf6, 0xc5, 0x18, 0x02, 0x48, 0x57, + 0x6b, 0xb5, 0xfa, 0x61, 0x53, 0x14, 0x50, 0x0e, 0x52, 0xd5, 0xad, 0x03, 0xb9, 0x29, 0xc6, 0x09, + 0x59, 0xae, 0xef, 0xd6, 0x6b, 0x4d, 0x31, 0x81, 0x16, 0xa1, 0xc0, 0xbe, 0x95, 0x7b, 0x07, 0xf2, + 0x83, 0x6a, 0x53, 0x4c, 0xfa, 0x48, 0x47, 0xf5, 0xfd, 0xed, 0xba, 0x2c, 0xa6, 0xa4, 0x1f, 0xc0, + 0xd5, 0xc8, 0x1c, 0xc3, 0xab, 0x79, 0x08, 0xbe, 0x9a, 0x87, 0xf4, 0x71, 0x9c, 0x80, 0xae, 
0xa8, + 0xc4, 0x01, 0xed, 0x8e, 0x4c, 0x7c, 0x73, 0x8e, 0xac, 0x63, 0x64, 0xf6, 0x04, 0x67, 0x99, 0xf8, + 0x0c, 0xdb, 0xad, 0x0e, 0x4b, 0x64, 0x98, 0x9f, 0x2c, 0xc8, 0x05, 0x4e, 0xa5, 0x42, 0x16, 0x63, + 0x7b, 0x07, 0xb7, 0x6c, 0x85, 0x9d, 0x36, 0x8b, 0x82, 0x9d, 0x1c, 0x61, 0x23, 0xd4, 0x23, 0x46, + 0x94, 0xde, 0x9e, 0x6b, 0x2d, 0x73, 0x90, 0x92, 0xeb, 0x4d, 0xf9, 0x4d, 0x31, 0x81, 0x10, 0x14, + 0xe9, 0xa7, 0x72, 0xb4, 0x5f, 0x3d, 0x3c, 0x6a, 0x1c, 0x90, 0xb5, 0x5c, 0x82, 0x92, 0xb3, 0x96, + 0x0e, 0x31, 0x25, 0xfd, 0x23, 0x0e, 0x8f, 0x46, 0xa4, 0x3d, 0xe8, 0x0e, 0x80, 0x3d, 0x54, 0x4c, + 0xdc, 0xd2, 0xcd, 0x76, 0xb4, 0x91, 0x35, 0x87, 0x32, 0xe5, 0x90, 0x73, 0x36, 0xff, 0xb2, 0x26, + 0x94, 0xca, 0xd0, 0x2b, 0x5c, 0x29, 0x99, 0x95, 0xc5, 0x21, 0xde, 0xf5, 0x90, 0x8a, 0x10, 0x6e, + 0x11, 0xc5, 0x74, 0x6d, 0xa9, 0x62, 0xca, 0x8f, 0x1e, 0xf8, 0xb1, 0xf0, 0x80, 0x06, 0x98, 0x99, + 0x6b, 0xaa, 0x3e, 0xb4, 0xcc, 0x08, 0x16, 0x7a, 0x13, 0x1e, 0x1d, 0x89, 0x8f, 0xae, 0xd2, 0xd4, + 0xac, 0x61, 0xf2, 0x91, 0x60, 0x98, 0xe4, 0xaa, 0xa5, 0x3f, 0x24, 0xfc, 0x0b, 0x1b, 0xcc, 0xf2, + 0x0e, 0x20, 0x6d, 0xd9, 0xaa, 0x3d, 0xb0, 0xb8, 0xc1, 0xbd, 0x34, 0x6b, 0xca, 0xb8, 0xee, 0x7c, + 0x1c, 0x51, 0x71, 0x99, 0xab, 0xf9, 0x7e, 0xbd, 0x2d, 0xe9, 0x36, 0x14, 0x83, 0x8b, 0x13, 0x7d, + 0x64, 0x3c, 0x9f, 0x13, 0x97, 0xee, 0x02, 0x1a, 0x4f, 0xa6, 0x43, 0xaa, 0x25, 0x42, 0x58, 0xb5, + 0xe4, 0x8f, 0x02, 0x3c, 0x36, 0x21, 0x71, 0x46, 0xaf, 0x8f, 0xec, 0xf3, 0xcb, 0xf3, 0xa4, 0xdd, + 0xeb, 0x8c, 0x16, 0xdc, 0x69, 0xe9, 0x16, 0x2c, 0xf8, 0xe9, 0xb3, 0x4d, 0xf2, 0xab, 0xb8, 0xe7, + 0xf3, 0x83, 0x65, 0x1d, 0x2f, 0x14, 0x0a, 0xdf, 0x30, 0x14, 0x06, 0xed, 0x2c, 0x3e, 0xa7, 0x9d, + 0x1d, 0x85, 0xd9, 0x59, 0x62, 0xae, 0x0c, 0x73, 0x2e, 0x6b, 0x4b, 0x7e, 0x33, 0x6b, 0x0b, 0x1c, + 0xb8, 0x54, 0x30, 0x85, 0x7d, 0x13, 0xc0, 0xab, 0x75, 0x91, 0x80, 0x64, 0xea, 0x83, 0x7e, 0x9b, + 0x5a, 0x40, 0x4a, 0x66, 0x0d, 0x74, 0x1b, 0x52, 0xc4, 0x92, 0x9c, 0x75, 0x1a, 0x77, 0xaa, 0xc4, + 0x12, 0x7c, 0xb5, 0x32, 0xc6, 
0x2d, 0x69, 0x80, 0xc6, 0x4b, 0xe5, 0x11, 0x5d, 0xbc, 0x1a, 0xec, + 0xe2, 0xf1, 0xc8, 0xa2, 0x7b, 0x78, 0x57, 0xef, 0x43, 0x8a, 0xee, 0x3c, 0x49, 0xbe, 0xe8, 0xfd, + 0x0c, 0x87, 0x40, 0xe4, 0x1b, 0xfd, 0x02, 0x40, 0xb5, 0x6d, 0x53, 0x3b, 0x1d, 0x78, 0x1d, 0xac, + 0x86, 0x5b, 0x4e, 0xd5, 0xe1, 0xdb, 0xba, 0xc6, 0x4d, 0x68, 0xd9, 0x13, 0xf5, 0x99, 0x91, 0x4f, + 0xa1, 0xb4, 0x0f, 0xc5, 0xa0, 0xac, 0x93, 0xb4, 0xb3, 0x31, 0x04, 0x93, 0x76, 0x86, 0xc1, 0x78, + 0xd2, 0xee, 0xa6, 0xfc, 0x09, 0x76, 0x09, 0x45, 0x1b, 0xd2, 0x7f, 0x05, 0x58, 0xf0, 0x1b, 0xde, + 0xb7, 0x9c, 0x8a, 0x4e, 0xc9, 0xbe, 0xaf, 0x8e, 0x65, 0xa2, 0x99, 0x73, 0xd5, 0x3a, 0xfe, 0x2e, + 0x13, 0xd1, 0x0f, 0x04, 0xc8, 0xba, 0x93, 0x0f, 0xde, 0x47, 0x05, 0x2e, 0xf0, 0xd8, 0xda, 0xc5, + 0xfd, 0x97, 0x48, 0xec, 0xba, 0x2e, 0xe1, 0x5e, 0xd7, 0xdd, 0x75, 0x73, 0xa5, 0xa8, 0xea, 0x9e, + 0x7f, 0xa5, 0xb9, 0x4d, 0x39, 0xa9, 0xe1, 0xef, 0xf8, 0x38, 0x48, 0x92, 0x80, 0x7e, 0x04, 0x69, + 0xb5, 0xe5, 0xd6, 0x34, 0x8b, 0x21, 0xc5, 0x3e, 0x87, 0x75, 0xbd, 0x39, 0xac, 0x52, 0x4e, 0x99, + 0x4b, 0xf0, 0x51, 0xc5, 0x9d, 0x51, 0x49, 0xaf, 0x11, 0xbd, 0x8c, 0x27, 0xe8, 0x11, 0x8b, 0x00, + 0xc7, 0xfb, 0x0f, 0x0e, 0xb6, 0x77, 0xee, 0xed, 0xd4, 0xb7, 0x79, 0xb6, 0xb4, 0xbd, 0x5d, 0xdf, + 0x16, 0xe3, 0x84, 0x4f, 0xae, 0x3f, 0x38, 0x38, 0xa9, 0x6f, 0x8b, 0x09, 0xe9, 0x2e, 0xe4, 0x5c, + 0xaf, 0x42, 0x10, 0xbe, 0x53, 0x9f, 0x15, 0xf8, 0xd9, 0xe6, 0xd5, 0xf5, 0x65, 0x48, 0x19, 0xfa, + 0x7b, 0xfc, 0xee, 0x2c, 0x21, 0xb3, 0x86, 0xd4, 0x86, 0xd2, 0x88, 0x4b, 0x42, 0x77, 0x21, 0x63, + 0x0c, 0x4e, 0x15, 0xc7, 0x68, 0x47, 0xaa, 0xd8, 0x0e, 0x76, 0x1c, 0x9c, 0x76, 0xb5, 0xd6, 0x7d, + 0x7c, 0xe9, 0x2c, 0x93, 0x31, 0x38, 0xbd, 0xcf, 0x6c, 0x9b, 0xf5, 0x12, 0xf7, 0xf7, 0x72, 0x01, + 0x59, 0xe7, 0xa8, 0xa2, 0x1f, 0x43, 0xce, 0xf5, 0x76, 0xee, 0xdd, 0x77, 0xa4, 0x9b, 0xe4, 0xea, + 0x3d, 0x11, 0x74, 0x13, 0x16, 0x2d, 0xed, 0xbc, 0xef, 0x94, 0xee, 0x59, 0xf5, 0x26, 0x4e, 0xcf, + 0x4c, 0x89, 0xfd, 0xd8, 0x73, 0x0a, 0x0c, 0x24, 0xc8, 0x89, 0xa3, 
0xbe, 0xe2, 0xbb, 0x1c, 0x40, + 0x48, 0x30, 0x4e, 0x84, 0x05, 0xe3, 0x5f, 0xc5, 0x21, 0xef, 0xbb, 0x19, 0x40, 0x3f, 0xf4, 0x39, + 0xae, 0x62, 0x48, 0x14, 0xf1, 0xf1, 0x7a, 0x97, 0xcb, 0xc1, 0x89, 0xc5, 0xe7, 0x9f, 0x58, 0xd4, + 0xfd, 0x8b, 0x73, 0xd1, 0x90, 0x9c, 0xfb, 0xa2, 0xe1, 0x39, 0x40, 0xb6, 0x6e, 0xab, 0x5d, 0xe5, + 0x42, 0xb7, 0xb5, 0xfe, 0xb9, 0xc2, 0x4c, 0x83, 0xb9, 0x19, 0x91, 0xfe, 0x39, 0xa1, 0x3f, 0x0e, + 0xa9, 0x95, 0xfc, 0x52, 0x80, 0xac, 0x8b, 0xe8, 0xe6, 0xbd, 0x7a, 0xbe, 0x02, 0x69, 0x0e, 0x5a, + 0xd8, 0xdd, 0x33, 0x6f, 0x85, 0xde, 0xa8, 0x54, 0x20, 0xdb, 0xc3, 0xb6, 0x4a, 0x7d, 0x26, 0x8b, + 0x80, 0x6e, 0xfb, 0xe6, 0xcb, 0x90, 0xf7, 0x5d, 0xdb, 0x13, 0x37, 0xba, 0x5f, 0x7f, 0x43, 0x8c, + 0x55, 0x32, 0x1f, 0x7e, 0xb2, 0x96, 0xd8, 0xc7, 0xef, 0x91, 0x13, 0x26, 0xd7, 0x6b, 0x8d, 0x7a, + 0xed, 0xbe, 0x28, 0x54, 0xf2, 0x1f, 0x7e, 0xb2, 0x96, 0x91, 0x31, 0x2d, 0xa6, 0xdf, 0xbc, 0x0f, + 0xa5, 0x91, 0x8d, 0x09, 0x1e, 0x68, 0x04, 0xc5, 0xed, 0xe3, 0xc3, 0xbd, 0x9d, 0x5a, 0xb5, 0x59, + 0x57, 0x4e, 0x0e, 0x9a, 0x75, 0x51, 0x40, 0x8f, 0xc2, 0xd2, 0xde, 0xce, 0x4f, 0x1a, 0x4d, 0xa5, + 0xb6, 0xb7, 0x53, 0xdf, 0x6f, 0x2a, 0xd5, 0x66, 0xb3, 0x5a, 0xbb, 0x2f, 0xc6, 0x37, 0xff, 0x94, + 0x87, 0x52, 0x75, 0xab, 0xb6, 0x43, 0x60, 0x9b, 0xd6, 0x52, 0xa9, 0x7b, 0xa8, 0x41, 0x92, 0x96, + 0x05, 0x27, 0x3e, 0xde, 0xab, 0x4c, 0xbe, 0x21, 0x41, 0xf7, 0x20, 0x45, 0x2b, 0x86, 0x68, 0xf2, + 0x6b, 0xbe, 0xca, 0x94, 0x2b, 0x13, 0x32, 0x18, 0x7a, 0x9c, 0x26, 0x3e, 0xef, 0xab, 0x4c, 0xbe, + 0x41, 0x41, 0x7b, 0x90, 0x71, 0x0a, 0x46, 0xd3, 0xde, 0xdc, 0x55, 0xa6, 0x5e, 0x6b, 0x90, 0xa9, + 0xb1, 0xc2, 0xdb, 0xe4, 0x97, 0x7f, 0x95, 0x29, 0x77, 0x2b, 0x68, 0x07, 0xd2, 0xbc, 0xe8, 0x31, + 0xe5, 0x31, 0x5f, 0x65, 0xda, 0x6d, 0x09, 0x92, 0x21, 0xe7, 0x95, 0x34, 0xa7, 0xbf, 0x67, 0xac, + 0xcc, 0x70, 0x6d, 0x84, 0xde, 0x82, 0x42, 0xb0, 0xa0, 0x32, 0xdb, 0x83, 0xc1, 0xca, 0x8c, 0xf7, + 0x32, 0x44, 0x7f, 0xb0, 0xba, 0x32, 0xdb, 0x03, 0xc2, 0xca, 0x8c, 0xd7, 0x34, 0xe8, 0x1d, 0x58, + 0x1c, 
0xaf, 0x7e, 0xcc, 0xfe, 0x9e, 0xb0, 0x32, 0xc7, 0xc5, 0x0d, 0xea, 0x01, 0x0a, 0xa9, 0x9a, + 0xcc, 0xf1, 0xbc, 0xb0, 0x32, 0xcf, 0x3d, 0x0e, 0x6a, 0x43, 0x69, 0xb4, 0x12, 0x31, 0xeb, 0x73, + 0xc3, 0xca, 0xcc, 0x77, 0x3a, 0xac, 0x97, 0x20, 0x2c, 0x9f, 0xf5, 0xf9, 0x61, 0x65, 0xe6, 0x2b, + 0x1e, 0x74, 0x0c, 0xe0, 0x83, 0x95, 0x33, 0x3c, 0x47, 0xac, 0xcc, 0x72, 0xd9, 0x83, 0x0c, 0x58, + 0x0a, 0xc3, 0x9b, 0xf3, 0xbc, 0x4e, 0xac, 0xcc, 0x75, 0x07, 0x44, 0xec, 0x39, 0x88, 0x1c, 0x67, + 0x7b, 0xad, 0x58, 0x99, 0xf1, 0x32, 0x68, 0xab, 0xfe, 0xe9, 0x17, 0x2b, 0xc2, 0x67, 0x5f, 0xac, + 0x08, 0xff, 0xfe, 0x62, 0x45, 0xf8, 0xe8, 0xcb, 0x95, 0xd8, 0x67, 0x5f, 0xae, 0xc4, 0xfe, 0xf9, + 0xe5, 0x4a, 0xec, 0xa7, 0xcf, 0x9e, 0x6b, 0x76, 0x67, 0x70, 0xba, 0xde, 0xd2, 0x7b, 0x1b, 0xfe, + 0x07, 0xde, 0x61, 0xcf, 0xca, 0x4f, 0xd3, 0x34, 0xa0, 0xde, 0xfa, 0x5f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xdc, 0x85, 0x19, 0x4e, 0x76, 0x2e, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -5510,10 +5509,10 @@ func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x28 } - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5605,10 +5604,10 @@ func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x22 } - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5779,10 +5778,10 @@ func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -8145,8 +8144,8 @@ func (m *RequestPrepareProposal) Size() (n int) { } l = m.LocalLastCommit.Size() n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -8181,8 +8180,8 @@ func (m *RequestProcessProposal) Size() (n int) { } l = m.ProposedLastCommit.Size() n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) 
> 0 { - for _, e := range m.ByzantineValidators { + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -8261,8 +8260,8 @@ func (m *RequestFinalizeBlock) Size() (n int) { } l = m.DecidedLastCommit.Size() n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -11139,7 +11138,7 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11166,8 +11165,8 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Misbehavior = append(m.Misbehavior, Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11408,7 +11407,7 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11435,8 +11434,8 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := 
m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Misbehavior = append(m.Misbehavior, Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11985,7 +11984,7 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12012,8 +12011,8 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Misbehavior = append(m.Misbehavior, Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/docs/rfc/rfc-013-abci++.md b/docs/rfc/rfc-013-abci++.md index 0289c187ec..6e83c9aa22 100644 --- a/docs/rfc/rfc-013-abci++.md +++ b/docs/rfc/rfc-013-abci++.md @@ -3,7 +3,7 @@ ## Changelog - 2020-01-11: initialized -- 2021-02-11: Migrate RFC to tendermint repo (Originally [RFC 004](https://github.com/tendermint/spec/pull/254)) +- 2022-02-11: Migrate RFC to tendermint repo (Originally [RFC 004](https://github.com/tendermint/spec/pull/254)) ## Author(s) diff --git a/docs/tools/debugging/proposer-based-timestamps-runbook.md b/docs/tools/debugging/proposer-based-timestamps-runbook.md index a817bd29eb..cb32248dd0 100644 --- a/docs/tools/debugging/proposer-based-timestamps-runbook.md +++ b/docs/tools/debugging/proposer-based-timestamps-runbook.md @@ -213,4 +213,4 @@ documentation](https://hub.cosmos.network/main/governance/submitting.html#sendin If 
the application does not implement a way to update the consensus parameters programatically, then the application itself must be updated to do so. More information on updating -the consensus parameters via ABCI can be found in the [FinalizeBlock documentation](https://github.com/tendermint/tendermint/blob/master/spec/abci++/abci++_methods_002_draft.md#finalizeblock). +the consensus parameters via ABCI can be found in the [FinalizeBlock documentation](../../../spec/abci++/abci%2B%2B_methods.md#finalizeblock). diff --git a/internal/state/execution.go b/internal/state/execution.go index 2710478e6a..2638a67c66 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -105,14 +105,14 @@ func (blockExec *BlockExecutor) CreateProposalBlock( rpp, err := blockExec.appClient.PrepareProposal( ctx, &abci.RequestPrepareProposal{ - MaxTxBytes: maxDataBytes, - Txs: block.Txs.ToSliceOfBytes(), - LocalLastCommit: buildExtendedCommitInfo(lastExtCommit, blockExec.store, state.InitialHeight, state.ConsensusParams.ABCI), - ByzantineValidators: block.Evidence.ToABCI(), - Height: block.Height, - Time: block.Time, - NextValidatorsHash: block.NextValidatorsHash, - ProposerAddress: block.ProposerAddress, + MaxTxBytes: maxDataBytes, + Txs: block.Txs.ToSliceOfBytes(), + LocalLastCommit: buildExtendedCommitInfo(lastExtCommit, blockExec.store, state.InitialHeight, state.ConsensusParams.ABCI), + Misbehavior: block.Evidence.ToABCI(), + Height: block.Height, + Time: block.Time, + NextValidatorsHash: block.NextValidatorsHash, + ProposerAddress: block.ProposerAddress, }, ) if err != nil { @@ -147,14 +147,14 @@ func (blockExec *BlockExecutor) ProcessProposal( state State, ) (bool, error) { resp, err := blockExec.appClient.ProcessProposal(ctx, &abci.RequestProcessProposal{ - Hash: block.Header.Hash(), - Height: block.Header.Height, - Time: block.Header.Time, - Txs: block.Data.Txs.ToSliceOfBytes(), - ProposedLastCommit: buildLastCommitInfo(block, blockExec.store, 
state.InitialHeight), - ByzantineValidators: block.Evidence.ToABCI(), - ProposerAddress: block.ProposerAddress, - NextValidatorsHash: block.NextValidatorsHash, + Hash: block.Header.Hash(), + Height: block.Header.Height, + Time: block.Header.Time, + Txs: block.Data.Txs.ToSliceOfBytes(), + ProposedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), + Misbehavior: block.Evidence.ToABCI(), + ProposerAddress: block.ProposerAddress, + NextValidatorsHash: block.NextValidatorsHash, }) if err != nil { return false, ErrInvalidBlock(err) @@ -208,14 +208,14 @@ func (blockExec *BlockExecutor) ApplyBlock( fBlockRes, err := blockExec.appClient.FinalizeBlock( ctx, &abci.RequestFinalizeBlock{ - Hash: block.Hash(), - Height: block.Header.Height, - Time: block.Header.Time, - Txs: block.Txs.ToSliceOfBytes(), - DecidedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), - ByzantineValidators: block.Evidence.ToABCI(), - ProposerAddress: block.ProposerAddress, - NextValidatorsHash: block.NextValidatorsHash, + Hash: block.Hash(), + Height: block.Header.Height, + Time: block.Header.Time, + Txs: block.Txs.ToSliceOfBytes(), + DecidedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), + Misbehavior: block.Evidence.ToABCI(), + ProposerAddress: block.ProposerAddress, + NextValidatorsHash: block.NextValidatorsHash, }, ) endTime := time.Now().UnixNano() @@ -677,12 +677,12 @@ func ExecCommitBlock( finalizeBlockResponse, err := appConn.FinalizeBlock( ctx, &abci.RequestFinalizeBlock{ - Hash: block.Hash(), - Height: block.Height, - Time: block.Time, - Txs: block.Txs.ToSliceOfBytes(), - DecidedLastCommit: buildLastCommitInfo(block, store, initialHeight), - ByzantineValidators: block.Evidence.ToABCI(), + Hash: block.Hash(), + Height: block.Height, + Time: block.Time, + Txs: block.Txs.ToSliceOfBytes(), + DecidedLastCommit: buildLastCommitInfo(block, store, initialHeight), + Misbehavior: block.Evidence.ToABCI(), }, ) diff --git 
a/internal/state/execution_test.go b/internal/state/execution_test.go index 79557b7876..93b0d1cfbe 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -158,8 +158,8 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) { } } -// TestFinalizeBlockByzantineValidators ensures we send byzantine validators list. -func TestFinalizeBlockByzantineValidators(t *testing.T) { +// TestFinalizeBlockMisbehavior ensures we send misbehavior list. +func TestFinalizeBlockMisbehavior(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -274,7 +274,7 @@ func TestFinalizeBlockByzantineValidators(t *testing.T) { require.NoError(t, err) // TODO check state and mempool - assert.Equal(t, abciMb, app.ByzantineValidators) + assert.Equal(t, abciMb, app.Misbehavior) } func TestProcessProposal(t *testing.T) { @@ -338,11 +338,11 @@ func TestProcessProposal(t *testing.T) { block1.Txs = txs expectedRpp := &abci.RequestProcessProposal{ - Txs: block1.Txs.ToSliceOfBytes(), - Hash: block1.Hash(), - Height: block1.Header.Height, - Time: block1.Header.Time, - ByzantineValidators: block1.Evidence.ToABCI(), + Txs: block1.Txs.ToSliceOfBytes(), + Hash: block1.Hash(), + Height: block1.Header.Height, + Time: block1.Header.Time, + Misbehavior: block1.Evidence.ToABCI(), ProposedLastCommit: abci.CommitInfo{ Round: 0, Votes: voteInfos, diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index 354a2874f3..bfdc8b1f50 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -266,9 +266,9 @@ func makeRandomStateFromConsensusParams( type testApp struct { abci.BaseApplication - CommitVotes []abci.VoteInfo - ByzantineValidators []abci.Misbehavior - ValidatorUpdates []abci.ValidatorUpdate + CommitVotes []abci.VoteInfo + Misbehavior []abci.Misbehavior + ValidatorUpdates []abci.ValidatorUpdate } var _ abci.Application = (*testApp)(nil) @@ -279,7 +279,7 @@ func (app *testApp) Info(_ 
context.Context, req *abci.RequestInfo) (*abci.Respon func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { app.CommitVotes = req.DecidedLastCommit.Votes - app.ByzantineValidators = req.ByzantineValidators + app.Misbehavior = req.Misbehavior resTxs := make([]*abci.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index aa9e3fcbe1..7f9e57df5e 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -109,7 +109,7 @@ message RequestPrepareProposal { // sent to the app for possible modifications. repeated bytes txs = 2; ExtendedCommitInfo local_last_commit = 3 [(gogoproto.nullable) = false]; - repeated Misbehavior byzantine_validators = 4 [(gogoproto.nullable) = false]; + repeated Misbehavior misbehavior = 4 [(gogoproto.nullable) = false]; int64 height = 5; google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; bytes next_validators_hash = 7; @@ -120,7 +120,7 @@ message RequestPrepareProposal { message RequestProcessProposal { repeated bytes txs = 1; CommitInfo proposed_last_commit = 2 [(gogoproto.nullable) = false]; - repeated Misbehavior byzantine_validators = 3 [(gogoproto.nullable) = false]; + repeated Misbehavior misbehavior = 3 [(gogoproto.nullable) = false]; // hash is the merkle root hash of the fields of the proposed block. bytes hash = 4; int64 height = 5; @@ -147,8 +147,8 @@ message RequestVerifyVoteExtension { message RequestFinalizeBlock { repeated bytes txs = 1; CommitInfo decided_last_commit = 2 [(gogoproto.nullable) = false]; - repeated Misbehavior byzantine_validators = 3 [(gogoproto.nullable) = false]; - // hash is the merkle root hash of the fields of the proposed block. + repeated Misbehavior misbehavior = 3 [(gogoproto.nullable) = false]; + // hash is the merkle root hash of the fields of the decided block. 
bytes hash = 4; int64 height = 5; google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; @@ -248,7 +248,7 @@ message ResponseDeliverTx { } message ResponseCommit { - // reserve 1 + reserved 1, 2; int64 retain_height = 3; } diff --git a/spec/abci++/README.md b/spec/abci++/README.md index a22babfeed..2cd2bb4834 100644 --- a/spec/abci++/README.md +++ b/spec/abci++/README.md @@ -25,14 +25,14 @@ This allows Tendermint to run with applications written in many programming lang This specification is split as follows: -- [Overview and basic concepts](./abci++_basic_concepts_002_draft.md) - interface's overview and concepts +- [Overview and basic concepts](./abci++_basic_concepts.md) - interface's overview and concepts needed to understand other parts of this specification. -- [Methods](./abci++_methods_002_draft.md) - complete details on all ABCI++ methods +- [Methods](./abci++_methods.md) - complete details on all ABCI++ methods and message types. -- [Requirements for the Application](./abci++_app_requirements_002_draft.md) - formal requirements +- [Requirements for the Application](./abci++_app_requirements.md) - formal requirements on the Application's logic to ensure Tendermint properties such as liveness. These requirements define what Tendermint expects from the Application; second part on managing ABCI application state and related topics. -- [Tendermint's expected behavior](./abci++_tmint_expected_behavior_002_draft.md) - specification of +- [Tendermint's expected behavior](./abci++_tmint_expected_behavior.md) - specification of how the different ABCI++ methods may be called by Tendermint. This explains what the Application is to expect from Tendermint. 
- [Client and Server](../abci/client-server.md) - for those looking to implement their diff --git a/spec/abci++/abci++_app_requirements_002_draft.md b/spec/abci++/abci++_app_requirements.md similarity index 93% rename from spec/abci++/abci++_app_requirements_002_draft.md rename to spec/abci++/abci++_app_requirements.md index 3203f7a957..cd4d877c4a 100644 --- a/spec/abci++/abci++_app_requirements_002_draft.md +++ b/spec/abci++/abci++_app_requirements.md @@ -1,23 +1,24 @@ --- order: 3 -title: Application Requirements +title: Requirements for the Application --- -# Application Requirements +# Requirements for the Application ## Formal Requirements This section specifies what Tendermint expects from the Application. It is structured as a set of formal requirements that can be used for testing and verification of the Application's logic. -Let *p* and *q* be two different correct proposers in rounds *rp* and *rq* -respectively, in height *h*. +Let *p* and *q* be two correct processes. +Let *rp* (resp. *rq*) be a round of height *h* where *p* (resp. *q*) is the +proposer. Let *sp,h-1* be *p*'s Application's state committed for height *h-1*. Let *vp* (resp. *vq*) be the block that *p*'s (resp. *q*'s) Tendermint passes on to the Application via `RequestPrepareProposal` as proposer of round *rp* (resp *rq*), height *h*, also known as the raw proposal. -Let *v'p* (resp. *v'q*) the possibly modified block *p*'s (resp. *q*'s) Application +Let *up* (resp. *uq*) the possibly modified block *p*'s (resp. *q*'s) Application returns via `ResponsePrepareProposal` to Tendermint, also known as the prepared proposal. Process *p*'s prepared proposal can differ in two different rounds where *p* is the proposer. @@ -49,6 +50,7 @@ same-block execution mode and *does not* provide values for Full execution of blocks at `PrepareProposal` time stands on Tendermint's critical path. 
Thus, Requirement 3 ensures the Application will set a value for `TimeoutPropose` such that the time it takes to fully execute blocks in `PrepareProposal` does not interfere with Tendermint's propose timer. +Note that violation of Requirement 3 may just lead to further rounds, but will not compromise liveness. * Requirement 4 [`PrepareProposal`, tx-size]: When *p*'s Application calls `ResponsePrepareProposal`, the total size in bytes of the transactions returned does not exceed `RequestPrepareProposal.max_tx_bytes`. @@ -62,7 +64,7 @@ transaction list returned by the application will never cause the resulting bloc limit. * Requirement 5 [`PrepareProposal`, `ProcessProposal`, coherence]: For any two correct processes *p* and *q*, - if *q*'s Tendermint calls `RequestProcessProposal` on *v'p*, + if *q*'s Tendermint calls `RequestProcessProposal` on *up*, *q*'s Application returns Accept in `ResponseProcessProposal`. Requirement 5 makes sure that blocks proposed by correct processes *always* pass the correct receiving process's @@ -75,14 +77,14 @@ serious consequences on Tendermint's liveness that this entails. Due to its crit target for extensive testing and automated verification. * Requirement 6 [`ProcessProposal`, determinism-1]: `ProcessProposal` is a (deterministic) function of the current - state and the block that is about to be applied. In other words, for any correct process *p*, and any arbitrary block *v'*, - if *p*'s Tendermint calls `RequestProcessProposal` on *v'* at height *h*, - then *p*'s Application's acceptance or rejection **exclusively** depends on *v'* and *sp,h-1*. + state and the block that is about to be applied. In other words, for any correct process *p*, and any arbitrary block *u*, + if *p*'s Tendermint calls `RequestProcessProposal` on *u* at height *h*, + then *p*'s Application's acceptance or rejection **exclusively** depends on *u* and *sp,h-1*. 
* Requirement 7 [`ProcessProposal`, determinism-2]: For any two correct processes *p* and *q*, and any arbitrary - block *v'*, - if *p*'s (resp. *q*'s) Tendermint calls `RequestProcessProposal` on *v'* at height *h*, - then *p*'s Application accepts *v'* if and only if *q*'s Application accepts *v'*. + block *u*, + if *p*'s (resp. *q*'s) Tendermint calls `RequestProcessProposal` on *u* at height *h*, + then *p*'s Application accepts *u* if and only if *q*'s Application accepts *u*. Note that this requirement follows from Requirement 6 and the Agreement property of consensus. Requirements 6 and 7 ensure that all correct processes will react in the same way to a proposed block, even @@ -97,7 +99,7 @@ of `ProcessProposal`. As a general rule `ProcessProposal` SHOULD always accept t According to the Tendermint algorithm, a correct process can broadcast at most one precommit message in round *r*, height *h*. -Since, as stated in the [Methods](./abci++_methods_002_draft.md#extendvote) section, `ResponseExtendVote` +Since, as stated in the [Methods](./abci++_methods.md#extendvote) section, `ResponseExtendVote` is only called when Tendermint is about to broadcast a non-`nil` precommit message, a correct process can only produce one vote extension in round *r*, height *h*. @@ -106,9 +108,9 @@ Let *erp* be the vote extension that the Application of a Let *wrp* be the proposed block that *p*'s Tendermint passes to the Application via `RequestExtendVote` in round *r*, height *h*. -* Requirement 8 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two correct processes *p* and *q*, if *q* -receives *erp* - from *p* in height *h*, *q*'s Application returns Accept in `ResponseVerifyVoteExtension`. +* Requirement 8 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two different correct + processes *p* and *q*, if *q* receives *erp* from *p* in height *h*, *q*'s + Application returns Accept in `ResponseVerifyVoteExtension`. 
Requirement 8 constrains the creation and handling of vote extensions in a similar way as Requirement 5 constrains the creation and handling of proposed blocks. @@ -170,8 +172,8 @@ Finally, notice that neither `PrepareProposal` nor `ExtendVote` have determinism requirements associated. Indeed, `PrepareProposal` is not required to be deterministic: -* *v'p* may depend on *vp* and *sp,h-1*, but may also depend on other values or operations. -* *vp = vq ⇏ v'p = v'q*. +* *up* may depend on *vp* and *sp,h-1*, but may also depend on other values or operations. +* *vp = vq ⇏ up = uq*. Likewise, `ExtendVote` can also be non-deterministic: @@ -365,12 +367,12 @@ For more information, see Section [State Sync](#state-sync). ### Transaction Results The Application is expected to return a list of -[`ExecTxResult`](./abci%2B%2B_methods_002_draft.md#exectxresult) in -[`ResponseFinalizeBlock`](./abci%2B%2B_methods_002_draft.md#finalizeblock). The list of transaction +[`ExecTxResult`](./abci%2B%2B_methods.md#exectxresult) in +[`ResponseFinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock). The list of transaction results must respect the same order as the list of transactions delivered via -[`RequestFinalizeBlock`](./abci%2B%2B_methods_002_draft.md#finalizeblock). +[`RequestFinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock). This section discusses the fields inside this structure, along with the fields in -[`ResponseCheckTx`](./abci%2B%2B_methods_002_draft.md#checktx), +[`ResponseCheckTx`](./abci%2B%2B_methods.md#checktx), whose semantics are similar. The `Info` and `Log` fields are @@ -471,12 +473,12 @@ events took place during their execution. 
### Updating the Validator Set The application may set the validator set during -[`InitChain`](./abci%2B%2B_methods_002_draft.md#initchain), and may update it during -[`FinalizeBlock`](./abci%2B%2B_methods_002_draft.md#finalizeblock) +[`InitChain`](./abci%2B%2B_methods.md#initchain), and may update it during +[`FinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock) (next block execution mode) or -[`PrepareProposal`](./abci%2B%2B_methods_002_draft.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods_002_draft.md#processproposal) +[`PrepareProposal`](./abci%2B%2B_methods.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods.md#processproposal) (same block execution mode). In all cases, a structure of type -[`ValidatorUpdate`](./abci%2B%2B_methods_002_draft.md#validatorupdate) is returned. +[`ValidatorUpdate`](./abci%2B%2B_methods.md#validatorupdate) is returned. The `InitChain` method, used to initialize the Application, can return a list of validators. If the list is empty, Tendermint will use the validators loaded from the genesis @@ -510,7 +512,7 @@ Applications must ensure that `MaxTotalVotingPower = MaxInt64 / 8` Note the updates returned after processing the block at height `H` will only take effect -at block `H+2` (see Section [Methods](./abci%2B%2B_methods_002_draft.md)). +at block `H+2` (see Section [Methods](./abci%2B%2B_methods.md)). ### Consensus Parameters @@ -518,10 +520,10 @@ at block `H+2` (see Section [Methods](./abci%2B%2B_methods_002_draft.md)). They enforce certain limits in the blockchain, like the maximum size of blocks, amount of gas used in a block, and the maximum acceptable age of evidence. 
They can be set in -[`InitChain`](./abci%2B%2B_methods_002_draft.md#initchain), and updated in -[`FinalizeBlock`](./abci%2B%2B_methods_002_draft.md#finalizeblock) +[`InitChain`](./abci%2B%2B_methods.md#initchain), and updated in +[`FinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock) (next block execution mode) or -[`PrepareProposal`](./abci%2B%2B_methods_002_draft.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods_002_draft.md#processproposal) +[`PrepareProposal`](./abci%2B%2B_methods.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods.md#processproposal) (same block execution model). These parameters are deterministically set and/or updated by the Application, so all full nodes have the same value at a given height. @@ -672,14 +674,33 @@ node has received all precommits for a block, forgoing the remaining commit time Setting this parameter to `false` (the default) causes Tendermint to wait for the full commit timeout configured in `TimeoutParams.Commit`. +##### ABCIParams.VoteExtensionsEnableHeight + +This parameter is either 0 or a positive height at which vote extensions +become mandatory. If the value is zero (which is the default), vote +extensions are not required. Otherwise, at all heights greater than the +configured height `H` vote extensions must be present (even if empty). +When the configured height `H` is reached, `PrepareProposal` will not +include vote extensions yet, but `ExtendVote` and `VerifyVoteExtension` will +be called. Then, when reaching height `H+1`, `PrepareProposal` will +include the vote extensions from height `H`. For all heights after `H` + +* vote extensions cannot be disabled, +* they are mandatory: all precommit messages sent MUST have an extension + attached. Nevetheless, the application MAY provide 0-length + extensions. + +Must always be set to a future height. Once set to a value different from +0, its value must not be changed. 
+ #### Updating Consensus Parameters The application may set the `ConsensusParams` during -[`InitChain`](./abci%2B%2B_methods_002_draft.md#initchain), +[`InitChain`](./abci%2B%2B_methods.md#initchain), and update them during -[`FinalizeBlock`](./abci%2B%2B_methods_002_draft.md#finalizeblock) +[`FinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock) (next block execution mode) or -[`PrepareProposal`](./abci%2B%2B_methods_002_draft.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods_002_draft.md#processproposal) +[`PrepareProposal`](./abci%2B%2B_methods.md#prepareproposal)/[`ProcessProposal`](./abci%2B%2B_methods.md#processproposal) (same block execution mode). If the `ConsensusParams` is empty, it will be ignored. Each field that is not empty will be applied in full. For instance, if updating the @@ -885,7 +906,7 @@ truncated block history - users are advised to consider the broader network impl terms of block availability and auditability. This functionality may be added in the future. For details on the specific ABCI calls and types, see the -[methods](abci%2B%2B_methods_002_draft.md) section. +[methods](abci%2B%2B_methods.md) section. #### Taking Snapshots diff --git a/spec/abci++/abci++_basic_concepts.md b/spec/abci++/abci++_basic_concepts.md new file mode 100644 index 0000000000..a467b623ed --- /dev/null +++ b/spec/abci++/abci++_basic_concepts.md @@ -0,0 +1,468 @@ +--- +order: 1 +title: Overview and basic concepts +--- + +## Outline + +- [ABCI++ vs. ABCI](#abci-vs-abci) +- [Method overview](#method-overview) + - [Consensus/block execution methods](#consensusblock-execution-methods) + - [Mempool methods](#mempool-methods) + - [Info methods](#info-methods) + - [State-sync methods](#state-sync-methods) +- [Next-block execution vs. 
same-block execution](#next-block-execution-vs-same-block-execution) +- [Tendermint proposal timeout](#tendermint-proposal-timeout) +- [Deterministic State-Machine Replication](#deterministic-state-machine-replication) +- [Events](#events) +- [Evidence](#evidence) +- [Errors](#errors) + +# Overview and basic concepts + +## ABCI++ vs. ABCI + +[↑ Back to Outline](#outline) + +The Application's main role is to execute blocks decided (a.k.a. finalized) by consensus. The +decided blocks are the main consensus's ouput to the (replicated) Application. With ABCI, the +application only interacts with consensus at *decision* time. This restricted mode of interaction +prevents numerous features for the Application, including many scalability improvements that are +now better understood than when ABCI was first written. For example, many ideas proposed to improve +scalability can be boiled down to "make the block proposers do work, so the network does not have +to". This includes optimizations such as transaction level signature aggregation, state transition +proofs, etc. Furthermore, many new security properties cannot be achieved in the current paradigm, +as the Application cannot require validators to do more than execute the transactions contained in +finalized blocks. This includes features such as threshold cryptography, and guaranteed IBC +connection attempts. + +ABCI++ addresses these limitations by allowing the application to intervene at three key places of +consensus execution: (a) at the moment a new proposal is to be created, (b) at the moment a +proposal is to be validated, and (c) at the moment a (precommit) vote is sent/received. 
The new +interface allows block proposers to perform application-dependent work in a block through the +`PrepareProposal` method (a); validators to perform application-dependent work and checks in a +proposed block through the `ProcessProposal` method (b); and applications to require their validators +do more than just validate blocks through the `ExtendVote` and `VerifyVoteExtension` methods (c). +Furthermore, ABCI++ coalesces {`BeginBlock`, [`DeliverTx`], `EndBlock`} into `FinalizeBlock`, as a +simplified, efficient way to deliver a decided block to the Application. + +## Method overview + +[↑ Back to Outline](#outline) + +Methods can be classified into four categories: *consensus*, *mempool*, *info*, and *state-sync*. + +### Consensus/block execution methods + +The first time a new blockchain is started, Tendermint calls `InitChain`. From then on, method +`FinalizeBlock` is executed upon the decision of each block, resulting in an updated Application +state. During the execution of an instance of consensus, which decides the block for a given +height, and before method `FinalizeBlock` is called, methods `PrepareProposal`, `ProcessProposal`, +`ExtendVote`, and `VerifyVoteExtension` may be called several times. See +[Tendermint's expected behavior](abci++_tmint_expected_behavior.md) for details on the possible +call sequences of these methods. + +- [**InitChain:**](./abci++_methods.md#initchain) This method initializes the blockchain. + Tendermint calls it once upon genesis. + +- [**PrepareProposal:**](./abci++_methods.md#prepareproposal) It allows the block + proposer to perform application-dependent work in a block before proposing it. + This enables, for instance, batch optimizations to a block, which has been empirically + demonstrated to be a key component for improved performance. Method `PrepareProposal` is called + every time Tendermint is about to broadcast a Proposal message, but no previous proposal has + been locked at the Tendermint level. 
Tendermint gathers outstanding transactions from the + mempool, generates a block header, and uses them to create a block to propose. Then, it calls + `RequestPrepareProposal` with the newly created proposal, called *raw proposal*. The Application + can make changes to the raw proposal, such as modifying transactions, and returns the + (potentially) modified proposal, called *prepared proposal* in the `ResponsePrepareProposal` + call. The logic modifying the raw proposal can be non-deterministic. + +- [**ProcessProposal:**](./abci++_methods.md#processproposal) It allows a validator to + perform application-dependent work in a proposed block. This enables features such as immediate + block execution, and allows the Application to reject invalid blocks. + Tendermint calls it when it receives a proposal and the Tendermint algorithm has not locked on a + value. The Application cannot modify the proposal at this point but can reject it if it is + invalid. If that is the case, Tendermint will prevote `nil` on the proposal, which has + strong liveness implications for Tendermint. As a general rule, the Application + SHOULD accept a prepared proposal passed via `ProcessProposal`, even if a part of + the proposal is invalid (e.g., an invalid transaction); the Application can + ignore the invalid part of the prepared proposal at block execution time. + +- [**ExtendVote:**](./abci++_methods.md#extendvote) It allows applications to force their + validators to do more than just validate within consensus. `ExtendVote` allows applications to + include non-deterministic data, opaque to Tendermint, to precommit messages (the final round of + voting). The data, called *vote extension*, will be broadcast and received together with the + vote it is extending, and will be made available to the Application in the next height, + in the rounds where the local process is the proposer. + Tendermint calls `ExtendVote` when it is about to send a non-`nil` precommit message. 
+ If the Application does not have vote extension information to provide at that time, it returns + a 0-length byte array as its vote extension. + +- [**VerifyVoteExtension:**](./abci++_methods.md#verifyvoteextension) It allows + validators to validate the vote extension data attached to a precommit message. If the validation + fails, the whole precommit message will be deemed invalid and ignored by Tendermint. + This has a negative impact on Tendermint's liveness, i.e., if vote extensions repeatedly cannot be + verified by correct validators, Tendermint may not be able to finalize a block even if sufficiently + many (+2/3) validators send precommit votes for that block. Thus, `VerifyVoteExtension` + should be used with special care. + As a general rule, an Application that detects an invalid vote extension SHOULD + accept it in `ResponseVerifyVoteExtension` and ignore it in its own logic. Tendermint calls it when + a process receives a precommit message with a (possibly empty) vote extension. + +- [**FinalizeBlock:**](./abci++_methods.md#finalizeblock) It delivers a decided block to the + Application. The Application must execute the transactions in the block deterministically and + update its state accordingly. Cryptographic commitments to the block and transaction results, + returned via the corresponding parameters in `ResponseFinalizeBlock`, are included in the header + of the next block. Tendermint calls it when a new block is decided. + +- [**Commit:**](./abci++_methods.md#commit) Instructs the Application to persist its + state. It is a fundamental part of Tendermint's crash-recovery mechanism that ensures the + synchronization between Tendermint and the Application upon recovery. Tendermint calls it just after + having persisted the data returned by `ResponseFinalizeBlock`. The Application can now discard + any state or data except the one resulting from executing the transactions in the decided block. 
+ +### Mempool methods + +- [**CheckTx:**](./abci++_methods.md#checktx) This method allows the Application to validate + transactions. Validation can be stateless (e.g., checking signatures) or stateful + (e.g., account balances). The type of validation performed is up to the application. If a + transaction passes the validation, then Tendermint adds it to the mempool; otherwise the + transaction is discarded. + Tendermint calls it when it receives a new transaction either coming from an external + user (e.g., a client) or another node. Furthermore, Tendermint can be configured to call + re-`CheckTx` on all outstanding transactions in the mempool after calling `Commit` for a block. + +### Info methods + +- [**Info:**](./abci++_methods.md#info) Used to sync Tendermint with the Application during a + handshake that happens upon recovery, or on startup when state-sync is used. + +- [**Query:**](./abci++_methods.md#query) This method can be used to query the Application for + information about the application state. + +### State-sync methods + +State sync allows new nodes to rapidly bootstrap by discovering, fetching, and applying +state machine (application) snapshots instead of replaying historical blocks. For more details, see the +[state sync documentation](../p2p/messages/state-sync.md). + +New nodes discover and request snapshots from other nodes in the P2P network. +A Tendermint node that receives a request for snapshots from a peer will call +`ListSnapshots` on its Application. The Application returns the list of locally available +snapshots. +Note that the list does not contain the actual snapshots but metadata about them: height at which +the snapshot was taken, application-specific verification data and more (see +[snapshot data type](./abci++_methods.md#snapshot) for more details). After receiving a +list of available snapshots from a peer, the new node can offer any of the snapshots in the list to +its local Application via the `OfferSnapshot` method. 
The Application can check at this point the +validity of the snapshot metadata. + +Snapshots may be quite large and are thus broken into smaller "chunks" that can be +assembled into the whole snapshot. Once the Application accepts a snapshot and +begins restoring it, Tendermint will fetch snapshot "chunks" from existing nodes. +The node providing "chunks" will fetch them from its local Application using +the `LoadSnapshotChunk` method. + +As the new node receives "chunks" it will apply them sequentially to the local +application with `ApplySnapshotChunk`. When all chunks have been applied, the +Application's `AppHash` is retrieved via an `Info` query. +To ensure that the sync proceeded correctly, Tendermint compares the local Application's `AppHash` +to the `AppHash` stored on the blockchain (verified via +[light client verification](../light-client/verification/README.md)). + +In summary: + +- [**ListSnapshots:**](./abci++_methods.md#listsnapshots) Used by nodes to discover available + snapshots on peers. + +- [**OfferSnapshot:**](./abci++_methods.md#offersnapshot) When a node receives a snapshot from a + peer, Tendermint uses this method to offer the snapshot to the Application. + +- [**LoadSnapshotChunk:**](./abci++_methods.md#loadsnapshotchunk) Used by Tendermint to retrieve + snapshot chunks from the Application to send to peers. + +- [**ApplySnapshotChunk:**](./abci++_methods.md#applysnapshotchunk) Used by Tendermint to hand + snapshot chunks to the Application. + +### Other methods + +Additionally, there is a [**Flush**](./abci++_methods.md#flush) method that is called on every connection, +and an [**Echo**](./abci++_methods.md#echo) method that is used for debugging. + +More details on managing state across connections can be found in the section on +[Managing Application State](./abci%2B%2B_app_requirements.md#managing-the-application-state-and-related-topics). + +## Next-block execution vs. 
same-block execution + +[↑ Back to Outline](#outline) + +In the original ABCI protocol, the only moment when the Application had access to a +block was after it was decided. This led to a block execution model, called *next-block +execution*, where some fields hashed in a block header refer to the execution of the +previous block, namely: + +- the Merkle root of the Application's state +- the transaction results +- the consensus parameter updates +- the validator updates + +With ABCI++, an Application may be configured to keep using the next-block execution model, by +executing the decided block in `FinalizeBlock`. However, the new methods introduced — +`PrepareProposal` and `ProcessProposal` — disclose the entire proposed block to the +Application, allowing for its immediate execution. An Application implementing immediate execution +may additionally wish to store certain data resulting from the block's execution in the same block +that has just been executed. This brings about a new execution model, called +*same-block execution*. An Application implementing this execution model, upon receiving a raw +proposal via `RequestPrepareProposal` and potentially modifying its transaction list, fully +executes the resulting prepared proposal as though it was the decided block (immediate execution), +and the results of the block execution are used as follows: + +- The block execution may generate a set of events. The Application should store these events and + return them back to Tendermint during the `FinalizeBlock` call if the block is finally decided. +- The Merkle root resulting from executing the prepared proposal is provided in + `ResponsePrepareProposal` and thus refers to the **current block**. Tendermint + will use it in the prepared proposal's header. +- Likewise, the transaction results from executing the prepared proposal are + provided in `ResponsePrepareProposal` and refer to the transactions in the + **current block**. 
Tendermint will use them to calculate the results hash + in the prepared proposal's header. +- The consensus parameter updates and validator updates are also provided in + `ResponsePrepareProposal` and reflect the result of the prepared proposal's + execution. They come into force in height H+1 (as opposed to the H+2 rule + in next-block execution model). + +If the Application is configured to keep the next-block execution model, it will not +provide any data in `ResponsePrepareProposal`, other than a potentially modified +transaction list. The Application may nevertheless choose to perform immediate execution even in +next-block execution mode, however same-block execution mode *requires* immediate execution. + +The long term plan is for the execution model to be set in a new boolean parameter *same_block* in +`ConsensusParams`. Once this parameter is introduced, it **must not** be changed once the +blockchain has started, unless the Application developers *really* know what they are doing. +However, modifying `ConsensusParams` structure cannot be done lightly if we are to +preserve blockchain compatibility. Therefore we need an interim solution until +soft upgrades are specified and implemented in Tendermint. This somewhat *unsafe* +solution consists in Tendermint assuming same-block execution if the Application +fills the above mentioned fields in `ResponsePrepareProposal`. + +## Tendermint proposal timeout + +Immediate execution requires the Application to fully execute the prepared block +before returning from `PrepareProposal`, this means that Tendermint cannot make progress +during the block execution. +This stands on Tendermint's critical path: if the Application takes a long time +executing the block, the default value of *TimeoutPropose* might not be sufficient +to accommodate the long block execution time and non-proposer nodes might time +out and prevote `nil`. The proposal, in this case, will probably be rejected and a new round will be necessary. 
+ +The Application is the best suited to provide a value for *TimeoutPropose* so +that the block execution time upon `PrepareProposal` fits well in the propose +timeout interval. Thus, the Application can adapt the value of *TimeoutPropose* at every height via +`TimeoutParams.Propose`, contained in `ConsensusParams`. + +## Deterministic State-Machine Replication + +[↑ Back to Outline](#outline) + +ABCI++ applications must implement deterministic finite-state machines to be +securely replicated by the Tendermint consensus engine. This means block execution +must be strictly deterministic: given the same +ordered set of transactions, all nodes will compute identical responses, for all +successive `FinalizeBlock` calls. This is critical because the +responses are included in the header of the next block, either via a Merkle root +or directly, so all nodes must agree on exactly what they are. + +For this reason, it is recommended that application state is not exposed to any +external user or process except via the ABCI connections to a consensus engine +like Tendermint Core. The Application must only change its state based on input +from block execution (`FinalizeBlock` calls), and not through +any other kind of request. This is the only way to ensure all nodes see the same +transactions and compute the same results. + +Some Applications may choose to implement immediate execution, which entails executing the blocks +that are about to be proposed (via `PrepareProposal`), and those that the Application is asked to +validate (via `ProcessProposal`). However, the state changes caused by processing those +proposed blocks must never replace the previous state until `FinalizeBlock` confirms +the block decided. + +Additionally, vote extensions or the validation thereof (via `ExtendVote` or +`VerifyVoteExtension`) must *never* have side effects on the current state. +They can only be used when their data is provided in a `RequestPrepareProposal` call. 
+ +If there is some non-determinism in the state machine, consensus will eventually +fail as nodes disagree over the correct values for the block header. The +non-determinism must be fixed and the nodes restarted. + +Sources of non-determinism in applications may include: + +- Hardware failures + - Cosmic rays, overheating, etc. +- Node-dependent state + - Random numbers + - Time +- Underspecification + - Library version changes + - Race conditions + - Floating point numbers + - JSON or protobuf serialization + - Iterating through hash-tables/maps/dictionaries +- External Sources + - Filesystem + - Network calls (eg. some external REST API service) + +See [#56](https://github.com/tendermint/abci/issues/56) for the original discussion. + +Note that some methods (`Query, CheckTx, FinalizeBlock`) return non-deterministic data in the form +of `Info` and `Log` fields. The `Log` is intended for the literal output from the Application's +logger, while the `Info` is any additional info that should be returned. These are the only fields +that are not included in block header computations, so we don't need agreement +on them. All other fields in the `Response*` must be strictly deterministic. + +## Events + +[↑ Back to Outline](#outline) + +Method `FinalizeBlock` includes an `events` field at the top level in its +`Response*`, and one `events` field per transaction included in the block. +Applications may respond to this ABCI++ method with an event list for each executed +transaction, and a general event list for the block itself. +Events allow applications to associate metadata with transactions and blocks. +Events returned via `FinalizeBlock` do not impact Tendermint consensus in any way +and instead exist to power subscriptions and queries of Tendermint state. + +An `Event` contains a `type` and a list of `EventAttributes`, which are key-value +string pairs denoting metadata about what happened during the method's (or transaction's) +execution. 
`Event` values can be used to index transactions and blocks according to what +happened during their execution. + +Each event has a `type` which is meant to categorize the event for a particular +`Response*` or `Tx`. A `Response*` or `Tx` may contain multiple events with duplicate +`type` values, where each distinct entry is meant to categorize attributes for a +particular event. Every key and value in an event's attributes must be UTF-8 +encoded strings along with the event type itself. + +```protobuf +message Event { + string type = 1; + repeated EventAttribute attributes = 2; +} +``` + +The attributes of an `Event` consist of a `key`, a `value`, and an `index` flag. The +index flag notifies the Tendermint indexer to index the attribute. The value of +the `index` flag is non-deterministic and may vary across different nodes in the network. + +```protobuf +message EventAttribute { + bytes key = 1; + bytes value = 2; + bool index = 3; // nondeterministic +} +``` + +Example: + +```go + abci.ResponseFinalizeBlock{ + // ... 
+ Events: []abci.Event{ + { + Type: "validator.provisions", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("balance"), Value: []byte("..."), Index: true}, + }, + }, + { + Type: "validator.provisions", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: false}, + abci.EventAttribute{Key: []byte("balance"), Value: []byte("..."), Index: false}, + }, + }, + { + Type: "validator.slashed", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: false}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("reason"), Value: []byte("..."), Index: true}, + }, + }, + // ... + }, +} +``` + +## Evidence + +[↑ Back to Outline](#outline) + +Tendermint's security model relies on the use of evidences of misbehavior. An evidence is an +irrefutable proof of malicious behavior by a network participant. It is the responsibility of +Tendermint to detect such malicious behavior. When malicious behavior is detected, Tendermint +will gossip evidences of misbehavior to other nodes and commit the evidences to +the chain once they are verified by a subset of validators. These evidences will then be +passed on to the Application through ABCI++. It is the responsibility of the +Application to handle evidence of misbehavior and exercise punishment. + +There are two forms of evidence: Duplicate Vote and Light Client Attack. More +information can be found in either [data structures](../core/data_structures.md) +or [accountability](../light-client/accountability/). 
+ +EvidenceType has the following protobuf format: + +```protobuf +enum EvidenceType { + UNKNOWN = 0; + DUPLICATE_VOTE = 1; + LIGHT_CLIENT_ATTACK = 2; +} +``` + +## Errors + +[↑ Back to Outline](#outline) + +The `Query`, and `CheckTx` methods include a `Code` field in their `Response*`. +Field `Code` is meant to contain an application-specific response code. +A response code of `0` indicates no error. Any other response code +indicates to Tendermint that an error occurred. + +These methods also return a `Codespace` string to Tendermint. This field is +used to disambiguate `Code` values returned by different domains of the +Application. The `Codespace` is a namespace for the `Code`. + +Methods `Echo`, `Info`, and `InitChain` do not return errors. +An error in any of these methods represents a critical issue that Tendermint +has no reasonable way to handle. If there is an error in one +of these methods, the Application must crash to ensure that the error is safely +handled by an operator. + +Method `FinalizeBlock` is a special case. It contains a number of +`Code` and `Codespace` fields as part of type `ExecTxResult`. Each of +these codes reports errors related to the transaction it is attached to. +However, `FinalizeBlock` does not return errors at the top level, so the +same considerations on critical issues made for `Echo`, `Info`, and +`InitChain` also apply here. + +The handling of non-zero response codes by Tendermint is described below. + +### `CheckTx` + +When Tendermint receives a `ResponseCheckTx` with a non-zero `Code`, the associated +transaction will not be added to Tendermint's mempool or it will be removed if +it is already included. + +### `ExecTxResult` (as part of `FinalizeBlock`) + +The `ExecTxResult` type delivers transaction results from the Application to Tendermint. When +Tendermint receives a `ResponseFinalizeBlock` containing an `ExecTxResult` with a non-zero `Code`, +the response code is logged. Past `Code` values can be queried by clients. 
As the transaction was +part of a decided block, the `Code` does not influence Tendermint consensus. + +### `Query` + +When Tendermint receives a `ResponseQuery` with a non-zero `Code`, this code is +returned directly to the client that initiated the query. diff --git a/spec/abci++/abci++_basic_concepts_002_draft.md b/spec/abci++/abci++_basic_concepts_002_draft.md deleted file mode 100644 index a1ad038a51..0000000000 --- a/spec/abci++/abci++_basic_concepts_002_draft.md +++ /dev/null @@ -1,404 +0,0 @@ ---- -order: 1 -title: Overview and basic concepts ---- - -## Outline -- [ABCI++ vs. ABCI](#abci-vs-abci) -- [Methods overview](#methods-overview) - - [Consensus methods](#consensus-methods) - - [Mempool methods](#mempool-methods) - - [Info methods](#info-methods) - - [State-sync methods](#state-sync-methods) -- [Next-block execution vs. same-block execution](#next-block-execution-vs-same-block-execution) - - [Tendermint timeouts](#tendermint-timeouts-in-same-block-execution) -- [Determinism](#determinism) -- [Errors](#errors) -- [Events](#events) -- [Evidence](#evidence) - -# Overview and basic concepts - -## ABCI++ vs. ABCI -[↑ Back to Outline](#outline) - -With ABCI, the application can only act at one phase in consensus, immediately after a block has been finalized. This restriction on the application prevents numerous features for the application, including many scalability improvements that are now better understood than when ABCI was first written. For example, many of the scalability proposals can be boiled down to "Make the miner / block proposers / validators do work, so the network does not have to". This includes optimizations such as tx-level signature aggregation, state transition proofs, etc. Furthermore, many new security properties cannot be achieved in the current paradigm, as the application cannot enforce validators to do more than just finalize txs. This includes features such as threshold cryptography, and guaranteed IBC connection attempts. 
- -ABCI++ overcomes these limitations by allowing the application to intervene at three key places of the block execution. The new interface allows block proposers to perform application-dependent work in a block through the `PrepareProposal` method; validators to perform application-dependent work in a proposed block through the `ProcessProposal` method; and applications to require their validators do more than just validate blocks, e.g., validator guaranteed IBC connection attempts, through the `ExtendVote` and `VerifyVoteExtension` methods. Furthermore, ABCI++ renames {`BeginBlock`, [`DeliverTx`], `EndBlock`} to `FinalizeBlock`, as a simplified way to deliver a decided block to the Application. - -## Methods overview -[↑ Back to Outline](#outline) - -Methods can be classified into four categories: consensus, mempool, info, and state-sync. - -### Consensus/block execution methods - -The first time a new blockchain is started, Tendermint calls -`InitChain`. From then on, method `FinalizeBlock` is executed at the end of each -block, resulting in an updated Application state. -During consensus execution of a block height, before method `FinalizeBlock` is -called, methods `PrepareProposal`, `ProcessProposal`, `ExtendVote`, and -`VerifyVoteExtension` may be called several times. -See [Tendermint's expected behavior](abci++_tmint_expected_behavior_002_draft.md) -for details on the possible call sequences of these methods. - -* [**InitChain:**](./abci++_methods_002_draft.md#initchain) This method initializes the blockchain. Tendermint calls it once upon genesis. - -* [**PrepareProposal:**](./abci++_methods_002_draft.md#prepareproposal) It allows the block proposer to perform application-dependent work in a block before using it as its proposal. This enables, for instance, batch optimizations to a block, which has been empirically demonstrated to be a key component for scaling. 
Method `PrepareProposal` is called every time Tendermint is about to send -a proposal message, but no previous proposal has been locked at Tendermint level. -Tendermint gathers outstanding transactions from the mempool, generates a block header, and uses -them to create a block to propose. Then, it calls `RequestPrepareProposal` -with the newly created proposal, called _raw proposal_. The Application can -make changes to the raw proposal, such as modifying transactions, and returns -the (potentially) modified proposal, called _prepared proposal_ in the -`Response*` call. The logic modifying the raw proposal can be non-deterministic. - -* [**ProcessProposal:**](./abci++_methods_002_draft.md#processproposal) It allows a validator to perform application-dependent work in a proposed block. This enables features such as allowing validators to reject a block according to whether the state machine deems it valid, and changing the block execution pipeline. Tendermint calls it when it receives a proposal and it is not locked on a block. The Application cannot -modify the proposal at this point but can reject it if it realizes it is invalid. -If that is the case, Tendermint will prevote `nil` on the proposal, which has -strong liveness implications for Tendermint. As a general rule, the Application -SHOULD accept a prepared proposal passed via `ProcessProposal`, even if a part of -the proposal is invalid (e.g., an invalid transaction); the Application can -ignore the invalid part of the prepared proposal at block execution time. - -* [**ExtendVote:**](./abci++_methods_002_draft.md#extendvote) It allows applications to force their validators to do more than just validate within consensus. `ExtendVote` allows applications to include non-deterministic data, opaque to Tendermint, to precommit messages (the final round of voting). 
-The data, called _vote extension_, will also be made available to the -application in the next height, along with the vote it is extending, in the rounds -where the local process is the proposer. -If the Application does not have vote extension information to provide, it returns a 0-length byte array as its vote extension. -Tendermint calls `ExtendVote` when is about to send a non-`nil` precommit message. - -* [**VerifyVoteExtension:**](./abci++_methods_002_draft.md#verifyvoteextension) It allows validators to validate the vote extension data attached to a precommit message. If the validation fails, the precommit message will be deemed invalid and ignored -by Tendermint. This has a negative impact on Tendermint's liveness, i.e., if vote extensions repeatedly cannot be verified by correct validators, Tendermint may not be able to finalize a block even if sufficiently many (+2/3) of the validators send precommit votes for that block. Thus, `VerifyVoteExtension` should be used with special care. -As a general rule, an Application that detects an invalid vote extension SHOULD -accept it in `ResponseVerifyVoteExtension` and ignore it in its own logic. Tendermint calls it when -a process receives a precommit message with a (possibly empty) vote extension. - -* [**FinalizeBlock:**](./abci++_methods_002_draft.md#finalizeblock) It delivers a decided block to the Application. The Application must execute the transactions in the block in order and update its state accordingly. Cryptographic commitments to the block and transaction results, via the corresponding -parameters in `ResponseFinalizeBlock`, are included in the header of the next block. Tendermint calls it when a new block is decided. - -### Mempool methods - -* [**CheckTx:**](./abci++_methods_002_draft.md#checktx) This method allows the Application to validate transactions against its current state, e.g., checking signatures and account balances. 
If a transaction passes the validation, then tendermint adds it to its local mempool, discarding it otherwise. Tendermint calls it when it receives a new transaction either coming from an external user or another node. Furthermore, Tendermint can be configured to re-call `CheckTx` on any decided transaction (after `FinalizeBlock`). - -### Info methods - -* [**Info:**](./abci++_methods_002_draft.md#info) Used to sync Tendermint with the Application during a handshake that happens on startup. - -* [**Query:**](./abci++_methods_002_draft.md#query) Clients can use this method to query the Application for information about the application state. - -### State-sync methods - -State sync allows new nodes to rapidly bootstrap by discovering, fetching, and applying -state machine snapshots instead of replaying historical blocks. For more details, see the -[state sync section](../p2p/messages/state-sync.md). - -New nodes will discover and request snapshots from other nodes in the P2P network. -A Tendermint node that receives a request for snapshots from a peer will call -`ListSnapshots` on its Application. The Application returns the list of locally avaiable snapshots. -Note that the list does not contain the actual snapshot but metadata about it: height at which the snapshot was taken, application-specific verification data and more (see [snapshot data type](./abci++_methods_002_draft.md#snapshot) for more details). After receiving a list of available snapshots from a peer, the new node can offer any of the snapshots in the list to its local Application via the `OfferSnapshot` method. The Application can check at this point the validity of the snapshot metadata. - -Snapshots may be quite large and are thus broken into smaller "chunks" that can be -assembled into the whole snapshot. Once the Application accepts a snapshot and -begins restoring it, Tendermint will fetch snapshot "chunks" from existing nodes. 
-The node providing "chunks" will fetch them from its local Application using -the `LoadSnapshotChunk` method. - -As the new node receives "chunks" it will apply them sequentially to the local -application with `ApplySnapshotChunk`. When all chunks have been applied, the -Application's `AppHash` is retrieved via an `Info` query. -To ensure that the sync proceeded correctly, Tendermint compares the local Application's `AppHash` to the `AppHash` stored on the blockchain (verified via -[light client verification](../light-client/verification/README.md)). - -In summary: - -* [**ListSnapshots:**](./abci++_methods_002_draft.md#listsnapshots) Used by nodes to discover available snapshots on peers. - -* [**LoadSnapshotChunk:**](./abci++_methods_002_draft.md#loadsnapshotchunk) Used by Tendermint to retrieve snapshot chunks from the application to send to peers. - -* [**OfferSnapshot:**](./abci++_methods_002_draft.md#offersnapshot) When a node receives a snapshot from a peer, Tendermint uses this method to offer the snapshot to the Application. - -* [**ApplySnapshotChunk:**](./abci++_methods_002_draft.md#applysnapshotchunk) Used by Tendermint to hand snapshot chunks to the Application. - -### Other methods - -Additionally, there is a [**Flush**](./abci++_methods_002_draft.md#flush) method that is called on every connection, -and an [**Echo**](./abci++_methods_002_draft.md#echo) method that is just for debugging. - -More details on managing state across connections can be found in the section on -[ABCI Applications](../abci/apps.md). - -## Next-block execution vs. same-block execution -[↑ Back to Outline](#outline) - -In the original ABCI protocol, the only moment when the Application had access to a -block was after it was decided. 
This led to a block execution model, called _next-block -execution_, where some fields hashed in a block header refer to the execution of the -previous block, namely: - -* the Merkle root of the Application's state -* the transaction results -* the consensus parameter updates -* the validator updates - -With ABCI++, an Application may decide to keep using the next-block execution model, by doing all its processing in `FinalizeBlock`; -however the new methods introduced, `PrepareProposal` and `ProcessProposal` allow -for a new execution model, called _same-block execution_. An Application implementing -this execution model, upon receiving a raw proposal via `RequestPrepareProposal` -and potentially modifying its transaction list, -fully executes the resulting prepared proposal as though it was the decided block. -The results of the block execution are used as follows: - -* The block execution may generate a set of events. The Application should store these events and return them back to Tendermint during the `FinalizeBlock` call if the block is finally decided. -* The Merkle root resulting from executing the prepared proposal is provided in - `ResponsePrepareProposal` and thus refers to the **current block**. Tendermint - will use it in the prepared proposal's header. -* likewise, the transaction results from executing the prepared proposal are - provided in `ResponsePrepareProposal` and refer to the transactions in the - **current block**. Tendermint will use them to calculate the results hash - in the prepared proposal's header. -* The consensus parameter updates and validator updates are also provided in - `ResponsePrepareProposal` and reflect the result of the prepared proposal's - execution. They come into force in height H+1 (as opposed to the H+2 rule - in next-block execution model). 
- -If the Application decides to keep the next-block execution model, it will not -provide any data in `ResponsePrepareProposal`, other than an optionally modified -transaction list. - -In the long term, the execution model will be set in a new boolean parameter -*same_block* in `ConsensusParams`. -It **must not** be changed once the blockchain has started unless the Application -developers _really_ know what they are doing. -However, modifying `ConsensusParams` structure cannot be done lightly if we are to -preserve blockchain compatibility. Therefore we need an interim solution until -soft upgrades are specified and implemented in Tendermint. This somewhat _unsafe_ -solution consists in Tendermint assuming same-block execution if the Application -fills the above mentioned fields in `ResponsePrepareProposal`. - -### Tendermint timeouts in same-block execution - -The new same-block execution mode requires the Application to fully execute the -prepared block at `PrepareProposal` time. This execution is synchronous, so -Tendermint cannot make progress until the Application returns from `PrepareProposal`. -This stands on Tendermint's critical path: if the Application takes a long time -executing the block, the default value of _TimeoutPropose_ might not be sufficient -to accommodate the long block execution time and non-proposer processes might time -out and prevote `nil`, thus starting a further round unnecessarily. - -The Application is the best suited to provide a value for _TimeoutPropose_ so -that the block execution time upon `PrepareProposal` fits well in the propose -timeout interval. - -Currently, the Application can override the value of _TimeoutPropose_ via the -`config.toml` file. In the future, `ConsensusParams` will have an extra field -with the current _TimeoutPropose_ value so that the Application can adapt it at every height. 
- -## Determinism -[↑ Back to Outline](#outline) - -ABCI++ applications must implement deterministic finite-state machines to be -securely replicated by the Tendermint consensus engine. This means block execution -over the Consensus Connection must be strictly deterministic: given the same -ordered set of transactions, all nodes will compute identical responses, for all -successive `FinalizeBlock` calls. This is critical because the -responses are included in the header of the next block, either via a Merkle root -or directly, so all nodes must agree on exactly what they are. - -For this reason, it is recommended that application state is not exposed to any -external user or process except via the ABCI connections to a consensus engine -like Tendermint Core. The Application must only change its state based on input -from block execution (`FinalizeBlock` calls), and not through -any other kind of request. This is the only way to ensure all nodes see the same -transactions and compute the same results. - -Some Applications may choose to execute the blocks that are about to be proposed -(via `PrepareProposal`), or those that the Application is asked to validate -(via `ProcessProposal`). However, the state changes caused by processing those -proposed blocks must never replace the previous state until `FinalizeBlock` confirms -the block decided. - -Additionally, vote extensions or the validation thereof (via `ExtendVote` or -`VerifyVoteExtension`) must _never_ have side effects on the current state. -They can only be used when their data is provided in a `RequestPrepareProposal` call. - -If there is some non-determinism in the state machine, consensus will eventually -fail as nodes disagree over the correct values for the block header. The -non-determinism must be fixed and the nodes restarted. - -Sources of non-determinism in applications may include: - -* Hardware failures - * Cosmic rays, overheating, etc. 
-* Node-dependent state - * Random numbers - * Time -* Underspecification - * Library version changes - * Race conditions - * Floating point numbers - * JSON or protobuf serialization - * Iterating through hash-tables/maps/dictionaries -* External Sources - * Filesystem - * Network calls (eg. some external REST API service) - -See [#56](https://github.com/tendermint/abci/issues/56) for original discussion. - -Note that some methods (`Query, CheckTx, FinalizeBlock`) return -explicitly non-deterministic data in the form of `Info` and `Log` fields. The `Log` is -intended for the literal output from the Application's logger, while the -`Info` is any additional info that should be returned. These are the only fields -that are not included in block header computations, so we don't need agreement -on them. All other fields in the `Response*` must be strictly deterministic. - -## Errors -[↑ Back to Outline](#outline) - -The `Query`, and `CheckTx` methods include a `Code` field in their `Response*`. -The `Code` field is also included in type `TxResult`, used by -method `FinalizeBlock`'s `Response*`. -Field `Code` is meant to contain an application-specific response code. -A response code of `0` indicates no error. Any other response code -indicates to Tendermint that an error occurred. - -These methods also return a `Codespace` string to Tendermint. This field is -used to disambiguate `Code` values returned by different domains of the -Application. The `Codespace` is a namespace for the `Code`. - -Methods `Echo`, `Info`, and `InitChain` do not return errors. -An error in any of these methods represents a critical issue that Tendermint -has no reasonable way to handle. If there is an error in one -of these methods, the Application must crash to ensure that the error is safely -handled by an operator. - -Method `FinalizeBlock` is a special case. It contains a number of -`Code` and `Codespace` fields as part of type `TxResult`. 
Each of -these codes reports errors related to the transaction it is attached to. -However, `FinalizeBlock` does not return errors at the top level, so the -same considerations on critical issues made for `Echo`, `Info`, and -`InitChain` also apply here. - -The handling of non-zero response codes by Tendermint is described below - -### `CheckTx` - -When Tendermint receives a `ResponseCheckTx` with a non-zero `Code`, the associated -transaction will not be added to Tendermint's mempool or it will be removed if -it is already included. - -### `TxResult` (as part of `FinalizeBlock`) - -The `TxResult` type delivers transactions from Tendermint to the Application. -When Tendermint receives a `ResponseFinalizeBlock` containing a `TxResult` -with a non-zero `Code`, the response code is logged. -The transaction was already included in a block, so the `Code` does not influence -Tendermint consensus. - -### `Query` - -When Tendermint receives a `ResponseQuery` with a non-zero `Code`, this code is -returned directly to the client that initiated the query. - -## Events -[↑ Back to Outline](#outline) - -Method `CheckTx` includes an `Events` field in its `Response*`. -Method `FinalizeBlock` includes an `Events` field at the top level in its -`Response*`, and one `events` field per transaction included in the block. -Applications may respond to these ABCI++ methods with a set of events. -Events allow applications to associate metadata about ABCI++ method execution with the -transactions and blocks this metadata relates to. -Events returned via these ABCI++ methods do not impact Tendermint consensus in any way -and instead exist to power subscriptions and queries of Tendermint state. - -An `Event` contains a `type` and a list of `EventAttributes`, which are key-value -string pairs denoting metadata about what happened during the method's (or transaction's) -execution. `Event` values can be used to index transactions and blocks according to what -happened during their execution. 
- -Each event has a `type` which is meant to categorize the event for a particular -`Response*` or `Tx`. A `Response*` or `Tx` may contain multiple events with duplicate -`type` values, where each distinct entry is meant to categorize attributes for a -particular event. Every key and value in an event's attributes must be UTF-8 -encoded strings along with the event type itself. - -```protobuf -message Event { - string type = 1; - repeated EventAttribute attributes = 2; -} -``` - -The attributes of an `Event` consist of a `key`, a `value`, and an `index` flag. The -index flag notifies the Tendermint indexer to index the attribute. The value of -the `index` flag is non-deterministic and may vary across different nodes in the network. - -```protobuf -message EventAttribute { - bytes key = 1; - bytes value = 2; - bool index = 3; // nondeterministic -} -``` - -Example: - -```go - abci.ResponseCheckTx{ - // ... - Events: []abci.Event{ - { - Type: "validator.provisions", - Attributes: []abci.EventAttribute{ - abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: true}, - abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: true}, - abci.EventAttribute{Key: []byte("balance"), Value: []byte("..."), Index: true}, - }, - }, - { - Type: "validator.provisions", - Attributes: []abci.EventAttribute{ - abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: true}, - abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: false}, - abci.EventAttribute{Key: []byte("balance"), Value: []byte("..."), Index: false}, - }, - }, - { - Type: "validator.slashed", - Attributes: []abci.EventAttribute{ - abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: false}, - abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: true}, - abci.EventAttribute{Key: []byte("reason"), Value: []byte("..."), Index: true}, - }, - }, - // ... 
- }, -} -``` - -## Evidence -[↑ Back to Outline](#outline) - -Tendermint's security model relies on the use of "evidence". Evidence is proof of -malicious behavior by a network participant. It is the responsibility of Tendermint -to detect such malicious behavior. When malicious behavior is detected, Tendermint -will gossip evidence of the behavior to other nodes and commit the evidence to -the chain once it is verified by all validators. This evidence will then be -passed on to the Application through ABCI++. It is the responsibility of the -Application to handle the evidence and exercise punishment. - -EvidenceType has the following protobuf format: - -```protobuf -enum EvidenceType { - UNKNOWN = 0; - DUPLICATE_VOTE = 1; - LIGHT_CLIENT_ATTACK = 2; -} -``` - -There are two forms of evidence: Duplicate Vote and Light Client Attack. More -information can be found in either [data structures](../core/data_structures.md) -or [accountability](../light-client/accountability/) - diff --git a/spec/abci++/abci++_client_server_002_draft.md b/spec/abci++/abci++_client_server.md similarity index 91% rename from spec/abci++/abci++_client_server_002_draft.md rename to spec/abci++/abci++_client_server.md index f26ee8cd51..652652dc9f 100644 --- a/spec/abci++/abci++_client_server_002_draft.md +++ b/spec/abci++/abci++_client_server.md @@ -9,10 +9,10 @@ This section is for those looking to implement their own ABCI Server, perhaps in a new programming language. You are expected to have read all previous sections of ABCI++ specification, namely -[Basic Concepts](./abci%2B%2B_basic_concepts_002_draft.md), -[Methods](./abci%2B%2B_methods_002_draft.md), -[Application Requirements](./abci%2B%2B_app_requirements_002_draft.md), and -[Expected Behavior](./abci%2B%2B_tmint_expected_behavior_002_draft.md). 
+[Basic Concepts](./abci%2B%2B_basic_concepts.md), +[Methods](./abci%2B%2B_methods.md), +[Application Requirements](./abci%2B%2B_app_requirements.md), and +[Expected Behavior](./abci%2B%2B_tmint_expected_behavior.md). ## Message Protocol and Synchrony @@ -25,7 +25,7 @@ or custom protobuf types. For more details on protobuf, see the [documentation](https://developers.google.com/protocol-buffers/docs/overview). As of v0.36 requests are synchronous. For each of ABCI++'s four connections (see -[Connections](./abci%2B%2B_app_requirements_002_draft.md)), when Tendermint issues a request to the +[Connections](./abci%2B%2B_app_requirements.md)), when Tendermint issues a request to the Application, it will wait for the response before continuing execution. As a side effect, requests and responses are ordered for each connection, but not necessarily across connections. diff --git a/spec/abci++/abci++_methods_002_draft.md b/spec/abci++/abci++_methods.md similarity index 71% rename from spec/abci++/abci++_methods_002_draft.md rename to spec/abci++/abci++_methods.md index 4eb1bb295e..9d33652dd5 100644 --- a/spec/abci++/abci++_methods_002_draft.md +++ b/spec/abci++/abci++_methods.md @@ -38,22 +38,21 @@ title: Methods * **Response**: - | Name | Type | Description | Field Number | - |---------------------|--------|--------------------------------------------------|--------------| - | data | string | Some arbitrary information | 1 | - | version | string | The application software semantic version | 2 | - | app_version | uint64 | The application protocol version | 3 | - | last_block_height | int64 | Latest block for which the app has called Commit | 4 | - | last_block_app_hash | bytes | Latest result of Commit | 5 | + | Name | Type | Description | Field Number | + |---------------------|--------|-----------------------------------------------------|--------------| + | data | string | Some arbitrary information | 1 | + | version | string | The application software semantic version | 
2 | + | app_version | uint64 | The application protocol version | 3 | + | last_block_height | int64 | Latest height for which the app persisted its state | 4 | + | last_block_app_hash | bytes | Latest AppHash returned by `FinalizeBlock` | 5 | * **Usage**: * Return information about the application state. * Used to sync Tendermint with the application during a handshake - that happens on startup. + that happens on startup or on recovery. * The returned `app_version` will be included in the Header of every block. * Tendermint expects `last_block_app_hash` and `last_block_height` to - be updated during `Commit`, ensuring that `Commit` is never - called twice for the same block height. + be updated during `FinalizeBlock` and persisted during `Commit`. > Note: Semantic version is a reference to [semantic versioning](https://semver.org/). Semantic versions in info will be displayed as X.X.x. @@ -61,22 +60,22 @@ title: Methods * **Request**: - | Name | Type | Description | Field Number | - |------------------|--------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------|--------------| - | time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Genesis time | 1 | - | chain_id | string | ID of the blockchain. | 2 | - | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters. | 3 | - | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial genesis validators, sorted by voting power. | 4 | - | app_state_bytes | bytes | Serialized initial application state. JSON bytes. | 5 | - | initial_height | int64 | Height of the initial block (typically `1`). 
| 6 | + | Name | Type | Description | Field Number | + |------------------|-------------------------------------------------|-----------------------------------------------------|--------------| + | time | [google.protobuf.Timestamp][protobuf-timestamp] | Genesis time | 1 | + | chain_id | string | ID of the blockchain. | 2 | + | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters. | 3 | + | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial genesis validators, sorted by voting power. | 4 | + | app_state_bytes | bytes | Serialized initial application state. JSON bytes. | 5 | + | initial_height | int64 | Height of the initial block (typically `1`). | 6 | * **Response**: - | Name | Type | Description | Field Number | - |------------------|----------------------------------------------|-------------------------------------------------|--------------| + | Name | Type | Description | Field Number | + |------------------|----------------------------------------------|--------------------------------------------------|--------------| | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters (optional) | 1 | - | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial validator set (optional). | 2 | - | app_hash | bytes | Initial application hash. | 3 | + | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial validator set (optional). | 2 | + | app_hash | bytes | Initial application hash. | 3 | * **Usage**: * Called once upon genesis. @@ -84,10 +83,10 @@ title: Methods * If `ResponseInitChain.Validators` is not empty, it will be the initial validator set (regardless of what is in `RequestInitChain.Validators`). * This allows the app to decide if it wants to accept the initial validator - set proposed by tendermint (ie. in the genesis file), or if it wants to use + set proposed by Tendermint (ie. 
in the genesis file), or if it wants to use a different one (perhaps computed based on some application specific information in the genesis file). - * Both `ResponseInitChain.Validators` and `ResponseInitChain.Validators` are [ValidatorUpdate](#validatorupdate) structs. + * Both `RequestInitChain.Validators` and `ResponseInitChain.Validators` are [ValidatorUpdate](#validatorupdate) structs. So, technically, they both are _updating_ the set of validators from the empty set. ### Query @@ -97,7 +96,7 @@ title: Methods | Name | Type | Description | Field Number | |--------|--------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| | data | bytes | Raw query bytes. Can be used with or in lieu of Path. | 1 | - | path | string | Path field of the request URI. Can be used with or in lieu of `data`. Apps MUST interpret `/store` as a query by key on the underlying store. The key SHOULD be specified in the `data` field. Apps SHOULD allow queries over specific types like `/accounts/...` or `/votes/...` | 2 | + | path | string | Path field of the request URI. Can be used with or in lieu of `data`. Apps MUST interpret `/store` as a query by key on the underlying store. The key SHOULD be specified in the `data` field. Apps SHOULD allow queries over specific types like `/accounts/...` or `/votes/...` | 2 | | height | int64 | The block height for which you want the query (default=0 returns data for the latest committed block). 
Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1 | 3 | | prove | bool | Return Merkle proof with response if possible | 4 | @@ -145,15 +144,40 @@ title: Methods * Technically optional - not involved in processing blocks. * Guardian of the mempool: every node runs `CheckTx` before letting a - transaction into its local mempool. + transaction into its local mempool. * The transaction may come from an external user or another node * `CheckTx` validates the transaction against the current state of the application, - for example, checking signatures and account balances, but does not apply any - of the state changes described in the transaction. - not running code in a virtual machine. - * Transactions where `ResponseCheckTx.Code != 0` will be rejected - they will not be broadcast to - other nodes or included in a proposal block. - * Tendermint attributes no other value to the response code + for example, checking signatures and account balances, but does not apply any + of the state changes described in the transaction. + * Transactions where `ResponseCheckTx.Code != 0` will be rejected - they will not be broadcast + to other nodes or included in a proposal block. + Tendermint attributes no other value to the response code. + +### Commit + +#### Parameters and Types + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|-------|-------------|--------------| + + Commit signals the application to persist application state. It takes no parameters. + +* **Response**: + + | Name | Type | Description | Field Number | + |---------------|-------|------------------------------------------------------------------------|--------------| + | retain_height | int64 | Blocks below this height may be removed. Defaults to `0` (retain all). | 3 | + +* **Usage**: + + * Signal the Application to persist the application state. 
+ Application is expected to persist its state at the end of this call, before calling `ResponseCommit`. + * Use `ResponseCommit.retain_height` with caution! If all nodes in the network remove historical + blocks then this data is permanently lost, and no new nodes will be able to join the network and + bootstrap. Historical blocks may also be required for other purposes, e.g. auditing, replay of + non-persisted heights, light client verification, and so on. ### ListSnapshots @@ -269,7 +293,7 @@ title: Methods `Snapshot.Metadata` and/or incrementally verifying contents against `AppHash`. * When all chunks have been accepted, Tendermint will make an ABCI `Info` call to verify that `LastBlockAppHash` and `LastBlockHeight` matches the expected values, and record the - `AppVersion` in the node state. It then switches to fast sync or consensus and joins the + `AppVersion` in the node state. It then switches to block sync or consensus and joins the network. * If Tendermint is unable to retrieve the next chunk after some time (e.g. because no suitable peers are available), it will reject the snapshot and try a different one via `OfferSnapshot`. @@ -283,16 +307,16 @@ title: Methods * **Request**: - | Name | Type | Description | Field Number | - |-------------------------|---------------------------------------------|------------------------------------------------------------------------------------------------------------------|--------------| - | max_tx_bytes | int64 | Currently configured maximum size in bytes taken by the modified transactions. | 1 | - | txs | repeated bytes | Preliminary list of transactions that have been picked as part of the block to propose. | 2 | - | local_last_commit | [ExtendedCommitInfo](#extendedcommitinfo) | Info about the last commit, obtained locally from Tendermint's data structures. | 3 | - | byzantine_validators | repeated [Misbehavior](#misbehavior) | List of information about validators that acted incorrectly. 
| 4 | - | height | int64 | The height of the block that will be proposed. | 5 | - | time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Timestamp of the block that that will be proposed. | 6 | - | next_validators_hash | bytes | Merkle root of the next validator set. | 7 | - | proposer_address | bytes | [Address](../core/data_structures.md#address) of the validator that is creating the proposal. | 8 | + | Name | Type | Description | Field Number | + |----------------------|-------------------------------------------------|-----------------------------------------------------------------------------------------------|--------------| + | max_tx_bytes | int64 | Currently configured maximum size in bytes taken by the modified transactions. | 1 | + | txs | repeated bytes | Preliminary list of transactions that have been picked as part of the block to propose. | 2 | + | local_last_commit | [ExtendedCommitInfo](#extendedcommitinfo) | Info about the last commit, obtained locally from Tendermint's data structures. | 3 | + | misbehavior | repeated [Misbehavior](#misbehavior) | List of information about validators that misbehaved. | 4 | + | height | int64 | The height of the block that will be proposed. | 5 | + | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the block that that will be proposed. | 6 | + | next_validators_hash | bytes | Merkle root of the next validator set. | 7 | + | proposer_address | bytes | [Address](../core/data_structures.md#address) of the validator that is creating the proposal. | 8 | * **Response**: @@ -302,92 +326,128 @@ title: Methods | app_hash | bytes | The Merkle root hash of the application state. 
| 3 | | tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions | 4 | | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 5 | - | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 6 | + | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to gas, size, and other consensus-related parameters. | 6 | * **Usage**: - * The first six parameters of `RequestPrepareProposal` are the same as `RequestProcessProposal` + * `RequestPrepareProposal`'s parameters `txs`, `misbehavior`, `height`, `time`, + `next_validators_hash`, and `proposer_address` are the same as in `RequestProcessProposal` and `RequestFinalizeBlock`. - * The height and time values match the values from the header of the proposed block. - * `RequestPrepareProposal` contains a preliminary set of transactions `txs` that Tendermint considers to be a good block proposal, called _raw proposal_. The Application can modify this set via `ResponsePrepareProposal.tx_records` (see [TxRecord](#txrecord)). - * The Application _can_ reorder, remove or add transactions to the raw proposal. Let `tx` be a transaction in `txs`: - * If the Application considers that `tx` should not be proposed in this block, e.g., there are other transactions with higher priority, then it should not include it in `tx_records`. In this case, Tendermint won't remove `tx` from the mempool. The Application should be extra-careful, as abusing this feature may cause transactions to stay forever in the mempool. - * If the Application considers that a `tx` should not be included in the proposal and removed from the mempool, then the Application should include it in `tx_records` and _mark_ it as `REMOVED`. In this case, Tendermint will remove `tx` from the mempool. 
- * If the Application wants to add a new transaction, then the Application should include it in `tx_records` and _mark_ it as `ADD`. In this case, Tendermint will add it to the mempool. - * The Application should be aware that removing and adding transactions may compromise _traceability_. - > Consider the following example: the Application transforms a client-submitted transaction `t1` into a second transaction `t2`, i.e., the Application asks Tendermint to remove `t1` and add `t2` to the mempool. If a client wants to eventually check what happened to `t1`, it will discover that `t_1` is not in the mempool or in a committed block, getting the wrong idea that `t_1` did not make it into a block. Note that `t_2` _will be_ in a committed block, but unless the Application tracks this information, no component will be aware of it. Thus, if the Application wants traceability, it is its responsability to support it. For instance, the Application could attach to a transformed transaction a list with the hashes of the transactions it derives from. - * Tendermint MAY include a list of transactions in `RequestPrepareProposal.txs` whose total size in bytes exceeds `RequestPrepareProposal.max_tx_bytes`. - Therefore, if the size of `RequestPrepareProposal.txs` is greater than `RequestPrepareProposal.max_tx_bytes`, the Application MUST make sure that the - `RequestPrepareProposal.max_tx_bytes` limit is respected by those transaction records returned in `ResponsePrepareProposal.tx_records` that are marked as `UNMODIFIED` or `ADDED`. - * In same-block execution mode, the Application must provide values for `ResponsePrepareProposal.app_hash`, - `ResponsePrepareProposal.tx_results`, `ResponsePrepareProposal.validator_updates`, and + * `RequestPrepareProposal.local_last_commit` is a set of the precommit votes that allowed the + decision of the previous block, together with their corresponding vote extensions. 
+ * The `height`, `time`, and `proposer_address` values match the values from the header of the + proposed block. + * `RequestPrepareProposal` contains a preliminary set of transactions `txs` that Tendermint + retrieved from the mempool, called _raw proposal_. The Application can modify this + set via `ResponsePrepareProposal.tx_records` (see [TxRecord](#txrecord)). + * The Application _can_ modify the raw proposal: it can reorder, remove or add transactions. + Let `tx` be a transaction in `txs`: + * If the Application considers that `tx` should not be proposed in this block, e.g., + there are other transactions with higher priority, then it should not include it in + `tx_records`. In this case, Tendermint will not remove `tx` from the mempool. The + Application should be extra-careful, as abusing this feature may cause transactions + to stay much longer than needed in the mempool. + * If the Application considers that `tx` should not be included in the proposal and + removed from the mempool, then the Application should include it in `tx_records` and + _mark_ it as `REMOVED`. In this case, Tendermint will remove `tx` from the mempool. + * If the Application wants to add a new transaction to the proposed block, then the + Application includes it in `tx_records` and _marks_ it as `ADDED`. In this case, Tendermint + will also add the transaction to the mempool. + * The Application should be aware that removing and adding transactions may compromise + _traceability_. + > Consider the following example: the Application transforms a client-submitted + transaction `t1` into a second transaction `t2`, i.e., the Application asks Tendermint + to remove `t1` and add `t2` to the mempool. If a client wants to eventually check what + happened to `t1`, it will discover that `t1` is neither in the mempool nor in a + committed block, getting the wrong idea that `t1` did not make it into a block. 
Note + that `t2` _will be_ in a committed block, but unless the Application tracks this + information, no component will be aware of it. Thus, if the Application wants + traceability, it is its responsibility to support it. For instance, the Application + could attach to a transformed transaction a list with the hashes of the transactions it + derives from. + * Tendermint MAY include a list of transactions in `RequestPrepareProposal.txs` whose total + size in bytes exceeds `RequestPrepareProposal.max_tx_bytes`. + Therefore, if the size of `RequestPrepareProposal.txs` is greater than + `RequestPrepareProposal.max_tx_bytes`, the Application MUST remove transactions to ensure + that the `RequestPrepareProposal.max_tx_bytes` limit is respected by those transaction + records returned in `ResponsePrepareProposal.tx_records` that are marked as `UNMODIFIED` or + `ADDED`. + * In same-block execution mode, the Application must provide values for + `ResponsePrepareProposal.app_hash`, `ResponsePrepareProposal.tx_results`, + `ResponsePrepareProposal.validator_updates`, and `ResponsePrepareProposal.consensus_param_updates`, as a result of fully executing the block. * The values for `ResponsePrepareProposal.validator_updates`, or `ResponsePrepareProposal.consensus_param_updates` may be empty. In this case, Tendermint will keep the current values. * `ResponsePrepareProposal.validator_updates`, triggered by block `H`, affect validation for blocks `H+1`, and `H+2`. Heights following a validator update are affected in the following way: - * `H`: `NextValidatorsHash` includes the new `validator_updates` value. - * `H+1`: The validator set change takes effect and `ValidatorsHash` is updated. - * `H+2`: `local_last_commit` now includes the altered validator set. + * Height `H`: `NextValidatorsHash` includes the new `validator_updates` value. + * Height `H+1`: The validator set change takes effect and `ValidatorsHash` is updated.
+ * Height `H+2`: `*_last_commit` fields in `PrepareProposal`, `ProcessProposal`, and + `FinalizeBlock` now include the altered validator set. * `ResponseFinalizeBlock.consensus_param_updates` returned for block `H` apply to the consensus params for block `H+1` even if the change is agreed in block `H`. For more information on the consensus parameters, - see the [application spec entry on consensus parameters](../abci/apps.md#consensus-parameters). - * It is the responsibility of the Application to set the right value for _TimeoutPropose_ so that + see the [consensus parameters](./abci%2B%2B_app_requirements.md#consensus-parameters) + section. + * It is the Application's responsibility to set the right value for _TimeoutPropose_ so that the (synchronous) execution of the block does not cause other processes to prevote `nil` because their propose timeout goes off. - * In next-block execution mode, Tendermint will ignore parameters `ResponsePrepareProposal.tx_results`, + * In next-block execution mode, Tendermint will ignore parameters + `ResponsePrepareProposal.app_hash`, `ResponsePrepareProposal.tx_results`, `ResponsePrepareProposal.validator_updates`, and `ResponsePrepareProposal.consensus_param_updates`. - * As a result of executing the prepared proposal, the Application may produce header events or transaction events. + * As a result of executing the prepared proposal, the Application may produce block events or transaction events. The Application must keep those events until a block is decided and then pass them on to Tendermint via `ResponseFinalizeBlock`. - * Likewise, in next-block execution mode, the Application must keep all responses to executing transactions - until it can call `ResponseFinalizeBlock`. + * Likewise, in next-block execution mode, the Application must keep all responses to executing + transactions until it can call `ResponseFinalizeBlock`. 
* As a sanity check, Tendermint will check the returned parameters for validity if the Application modified them. In particular, `ResponsePrepareProposal.tx_records` will be deemed invalid if * There is a duplicate transaction in the list. - * A new or modified transaction is marked as `UNMODIFIED` or `REMOVED`. - * An unmodified transaction is marked as `ADDED`. + * A new transaction is marked as `UNMODIFIED` or `REMOVED`. + * An existing transaction is marked as `ADDED`. * A transaction is marked as `UNKNOWN`. - * If Tendermint fails to validate the `ResponsePrepareProposal`, Tendermint will assume the application is faulty and crash. + * If Tendermint fails to validate the `ResponsePrepareProposal`, Tendermint will assume the + Application is faulty and crash. * The implementation of `PrepareProposal` can be non-deterministic. -#### When does Tendermint call it? +#### When does Tendermint call `PrepareProposal`? When a validator _p_ enters Tendermint consensus round _r_, height _h_, in which _p_ is the proposer, and _p_'s _validValue_ is `nil`: -1. _p_'s Tendermint collects outstanding transactions from the mempool - * The transactions will be collected in order of priority - * Let $C$ the list of currently collected transactions - * The collection stops when any of the following conditions are met - * the mempool is empty - * the total size of transactions $\in C$ is greater than or equal to `consensusParams.block.max_bytes` - * the sum of `GasWanted` field of transactions $\in C$ is greater than or equal to - `consensusParams.block.max_gas` +1. Tendermint collects outstanding transactions from _p_'s mempool + * the transactions will be collected in order of priority * _p_'s Tendermint creates a block header. -2. _p_'s Tendermint calls `RequestPrepareProposal` with the newly generated block. - The call is synchronous: Tendermint's execution will block until the Application returns from the call. -3. 
The Application checks the block (hashes, transactions, commit info, misbehavior). Besides, - * in same-block execution mode, the Application can (and should) provide `ResponsePrepareProposal.app_hash`, - `ResponsePrepareProposal.validator_updates`, or +2. _p_'s Tendermint calls `RequestPrepareProposal` with the newly generated block, the local + commit of the previous height (with vote extensions), and any outstanding evidence of + misbehavior. The call is synchronous: Tendermint's execution will block until the Application + returns from the call. +3. The Application uses the information received (transactions, commit info, misbehavior, time) to + (potentially) modify the proposal. + * in same-block execution mode, the Application fully executes the block and provides values + for `ResponsePrepareProposal.app_hash`, `ResponsePrepareProposal.tx_results`, + `ResponsePrepareProposal.validator_updates`, and `ResponsePrepareProposal.consensus_param_updates`. - * in "next-block execution" mode, _p_'s Tendermint will ignore the values for `ResponsePrepareProposal.app_hash`, - `ResponsePrepareProposal.validator_updates`, and `ResponsePrepareProposal.consensus_param_updates`. - * in both modes, the Application can manipulate transactions + * in next-block execution mode, _p_'s Tendermint will ignore the values for + `ResponsePrepareProposal.app_hash`, `ResponsePrepareProposal.tx_results`, + `ResponsePrepareProposal.validator_updates`, and + `ResponsePrepareProposal.consensus_param_updates`. 
+ * in both modes, the Application can manipulate transactions: * leave transactions untouched - `TxAction = UNMODIFIED` - * add new transactions directly to the proposal - `TxAction = ADDED` - * remove transactions (invalid) from the proposal and from the mempool - `TxAction = REMOVED` + * add new transactions (not present initially) to the proposal - `TxAction = ADDED` + * remove (invalid) transactions from the proposal and from the mempool - `TxAction = REMOVED` * remove transactions from the proposal but not from the mempool (effectively _delaying_ them) - the - Application removes the transaction from the list - * modify transactions (e.g. aggregate them) - `TxAction = ADDED` followed by `TxAction = REMOVED`. As explained above, this compromises client traceability, unless it is implemented at the Application level. + Application does not include the transaction in `ResponsePrepareProposal.tx_records` + * modify transactions (e.g. aggregate them) - `TxAction = ADDED` followed by + `TxAction = REMOVED`. As explained above, this compromises client traceability, unless + it is implemented at the Application level. * reorder transactions - the Application reorders transactions in the list -4. If the block is modified, the Application sets `ResponsePrepareProposal.modified` to true, - and includes the modified block in the return parameters (see the rules in section _Usage_). - The Application returns from the call. +4. The Application includes the transaction list (whether modified or not) in the return parameters + (see the rules in section _Usage_), and returns from the call. 5. _p_'s Tendermint uses the (possibly) modified block as _p_'s proposal in round _r_, height _h_. -Note that, if _p_ has a non-`nil` _validValue_, Tendermint will use it as proposal and will not call `RequestPrepareProposal`. +Note that, if _p_ has a non-`nil` _validValue_ in round _r_, height _h_, Tendermint will use it as +proposal and will not call `RequestPrepareProposal`. 
### ProcessProposal @@ -395,16 +455,16 @@ Note that, if _p_ has a non-`nil` _validValue_, Tendermint will use it as propos * **Request**: - | Name | Type | Description | Field Number | - |----------------------|---------------------------------------------|----------------------------------------------------------------------------------------------------------------|--------------| - | txs | repeated bytes | List of transactions that have been picked as part of the proposed block. | 1 | - | proposed_last_commit | [CommitInfo](#commitinfo) | Info about the last commit, obtained from the information in the proposed block. | 2 | - | byzantine_validators | repeated [Misbehavior](#misbehavior) | List of information about validators that acted incorrectly. | 3 | - | hash | bytes | The block header's hash of the proposed block. | 4 | - | height | int64 | The height of the proposed block. | 5 | - | time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Timestamp included in the proposed block. | 6 | - | next_validators_hash | bytes | Merkle root of the next validator set. | 7 | - | proposer_address | bytes | [Address](../core/data_structures.md#address) of the validator that created the proposal. | 8 | + | Name | Type | Description | Field Number | + |----------------------|-------------------------------------------------|-------------------------------------------------------------------------------------------|--------------| + | txs | repeated bytes | List of transactions of the proposed block. | 1 | + | proposed_last_commit | [CommitInfo](#commitinfo) | Info about the last commit, obtained from the information in the proposed block. | 2 | + | misbehavior | repeated [Misbehavior](#misbehavior) | List of information about validators that misbehaved. | 3 | + | hash | bytes | The hash of the proposed block. | 4 | + | height | int64 | The height of the proposed block. 
| 5 | + | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the proposed block. | 6 | + | next_validators_hash | bytes | Merkle root of the next validator set. | 7 | + | proposer_address | bytes | [Address](../core/data_structures.md#address) of the validator that created the proposal. | 8 | * **Response**: @@ -414,14 +474,19 @@ Note that, if _p_ has a non-`nil` _validValue_, Tendermint will use it as propos | app_hash | bytes | The Merkle root hash of the application state. | 2 | | tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions. | 3 | | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 4 | - | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 5 | + | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to gas, size, and other consensus-related parameters. | 5 | * **Usage**: - * Contains fields from the proposed block. - * The Application may fully execute the block as though it was handling `RequestFinalizeBlock`. - However, any resulting state changes must be kept as _candidate state_, - and the Application should be ready to backtrack/discard it in case the decided block is different. - * The height and timestamp values match the values from the header of the proposed block. + * Contains all information on the proposed block needed to fully execute it. + * The Application may fully execute the block as though it was handling + `RequestFinalizeBlock`. + * However, any resulting state changes must be kept as _candidate state_, + and the Application should be ready to discard it in case another block is decided. + * `RequestProcessProposal` is also called at the proposer of a round. 
The reason for this is to + inform the Application of the block header's hash, which cannot be done at `PrepareProposal` + time. In this case, the call to `RequestProcessProposal` occurs right after the call to + `RequestPrepareProposal`. + * The height and time values match the values from the header of the proposed block. * If `ResponseProcessProposal.status` is `REJECT`, Tendermint assumes the proposal received is not valid. * In same-block execution mode, the Application is required to fully execute the block and provide values @@ -430,35 +495,39 @@ Note that, if _p_ has a non-`nil` _validValue_, Tendermint will use it as propos so that Tendermint can then verify the hashes in the block's header are correct. If the hashes mismatch, Tendermint will reject the block even if `ResponseProcessProposal.status` was set to `ACCEPT`. - * In next-block execution mode, the Application should *not* provide values for parameters + * In next-block execution mode, the Application should _not_ provide values for parameters `ResponseProcessProposal.app_hash`, `ResponseProcessProposal.tx_results`, `ResponseProcessProposal.validator_updates`, and `ResponseProcessProposal.consensus_param_updates`. * The implementation of `ProcessProposal` MUST be deterministic. Moreover, the value of `ResponseProcessProposal.status` MUST **exclusively** depend on the parameters passed in the call to `RequestProcessProposal`, and the last committed Application state - (see [Requirements](abci++_app_requirements_002_draft.md) section). + (see [Requirements](./abci++_app_requirements.md) section). * Moreover, application implementors SHOULD always set `ResponseProcessProposal.status` to `ACCEPT`, unless they _really_ know what the potential liveness implications of returning `REJECT` are. -#### When does Tendermint call it? +#### When does Tendermint call `ProcessProposal`? When a validator _p_ enters Tendermint consensus round _r_, height _h_, in which _q_ is the proposer (possibly _p_ = _q_): 1. 
_p_ sets up timer `ProposeTimeout`. 2. If _p_ is the proposer, _p_ executes steps 1-6 in [PrepareProposal](#prepareproposal). -3. Upon reception of Proposal message (which contains the header) for round _r_, height _h_ from _q_, _p_'s Tendermint verifies the block header. -4. Upon reception of Proposal message, along with all the block parts, for round _r_, height _h_ from _q_, _p_'s Tendermint follows its algorithm - to check whether it should prevote for the block just received, or `nil` -5. If Tendermint should prevote for the block just received +3. Upon reception of Proposal message (which contains the header) for round _r_, height _h_ from + _q_, _p_'s Tendermint verifies the block header. +4. Upon reception of Proposal message, along with all the block parts, for round _r_, height _h_ + from _q_, _p_'s Tendermint follows its algorithm to check whether it should prevote for the + proposed block, or `nil`. +5. If Tendermint should prevote for the proposed block: 1. Tendermint calls `RequestProcessProposal` with the block. The call is synchronous. - 2. The Application checks/processes the proposed block, which is read-only, and returns true (_accept_) or false (_reject_) in `ResponseProcessProposal.accept`. + 2. The Application checks/processes the proposed block, which is read-only, and returns + `ACCEPT` or `REJECT` in the `ResponseProcessProposal.status` field. * The Application, depending on its needs, may call `ResponseProcessProposal` - * either after it has completely processed the block (the simpler case), - * or immediately (after doing some basic checks), and process the block asynchronously. In this case the Application will - not be able to reject the block, or force prevote/precommit `nil` afterwards. + * either after it has completely processed the block (immediate execution), + * or after doing some basic checks, and process the block asynchronously. 
In this case the + Application will not be able to reject the block, or force prevote/precommit `nil` + afterwards. 3. If the returned value is - * _accept_, Tendermint prevotes on this proposal for round _r_, height _h_. - * _reject_, Tendermint prevotes `nil`. + * `ACCEPT`: Tendermint prevotes on this proposal for round _r_, height _h_. + * `REJECT`: Tendermint prevotes `nil`. ### ExtendVote @@ -473,20 +542,21 @@ When a validator _p_ enters Tendermint consensus round _r_, height _h_, in which * **Response**: - | Name | Type | Description | Field Number | - |-------------------|-------|-----------------------------------------------|--------------| - | vote_extension | bytes | Optional information signed by by Tendermint. | 1 | + | Name | Type | Description | Field Number | + |-------------------|-------|---------------------------------------------------------|--------------| + | vote_extension | bytes | Information signed by Tendermint. Can have 0 length.    | 1 | * **Usage**: - * `ResponseExtendVote.vote_extension` is optional information that, if present, will be signed by Tendermint and - attached to the Precommit message. - * `RequestExtendVote.hash` corresponds to the hash of a proposed block that was made available to the application - in a previous call to `ProcessProposal` or `PrepareProposal` for the current height. + * `ResponseExtendVote.vote_extension` is application-generated information that will be signed + by Tendermint and attached to the Precommit message. + * The Application may choose to use an empty vote extension (0 length). + * `RequestExtendVote.hash` corresponds to the hash of a proposed block that was made available + to the Application in a previous call to `ProcessProposal` for the current height. * `ResponseExtendVote.vote_extension` will only be attached to a non-`nil` Precommit message. If Tendermint is to precommit `nil`, it will not call `RequestExtendVote`.
* The Application logic that creates the extension can be non-deterministic. -#### When does Tendermint call it? +#### When does Tendermint call `ExtendVote`? When a validator _p_ is in Tendermint consensus state _prevote_ of round _r_, height _h_, in which _q_ is the proposer; and _p_ has received @@ -497,7 +567,7 @@ then _p_'s Tendermint locks _v_ and sends a Precommit message in the following 1. _p_'s Tendermint sets _lockedValue_ and _validValue_ to _v_, and sets _lockedRound_ and _validRound_ to _r_ 2. _p_'s Tendermint calls `RequestExtendVote` with _id(v)_ (`RequestExtendVote.hash`). The call is synchronous. -3. The Application optionally returns an array of bytes, `ResponseExtendVote.extension`, which is not interpreted by Tendermint. +3. The Application returns an array of bytes, `ResponseExtendVote.extension`, which is not interpreted by Tendermint. 4. _p_'s Tendermint includes `ResponseExtendVote.extension` in a field of type [CanonicalVoteExtension](#canonicalvoteextension), it then populates the other fields in [CanonicalVoteExtension](#canonicalvoteextension), and signs the populated data structure. @@ -516,12 +586,12 @@ a [CanonicalVoteExtension](#canonicalvoteextension) field in the `precommit nil` * **Request**: - | Name | Type | Description | Field Number | - |-------------------|-------|------------------------------------------------------------------------------------------|--------------| - | hash | bytes | The header hash of the propsed block that the vote extension refers to. | 1 | - | validator_address | bytes | [Address](../core/data_structures.md#address) of the validator that signed the extension | 2 | - | height | int64 | Height of the block (for sanity check). | 3 | - | vote_extension | bytes | Application-specific information signed by Tendermint. 
Can have 0 length | 4 | + | Name | Type | Description | Field Number | + |-------------------|-------|-------------------------------------------------------------------------------------------|--------------| + | hash | bytes | The hash of the proposed block that the vote extension refers to. | 1 | + | validator_address | bytes | [Address](../core/data_structures.md#address) of the validator that signed the extension. | 2 | + | height | int64 | Height of the block (for sanity check). | 3 | + | vote_extension | bytes | Application-specific information signed by Tendermint. Can have 0 length. | 4 | * **Response**: | Name | Type | Description | Field Number | |--------|-------------------------------|----------------------------------------------------------------|--------------| | status | [VerifyStatus](#verifystatus) | `enum` signaling if the application accepts the vote extension | 1 | * **Usage**: - * `RequestVerifyVoteExtension.vote_extension` can be an empty byte array. The Application's interpretation of it should be + * `RequestVerifyVoteExtension.vote_extension` can be an empty byte array. The Application's + interpretation of it should be that the Application running at the process that sent the vote chose not to extend it. Tendermint will always call `RequestVerifyVoteExtension`, even for 0 length vote extensions. + * `RequestVerifyVoteExtension` is not called for precommit votes sent by the local process. + * `RequestVerifyVoteExtension.hash` refers to a proposed block. There is no guarantee that + this proposed block has previously been exposed to the Application via `ProcessProposal`. * If `ResponseVerifyVoteExtension.status` is `REJECT`, Tendermint will reject the whole received vote. - See the [Requirements](abci++_app_requirements_002_draft.md) section to understand the potential + See the [Requirements](./abci++_app_requirements.md) section to understand the potential liveness implications of this. * The implementation of `VerifyVoteExtension` MUST be deterministic.
Moreover, the value of `ResponseVerifyVoteExtension.status` MUST **exclusively** depend on the parameters passed in the call to `RequestVerifyVoteExtension`, and the last committed Application state - (see [Requirements](abci++_app_requirements_002_draft.md) section). + (see [Requirements](./abci++_app_requirements.md) section). * Moreover, application implementers SHOULD always set `ResponseVerifyVoteExtension.status` to `ACCEPT`, unless they _really_ know what the potential liveness implications of returning `REJECT` are. -#### When does Tendermint call it? +#### When does Tendermint call `VerifyVoteExtension`? -When a validator _p_ is in Tendermint consensus round _r_, height _h_, state _prevote_ (**TODO** discuss: I think I must remove the state -from this condition, but not sure), and _p_ receives a Precommit message for round _r_, height _h_ from _q_: +When a node _p_ is in Tendermint consensus round _r_, height _h_, and _p_ receives a Precommit +message for round _r_, height _h_ from validator _q_ (_q_ ≠ _p_): -1. If the Precommit message does not contain a vote extension with a valid signature, Tendermint discards the message as invalid. +1. If the Precommit message does not contain a vote extension with a valid signature, Tendermint + discards the Precommit message as invalid. * a 0-length vote extension is valid as long as its accompanying signature is also valid. 2. Else, _p_'s Tendermint calls `RequestVerifyVoteExtension`. -3. The Application returns _accept_ or _reject_ via `ResponseVerifyVoteExtension.status`. +3. The Application returns `ACCEPT` or `REJECT` via `ResponseVerifyVoteExtension.status`. 4. If the Application returns - * _accept_, _p_'s Tendermint will keep the received vote, together with its corresponding + * `ACCEPT`, _p_'s Tendermint will keep the received vote, together with its corresponding vote extension in its internal data structures. 
It will be used to populate the [ExtendedCommitInfo](#extendedcommitinfo) structure in calls to `RequestPrepareProposal`, in rounds of height _h + 1_ where _p_ is the proposer. - * _reject_, _p_'s Tendermint will deem the Precommit message invalid and discard it. + * `REJECT`, _p_'s Tendermint will deem the Precommit message invalid and discard it. ### FinalizeBlock @@ -564,38 +639,38 @@ from this condition, but not sure), and _p_ receives a Precommit message for rou * **Request**: - | Name | Type | Description | Field Number | - |----------------------|---------------------------------------------|------------------------------------------------------------------------------------------|--------------| - | txs | repeated bytes | List of transactions committed as part of the block. | 1 | - | decided_last_commit | [CommitInfo](#commitinfo) | Info about the last commit, obtained from the block that was just decided. | 2 | - | byzantine_validators | repeated [Misbehavior](#misbehavior) | List of information about validators that acted incorrectly. | 3 | - | hash | bytes | The block header's hash. Present for convenience (can be derived from the block header). | 4 | - | height | int64 | The height of the finalized block. | 5 | - | time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Timestamp included in the finalized block. | 6 | - | next_validators_hash | bytes | Merkle root of the next validator set. | 7 | - | proposer_address | bytes | [Address](../core/data_structures.md#address) of the validator that created the proposal.| 8 | + | Name | Type | Description | Field Number | + |----------------------|-------------------------------------------------|-------------------------------------------------------------------------------------------|--------------| + | txs | repeated bytes | List of transactions committed as part of the block. 
| 1 | + | decided_last_commit | [CommitInfo](#commitinfo) | Info about the last commit, obtained from the block that was just decided. | 2 | + | misbehavior | repeated [Misbehavior](#misbehavior) | List of information about validators that misbehaved. | 3 | + | hash | bytes | The block's hash. | 4 | + | height | int64 | The height of the finalized block. | 5 | + | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the finalized block. | 6 | + | next_validators_hash | bytes | Merkle root of the next validator set. | 7 | + | proposer_address | bytes | [Address](../core/data_structures.md#address) of the validator that created the proposal. | 8 | * **Response**: | Name | Type | Description | Field Number | |-------------------------|-------------------------------------------------------------|----------------------------------------------------------------------------------|--------------| - | events | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing | 1 | + | events | repeated [Event](abci++_basic_concepts.md#events) | Type & Key-Value events for indexing | 1 | | tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions | 2 | | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 3 | - | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 4 | + | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to gas, size, and other consensus-related parameters. | 4 | | app_hash | bytes | The Merkle root hash of the application state. | 5 | - | retain_height | int64 | Blocks below this height may be removed. Defaults to `0` (retain all). | 6 | * **Usage**: * Contains the fields of the newly decided block. 
* This method is equivalent to the call sequence `BeginBlock`, [`DeliverTx`], - `EndBlock`, `Commit` in the previous version of ABCI. - * The height and timestamp values match the values from the header of the proposed block. - * The Application can use `RequestFinalizeBlock.decided_last_commit` and `RequestFinalizeBlock.byzantine_validators` + and `EndBlock` in the previous version of ABCI. + * The height and time values match the values from the header of the proposed block. + * The Application can use `RequestFinalizeBlock.decided_last_commit` and `RequestFinalizeBlock.misbehavior` to determine rewards and punishments for the validators. - * The application must execute the transactions in full, in the order they appear in `RequestFinalizeBlock.txs`, - before returning control to Tendermint. Alternatively, it can commit the candidate state corresponding to the same block - previously executed via `PrepareProposal` or `ProcessProposal`. + * The Application executes the transactions in `RequestFinalizeBlock.txs` deterministically, + according to the rules set up by the Application, before returning control to Tendermint. + Alternatively, it can commit the candidate state corresponding to the same block previously + executed via `PrepareProposal` or `ProcessProposal`. * `ResponseFinalizeBlock.tx_results[i].Code == 0` only if the _i_-th transaction is fully valid. * In next-block execution mode, the Application must provide values for `ResponseFinalizeBlock.app_hash`, `ResponseFinalizeBlock.tx_results`, `ResponseFinalizeBlock.validator_updates`, and @@ -605,16 +680,18 @@ from this condition, but not sure), and _p_ receives a Precommit message for rou the current values. * `ResponseFinalizeBlock.validator_updates`, triggered by block `H`, affect validation for blocks `H+1`, `H+2`, and `H+3`. Heights following a validator update are affected in the following way: - - Height `H+1`: `NextValidatorsHash` includes the new `validator_updates` value. 
- - Height `H+2`: The validator set change takes effect and `ValidatorsHash` is updated. - - Height `H+3`: `decided_last_commit` now includes the altered validator set. + * Height `H+1`: `NextValidatorsHash` includes the new `validator_updates` value. + * Height `H+2`: The validator set change takes effect and `ValidatorsHash` is updated. + * Height `H+3`: `*_last_commit` fields in `PrepareProposal`, `ProcessProposal`, and + `FinalizeBlock` now include the altered validator set. * `ResponseFinalizeBlock.consensus_param_updates` returned for block `H` apply to the consensus params for block `H+1`. For more information on the consensus parameters, - see the [application spec entry on consensus parameters](../abci/apps.md#consensus-parameters). + see the [consensus parameters](./abci%2B%2B_app_requirements.md#consensus-parameters) + section. + * In same-block execution mode, Tendermint will log an error and ignore values for `ResponseFinalizeBlock.app_hash`, `ResponseFinalizeBlock.tx_results`, `ResponseFinalizeBlock.validator_updates`, and `ResponsePrepareProposal.consensus_param_updates`, as those must have been provided by `PrepareProposal`. - * Application is expected to persist its state at the end of this call, before calling `ResponseFinalizeBlock`. * `ResponseFinalizeBlock.app_hash` contains an (optional) Merkle root hash of the application state. * `ResponseFinalizeBlock.app_hash` is included * [in next-block execution mode] as the `Header.AppHash` in the next block. @@ -626,11 +703,7 @@ from this condition, but not sure), and _p_ receives a Precommit message for rou of `RequestFinalizeBlock` and the previous committed state. * Later calls to `Query` can return proofs about the application state anchored in this Merkle root hash. - * Use `ResponseFinalizeBlock.retain_height` with caution! If all nodes in the network remove historical - blocks then this data is permanently lost, and no new nodes will be able to join the network and - bootstrap. 
Historical blocks may also be required for other purposes, e.g. auditing, replay of - non-persisted heights, light client verification, and so on. - * Just as `ProcessProposal`, the implementation of `FinalizeBlock` MUST be deterministic, since it is + * The implementation of `FinalizeBlock` MUST be deterministic, since it is making the Application's state evolve in the context of state machine replication. * Currently, Tendermint will fill up all fields in `RequestFinalizeBlock`, even if they were already passed on to the Application via `RequestPrepareProposal` or `RequestProcessProposal`. @@ -638,9 +711,9 @@ from this condition, but not sure), and _p_ receives a Precommit message for rou (rather than executing the whole block). In this case the Application disregards all parameters in `RequestFinalizeBlock` except `RequestFinalizeBlock.hash`. -#### When does Tendermint call it? +#### When does Tendermint call `FinalizeBlock`? -When a validator _p_ is in Tendermint consensus height _h_, and _p_ receives +When a node _p_ is in Tendermint consensus height _h_, and _p_ receives * the Proposal message with block _v_ for a round _r_, along with all its block parts, from _q_, which is the proposer of round _r_, height _h_, @@ -649,16 +722,19 @@ When a validator _p_ is in Tendermint consensus height _h_, and _p_ receives then _p_'s Tendermint decides block _v_ and finalizes consensus for height _h_ in the following way -1. _p_'s Tendermint persists _v_ as decision for height _h_. -2. _p_'s Tendermint locks the mempool -- no calls to checkTx on new transactions. -3. _p_'s Tendermint calls `RequestFinalizeBlock` with _id(v)_. The call is synchronous. -4. _p_'s Application processes block _v_, received in a previous call to `RequestProcessProposal`. -5. _p_'s Application commits and persists the state resulting from processing the block. -6. 
_p_'s Application calculates and returns the _AppHash_, along with an array of arrays of bytes representing the output of each of the transactions -7. _p_'s Tendermint hashes the array of transaction outputs and stores it in _ResultHash_ -8. _p_'s Tendermint persists _AppHash_ and _ResultHash_ -9. _p_'s Tendermint unlocks the mempool -- newly received transactions can now be checked. -10. _p_'s starts consensus for a new height _h+1_, round 0 +1. _p_'s Tendermint persists _v_ as the decision for height _h_. +2. _p_'s Tendermint calls `RequestFinalizeBlock` with _v_'s data. The call is synchronous. +3. _p_'s Application executes block _v_. +4. _p_'s Application calculates and returns the _AppHash_, along with a list containing + the outputs of each of the transactions executed. +5. _p_'s Tendermint hashes all the transaction outputs and stores it in _ResultHash_. +6. _p_'s Tendermint persists the transaction outputs, _AppHash_, and _ResultsHash_. +7. _p_'s Tendermint locks the mempool — no calls to `CheckTx` on new transactions. +8. _p_'s Tendermint calls `RequestCommit` to instruct the Application to persist its state. +9. _p_'s Tendermint, optionally, re-checks all outstanding transactions in the mempool + against the newly persisted Application state. +10. _p_'s Tendermint unlocks the mempool — newly received transactions can now be checked. +11. _p_'s starts consensus for height _h+1_, round 0 ## Data Types existing in ABCI @@ -696,13 +772,13 @@ Most of the data structures used in ABCI are shared [common data structures](../ * **Fields**: - | Name | Type | Description | Field Number | - |--------------------|--------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------|--------------| - | type | [MisbehaviorType](#misbehaviortype) | Type of the misbehavior. An enum of possible misbehaviors. 
| 1 | - | validator | [Validator](#validator) | The offending validator | 2 | - | height | int64 | Height when the offense occurred | 3 | - | time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Time of the block that was committed at the height that the offense occurred | 4 | - | total_voting_power | int64 | Total voting power of the validator set at height `Height` | 5 | + | Name | Type | Description | Field Number | + |--------------------|-------------------------------------------------|------------------------------------------------------------------------------|--------------| + | type | [MisbehaviorType](#misbehaviortype) | Type of the misbehavior. An enum of possible misbehaviors. | 1 | + | validator | [Validator](#validator) | The offending validator | 2 | + | height | int64 | Height when the offense occurred | 3 | + | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the block that was committed at height `height` | 4 | + | total_voting_power | int64 | Total voting power of the validator set at height `height` | 5 | #### MisbehaviorType @@ -823,7 +899,7 @@ Most of the data structures used in ABCI are shared [common data structures](../ | info | string | Additional information. **May be non-deterministic.** | 4 | | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | | gas_used | int64 | Amount of gas consumed by transaction. | 6 | - | events | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing transactions (e.g. by account). | 7 | + | events | repeated [Event](abci++_basic_concepts.md#events) | Type & Key-Value events for indexing transactions (e.g. by account). | 7 | | codespace | string | Namespace for the `code`. | 8 | ### TxAction @@ -842,7 +918,7 @@ enum TxAction { * If `Action` is `UNMODIFIED`, Tendermint includes the transaction in the proposal. Nothing to do on the mempool. 
* If `Action` is `ADDED`, Tendermint includes the transaction in the proposal. The transaction is _not_ added to the mempool. * If `Action` is `REMOVED`, Tendermint excludes the transaction from the proposal. The transaction is also removed from the mempool if it exists, - similar to `CheckTx` returning _false_. + similar to `CheckTx` returning an error code. ### TxRecord @@ -905,3 +981,5 @@ enum VerifyStatus { * Tendermint is to sign the whole data structure and attach it to a Precommit message * Upon reception, Tendermint validates the sender's signature and sanity-checks the values of `height`, `round`, and `chain_id`. Then it sends `extension` to the Application via `RequestVerifyVoteExtension` for verification. + +[protobuf-timestamp]: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp diff --git a/spec/abci++/abci++_tmint_expected_behavior_002_draft.md b/spec/abci++/abci++_tmint_expected_behavior.md similarity index 66% rename from spec/abci++/abci++_tmint_expected_behavior_002_draft.md rename to spec/abci++/abci++_tmint_expected_behavior.md index 7786894505..8df5e6c844 100644 --- a/spec/abci++/abci++_tmint_expected_behavior_002_draft.md +++ b/spec/abci++/abci++_tmint_expected_behavior.md @@ -11,16 +11,20 @@ This section describes what the Application can expect from Tendermint. The Tendermint consensus algorithm is designed to protect safety under any network conditions, as long as less than 1/3 of validators' voting power is byzantine. Most of the time, though, the network will behave -synchronously and there will be no byzantine process. In these frequent, benign conditions: +synchronously, no process will fall behind, and there will be no byzantine process. 
The following describes +what will happen during a block height _h_ in these frequent, benign conditions: -* Tendermint will decide in round 0; +* Tendermint will decide in round 0, for height _h_; * `PrepareProposal` will be called exactly once at the proposer process of round 0, height _h_; -* `ProcessProposal` will be called exactly once at all processes except the proposer of round 0, and +* `ProcessProposal` will be called exactly once at all processes, and will return _accept_ in its `Response*`; -* `ExtendVote` will be called exactly once at all processes -* `VerifyVoteExtension` will be called _n-1_ times at each validator process, where _n_ is the number of validators; and -* `FinalizeBlock` will be finally called at all processes at the end of height _h_, conveying the same prepared - block that all calls to `PrepareProposal` and `ProcessProposal` had previously reported for height _h_. +* `ExtendVote` will be called exactly once at all processes; +* `VerifyVoteExtension` will be called exactly _n-1_ times at each validator process, where _n_ is + the number of validators, and will always return _accept_ in its `Response*`; +* `FinalizeBlock` will be called exactly once at all processes, conveying the same prepared + block that all calls to `PrepareProposal` and `ProcessProposal` had previously reported for + height _h_; and +* `Commit` will finally be called exactly once at all processes at the end of height _h_. However, the Application logic must be ready to cope with any possible run of Tendermint for a given height, including bad periods (byzantine proposers, network being asynchronous). @@ -28,7 +32,7 @@ In these cases, the sequence of calls to ABCI++ methods may not be so straighfor the Application should still be able to handle them, e.g., without crashing. The purpose of this section is to define what these sequences look like an a precise way. 
-As mentioned in the [Basic Concepts](abci++_basic_concepts_002_draft.md) section, Tendermint +As mentioned in the [Basic Concepts](./abci%2B%2B_basic_concepts.md) section, Tendermint acts as a client of ABCI++ and the Application acts as a server. Thus, it is up to Tendermint to determine when and in which order the different ABCI++ methods will be called. A well-written Application design should consider _any_ of these possible sequences. @@ -46,18 +50,15 @@ state-sync = *state-sync-attempt success-sync info state-sync-attempt = offer-snapshot *apply-chunk success-sync = offer-snapshot 1*apply-chunk -recovery = info *consensus-replay consensus-exec -consensus-replay = decide +recovery = info consensus-exec consensus-exec = (inf)consensus-height -consensus-height = *consensus-round decide +consensus-height = *consensus-round decide commit consensus-round = proposer / non-proposer -proposer = prepare-proposal extend-proposer -extend-proposer = *got-vote [extend-vote] *got-vote - -non-proposer = *got-vote [extend-non-proposer] *got-vote -extend-non-proposer = process-proposal *got-vote [extend-vote] +proposer = *got-vote prepare-proposal *got-vote process-proposal [extend] +extend = *got-vote extend-vote *got-vote +non-proposer = *got-vote [process-proposal] [extend] init-chain = %s"" offer-snapshot = %s"" @@ -68,12 +69,10 @@ process-proposal = %s"" extend-vote = %s"" got-vote = %s"" decide = %s"" +commit = %s"" ``` ->**TODO** Still hesitating... introduce _n_ as total number of validators, so that we can bound the occurrences of ->`got-vote` in a round. - -We have kept some of the ABCI++ methods out of the grammar, in order to keep it as clear and concise as possible. +We have kept some ABCI methods out of the grammar, in order to keep it as clear and concise as possible. A common reason for keeping all these methods out is that they all can be called at any point in a sequence defined by the grammar above. 
Other reasons depend on the method in question: @@ -115,7 +114,7 @@ Let us now examine the grammar line by line, providing further details. * In _state-sync_ mode, Tendermint makes one or more attempts at synchronizing the Application's state. At the beginning of each attempt, it offers the Application a snapshot found at another process. - If the Application accepts the snapshop, at sequence of calls to `ApplySnapshotChunk` method follow + If the Application accepts the snapshot, a sequence of calls to the `ApplySnapshotChunk` method follows to provide the Application with all the snapshots needed, in order to reconstruct the state locally. A successful attempt must provide at least one chunk via `ApplySnapshotChunk`. At the end of a successful attempt, Tendermint calls `Info` to make sure the recontructed state's @@ -128,12 +127,10 @@ Let us now examine the grammar line by line, providing further details. * In recovery mode, Tendermint first calls `Info` to know from which height it needs to replay decisions - to the Application. To replay a decision, Tendermint simply calls `FinalizeBlock` with the decided - block at that height. After this, Tendermint enters nomal consensus execution. + to the Application. After this, Tendermint enters normal consensus execution. >```abnf ->recovery = info *consensus-replay consensus-exec ->consensus-replay = decide +>recovery = info consensus-exec >``` * The non-terminal `consensus-exec` is a key point in this grammar. It is an infinite sequence of @@ -145,33 +142,36 @@ Let us now examine the grammar line by line, providing further details. >consensus-exec = (inf)consensus-height >``` -* A consensus height consists of zero or more rounds before deciding via a call to `FinalizeBlock`. - In each round, the sequence of method calls depends on whether the local process is the proposer or not.
+* A consensus height consists of zero or more rounds before deciding and executing via a call to + `FinalizeBlock`, followed by a call to `Commit`. In each round, the sequence of method calls + depends on whether the local process is the proposer or not. Note that, if a height contains zero + rounds, this means the process is replaying an already decided value (catch-up mode). >```abnf ->consensus-height = *consensus-round decide +>consensus-height = *consensus-round decide commit >consensus-round = proposer / non-proposer >``` -* If the local process is the proposer of the current round, Tendermint starts by calling `PrepareProposal`. - No calls to methods related to vote extensions (`ExtendVote`, `VerifyVoteExtension`) can be called - in the present round before `PrepareProposal`. Once `PrepareProposal` is called, calls to - `ExtendVote` and `VerifyVoteExtension` can come in any order, although the former will be called - at most once in this round. +* For every round, if the local process is the proposer of the current round, Tendermint starts by + calling `PrepareProposal`, followed by `ProcessProposal`. Then, optionally, the Application is + asked to extend its vote for that round. Calls to `VerifyVoteExtension` can come at any time: the + local process may be slightly late in the current round, or votes may come from a future round + of this height. >```abnf ->proposer = prepare-proposal extend-proposer ->extend-proposer = *got-vote [extend-vote] *got-vote +>proposer = *got-vote prepare-proposal *got-vote process-proposal [extend] +>extend = *got-vote extend-vote *got-vote >``` -* If the local process is not the proposer of the current round, Tendermint will call `ProcessProposal` - at most once. At most one call to `ExtendVote` can occur only after `ProcessProposal` is called. - A number of calls to `VerifyVoteExtension` can occur in any order with respect to `ProcessProposal` - and `ExtendVote` throughout the round. 
+* Also for every round, if the local process is _not_ the proposer of the current round, Tendermint + will call `ProcessProposal` at most once. At most one call to `ExtendVote` may occur only after + `ProcessProposal` is called. A number of calls to `VerifyVoteExtension` can occur in any order + with respect to `ProcessProposal` and `ExtendVote` throughout the round. The reasons are the same + as above, namely, the process running slightly late in the current round, or votes from future + rounds of this height received. >```abnf ->non-proposer = *got-vote [extend-non-proposer] *got-vote ->extend-non-proposer = process-proposal *got-vote [extend-vote] +>non-proposer = *got-vote [process-proposal] [extend] >``` * Finally, the grammar describes all its terminal symbols, which denote the different ABCI++ method calls that @@ -187,6 +187,7 @@ Let us now examine the grammar line by line, providing further details. >extend-vote = %s"" >got-vote = %s"" >decide = %s"" +>commit = %s"" >``` ## Adapting existing Applications that use ABCI @@ -202,17 +203,21 @@ to undergo any changes in their implementation. As for the new methods: -* `PrepareProposal` must create a list of [TxRecord](./abci++_methods_002_draft.md#txrecord) each containing a - transaction passed in `RequestPrepareProposal.txs`, in the same other. The field `action` must be set to `UNMODIFIED` - for all [TxRecord](./abci++_methods_002_draft.md#txrecord) elements in the list. +* `PrepareProposal` must create a list of [TxRecord](./abci++_methods.md#txrecord) each containing + a transaction passed in `RequestPrepareProposal.txs`, in the same order. The field `action` must + be set to `UNMODIFIED` for all [TxRecord](./abci++_methods.md#txrecord) elements in the list. The Application must check whether the size of all transactions exceeds the byte limit - (`RequestPrepareProposal.max_tx_bytes`).
If so, the Application must remove transactions at the end of the list - until the total byte size is at or below the limit. + (`RequestPrepareProposal.max_tx_bytes`). If so, the Application must remove transactions at the + end of the list until the total byte size is at or below the limit. * `ProcessProposal` must set `ResponseProcessProposal.accept` to _true_ and return. * `ExtendVote` is to set `ResponseExtendVote.extension` to an empty byte array and return. -* `VerifyVoteExtension` must set `ResponseVerifyVoteExtension.accept` to _true_ if the extension is an empty byte array - and _false_ otherwise, then return. -* `FinalizeBlock` is to coalesce the implementation of methods `BeginBlock`, `DeliverTx`, `EndBlock`, and `Commit`. - Legacy applications looking to reuse old code that implemented `DeliverTx` should wrap the legacy - `DeliverTx` logic in a loop that executes one transaction iteration per +* `VerifyVoteExtension` must set `ResponseVerifyVoteExtension.accept` to _true_ if the extension is + an empty byte array and _false_ otherwise, then return. +* `FinalizeBlock` is to coalesce the implementation of methods `BeginBlock`, `DeliverTx`, and + `EndBlock`. Legacy applications looking to reuse old code that implemented `DeliverTx` should + wrap the legacy `DeliverTx` logic in a loop that executes one transaction iteration per transaction in `RequestFinalizeBlock.tx`. + +Finally, `Commit`, which is kept in ABCI++, no longer returns the `AppHash`. It is now up to +`FinalizeBlock` to do so. Thus, a slight refactoring of the old `Commit` implementation will be +needed to move the return of `AppHash` to `FinalizeBlock`. diff --git a/spec/abci++/v0.md b/spec/abci++/v0.md deleted file mode 100644 index 163b3f7cbe..0000000000 --- a/spec/abci++/v0.md +++ /dev/null @@ -1,156 +0,0 @@ -# Tendermint v0 Markdown pseudocode - -This translates the latex code for Tendermint consensus from the Tendermint paper into markdown. 
- -### Initialization - -```go -h_p ← 0 -round_p ← 0 -step_p is one of {propose, prevote, precommit} -decision_p ← Vector() -lockedRound_p ← -1 -lockedValue_p ← nil -validValue_p ← nil -validRound_p ← -1 -``` - -### StartRound(round) - -```go -function startRound(round) { - round_p ← round - step_p ← propose - if proposer(h_p, round_p) = p { - if validValue_p != nil { - proposal ← validValue_p - } else { - proposal ← getValue() - } - broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ - } else { - schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) - } -} -``` - -### ReceiveProposal - -In the case where the local node is not locked on any round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, −1) from proposer(h_p, round_p) while step_p = propose do { - if valid(v) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -In the case where the node is locked on a round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { - if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -### Prevote timeout - -Upon receiving 2f + 1 prevotes, setup a timeout. 
- -```go -upon 2f + 1 ⟨PREVOTE, h_p, vr, *⟩ with step_p = prevote for the first time, do { - schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) -} -``` - -with OnTimeoutPrevote defined as: - -```go -function OnTimeoutPrevote(height, round) { - if (height = h_p && round = round_p && step_p = prevote) { - broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩ - step_p ← precommit - } -} -``` - -### Receiving enough prevotes to precommit - -The following code is ran upon receiving 2f + 1 prevotes for the same block - -```go -upon ⟨PROPOSAL, h_p, round_p, v, *⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, round_p, id(v)⟩ - while valid(v) ∧ step_p >= prevote for the first time do { - if (step_p = prevote) { - lockedValue_p ← v - lockedRound_p ← round_p - broadcast ⟨PRECOMMIT, h_p, round_p, id(v)⟩ - step_p ← precommit - } - validValue_p ← v - validRound_p ← round_p -} -``` - -And upon receiving 2f + 1 prevotes for nil: - -```go -upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ - while step_p = prevote do { - broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩ - step_p ← precommit -} -``` - -### Precommit timeout - -Upon receiving 2f + 1 precommits, setup a timeout. 
- -```go -upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { - schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) -} -``` - -with OnTimeoutPrecommit defined as: - -```go -function OnTimeoutPrecommit(height, round) { - if (height = h_p && round = round_p) { - StartRound(round_p + 1) - } -} -``` - -### Upon Receiving 2f + 1 precommits - -The following code is ran upon receiving 2f + 1 precommits for the same block - -```go -upon ⟨PROPOSAL, h_p, r, v, *⟩ - from proposer(h_p, r) - AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ - while decision_p[h_p] = nil do { - if (valid(v)) { - decision_p[h_p] ← v - h_p ← h_p + 1 - reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values - StartRound(0) - } -} -``` - -If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. \ No newline at end of file diff --git a/spec/abci++/v1.md b/spec/abci++/v1.md deleted file mode 100644 index 96dc8e674a..0000000000 --- a/spec/abci++/v1.md +++ /dev/null @@ -1,162 +0,0 @@ -# Tendermint v1 Markdown pseudocode - -This adds hooks for the existing ABCI to the prior pseudocode - -### Initialization - -```go -h_p ← 0 -round_p ← 0 -step_p is one of {propose, prevote, precommit} -decision_p ← Vector() -lockedValue_p ← nil -validValue_p ← nil -validRound_p ← -1 -``` - -### StartRound(round) - -```go -function startRound(round) { - round_p ← round - step_p ← propose - if proposer(h_p, round_p) = p { - if validValue_p != nil { - proposal ← validValue_p - } else { - txdata ← mempool.GetBlock() - // getBlockProposal fills in header - proposal ← getBlockProposal(txdata) - } - broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ - } else { - schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) - } -} -``` - -### ReceiveProposal - -In the case where the local node is not locked on any round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, 
round_p, v, −1) from proposer(h_p, round_p) while step_p = propose do { - if valid(v) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -In the case where the node is locked on a round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { - if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -### Prevote timeout - -Upon receiving 2f + 1 prevotes, setup a timeout. - -```go -upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ - with step_p = prevote for the first time, do { - schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) -} -``` - -with OnTimeoutPrevote defined as: - -```go -function OnTimeoutPrevote(height, round) { - if (height = h_p && round = round_p && step_p = prevote) { - broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩ - step_p ← precommit - } -} -``` - -### Receiving enough prevotes to precommit - -The following code is ran upon receiving 2f + 1 prevotes for the same block - -```go -upon ⟨PROPOSAL, h_p, round_p, v, *⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while valid(v) ∧ step_p >= prevote for the first time do { - if (step_p = prevote) { - lockedValue_p ← v - lockedRound_p ← round_p - broadcast ⟨PRECOMMIT, h_p, round_p, id(v)⟩ - step_p ← precommit - } - validValue_p ← v - validRound_p ← round_p -} -``` - -And upon receiving 2f + 1 prevotes for nil: - -```go -upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ - while step_p = prevote do { - broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩ - step_p ← precommit -} -``` - -### Precommit timeout - -Upon receiving 2f + 1 precommits, setup a timeout. 
- -```go -upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { - schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) -} -``` - -with OnTimeoutPrecommit defined as: - -```go -function OnTimeoutPrecommit(height, round) { - if (height = h_p && round = round_p) { - StartRound(round_p + 1) - } -} -``` - -### Upon Receiving 2f + 1 precommits - -The following code is ran upon receiving 2f + 1 precommits for the same block - -```go -upon ⟨PROPOSAL, h_p, r, v, *⟩ - from proposer(h_p, r) - AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ - while decision_p[h_p] = nil do { - if (valid(v)) { - decision_p[h_p] ← v - h_p ← h_p + 1 - reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values - ABCI.BeginBlock(v.header) - ABCI.DeliverTxs(v.data) - ABCI.EndBlock() - StartRound(0) - } -} -``` - -If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. \ No newline at end of file diff --git a/spec/abci++/v2.md b/spec/abci++/v2.md deleted file mode 100644 index 1abd8ec670..0000000000 --- a/spec/abci++/v2.md +++ /dev/null @@ -1,180 +0,0 @@ -# Tendermint v2 Markdown pseudocode - -This adds a single-threaded implementation of ABCI++, -with no optimization for splitting out verifying the header and verifying the proposal. 
- -### Initialization - -```go -h_p ← 0 -round_p ← 0 -step_p is one of {propose, prevote, precommit} -decision_p ← Vector() -lockedValue_p ← nil -validValue_p ← nil -validRound_p ← -1 -``` - -### StartRound(round) - -```go -function startRound(round) { - round_p ← round - step_p ← propose - if proposer(h_p, round_p) = p { - if validValue_p != nil { - proposal ← validValue_p - } else { - txdata ← mempool.GetBlock() - // getUnpreparedBlockProposal takes tx data, and fills in the unprepared header data - unpreparedProposal ← getUnpreparedBlockProposal(txdata) - // ABCI++: the proposer may reorder/update transactions in `unpreparedProposal` - proposal ← ABCI.PrepareProposal(unpreparedProposal) - } - broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ - } else { - schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) - } -} -``` - -### ReceiveProposal - -In the case where the local node is not locked on any round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, −1) from proposer(h_p, round_p) while step_p = propose do { - if valid(v) ∧ ABCI.ProcessProposal(h_p, v).accept ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - // Include any slashing evidence that may be sent in the process proposal response - for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - } - step_p ← prevote -} -``` - -In the case where the node is locked on a round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { - if valid(v) ∧ ABCI.ProcessProposal(h_p, v).accept ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - // Include any slashing evidence that may be sent in the process 
proposal response - for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - } - step_p ← prevote -} -``` - -### Prevote timeout - -Upon receiving 2f + 1 prevotes, setup a timeout. - -```go -upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ - with step_p = prevote for the first time, do { - schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) -} -``` - -with OnTimeoutPrevote defined as: - -```go -function OnTimeoutPrevote(height, round) { - if (height = h_p && round = round_p && step_p = prevote) { - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit - } -} -``` - -### Receiving enough prevotes to precommit - -The following code is ran upon receiving 2f + 1 prevotes for the same block - -```go -upon ⟨PROPOSAL, h_p, round_p, v, *⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while valid(v) ∧ step_p >= prevote for the first time do { - if (step_p = prevote) { - lockedValue_p ← v - lockedRound_p ← round_p - precommit_extension ← ABCI.ExtendVote(h_p, round_p, id(v)) - broadcast ⟨PRECOMMIT, h_p, round_p, id(v), precommit_extension⟩ - step_p ← precommit - } - validValue_p ← v - validRound_p ← round_p -} -``` - -And upon receiving 2f + 1 prevotes for nil: - -```go -upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ - while step_p = prevote do { - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit -} -``` - -### Upon receiving a precommit - -Upon receiving a precommit `precommit`, we ensure that `ABCI.VerifyVoteExtension(precommit.precommit_extension) = true` -before accepting the precommit. This is akin to how we check the signature on precommits normally, hence its not wrapped -in the syntax of methods from the paper. - -### Precommit timeout - -Upon receiving 2f + 1 precommits, setup a timeout. 
- -```go -upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { - schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) -} -``` - -with OnTimeoutPrecommit defined as: - -```go -function OnTimeoutPrecommit(height, round) { - if (height = h_p && round = round_p) { - StartRound(round_p + 1) - } -} -``` - -### Upon Receiving 2f + 1 precommits - -The following code is ran upon receiving 2f + 1 precommits for the same block - -```go -upon ⟨PROPOSAL, h_p, r, v, *⟩ - from proposer(h_p, r) - AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ - while decision_p[h_p] = nil do { - if (valid(v)) { - decision_p[h_p] ← v - h_p ← h_p + 1 - reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values - ABCI.FinalizeBlock(id(v)) - StartRound(0) - } -} -``` - -If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. diff --git a/spec/abci++/v3.md b/spec/abci++/v3.md deleted file mode 100644 index ed4c720b4e..0000000000 --- a/spec/abci++/v3.md +++ /dev/null @@ -1,201 +0,0 @@ -# Tendermint v3 Markdown pseudocode - -This is a single-threaded implementation of ABCI++, -with an optimization for the ProcessProposal phase. -Namely, processing of the header and the block data is separated into two different functions. 
- -### Initialization - -```go -h_p ← 0 -round_p ← 0 -step_p is one of {propose, prevote, precommit} -decision_p ← Vector() -lockedValue_p ← nil -validValue_p ← nil -validRound_p ← -1 -``` - -### StartRound(round) - -```go -function startRound(round) { - round_p ← round - step_p ← propose - if proposer(h_p, round_p) = p { - if validValue_p != nil { - proposal ← validValue_p - } else { - txdata ← mempool.GetBlock() - // getUnpreparedBlockProposal fills in header - unpreparedProposal ← getUnpreparedBlockProposal(txdata) - proposal ← ABCI.PrepareProposal(unpreparedProposal) - } - broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ - } else { - schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) - } -} -``` - -### ReceiveProposal - -In the case where the local node is not locked on any round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v_header, −1) from proposer(h_p, round_p) while step_p = propose do { - prevote_nil ← false - // valid is Tendermints validation, ABCI.VerifyHeader is the applications - if valid(v_header) ∧ ABCI.VerifyHeader(h_p, v_header) ∧ (lockedRound_p = −1 ∨ lockedValue_p = id(v_header)) { - wait to receive proposal v corresponding to v_header - // We split up the app's header verification from the remainder of its processing of the proposal - if ABCI.ProcessProposal(h_p, v).accept { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - prevote_nil ← true - // Include any slashing evidence that may be sent in the process proposal response - for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - } - } else { - prevote_nil ← true - } - if prevote_nil { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -In the case where the node is locked on a round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v_header, vr⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v_header)⟩ - while step_p = propose 
∧ (vr ≥ 0 ∧ vr < round_p) do { - prevote_nil ← false - if valid(v) ∧ ABCI.VerifyHeader(h_p, v.header) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { - wait to receive proposal v corresponding to v_header - // We split up the app's header verification from the remainder of its processing of the proposal - if ABCI.ProcessProposal(h_p, v).accept { - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - prevote_nil ← true - // Include any slashing evidence that may be sent in the process proposal response - for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - } - } else { - prevote_nil ← true - } - if prevote_nil { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -### Prevote timeout - -Upon receiving 2f + 1 prevotes, setup a timeout. - -```go -upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ - with step_p = prevote for the first time, do { - schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) -} -``` - -with OnTimeoutPrevote defined as: - -```go -function OnTimeoutPrevote(height, round) { - if (height = h_p && round = round_p && step_p = prevote) { - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit - } -} -``` - -### Receiving enough prevotes to precommit - -The following code is ran upon receiving 2f + 1 prevotes for the same block - -```go -upon ⟨PROPOSAL, h_p, round_p, v, *⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while valid(v) ∧ step_p >= prevote for the first time do { - if (step_p = prevote) { - lockedValue_p ← v - lockedRound_p ← round_p - precommit_extension ← ABCI.ExtendVote(h_p, round_p, id(v)) - broadcast ⟨PRECOMMIT, h_p, round_p, id(v), precommit_extension⟩ - step_p ← precommit - } - validValue_p ← v - validRound_p ← round_p -} -``` - -And upon receiving 2f + 1 prevotes for nil: - -```go -upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ - while step_p = 
prevote do { - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit -} -``` - -### Upon receiving a precommit - -Upon receiving a precommit `precommit`, we ensure that `ABCI.VerifyVoteExtension(precommit.precommit_extension) = true` -before accepting the precommit. This is akin to how we check the signature on precommits normally, hence its not wrapped -in the syntax of methods from the paper. - -### Precommit timeout - -Upon receiving 2f + 1 precommits, setup a timeout. - -```go -upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { - schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) -} -``` - -with OnTimeoutPrecommit defined as: - -```go -function OnTimeoutPrecommit(height, round) { - if (height = h_p && round = round_p) { - StartRound(round_p + 1) - } -} -``` - -### Upon Receiving 2f + 1 precommits - -The following code is ran upon receiving 2f + 1 precommits for the same block - -```go -upon ⟨PROPOSAL, h_p, r, v, *⟩ - from proposer(h_p, r) - AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ - while decision_p[h_p] = nil do { - if (valid(v)) { - decision_p[h_p] ← v - h_p ← h_p + 1 - reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values - ABCI.FinalizeBlock(id(v)) - StartRound(0) - } -} -``` - -If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. \ No newline at end of file diff --git a/spec/abci++/v4.md b/spec/abci++/v4.md deleted file mode 100644 index d211fd87fc..0000000000 --- a/spec/abci++/v4.md +++ /dev/null @@ -1,199 +0,0 @@ -# Tendermint v4 Markdown pseudocode - -This is a multi-threaded implementation of ABCI++, -where ProcessProposal starts when the proposal is received, but ends before precommitting. 
- -### Initialization - -```go -h_p ← 0 -round_p ← 0 -step_p is one of {propose, prevote, precommit} -decision_p ← Vector() -lockedValue_p ← nil -validValue_p ← nil -validRound_p ← -1 -``` - -### StartRound(round) - -```go -function startRound(round) { - round_p ← round - step_p ← propose - if proposer(h_p, round_p) = p { - if validValue_p != nil { - proposal ← validValue_p - } else { - txdata ← mempool.GetBlock() - // getUnpreparedBlockProposal fills in header - unpreparedProposal ← getUnpreparedBlockProposal(txdata) - proposal ← ABCI.PrepareProposal(unpreparedProposal) - } - broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ - } else { - schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) - } -} -``` - -### ReceiveProposal - -In the case where the local node is not locked on any round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, −1) from proposer(h_p, round_p) while step_p = propose do { - if valid(v) ∧ ABCI.VerifyHeader(h_p, v.header) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { - // We fork process proposal into a parallel process - Fork ABCI.ProcessProposal(h_p, v) - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -In the case where the node is locked on a round, the following is ran: - -```go -upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ - while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { - if valid(v) ∧ ABCI.VerifyHeader(h_p, v.header) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { - // We fork process proposal into a parallel process - Fork ABCI.ProcessProposal(h_p, v) - broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ - } else { - broadcast ⟨PREVOTE, h_p, round_p, nil⟩ - } - step_p ← prevote -} -``` - -### Prevote timeout - -Upon receiving 2f + 1 prevotes, setup a timeout. 
- -```go -upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ - with step_p = prevote for the first time, do { - schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) -} -``` - -with OnTimeoutPrevote defined as: - -```go -def OnTimeoutPrevote(height, round) { - if (height = h_p && round = round_p && step_p = prevote) { - // Join the ProcessProposal, and output any evidence in case it has some. - processProposalOutput ← Join ABCI.ProcessProposal(h_p, v) - for evidence in processProposalOutput.evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit - } -} -``` - -### Receiving enough prevotes to precommit - -The following code is ran upon receiving 2f + 1 prevotes for the same block - -```go -upon ⟨PROPOSAL, h_p, round_p, v, *⟩ - from proposer(h_p, round_p) - AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ -while valid(v) ∧ step_p >= prevote for the first time do { - if (step_p = prevote) { - lockedValue_p ← v - lockedRound_p ← round_p - processProposalOutput ← Join ABCI.ProcessProposal(h_p, v) - // If the proposal is valid precommit as before. - // If it was invalid, precommit nil. - // Note that ABCI.ProcessProposal(h_p, v).accept is deterministic for all honest nodes. - precommit_value ← nil - if processProposalOutput.accept { - precommit_value ← id(v) - } - precommit_extension ← ABCI.ExtendVote(h_p, round_p, precommit_value) - broadcast ⟨PRECOMMIT, h_p, round_p, precommit_value, precommit_extension⟩ - for evidence in processProposalOutput.evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - - step_p ← precommit - } - validValue_p ← v - validRound_p ← round_p -} -``` - -And upon receiving 2f + 1 prevotes for nil: - -```go -upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ - while step_p = prevote do { - // Join ABCI.ProcessProposal, and broadcast any evidence if it exists. 
- processProposalOutput ← Join ABCI.ProcessProposal(h_p, v) - for evidence in processProposalOutput.evidence_list { - broadcast ⟨EVIDENCE, evidence⟩ - } - - precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) - broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ - step_p ← precommit -} -``` - -### Upon receiving a precommit - -Upon receiving a precommit `precommit`, we ensure that `ABCI.VerifyVoteExtension(precommit.precommit_extension) = true` -before accepting the precommit. This is akin to how we check the signature on precommits normally, hence its not wrapped -in the syntax of methods from the paper. - -### Precommit timeout - -Upon receiving 2f + 1 precommits, setup a timeout. - -```go -upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { - schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) -} -``` - -with OnTimeoutPrecommit defined as: - -```go -def OnTimeoutPrecommit(height, round) { - if (height = h_p && round = round_p) { - StartRound(round_p + 1) - } -} -``` - -### Upon Receiving 2f + 1 precommits - -The following code is ran upon receiving 2f + 1 precommits for the same block - -```go -upon ⟨PROPOSAL, h_p, r, v, *⟩ - from proposer(h_p, r) - AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ - while decision_p[h_p] = nil do { - if (valid(v)) { - decision_p[h_p] ← v - h_p ← h_p + 1 - reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values - ABCI.FinalizeBlock(id(v)) - StartRound(0) - } -} -``` - -If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. 
\ No newline at end of file From d8de2d85c0a07d0c2b72591a7991861bf88c2775 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Jul 2022 12:12:15 +0200 Subject: [PATCH 151/203] build(deps): Bump github.com/libp2p/go-buffer-pool from 0.0.2 to 0.1.0 (#8932) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 691efb09f6..f8fa79cfce 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/lib/pq v1.10.6 - github.com/libp2p/go-buffer-pool v0.0.2 + github.com/libp2p/go-buffer-pool v0.1.0 github.com/mroth/weightedrand v0.4.1 github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b github.com/ory/dockertest v3.3.5+incompatible diff --git a/go.sum b/go.sum index ca88c3e8f4..f7b9149a21 100644 --- a/go.sum +++ b/go.sum @@ -717,8 +717,8 @@ github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= -github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lufeee/execinquery v1.0.0 h1:1XUTuLIVPDlFvUU3LXmmZwHDsolsxXnY67lzhpeqe0I= github.com/lufeee/execinquery v1.0.0/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= From 
3cde9a0bbc04683aedb96871b0b45f8e2e650101 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hern=C3=A1n=20Vanzetto?= Date: Tue, 5 Jul 2022 16:08:58 +0300 Subject: [PATCH 152/203] abci-cli: Add `process_proposal` command to abci-cli (#8901) * Add `process_proposal` command to abci-cli * Added process proposal to the 'tutorial' examples * Added entry in CHANGELOG_PENDING.md * Allow empty blocks in PrepareProposal, ProcessProposal, and FinalizeBlock * Fix minimum arguments * Add tests for empty block * Updated abci-cli doc Co-authored-by: Sergio Mena Co-authored-by: Jasmina Malicevic --- CHANGELOG_PENDING.md | 1 + abci/cmd/abci-cli/abci-cli.go | 74 ++++++++++++++++++++++---------- abci/example/kvstore/kvstore.go | 2 +- abci/tests/server/client.go | 13 ++++++ abci/tests/test_cli/ex1.abci | 9 +++- abci/tests/test_cli/ex1.abci.out | 25 +++++++++++ docs/app-dev/abci-cli.md | 27 ++++++++++++ 7 files changed, 127 insertions(+), 24 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 07e7555011..13eeaef165 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -33,6 +33,7 @@ Special thanks to external contributors on this release: - [abci] \#8664 Move `app_hash` parameter from `Commit` to `FinalizeBlock`. (@sergio-mena) - [abci] \#8656 Added cli command for `PrepareProposal`. (@jmalicevic) - [sink/psql] \#8637 tx_results emitted from psql sink are now json encoded, previously they were protobuf encoded + - [abci] \#8901 Added cli command for `ProcessProposal`. (@hvanz) - P2P Protocol diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 237493e0ed..824bbb3605 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -79,10 +79,11 @@ func RootCmmand(logger log.Logger) *cobra.Command { // Structure for data passed to print response. 
type response struct { // generic abci response - Data []byte - Code uint32 - Info string - Log string + Data []byte + Code uint32 + Info string + Log string + Status int32 Query *queryResponse } @@ -132,6 +133,7 @@ func addCommands(cmd *cobra.Command, logger log.Logger) { cmd.AddCommand(versionCmd) cmd.AddCommand(testCmd) cmd.AddCommand(prepareProposalCmd) + cmd.AddCommand(processProposalCmd) cmd.AddCommand(getQueryCmd()) // examples @@ -172,7 +174,7 @@ This command opens an interactive console for running any of the other commands without opening a new connection each time `, Args: cobra.ExactArgs(0), - ValidArgs: []string{"echo", "info", "query", "check_tx", "prepare_proposal", "finalize_block", "commit"}, + ValidArgs: []string{"echo", "info", "query", "check_tx", "prepare_proposal", "process_proposal", "finalize_block", "commit"}, RunE: cmdConsole, } @@ -195,7 +197,7 @@ var finalizeBlockCmd = &cobra.Command{ Use: "finalize_block", Short: "deliver a block of transactions to the application", Long: "deliver a block of transactions to the application", - Args: cobra.MinimumNArgs(1), + Args: cobra.MinimumNArgs(0), RunE: cmdFinalizeBlock, } @@ -230,10 +232,18 @@ var prepareProposalCmd = &cobra.Command{ Use: "prepare_proposal", Short: "prepare proposal", Long: "prepare proposal", - Args: cobra.MinimumNArgs(1), + Args: cobra.MinimumNArgs(0), RunE: cmdPrepareProposal, } +var processProposalCmd = &cobra.Command{ + Use: "process_proposal", + Short: "process proposal", + Long: "process proposal", + Args: cobra.MinimumNArgs(0), + RunE: cmdProcessProposal, +} + func getQueryCmd() *cobra.Command { cmd := &cobra.Command{ Use: "query", @@ -352,6 +362,11 @@ func cmdTest(cmd *cobra.Command, args []string) error { types.TxRecord_UNMODIFIED, }, nil) }, + func() error { + return servertest.ProcessProposal(ctx, client, [][]byte{ + {0x01}, + }, types.ResponseProcessProposal_ACCEPT) + }, }) } @@ -454,6 +469,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error { return 
cmdQuery(cmd, actualArgs) case "prepare_proposal": return cmdPrepareProposal(cmd, actualArgs) + case "process_proposal": + return cmdProcessProposal(cmd, actualArgs) default: return cmdUnimplemented(cmd, pArgs) } @@ -517,13 +534,6 @@ const codeBad uint32 = 10 // Append new txs to application func cmdFinalizeBlock(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - printResponse(cmd, args, response{ - Code: codeBad, - Log: "Must provide at least one transaction", - }) - return nil - } txs := make([][]byte, len(args)) for i, arg := range args { txBytes, err := stringOrHexToBytes(arg) @@ -633,15 +643,8 @@ func inTxArray(txByteArray [][]byte, tx []byte) bool { } return false } + func cmdPrepareProposal(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - printResponse(cmd, args, response{ - Code: codeBad, - Info: "Must provide at least one transaction", - Log: "Must provide at least one transaction", - }) - return nil - } txsBytesArray := make([][]byte, len(args)) for i, arg := range args { @@ -682,6 +685,30 @@ func cmdPrepareProposal(cmd *cobra.Command, args []string) error { return nil } +func cmdProcessProposal(cmd *cobra.Command, args []string) error { + txsBytesArray := make([][]byte, len(args)) + + for i, arg := range args { + txBytes, err := stringOrHexToBytes(arg) + if err != nil { + return err + } + txsBytesArray[i] = txBytes + } + + res, err := client.ProcessProposal(cmd.Context(), &types.RequestProcessProposal{ + Txs: txsBytesArray, + }) + if err != nil { + return err + } + + printResponse(cmd, args, response{ + Status: int32(res.Status), + }) + return nil +} + func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error { return func(cmd *cobra.Command, args []string) error { // Create the application - in memory or persisted to disk @@ -739,6 +766,9 @@ func printResponse(cmd *cobra.Command, args []string, rsps ...response) { if rsp.Log != "" { fmt.Printf("-> log: %s\n", rsp.Log) } + if cmd.Use == 
"process_proposal" { + fmt.Printf("-> status: %s\n", types.ResponseProcessProposal_ProposalStatus_name[rsp.Status]) + } if rsp.Query != nil { fmt.Printf("-> height: %d\n", rsp.Query.Height) diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index c9a2a148c6..c1005eb86a 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -293,7 +293,7 @@ func (app *Application) PrepareProposal(_ context.Context, req *types.RequestPre func (*Application) ProcessProposal(_ context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { for _, tx := range req.Txs { - if len(tx) == 0 { + if len(tx) == 0 || isPrepareTx(tx) { return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}, nil } } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 7762c8d033..eb5649d9ea 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -83,6 +83,19 @@ func PrepareProposal(ctx context.Context, client abciclient.Client, txBytes [][] fmt.Println("Passed test: PrepareProposal") return nil } + +func ProcessProposal(ctx context.Context, client abciclient.Client, txBytes [][]byte, statusExp types.ResponseProcessProposal_ProposalStatus) error { + res, _ := client.ProcessProposal(ctx, &types.RequestProcessProposal{Txs: txBytes}) + if res.Status != statusExp { + fmt.Println("Failed test: ProcessProposal") + fmt.Printf("ProcessProposal response status was unexpected. 
Got %v expected %v.", + res.Status, statusExp) + return errors.New("ProcessProposal error") + } + fmt.Println("Passed test: ProcessProposal") + return nil +} + func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes}) code, data := res.Code, res.Data diff --git a/abci/tests/test_cli/ex1.abci b/abci/tests/test_cli/ex1.abci index dc9e213ecb..eba06028a5 100644 --- a/abci/tests/test_cli/ex1.abci +++ b/abci/tests/test_cli/ex1.abci @@ -1,6 +1,7 @@ echo hello info prepare_proposal "abc" +process_proposal "abc" finalize_block "abc" commit info @@ -8,4 +9,10 @@ query "abc" finalize_block "def=xyz" "ghi=123" commit query "def" -prepare_proposal "preparedef" \ No newline at end of file +prepare_proposal "preparedef" +process_proposal "def" +process_proposal "preparedef" +prepare_proposal +process_proposal +finalize_block +commit \ No newline at end of file diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index f4f342dfb4..fb3ababfaa 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -12,6 +12,10 @@ -> code: OK -> log: Succeeded. Tx: abc action: UNMODIFIED +> process_proposal "abc" +-> code: OK +-> status: ACCEPT + > finalize_block "abc" -> code: OK -> code: OK @@ -58,3 +62,24 @@ -> code: OK -> log: Succeeded. 
Tx: preparedef action: REMOVED +> process_proposal "def" +-> code: OK +-> status: ACCEPT + +> process_proposal "preparedef" +-> code: OK +-> status: REJECT + +> prepare_proposal + +> process_proposal +-> code: OK +-> status: ACCEPT + +> finalize_block +-> code: OK +-> data.hex: 0x0600000000000000 + +> commit +-> code: OK + diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 109e3b9fb1..bb94a36ca1 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -208,6 +208,33 @@ Try running these commands: -> key.hex: 646566 -> value: xyz -> value.hex: 78797A + +> prepare_proposal "preparedef" +-> code: OK +-> log: Succeeded. Tx: def action: ADDED +-> code: OK +-> log: Succeeded. Tx: preparedef action: REMOVED + +> process_proposal "def" +-> code: OK +-> status: ACCEPT + +> process_proposal "preparedef" +-> code: OK +-> status: REJECT + +> prepare_proposal + +> process_proposal +-> code: OK +-> status: ACCEPT + +> finalize_block +-> code: OK +-> data.hex: 0x0600000000000000 + +> commit +-> code: OK ``` Note that if we do `finalize_block "abc" ...` it will store `(abc, abc)`, but if From 2b5329ae47a092941c18b3663ebedf0d387abce7 Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Wed, 6 Jul 2022 00:02:29 +0200 Subject: [PATCH 153/203] Typos in spec (#8939) --- spec/abci++/abci++_basic_concepts.md | 4 ++-- spec/abci++/abci++_methods.md | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/spec/abci++/abci++_basic_concepts.md b/spec/abci++/abci++_basic_concepts.md index a467b623ed..9b91678658 100644 --- a/spec/abci++/abci++_basic_concepts.md +++ b/spec/abci++/abci++_basic_concepts.md @@ -32,7 +32,7 @@ now better understood than when ABCI was first written. For example, many ideas scalability can be boiled down to "make the block proposers do work, so the network does not have to". This includes optimizations such as transaction level signature aggregation, state transition proofs, etc. 
Furthermore, many new security properties cannot be achieved in the current paradigm, -as the Application cannot require validators to do more than execute the transactions contained in +as the Application cannot require validators to do more than executing the transactions contained in finalized blocks. This includes features such as threshold cryptography, and guaranteed IBC connection attempts. @@ -312,7 +312,7 @@ Sources of non-determinism in applications may include: See [#56](https://github.com/tendermint/abci/issues/56) for the original discussion. -Note that some methods (`Query, CheckTx, FinalizeBlock`) return non-deterministic data in the form +Note that some methods (`Query, FinalizeBlock`) return non-deterministic data in the form of `Info` and `Log` fields. The `Log` is intended for the literal output from the Application's logger, while the `Info` is any additional info that should be returned. These are the only fields that are not included in block header computations, so we don't need agreement diff --git a/spec/abci++/abci++_methods.md b/spec/abci++/abci++_methods.md index 9d33652dd5..4d1e47bef7 100644 --- a/spec/abci++/abci++_methods.md +++ b/spec/abci++/abci++_methods.md @@ -669,7 +669,7 @@ message for round _r_, height _h_ from validator _q_ (_q_ ≠ _p_): to determine rewards and punishments for the validators. * The Application executes the transactions in `RequestFinalizeBlock.txs` deterministically, according to the rules set up by the Application, before returning control to Tendermint. - Alternatively, it can commit the candidate state corresponding to the same block previously + Alternatively, it can apply the candidate state corresponding to the same block previously executed via `PrepareProposal` or `ProcessProposal`. * `ResponseFinalizeBlock.tx_results[i].Code == 0` only if the _i_-th transaction is fully valid. 
* In next-block execution mode, the Application must provide values for `ResponseFinalizeBlock.app_hash`, @@ -830,8 +830,8 @@ Most of the data structures used in ABCI are shared [common data structures](../ | height | uint64 | The height at which the snapshot was taken (after commit). | 1 | | format | uint32 | An application-specific snapshot format, allowing applications to version their snapshot data format and make backwards-incompatible changes. Tendermint does not interpret this. | 2 | | chunks | uint32 | The number of chunks in the snapshot. Must be at least 1 (even if empty). | 3 | - | hash | bytes | TAn arbitrary snapshot hash. Must be equal only for identical snapshots across nodes. Tendermint does not interpret the hash, it only compares them. | 3 | - | metadata | bytes | Arbitrary application metadata, for example chunk hashes or other verification data. | 3 | + | hash | bytes | An arbitrary snapshot hash. Must be equal only for identical snapshots across nodes. Tendermint does not interpret the hash, it only compares them. | 4 | + | metadata | bytes | Arbitrary application metadata, for example chunk hashes or other verification data. | 5 | * **Usage**: * Used for state sync snapshots, see the [state sync section](../p2p/messages/state-sync.md) for details. From 70e7372c4ac518c740458f7873ba9b4409a44751 Mon Sep 17 00:00:00 2001 From: samricotta <37125168+samricotta@users.noreply.github.com> Date: Wed, 6 Jul 2022 14:30:45 +0200 Subject: [PATCH 154/203] added name to CO (#8947) Co-authored-by: Samantha Ricotta --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 950ed1dc24..05357c87c8 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,7 +7,7 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. 
-* @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @ancazamfir +* @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @ancazamfir @samricotta # Spec related changes can be approved by the protocol design team /spec @josef-widder @milosevic @cason @sergio-mena @jmalicevic From be6d74e6579bad8709def9716268f12f460df93d Mon Sep 17 00:00:00 2001 From: yihuang Date: Thu, 7 Jul 2022 02:05:48 +0800 Subject: [PATCH 155/203] Work around indexing problem for duplicate transactions (forward port: #8625) (#8945) Port the bug fix terra-money#76 to upstream. This is critical for ethermint json-rpc to work. fix: prevent duplicate tx index if it succeeded before fix: use CodeTypeOk instead of 0 fix: handle duplicate txs within the same block Co-authored-by: jess jesse@soob.co ref: #5281 Co-authored-by: M. J. Fromberger --- CHANGELOG_PENDING.md | 1 + internal/state/indexer/indexer_service.go | 52 +++++- .../state/indexer/indexer_service_test.go | 159 ++++++++++++++++++ 3 files changed, 211 insertions(+), 1 deletion(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 13eeaef165..9936e13098 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -101,3 +101,4 @@ Special thanks to external contributors on this release: - [cli] \#8276 scmigrate: ensure target key is correctly renamed. (@creachadair) - [cli] \#8294 keymigrate: ensure block hash keys are correctly translated. (@creachadair) - [cli] \#8352 keymigrate: ensure transaction hash keys are correctly translated. (@creachadair) +- (indexer) \#8625 Fix overriding tx index of duplicated txs. 
diff --git a/internal/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go index e73e4a3ba2..d6db82806a 100644 --- a/internal/state/indexer/indexer_service.go +++ b/internal/state/indexer/indexer_service.go @@ -4,6 +4,7 @@ import ( "context" "time" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/libs/log" @@ -96,7 +97,14 @@ func (is *Service) publish(msg pubsub.Message) error { if curr.Size() != 0 { start := time.Now() - err := sink.IndexTxEvents(curr.Ops) + + var err error + curr.Ops, err = DeduplicateBatch(curr.Ops, sink) + if err != nil { + is.logger.Error("failed to deduplicate batch", "height", is.currentBlock.height, "error", err) + } + + err = sink.IndexTxEvents(curr.Ops) if err != nil { is.logger.Error("failed to index block txs", "height", is.currentBlock.height, "err", err) @@ -169,3 +177,45 @@ func IndexingEnabled(sinks []EventSink) bool { return false } + +// DeduplicateBatch consider the case of duplicate txs. +// if the current one under investigation is NOT OK, then we need to check +// whether there's a previously indexed tx. +// SKIP the current tx if the previously indexed record is found and successful. +func DeduplicateBatch(ops []*abci.TxResult, sink EventSink) ([]*abci.TxResult, error) { + result := make([]*abci.TxResult, 0, len(ops)) + + // keep track of successful txs in this block in order to suppress latter ones being indexed. + var successfulTxsInThisBlock = make(map[string]struct{}) + + for _, txResult := range ops { + hash := types.Tx(txResult.Tx).Hash() + + if txResult.Result.IsOK() { + successfulTxsInThisBlock[string(hash)] = struct{}{} + } else { + // if it already appeared in current block and was successful, skip. 
+ if _, found := successfulTxsInThisBlock[string(hash)]; found { + continue + } + + // check if this tx hash is already indexed + old, err := sink.GetTxByHash(hash) + + // if db op errored + // Not found is not an error + if err != nil { + return nil, err + } + + // if it's already indexed in an older block and was successful, skip. + if old != nil && old.Result.Code == abci.CodeTypeOK { + continue + } + } + + result = append(result, txResult) + } + + return result, nil +} diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index 6126ae2595..6dc1bdf504 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -109,6 +109,165 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { assert.Nil(t, teardown(t, pool)) } +func TestTxIndexDuplicatedTx(t *testing.T) { + var mockTx = types.Tx("MOCK_TX_HASH") + + testCases := []struct { + name string + tx1 abci.TxResult + tx2 abci.TxResult + expSkip bool // do we expect the second tx to be skipped by tx indexer + }{ + {"skip, previously successful", + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK, + }, + }, + abci.TxResult{ + Height: 2, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + true, + }, + {"not skip, previously unsuccessful", + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + abci.TxResult{ + Height: 2, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + false, + }, + {"not skip, both successful", + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK, + }, + }, + abci.TxResult{ + Height: 2, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK, + }, + }, + false, + }, + {"not skip, both unsuccessful", + 
abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + abci.TxResult{ + Height: 2, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + false, + }, + {"skip, same block, previously successful", + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK, + }, + }, + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + true, + }, + {"not skip, same block, previously unsuccessful", + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK + 1, + }, + }, + abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ExecTxResult{ + Code: abci.CodeTypeOK, + }, + }, + false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sink := kv.NewEventSink(dbm.NewMemDB()) + + if tc.tx1.Height != tc.tx2.Height { + // index the first tx + err := sink.IndexTxEvents([]*abci.TxResult{&tc.tx1}) + require.NoError(t, err) + + // check if the second one should be skipped. 
+ ops, err := indexer.DeduplicateBatch([]*abci.TxResult{&tc.tx2}, sink) + require.NoError(t, err) + + if tc.expSkip { + require.Empty(t, ops) + } else { + require.Equal(t, []*abci.TxResult{&tc.tx2}, ops) + } + } else { + // same block + ops := []*abci.TxResult{&tc.tx1, &tc.tx2} + ops, err := indexer.DeduplicateBatch(ops, sink) + require.NoError(t, err) + if tc.expSkip { + // the second one is skipped + require.Equal(t, []*abci.TxResult{&tc.tx1}, ops) + } else { + require.Equal(t, []*abci.TxResult{&tc.tx1, &tc.tx2}, ops) + } + } + }) + } +} + func readSchema() ([]*schema.Migration, error) { filename := "./sink/psql/schema.sql" contents, err := os.ReadFile(filename) From 27c523dccbe7f2f577868ee871b0042c1fef9a53 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Wed, 6 Jul 2022 20:17:56 +0200 Subject: [PATCH 156/203] mempool: return error when mempool is full and inbound tx is rejected (#8942) Addresses: https://github.com/tendermint/tendermint/issues/8928 --- internal/mempool/mempool.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index caee97ff8e..2398180fc6 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -540,7 +540,7 @@ func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, ) if len(evictTxs) == 0 { // No room for the new incoming transaction so we just remove it from - // the cache. + // the cache and return an error to the user. 
txmp.cache.Remove(wtx.tx) txmp.logger.Error( "rejected incoming good transaction; mempool full", @@ -548,7 +548,7 @@ func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, "err", err.Error(), ) txmp.metrics.RejectedTxs.Add(1) - return nil + return err } // evict an existing transaction(s) From d1a16e8ff0393b80d45eb7299cac314570e48f8c Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 7 Jul 2022 12:13:52 -0400 Subject: [PATCH 157/203] p2p: simpler priority queue (#8929) --- internal/p2p/channel.go | 4 ++ internal/p2p/pqueue.go | 14 +++-- internal/p2p/router.go | 13 ++++- internal/p2p/rqueue.go | 112 ++++++++++++++++++++++++++++++++++++ internal/p2p/rqueue_test.go | 47 +++++++++++++++ 5 files changed, 183 insertions(+), 7 deletions(-) create mode 100644 internal/p2p/rqueue.go create mode 100644 internal/p2p/rqueue_test.go diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go index d6763543ab..e33e7faa7e 100644 --- a/internal/p2p/channel.go +++ b/internal/p2p/channel.go @@ -19,6 +19,10 @@ type Envelope struct { ChannelID ChannelID } +func (e Envelope) IsZero() bool { + return e.From == "" && e.To == "" && e.Message == nil +} + // Wrapper is a Protobuf message that can contain a variety of inner messages // (e.g. via oneof fields). 
If a Channel's message type implements Wrapper, the // Router will automatically wrap outbound messages and unwrap inbound messages, diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index 268daa8deb..3cd1c897a5 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -31,8 +31,16 @@ func (pq priorityQueue) get(i int) *pqEnvelope { return pq[i] } func (pq priorityQueue) Len() int { return len(pq) } func (pq priorityQueue) Less(i, j int) bool { - // if both elements have the same priority, prioritize based on most recent + // if both elements have the same priority, prioritize based + // on most recent and largest if pq[i].priority == pq[j].priority { + diff := pq[i].timestamp.Sub(pq[j].timestamp) + if diff < 0 { + diff *= -1 + } + if diff < 10*time.Millisecond { + return pq[i].size > pq[j].size + } return pq[i].timestamp.After(pq[j].timestamp) } @@ -272,12 +280,10 @@ func (s *pqScheduler) process(ctx context.Context) { } func (s *pqScheduler) push(pqEnv *pqEnvelope) { - chIDStr := strconv.Itoa(int(pqEnv.envelope.ChannelID)) - // enqueue the incoming Envelope heap.Push(s.pq, pqEnv) s.size += pqEnv.size - s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Add(float64(pqEnv.size)) + s.metrics.PeerQueueMsgSize.With("ch_id", strconv.Itoa(int(pqEnv.envelope.ChannelID))).Add(float64(pqEnv.size)) // Update the cumulative sizes by adding the Envelope's size to every // priority less than or equal to it. diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 8b7db9a034..0e55049c19 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -68,8 +68,9 @@ type RouterOptions struct { } const ( - queueTypeFifo = "fifo" - queueTypePriority = "priority" + queueTypeFifo = "fifo" + queueTypePriority = "priority" + queueTypeSimplePriority = "simple-priority" ) // Validate validates router options. 
@@ -77,7 +78,7 @@ func (o *RouterOptions) Validate() error { switch o.QueueType { case "": o.QueueType = queueTypeFifo - case queueTypeFifo, queueTypePriority: + case queueTypeFifo, queueTypePriority, queueTypeSimplePriority: // pass default: return fmt.Errorf("queue type %q is not supported", o.QueueType) @@ -227,6 +228,9 @@ func (r *Router) createQueueFactory(ctx context.Context) (func(int) queue, error return q }, nil + case queueTypeSimplePriority: + return func(size int) queue { return newSimplePriorityQueue(ctx, size, r.chDescs) }, nil + default: return nil, fmt.Errorf("cannot construct queue of type %q", r.options.QueueType) } @@ -304,6 +308,9 @@ func (r *Router) routeChannel( for { select { case envelope := <-outCh: + if envelope.IsZero() { + continue + } // Mark the envelope with the channel ID to allow sendPeer() to pass // it on to Transport.SendMessage(). envelope.ChannelID = chID diff --git a/internal/p2p/rqueue.go b/internal/p2p/rqueue.go new file mode 100644 index 0000000000..8d6406864a --- /dev/null +++ b/internal/p2p/rqueue.go @@ -0,0 +1,112 @@ +package p2p + +import ( + "container/heap" + "context" + "sort" + "time" + + "github.com/gogo/protobuf/proto" +) + +type simpleQueue struct { + input chan Envelope + output chan Envelope + closeFn func() + closeCh <-chan struct{} + + maxSize int + chDescs []*ChannelDescriptor +} + +func newSimplePriorityQueue(ctx context.Context, size int, chDescs []*ChannelDescriptor) *simpleQueue { + if size%2 != 0 { + size++ + } + + ctx, cancel := context.WithCancel(ctx) + q := &simpleQueue{ + input: make(chan Envelope, size*2), + output: make(chan Envelope, size/2), + maxSize: size * size, + closeCh: ctx.Done(), + closeFn: cancel, + } + + go q.run(ctx) + return q +} + +func (q *simpleQueue) enqueue() chan<- Envelope { return q.input } +func (q *simpleQueue) dequeue() <-chan Envelope { return q.output } +func (q *simpleQueue) close() { q.closeFn() } +func (q *simpleQueue) closed() <-chan struct{} { return q.closeCh } + 
+func (q *simpleQueue) run(ctx context.Context) { + defer q.closeFn() + + var chPriorities = make(map[ChannelID]uint, len(q.chDescs)) + for _, chDesc := range q.chDescs { + chID := chDesc.ID + chPriorities[chID] = uint(chDesc.Priority) + } + + pq := make(priorityQueue, 0, q.maxSize) + heap.Init(&pq) + ticker := time.NewTicker(10 * time.Millisecond) + // must have a buffer of exactly one because both sides of + // this channel are used in this loop, and simply signals adds + // to the heap + signal := make(chan struct{}, 1) + for { + select { + case <-ctx.Done(): + return + case <-q.closeCh: + return + case e := <-q.input: + // enqueue the incoming Envelope + heap.Push(&pq, &pqEnvelope{ + envelope: e, + size: uint(proto.Size(e.Message)), + priority: chPriorities[e.ChannelID], + timestamp: time.Now().UTC(), + }) + + select { + case signal <- struct{}{}: + default: + if len(pq) > q.maxSize { + sort.Sort(pq) + pq = pq[:q.maxSize] + } + } + + case <-ticker.C: + if len(pq) > q.maxSize { + sort.Sort(pq) + pq = pq[:q.maxSize] + } + if len(pq) > 0 { + select { + case signal <- struct{}{}: + default: + } + } + case <-signal: + SEND: + for len(pq) > 0 { + select { + case <-ctx.Done(): + return + case <-q.closeCh: + return + case q.output <- heap.Pop(&pq).(*pqEnvelope).envelope: + continue SEND + default: + break SEND + } + } + } + } +} diff --git a/internal/p2p/rqueue_test.go b/internal/p2p/rqueue_test.go new file mode 100644 index 0000000000..43c4066e57 --- /dev/null +++ b/internal/p2p/rqueue_test.go @@ -0,0 +1,47 @@ +package p2p + +import ( + "context" + "testing" + "time" +) + +func TestSimpleQueue(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // set up a small queue with very small buffers so we can + // watch it shed load, then send a bunch of messages to the + // queue, most of which we'll watch it drop. 
+	sq := newSimplePriorityQueue(ctx, 1, nil)
+	for i := 0; i < 100; i++ {
+		sq.enqueue() <- Envelope{From: "merlin"}
+	}
+
+	seen := 0
+
+RETRY:
+	for seen <= 2 {
+		select {
+		case e := <-sq.dequeue():
+			if e.From != "merlin" {
+				continue
+			}
+			seen++
+		case <-time.After(10 * time.Millisecond):
+			break RETRY
+		}
+	}
+	// if we don't see any messages, then it's just broken.
+	if seen == 0 {
+		t.Errorf("seen %d messages, should have seen at least one", seen)
+	}
+	// ensure that load shedding happens: there can be at most 3
+	// messages that we get out of this, one that was buffered
+	// plus 2 that were under the cap, everything else gets
+	// dropped.
+	if seen > 3 {
+		t.Errorf("saw %d messages, should have seen 3 or fewer", seen)
+	}
+
+}
From 636320f9010be75c0ae804714ddce62b46f17930 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Thu, 7 Jul 2022 12:29:50 -0400
Subject: [PATCH 158/203] p2p: delete cruft (#8958)

I think the decision in #8806 is that we shouldn't do this yet, so I
think it's best to just drop this.
--- internal/p2p/peermanager.go | 41 ------------------------------------- 1 file changed, 41 deletions(-) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 8230b262eb..fabff390e6 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -1419,47 +1419,6 @@ func (s *peerStore) Ranked() []*peerInfo { } sort.Slice(s.ranked, func(i, j int) bool { return s.ranked[i].Score() > s.ranked[j].Score() - // TODO: reevaluate more wholistic sorting, perhaps as follows: - - // // sort inactive peers after active peers - // if s.ranked[i].Inactive && !s.ranked[j].Inactive { - // return false - // } else if !s.ranked[i].Inactive && s.ranked[j].Inactive { - // return true - // } - - // iLastDialed, iLastDialSuccess := s.ranked[i].LastDialed() - // jLastDialed, jLastDialSuccess := s.ranked[j].LastDialed() - - // // sort peers who our most recent dialing attempt was - // // successful ahead of peers with recent dialing - // // failures - // switch { - // case iLastDialSuccess && jLastDialSuccess: - // // if both peers were (are?) successfully - // // connected, convey their score, but give the - // // one we dialed successfully most recently a bonus - - // iScore := s.ranked[i].Score() - // jScore := s.ranked[j].Score() - // if jLastDialed.Before(iLastDialed) { - // jScore++ - // } else { - // iScore++ - // } - - // return iScore > jScore - // case iLastDialSuccess: - // return true - // case jLastDialSuccess: - // return false - // default: - // // if both peers were not successful in their - // // most recent dialing attempt, fall back to - // // peer score. 
- - // return s.ranked[i].Score() > s.ranked[j].Score() - // } }) return s.ranked } From 61ce384d752233c5f236d0b14c2685a259f0557a Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 8 Jul 2022 10:06:57 -0400 Subject: [PATCH 159/203] p2p: make peer gossiping coinflip safer (#8949) Closes #8948 --- internal/p2p/peermanager.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index fabff390e6..4d179e63fe 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -954,7 +954,7 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress } var numAddresses int - var totalScore int + var totalAbsScore int ranked := m.store.Ranked() seenAddresses := map[NodeAddress]struct{}{} scores := map[types.NodeID]int{} @@ -965,8 +965,12 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress continue } score := int(peer.Score()) + if score < 0 { + totalAbsScore += -score + } else { + totalAbsScore += score + } - totalScore += score scores[peer.ID] = score for addr := range peer.AddressInfo { if _, ok := m.options.PrivatePeers[addr.NodeID]; !ok { @@ -975,6 +979,8 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress } } + meanAbsScore := (totalAbsScore + 1) / (len(scores) + 1) + var attempts uint16 var addedLastIteration bool @@ -1023,7 +1029,7 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress // peer. 
// nolint:gosec // G404: Use of weak random number generator - if numAddresses <= int(limit) || rand.Intn(totalScore+1) <= scores[peer.ID]+1 || rand.Intn((idx+1)*10) <= idx+1 { + if numAddresses <= int(limit) || rand.Intn((meanAbsScore*2)+1) <= scores[peer.ID]+1 || rand.Intn((idx+1)*10) <= idx+1 { addresses = append(addresses, addressInfo.Address) addedLastIteration = true seenAddresses[addressInfo.Address] = struct{}{} From 6902fa92829ff5cc2aa051fadf15d45322251463 Mon Sep 17 00:00:00 2001 From: Rootul Patel Date: Mon, 11 Jul 2022 10:14:54 -0400 Subject: [PATCH 160/203] Fix punctuation (#8972) --- docs/introduction/what-is-tendermint.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/introduction/what-is-tendermint.md b/docs/introduction/what-is-tendermint.md index 417152d748..18fe5ce776 100644 --- a/docs/introduction/what-is-tendermint.md +++ b/docs/introduction/what-is-tendermint.md @@ -103,9 +103,9 @@ Another example of a cryptocurrency application built on Tendermint is to Tendermint, but is more opinionated about how the state is managed, and requires that all application behaviour runs in potentially many docker containers, modules it calls "chaincode". It uses an -implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf). +implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf) from a team at IBM that is augmented to handle potentially non-deterministic -chaincode It is possible to implement this docker-based behaviour as a ABCI app +chaincode. It is possible to implement this docker-based behaviour as a ABCI app in Tendermint, though extending Tendermint to handle non-determinism remains for future work. 
From d5fb82e414b49d1bb27eabcaded6213fec06f5ac Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 11 Jul 2022 16:22:40 -0400 Subject: [PATCH 161/203] p2p: make p2p.Channel an interface (#8967) This is (#8446) pulled from the `main/libp2p` branch but without any of the libp2p content, and is perhaps the easiest first step to enable pluggability at the peer layer, and makes it possible hoist shims (including for, say 0.34) into tendermint without touching the reactors. --- internal/blocksync/reactor.go | 16 +++--- internal/blocksync/reactor_test.go | 6 +- internal/consensus/invalid_test.go | 2 +- internal/consensus/reactor.go | 32 +++++------ internal/consensus/reactor_test.go | 10 ++-- internal/evidence/reactor.go | 8 +-- internal/evidence/reactor_test.go | 4 +- internal/mempool/reactor.go | 8 +-- internal/mempool/reactor_test.go | 6 +- internal/p2p/channel.go | 81 +++++++++++++++++---------- internal/p2p/channel_test.go | 4 +- internal/p2p/p2ptest/network.go | 12 ++-- internal/p2p/p2ptest/require.go | 12 ++-- internal/p2p/pex/reactor.go | 9 ++- internal/p2p/pex/reactor_test.go | 17 +++--- internal/p2p/router.go | 11 ++-- internal/p2p/router_test.go | 2 +- internal/statesync/dispatcher.go | 4 +- internal/statesync/dispatcher_test.go | 4 +- internal/statesync/reactor.go | 18 +++--- internal/statesync/reactor_test.go | 14 +++-- internal/statesync/stateprovider.go | 4 +- internal/statesync/syncer.go | 4 +- 23 files changed, 157 insertions(+), 131 deletions(-) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 6c1c060e74..c1b032b03d 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -135,7 +135,7 @@ func (r *Reactor) OnStart(ctx context.Context) error { if err != nil { return err } - r.chCreator = func(context.Context, *conn.ChannelDescriptor) (*p2p.Channel, error) { return blockSyncCh, nil } + r.chCreator = func(context.Context, *conn.ChannelDescriptor) (p2p.Channel, error) { return blockSyncCh, nil } 
state, err := r.stateStore.Load() if err != nil { @@ -183,7 +183,7 @@ func (r *Reactor) OnStop() { // respondToPeer loads a block and sends it to the requesting peer, if we have it. // Otherwise, we'll respond saying we do not have it. -func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh *p2p.Channel) error { +func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh p2p.Channel) error { block := r.store.LoadBlock(msg.Height) if block == nil { r.logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) @@ -223,7 +223,7 @@ func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blockSyncCh *p2p.Channel) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blockSyncCh p2p.Channel) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -298,7 +298,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blo // message execution will result in a PeerError being sent on the BlockSyncChannel. // When the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. 
-func (r *Reactor) processBlockSyncCh(ctx context.Context, blockSyncCh *p2p.Channel) { +func (r *Reactor) processBlockSyncCh(ctx context.Context, blockSyncCh p2p.Channel) { iter := blockSyncCh.Receive(ctx) for iter.Next(ctx) { envelope := iter.Envelope() @@ -319,7 +319,7 @@ func (r *Reactor) processBlockSyncCh(ctx context.Context, blockSyncCh *p2p.Chann } // processPeerUpdate processes a PeerUpdate. -func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, blockSyncCh *p2p.Channel) { +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, blockSyncCh p2p.Channel) { r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) // XXX: Pool#RedoRequest can sometimes give us an empty peer. @@ -354,7 +354,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, blockSyncCh *p2p.Channel) { +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, blockSyncCh p2p.Channel) { for { select { case <-ctx.Done(): @@ -396,7 +396,7 @@ func (r *Reactor) SwitchToBlockSync(ctx context.Context, state sm.State) error { return nil } -func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh *p2p.Channel) { +func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh p2p.Channel) { statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) defer statusUpdateTicker.Stop() @@ -438,7 +438,7 @@ func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh *p2p.Channel) // do. // // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! 
-func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh *p2p.Channel) { +func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh p2p.Channel) { var ( trySyncTicker = time.NewTicker(trySyncIntervalMS * time.Millisecond) switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second) diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 141eaf7ece..3ef2ec86f1 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -37,7 +37,7 @@ type reactorTestSuite struct { reactors map[types.NodeID]*Reactor app map[types.NodeID]abciclient.Client - blockSyncChannels map[types.NodeID]*p2p.Channel + blockSyncChannels map[types.NodeID]p2p.Channel peerChans map[types.NodeID]chan p2p.PeerUpdate peerUpdates map[types.NodeID]*p2p.PeerUpdates } @@ -64,7 +64,7 @@ func setup( nodes: make([]types.NodeID, 0, numNodes), reactors: make(map[types.NodeID]*Reactor, numNodes), app: make(map[types.NodeID]abciclient.Client, numNodes), - blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes), + blockSyncChannels: make(map[types.NodeID]p2p.Channel, numNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), } @@ -177,7 +177,7 @@ func (rts *reactorTestSuite) addNode( rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) - chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (p2p.Channel, error) { return rts.blockSyncChannels[nodeID], nil } diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index 93c5cea1b8..4685bb3189 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -107,7 +107,7 @@ func 
invalidDoPrevoteFunc( round int32, cs *State, r *Reactor, - voteCh *p2p.Channel, + voteCh p2p.Channel, pv types.PrivValidator, ) { // routine to: diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index c353e0c73d..3ba95c8361 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -165,10 +165,10 @@ func NewReactor( } type channelBundle struct { - state *p2p.Channel - data *p2p.Channel - vote *p2p.Channel - votSet *p2p.Channel + state p2p.Channel + data p2p.Channel + vote p2p.Channel + votSet p2p.Channel } // OnStart starts separate go routines for each p2p Channel and listens for @@ -310,14 +310,14 @@ func (r *Reactor) GetPeerState(peerID types.NodeID) (*PeerState, bool) { return ps, ok } -func (r *Reactor) broadcastNewRoundStepMessage(ctx context.Context, rs *cstypes.RoundState, stateCh *p2p.Channel) error { +func (r *Reactor) broadcastNewRoundStepMessage(ctx context.Context, rs *cstypes.RoundState, stateCh p2p.Channel) error { return stateCh.Send(ctx, p2p.Envelope{ Broadcast: true, Message: makeRoundStepMessage(rs), }) } -func (r *Reactor) broadcastNewValidBlockMessage(ctx context.Context, rs *cstypes.RoundState, stateCh *p2p.Channel) error { +func (r *Reactor) broadcastNewValidBlockMessage(ctx context.Context, rs *cstypes.RoundState, stateCh p2p.Channel) error { psHeader := rs.ProposalBlockParts.Header() return stateCh.Send(ctx, p2p.Envelope{ Broadcast: true, @@ -331,7 +331,7 @@ func (r *Reactor) broadcastNewValidBlockMessage(ctx context.Context, rs *cstypes }) } -func (r *Reactor) broadcastHasVoteMessage(ctx context.Context, vote *types.Vote, stateCh *p2p.Channel) error { +func (r *Reactor) broadcastHasVoteMessage(ctx context.Context, vote *types.Vote, stateCh p2p.Channel) error { return stateCh.Send(ctx, p2p.Envelope{ Broadcast: true, Message: &tmcons.HasVote{ @@ -346,7 +346,7 @@ func (r *Reactor) broadcastHasVoteMessage(ctx context.Context, vote *types.Vote, // subscribeToBroadcastEvents subscribes for new 
round steps and votes using the // internal pubsub defined in the consensus state to broadcast them to peers // upon receiving. -func (r *Reactor) subscribeToBroadcastEvents(ctx context.Context, stateCh *p2p.Channel) { +func (r *Reactor) subscribeToBroadcastEvents(ctx context.Context, stateCh p2p.Channel) { onStopCh := r.state.getOnStopCh() err := r.state.evsw.AddListenerForEvent( @@ -403,7 +403,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) *tmcons.NewRoundStep { } } -func (r *Reactor) sendNewRoundStepMessage(ctx context.Context, peerID types.NodeID, stateCh *p2p.Channel) error { +func (r *Reactor) sendNewRoundStepMessage(ctx context.Context, peerID types.NodeID, stateCh p2p.Channel) error { return stateCh.Send(ctx, p2p.Envelope{ To: peerID, Message: makeRoundStepMessage(r.getRoundState()), @@ -433,7 +433,7 @@ func (r *Reactor) getRoundState() *cstypes.RoundState { return r.rs } -func (r *Reactor) gossipDataForCatchup(ctx context.Context, rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState, dataCh *p2p.Channel) { +func (r *Reactor) gossipDataForCatchup(ctx context.Context, rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState, dataCh p2p.Channel) { logger := r.logger.With("height", prs.Height).With("peer", ps.peerID) if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok { @@ -497,7 +497,7 @@ func (r *Reactor) gossipDataForCatchup(ctx context.Context, rs *cstypes.RoundSta time.Sleep(r.state.config.PeerGossipSleepDuration) } -func (r *Reactor) gossipDataRoutine(ctx context.Context, ps *PeerState, dataCh *p2p.Channel) { +func (r *Reactor) gossipDataRoutine(ctx context.Context, ps *PeerState, dataCh p2p.Channel) { logger := r.logger.With("peer", ps.peerID) timer := time.NewTimer(0) @@ -632,7 +632,7 @@ OUTER_LOOP: // pickSendVote picks a vote and sends it to the peer. It will return true if // there is a vote to send and false otherwise. 
-func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.VoteSetReader, voteCh *p2p.Channel) (bool, error) { +func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.VoteSetReader, voteCh p2p.Channel) (bool, error) { vote, ok := ps.PickVoteToSend(votes) if !ok { return false, nil @@ -660,7 +660,7 @@ func (r *Reactor) gossipVotesForHeight( rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState, - voteCh *p2p.Channel, + voteCh p2p.Channel, ) (bool, error) { logger := r.logger.With("height", prs.Height).With("peer", ps.peerID) @@ -732,7 +732,7 @@ func (r *Reactor) gossipVotesForHeight( return false, nil } -func (r *Reactor) gossipVotesRoutine(ctx context.Context, ps *PeerState, voteCh *p2p.Channel) { +func (r *Reactor) gossipVotesRoutine(ctx context.Context, ps *PeerState, voteCh p2p.Channel) { logger := r.logger.With("peer", ps.peerID) timer := time.NewTimer(0) @@ -804,7 +804,7 @@ func (r *Reactor) gossipVotesRoutine(ctx context.Context, ps *PeerState, voteCh // NOTE: `queryMaj23Routine` has a simple crude design since it only comes // into play for liveness when there's a signature DDoS attack happening. -func (r *Reactor) queryMaj23Routine(ctx context.Context, ps *PeerState, stateCh *p2p.Channel) { +func (r *Reactor) queryMaj23Routine(ctx context.Context, ps *PeerState, stateCh p2p.Channel) { timer := time.NewTimer(0) defer timer.Stop() @@ -1015,7 +1015,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // If we fail to find the peer state for the envelope sender, we perform a no-op // and return. This can happen when we process the envelope after the peer is // removed. 
-func (r *Reactor) handleStateMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message, voteSetCh *p2p.Channel) error { +func (r *Reactor) handleStateMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message, voteSetCh p2p.Channel) error { ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { r.logger.Debug("failed to find peer state", "peer", envelope.From, "ch_id", "StateChannel") diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 96cf800bdf..d848f53e7a 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -46,10 +46,10 @@ type reactorTestSuite struct { reactors map[types.NodeID]*Reactor subs map[types.NodeID]eventbus.Subscription blocksyncSubs map[types.NodeID]eventbus.Subscription - stateChannels map[types.NodeID]*p2p.Channel - dataChannels map[types.NodeID]*p2p.Channel - voteChannels map[types.NodeID]*p2p.Channel - voteSetBitsChannels map[types.NodeID]*p2p.Channel + stateChannels map[types.NodeID]p2p.Channel + dataChannels map[types.NodeID]p2p.Channel + voteChannels map[types.NodeID]p2p.Channel + voteSetBitsChannels map[types.NodeID]p2p.Channel } func chDesc(chID p2p.ChannelID, size int) *p2p.ChannelDescriptor { @@ -86,7 +86,7 @@ func setup( t.Cleanup(cancel) chCreator := func(nodeID types.NodeID) p2p.ChannelCreator { - return func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + return func(ctx context.Context, desc *p2p.ChannelDescriptor) (p2p.Channel, error) { switch desc.ID { case StateChannel: return rts.stateChannels[nodeID], nil diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index 1d952d30ea..d0bc28b13c 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -159,7 +159,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope) (er // processEvidenceCh implements a blocking event loop where we listen for p2p // Envelope messages from the evidenceCh. 
-func (r *Reactor) processEvidenceCh(ctx context.Context, evidenceCh *p2p.Channel) { +func (r *Reactor) processEvidenceCh(ctx context.Context, evidenceCh p2p.Channel) { iter := evidenceCh.Receive(ctx) for iter.Next(ctx) { envelope := iter.Envelope() @@ -186,7 +186,7 @@ func (r *Reactor) processEvidenceCh(ctx context.Context, evidenceCh *p2p.Channel // connects/disconnects frequently from the broadcasting peer(s). // // REF: https://github.com/tendermint/tendermint/issues/4727 -func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, evidenceCh *p2p.Channel) { +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, evidenceCh p2p.Channel) { r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() @@ -227,7 +227,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, evidenceCh *p2p.Channel) { +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, evidenceCh p2p.Channel) { for { select { case peerUpdate := <-peerUpdates.Updates(): @@ -249,7 +249,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerU // that the peer has already received or may not be ready for. 
// // REF: https://github.com/tendermint/tendermint/issues/4727 -func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID, evidenceCh *p2p.Channel) { +func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID, evidenceCh p2p.Channel) { var next *clist.CElement defer func() { diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index f23195faed..92566ccc87 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -38,7 +38,7 @@ type reactorTestSuite struct { logger log.Logger reactors map[types.NodeID]*evidence.Reactor pools map[types.NodeID]*evidence.Pool - evidenceChannels map[types.NodeID]*p2p.Channel + evidenceChannels map[types.NodeID]p2p.Channel peerUpdates map[types.NodeID]*p2p.PeerUpdates peerChans map[types.NodeID]chan p2p.PeerUpdate nodes []*p2ptest.Node @@ -96,7 +96,7 @@ func setup(ctx context.Context, t *testing.T, stateStores []sm.Store) *reactorTe rts.network.Nodes[nodeID].PeerManager.Register(ctx, pu) rts.nodes = append(rts.nodes, rts.network.Nodes[nodeID]) - chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (p2p.Channel, error) { return rts.evidenceChannels[nodeID], nil } diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index 28ee9e334a..62cdf386c5 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -194,7 +194,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope) (er // processMempoolCh implements a blocking event loop where we listen for p2p // Envelope messages from the mempoolCh. 
-func (r *Reactor) processMempoolCh(ctx context.Context, mempoolCh *p2p.Channel) { +func (r *Reactor) processMempoolCh(ctx context.Context, mempoolCh p2p.Channel) { iter := mempoolCh.Receive(ctx) for iter.Next(ctx) { envelope := iter.Envelope() @@ -215,7 +215,7 @@ func (r *Reactor) processMempoolCh(ctx context.Context, mempoolCh *p2p.Channel) // goroutine or not. If not, we start one for the newly added peer. For down or // removed peers, we remove the peer from the mempool peer ID set and signal to // stop the tx broadcasting goroutine. -func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, mempoolCh *p2p.Channel) { +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, mempoolCh p2p.Channel) { r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() @@ -264,7 +264,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. 
-func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, mempoolCh *p2p.Channel) { +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, mempoolCh p2p.Channel) { for { select { case <-ctx.Done(): @@ -275,7 +275,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerU } } -func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, mempoolCh *p2p.Channel) { +func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, mempoolCh p2p.Channel) { peerMempoolID := r.ids.GetForPeer(peerID) var nextGossipTx *clist.CElement diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index 034c5eaa26..ee7fe777f2 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -30,7 +30,7 @@ type reactorTestSuite struct { logger log.Logger reactors map[types.NodeID]*Reactor - mempoolChannels map[types.NodeID]*p2p.Channel + mempoolChannels map[types.NodeID]p2p.Channel mempools map[types.NodeID]*TxMempool kvstores map[types.NodeID]*kvstore.Application @@ -51,7 +51,7 @@ func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNode logger: log.NewNopLogger().With("testCase", t.Name()), network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), reactors: make(map[types.NodeID]*Reactor, numNodes), - mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes), + mempoolChannels: make(map[types.NodeID]p2p.Channel, numNodes), mempools: make(map[types.NodeID]*TxMempool, numNodes), kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), @@ -75,7 +75,7 @@ func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNode rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) - chCreator := 
func(ctx context.Context, chDesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(ctx context.Context, chDesc *p2p.ChannelDescriptor) (p2p.Channel, error) { return rts.mempoolChannels[nodeID], nil } diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go index e33e7faa7e..394656632d 100644 --- a/internal/p2p/channel.go +++ b/internal/p2p/channel.go @@ -2,6 +2,7 @@ package p2p import ( "context" + "errors" "fmt" "sync" @@ -37,6 +38,16 @@ type Wrapper interface { Unwrap() (proto.Message, error) } +type Channel interface { + fmt.Stringer + + Err() error + + Send(context.Context, Envelope) error + SendError(context.Context, PeerError) error + Receive(context.Context) *ChannelIterator +} + // PeerError is a peer error reported via Channel.Error. // // FIXME: This currently just disconnects the peer, which is too simplistic. @@ -56,9 +67,9 @@ type PeerError struct { func (pe PeerError) Error() string { return fmt.Sprintf("peer=%q: %s", pe.NodeID, pe.Err.Error()) } func (pe PeerError) Unwrap() error { return pe.Err } -// Channel is a bidirectional channel to exchange Protobuf messages with peers. +// legacyChannel is a bidirectional channel to exchange Protobuf messages with peers. // Each message is wrapped in an Envelope to specify its sender and receiver. -type Channel struct { +type legacyChannel struct { ID ChannelID inCh <-chan Envelope // inbound messages (peers to reactors) outCh chan<- Envelope // outbound messages (reactors to peers) @@ -69,9 +80,10 @@ type Channel struct { // NewChannel creates a new channel. It is primarily for internal and test // use, reactors should use Router.OpenChannel(). 
-func NewChannel(id ChannelID, inCh <-chan Envelope, outCh chan<- Envelope, errCh chan<- PeerError) *Channel { - return &Channel{ +func NewChannel(id ChannelID, name string, inCh <-chan Envelope, outCh chan<- Envelope, errCh chan<- PeerError) Channel { + return &legacyChannel{ ID: id, + name: name, inCh: inCh, outCh: outCh, errCh: errCh, @@ -80,7 +92,7 @@ func NewChannel(id ChannelID, inCh <-chan Envelope, outCh chan<- Envelope, errCh // Send blocks until the envelope has been sent, or until ctx ends. // An error only occurs if the context ends before the send completes. -func (ch *Channel) Send(ctx context.Context, envelope Envelope) error { +func (ch *legacyChannel) Send(ctx context.Context, envelope Envelope) error { select { case <-ctx.Done(): return ctx.Err() @@ -89,9 +101,15 @@ func (ch *Channel) Send(ctx context.Context, envelope Envelope) error { } } +func (ch *legacyChannel) Err() error { return nil } + // SendError blocks until the given error has been sent, or ctx ends. // An error only occurs if the context ends before the send completes. -func (ch *Channel) SendError(ctx context.Context, pe PeerError) error { +func (ch *legacyChannel) SendError(ctx context.Context, pe PeerError) error { + if errors.Is(pe.Err, context.Canceled) || errors.Is(pe.Err, context.DeadlineExceeded) { + return nil + } + select { case <-ctx.Done(): return ctx.Err() @@ -100,18 +118,29 @@ func (ch *Channel) SendError(ctx context.Context, pe PeerError) error { } } -func (ch *Channel) String() string { return fmt.Sprintf("p2p.Channel<%d:%s>", ch.ID, ch.name) } +func (ch *legacyChannel) String() string { return fmt.Sprintf("p2p.Channel<%d:%s>", ch.ID, ch.name) } // Receive returns a new unbuffered iterator to receive messages from ch. // The iterator runs until ctx ends. 
-func (ch *Channel) Receive(ctx context.Context) *ChannelIterator { +func (ch *legacyChannel) Receive(ctx context.Context) *ChannelIterator { iter := &ChannelIterator{ pipe: make(chan Envelope), // unbuffered } - go func() { + go func(pipe chan<- Envelope) { defer close(iter.pipe) - iteratorWorker(ctx, ch, iter.pipe) - }() + for { + select { + case <-ctx.Done(): + return + case envelope := <-ch.inCh: + select { + case <-ctx.Done(): + return + case pipe <- envelope: + } + } + } + }(iter.pipe) return iter } @@ -126,21 +155,6 @@ type ChannelIterator struct { current *Envelope } -func iteratorWorker(ctx context.Context, ch *Channel, pipe chan Envelope) { - for { - select { - case <-ctx.Done(): - return - case envelope := <-ch.inCh: - select { - case <-ctx.Done(): - return - case pipe <- envelope: - } - } - } -} - // Next returns true when the Envelope value has advanced, and false // when the context is canceled or iteration should stop. If an iterator has returned false, // it will never return true again. @@ -179,7 +193,7 @@ func (iter *ChannelIterator) Envelope() *Envelope { return iter.current } // // This allows the caller to consume messages from multiple channels // without needing to manage the concurrency separately. 
-func MergedChannelIterator(ctx context.Context, chs ...*Channel) *ChannelIterator { +func MergedChannelIterator(ctx context.Context, chs ...Channel) *ChannelIterator { iter := &ChannelIterator{ pipe: make(chan Envelope), // unbuffered } @@ -187,10 +201,17 @@ func MergedChannelIterator(ctx context.Context, chs ...*Channel) *ChannelIterato for _, ch := range chs { wg.Add(1) - go func(ch *Channel) { + go func(ch Channel, pipe chan<- Envelope) { defer wg.Done() - iteratorWorker(ctx, ch, iter.pipe) - }(ch) + iter := ch.Receive(ctx) + for iter.Next(ctx) { + select { + case <-ctx.Done(): + return + case pipe <- *iter.Envelope(): + } + } + }(ch, iter.pipe) } done := make(chan struct{}) diff --git a/internal/p2p/channel_test.go b/internal/p2p/channel_test.go index e06e3e77ea..eeaf77db25 100644 --- a/internal/p2p/channel_test.go +++ b/internal/p2p/channel_test.go @@ -16,13 +16,13 @@ type channelInternal struct { Error chan PeerError } -func testChannel(size int) (*channelInternal, *Channel) { +func testChannel(size int) (*channelInternal, *legacyChannel) { in := &channelInternal{ In: make(chan Envelope, size), Out: make(chan Envelope, size), Error: make(chan PeerError, size), } - ch := &Channel{ + ch := &legacyChannel{ inCh: in.In, outCh: in.Out, errCh: in.Error, diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 8133449152..95c040b570 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -146,8 +146,8 @@ func (n *Network) MakeChannels( ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, -) map[types.NodeID]*p2p.Channel { - channels := map[types.NodeID]*p2p.Channel{} +) map[types.NodeID]p2p.Channel { + channels := map[types.NodeID]p2p.Channel{} for _, node := range n.Nodes { channels[node.NodeID] = node.MakeChannel(ctx, t, chDesc) } @@ -161,8 +161,8 @@ func (n *Network) MakeChannelsNoCleanup( ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, -) map[types.NodeID]*p2p.Channel { - 
channels := map[types.NodeID]*p2p.Channel{} +) map[types.NodeID]p2p.Channel { + channels := map[types.NodeID]p2p.Channel{} for _, node := range n.Nodes { channels[node.NodeID] = node.MakeChannelNoCleanup(ctx, t, chDesc) } @@ -304,7 +304,7 @@ func (n *Node) MakeChannel( ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, -) *p2p.Channel { +) p2p.Channel { ctx, cancel := context.WithCancel(ctx) channel, err := n.Router.OpenChannel(ctx, chDesc) require.NoError(t, err) @@ -321,7 +321,7 @@ func (n *Node) MakeChannelNoCleanup( ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, -) *p2p.Channel { +) p2p.Channel { channel, err := n.Router.OpenChannel(ctx, chDesc) require.NoError(t, err) return channel diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index 885e080d40..276bff390a 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -15,7 +15,7 @@ import ( ) // RequireEmpty requires that the given channel is empty. -func RequireEmpty(ctx context.Context, t *testing.T, channels ...*p2p.Channel) { +func RequireEmpty(ctx context.Context, t *testing.T, channels ...p2p.Channel) { t.Helper() ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) @@ -32,7 +32,7 @@ func RequireEmpty(ctx context.Context, t *testing.T, channels ...*p2p.Channel) { } // RequireReceive requires that the given envelope is received on the channel. -func RequireReceive(ctx context.Context, t *testing.T, channel *p2p.Channel, expect p2p.Envelope) { +func RequireReceive(ctx context.Context, t *testing.T, channel p2p.Channel, expect p2p.Envelope) { t.Helper() ctx, cancel := context.WithTimeout(ctx, time.Second) @@ -54,7 +54,7 @@ func RequireReceive(ctx context.Context, t *testing.T, channel *p2p.Channel, exp // RequireReceiveUnordered requires that the given envelopes are all received on // the channel, ignoring order. 
-func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel *p2p.Channel, expect []*p2p.Envelope) { +func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel p2p.Channel, expect []*p2p.Envelope) { ctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() @@ -75,7 +75,7 @@ func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel *p2p.Cha } // RequireSend requires that the given envelope is sent on the channel. -func RequireSend(ctx context.Context, t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) { +func RequireSend(ctx context.Context, t *testing.T, channel p2p.Channel, envelope p2p.Envelope) { tctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() @@ -93,7 +93,7 @@ func RequireSend(ctx context.Context, t *testing.T, channel *p2p.Channel, envelo func RequireSendReceive( ctx context.Context, t *testing.T, - channel *p2p.Channel, + channel p2p.Channel, peerID types.NodeID, send proto.Message, receive proto.Message, @@ -116,7 +116,7 @@ func RequireNoUpdates(ctx context.Context, t *testing.T, peerUpdates *p2p.PeerUp } // RequireError requires that the given peer error is submitted for a peer. -func RequireError(ctx context.Context, t *testing.T, channel *p2p.Channel, peerError p2p.PeerError) { +func RequireError(ctx context.Context, t *testing.T, channel p2p.Channel, peerError p2p.PeerError) { tctx, tcancel := context.WithTimeout(ctx, time.Second) defer tcancel() diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index bd47373265..87677799d0 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -145,7 +145,7 @@ func (r *Reactor) OnStop() {} // processPexCh implements a blocking event loop where we listen for p2p // Envelope messages from the pexCh. 
-func (r *Reactor) processPexCh(ctx context.Context, pexCh *p2p.Channel) { +func (r *Reactor) processPexCh(ctx context.Context, pexCh p2p.Channel) { incoming := make(chan *p2p.Envelope) go func() { defer close(incoming) @@ -192,8 +192,7 @@ func (r *Reactor) processPexCh(ctx context.Context, pexCh *p2p.Channel) { // A request from another peer, or a response to one of our requests. dur, err := r.handlePexMessage(ctx, envelope, pexCh) if err != nil { - r.logger.Error("failed to process message", - "ch_id", envelope.ChannelID, "envelope", envelope, "err", err) + r.logger.Error("failed to process message", "ch_id", envelope.ChannelID, "envelope", envelope, "err", err) if serr := pexCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, @@ -225,7 +224,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerU // handlePexMessage handles envelopes sent from peers on the PexChannel. // If an update was received, a new polling interval is returned; otherwise the // duration is 0. -func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope, pexCh *p2p.Channel) (time.Duration, error) { +func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope, pexCh p2p.Channel) (time.Duration, error) { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -308,7 +307,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // that peer a request for more peer addresses. The chosen peer is moved into // the requestsSent bucket so that we will not attempt to contact them again // until they've replied or updated. 
-func (r *Reactor) sendRequestForPeers(ctx context.Context, pexCh *p2p.Channel) error { +func (r *Reactor) sendRequestForPeers(ctx context.Context, pexCh p2p.Channel) error { r.mtx.Lock() defer r.mtx.Unlock() if len(r.availablePeers) == 0 { diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index ec2f03d838..07f49f0d60 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -275,7 +275,7 @@ type singleTestReactor struct { pexInCh chan p2p.Envelope pexOutCh chan p2p.Envelope pexErrCh chan p2p.PeerError - pexCh *p2p.Channel + pexCh p2p.Channel peerCh chan p2p.PeerUpdate manager *p2p.PeerManager } @@ -287,8 +287,11 @@ func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor { pexInCh := make(chan p2p.Envelope, chBuf) pexOutCh := make(chan p2p.Envelope, chBuf) pexErrCh := make(chan p2p.PeerError, chBuf) + + chDesc := pex.ChannelDescriptor() pexCh := p2p.NewChannel( - p2p.ChannelID(pex.PexChannel), + chDesc.ID, + chDesc.Name, pexInCh, pexOutCh, pexErrCh, @@ -299,7 +302,7 @@ func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor { peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(context.Context, *p2p.ChannelDescriptor) (p2p.Channel, error) { return pexCh, nil } @@ -324,7 +327,7 @@ type reactorTestSuite struct { logger log.Logger reactors map[types.NodeID]*pex.Reactor - pexChannels map[types.NodeID]*p2p.Channel + pexChannels map[types.NodeID]p2p.Channel peerChans map[types.NodeID]chan p2p.PeerUpdate peerUpdates map[types.NodeID]*p2p.PeerUpdates @@ -367,7 +370,7 @@ func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorT logger: log.NewNopLogger().With("testCase", t.Name()), network: p2ptest.MakeNetwork(ctx, t, networkOpts), reactors: make(map[types.NodeID]*pex.Reactor, realNodes), - 
pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes), + pexChannels: make(map[types.NodeID]p2p.Channel, opts.TotalNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes), total: opts.TotalNodes, @@ -388,7 +391,7 @@ func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorT rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], chBuf) rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) - chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(context.Context, *p2p.ChannelDescriptor) (p2p.Channel, error) { return rts.pexChannels[nodeID], nil } @@ -448,7 +451,7 @@ func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize) r.network.Nodes[nodeID].PeerManager.Register(ctx, r.peerUpdates[nodeID]) - chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(context.Context, *p2p.ChannelDescriptor) (p2p.Channel, error) { return r.pexChannels[nodeID], nil } diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 0e55049c19..4f3af13465 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -239,7 +239,7 @@ func (r *Router) createQueueFactory(ctx context.Context) (func(int) queue, error // ChannelCreator allows routers to construct their own channels, // either by receiving a reference to Router.OpenChannel or using some // kind shim for testing purposes. -type ChannelCreator func(context.Context, *ChannelDescriptor) (*Channel, error) +type ChannelCreator func(context.Context, *ChannelDescriptor) (Channel, error) // OpenChannel opens a new channel for the given message type. The caller must // close the channel when done, before stopping the Router. 
messageType is the @@ -247,7 +247,7 @@ type ChannelCreator func(context.Context, *ChannelDescriptor) (*Channel, error) // implement Wrapper to automatically (un)wrap multiple message types in a // wrapper message. The caller may provide a size to make the channel buffered, // which internally makes the inbound, outbound, and error channel buffered. -func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*Channel, error) { +func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (Channel, error) { r.channelMtx.Lock() defer r.channelMtx.Unlock() @@ -262,11 +262,10 @@ func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*C queue := r.queueFactory(chDesc.RecvBufferCapacity) outCh := make(chan Envelope, chDesc.RecvBufferCapacity) errCh := make(chan PeerError, chDesc.RecvBufferCapacity) - channel := NewChannel(id, queue.dequeue(), outCh, errCh) - channel.name = chDesc.Name + channel := NewChannel(chDesc.ID, chDesc.Name, queue.dequeue(), outCh, errCh) var wrapper Wrapper - if w, ok := messageType.(Wrapper); ok { + if w, ok := chDesc.MessageType.(Wrapper); ok { wrapper = w } @@ -287,7 +286,7 @@ func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*C queue.close() }() - r.routeChannel(ctx, id, outCh, errCh, wrapper) + r.routeChannel(ctx, chDesc.ID, outCh, errCh, wrapper) }() return channel, nil diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 92f56f768f..dd336510c9 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -26,7 +26,7 @@ import ( "github.com/tendermint/tendermint/types" ) -func echoReactor(ctx context.Context, channel *p2p.Channel) { +func echoReactor(ctx context.Context, channel p2p.Channel) { iter := channel.Receive(ctx) for iter.Next(ctx) { envelope := iter.Envelope() diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go index 9cdb349784..e7ad731483 100644 --- 
a/internal/statesync/dispatcher.go +++ b/internal/statesync/dispatcher.go @@ -26,14 +26,14 @@ var ( // NOTE: It is not the responsibility of the dispatcher to verify the light blocks. type Dispatcher struct { // the channel with which to send light block requests on - requestCh *p2p.Channel + requestCh p2p.Channel mtx sync.Mutex // all pending calls that have been dispatched and are awaiting an answer calls map[types.NodeID]chan *types.LightBlock } -func NewDispatcher(requestChannel *p2p.Channel) *Dispatcher { +func NewDispatcher(requestChannel p2p.Channel) *Dispatcher { return &Dispatcher{ requestCh: requestChannel, calls: make(map[types.NodeID]chan *types.LightBlock), diff --git a/internal/statesync/dispatcher_test.go b/internal/statesync/dispatcher_test.go index 8ec074bd19..8f6783e67b 100644 --- a/internal/statesync/dispatcher_test.go +++ b/internal/statesync/dispatcher_test.go @@ -24,13 +24,13 @@ type channelInternal struct { Error chan p2p.PeerError } -func testChannel(size int) (*channelInternal, *p2p.Channel) { +func testChannel(size int) (*channelInternal, p2p.Channel) { in := &channelInternal{ In: make(chan p2p.Envelope, size), Out: make(chan p2p.Envelope, size), Error: make(chan p2p.PeerError, size), } - return in, p2p.NewChannel(0, in.In, in.Out, in.Error) + return in, p2p.NewChannel(0, "test", in.In, in.Out, in.Error) } func TestDispatcherBasic(t *testing.T) { diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index f4d72d017c..deed8d0d3e 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -305,7 +305,7 @@ func (r *Reactor) OnStart(ctx context.Context) error { return nil } - go r.processChannels(ctx, map[p2p.ChannelID]*p2p.Channel{ + go r.processChannels(ctx, map[p2p.ChannelID]p2p.Channel{ SnapshotChannel: snapshotCh, ChunkChannel: chunkCh, LightBlockChannel: blockCh, @@ -611,7 +611,7 @@ func (r *Reactor) backfill( // handleSnapshotMessage handles envelopes sent from peers on the // SnapshotChannel. 
It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. -func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envelope, snapshotCh *p2p.Channel) error { +func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envelope, snapshotCh p2p.Channel) error { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -683,7 +683,7 @@ func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envel // handleChunkMessage handles envelopes sent from peers on the ChunkChannel. // It returns an error only if the Envelope.Message is unknown for this channel. // This should never be called outside of handleMessage. -func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope, chunkCh *p2p.Channel) error { +func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope, chunkCh p2p.Channel) error { switch msg := envelope.Message.(type) { case *ssproto.ChunkRequest: r.logger.Debug( @@ -772,7 +772,7 @@ func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope return nil } -func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Envelope, blockCh *p2p.Channel) error { +func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Envelope, blockCh p2p.Channel) error { switch msg := envelope.Message.(type) { case *ssproto.LightBlockRequest: r.logger.Info("received light block request", "height", msg.Height) @@ -829,7 +829,7 @@ func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Env return nil } -func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelope, paramsCh *p2p.Channel) error { +func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelope, paramsCh p2p.Channel) error { switch msg := envelope.Message.(type) { case *ssproto.ParamsRequest: 
r.logger.Debug("received consensus params request", "height", msg.Height) @@ -878,7 +878,7 @@ func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelop // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, chans map[p2p.ChannelID]*p2p.Channel) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, chans map[p2p.ChannelID]p2p.Channel) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -912,12 +912,12 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, cha // encountered during message execution will result in a PeerError being sent on // the respective channel. When the reactor is stopped, we will catch the signal // and close the p2p Channel gracefully. 
-func (r *Reactor) processChannels(ctx context.Context, chanTable map[p2p.ChannelID]*p2p.Channel) {
-	// make sure that the iterator gets cleaned up in case of error
+func (r *Reactor) processChannels(ctx context.Context, chanTable map[p2p.ChannelID]p2p.Channel) {
+	// make sure that the iterator gets cleaned up in case of error
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	chs := make([]*p2p.Channel, 0, len(chanTable))
+	chs := make([]p2p.Channel, 0, len(chanTable))
 	for key := range chanTable {
 		chs = append(chs, chanTable[key])
 	}
diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go
index f57e228a7f..b81c1ac2c8 100644
--- a/internal/statesync/reactor_test.go
+++ b/internal/statesync/reactor_test.go
@@ -40,22 +40,22 @@ type reactorTestSuite struct {
 	conn          *clientmocks.Client
 	stateProvider *mocks.StateProvider
 
-	snapshotChannel   *p2p.Channel
+	snapshotChannel   p2p.Channel
 	snapshotInCh      chan p2p.Envelope
 	snapshotOutCh     chan p2p.Envelope
 	snapshotPeerErrCh chan p2p.PeerError
 
-	chunkChannel  *p2p.Channel
+	chunkChannel  p2p.Channel
 	chunkInCh     chan p2p.Envelope
 	chunkOutCh    chan p2p.Envelope
 	chunkPeerErrCh chan p2p.PeerError
 
-	blockChannel  *p2p.Channel
+	blockChannel  p2p.Channel
 	blockInCh     chan p2p.Envelope
 	blockOutCh    chan p2p.Envelope
 	blockPeerErrCh chan p2p.PeerError
 
-	paramsChannel  *p2p.Channel
+	paramsChannel  p2p.Channel
 	paramsInCh     chan p2p.Envelope
 	paramsOutCh    chan p2p.Envelope
 	paramsPeerErrCh chan p2p.PeerError
@@ -102,6 +102,7 @@ func setup(
 
 	rts.snapshotChannel = p2p.NewChannel(
 		SnapshotChannel,
+		"snapshot",
 		rts.snapshotInCh,
 		rts.snapshotOutCh,
 		rts.snapshotPeerErrCh,
@@ -109,6 +110,7 @@ func setup(
 
 	rts.chunkChannel = p2p.NewChannel(
 		ChunkChannel,
+		"chunk",
 		rts.chunkInCh,
 		rts.chunkOutCh,
 		rts.chunkPeerErrCh,
@@ -116,6 +118,7 @@ func setup(
 
 	rts.blockChannel = p2p.NewChannel(
 		LightBlockChannel,
+		"lightblock",
 		rts.blockInCh,
 		rts.blockOutCh,
 		rts.blockPeerErrCh,
@@ -123,6 +126,7 @@ func setup(
 
 	rts.paramsChannel = p2p.NewChannel(
ParamsChannel, + "params", rts.paramsInCh, rts.paramsOutCh, rts.paramsPeerErrCh, @@ -133,7 +137,7 @@ func setup( cfg := config.DefaultStateSyncConfig() - chCreator := func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + chCreator := func(ctx context.Context, desc *p2p.ChannelDescriptor) (p2p.Channel, error) { switch desc.ID { case SnapshotChannel: return rts.snapshotChannel, nil diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index a796b0b2e8..a8110b71bc 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -208,7 +208,7 @@ type stateProviderP2P struct { sync.Mutex // light.Client is not concurrency-safe lc *light.Client initialHeight int64 - paramsSendCh *p2p.Channel + paramsSendCh p2p.Channel paramsRecvCh chan types.ConsensusParams } @@ -220,7 +220,7 @@ func NewP2PStateProvider( initialHeight int64, providers []lightprovider.Provider, trustOptions light.TrustOptions, - paramsSendCh *p2p.Channel, + paramsSendCh p2p.Channel, logger log.Logger, ) (StateProvider, error) { if len(providers) < 2 { diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index 47e058c193..a09b558926 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -56,8 +56,8 @@ type syncer struct { stateProvider StateProvider conn abciclient.Client snapshots *snapshotPool - snapshotCh *p2p.Channel - chunkCh *p2p.Channel + snapshotCh p2p.Channel + chunkCh p2p.Channel tempDir string fetchers int32 retryTimeout time.Duration From 136b62762fe3a941a947efc842af00c857d323b7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Jul 2022 08:00:55 -0400 Subject: [PATCH 162/203] build(deps): Bump github.com/prometheus/common from 0.35.0 to 0.36.0 (#8980) Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.35.0 to 0.36.0. 
- [Release notes](https://github.com/prometheus/common/releases) - [Commits](https://github.com/prometheus/common/compare/v0.35.0...v0.36.0) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f8fa79cfce..9971380bde 100644 --- a/go.mod +++ b/go.mod @@ -240,6 +240,6 @@ require ( require ( github.com/creachadair/tomledit v0.0.22 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.35.0 + github.com/prometheus/common v0.36.0 github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca ) diff --git a/go.sum b/go.sum index f7b9149a21..e93e1df2c6 100644 --- a/go.sum +++ b/go.sum @@ -938,8 +938,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE= -github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.36.0 h1:78hJTing+BLYLjhXE+Z2BubeEymH5Lr0/Mt8FKkxxYo= +github.com/prometheus/common v0.36.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= From 
e9239e9ca813638d3fba7739a2904d684b043d90 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 12 Jul 2022 08:20:17 -0400 Subject: [PATCH 163/203] p2p: switch default queue implementation (#8976) --- config/config.go | 2 +- test/e2e/generator/generate.go | 2 +- test/e2e/networks/ci.toml | 2 +- test/e2e/pkg/testnet.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/config/config.go b/config/config.go index 230fed98cb..03348b9f19 100644 --- a/config/config.go +++ b/config/config.go @@ -685,7 +685,7 @@ func DefaultP2PConfig() *P2PConfig { PexReactor: true, HandshakeTimeout: 20 * time.Second, DialTimeout: 3 * time.Second, - QueueType: "priority", + QueueType: "simple-priority", } } diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 01e04a4183..d5d628aeea 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -115,7 +115,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er Nodes: map[string]*e2e.ManifestNode{}, KeyType: keyType.Choose(r).(string), Evidence: evidence.Choose(r).(int), - QueueType: "priority", + QueueType: "simple-priority", TxSize: opt["txSize"].(int), } diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index eb74dd1119..f72bddfd46 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -4,7 +4,7 @@ evidence = 5 initial_height = 1000 initial_state = {initial01 = "a", initial02 = "b", initial03 = "c"} -queue_type = "priority" +queue_type = "simple-priority" abci_protocol = "builtin" [validators] diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 2041796c4d..f3caf034f4 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -355,7 +355,7 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("invalid mempool version %q", n.Mempool) } switch n.QueueType { - case "", "priority", "fifo": + case "", "priority", "fifo", "simple-priority": default: return 
fmt.Errorf("unsupported p2p queue type: %s", n.QueueType) } From b421138e5399432b7d420c336ae24874670038b8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Jul 2022 13:49:52 +0000 Subject: [PATCH 164/203] build(deps): Bump google.golang.org/grpc from 1.47.0 to 1.48.0 (#8993) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.47.0 to 1.48.0.
Release notes

Sourced from google.golang.org/grpc's releases.

Release 1.48.0

Bug Fixes

  • xds/priority: fix bug that could prevent higher priorities from receiving config updates (#5417)
  • RLS load balancer: don't propagate the status code returned on control plane RPCs to data plane RPCs (#5400)

New Features

  • stats: add support for multiple stats handlers in a single client or server (#5347)
  • gcp/observability: add experimental OpenCensus tracing/metrics support (#5372)
  • xds: enable aggregate and logical DNS clusters by default (#5380)
  • credentials/google (for xds): support xdstp C2P cluster names (#5399)
Commits
  • 6417495 Change version to 1.48.0 (#5482)
  • 5770b1d xds: drop localities with zero weight at the xdsClient layer (#5476)
  • 423cd8e interop: update proto to make vet happy (#5475)
  • c9b16c8 transport: remove unused bufWriter.onFlush() (#5464)
  • 755bf5a fix typo in the binary log (#5467)
  • 15739b5 health: split imports into healthpb and healthgrpc (#5466)
  • c075d20 interop client: provide new flag, --soak_min_time_ms_between_rpcs (#5421)
  • 4b75005 clusterresolver: merge P(p)arseConfig functions (#5462)
  • d883f3d test/xds: fail only when state changes to something other than READY and IDLE...
  • c6ee1c7 xdsclient: only include nodeID in error strings, not the whole nodeProto (#5461)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=google.golang.org/grpc&package-manager=go_modules&previous-version=1.47.0&new-version=1.48.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9971380bde..f489058d27 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 - google.golang.org/grpc v1.47.0 + google.golang.org/grpc v1.48.0 pgregory.net/rapid v0.4.7 ) diff --git a/go.sum b/go.sum index e93e1df2c6..19a40e5600 100644 --- a/go.sum +++ b/go.sum @@ -1815,8 +1815,8 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5 google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= From b71ec8c83fde289dc0504a01c2f3fd85216dc288 Mon Sep 17 00:00:00 2001 From: kuniseichi Date: Wed, 13 Jul 2022 22:04:17 +0800 Subject: [PATCH 165/203] doc: fix typos in quick-start.md. 
(#8990) --- docs/introduction/quick-start.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/introduction/quick-start.md b/docs/introduction/quick-start.md index 040da8eb2f..74baf7201c 100644 --- a/docs/introduction/quick-start.md +++ b/docs/introduction/quick-start.md @@ -106,10 +106,10 @@ Next, use the `tendermint testnet` command to create four directories of config Before you can start the network, you'll need peers identifiers (IPs are not enough and can change). We'll refer to them as ID1, ID2, ID3, ID4. ```sh -tendermint show_node_id --home ./mytestnet/node0 -tendermint show_node_id --home ./mytestnet/node1 -tendermint show_node_id --home ./mytestnet/node2 -tendermint show_node_id --home ./mytestnet/node3 +tendermint show-node-id --home ./mytestnet/node0 +tendermint show-node-id --home ./mytestnet/node1 +tendermint show-node-id --home ./mytestnet/node2 +tendermint show-node-id --home ./mytestnet/node3 ``` Finally, from each machine, run: From c1c501ecd43dec53e56a197f7013bface2a962fb Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Thu, 14 Jul 2022 16:19:53 -0400 Subject: [PATCH 166/203] config: update config to reflect simple-priority queue (#9007) Update the queue documentation to reflect the types of queues and current default queue. --- config/config.go | 4 ++-- config/toml.go | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/config/config.go b/config/config.go index 03348b9f19..64d798a473 100644 --- a/config/config.go +++ b/config/config.go @@ -659,8 +659,8 @@ type P2PConfig struct { //nolint: maligned DialTimeout time.Duration `mapstructure:"dial-timeout"` // Makes it possible to configure which queue backend the p2p - // layer uses. Options are: "fifo" and "priority", - // with the default being "priority". + // layer uses. Options are: "fifo", "simple-priority", and "priority", + // with the default being "simple-priority". 
QueueType string `mapstructure:"queue-type"` } diff --git a/config/toml.go b/config/toml.go index 0aecbc1a3c..e079ba206d 100644 --- a/config/toml.go +++ b/config/toml.go @@ -282,7 +282,9 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}" ####################################################### [p2p] -# Select the p2p internal queue +# Select the p2p internal queue. +# Options are: "fifo", "simple-priority", and "priority", +# with the default being "simple-priority". queue-type = "{{ .P2P.QueueType }}" # Address to listen for incoming connections From 4214d998f2ed6f0217f0f405c5093375dd45510e Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 14 Jul 2022 18:43:38 -0700 Subject: [PATCH 167/203] Forward-port point release changelogs from v0.35.x. (#9011) --- CHANGELOG.md | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b30abf027..e6b99cd3a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,76 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos). +## v0.35.8 + +July 12, 2022 + +Special thanks to external contributors on this release: @joeabbey + +This release fixes an unbounded heap growth issue in the implementation of the +priority mempool, as well as some configuration, logging, and peer dialing +improvements in the non-legacy p2p stack. It also adds a new opt-in +"simple-priority" value for the `p2p.queue-type` setting, that should improve +gossip performance for non-legacy peer networks. 
+ +### BREAKING CHANGES + +- CLI/RPC/Config + + - [node] [\#8902](https://github.com/tendermint/tendermint/pull/8902) Always start blocksync and avoid misconfiguration (@tychoish) + +### FEATURES + +- [cli] [\#8675](https://github.com/tendermint/tendermint/pull/8675) Add command to force compact goleveldb databases (@cmwaters) + +### IMPROVEMENTS + +- [p2p] [\#8914](https://github.com/tendermint/tendermint/pull/8914) [\#8875](https://github.com/tendermint/tendermint/pull/8875) Improvements to peer dialing (backported). (@tychoish) +- [p2p] [\#8820](https://github.com/tendermint/tendermint/pull/8820) add eviction metrics and cleanup dialing error handling (backport #8819) (@tychoish) +- [logging] [\#8896](https://github.com/tendermint/tendermint/pull/8896) Do not pre-process log results (backport #8895). (@tychoish) +- [p2p] [\#8956](https://github.com/tendermint/tendermint/pull/8956) Simpler priority queue (backport #8929). (@tychoish) + +### BUG FIXES + +- [mempool] [\#8944](https://github.com/tendermint/tendermint/pull/8944) Fix unbounded heap growth in the priority mempool. (@creachadair) +- [p2p] [\#8869](https://github.com/tendermint/tendermint/pull/8869) Set empty timeouts to configed values. (backport #8847). (@williambanfield) + + +## v0.35.7 + +June 16, 2022 + +### BUG FIXES + +- [p2p] [\#8692](https://github.com/tendermint/tendermint/pull/8692) scale the number of stored peers by the configured maximum connections (#8684) +- [rpc] [\#8715](https://github.com/tendermint/tendermint/pull/8715) always close http bodies (backport #8712) +- [p2p] [\#8760](https://github.com/tendermint/tendermint/pull/8760) accept should not abort on first error (backport #8759) + +### BREAKING CHANGES + +- P2P Protocol + + - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Introduce "inactive" peer label to avoid re-dialing incompatible peers. 
(@tychoish) + - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish) + - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. (@tychoish) + - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. (@tychoish) + +## v0.35.6 + +June 3, 2022 + +### FEATURES + +- [migrate] [\#8672](https://github.com/tendermint/tendermint/pull/8672) provide function for database production (backport #8614) (@tychoish) + +### BUG FIXES + +- [consensus] [\#8651](https://github.com/tendermint/tendermint/pull/8651) restructure peer catchup sleep (@tychoish) +- [pex] [\#8657](https://github.com/tendermint/tendermint/pull/8657) align max address thresholds (@cmwaters) +- [cmd] [\#8668](https://github.com/tendermint/tendermint/pull/8668) don't used global config for reset commands (@cmwaters) +- [p2p] [\#8681](https://github.com/tendermint/tendermint/pull/8681) shed peers from store from other networks (backport #8678) (@tychoish) + + ## v0.35.5 May 26, 2022 From 31457ad3614deddc0360b3f24bc8a528d7c63cc7 Mon Sep 17 00:00:00 2001 From: Sergio Mena Date: Fri, 15 Jul 2022 12:01:29 +0200 Subject: [PATCH 168/203] typo (#9001) --- spec/abci++/abci++_basic_concepts.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/abci++/abci++_basic_concepts.md b/spec/abci++/abci++_basic_concepts.md index 9b91678658..b02798b57f 100644 --- a/spec/abci++/abci++_basic_concepts.md +++ b/spec/abci++/abci++_basic_concepts.md @@ -80,7 +80,7 @@ call sequences of these methods. - [**ProcessProposal:**](./abci++_methods.md#processproposal) It allows a validator to perform application-dependent work in a proposed block. 
This enables features such as immediate block execution, and allows the Application to reject invalid blocks. - Tendermint calls it when it receives a proposal and the Tendermint algorithms has not locked on a + Tendermint calls it when it receives a proposal and the Tendermint algorithm has not locked on a value. The Application cannot modify the proposal at this point but can reject it if it is invalid. If that is the case, Tendermint will prevote `nil` on the proposal, which has strong liveness implications for Tendermint. As a general rule, the Application From d2db54ae9ab5e9ed5a9fef25d72fe31d48fc43e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 15 Jul 2022 13:24:17 +0000 Subject: [PATCH 169/203] build(deps): Bump pgregory.net/rapid from 0.4.7 to 0.4.8 (#9014) Bumps [pgregory.net/rapid](https://github.com/flyingmutant/rapid) from 0.4.7 to 0.4.8.
Commits
  • 110d7a5 persist: bump rapid version
  • 94a73e7 Remove shrinking-challenge tests
  • 1a852a2 persist: bump rapid version
  • bc396c3 persist: put failfiles under testdata/rapid/ subdirectories
  • 5408033 ci: test on Go 1.18
  • dd3e976 Document concurrent usage of *T
  • a026755 Expose TB interface
  • 32f9d9b ci: test on Go 1.17
  • ef97f65 Avoid division in genFloat01()
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pgregory.net/rapid&package-manager=go_modules&previous-version=0.4.7&new-version=0.4.8)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f489058d27..ca3f40fd65 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 google.golang.org/grpc v1.48.0 - pgregory.net/rapid v0.4.7 + pgregory.net/rapid v0.4.8 ) require ( diff --git a/go.sum b/go.sum index 19a40e5600..2098fc7cc2 100644 --- a/go.sum +++ b/go.sum @@ -1891,8 +1891,8 @@ mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphD mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio= mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY= -pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= -pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= +pgregory.net/rapid v0.4.8 h1:d+5SGZWUbJPbl3ss6tmPFqnNeQR6VDOFly+eTjwPiEw= +pgregory.net/rapid v0.4.8/go.mod h1:Z5PbWqjvWR1I3UGjvboUuan4fe4ZYEYNLNQLExzCoUs= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= From 18b5a500da06723823da0424a99e13a7bcad7d68 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 15 Jul 2022 07:29:34 -0700 Subject: [PATCH 170/203] Extract a library from the confix command-line tool. (#9012) Pull out the library functionality from scripts/confix and move it to internal/libs/confix. Replace scripts/confix with a simple stub that has the same command-line API, but uses the library instead. Related: - Move and update unit tests. - Move scripts/confix/condiff to scripts/condiff. 
- Update test data for v34, v35, and v36. - Update reference diffs. - Update testdata README. --- internal/libs/confix/confix.go | 155 ++++++++++++++++++ .../libs}/confix/confix_test.go | 4 +- {scripts => internal/libs}/confix/plan.go | 2 +- .../libs}/confix/testdata/README.md | 4 +- .../libs}/confix/testdata/baseline.txt | 0 .../libs}/confix/testdata/diff-26-27.txt | 0 .../libs}/confix/testdata/diff-27-28.txt | 0 .../libs}/confix/testdata/diff-28-29.txt | 0 .../libs}/confix/testdata/diff-29-30.txt | 0 .../libs}/confix/testdata/diff-30-31.txt | 0 .../libs}/confix/testdata/diff-31-32.txt | 0 .../libs}/confix/testdata/diff-32-33.txt | 0 .../libs}/confix/testdata/diff-33-34.txt | 0 .../libs}/confix/testdata/diff-34-35.txt | 5 +- .../libs}/confix/testdata/diff-35-36.txt | 0 .../libs}/confix/testdata/non-config.toml | 0 .../libs}/confix/testdata/v26-config.toml | 0 .../libs}/confix/testdata/v27-config.toml | 0 .../libs}/confix/testdata/v28-config.toml | 0 .../libs}/confix/testdata/v29-config.toml | 0 .../libs}/confix/testdata/v30-config.toml | 0 .../libs}/confix/testdata/v31-config.toml | 0 .../libs}/confix/testdata/v32-config.toml | 0 .../libs}/confix/testdata/v33-config.toml | 0 .../libs}/confix/testdata/v34-config.toml | 27 +++ .../libs}/confix/testdata/v35-config.toml | 8 +- .../libs}/confix/testdata/v36-config.toml | 10 +- scripts/{confix => }/condiff/condiff.go | 0 scripts/confix/confix.go | 130 ++------------- 29 files changed, 213 insertions(+), 132 deletions(-) create mode 100644 internal/libs/confix/confix.go rename {scripts => internal/libs}/confix/confix_test.go (97%) rename {scripts => internal/libs}/confix/plan.go (99%) rename {scripts => internal/libs}/confix/testdata/README.md (90%) rename {scripts => internal/libs}/confix/testdata/baseline.txt (100%) rename {scripts => internal/libs}/confix/testdata/diff-26-27.txt (100%) rename {scripts => internal/libs}/confix/testdata/diff-27-28.txt (100%) rename {scripts => 
internal/libs}/confix/testdata/diff-28-29.txt (100%) rename {scripts => internal/libs}/confix/testdata/diff-29-30.txt (100%) rename {scripts => internal/libs}/confix/testdata/diff-30-31.txt (100%) rename {scripts => internal/libs}/confix/testdata/diff-31-32.txt (100%) rename {scripts => internal/libs}/confix/testdata/diff-32-33.txt (100%) rename {scripts => internal/libs}/confix/testdata/diff-33-34.txt (100%) rename {scripts => internal/libs}/confix/testdata/diff-34-35.txt (87%) rename {scripts => internal/libs}/confix/testdata/diff-35-36.txt (100%) rename {scripts => internal/libs}/confix/testdata/non-config.toml (100%) rename {scripts => internal/libs}/confix/testdata/v26-config.toml (100%) rename {scripts => internal/libs}/confix/testdata/v27-config.toml (100%) rename {scripts => internal/libs}/confix/testdata/v28-config.toml (100%) rename {scripts => internal/libs}/confix/testdata/v29-config.toml (100%) rename {scripts => internal/libs}/confix/testdata/v30-config.toml (100%) rename {scripts => internal/libs}/confix/testdata/v31-config.toml (100%) rename {scripts => internal/libs}/confix/testdata/v32-config.toml (100%) rename {scripts => internal/libs}/confix/testdata/v33-config.toml (100%) rename {scripts => internal/libs}/confix/testdata/v34-config.toml (93%) rename {scripts => internal/libs}/confix/testdata/v35-config.toml (98%) rename {scripts => internal/libs}/confix/testdata/v36-config.toml (98%) rename scripts/{confix => }/condiff/condiff.go (100%) diff --git a/internal/libs/confix/confix.go b/internal/libs/confix/confix.go new file mode 100644 index 0000000000..a9449fa228 --- /dev/null +++ b/internal/libs/confix/confix.go @@ -0,0 +1,155 @@ +// Package confix applies changes to a Tendermint TOML configuration file, to +// update configurations created with an older version of Tendermint to a +// compatible format for a newer version. 
+package confix + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + + "github.com/creachadair/atomicfile" + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/transform" + "github.com/spf13/viper" + + "github.com/tendermint/tendermint/config" +) + +// Upgrade reads the configuration file at configPath and applies any +// transformations necessary to upgrade it to the current version. If this +// succeeds, the transformed output is written to outputPath. As a special +// case, if outputPath == "" the output is written to stdout. +// +// It is safe if outputPath == inputPath. If a regular file outputPath already +// exists, it is overwritten. In case of error, the output is not written. +// +// Upgrade is a convenience wrapper for calls to LoadConfig, ApplyFixes, and +// CheckValid. If the caller requires more control over the behavior of the +// upgrade, call those functions directly. +func Upgrade(ctx context.Context, configPath, outputPath string) error { + if configPath == "" { + return errors.New("empty input configuration path") + } + + doc, err := LoadConfig(configPath) + if err != nil { + return fmt.Errorf("loading config: %v", err) + } + + if err := ApplyFixes(ctx, doc); err != nil { + return fmt.Errorf("updating %q: %v", configPath, err) + } + + var buf bytes.Buffer + if err := tomledit.Format(&buf, doc); err != nil { + return fmt.Errorf("formatting config: %v", err) + } + + // Verify that Tendermint can parse the results after our edits. + if err := CheckValid(buf.Bytes()); err != nil { + return fmt.Errorf("updated config is invalid: %v", err) + } + + if outputPath == "" { + _, err = os.Stdout.Write(buf.Bytes()) + } else { + err = atomicfile.WriteData(outputPath, buf.Bytes(), 0600) + } + return err +} + +// ApplyFixes transforms doc and reports whether it succeeded. 
+func ApplyFixes(ctx context.Context, doc *tomledit.Document) error { + // Check what version of Tendermint might have created this config file, as + // a safety check for the updates we are about to make. + tmVersion := GuessConfigVersion(doc) + if tmVersion == vUnknown { + return errors.New("cannot tell what Tendermint version created this config") + } else if tmVersion < v34 || tmVersion > v36 { + // TODO(creachadair): Add in rewrites for older versions. This will + // require some digging to discover what the changes were. The upgrade + // instructions do not give specifics. + return fmt.Errorf("unable to update version %s config", tmVersion) + } + return plan.Apply(ctx, doc) +} + +// LoadConfig loads and parses the TOML document from path. +func LoadConfig(path string) (*tomledit.Document, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return tomledit.Parse(f) +} + +const ( + vUnknown = "" + v32 = "v0.32" + v33 = "v0.33" + v34 = "v0.34" + v35 = "v0.35" + v36 = "v0.36" +) + +// GuessConfigVersion attempts to figure out which version of Tendermint +// created the specified config document. It returns "" if the creating version +// cannot be determined, otherwise a string of the form "vX.YY". 
+func GuessConfigVersion(doc *tomledit.Document) string { + hasDisableWS := doc.First("rpc", "experimental-disable-websocket") != nil + hasUseLegacy := doc.First("p2p", "use-legacy") != nil // v0.35 only + if hasDisableWS && !hasUseLegacy { + return v36 + } + + hasBlockSync := transform.FindTable(doc, "blocksync") != nil // add: v0.35 + hasStateSync := transform.FindTable(doc, "statesync") != nil // add: v0.34 + if hasBlockSync && hasStateSync { + return v35 + } else if hasStateSync { + return v34 + } + + hasIndexKeys := doc.First("tx_index", "index_keys") != nil // add: v0.33 + hasIndexTags := doc.First("tx_index", "index_tags") != nil // rem: v0.33 + if hasIndexKeys && !hasIndexTags { + return v33 + } + + hasFastSync := transform.FindTable(doc, "fastsync") != nil // add: v0.32 + if hasIndexTags && hasFastSync { + return v32 + } + + // Something older, probably. + return vUnknown +} + +// CheckValid checks whether the specified config appears to be a valid +// Tendermint config file. This emulates how the node loads the config. +func CheckValid(data []byte) error { + v := viper.New() + v.SetConfigType("toml") + + if err := v.ReadConfig(bytes.NewReader(data)); err != nil { + return fmt.Errorf("reading config: %w", err) + } + + var cfg config.Config + if err := v.Unmarshal(&cfg); err != nil { + return fmt.Errorf("decoding config: %w", err) + } + + return cfg.ValidateBasic() +} + +// WithLogWriter returns a child of ctx with a logger attached that sends +// output to w. This is a convenience wrapper for transform.WithLogWriter. 
+func WithLogWriter(ctx context.Context, w io.Writer) context.Context { + return transform.WithLogWriter(ctx, w) +} diff --git a/scripts/confix/confix_test.go b/internal/libs/confix/confix_test.go similarity index 97% rename from scripts/confix/confix_test.go rename to internal/libs/confix/confix_test.go index ec258f4ca6..dc0042fe59 100644 --- a/scripts/confix/confix_test.go +++ b/internal/libs/confix/confix_test.go @@ -1,4 +1,4 @@ -package main_test +package confix_test import ( "bytes" @@ -9,7 +9,7 @@ import ( "github.com/creachadair/tomledit" "github.com/google/go-cmp/cmp" - confix "github.com/tendermint/tendermint/scripts/confix" + "github.com/tendermint/tendermint/internal/libs/confix" ) func mustParseConfig(t *testing.T, path string) *tomledit.Document { diff --git a/scripts/confix/plan.go b/internal/libs/confix/plan.go similarity index 99% rename from scripts/confix/plan.go rename to internal/libs/confix/plan.go index 706343338f..ac6f7b5a6d 100644 --- a/scripts/confix/plan.go +++ b/internal/libs/confix/plan.go @@ -1,4 +1,4 @@ -package main +package confix import ( "context" diff --git a/scripts/confix/testdata/README.md b/internal/libs/confix/testdata/README.md similarity index 90% rename from scripts/confix/testdata/README.md rename to internal/libs/confix/testdata/README.md index 5bbfa795f3..04f2af2050 100644 --- a/scripts/confix/testdata/README.md +++ b/internal/libs/confix/testdata/README.md @@ -41,12 +41,12 @@ The files named `diff-XX-YY.txt` were generated by using the `condiff` tool on the config samples for versions v0.XX and v0.YY: ```shell -go run ./scripts/confix/condiff -desnake vXX-config vYY-config.toml > diff-XX-YY.txt +go run ./scripts/condiff -desnake vXX-config vYY-config.toml > diff-XX-YY.txt ``` The `baseline.txt` was computed in the same way, but using an empty starting file so that we capture all the settings in the target: ```shell -go run ./scripts/confix/condiff -desnake /dev/null v26-config.toml > baseline.txt +go run 
./scripts/condiff -desnake /dev/null v26-config.toml > baseline.txt ``` diff --git a/scripts/confix/testdata/baseline.txt b/internal/libs/confix/testdata/baseline.txt similarity index 100% rename from scripts/confix/testdata/baseline.txt rename to internal/libs/confix/testdata/baseline.txt diff --git a/scripts/confix/testdata/diff-26-27.txt b/internal/libs/confix/testdata/diff-26-27.txt similarity index 100% rename from scripts/confix/testdata/diff-26-27.txt rename to internal/libs/confix/testdata/diff-26-27.txt diff --git a/scripts/confix/testdata/diff-27-28.txt b/internal/libs/confix/testdata/diff-27-28.txt similarity index 100% rename from scripts/confix/testdata/diff-27-28.txt rename to internal/libs/confix/testdata/diff-27-28.txt diff --git a/scripts/confix/testdata/diff-28-29.txt b/internal/libs/confix/testdata/diff-28-29.txt similarity index 100% rename from scripts/confix/testdata/diff-28-29.txt rename to internal/libs/confix/testdata/diff-28-29.txt diff --git a/scripts/confix/testdata/diff-29-30.txt b/internal/libs/confix/testdata/diff-29-30.txt similarity index 100% rename from scripts/confix/testdata/diff-29-30.txt rename to internal/libs/confix/testdata/diff-29-30.txt diff --git a/scripts/confix/testdata/diff-30-31.txt b/internal/libs/confix/testdata/diff-30-31.txt similarity index 100% rename from scripts/confix/testdata/diff-30-31.txt rename to internal/libs/confix/testdata/diff-30-31.txt diff --git a/scripts/confix/testdata/diff-31-32.txt b/internal/libs/confix/testdata/diff-31-32.txt similarity index 100% rename from scripts/confix/testdata/diff-31-32.txt rename to internal/libs/confix/testdata/diff-31-32.txt diff --git a/scripts/confix/testdata/diff-32-33.txt b/internal/libs/confix/testdata/diff-32-33.txt similarity index 100% rename from scripts/confix/testdata/diff-32-33.txt rename to internal/libs/confix/testdata/diff-32-33.txt diff --git a/scripts/confix/testdata/diff-33-34.txt b/internal/libs/confix/testdata/diff-33-34.txt similarity index 
100% rename from scripts/confix/testdata/diff-33-34.txt rename to internal/libs/confix/testdata/diff-33-34.txt diff --git a/scripts/confix/testdata/diff-34-35.txt b/internal/libs/confix/testdata/diff-34-35.txt similarity index 87% rename from scripts/confix/testdata/diff-34-35.txt rename to internal/libs/confix/testdata/diff-34-35.txt index 13a4432a0e..de08f29652 100644 --- a/scripts/confix/testdata/diff-34-35.txt +++ b/internal/libs/confix/testdata/diff-34-35.txt @@ -8,13 +8,11 @@ +M blocksync.version -S fastsync -M fastsync.version -+M mempool.ttl-duration -+M mempool.ttl-num-blocks -+M mempool.version -M mempool.wal-dir +M p2p.bootstrap-peers +M p2p.max-connections +M p2p.max-incoming-connection-attempts ++M p2p.max-outgoing-connections +M p2p.queue-type -M p2p.seed-mode +M p2p.use-legacy @@ -28,4 +26,3 @@ -M statesync.chunk-fetchers +M statesync.fetchers +M statesync.use-p2p -+M tx-index.psql-conn diff --git a/scripts/confix/testdata/diff-35-36.txt b/internal/libs/confix/testdata/diff-35-36.txt similarity index 100% rename from scripts/confix/testdata/diff-35-36.txt rename to internal/libs/confix/testdata/diff-35-36.txt diff --git a/scripts/confix/testdata/non-config.toml b/internal/libs/confix/testdata/non-config.toml similarity index 100% rename from scripts/confix/testdata/non-config.toml rename to internal/libs/confix/testdata/non-config.toml diff --git a/scripts/confix/testdata/v26-config.toml b/internal/libs/confix/testdata/v26-config.toml similarity index 100% rename from scripts/confix/testdata/v26-config.toml rename to internal/libs/confix/testdata/v26-config.toml diff --git a/scripts/confix/testdata/v27-config.toml b/internal/libs/confix/testdata/v27-config.toml similarity index 100% rename from scripts/confix/testdata/v27-config.toml rename to internal/libs/confix/testdata/v27-config.toml diff --git a/scripts/confix/testdata/v28-config.toml b/internal/libs/confix/testdata/v28-config.toml similarity index 100% rename from 
scripts/confix/testdata/v28-config.toml rename to internal/libs/confix/testdata/v28-config.toml diff --git a/scripts/confix/testdata/v29-config.toml b/internal/libs/confix/testdata/v29-config.toml similarity index 100% rename from scripts/confix/testdata/v29-config.toml rename to internal/libs/confix/testdata/v29-config.toml diff --git a/scripts/confix/testdata/v30-config.toml b/internal/libs/confix/testdata/v30-config.toml similarity index 100% rename from scripts/confix/testdata/v30-config.toml rename to internal/libs/confix/testdata/v30-config.toml diff --git a/scripts/confix/testdata/v31-config.toml b/internal/libs/confix/testdata/v31-config.toml similarity index 100% rename from scripts/confix/testdata/v31-config.toml rename to internal/libs/confix/testdata/v31-config.toml diff --git a/scripts/confix/testdata/v32-config.toml b/internal/libs/confix/testdata/v32-config.toml similarity index 100% rename from scripts/confix/testdata/v32-config.toml rename to internal/libs/confix/testdata/v32-config.toml diff --git a/scripts/confix/testdata/v33-config.toml b/internal/libs/confix/testdata/v33-config.toml similarity index 100% rename from scripts/confix/testdata/v33-config.toml rename to internal/libs/confix/testdata/v33-config.toml diff --git a/scripts/confix/testdata/v34-config.toml b/internal/libs/confix/testdata/v34-config.toml similarity index 93% rename from scripts/confix/testdata/v34-config.toml rename to internal/libs/confix/testdata/v34-config.toml index 0ef8b25ebe..f9d61f4937 100644 --- a/scripts/confix/testdata/v34-config.toml +++ b/internal/libs/confix/testdata/v34-config.toml @@ -272,6 +272,11 @@ dial_timeout = "3s" ####################################################### [mempool] +# Mempool version to use: +# 1) "v0" - (default) FIFO mempool. +# 2) "v1" - prioritized mempool. 
+version = "v0" + recheck = true broadcast = true wal_dir = "" @@ -301,6 +306,22 @@ max_tx_bytes = 1048576 # XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 max_batch_bytes = 0 +# ttl-duration, if non-zero, defines the maximum amount of time a transaction +# can exist for in the mempool. +# +# Note, if ttl-num-blocks is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if it's +# insertion time into the mempool is beyond ttl-duration. +ttl-duration = "0s" + +# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction +# can exist for in the mempool. +# +# Note, if ttl-duration is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if +# it's insertion time into the mempool is beyond ttl-duration. +ttl-num-blocks = 0 + ####################################################### ### State Sync Configuration Options ### ####################################################### @@ -403,8 +424,14 @@ peer_query_maj23_sleep_duration = "2s" # 1) "null" # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). # - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. indexer = "kv" +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? 
+psql-conn = "" + ####################################################### ### Instrumentation Configuration Options ### ####################################################### diff --git a/scripts/confix/testdata/v35-config.toml b/internal/libs/confix/testdata/v35-config.toml similarity index 98% rename from scripts/confix/testdata/v35-config.toml rename to internal/libs/confix/testdata/v35-config.toml index 79616d6cdc..a59161f078 100644 --- a/scripts/confix/testdata/v35-config.toml +++ b/internal/libs/confix/testdata/v35-config.toml @@ -227,7 +227,9 @@ pprof-laddr = "" # Enable the legacy p2p layer. use-legacy = false -# Select the p2p internal queue +# Select the p2p internal queue. +# Options are: "fifo", "simple-priority", "priority", and "wdrr" +# with the default being "priority". queue-type = "priority" # Address to listen for incoming connections @@ -281,6 +283,10 @@ max-num-outbound-peers = 10 # Maximum number of connections (inbound and outbound). max-connections = 64 +# Maximum number of connections reserved for outgoing +# connections. Must be less than max-connections +max-outgoing-connections = 12 + # Rate limits the number of incoming connection attempts per IP address. max-incoming-connection-attempts = 100 diff --git a/scripts/confix/testdata/v36-config.toml b/internal/libs/confix/testdata/v36-config.toml similarity index 98% rename from scripts/confix/testdata/v36-config.toml rename to internal/libs/confix/testdata/v36-config.toml index 612f46ece6..7d39afc153 100644 --- a/scripts/confix/testdata/v36-config.toml +++ b/internal/libs/confix/testdata/v36-config.toml @@ -208,8 +208,10 @@ pprof-laddr = "" ####################################################### [p2p] -# Select the p2p internal queue -queue-type = "priority" +# Select the p2p internal queue. +# Options are: "fifo", "simple-priority", and "priority", +# with the default being "priority". 
+queue-type = "simple-priority" # Address to listen for incoming connections laddr = "tcp://0.0.0.0:26656" @@ -235,6 +237,10 @@ upnp = false # Maximum number of connections (inbound and outbound). max-connections = 64 +# Maximum number of connections reserved for outgoing +# connections. Must be less than max-connections +max-outgoing-connections = 12 + # Rate limits the number of incoming connection attempts per IP address. max-incoming-connection-attempts = 100 diff --git a/scripts/confix/condiff/condiff.go b/scripts/condiff/condiff.go similarity index 100% rename from scripts/confix/condiff/condiff.go rename to scripts/condiff/condiff.go diff --git a/scripts/confix/confix.go b/scripts/confix/confix.go index b24c3a778d..29c5bb7531 100644 --- a/scripts/confix/confix.go +++ b/scripts/confix/confix.go @@ -1,24 +1,17 @@ -// Program confix applies fixes to a Tendermint TOML configuration file to -// update a file created with an older version of Tendermint to a compatible -// format for a newer version. +// Program confix applies changes to a Tendermint TOML configuration file, to +// update configurations created with an older version of Tendermint to a +// compatible format for a newer version. 
package main import ( - "bytes" "context" - "errors" "flag" "fmt" "log" "os" "path/filepath" - "github.com/creachadair/atomicfile" - "github.com/creachadair/tomledit" - "github.com/creachadair/tomledit/transform" - "github.com/spf13/viper" - - "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/libs/confix" ) func init() { @@ -42,6 +35,7 @@ Options: var ( configPath = flag.String("config", "", "Config file path (required)") outPath = flag.String("out", "", "Output file path (default stdout)") + doVerbose = flag.Bool("v", false, "Log changes to stderr") ) func main() { @@ -50,115 +44,11 @@ func main() { log.Fatal("You must specify a non-empty -config path") } - doc, err := LoadConfig(*configPath) - if err != nil { - log.Fatalf("Loading config: %v", err) + ctx := context.Background() + if *doVerbose { + ctx = confix.WithLogWriter(ctx, os.Stderr) } - - ctx := transform.WithLogWriter(context.Background(), os.Stderr) - if err := ApplyFixes(ctx, doc); err != nil { - log.Fatalf("Updating %q: %v", *configPath, err) + if err := confix.Upgrade(ctx, *configPath, *outPath); err != nil { + log.Fatalf("Upgrading config: %v", err) } - - var buf bytes.Buffer - if err := tomledit.Format(&buf, doc); err != nil { - log.Fatalf("Formatting config: %v", err) - } - - // Verify that Tendermint can parse the results after our edits. - if err := CheckValid(buf.Bytes()); err != nil { - log.Fatalf("Updated config is invalid: %v", err) - } - - if *outPath == "" { - os.Stdout.Write(buf.Bytes()) - } else if err := atomicfile.WriteData(*outPath, buf.Bytes(), 0600); err != nil { - log.Fatalf("Writing output: %v", err) - } -} - -// ApplyFixes transforms doc and reports whether it succeeded. -func ApplyFixes(ctx context.Context, doc *tomledit.Document) error { - // Check what version of Tendermint might have created this config file, as - // a safety check for the updates we are about to make. 
- tmVersion := GuessConfigVersion(doc) - if tmVersion == vUnknown { - return errors.New("cannot tell what Tendermint version created this config") - } else if tmVersion < v34 || tmVersion > v36 { - // TODO(creachadair): Add in rewrites for older versions. This will - // require some digging to discover what the changes were. The upgrade - // instructions do not give specifics. - return fmt.Errorf("unable to update version %s config", tmVersion) - } - return plan.Apply(ctx, doc) -} - -// LoadConfig loads and parses the TOML document from path. -func LoadConfig(path string) (*tomledit.Document, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - return tomledit.Parse(f) -} - -const ( - vUnknown = "" - v32 = "v0.32" - v33 = "v0.33" - v34 = "v0.34" - v35 = "v0.35" - v36 = "v0.36" -) - -// GuessConfigVersion attempts to figure out which version of Tendermint -// created the specified config document. It returns "" if the creating version -// cannot be determined, otherwise a string of the form "vX.YY". -func GuessConfigVersion(doc *tomledit.Document) string { - hasDisableWS := doc.First("rpc", "experimental-disable-websocket") != nil - hasUseLegacy := doc.First("p2p", "use-legacy") != nil // v0.35 only - if hasDisableWS && !hasUseLegacy { - return v36 - } - - hasBlockSync := transform.FindTable(doc, "blocksync") != nil // add: v0.35 - hasStateSync := transform.FindTable(doc, "statesync") != nil // add: v0.34 - if hasBlockSync && hasStateSync { - return v35 - } else if hasStateSync { - return v34 - } - - hasIndexKeys := doc.First("tx_index", "index_keys") != nil // add: v0.33 - hasIndexTags := doc.First("tx_index", "index_tags") != nil // rem: v0.33 - if hasIndexKeys && !hasIndexTags { - return v33 - } - - hasFastSync := transform.FindTable(doc, "fastsync") != nil // add: v0.32 - if hasIndexTags && hasFastSync { - return v32 - } - - // Something older, probably. 
- return vUnknown -} - -// CheckValid checks whether the specified config appears to be a valid -// Tendermint config file. This emulates how the node loads the config. -func CheckValid(data []byte) error { - v := viper.New() - v.SetConfigType("toml") - - if err := v.ReadConfig(bytes.NewReader(data)); err != nil { - return fmt.Errorf("reading config: %w", err) - } - - var cfg config.Config - if err := v.Unmarshal(&cfg); err != nil { - return fmt.Errorf("decoding config: %w", err) - } - - return cfg.ValidateBasic() } From 503ddf1c4dfc26e347b52c81b5c30555e7f2f86a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 15 Jul 2022 16:10:24 +0000 Subject: [PATCH 171/203] build(deps): Bump github.com/prometheus/common from 0.36.0 to 0.37.0 (#9013) Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.36.0 to 0.37.0.
Release notes

Sourced from github.com/prometheus/common's releases.

sigv4/v0.1.0

Initial release

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/prometheus/common&package-manager=go_modules&previous-version=0.36.0&new-version=0.37.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ca3f40fd65..3b248a3389 100644 --- a/go.mod +++ b/go.mod @@ -240,6 +240,6 @@ require ( require ( github.com/creachadair/tomledit v0.0.22 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.36.0 + github.com/prometheus/common v0.37.0 github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca ) diff --git a/go.sum b/go.sum index 2098fc7cc2..957b602756 100644 --- a/go.sum +++ b/go.sum @@ -938,8 +938,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.36.0 h1:78hJTing+BLYLjhXE+Z2BubeEymH5Lr0/Mt8FKkxxYo= -github.com/prometheus/common v0.36.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= From cc07318866c72b1e29060e8dc853037cf9e65a0d Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 15 Jul 2022 21:14:18 -0400 Subject: [PATCH 172/203] migration: scope key migration to stores (#9005) --- cmd/tendermint/commands/key_migrate.go | 3 +- config/db.go | 1 + scripts/keymigrate/migrate.go | 615 ++++++++++++++++--------- scripts/keymigrate/migrate_test.go | 196 ++++---- 4 files changed, 492 insertions(+), 323 
deletions(-) diff --git a/cmd/tendermint/commands/key_migrate.go b/cmd/tendermint/commands/key_migrate.go index 723026da5a..88b9dfe715 100644 --- a/cmd/tendermint/commands/key_migrate.go +++ b/cmd/tendermint/commands/key_migrate.go @@ -34,7 +34,6 @@ func RunDatabaseMigration(ctx context.Context, logger log.Logger, conf *config.C // reduce the possibility of the // ephemeral data overwriting later data "tx_index", - "peerstore", "light", "blockstore", "state", @@ -57,7 +56,7 @@ func RunDatabaseMigration(ctx context.Context, logger log.Logger, conf *config.C return fmt.Errorf("constructing database handle: %w", err) } - if err = keymigrate.Migrate(ctx, db); err != nil { + if err = keymigrate.Migrate(ctx, dbctx, db); err != nil { return fmt.Errorf("running migration for context %q: %w", dbctx, err) } diff --git a/config/db.go b/config/db.go index f508354e07..bbc2869446 100644 --- a/config/db.go +++ b/config/db.go @@ -25,5 +25,6 @@ type DBProvider func(*DBContext) (dbm.DB, error) // specified in the Config. func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { dbType := dbm.BackendType(ctx.Config.DBBackend) + return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) } diff --git a/scripts/keymigrate/migrate.go b/scripts/keymigrate/migrate.go index a0b43aef6f..2c5873427e 100644 --- a/scripts/keymigrate/migrate.go +++ b/scripts/keymigrate/migrate.go @@ -26,7 +26,7 @@ type ( migrateFunc func(keyID) (keyID, error) ) -func getAllLegacyKeys(db dbm.DB) ([]keyID, error) { +func getAllLegacyKeys(db dbm.DB, storeName string) ([]keyID, error) { var out []keyID iter, err := db.Iterator(nil, nil) @@ -37,9 +37,16 @@ func getAllLegacyKeys(db dbm.DB) ([]keyID, error) { for ; iter.Valid(); iter.Next() { k := iter.Key() - // make sure it's a key with a legacy format, and skip - // all other keys, to make it safe to resume the migration. 
- if !checkKeyType(k).isLegacy() { + // make sure it's a key that we'd expect to see in + // this database, with a legacy format, and skip all + // other keys, to make it safe to resume the + // migration. + kt, err := checkKeyType(k, storeName) + if err != nil { + return nil, err + } + + if !kt.isLegacy() { continue } @@ -88,241 +95,405 @@ var prefixes = []struct { ktype keyType check func(keyID) bool }{ - {[]byte("consensusParamsKey:"), consensusParamsKey, nil}, - {[]byte("abciResponsesKey:"), abciResponsesKey, nil}, - {[]byte("validatorsKey:"), validatorsKey, nil}, - {[]byte("stateKey"), stateStoreKey, nil}, - {[]byte("H:"), blockMetaKey, nil}, - {[]byte("P:"), blockPartKey, nil}, - {[]byte("C:"), commitKey, nil}, - {[]byte("SC:"), seenCommitKey, nil}, - {[]byte("BH:"), blockHashKey, nil}, - {[]byte("size"), lightSizeKey, nil}, - {[]byte("lb/"), lightBlockKey, nil}, - {[]byte("\x00"), evidenceCommittedKey, checkEvidenceKey}, - {[]byte("\x01"), evidencePendingKey, checkEvidenceKey}, + {[]byte(legacyConsensusParamsPrefix), consensusParamsKey, nil}, + {[]byte(legacyAbciResponsePrefix), abciResponsesKey, nil}, + {[]byte(legacyValidatorPrefix), validatorsKey, nil}, + {[]byte(legacyStateKeyPrefix), stateStoreKey, nil}, + {[]byte(legacyBlockMetaPrefix), blockMetaKey, nil}, + {[]byte(legacyBlockPartPrefix), blockPartKey, nil}, + {[]byte(legacyCommitPrefix), commitKey, nil}, + {[]byte(legacySeenCommitPrefix), seenCommitKey, nil}, + {[]byte(legacyBlockHashPrefix), blockHashKey, nil}, + {[]byte(legacyLightSizePrefix), lightSizeKey, nil}, + {[]byte(legacyLightBlockPrefix), lightBlockKey, nil}, + {[]byte(legacyEvidenceComittedPrefix), evidenceCommittedKey, checkEvidenceKey}, + {[]byte(legacyEvidencePendingPrefix), evidencePendingKey, checkEvidenceKey}, } -// checkKeyType classifies a candidate key based on its structure. 
-func checkKeyType(key keyID) keyType { - for _, p := range prefixes { - if bytes.HasPrefix(key, p.prefix) { - if p.check == nil || p.check(key) { - return p.ktype - } - } - } - - // A legacy event key has the form: - // - // / / / - // - // Transaction hashes are stored as a raw binary hash with no prefix. - // - // Because a hash can contain any byte, it is possible (though unlikely) - // that a hash could have the correct form for an event key, in which case - // we would translate it incorrectly. To reduce the likelihood of an - // incorrect interpretation, we parse candidate event keys and check for - // some structural properties before making a decision. - // - // Note, though, that nothing prevents event names or values from containing - // additional "/" separators, so the parse has to be forgiving. - parts := bytes.Split(key, []byte("/")) - if len(parts) >= 4 { - // Special case for tx.height. - if len(parts) == 4 && bytes.Equal(parts[0], []byte("tx.height")) { - return txHeightKey - } - - // The name cannot be empty, but we don't know where the name ends and - // the value begins, so insist that there be something. - var n int - for _, part := range parts[:len(parts)-2] { - n += len(part) - } - // Check whether the last two fields could be .../height/index. - if n > 0 && isDecimal(parts[len(parts)-1]) && isDecimal(parts[len(parts)-2]) { - return abciEventKey - } - } +const ( + legacyConsensusParamsPrefix = "consensusParamsKey:" + legacyAbciResponsePrefix = "abciResponsesKey:" + legacyValidatorPrefix = "validatorsKey:" + legacyStateKeyPrefix = "stateKey" + legacyBlockMetaPrefix = "H:" + legacyBlockPartPrefix = "P:" + legacyCommitPrefix = "C:" + legacySeenCommitPrefix = "SC:" + legacyBlockHashPrefix = "BH:" + legacyLightSizePrefix = "size" + legacyLightBlockPrefix = "lb/" + legacyEvidenceComittedPrefix = "\x00" + legacyEvidencePendingPrefix = "\x01" +) - // If we get here, it's not an event key. Treat it as a hash if it is the - // right length. 
Note that it IS possible this could collide with the - // translation of some other key (though not a hash, since encoded hashes - // will be longer). The chance of that is small, but there is nothing we can - // do to detect it. - if len(key) == 32 { - return txHashKey - } - return nonLegacyKey +type migrationDefinition struct { + name string + storeName string + prefix []byte + ktype keyType + check func(keyID) bool + transform migrateFunc } -// isDecimal reports whether buf is a non-empty sequence of Unicode decimal -// digits. -func isDecimal(buf []byte) bool { - for _, c := range buf { - if c < '0' || c > '9' { - return false - } - } - return len(buf) != 0 -} - -func migrateKey(key keyID) (keyID, error) { - switch checkKeyType(key) { - case blockMetaKey: - val, err := strconv.Atoi(string(key[2:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(0), int64(val)) - case blockPartKey: - parts := bytes.Split(key[2:], []byte(":")) - if len(parts) != 2 { - return nil, fmt.Errorf("block parts key has %d rather than 2 components", - len(parts)) - } - valOne, err := strconv.Atoi(string(parts[0])) - if err != nil { - return nil, err - } - - valTwo, err := strconv.Atoi(string(parts[1])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(1), int64(valOne), int64(valTwo)) - case commitKey: - val, err := strconv.Atoi(string(key[2:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(2), int64(val)) - case seenCommitKey: - val, err := strconv.Atoi(string(key[3:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(3), int64(val)) - case blockHashKey: - hash := string(key[3:]) - if len(hash)%2 == 1 { - hash = "0" + hash - } - val, err := hex.DecodeString(hash) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(4), string(val)) - case validatorsKey: - val, err := strconv.Atoi(string(key[14:])) - if err != nil { - return nil, 
err - } - - return orderedcode.Append(nil, int64(5), int64(val)) - case consensusParamsKey: - val, err := strconv.Atoi(string(key[19:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(6), int64(val)) - case abciResponsesKey: - val, err := strconv.Atoi(string(key[17:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(7), int64(val)) - case stateStoreKey: - return orderedcode.Append(nil, int64(8)) - case evidenceCommittedKey: - return convertEvidence(key, 9) - case evidencePendingKey: - return convertEvidence(key, 10) - case lightBlockKey: - if len(key) < 24 { - return nil, fmt.Errorf("light block evidence %q in invalid format", string(key)) - } - - val, err := strconv.Atoi(string(key[len(key)-20:])) - if err != nil { - return nil, err - } - - return orderedcode.Append(nil, int64(11), int64(val)) - case lightSizeKey: - return orderedcode.Append(nil, int64(12)) - case txHeightKey: - parts := bytes.Split(key, []byte("/")) - if len(parts) != 4 { - return nil, fmt.Errorf("key has %d parts rather than 4", len(parts)) - } - parts = parts[1:] // drop prefix +var migrations = []migrationDefinition{ + { + name: "consensus-params", + storeName: "state", + prefix: []byte(legacyConsensusParamsPrefix), + ktype: consensusParamsKey, + transform: func(key keyID) (keyID, error) { + val, err := strconv.Atoi(string(key[19:])) + if err != nil { + return nil, err + } - elems := make([]interface{}, 0, len(parts)+1) - elems = append(elems, "tx.height") + return orderedcode.Append(nil, int64(6), int64(val)) + }, + }, + { + name: "abci-responses", + storeName: "state", + prefix: []byte(legacyAbciResponsePrefix), + ktype: abciResponsesKey, + transform: func(key keyID) (keyID, error) { + val, err := strconv.Atoi(string(key[17:])) + if err != nil { + return nil, err + } - for idx, pt := range parts { - val, err := strconv.Atoi(string(pt)) + return orderedcode.Append(nil, int64(7), int64(val)) + }, + }, + { + name: "validators", 
+ storeName: "state", + prefix: []byte(legacyValidatorPrefix), + ktype: validatorsKey, + transform: func(key keyID) (keyID, error) { + val, err := strconv.Atoi(string(key[14:])) if err != nil { return nil, err } - if idx == 0 { - elems = append(elems, fmt.Sprintf("%d", val)) - } else { - elems = append(elems, int64(val)) + + return orderedcode.Append(nil, int64(5), int64(val)) + }, + }, + { + name: "tendermint-state", + storeName: "state", + prefix: []byte(legacyStateKeyPrefix), + ktype: stateStoreKey, + transform: func(key keyID) (keyID, error) { + return orderedcode.Append(nil, int64(8)) + }, + }, + { + name: "block-meta", + storeName: "blockstore", + prefix: []byte(legacyBlockMetaPrefix), + ktype: blockMetaKey, + transform: func(key keyID) (keyID, error) { + val, err := strconv.Atoi(string(key[2:])) + if err != nil { + return nil, err } - } - return orderedcode.Append(nil, elems...) - case abciEventKey: - parts := bytes.Split(key, []byte("/")) + return orderedcode.Append(nil, int64(0), int64(val)) + }, + }, + { + name: "block-part", + storeName: "blockstore", + prefix: []byte(legacyBlockPartPrefix), + ktype: blockPartKey, + transform: func(key keyID) (keyID, error) { + parts := bytes.Split(key[2:], []byte(":")) + if len(parts) != 2 { + return nil, fmt.Errorf("block parts key has %d rather than 2 components", + len(parts)) + } + valOne, err := strconv.Atoi(string(parts[0])) + if err != nil { + return nil, err + } - elems := make([]interface{}, 0, 4) - if len(parts) == 4 { - elems = append(elems, string(parts[0]), string(parts[1])) + valTwo, err := strconv.Atoi(string(parts[1])) + if err != nil { + return nil, err + } - val, err := strconv.Atoi(string(parts[2])) + return orderedcode.Append(nil, int64(1), int64(valOne), int64(valTwo)) + }, + }, + { + name: "commit", + storeName: "blockstore", + prefix: []byte(legacyCommitPrefix), + ktype: commitKey, + transform: func(key keyID) (keyID, error) { + val, err := strconv.Atoi(string(key[2:])) if err != nil { return nil, 
err } - elems = append(elems, int64(val)) - val2, err := strconv.Atoi(string(parts[3])) + return orderedcode.Append(nil, int64(2), int64(val)) + }, + }, + { + name: "seen-commit", + storeName: "blockstore", + prefix: []byte(legacySeenCommitPrefix), + ktype: seenCommitKey, + transform: func(key keyID) (keyID, error) { + val, err := strconv.Atoi(string(key[3:])) if err != nil { return nil, err } - elems = append(elems, int64(val2)) - } else { - elems = append(elems, string(parts[0])) - parts = parts[1:] - val, err := strconv.Atoi(string(parts[len(parts)-1])) + return orderedcode.Append(nil, int64(3), int64(val)) + }, + }, + { + name: "block-hash", + storeName: "blockstore", + prefix: []byte(legacyBlockHashPrefix), + ktype: blockHashKey, + transform: func(key keyID) (keyID, error) { + hash := string(key[3:]) + if len(hash)%2 == 1 { + hash = "0" + hash + } + val, err := hex.DecodeString(hash) if err != nil { return nil, err } - val2, err := strconv.Atoi(string(parts[len(parts)-2])) + return orderedcode.Append(nil, int64(4), string(val)) + }, + }, + { + name: "light-size", + storeName: "light", + prefix: []byte(legacyLightSizePrefix), + ktype: lightSizeKey, + transform: func(key keyID) (keyID, error) { + return orderedcode.Append(nil, int64(12)) + }, + }, + { + name: "light-block", + storeName: "light", + prefix: []byte(legacyLightBlockPrefix), + ktype: lightBlockKey, + transform: func(key keyID) (keyID, error) { + if len(key) < 24 { + return nil, fmt.Errorf("light block evidence %q in invalid format", string(key)) + } + + val, err := strconv.Atoi(string(key[len(key)-20:])) if err != nil { return nil, err } - appKey := bytes.Join(parts[:len(parts)-3], []byte("/")) - elems = append(elems, string(appKey), int64(val), int64(val2)) + return orderedcode.Append(nil, int64(11), int64(val)) + }, + }, + { + name: "evidence-pending", + storeName: "evidence", + prefix: []byte(legacyEvidencePendingPrefix), + ktype: evidencePendingKey, + transform: func(key keyID) (keyID, error) { + 
return convertEvidence(key, 10) + }, + }, + { + name: "evidence-committed", + storeName: "evidence", + prefix: []byte(legacyEvidenceComittedPrefix), + ktype: evidenceCommittedKey, + transform: func(key keyID) (keyID, error) { + return convertEvidence(key, 9) + }, + }, + { + name: "event-tx", + storeName: "tx_index", + prefix: nil, + ktype: txHeightKey, + transform: func(key keyID) (keyID, error) { + parts := bytes.Split(key, []byte("/")) + if len(parts) != 4 { + return nil, fmt.Errorf("key has %d parts rather than 4", len(parts)) + } + parts = parts[1:] // drop prefix + + elems := make([]interface{}, 0, len(parts)+1) + elems = append(elems, "tx.height") + + for idx, pt := range parts { + val, err := strconv.Atoi(string(pt)) + if err != nil { + return nil, err + } + if idx == 0 { + elems = append(elems, fmt.Sprintf("%d", val)) + } else { + elems = append(elems, int64(val)) + } + } + + return orderedcode.Append(nil, elems...) + }, + }, + { + name: "event-abci", + storeName: "tx_index", + prefix: nil, + ktype: abciEventKey, + transform: func(key keyID) (keyID, error) { + parts := bytes.Split(key, []byte("/")) + + elems := make([]interface{}, 0, 4) + if len(parts) == 4 { + elems = append(elems, string(parts[0]), string(parts[1])) + + val, err := strconv.Atoi(string(parts[2])) + if err != nil { + return nil, err + } + elems = append(elems, int64(val)) + + val2, err := strconv.Atoi(string(parts[3])) + if err != nil { + return nil, err + } + elems = append(elems, int64(val2)) + } else { + elems = append(elems, string(parts[0])) + parts = parts[1:] + + val, err := strconv.Atoi(string(parts[len(parts)-1])) + if err != nil { + return nil, err + } + + val2, err := strconv.Atoi(string(parts[len(parts)-2])) + if err != nil { + return nil, err + } + + appKey := bytes.Join(parts[:len(parts)-3], []byte("/")) + elems = append(elems, string(appKey), int64(val), int64(val2)) + } + + return orderedcode.Append(nil, elems...) 
+ }, + }, + { + name: "event-tx-hash", + storeName: "tx_index", + prefix: nil, + ktype: txHashKey, + transform: func(key keyID) (keyID, error) { + return orderedcode.Append(nil, "tx.hash", string(key)) + }, + }, +} + +// checkKeyType classifies a candidate key based on its structure. +func checkKeyType(key keyID, storeName string) (keyType, error) { + var migrations []migrationDefinition + for _, m := range migrations { + if m.storeName != storeName { + continue + } + if m.prefix == nil && storeName == "tx_index" { + // A legacy event key has the form: + // + // / / / + // + // Transaction hashes are stored as a raw binary hash with no prefix. + // + // Note, though, that nothing prevents event names or values from containing + // additional "/" separators, so the parse has to be forgiving. + parts := bytes.Split(key, []byte("/")) + if len(parts) >= 4 { + // Special case for tx.height. + if len(parts) == 4 && bytes.Equal(parts[0], []byte("tx.height")) { + return txHeightKey, nil + } + + // The name cannot be empty, but we don't know where the name ends and + // the value begins, so insist that there be something. + var n int + for _, part := range parts[:len(parts)-2] { + n += len(part) + } + // Check whether the last two fields could be .../height/index. + if n > 0 && isDecimal(parts[len(parts)-1]) && isDecimal(parts[len(parts)-2]) { + return abciEventKey, nil + } + } + + // If we get here, it's not an event key. Treat it as a hash if it is the + // right length. Note that it IS possible this could collide with the + // translation of some other key (though not a hash, since encoded hashes + // will be longer). The chance of that is small, but there is nothing we can + // do to detect it. + if len(key) == 32 { + return txHashKey, nil + } + } else if bytes.HasPrefix(key, m.prefix) { + if m.check == nil || m.check(key) { + return m.ktype, nil + } + // we have an expected legacy prefix but that + // didn't pass the check. 
This probably means + // the evidence data is currupt (based on the + // defined migrations) best to error here. + return -1, fmt.Errorf("in store %q, key %q exists but is not a valid key of type %q", storeName, key, m.ktype) } - return orderedcode.Append(nil, elems...) - case txHashKey: - return orderedcode.Append(nil, "tx.hash", string(key)) - default: - return nil, fmt.Errorf("key %q is in the wrong format", string(key)) + // if we get here, the key in question is either + // migrated or of a different type. We can't break + // here because there are more than one key type in a + // specific database, so we have to keep iterating. } + // if we've looked at every migration and not identified a key + // type, then the key has been migrated *or* we (possibly, but + // very unlikely have data that is in the wrong place or the + // sign of corruption.) In either case we should not attempt + // more migrations at this point + + return nonLegacyKey, nil +} + +// isDecimal reports whether buf is a non-empty sequence of Unicode decimal +// digits. 
+func isDecimal(buf []byte) bool { + for _, c := range buf { + if c < '0' || c > '9' { + return false + } + } + return len(buf) != 0 +} + +func migrateKey(key keyID, storeName string) (keyID, error) { + kt, err := checkKeyType(key, storeName) + if err != nil { + return nil, err + } + for _, migration := range migrations { + if migration.storeName != storeName { + continue + } + if kt == migration.ktype { + return migration.transform(key) + } + } + + return nil, fmt.Errorf("key %q is in the wrong format", string(key)) } func convertEvidence(key keyID, newPrefix int64) ([]byte, error) { @@ -374,7 +545,29 @@ func isHex(data []byte) bool { return len(data) != 0 } -func replaceKey(db dbm.DB, key keyID, gooseFn migrateFunc) error { +func getMigrationFunc(storeName string, key keyID) (*migrationDefinition, error) { + for idx := range migrations { + migration := migrations[idx] + + if migration.storeName == storeName { + if migration.prefix == nil { + return &migration, nil + } + if bytes.HasPrefix(migration.prefix, key) { + return &migration, nil + } + } + } + return nil, fmt.Errorf("no migration defined for data store %q and key %q", storeName, key) +} + +func replaceKey(db dbm.DB, storeName string, key keyID) error { + migration, err := getMigrationFunc(storeName, key) + if err != nil { + return err + } + gooseFn := migration.transform + exists, err := db.Has(key) if err != nil { return err @@ -433,8 +626,8 @@ func replaceKey(db dbm.DB, key keyID, gooseFn migrateFunc) error { // The context allows for a safe termination of the operation // (e.g connected to a singal handler,) to abort the operation // in-between migration operations. 
-func Migrate(ctx context.Context, db dbm.DB) error { - keys, err := getAllLegacyKeys(db) +func Migrate(ctx context.Context, storeName string, db dbm.DB) error { + keys, err := getAllLegacyKeys(db, storeName) if err != nil { return err } @@ -451,7 +644,7 @@ func Migrate(ctx context.Context, db dbm.DB) error { if err := ctx.Err(); err != nil { return err } - return replaceKey(db, key, migrateKey) + return replaceKey(db, storeName, key) }) } if g.Wait() != nil { diff --git a/scripts/keymigrate/migrate_test.go b/scripts/keymigrate/migrate_test.go index f7322b352f..b2e7a4ab8e 100644 --- a/scripts/keymigrate/migrate_test.go +++ b/scripts/keymigrate/migrate_test.go @@ -1,16 +1,12 @@ package keymigrate import ( - "context" - "errors" "fmt" - "math" "strings" "testing" "github.com/google/orderedcode" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" ) func makeKey(t *testing.T, elems ...interface{}) []byte { @@ -78,30 +74,6 @@ func getNewPrefixKeys(t *testing.T, val int) map[string][]byte { } } -func getLegacyDatabase(t *testing.T) (int, dbm.DB) { - db := dbm.NewMemDB() - batch := db.NewBatch() - ct := 0 - - generated := []map[string][]byte{ - getLegacyPrefixKeys(8), - getLegacyPrefixKeys(9001), - getLegacyPrefixKeys(math.MaxInt32 << 1), - getLegacyPrefixKeys(math.MaxInt64 - 8), - } - - // populate database - for _, km := range generated { - for _, key := range km { - ct++ - require.NoError(t, batch.Set(key, []byte(fmt.Sprintf(`{"value": %d}`, ct)))) - } - } - require.NoError(t, batch.WriteSync()) - require.NoError(t, batch.Close()) - return ct - (2 * len(generated)) + 2, db -} - func TestMigration(t *testing.T) { t.Run("Idempotency", func(t *testing.T) { // we want to make sure that the key space for new and @@ -113,37 +85,12 @@ func TestMigration(t *testing.T) { require.Equal(t, len(legacyPrefixes), len(newPrefixes)) - t.Run("Legacy", func(t *testing.T) { - for kind, le := range legacyPrefixes { - require.True(t, checkKeyType(le).isLegacy(), kind) 
- } - }) - t.Run("New", func(t *testing.T) { - for kind, ne := range newPrefixes { - require.False(t, checkKeyType(ne).isLegacy(), kind) - } - }) - t.Run("Conversion", func(t *testing.T) { - for kind, le := range legacyPrefixes { - nk, err := migrateKey(le) - require.NoError(t, err, kind) - require.False(t, checkKeyType(nk).isLegacy(), kind) - } - }) t.Run("Hashes", func(t *testing.T) { t.Run("NewKeysAreNotHashes", func(t *testing.T) { for _, key := range getNewPrefixKeys(t, 9001) { require.True(t, len(key) != 32) } }) - t.Run("ContrivedLegacyKeyDetection", func(t *testing.T) { - // length 32: should appear to be a hash - require.Equal(t, txHashKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"))) - - // length ≠ 32: should not appear to be a hash - require.Equal(t, nonLegacyKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx--"))) - require.Equal(t, nonLegacyKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"))) - }) }) }) t.Run("Migrations", func(t *testing.T) { @@ -171,72 +118,101 @@ func TestMigration(t *testing.T) { "UserKey3": []byte("foo/bar/baz/1.2/4"), } for kind, key := range table { - out, err := migrateKey(key) + out, err := migrateKey(key, "") + // TODO probably these error at the + // moment because of store missmatches require.Error(t, err, kind) require.Nil(t, out, kind) } }) - t.Run("Replacement", func(t *testing.T) { - t.Run("MissingKey", func(t *testing.T) { - db := dbm.NewMemDB() - require.NoError(t, replaceKey(db, keyID("hi"), nil)) - }) - t.Run("ReplacementFails", func(t *testing.T) { - db := dbm.NewMemDB() - key := keyID("hi") - require.NoError(t, db.Set(key, []byte("world"))) - require.Error(t, replaceKey(db, key, func(k keyID) (keyID, error) { - return nil, errors.New("hi") - })) - }) - t.Run("KeyDisappears", func(t *testing.T) { - db := dbm.NewMemDB() - key := keyID("hi") - require.NoError(t, db.Set(key, []byte("world"))) - require.Error(t, replaceKey(db, key, func(k keyID) (keyID, error) { - require.NoError(t, 
db.Delete(key)) - return keyID("wat"), nil - })) - - exists, err := db.Has(key) - require.NoError(t, err) - require.False(t, exists) - - exists, err = db.Has(keyID("wat")) - require.NoError(t, err) - require.False(t, exists) - }) - }) }) - t.Run("Integration", func(t *testing.T) { - t.Run("KeyDiscovery", func(t *testing.T) { - size, db := getLegacyDatabase(t) - keys, err := getAllLegacyKeys(db) - require.NoError(t, err) - require.Equal(t, size, len(keys)) - legacyKeys := 0 - for _, k := range keys { - if checkKeyType(k).isLegacy() { - legacyKeys++ - } +} + +func TestGlobalDataStructuresForRefactor(t *testing.T) { + defer func() { + if t.Failed() { + t.Log("number of migrations:", len(migrations)) + } + }() + + const unPrefixedLegacyKeys = 3 + + t.Run("MigrationsAreDefined", func(t *testing.T) { + if len(prefixes)+unPrefixedLegacyKeys != len(migrations) { + t.Fatal("migrationse are not correctly defined", + "prefixes", len(prefixes), + "migrations", len(migrations)) + } + }) + t.Run("AllMigrationsHavePrefixDefined", func(t *testing.T) { + for _, m := range migrations { + if m.prefix == nil && m.storeName != "tx_index" { + t.Errorf("migration named %q for store %q does not have a prefix defined", m.name, m.storeName) + } + } + }) + t.Run("Deduplication", func(t *testing.T) { + t.Run("Prefixes", func(t *testing.T) { + set := map[string]struct{}{} + for _, prefix := range prefixes { + set[string(prefix.prefix)] = struct{}{} + } + if len(set) != len(prefixes) { + t.Fatal("duplicate prefix definition", + "set", len(set), + "values", set) } - require.Equal(t, size, legacyKeys) }) - t.Run("KeyIdempotency", func(t *testing.T) { - for _, key := range getNewPrefixKeys(t, 84) { - require.False(t, checkKeyType(key).isLegacy()) + t.Run("MigrationName", func(t *testing.T) { + set := map[string]struct{}{} + for _, migration := range migrations { + set[migration.name] = struct{}{} + } + if len(set) != len(migrations) { + t.Fatal("duplicate migration name defined", + "set", 
len(set), + "values", set) } }) - t.Run("Migrate", func(t *testing.T) { - _, db := getLegacyDatabase(t) - - ctx := context.Background() - err := Migrate(ctx, db) - require.NoError(t, err) - keys, err := getAllLegacyKeys(db) - require.NoError(t, err) - require.Equal(t, 0, len(keys)) - + t.Run("MigrationPrefix", func(t *testing.T) { + set := map[string]struct{}{} + for _, migration := range migrations { + set[string(migration.prefix)] = struct{}{} + } + // three keys don't have prefixes in the + // legacy system; this is fine but it means + // the set will have 1 less than expected + // (well 2 less, but the empty key takes one + // of the slots): + expectedDupl := unPrefixedLegacyKeys - 1 + + if len(set) != len(migrations)-expectedDupl { + t.Fatal("duplicate migration prefix defined", + "set", len(set), + "expected", len(migrations)-expectedDupl, + "values", set) + } + }) + t.Run("MigrationStoreName", func(t *testing.T) { + set := map[string]struct{}{} + for _, migration := range migrations { + set[migration.storeName] = struct{}{} + } + if len(set) != 5 { + t.Fatal("duplicate migration store name defined", + "set", len(set), + "values", set) + } + if _, ok := set[""]; ok { + t.Fatal("empty store name defined") + } }) }) + t.Run("NilPrefix", func(t *testing.T) { + _, err := getMigrationFunc("tx_index", []byte("fooo")) + if err != nil { + t.Fatal("should find an index for tx", err) + } + }) + } From 48f3062d9d4111e5d79b0b985588b8c31721dbe7 Mon Sep 17 00:00:00 2001 From: Rishabh Goel <36698583+Coder-RG@users.noreply.github.com> Date: Mon, 18 Jul 2022 14:20:55 +0530 Subject: [PATCH 173/203] Updated potential errors in abci.md (#9003) Co-authored-by: Callum Waters Co-authored-by: Josef Widder <44643235+josef-widder@users.noreply.github.com> --- spec/abci/abci.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spec/abci/abci.md b/spec/abci/abci.md index 5d9d59b711..84e566d507 100644 --- a/spec/abci/abci.md +++ b/spec/abci/abci.md @@ -40,13 +40,13 @@ 
tendermint should not continue. In the Go implementation these methods take a context and may return an error. The context exists so that applications can terminate gracefully during shutdown, and the error return value makes it -possible for applications to singal transient errors to Tendermint. +possible for applications to signal transient errors to Tendermint. ### CheckTx The `CheckTx` ABCI method controls what transactions are considered for inclusion in a block. When Tendermint receives a `ResponseCheckTx` with a non-zero `Code`, the associated -transaction will be not be added to Tendermint's mempool or it will be removed if +transaction will not be added to Tendermint's mempool or it will be removed if it is already included. ### DeliverTx From 066b7a9999adf6c42ce06502766e483124099e6a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Jul 2022 13:13:02 +0000 Subject: [PATCH 174/203] build(deps): Bump github.com/golangci/golangci-lint from 1.46.0 to 1.47.0 (#9038) Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.46.0 to 1.47.0.
Release notes

Sourced from github.com/golangci/golangci-lint's releases.

v1.47.0

Changelog

  • b4154027 Add linter asasalint to lint pass []any as any (#2968)
  • 1d8a15a0 add nosnakecase lint (#2828)
  • 2a1edcef build(deps): bump github.com/Antonboom/errname from 0.1.6 to 0.1.7 (#2888)
  • c766184c build(deps): bump github.com/GaijinEntertainment/go-exhaustruct/v2 from 2.1.0 to 2.2.0 (#2916)
  • b8f1e2a5 build(deps): bump github.com/daixiang0/gci from 0.3.4 to 0.4.0 (#2965)
  • 5e183652 build(deps): bump github.com/daixiang0/gci from 0.4.0 to 0.4.1 (#2973)
  • e60937a1 build(deps): bump github.com/daixiang0/gci from 0.4.1 to 0.4.2 (#2979)
  • 98c811d0 build(deps): bump github.com/firefart/nonamedreturns from 1.0.1 to 1.0.2 (#2929)
  • 023e1c4f build(deps): bump github.com/firefart/nonamedreturns from 1.0.2 to 1.0.4 (#2944)
  • 7fbb11ca build(deps): bump github.com/fzipp/gocyclo from 0.5.1 to 0.6.0 (#2926)
  • db5d58cd build(deps): bump github.com/hashicorp/go-version from 1.4.0 to 1.5.0 (#2873)
  • f75b1a8b build(deps): bump github.com/hashicorp/go-version from 1.5.0 to 1.6.0 (#2958)
  • 75be924e build(deps): bump github.com/kisielk/errcheck from 1.6.0 to 1.6.1 (#2871)
  • 33f4aeeb build(deps): bump github.com/kulti/thelper from 0.6.2 to 0.6.3 (#2872)
  • 6a412d3d build(deps): bump github.com/kunwardeep/paralleltest from 1.0.3 to 1.0.4 (#2907)
  • 97eea6ea build(deps): bump github.com/kunwardeep/paralleltest from 1.0.4 to 1.0.6 (#2918)
  • 3a0f646e build(deps): bump github.com/maratori/testpackage from 1.0.1 to 1.1.0 (#2945)
  • 92d7022d build(deps): bump github.com/nishanths/exhaustive from 0.7.11 to 0.8.1 (#2906)
  • 97d7415b build(deps): bump github.com/quasilyte/go-ruleguard/dsl from 0.3.19 to 0.3.21 (#2874)
  • 0e3730d3 build(deps): bump github.com/securego/gosec/v2 from 2.11.0 to 2.12.0 (#2925)
  • ac99dbcc build(deps): bump github.com/shirou/gopsutil/v3 from 3.22.4 to 3.22.5 (#2908)
  • 8e0a6725 build(deps): bump github.com/shirou/gopsutil/v3 from 3.22.5 to 3.22.6 (#2959)
  • c8e38c4b build(deps): bump github.com/sivchari/tenv from 1.5.0 to 1.6.0 (#2927)
  • f70bf666 build(deps): bump github.com/spf13/cobra from 1.4.0 to 1.5.0 (#2933)
  • 153b4072 build(deps): bump github.com/spf13/viper from 1.11.0 to 1.12.0 (#2889)
  • f03a5207 build(deps): bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#2917)
  • e33e63ed build(deps): bump github.com/stretchr/testify from 1.7.2 to 1.7.4 (#2934)
  • 44e9b34d build(deps): bump github.com/stretchr/testify from 1.7.4 to 1.7.5 (#2942)
  • bb5b6625 build(deps): bump github.com/stretchr/testify from 1.7.5 to 1.8.0 (#2957)
  • 2c30625c build(deps): bump github.com/tomarrell/wrapcheck/v2 from 2.6.1 to 2.6.2 (#2928)
  • 9317da6c build(deps): bump github.com/uudashr/gocognit from 1.0.5 to 1.0.6 (#2962)
  • 3071fecb build(deps): bump gitlab.com/bosi/decorder from 0.2.1 to 0.2.2 (#2943)
  • d92f144d build(deps): bump goreleaser/goreleaser-action from 2 to 3 (#2876)
  • ddee31ae build(deps): bump honnef.co/go/tools from 0.3.1 to 0.3.2 (#2870)
  • 9ebc2d52 build(deps): bump moment from 2.29.2 to 2.29.4 in /.github/contributors (#2966)
  • f9d81511 bump golang.org/x/tools to HEAD (#2875)
  • de7cc56e chore: remove reviewers from dependabot configuration (#2932)
  • 86bd8423 chore: spelling and grammar fixes (#2865)
  • 4b218e66 config: spread go version on linter's configurations (#2913)
  • ae2a9688 depguard: adjust phrasing (#2921)
  • f2634d40 fix: codeQL scanning (#2882)
  • 2f41c1f0 gci: fix issues and re-enable autofix (#2892)
  • c531fc2a gosec: allow global config (#2880)
  • 0abb2981 staticcheck: fix generics (#2976)

v1.46.2

Changelog

  • a3336890 build(deps): bump golangci/golangci-lint-action from 3.1.0 to 3.2.0 (#2858)

... (truncated)

Changelog

Sourced from github.com/golangci/golangci-lint's changelog.

v1.47.0

  1. new linters:
  2. updated linters:
    • errname: from 0.1.6 to 0.1.7
    • gci: from 0.3.4 to 0.4.2
    • nonamedreturns: from 1.0.1 to 1.0.4
    • gocyclo: from 0.5.1 to 0.6.0
    • go-exhaustruct: from 2.1.0 to 2.2.0
    • errcheck: from 1.6.0 to 1.6.1
    • thelper: from 0.6.2 to 0.6.3
    • paralleltest: from 1.0.3 to 1.0.6
    • testpackage: from 1.0.1 to 1.1.0
    • exhaustive: from 0.7.11 to 0.8.1
    • go-ruleguard: from 0.3.19 to 0.3.21
    • gosec: from 2.11.0 to 2.12.0
    • tenv: from 1.5.0 to 1.6.0
    • wrapcheck: from 2.6.1 to 2.6.2
    • gocognit: from 1.0.5 to 1.0.6
    • decorder: from 0.2.1 to 0.2.2
    • honnef.co/go/tools: from 0.3.1 to 0.3.2
    • golang.org/x/tools: bump to HEAD
    • gci: fix issues and re-enable autofix
    • gosec: allow global config
    • staticcheck: fix generics
  3. documentation:
    • add thanks page
    • add a clear explanation about the staticcheck integration.
    • depguard: add ignore-file-rules
    • depguard: adjust phrasing
    • gocritic: add enable and disable ruleguard settings
    • gomnd: fix typo
    • gosec: add configs for all existing rules
    • govet: add settings for shadow and unusedresult
    • thelper: add fuzz config and description
    • linters: add defaults

v1.46.2

  1. updated linters:
    • execinquery: bump from v1.2.0 to v1.2.1
    • errorlint: bump to v1.0.0
    • thelper: allow to disable one option
  2. documentation:
    • rename .golangci.example.yml to .golangci.reference.yml
    • add containedctx linter to the list of available linters

v1.46.1

... (truncated)

Commits
  • b415402 Add linter asasalint to lint pass []any as any (#2968)
  • e60937a build(deps): bump github.com/daixiang0/gci from 0.4.1 to 0.4.2 (#2979)
  • 27f921f dev: use directives instead of comments for tests (#2978)
  • 0abb298 staticcheck: fix generics (#2976)
  • d6a39ef dev: remove kortschak from generated team (#2974)
  • 5e18365 build(deps): bump github.com/daixiang0/gci from 0.4.0 to 0.4.1 (#2973)
  • ed4befe dev: change err to nil (#2971)
  • 9ebc2d5 build(deps): bump moment from 2.29.2 to 2.29.4 in /.github/contributors (#2966)
  • b050b42 build(deps): bump moment from 2.29.2 to 2.29.4 in /docs (#2967)
  • b8f1e2a build(deps): bump github.com/daixiang0/gci from 0.3.4 to 0.4.0 (#2965)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/golangci/golangci-lint&package-manager=go_modules&previous-version=1.46.0&new-version=1.47.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 44 +++++++++++---------- go.sum | 119 ++++++++++++++++++++++++++++----------------------------- 2 files changed, 82 insertions(+), 81 deletions(-) diff --git a/go.mod b/go.mod index 3b248a3389..3e3ef647ce 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( github.com/bufbuild/buf v1.4.0 github.com/creachadair/atomicfile v0.2.6 github.com/creachadair/taskgroup v0.3.2 - github.com/golangci/golangci-lint v1.46.0 + github.com/golangci/golangci-lint v1.47.0 github.com/google/go-cmp v0.5.8 github.com/vektra/mockery/v2 v2.14.0 gotest.tools v2.2.0+incompatible @@ -48,17 +48,18 @@ require ( require ( 4d63.com/gochecknoglobals v0.1.0 // indirect - github.com/Antonboom/errname v0.1.6 // indirect + github.com/Antonboom/errname v0.1.7 // indirect github.com/Antonboom/nilnil v0.1.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/DataDog/zstd v1.4.1 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Microsoft/go-winio v0.5.2 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/OpenPeeDeeP/depguard v1.1.0 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/alingse/asasalint v0.0.10 // indirect github.com/ashanbrown/forbidigo v1.3.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -75,7 +76,7 @@ require ( github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect github.com/containerd/continuity v0.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/daixiang0/gci v0.3.3 // indirect + github.com/daixiang0/gci v0.4.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header 
v0.4.3 // indirect github.com/dgraph-io/badger/v2 v2.2007.2 // indirect @@ -91,9 +92,9 @@ require ( github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect github.com/fatih/color v1.13.0 // indirect github.com/fatih/structtag v1.2.0 // indirect - github.com/firefart/nonamedreturns v1.0.1 // indirect + github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/fzipp/gocyclo v0.5.1 // indirect + github.com/fzipp/gocyclo v0.6.0 // indirect github.com/go-critic/go-critic v0.6.3 // indirect github.com/go-toolsmith/astcast v1.0.0 // indirect github.com/go-toolsmith/astcopy v1.0.0 // indirect @@ -126,7 +127,7 @@ require ( github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-version v1.4.0 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect @@ -138,19 +139,19 @@ require ( github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/julz/importas v0.1.0 // indirect - github.com/kisielk/errcheck v1.6.0 // indirect + github.com/kisielk/errcheck v1.6.1 // indirect github.com/kisielk/gotool v1.0.0 // indirect github.com/klauspost/compress v1.15.1 // indirect github.com/klauspost/pgzip v1.2.5 // indirect - github.com/kulti/thelper v0.6.2 // indirect - github.com/kunwardeep/paralleltest v1.0.3 // indirect + github.com/kulti/thelper v0.6.3 // indirect + github.com/kunwardeep/paralleltest v1.0.6 // indirect github.com/kyoh86/exportloopref v0.1.8 // indirect github.com/ldez/gomoddirectives v0.2.3 // indirect github.com/ldez/tagliatelle v0.3.1 // indirect github.com/leonklingele/grouper v1.1.0 // indirect - 
github.com/lufeee/execinquery v1.0.0 // indirect + github.com/lufeee/execinquery v1.2.1 // indirect github.com/magiconair/properties v1.8.6 // indirect - github.com/maratori/testpackage v1.0.1 // indirect + github.com/maratori/testpackage v1.1.0 // indirect github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect @@ -163,7 +164,7 @@ require ( github.com/moricho/tparallel v0.2.1 // indirect github.com/nakabonne/nestif v0.3.1 // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect - github.com/nishanths/exhaustive v0.7.11 // indirect + github.com/nishanths/exhaustive v0.8.1 // indirect github.com/nishanths/predeclared v0.2.2 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect @@ -176,7 +177,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pkg/profile v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b // indirect + github.com/polyfloyd/go-errorlint v1.0.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a // indirect github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect @@ -186,11 +187,12 @@ require ( github.com/ryancurrah/gomodguard v1.2.3 // indirect github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect - github.com/securego/gosec/v2 v2.11.0 // indirect + github.com/securego/gosec/v2 v2.12.0 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/sivchari/containedctx v1.0.2 // indirect - github.com/sivchari/tenv v1.5.0 // indirect + github.com/sivchari/nosnakecase v1.5.0 // indirect + github.com/sivchari/tenv 
v1.6.0 // indirect github.com/sonatard/noctx v0.0.1 // indirect github.com/sourcegraph/go-diff v0.6.1 // indirect github.com/spf13/afero v1.8.2 // indirect @@ -206,14 +208,14 @@ require ( github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tetafro/godot v1.4.11 // indirect github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect - github.com/tomarrell/wrapcheck/v2 v2.6.1 // indirect + github.com/tomarrell/wrapcheck/v2 v2.6.2 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect github.com/ultraware/funlen v0.0.3 // indirect github.com/ultraware/whitespace v0.0.5 // indirect - github.com/uudashr/gocognit v1.0.5 // indirect + github.com/uudashr/gocognit v1.0.6 // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.2.0 // indirect - gitlab.com/bosi/decorder v0.2.1 // indirect + gitlab.com/bosi/decorder v0.2.2 // indirect go.etcd.io/bbolt v1.3.6 // indirect go.opencensus.io v0.23.0 // indirect go.uber.org/atomic v1.9.0 // indirect @@ -221,7 +223,7 @@ require ( go.uber.org/zap v1.21.0 // indirect golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c // indirect + golang.org/x/sys v0.0.0-20220702020025-31831981b65f // indirect golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/tools v0.1.11 // indirect @@ -230,7 +232,7 @@ require ( gopkg.in/ini.v1 v1.66.6 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.3.1 // indirect + honnef.co/go/tools v0.3.2 // indirect mvdan.cc/gofumpt v0.3.1 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect diff --git a/go.sum b/go.sum index 957b602756..be1ef7f4b5 100644 --- a/go.sum +++ b/go.sum @@ 
-62,8 +62,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Antonboom/errname v0.1.6 h1:LzIJZlyLOCSu51o3/t2n9Ck7PcoP9wdbrdaW6J8fX24= -github.com/Antonboom/errname v0.1.6/go.mod h1:7lz79JAnuoMNDAWE9MeeIr1/c/VpSUWatBv2FH9NYpI= +github.com/Antonboom/errname v0.1.7 h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako= +github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU= github.com/Antonboom/nilnil v0.1.1 h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q= github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZR+xdJEaI= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= @@ -84,8 +84,8 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 h1:LAPPhJ4KR5Z8aKVZF5S48csJkxL5RMKmE/98fMs1u5M= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0/go.mod h1:LGOGuvEgCfCQsy3JF2tRmpGDpzA53iZfyGEWSPwQ6/4= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0 h1:V9xVvhKbLt7unNEGAruK1xXglyc668Pq3Xx0MNTNqpo= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0/go.mod h1:n/vLeA7V+QY84iYAGwMkkUUp9ooeuftMEvaDrSVch+Q= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= 
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= @@ -119,6 +119,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alingse/asasalint v0.0.10 h1:qqGPDTV0ff0tWHN/nnIlSdjlU/EwRPaUY4SfpE1rnms= +github.com/alingse/asasalint v0.0.10/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= @@ -248,8 +250,8 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.3.3 h1:55xJKH7Gl9Vk6oQ1cMkwrDWjAkT1D+D1G9kNmRcAIY4= -github.com/daixiang0/gci v0.3.3/go.mod h1:1Xr2bxnQbDxCqqulUOv8qpGqkgRw9RSCGGjEC2LjF8o= +github.com/daixiang0/gci v0.4.2 h1:PyT/Y4a265wDhPCZo2ip/YH33M4zEuFA3nDMdAvcKSA= +github.com/daixiang0/gci v0.4.2/go.mod h1:d0f+IJhr9loBtIq+ebwhRoTt1LGbPH96ih8bKlsRT9E= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew 
v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -316,8 +318,8 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/firefart/nonamedreturns v1.0.1 h1:fSvcq6ZpK/uBAgJEGMvzErlzyM4NELLqqdTofVjVNag= -github.com/firefart/nonamedreturns v1.0.1/go.mod h1:D3dpIBojGGNh5UfElmwPu73SwDCm+VKhHYqwlNOk2uQ= +github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= +github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -333,8 +335,8 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5 github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= -github.com/fzipp/gocyclo v0.5.1 h1:L66amyuYogbxl0j2U+vGqJXusPF2IkduvXLnYD5TFgw= -github.com/fzipp/gocyclo v0.5.1/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
github.com/go-critic/go-critic v0.6.3 h1:abibh5XYBTASawfTQ0rA7dVtQT+6KzpGqb/J+DxRDaw= github.com/go-critic/go-critic v0.6.3/go.mod h1:c6b3ZP1MQ7o6lPR7Rv3lEf7pYQUmAcx8ABHgdZCQt/k= @@ -446,8 +448,8 @@ github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6 github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.46.0 h1:uz9AtEcIP63FH+FIyuAXcQGVQO4vCUavEsMTJpPeD4s= -github.com/golangci/golangci-lint v1.46.0/go.mod h1:IJpcNOUfx/XLRwE95FHQ6QtbhYwwqcm0H5QkwUfF4ZE= +github.com/golangci/golangci-lint v1.47.0 h1:h2s+ZGGF63fdzUtac+VYUHPsEO0ADTqHouI7Vase+FY= +github.com/golangci/golangci-lint v1.47.0/go.mod h1:3TZhfF5KolbIkXYjUFvER6G9CoxzLEaafr/u/QI1S5A= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= @@ -520,7 +522,7 @@ github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/Oth github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo= +github.com/gookit/color v1.5.1/go.mod h1:wZFzea4X8qN6vHOSP2apMb4/+w/orMznEzYsIHPaqKM= github.com/gopherjs/gopherjs 
v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U= @@ -589,8 +591,8 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= -github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -670,8 +672,8 @@ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.0 h1:YTDO4pNy7AUN/021p+JGHycQyYNIyMoenM1YDVK6RlY= -github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.1 h1:cErYo+J4SmEjdXZrVXGwLJCE2sB06s23LpkcyWNrT+s= 
+github.com/kisielk/errcheck v1.6.1/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= @@ -696,10 +698,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.6.2 h1:K4xulKkwOCnT1CDms6Ex3uG1dvSMUUQe9zxgYQgbRXs= -github.com/kulti/thelper v0.6.2/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= -github.com/kunwardeep/paralleltest v1.0.3 h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI= -github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= +github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g= +github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= @@ -719,16 +721,16 @@ github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool 
v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= -github.com/lufeee/execinquery v1.0.0 h1:1XUTuLIVPDlFvUU3LXmmZwHDsolsxXnY67lzhpeqe0I= -github.com/lufeee/execinquery v1.0.0/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= +github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= +github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q= +github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= @@ -822,8 +824,8 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6Fx github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e 
h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.7.11 h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA= -github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI= +github.com/nishanths/exhaustive v0.8.1 h1:0QKNascWv9qIHY7zRoZSxeRr6kuk5aAT3YXLTiDmjTo= +github.com/nishanths/exhaustive v0.8.1/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= @@ -846,17 +848,17 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega 
v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -887,8 +889,6 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.0.0/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= @@ -913,8 +913,8 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b h1:/BDyEJWLnDUYKGWdlNx/82qSaVu2bUok/EvPUtIGuvw= -github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= +github.com/polyfloyd/go-errorlint v1.0.0 h1:pDrQG0lrh68e602Wfp68BlUTRFoHn8PZYAjLgt2LFsM= +github.com/polyfloyd/go-errorlint v1.0.0/go.mod h1:KZy4xxPJyy88/gldCe5OdW6OQRtNO3EZE7hXzmnebgA= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= @@ -954,7 +954,7 @@ github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a h1:sWFav github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a/go.mod h1:VMX+OnnSw4LicdiEGtRSD/1X8kW7GuEscjYNr4cOIT4= github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/go-ruleguard/dsl v0.3.16/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.19/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.21/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 h1:PDWGei+Rf2bBiuZIbZmM20J2ftEy9IeUCHA8HbQqed8= @@ -990,19 +990,18 @@ github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8 github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= 
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= -github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA= github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/securego/gosec/v2 v2.11.0 h1:+PDkpzR41OI2jrw1q6AdXZCbsNGNGT7pQjal0H0cArI= -github.com/securego/gosec/v2 v2.11.0/go.mod h1:SX8bptShuG8reGC0XS09+a4H2BoWSJi+fscA+Pulbpo= +github.com/securego/gosec/v2 v2.12.0 h1:CQWdW7ATFpvLSohMVsajscfyHJ5rsGmEXmsNcsDNmAg= +github.com/securego/gosec/v2 v2.12.0/go.mod h1:iTpT+eKTw59bSgklBHlSnH5O2tNygHMDxfvMubA4i7I= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= +github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon 
v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -1014,8 +1013,10 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI= github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= -github.com/sivchari/tenv v1.5.0 h1:wxW0mFpKI6DIb3s6m1jCDYvkWXCskrimXMuGd0K/kSQ= -github.com/sivchari/tenv v1.5.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/sivchari/nosnakecase v1.5.0 h1:ZBvAu1H3uteN0KQ0IsLpIFOwYgPEhKLyv2ahrVkub6M= +github.com/sivchari/nosnakecase v1.5.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= +github.com/sivchari/tenv v1.6.0 h1:FyE4WysxLwYljKqWhTfOMjgKjBSnmzzg7lWOmpDiAcc= +github.com/sivchari/tenv v1.6.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY= @@ -1053,7 +1054,6 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= -github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= github.com/spf13/viper v1.12.0/go.mod 
h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= @@ -1077,6 +1077,7 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= @@ -1102,13 +1103,15 @@ github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.6.1 h1:Cf4a/iwuMp9s7kKrh74GTgijRVim0wEpKjgAsT7Wctw= -github.com/tomarrell/wrapcheck/v2 v2.6.1/go.mod h1:Eo+Opt6pyMW1b6cNllOcDSSoHO0aTJ+iF6BfCUbHltA= +github.com/tomarrell/wrapcheck/v2 v2.6.2 h1:3dI6YNcrJTQ/CJQ6M/DUkc0gnqYSIk6o0rChn9E/D0M= +github.com/tomarrell/wrapcheck/v2 v2.6.2/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s= github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= @@ -1120,8 +1123,8 @@ github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqz github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4= -github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= +github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= +github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= @@ -1156,8 +1159,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -gitlab.com/bosi/decorder v0.2.1 h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w= -gitlab.com/bosi/decorder v0.2.1/go.mod h1:6C/nhLSbF6qZbYD8bRmISBwc6vcWdNsiIBkRvjJFrH0= +gitlab.com/bosi/decorder v0.2.2 h1:LRfb3lP6mZWjUzpMOCLTVjcnl/SqZWBWmKNqQvMocQs= +gitlab.com/bosi/decorder v0.2.2/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= @@ -1165,15 +1168,12 @@ go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= -go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI= go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= go.etcd.io/etcd/client/v3 v3.5.0/go.mod 
h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= @@ -1235,7 +1235,6 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -1496,16 +1495,16 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220403020550-483a9cbc67c0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c h1:aFV+BgZ4svzjfabn8ERpuB4JI4N6/rdy1iusx77G3oU= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220702020025-31831981b65f h1:xdsejrW/0Wf2diT5CPp3XmKUNbr7Xvw8kYilQ+6qjRY= +golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1584,7 +1583,6 @@ golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWc golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1630,7 +1628,7 @@ golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1866,6 +1864,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -1881,8 +1880,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.3.1 h1:1kJlrWJLkaGXgcaeosRXViwviqjI7nkBvU2+sZW0AYc= -honnef.co/go/tools v0.3.1/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70= +honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34= +honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= From 8a815417b450c52037f02bde69dce7e124249869 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 19 Jul 2022 16:47:11 -0700 Subject: [PATCH 175/203] RFC 021: The Future of the Socket Protocol (#8584) --- docs/rfc/README.md | 1 + docs/rfc/rfc-021-socket-protocol.md | 266 ++++++++++++++++++++++++++++ 2 files changed, 267 insertions(+) create mode 100644 docs/rfc/rfc-021-socket-protocol.md diff --git a/docs/rfc/README.md b/docs/rfc/README.md index 2872c988ad..3a50832e6a 100644 --- a/docs/rfc/README.md +++ b/docs/rfc/README.md @@ -58,5 +58,6 @@ sections. 
- [RFC-018: BLS Signature Aggregation Exploration](./rfc-018-bls-agg-exploration.md) - [RFC-019: Configuration File Versioning](./rfc-019-config-version.md) - [RFC-020: Onboarding Projects](./rfc-020-onboarding-projects.rst) +- [RFC-022: The Future of the Socket Protocol](./rfc-021-socket-protocol.md) diff --git a/docs/rfc/rfc-021-socket-protocol.md b/docs/rfc/rfc-021-socket-protocol.md new file mode 100644 index 0000000000..74034d20a6 --- /dev/null +++ b/docs/rfc/rfc-021-socket-protocol.md @@ -0,0 +1,266 @@ +# RFC 021: The Future of the Socket Protocol + +## Changelog + +- 19-May-2022: Initial draft (@creachadair) +- 19-Jul-2022: Converted from ADR to RFC (@creachadair) + +## Abstract + +This RFC captures some technical discussion about the ABCI socket protocol that +was originally documented to solicit an architectural decision. This topic was +not high-enough priority as of this writing to justify making a final decision. + +For that reason, the text of this RFC has the general structure of an ADR, but +should be viewed primarily as a record of the issue for future reference. + +## Background + +The [Application Blockchain Interface (ABCI)][abci] is a client-server protocol +used by the Tendermint consensus engine to communicate with the application on +whose behalf it performs state replication. There are currently three transport +options available for ABCI applications: + +1. **In-process**: Applications written in Go can be linked directly into the + same binary as the consensus node. Such applications use a "local" ABCI + connection, which exposes application methods to the node as direct function + calls. + +2. **Socket protocol**: Out-of-process applications may export the ABCI service + via a custom socket protocol that sends requests and responses over a + Unix-domain or TCP socket connection as length-prefixed protocol buffers. + In Tendermint, this is handled by the [socket client][socket-client]. + +3. 
**gRPC**: Out-of-process applications may export the ABCI service via gRPC. + In Tendermint, this is handled by the [gRPC client][grpc-client]. + +Both the out-of-process options (2) and (3) have a long history in Tendermint. +The beginnings of the gRPC client were added in [May 2016][abci-start] when +ABCI was still hosted in a separate repository, and the socket client (formerly +called the "remote client") was part of ABCI from its inception in November +2015. + +At that time when ABCI was first being developed, the gRPC project was very new +(it launched Q4 2015) and it was not an obvious choice for use in Tendermint. +It took a while before the language coverage and quality of gRPC reached a +point where it could be a viable solution for out-of-process applications. For +that reason, it made sense for the initial design of ABCI to focus on a custom +protocol for out-of-process applications. + +## Problem Statement + +For practical reasons, ABCI needs an interprocess communication option to +support applications not written in Go. The two practical options are RPC and +FFI, and for operational reasons an RPC mechanism makes more sense. + +The socket protocol has not changed all that substantially since its original +design, and has the advantage of being simple to implement in almost any +reasonable language. However, its simplicity includes some limitations that +have had a negative impact on the stability and performance of out-of-process +applications using it. In particular: + +- The protocol lacks request identifiers, so the client and server must return + responses in strict FIFO order. Even if the client issues requests that have + no dependency on each other, the protocol has no way except order of issue to + map responses to requests. + + This reduces (in some cases substantially) the concurrency an application can + exploit, since the parallelism of requests in flight is gated by the slowest + active request at any moment. 
There have been complaints from some network + operators on that basis. + +- The protocol lacks method identifiers, so the only way for the client and + server to understand which operation is requested is to dispatch on the type + of the request and response payloads. For responses, this means that [any + error condition is terminal not only to the request, but to the entire ABCI + client](https://github.com/tendermint/tendermint/blob/master/abci/client/socket_client.go#L149). + + The historical intent of terminating for any error seems to have been that + all ABCI errors are unrecoverable and hence protocol fatal + (see [Note 1](#note1)). In practice, however, this greatly complicates + debugging a faulty node, since the only way to respond to errors is to panic + the node which loses valuable context that could have been logged. + +- There are subtle concurrency management dependencies between the client and + the server that are not clearly documented anywhere, and it is very easy for + small changes in both the client and the server to lead to tricky deadlocks, + panics, race conditions, and slowdowns. As a recent example of this, see + https://github.com/tendermint/tendermint/pull/8581. + +These limitations are fixable, but one important question is whether it is +worthwhile to fix them. We can add request and method identifiers, for +example, but doing so would be a breaking change to the protocol requiring +every application using it to update. If applications have to migrate anyway, +the stability and language coverage of gRPC have improved a lot, and today it +is probably simpler to set up and maintain an application using gRPC transport +than to reimplement the Tendermint socket protocol. + +Moreover, gRPC addresses all the above issues out-of-the-box, and requires +(much) less custom code for both the server (i.e., the application) and the +client. The project is well-funded and widely-used, which makes it a safe bet +for a dependency. 
+ +## Decision + +There is a set of related alternatives to consider: + +- Question 1: Designate a single IPC standard for out-of-process applications? + + Claim: We should converge on one (and only one) IPC option for out-of-process + applications. We should choose an option that, after a suitable period of + deprecation for alternatives, will address most or all the highest-impact + uses of Tendermint. Maintaining multiple options increases the surface area + for bugs and vulnerabilities, and we should not have multiple options for + basic interfaces without a clear and well-documented reason. + +- Question 2a: Choose gRPC and deprecate/remove the socket protocol? + + Claim: Maintaining and improving a custom RPC protocol is a substantial + project and not directly relevant to the requirements of consensus. We would + be better served by depending on a well-maintained open-source library like + gRPC. + +- Question 2b: Improve the socket protocol and deprecate/remove gRPC? + + Claim: If we find meaningful advantages to maintaining our own custom RPC + protocol in Tendermint, we should treat it as a first-class project within + the core and invest in making it good enough that we do not require other + options. + +**One important consideration** when discussing these questions is that _any +outcome which includes keeping the socket protocol will have eventual migration +impacts for out-of-process applications_ regardless. To fix the limitations of +the socket protocol as it is currently designed will require making _breaking +changes_ to the protocol. So, while we may put off a migration cost for +out-of-process applications by retaining the socket protocol in the short term, +we will eventually have to pay those costs to fix the problems in its current +design. + +## Detailed Design + +1. If we choose to standardize on gRPC, the main work in Tendermint core will + be removing and cleaning up the code for the socket client and server. 
+ + Besides the code cleanup, we will also need to clearly document a + deprecation schedule, and invest time in making the migration easier for + applications currently using the socket protocol. + + > **Point for discussion:** Migrating from the socket protocol to gRPC + > should mostly be a plumbing change, as long as we do it during a release + > in which we are not making other breaking changes to ABCI. However, the + > effort may be more or less depending on how gRPC integration works in the + > application's implementation language, and would have to be sure networks + > have plenty of time not only to make the change but to verify that it + > preserves the function of the network. + > + > What questions should we be asking node operators and application + > developers to understand the migration costs better? + +2. If we choose to keep only the socket protocol, we will need to follow up + with a more detailed design for extending and upgrading the protocol to fix + the existing performance and operational issues with the protocol. + + Moreover, since the gRPC interface has been around for a long time we will + also need a deprecation plan for it. + +3. If we choose to keep both options, we will still need to do all the work of + (2), but the gRPC implementation should not require any immediate changes. + + +## Alternatives Considered + +- **FFI**. Another approach we could take is to use a C-based FFI interface so + that applications written in other languages are linked directly with the + consensus node, an option currently only available for Go applications. + + An FFI interface is possible for a lot of languages, but FFI support varies + widely in coverage and quality across languages and the points of friction + can be tricky to work around. Moreover, it's much harder to add FFI support + to a language where it's missing after-the-fact for an application developer. 
+ + Although a basic FFI interface is not too difficult on the Go side, the C + shims for an FFI can get complicated if there's a lot of variability in the + runtime environment on the other end. + + If we want to have one answer for non-Go applications, we are better off + picking an IPC-based solution (whether that's gRPC or an extension of our + custom socket protocol or something else). + +## Consequences + +- **Standardize on gRPC** + + - ✅ Addresses existing performance and operational issues. + - ✅ Replaces custom code with a well-maintained widely-used library. + - ✅ Aligns with Cosmos SDK, which already uses gRPC extensively. + - ✅ Aligns with priv validator interface, for which the socket protocol is already deprecated for gRPC. + - ❓ Applications will be hard to implement in a language without gRPC support. + - ⛔ All users of the socket protocol have to migrate to gRPC, and we believe most current out-of-process applications use the socket protocol. + +- **Standardize on socket protocol** + + - ✅ Less immediate impact for existing users (but see below). + - ✅ Simplifies ABCI API surface by removing gRPC. + - ❓ Users of the socket protocol will have a (smaller) migration. + - ❓ Potentially easier to implement for languages that do not have support. + - ⛔ Need to do all the work to fix the socket protocol (which will require existing users to update anyway later). + - ⛔ Ongoing maintenance burden for per-language server implementations. + +- **Keep both options** + + - ✅ Less immediate impact for existing users (but see below). + - ❓ Users of the socket protocol will have a (smaller) migration. + - ⛔ Still need to do all the work to fix the socket protocol (which will require existing users to update anyway later). + - ⛔ Requires ongoing maintenance and support of both gRPC and socket protocol integrations. 
+ + +## References + +- [Application Blockchain Interface (ABCI)][abci] +- [Tendermint ABCI socket client][socket-client] +- [Tendermint ABCI gRPC client][grpc-client] +- [Initial commit of gRPC client][abci-start] + +[abci]: https://github.com/tendermint/spec/tree/master/spec/abci +[socket-client]: https://github.com/tendermint/tendermint/blob/master/abci/client/socket_client.go +[socket-server]: https://github.com/tendermint/tendermint/blob/master/abci/server/socket_server.go +[grpc-client]: https://github.com/tendermint/tendermint/blob/master/abci/client/grpc_client.go +[abci-start]: https://github.com/tendermint/abci/commit/1ab3c747182aaa38418258679c667090c2bb1e0d + +## Notes + +- **Note 1**: The choice to make all ABCI errors protocol-fatal + was intended to avoid the risk that recovering an application error could + cause application state to diverge. Divergence can break consensus, so it's + essential to avoid it. + + This is a sound principle, but conflates protocol errors with "mechanical" + errors such as timeouts, resources exhaustion, failed connections, and so on. + Because the protocol has no way to distinguish these conditions, the only way + for an application to report an error is to panic or crash. + + Whether a node is running in the same process as the application or as a + separate process, application errors should not be suppressed or hidden. + However, it's important to ensure that errors are handled at a consistent and + well-defined point in the protocol: Having the application panic or crash + rather than reporting an error means the node sees different results + depending on whether the application runs in-process or out-of-process, even + if the application logic is otherwise identical. + +## Appendix: Known Implementations of ABCI Socket Protocol + +This is a list of known implementations of the Tendermint custom socket +protocol.
Note that in most cases I have not checked how complete or correct +these implementations are; these are based on search results and a cursory +visual inspection. + +- Tendermint Core (Go): [client][socket-client], [server][socket-server] +- Informal Systems [tendermint-rs](https://github.com/informalsystems/tendermint-rs) (Rust): [client](https://github.com/informalsystems/tendermint-rs/blob/master/abci/src/client.rs), [server](https://github.com/informalsystems/tendermint-rs/blob/master/abci/src/server.rs) +- Tendermint [js-abci](https://github.com/tendermint/js-abci) (JS): [server](https://github.com/tendermint/js-abci/blob/master/src/server.js) +- [Hotmoka](https://github.com/Hotmoka/hotmoka) ABCI (Java): [server](https://github.com/Hotmoka/hotmoka/blob/master/io-hotmoka-tendermint-abci/src/main/java/io/hotmoka/tendermint_abci/Server.java) +- [Tower ABCI](https://github.com/penumbra-zone/tower-abci) (Rust): [server](https://github.com/penumbra-zone/tower-abci/blob/main/src/server.rs) +- [abci-host](https://github.com/datopia/abci-host) (Clojure): [server](https://github.com/datopia/abci-host/blob/master/src/abci/host.clj) +- [abci_server](https://github.com/KrzysiekJ/abci_server) (Erlang): [server](https://github.com/KrzysiekJ/abci_server/blob/master/src/abci_server.erl) +- [py-abci](https://github.com/davebryson/py-abci) (Python): [server](https://github.com/davebryson/py-abci/blob/master/src/abci/server.py) +- [scala-tendermint-server](https://github.com/intechsa/scala-tendermint-server) (Scala): [server](https://github.com/InTechSA/scala-tendermint-server/blob/master/src/main/scala/lu/intech/tendermint/Server.scala) +- [kepler](https://github.com/f-o-a-m/kepler) (Haskell): [server](https://github.com/f-o-a-m/kepler/blob/master/hs-abci-server/src/Network/ABCI/Server.hs) From 525624f1ce4c4a834b1c66e312d8531bfec2d06e Mon Sep 17 00:00:00 2001 From: "M. J.
Fromberger" Date: Wed, 20 Jul 2022 03:01:23 -0700 Subject: [PATCH 176/203] Fix a typo in the RFC ToC (#9047) --- docs/rfc/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/rfc/README.md b/docs/rfc/README.md index 3a50832e6a..c1461e8870 100644 --- a/docs/rfc/README.md +++ b/docs/rfc/README.md @@ -58,6 +58,6 @@ sections. - [RFC-018: BLS Signature Aggregation Exploration](./rfc-018-bls-agg-exploration.md) - [RFC-019: Configuration File Versioning](./rfc-019-config-version.md) - [RFC-020: Onboarding Projects](./rfc-020-onboarding-projects.rst) -- [RFC-022: The Future of the Socket Protocol](./rfc-021-socket-protocol.md) +- [RFC-021: The Future of the Socket Protocol](./rfc-021-socket-protocol.md) From fc3a24a669c83a7e8748967b7e9f0e7fe5401d57 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 20 Jul 2022 12:21:14 -0700 Subject: [PATCH 177/203] Disable upload-coverage-report workflow in CI. (#9056) As far as I know nobody looks at these reports anyway, and lately the codecov API has been failing for an expired certificate. 
--- .github/workflows/tests.yml | 41 ------------------------------------- 1 file changed, 41 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e9e47e25b4..26c7f4c50f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -32,44 +32,3 @@ jobs: run: | make test-group-${{ matrix.part }} NUM_SPLIT=6 if: env.GIT_DIFF - - uses: actions/upload-artifact@v3 - with: - name: "${{ github.sha }}-${{ matrix.part }}-coverage" - path: ./build/${{ matrix.part }}.profile.out - - upload-coverage-report: - runs-on: ubuntu-latest - needs: tests - steps: - - uses: actions/checkout@v3 - - uses: technote-space/get-diff-action@v6 - with: - PATTERNS: | - **/**.go - "!test/" - go.mod - go.sum - Makefile - - uses: actions/download-artifact@v3 - with: - name: "${{ github.sha }}-00-coverage" - if: env.GIT_DIFF - - uses: actions/download-artifact@v3 - with: - name: "${{ github.sha }}-01-coverage" - if: env.GIT_DIFF - - uses: actions/download-artifact@v3 - with: - name: "${{ github.sha }}-02-coverage" - if: env.GIT_DIFF - - uses: actions/download-artifact@v3 - with: - name: "${{ github.sha }}-03-coverage" - if: env.GIT_DIFF - - run: | - cat ./*profile.out | grep -v "mode: set" >> coverage.txt - if: env.GIT_DIFF - - uses: codecov/codecov-action@v3.1.0 - with: - file: ./coverage.txt - if: env.GIT_DIFF From fa32078ceb756962a541bfc85884ef81b13c0906 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 20 Jul 2022 14:37:13 -0700 Subject: [PATCH 178/203] Fix unbounded heap growth in the priority mempool. (#9052) This is a manual forward-port of #8944 and related fixes from v0.35.x. One difference of note is that the CheckTx response messages no longer have a field to record an error from the ABCI application. The code is set up so that these could be reported directly to the CheckTx caller, but it would be an API change, and right now a bunch of test plumbing depends on the existing semantics. 
Also fix up tests relying on implementation-specific mempool behavior. - Commit was setting the expected mempool size incorrectly. - Fix sequence test not to depend on the incorrect size. --- internal/consensus/mempool_test.go | 22 +- internal/libs/clist/bench_test.go | 2 +- internal/libs/clist/clist.go | 2 +- internal/mempool/cache.go | 13 + internal/mempool/mempool.go | 1005 +++++++++++------------ internal/mempool/mempool_test.go | 99 ++- internal/mempool/priority_queue.go | 158 ---- internal/mempool/priority_queue_test.go | 176 ---- internal/mempool/reactor.go | 6 +- internal/mempool/reactor_test.go | 6 +- internal/mempool/tx.go | 310 ++----- internal/mempool/tx_test.go | 231 ------ 12 files changed, 652 insertions(+), 1378 deletions(-) delete mode 100644 internal/mempool/priority_queue.go delete mode 100644 internal/mempool/priority_queue_test.go delete mode 100644 internal/mempool/tx_test.go diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index ed11f68409..59647f76b7 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -214,12 +214,13 @@ func TestMempoolRmBadTx(t *testing.T) { emptyMempoolCh := make(chan struct{}) checkTxRespCh := make(chan struct{}) go func() { - // Try to send the tx through the mempool. + // Try to send an out-of-sequence transaction through the mempool. 
// CheckTx should not err, but the app should return a bad abci code // and the tx should get removed from the pool + binary.BigEndian.PutUint64(txBytes, uint64(5)) err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, func(r *abci.ResponseCheckTx) { if r.Code != code.CodeTypeBadNonce { - t.Errorf("expected checktx to return bad nonce, got %v", r) + t.Errorf("expected checktx to return bad nonce, got %#v", r) return } checkTxRespCh <- struct{}{} @@ -312,14 +313,15 @@ func (app *CounterApplication) FinalizeBlock(_ context.Context, req *abci.Reques func (app *CounterApplication) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { app.mu.Lock() defer app.mu.Unlock() - - txValue := txAsUint64(req.Tx) - if txValue != uint64(app.mempoolTxCount) { - return &abci.ResponseCheckTx{ - Code: code.CodeTypeBadNonce, - }, nil + if req.Type == abci.CheckTxType_New { + txValue := txAsUint64(req.Tx) + if txValue != uint64(app.mempoolTxCount) { + return &abci.ResponseCheckTx{ + Code: code.CodeTypeBadNonce, + }, nil + } + app.mempoolTxCount++ } - app.mempoolTxCount++ return &abci.ResponseCheckTx{Code: code.CodeTypeOK}, nil } @@ -332,8 +334,6 @@ func txAsUint64(tx []byte) uint64 { func (app *CounterApplication) Commit(context.Context) (*abci.ResponseCommit, error) { app.mu.Lock() defer app.mu.Unlock() - - app.mempoolTxCount = app.txCount return &abci.ResponseCommit{}, nil } diff --git a/internal/libs/clist/bench_test.go b/internal/libs/clist/bench_test.go index ee5d836a7a..95973cc767 100644 --- a/internal/libs/clist/bench_test.go +++ b/internal/libs/clist/bench_test.go @@ -12,7 +12,7 @@ func BenchmarkDetaching(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { start.removed = true - start.detachNext() + start.DetachNext() start.DetachPrev() tmp := nxt nxt = nxt.Next() diff --git a/internal/libs/clist/clist.go b/internal/libs/clist/clist.go index 3969c94cce..9a0e5bcc84 100644 --- a/internal/libs/clist/clist.go +++ 
b/internal/libs/clist/clist.go @@ -103,7 +103,7 @@ func (e *CElement) Removed() bool { return isRemoved } -func (e *CElement) detachNext() { +func (e *CElement) DetachNext() { e.mtx.Lock() if !e.removed { e.mtx.Unlock() diff --git a/internal/mempool/cache.go b/internal/mempool/cache.go index c69fc80dd4..3986cd5853 100644 --- a/internal/mempool/cache.go +++ b/internal/mempool/cache.go @@ -22,6 +22,10 @@ type TxCache interface { // Remove removes the given raw transaction from the cache. Remove(tx types.Tx) + + // Has reports whether tx is present in the cache. Checking for presence is + // not treated as an access of the value. + Has(tx types.Tx) bool } var _ TxCache = (*LRUTxCache)(nil) @@ -97,6 +101,14 @@ func (c *LRUTxCache) Remove(tx types.Tx) { } } +func (c *LRUTxCache) Has(tx types.Tx) bool { + c.mtx.Lock() + defer c.mtx.Unlock() + + _, ok := c.cacheMap[tx.Key()] + return ok +} + // NopTxCache defines a no-op raw transaction cache. type NopTxCache struct{} @@ -105,3 +117,4 @@ var _ TxCache = (*NopTxCache)(nil) func (NopTxCache) Reset() {} func (NopTxCache) Push(types.Tx) bool { return true } func (NopTxCache) Remove(types.Tx) {} +func (NopTxCache) Has(types.Tx) bool { return false } diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index 2398180fc6..0354eb28a2 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -1,20 +1,20 @@ package mempool import ( - "bytes" "context" - "errors" "fmt" + "runtime" + "sort" "sync" "sync/atomic" "time" + "github.com/creachadair/taskgroup" abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/types" ) @@ -23,73 +23,41 @@ var _ Mempool = (*TxMempool)(nil) // TxMempoolOption sets an optional parameter on 
the TxMempool. type TxMempoolOption func(*TxMempool) -// TxMempool defines a prioritized mempool data structure used by the v1 mempool -// reactor. It keeps a thread-safe priority queue of transactions that is used -// when a block proposer constructs a block and a thread-safe linked-list that -// is used to gossip transactions to peers in a FIFO manner. +// TxMempool implemements the Mempool interface and allows the application to +// set priority values on transactions in the CheckTx response. When selecting +// transactions to include in a block, higher-priority transactions are chosen +// first. When evicting transactions from the mempool for size constraints, +// lower-priority transactions are evicted sooner. +// +// Within the mempool, transactions are ordered by time of arrival, and are +// gossiped to the rest of the network based on that order (gossip order does +// not take priority into account). type TxMempool struct { + // Immutable fields logger log.Logger - metrics *Metrics config *config.MempoolConfig proxyAppConn abciclient.Client + metrics *Metrics + cache TxCache // seen transactions - // txsAvailable fires once for each height when the mempool is not empty - txsAvailable chan struct{} - notifiedTxsAvailable bool - - // height defines the last block height process during Update() - height int64 - - // sizeBytes defines the total size of the mempool (sum of all tx bytes) - sizeBytes int64 - - // cache defines a fixed-size cache of already seen transactions as this - // reduces pressure on the proxyApp. - cache TxCache - - // txStore defines the main storage of valid transactions. Indexes are built - // on top of this store. - txStore *TxStore - - // gossipIndex defines the gossiping index of valid transactions via a - // thread-safe linked-list. We also use the gossip index as a cursor for - // rechecking transactions already in the mempool. 
- gossipIndex *clist.CList + // Atomically-updated fields + txsBytes int64 // atomic: the total size of all transactions in the mempool, in bytes - // recheckCursor and recheckEnd are used as cursors based on the gossip index - // to recheck transactions that are already in the mempool. Iteration is not - // thread-safe and transaction may be mutated in serial order. - // - // XXX/TODO: It might be somewhat of a codesmell to use the gossip index for - // iterator and cursor management when rechecking transactions. If the gossip - // index changes or is removed in a future refactor, this will have to be - // refactored. Instead, we should consider just keeping a slice of a snapshot - // of the mempool's current transactions during Update and an integer cursor - // into that slice. This, however, requires additional O(n) space complexity. - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - - // priorityIndex defines the priority index of valid transactions via a - // thread-safe priority queue. - priorityIndex *TxPriorityQueue - - // heightIndex defines a height-based, in ascending order, transaction index. - // i.e. older transactions are first. - heightIndex *WrappedTxList - - // timestampIndex defines a timestamp-based, in ascending order, transaction - // index. i.e. older transactions are first. - timestampIndex *WrappedTxList - - // A read/write lock is used to safe guard updates, insertions and deletions - // from the mempool. A read-lock is implicitly acquired when executing CheckTx, - // however, a caller must explicitly grab a write-lock via Lock when updating - // the mempool via Update(). - mtx sync.RWMutex - preCheck PreCheckFunc - postCheck PostCheckFunc + // Synchronized fields, protected by mtx. 
+ mtx *sync.RWMutex + notifiedTxsAvailable bool + txsAvailable chan struct{} // one value sent per height when mempool is not empty + preCheck PreCheckFunc + postCheck PostCheckFunc + height int64 // the latest height passed to Update + + txs *clist.CList // valid transactions (passed CheckTx) + txByKey map[types.TxKey]*clist.CElement + txBySender map[string]*clist.CElement // for sender != "" } +// NewTxMempool constructs a new, empty priority mempool at the specified +// initial height and using the given config and options. func NewTxMempool( logger log.Logger, cfg *config.MempoolConfig, @@ -98,23 +66,16 @@ func NewTxMempool( ) *TxMempool { txmp := &TxMempool{ - logger: logger, - config: cfg, - proxyAppConn: proxyAppConn, - height: -1, - cache: NopTxCache{}, - metrics: NopMetrics(), - txStore: NewTxStore(), - gossipIndex: clist.New(), - priorityIndex: NewTxPriorityQueue(), - heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }), - timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp) - }), + logger: logger, + config: cfg, + proxyAppConn: proxyAppConn, + metrics: NopMetrics(), + cache: NopTxCache{}, + txs: clist.New(), + mtx: new(sync.RWMutex), + txByKey: make(map[types.TxKey]*clist.CElement), + txBySender: make(map[string]*clist.CElement), } - if cfg.CacheSize > 0 { txmp.cache = NewLRUTxCache(cfg.CacheSize) } @@ -147,45 +108,34 @@ func WithMetrics(metrics *Metrics) TxMempoolOption { // Lock obtains a write-lock on the mempool. A caller must be sure to explicitly // release the lock when finished. -func (txmp *TxMempool) Lock() { - txmp.mtx.Lock() -} +func (txmp *TxMempool) Lock() { txmp.mtx.Lock() } // Unlock releases a write-lock on the mempool. 
-func (txmp *TxMempool) Unlock() { - txmp.mtx.Unlock() -} +func (txmp *TxMempool) Unlock() { txmp.mtx.Unlock() } // Size returns the number of valid transactions in the mempool. It is // thread-safe. -func (txmp *TxMempool) Size() int { - return txmp.txStore.Size() -} +func (txmp *TxMempool) Size() int { return txmp.txs.Len() } // SizeBytes return the total sum in bytes of all the valid transactions in the // mempool. It is thread-safe. -func (txmp *TxMempool) SizeBytes() int64 { - return atomic.LoadInt64(&txmp.sizeBytes) -} +func (txmp *TxMempool) SizeBytes() int64 { return atomic.LoadInt64(&txmp.txsBytes) } -// FlushAppConn executes FlushSync on the mempool's proxyAppConn. +// FlushAppConn executes Flush on the mempool's proxyAppConn. // -// NOTE: The caller must obtain a write-lock prior to execution. +// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before +// calling FlushAppConn. func (txmp *TxMempool) FlushAppConn(ctx context.Context) error { - return txmp.proxyAppConn.Flush(ctx) -} - -// WaitForNextTx returns a blocking channel that will be closed when the next -// valid transaction is available to gossip. It is thread-safe. -func (txmp *TxMempool) WaitForNextTx() <-chan struct{} { - return txmp.gossipIndex.WaitChan() -} + // N.B.: We have to issue the call outside the lock so that its callback can + // fire. It's safe to do this, the flush will block until complete. + // + // We could just not require the caller to hold the lock at all, but the + // semantics of the Mempool interface require the caller to hold it, and we + // can't change that without disrupting existing use. + txmp.mtx.Unlock() + defer txmp.mtx.Lock() -// NextGossipTx returns the next valid transaction to gossip. A caller must wait -// for WaitForNextTx to signal a transaction is available to gossip first. It is -// thread-safe. 
-func (txmp *TxMempool) NextGossipTx() *clist.CElement { - return txmp.gossipIndex.Front() + return txmp.proxyAppConn.Flush(ctx) } // EnableTxsAvailable enables the mempool to trigger events when transactions @@ -199,232 +149,249 @@ func (txmp *TxMempool) EnableTxsAvailable() { // TxsAvailable returns a channel which fires once for every height, and only // when transactions are available in the mempool. It is thread-safe. -func (txmp *TxMempool) TxsAvailable() <-chan struct{} { - return txmp.txsAvailable -} +func (txmp *TxMempool) TxsAvailable() <-chan struct{} { return txmp.txsAvailable } -// CheckTx executes the ABCI CheckTx method for a given transaction. -// It acquires a read-lock and attempts to execute the application's -// CheckTx ABCI method synchronously. We return an error if any of -// the following happen: +// CheckTx adds the given transaction to the mempool if it fits and passes the +// application's ABCI CheckTx method. +// +// CheckTx reports an error without adding tx if: // -// - The CheckTx execution fails. -// - The transaction already exists in the cache and we've already received the -// transaction from the peer. Otherwise, if it solely exists in the cache, we -// return nil. -// - The transaction size exceeds the maximum transaction size as defined by the -// configuration provided to the mempool. -// - The transaction fails Pre-Check (if it is defined). -// - The proxyAppConn fails, e.g. the buffer is full. +// - The size of tx exceeds the configured maximum transaction size. +// - The pre-check hook is defined and reports an error for tx. +// - The transaction already exists in the cache. +// - The proxy connection to the application fails. // -// If the mempool is full, we still execute CheckTx and attempt to find a lower -// priority transaction to evict. If such a transaction exists, we remove the -// lower priority transaction and add the new one with higher priority. 
+// If tx passes all of the above conditions, it is passed (asynchronously) to +// the application's ABCI CheckTx method and this CheckTx method returns nil. +// If cb != nil, it is called when the ABCI request completes to report the +// application response. // -// NOTE: -// - The applications' CheckTx implementation may panic. -// - The caller is not to explicitly require any locks for executing CheckTx. +// If the application accepts the transaction and the mempool is full, the +// mempool evicts one or more of the lowest-priority transaction whose priority +// is (strictly) lower than the priority of tx and whose size together exceeds +// the size of tx, and adds tx instead. If no such transactions exist, tx is +// discarded. func (txmp *TxMempool) CheckTx( ctx context.Context, tx types.Tx, cb func(*abci.ResponseCheckTx), txInfo TxInfo, ) error { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - if txSize := len(tx); txSize > txmp.config.MaxTxBytes { - return types.ErrTxTooLarge{ - Max: txmp.config.MaxTxBytes, - Actual: txSize, + // During the initial phase of CheckTx, we do not need to modify any state. + // A transaction will not actually be added to the mempool until it survives + // a call to the ABCI CheckTx method and size constraint checks. + height, err := func() (int64, error) { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + // Reject transactions in excess of the configured maximum transaction size. + if len(tx) > txmp.config.MaxTxBytes { + return 0, types.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)} } - } - if txmp.preCheck != nil { - if err := txmp.preCheck(tx); err != nil { - return types.ErrPreCheck{Reason: err} + // If a precheck hook is defined, call it before invoking the application. 
+ if txmp.preCheck != nil { + if err := txmp.preCheck(tx); err != nil { + return 0, types.ErrPreCheck{Reason: err} + } } - } - if err := txmp.proxyAppConn.Error(); err != nil { - return err - } + // Early exit if the proxy connection has an error. + if err := txmp.proxyAppConn.Error(); err != nil { + return 0, err + } - txHash := tx.Key() + txKey := tx.Key() - // We add the transaction to the mempool's cache and if the - // transaction is already present in the cache, i.e. false is returned, then we - // check if we've seen this transaction and error if we have. - if !txmp.cache.Push(tx) { - txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID) - return types.ErrTxInCache + // Check for the transaction in the cache. + if !txmp.cache.Push(tx) { + // If the cached transaction is also in the pool, record its sender. + if elt, ok := txmp.txByKey[txKey]; ok { + w := elt.Value.(*WrappedTx) + w.SetPeer(txInfo.SenderID) + } + return 0, types.ErrTxInCache + } + return txmp.height, nil + }() + if err != nil { + return err } - res, err := txmp.proxyAppConn.CheckTx(ctx, &abci.RequestCheckTx{Tx: tx}) + // Invoke an ABCI CheckTx for this transaction. + rsp, err := txmp.proxyAppConn.CheckTx(ctx, &abci.RequestCheckTx{Tx: tx}) if err != nil { txmp.cache.Remove(tx) return err } - - if txmp.recheckCursor != nil { - return errors.New("recheck cursor is non-nil") - } - wtx := &WrappedTx{ tx: tx, - hash: txHash, + hash: tx.Key(), timestamp: time.Now().UTC(), - height: txmp.height, - } - - txmp.defaultTxCallback(tx, res) - err = txmp.initTxCallback(wtx, res, txInfo) - - if err != nil { - return err + height: height, } + wtx.SetPeer(txInfo.SenderID) if cb != nil { - cb(res) + cb(rsp) } - - return nil + return txmp.addNewTransaction(wtx, rsp) } +// RemoveTxByKey removes the transaction with the specified key from the +// mempool. It reports an error if no such transaction exists. This operation +// does not remove the transaction from the cache. 
func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { - txmp.Lock() - defer txmp.Unlock() + txmp.mtx.Lock() + defer txmp.mtx.Unlock() + return txmp.removeTxByKey(txKey) +} - // remove the committed transaction from the transaction store and indexes - if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil { - txmp.removeTx(wtx, false) +// removeTxByKey removes the specified transaction key from the mempool. +// The caller must hold txmp.mtx exclusively. +func (txmp *TxMempool) removeTxByKey(key types.TxKey) error { + if elt, ok := txmp.txByKey[key]; ok { + w := elt.Value.(*WrappedTx) + delete(txmp.txByKey, key) + delete(txmp.txBySender, w.sender) + txmp.txs.Remove(elt) + elt.DetachPrev() + elt.DetachNext() + atomic.AddInt64(&txmp.txsBytes, -w.Size()) return nil } + return fmt.Errorf("transaction %x not found", key) +} - return errors.New("transaction not found") +// removeTxByElement removes the specified transaction element from the mempool. +// The caller must hold txmp.mtx exclusively. +func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) { + w := elt.Value.(*WrappedTx) + delete(txmp.txByKey, w.tx.Key()) + delete(txmp.txBySender, w.sender) + txmp.txs.Remove(elt) + elt.DetachPrev() + elt.DetachNext() + atomic.AddInt64(&txmp.txsBytes, -w.Size()) } -// Flush empties the mempool. It acquires a read-lock, fetches all the -// transactions currently in the transaction store and removes each transaction -// from the store and all indexes and finally resets the cache. -// -// NOTE: -// - Flushing the mempool may leave the mempool in an inconsistent state. +// Flush purges the contents of the mempool and the cache, leaving both empty. +// The current height is not modified by this operation. 
func (txmp *TxMempool) Flush() { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - txmp.heightIndex.Reset() - txmp.timestampIndex.Reset() + txmp.mtx.Lock() + defer txmp.mtx.Unlock() - for _, wtx := range txmp.txStore.GetAllTxs() { - txmp.removeTx(wtx, false) + // Remove all the transactions in the list explicitly, so that the sizes + // and indexes get updated properly. + cur := txmp.txs.Front() + for cur != nil { + next := cur.Next() + txmp.removeTxByElement(cur) + cur = next } - - atomic.SwapInt64(&txmp.sizeBytes, 0) txmp.cache.Reset() } -// ReapMaxBytesMaxGas returns a list of transactions within the provided size -// and gas constraints. Transaction are retrieved in priority order. -// -// NOTE: -// - Transactions returned are not removed from the mempool transaction -// store or indexes. -func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { +// allEntriesSorted returns a slice of all the transactions currently in the +// mempool, sorted in nonincreasing order by priority with ties broken by +// increasing order of arrival time. +func (txmp *TxMempool) allEntriesSorted() []*WrappedTx { txmp.mtx.RLock() defer txmp.mtx.RUnlock() - var ( - totalGas int64 - totalSize int64 - ) - - // wTxs contains a list of *WrappedTx retrieved from the priority queue that - // need to be re-enqueued prior to returning. - wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs()) - defer func() { - for _, wtx := range wTxs { - txmp.priorityIndex.PushTx(wtx) - } - }() - - txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs()) - for txmp.priorityIndex.NumTxs() > 0 { - wtx := txmp.priorityIndex.PopTx() - txs = append(txs, wtx.tx) - wTxs = append(wTxs, wtx) - size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) - - // Ensure we have capacity for the transaction with respect to the - // transaction size. 
- if maxBytes > -1 && totalSize+size > maxBytes { - return txs[:len(txs)-1] + all := make([]*WrappedTx, 0, len(txmp.txByKey)) + for _, tx := range txmp.txByKey { + all = append(all, tx.Value.(*WrappedTx)) + } + sort.Slice(all, func(i, j int) bool { + if all[i].priority == all[j].priority { + return all[i].timestamp.Before(all[j].timestamp) } + return all[i].priority > all[j].priority // N.B. higher priorities first + }) + return all +} - totalSize += size - - // ensure we have capacity for the transaction with respect to total gas - gas := totalGas + wtx.gasWanted - if maxGas > -1 && gas > maxGas { - return txs[:len(txs)-1] +// ReapMaxBytesMaxGas returns a slice of valid transactions that fit within the +// size and gas constraints. The results are ordered by nonincreasing priority, +// with ties broken by increasing order of arrival. Reaping transactions does +// not remove them from the mempool. +// +// If maxBytes < 0, no limit is set on the total size in bytes. +// If maxGas < 0, no limit is set on the total gas cost. +// +// If the mempool is empty or has no transactions fitting within the given +// constraints, the result will also be empty. +func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { + var totalGas, totalBytes int64 + + var keep []types.Tx //nolint:prealloc + for _, w := range txmp.allEntriesSorted() { + // N.B. When computing byte size, we need to include the overhead for + // encoding as protobuf to send to the application. + totalGas += w.gasWanted + totalBytes += types.ComputeProtoSizeForTxs([]types.Tx{w.tx}) + if (maxGas >= 0 && totalGas > maxGas) || (maxBytes >= 0 && totalBytes > maxBytes) { + break } - - totalGas = gas + keep = append(keep, w.tx) } - - return txs + return keep } -// ReapMaxTxs returns a list of transactions within the provided number of -// transactions bound. Transaction are retrieved in priority order. 
-// -// NOTE: -// - Transactions returned are not removed from the mempool transaction -// store or indexes. -func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() +// TxsWaitChan returns a channel that is closed when there is at least one +// transaction available to be gossiped. +func (txmp *TxMempool) TxsWaitChan() <-chan struct{} { return txmp.txs.WaitChan() } - numTxs := txmp.priorityIndex.NumTxs() - if max < 0 { - max = numTxs - } +// TxsFront returns the frontmost element of the pending transaction list. +// It will be nil if the mempool is empty. +func (txmp *TxMempool) TxsFront() *clist.CElement { return txmp.txs.Front() } - cap := tmmath.MinInt(numTxs, max) +// ReapMaxTxs returns up to max transactions from the mempool. The results are +// ordered by nonincreasing priority with ties broken by increasing order of +// arrival. Reaping transactions does not remove them from the mempool. +// +// If max < 0, all transactions in the mempool are reaped. +// +// The result may have fewer than max elements (possibly zero) if the mempool +// does not have that many transactions available. +func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { + var keep []types.Tx //nolint:prealloc - // wTxs contains a list of *WrappedTx retrieved from the priority queue that - // need to be re-enqueued prior to returning. 
- wTxs := make([]*WrappedTx, 0, cap) - txs := make([]types.Tx, 0, cap) - for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max { - wtx := txmp.priorityIndex.PopTx() - txs = append(txs, wtx.tx) - wTxs = append(wTxs, wtx) - } - for _, wtx := range wTxs { - txmp.priorityIndex.PushTx(wtx) + for _, w := range txmp.allEntriesSorted() { + if max >= 0 && len(keep) >= max { + break + } + keep = append(keep, w.tx) } - return txs + return keep } -// Update iterates over all the transactions provided by the block producer, -// removes them from the cache (if applicable), and removes -// the transactions from the main transaction store and associated indexes. -// If there are transactions remaining in the mempool, we initiate a -// re-CheckTx for them (if applicable), otherwise, we notify the caller more -// transactions are available. +// Update removes all the given transactions from the mempool and the cache, +// and updates the current block height. The blockTxs and deliverTxResponses +// must have the same length with each response corresponding to the tx at the +// same offset. // -// NOTE: -// - The caller must explicitly acquire a write-lock. +// If the configuration enables recheck, Update sends each remaining +// transaction after removing blockTxs to the ABCI CheckTx method. Any +// transactions marked as invalid during recheck are also removed. +// +// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before +// calling Update. func (txmp *TxMempool) Update( ctx context.Context, blockHeight int64, blockTxs types.Txs, - execTxResult []*abci.ExecTxResult, + deliverTxResponses []*abci.ExecTxResult, newPreFn PreCheckFunc, newPostFn PostCheckFunc, recheck bool, ) error { + // Safety check: Transactions and responses must match in number. 
+ if len(blockTxs) != len(deliverTxResponses) { + panic(fmt.Sprintf("mempool: got %d transactions but %d ExecTx responses", + len(blockTxs), len(deliverTxResponses))) + } + txmp.height = blockHeight txmp.notifiedTxsAvailable = false @@ -436,18 +403,17 @@ func (txmp *TxMempool) Update( } for i, tx := range blockTxs { - if execTxResult[i].Code == abci.CodeTypeOK { - // add the valid committed transaction to the cache (if missing) + // Add successful committed transactions to the cache (if they are not + // already present). Transactions that failed to commit are removed from + // the cache unless the operator has explicitly requested we keep them. + if deliverTxResponses[i].Code == abci.CodeTypeOK { _ = txmp.cache.Push(tx) } else if !txmp.config.KeepInvalidTxsInCache { - // allow invalid transactions to be re-submitted txmp.cache.Remove(tx) } - // remove the committed transaction from the transaction store and indexes - if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil { - txmp.removeTx(wtx, false) - } + // Regardless of success, remove the transaction from the mempool. + _ = txmp.removeTxByKey(tx.Key()) } txmp.purgeExpiredTxs(blockHeight) @@ -455,134 +421,173 @@ func (txmp *TxMempool) Update( // If there any uncommitted transactions left in the mempool, we either // initiate re-CheckTx per remaining transaction or notify that remaining // transactions are left. - if txmp.Size() > 0 { + size := txmp.Size() + txmp.metrics.Size.Set(float64(size)) + if size > 0 { if recheck { - txmp.logger.Debug( - "executing re-CheckTx for all remaining transactions", - "num_txs", txmp.Size(), - "height", blockHeight, - ) - txmp.updateReCheckTxs(ctx) + txmp.recheckTransactions(ctx) } else { txmp.notifyTxsAvailable() } } - - txmp.metrics.Size.Set(float64(txmp.Size())) return nil } -// initTxCallback is the callback invoked for a new unique transaction after CheckTx -// has been executed by the ABCI application for the first time on that transaction. 
-// CheckTx can be called again for the same transaction later when re-checking; -// however, this callback will not be called. +// addNewTransaction handles the ABCI CheckTx response for the first time a +// transaction is added to the mempool. A recheck after a block is committed +// goes to handleRecheckResult. // -// initTxCallback runs after the ABCI application executes CheckTx. -// It runs the postCheck hook if one is defined on the mempool. -// If the CheckTx response response code is not OK, or if the postCheck hook -// reports an error, the transaction is rejected. Otherwise, we attempt to insert -// the transaction into the mempool. +// If either the application rejected the transaction or a post-check hook is +// defined and rejects the transaction, it is discarded. // -// When inserting a transaction, we first check if there is sufficient capacity. -// If there is, the transaction is added to the txStore and all indexes. -// Otherwise, if the mempool is full, we attempt to find a lower priority transaction -// to evict in place of the new incoming transaction. If no such transaction exists, -// the new incoming transaction is rejected. +// Otherwise, if the mempool is full, check for lower-priority transactions +// that can be evicted to make room for the new one. If no such transactions +// exist, this transaction is logged and dropped; otherwise the selected +// transactions are evicted. // -// NOTE: -// - An explicit lock is NOT required. -func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, txInfo TxInfo) error { +// Finally, the new transaction is added and size stats updated. 
+func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.ResponseCheckTx) error { + txmp.mtx.Lock() + defer txmp.mtx.Unlock() + var err error if txmp.postCheck != nil { - err = txmp.postCheck(wtx.tx, res) + err = txmp.postCheck(wtx.tx, checkTxRes) } - if err != nil || res.Code != abci.CodeTypeOK { - // ignore bad transactions + if err != nil || checkTxRes.Code != abci.CodeTypeOK { txmp.logger.Info( "rejected bad transaction", - "priority", wtx.priority, + "priority", wtx.Priority(), "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "peer_id", txInfo.SenderNodeID, - "code", res.Code, + "peer_id", wtx.peers, + "code", checkTxRes.Code, "post_check_err", err, ) txmp.metrics.FailedTxs.Add(1) + // Remove the invalid transaction from the cache, unless the operator has + // instructed us to keep invalid transactions. if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(wtx.tx) } - return err + + if err != nil { + return err + } + // TODO(creachadair): Report an error for an invalid transaction. + // This is an API change, unfortunately, but should be made safe if it isn't. + // fmt.Errorf("invalid transaction: ABCI response code %d", checkTxRes.Code) + return nil } - sender := res.Sender - priority := res.Priority + priority := checkTxRes.Priority + sender := checkTxRes.Sender - if len(sender) > 0 { - if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil { - txmp.logger.Error( - "rejected incoming good transaction; tx already exists for sender", - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + // Disallow multiple concurrent transactions from the same sender assigned + // by the ABCI application. As a special case, an empty sender is not + // restricted. 
+ if sender != "" { + elt, ok := txmp.txBySender[sender] + if ok { + w := elt.Value.(*WrappedTx) + txmp.logger.Debug( + "rejected valid incoming transaction; tx already exists for sender", + "tx", fmt.Sprintf("%X", w.tx.Hash()), "sender", sender, ) txmp.metrics.RejectedTxs.Add(1) + // TODO(creachadair): Report an error for a duplicate sender. + // This is an API change, unfortunately, but should be made safe if it isn't. + // fmt.Errorf("transaction rejected: tx already exists for sender %q (%X)", sender, w.tx.Hash()) return nil } } + // At this point the application has ruled the transaction valid, but the + // mempool might be full. If so, find the lowest-priority items with lower + // priority than the application assigned to this new one, and evict as many + // of them as necessary to make room for tx. If no such items exist, we + // discard tx. + if err := txmp.canAddTx(wtx); err != nil { - evictTxs := txmp.priorityIndex.GetEvictableTxs( - priority, - int64(wtx.Size()), - txmp.SizeBytes(), - txmp.config.MaxTxsBytes, - ) - if len(evictTxs) == 0 { - // No room for the new incoming transaction so we just remove it from - // the cache and return an error to the user. + var victims []*clist.CElement // eligible transactions for eviction + var victimBytes int64 // total size of victims + for cur := txmp.txs.Front(); cur != nil; cur = cur.Next() { + cw := cur.Value.(*WrappedTx) + if cw.priority < priority { + victims = append(victims, cur) + victimBytes += cw.Size() + } + } + + // If there are no suitable eviction candidates, or the total size of + // those candidates is not enough to make room for the new transaction, + // drop the new one. 
+ if len(victims) == 0 || victimBytes < wtx.Size() { txmp.cache.Remove(wtx.tx) txmp.logger.Error( - "rejected incoming good transaction; mempool full", + "rejected valid incoming transaction; mempool is full", "tx", fmt.Sprintf("%X", wtx.tx.Hash()), "err", err.Error(), ) txmp.metrics.RejectedTxs.Add(1) - return err + // TODO(creachadair): Report an error for a full mempool. + // This is an API change, unfortunately, but should be made safe if it isn't. + // fmt.Errorf("transaction rejected: mempool is full (%X)", wtx.tx.Hash()) + return nil } - // evict an existing transaction(s) - // - // NOTE: - // - The transaction, toEvict, can be removed while a concurrent - // reCheckTx callback is being executed for the same transaction. - for _, toEvict := range evictTxs { - txmp.removeTx(toEvict, true) + txmp.logger.Debug("evicting lower-priority transactions", + "new_tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "new_priority", priority, + ) + + // Sort lowest priority items first so they will be evicted first. Break + // ties in favor of newer items (to maintain FIFO semantics in a group). + sort.Slice(victims, func(i, j int) bool { + iw := victims[i].Value.(*WrappedTx) + jw := victims[j].Value.(*WrappedTx) + if iw.Priority() == jw.Priority() { + return iw.timestamp.After(jw.timestamp) + } + return iw.Priority() < jw.Priority() + }) + + // Evict as many of the victims as necessary to make room. 
+ var evictedBytes int64 + for _, vic := range victims { + w := vic.Value.(*WrappedTx) + txmp.logger.Debug( - "evicted existing good transaction; mempool full", - "old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()), - "old_priority", toEvict.priority, - "new_tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "new_priority", wtx.priority, + "evicted valid existing transaction; mempool full", + "old_tx", fmt.Sprintf("%X", w.tx.Hash()), + "old_priority", w.priority, ) + txmp.removeTxByElement(vic) + txmp.cache.Remove(w.tx) txmp.metrics.EvictedTxs.Add(1) + + // We may not need to evict all the eligible transactions. Bail out + // early if we have made enough room. + evictedBytes += w.Size() + if evictedBytes >= wtx.Size() { + break + } } } - wtx.gasWanted = res.GasWanted - wtx.priority = priority - wtx.sender = sender - wtx.peers = map[uint16]struct{}{ - txInfo.SenderID: {}, - } + wtx.SetGasWanted(checkTxRes.GasWanted) + wtx.SetPriority(priority) + wtx.SetSender(sender) + txmp.insertTx(wtx) txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size())) txmp.metrics.Size.Set(float64(txmp.Size())) - - txmp.insertTx(wtx) txmp.logger.Debug( - "inserted good transaction", - "priority", wtx.priority, + "inserted new valid transaction", + "priority", wtx.Priority(), "tx", fmt.Sprintf("%X", wtx.tx.Hash()), "height", txmp.height, "num_txs", txmp.Size(), @@ -591,149 +596,129 @@ func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, return nil } -// defaultTxCallback is the CheckTx application callback used when a -// transaction is being re-checked (if re-checking is enabled). The -// caller must hold a mempool write-lock (via Lock()) and when -// executing Update(), if the mempool is non-empty and Recheck is -// enabled, then all remaining transactions will be rechecked via -// CheckTx. The order transactions are rechecked must be the same as -// the order in which this callback is called. 
-func (txmp *TxMempool) defaultTxCallback(tx types.Tx, res *abci.ResponseCheckTx) { - if txmp.recheckCursor == nil { - return +func (txmp *TxMempool) insertTx(wtx *WrappedTx) { + elt := txmp.txs.PushBack(wtx) + txmp.txByKey[wtx.tx.Key()] = elt + if s := wtx.Sender(); s != "" { + txmp.txBySender[s] = elt } - txmp.metrics.RecheckTimes.Add(1) - - wtx := txmp.recheckCursor.Value.(*WrappedTx) - - // Search through the remaining list of tx to recheck for a transaction that matches - // the one we received from the ABCI application. - for { - if bytes.Equal(tx, wtx.tx) { - // We've found a tx in the recheck list that matches the tx that we - // received from the ABCI application. - // Break, and use this transaction for further checks. - break - } - - txmp.logger.Error( - "re-CheckTx transaction mismatch", - "got", wtx.tx.Hash(), - "expected", tx.Key(), - ) + atomic.AddInt64(&txmp.txsBytes, wtx.Size()) +} - if txmp.recheckCursor == txmp.recheckEnd { - // we reached the end of the recheckTx list without finding a tx - // matching the one we received from the ABCI application. - // Return without processing any tx. - txmp.recheckCursor = nil - return - } +// handleRecheckResult handles the responses from ABCI CheckTx calls issued +// during the recheck phase of a block Update. It removes any transactions +// invalidated by the application. +// +// This method is NOT executed for the initial CheckTx on a new transaction; +// that case is handled by addNewTransaction instead. +func (txmp *TxMempool) handleRecheckResult(tx types.Tx, checkTxRes *abci.ResponseCheckTx) { + txmp.metrics.RecheckTimes.Add(1) + txmp.mtx.Lock() + defer txmp.mtx.Unlock() - txmp.recheckCursor = txmp.recheckCursor.Next() - wtx = txmp.recheckCursor.Value.(*WrappedTx) + // Find the transaction reported by the ABCI callback. It is possible the + // transaction was evicted during the recheck, in which case the transaction + // will be gone. 
+ elt, ok := txmp.txByKey[tx.Key()] + if !ok { + return } + wtx := elt.Value.(*WrappedTx) - // Only evaluate transactions that have not been removed. This can happen - // if an existing transaction is evicted during CheckTx and while this - // callback is being executed for the same evicted transaction. - if !txmp.txStore.IsTxRemoved(wtx.hash) { - var err error - if txmp.postCheck != nil { - err = txmp.postCheck(tx, res) - } - - if res.Code == abci.CodeTypeOK && err == nil { - wtx.priority = res.Priority - } else { - txmp.logger.Debug( - "existing transaction no longer valid; failed re-CheckTx callback", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "err", err, - "code", res.Code, - ) - - if wtx.gossipEl != txmp.recheckCursor { - panic("corrupted reCheckTx cursor") - } - - txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache) - } + // If a postcheck hook is defined, call it before checking the result. + var err error + if txmp.postCheck != nil { + err = txmp.postCheck(tx, checkTxRes) } - // move reCheckTx cursor to next element - if txmp.recheckCursor == txmp.recheckEnd { - txmp.recheckCursor = nil - } else { - txmp.recheckCursor = txmp.recheckCursor.Next() + if checkTxRes.Code == abci.CodeTypeOK && err == nil { + wtx.SetPriority(checkTxRes.Priority) + return // N.B. Size of mempool did not change } - if txmp.recheckCursor == nil { - txmp.logger.Debug("finished rechecking transactions") - - if txmp.Size() > 0 { - txmp.notifyTxsAvailable() - } + txmp.logger.Debug( + "existing transaction no longer valid; failed re-CheckTx callback", + "priority", wtx.Priority(), + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "err", err, + "code", checkTxRes.Code, + ) + txmp.removeTxByElement(elt) + txmp.metrics.FailedTxs.Add(1) + if !txmp.config.KeepInvalidTxsInCache { + txmp.cache.Remove(wtx.tx) } - txmp.metrics.Size.Set(float64(txmp.Size())) } -// updateReCheckTxs updates the recheck cursors using the gossipIndex. 
For -// each transaction, it executes CheckTx. The global callback defined on -// the proxyAppConn will be executed for each transaction after CheckTx is -// executed. +// recheckTransactions initiates re-CheckTx ABCI calls for all the transactions +// currently in the mempool. It reports the number of recheck calls that were +// successfully initiated. // -// NOTE: -// - The caller must have a write-lock when executing updateReCheckTxs. -func (txmp *TxMempool) updateReCheckTxs(ctx context.Context) { +// Precondition: The mempool is not empty. +// The caller must hold txmp.mtx exclusively. +func (txmp *TxMempool) recheckTransactions(ctx context.Context) { if txmp.Size() == 0 { - panic("attempted to update re-CheckTx txs when mempool is empty") + panic("mempool: cannot run recheck on an empty mempool") } + txmp.logger.Debug( + "executing re-CheckTx for all remaining transactions", + "num_txs", txmp.Size(), + "height", txmp.height, + ) - txmp.recheckCursor = txmp.gossipIndex.Front() - txmp.recheckEnd = txmp.gossipIndex.Back() - - for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() { - wtx := e.Value.(*WrappedTx) - - // Only execute CheckTx if the transaction is not marked as removed which - // could happen if the transaction was evicted. - if !txmp.txStore.IsTxRemoved(wtx.hash) { - res, err := txmp.proxyAppConn.CheckTx(ctx, &abci.RequestCheckTx{ - Tx: wtx.tx, - Type: abci.CheckTxType_Recheck, + // Collect transactions currently in the mempool requiring recheck. + wtxs := make([]*WrappedTx, 0, txmp.txs.Len()) + for e := txmp.txs.Front(); e != nil; e = e.Next() { + wtxs = append(wtxs, e.Value.(*WrappedTx)) + } + + // Issue CheckTx calls for each remaining transaction, and when all the + // rechecks are complete signal watchers that transactions may be available. 
+ go func() { + g, start := taskgroup.New(nil).Limit(2 * runtime.NumCPU()) + + for _, wtx := range wtxs { + wtx := wtx + start(func() error { + rsp, err := txmp.proxyAppConn.CheckTx(ctx, &abci.RequestCheckTx{ + Tx: wtx.tx, + Type: abci.CheckTxType_Recheck, + }) + if err != nil { + txmp.logger.Error("failed to execute CheckTx during recheck", + "err", err, "hash", fmt.Sprintf("%x", wtx.tx.Hash())) + } else { + txmp.handleRecheckResult(wtx.tx, rsp) + } + return nil }) - if err != nil { - // no need in retrying since the tx will be rechecked after the next block - txmp.logger.Error("failed to execute CheckTx during rechecking", "err", err) - continue - } - txmp.defaultTxCallback(wtx.tx, res) } - } + if err := txmp.proxyAppConn.Flush(ctx); err != nil { + txmp.logger.Error("failed to flush transactions during recheck", "err", err) + } - if err := txmp.proxyAppConn.Flush(ctx); err != nil { - txmp.logger.Error("failed to flush transactions during rechecking", "err", err) - } + // When recheck is complete, trigger a notification for more transactions. + _ = g.Wait() + txmp.mtx.Lock() + defer txmp.mtx.Unlock() + txmp.notifyTxsAvailable() + }() } // canAddTx returns an error if we cannot insert the provided *WrappedTx into -// the mempool due to mempool configured constraints. If it returns nil, -// the transaction can be inserted into the mempool. +// the mempool due to mempool configured constraints. Otherwise, nil is +// returned and the transaction can be inserted into the mempool. 
func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { - var ( - numTxs = txmp.Size() - sizeBytes = txmp.SizeBytes() - ) + numTxs := txmp.Size() + txBytes := txmp.SizeBytes() - if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes { + if numTxs >= txmp.config.Size || wtx.Size()+txBytes > txmp.config.MaxTxsBytes { return types.ErrMempoolIsFull{ NumTxs: numTxs, MaxTxs: txmp.config.Size, - TxsBytes: sizeBytes, + TxsBytes: txBytes, MaxTxsBytes: txmp.config.MaxTxsBytes, } } @@ -741,96 +726,40 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { return nil } -func (txmp *TxMempool) insertTx(wtx *WrappedTx) { - txmp.txStore.SetTx(wtx) - txmp.priorityIndex.PushTx(wtx) - txmp.heightIndex.Insert(wtx) - txmp.timestampIndex.Insert(wtx) - - // Insert the transaction into the gossip index and mark the reference to the - // linked-list element, which will be needed at a later point when the - // transaction is removed. - gossipEl := txmp.gossipIndex.PushBack(wtx) - wtx.gossipEl = gossipEl - - atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size())) -} - -func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) { - if txmp.txStore.IsTxRemoved(wtx.hash) { - return - } - - txmp.txStore.RemoveTx(wtx) - txmp.priorityIndex.RemoveTx(wtx) - txmp.heightIndex.Remove(wtx) - txmp.timestampIndex.Remove(wtx) - - // Remove the transaction from the gossip index and cleanup the linked-list - // element so it can be garbage collected. - txmp.gossipIndex.Remove(wtx.gossipEl) - wtx.gossipEl.DetachPrev() - - atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size())) - - if removeFromCache { - txmp.cache.Remove(wtx.tx) - } -} - -// purgeExpiredTxs removes all transactions that have exceeded their respective -// height- and/or time-based TTLs from their respective indexes. Every expired -// transaction will be removed from the mempool, but preserved in the cache. 
+// purgeExpiredTxs removes all transactions from the mempool that have exceeded +// their respective height or time-based limits as of the given blockHeight. +// Transactions removed by this operation are not removed from the cache. // -// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which -// the caller has a write-lock on the mempool and so we can safely iterate over -// the height and time based indexes. +// The caller must hold txmp.mtx exclusively. func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { - now := time.Now() - expiredTxs := make(map[types.TxKey]*WrappedTx) - - if txmp.config.TTLNumBlocks > 0 { - purgeIdx := -1 - for i, wtx := range txmp.heightIndex.txs { - if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks { - expiredTxs[wtx.tx.Key()] = wtx - purgeIdx = i - } else { - // since the index is sorted, we know no other txs can be be purged - break - } - } - - if purgeIdx >= 0 { - txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:] - } + if txmp.config.TTLNumBlocks == 0 && txmp.config.TTLDuration == 0 { + return // nothing to do } - if txmp.config.TTLDuration > 0 { - purgeIdx := -1 - for i, wtx := range txmp.timestampIndex.txs { - if now.Sub(wtx.timestamp) > txmp.config.TTLDuration { - expiredTxs[wtx.tx.Key()] = wtx - purgeIdx = i - } else { - // since the index is sorted, we know no other txs can be be purged - break - } - } - - if purgeIdx >= 0 { - txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:] + now := time.Now() + cur := txmp.txs.Front() + for cur != nil { + // N.B. Grab the next element first, since if we remove cur its successor + // will be invalidated. 
+ next := cur.Next() + + w := cur.Value.(*WrappedTx) + if txmp.config.TTLNumBlocks > 0 && (blockHeight-w.height) > txmp.config.TTLNumBlocks { + txmp.removeTxByElement(cur) + txmp.cache.Remove(w.tx) + txmp.metrics.EvictedTxs.Add(1) + } else if txmp.config.TTLDuration > 0 && now.Sub(w.timestamp) > txmp.config.TTLDuration { + txmp.removeTxByElement(cur) + txmp.cache.Remove(w.tx) + txmp.metrics.EvictedTxs.Add(1) } - } - - for _, wtx := range expiredTxs { - txmp.removeTx(wtx, false) + cur = next } } func (txmp *TxMempool) notifyTxsAvailable() { if txmp.Size() == 0 { - panic("attempt to notify txs available but mempool is empty!") + return // nothing to do } if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index 42fb13bdce..2071d1f057 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -86,9 +86,19 @@ func setup(t testing.TB, app abciclient.Client, cacheSize int, options ...TxMemp return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, app, options...) } -func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { - t.Helper() +// mustCheckTx invokes txmp.CheckTx for the given transaction and waits until +// its callback has finished executing. It fails t if CheckTx fails. 
+func mustCheckTx(ctx context.Context, t *testing.T, txmp *TxMempool, spec string) { + done := make(chan struct{}) + if err := txmp.CheckTx(ctx, []byte(spec), func(*abci.ResponseCheckTx) { + close(done) + }, TxInfo{}); err != nil { + t.Fatalf("CheckTx for %q failed: %v", spec, err) + } + <-done +} +func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { txs := make([]testTx, numTxs) txInfo := TxInfo{SenderID: peerID} @@ -217,6 +227,87 @@ func TestTxMempool_Size(t *testing.T) { require.Equal(t, int64(2850), txmp.SizeBytes()) } +func TestTxMempool_Eviction(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + + txmp := setup(t, client, 1000) + txmp.config.Size = 5 + txmp.config.MaxTxsBytes = 60 + txExists := func(spec string) bool { + txmp.Lock() + defer txmp.Unlock() + key := types.Tx(spec).Key() + _, ok := txmp.txByKey[key] + return ok + } + t.Cleanup(client.Wait) + + // A transaction bigger than the mempool should be rejected even when there + // are slots available. + mustCheckTx(ctx, t, txmp, "big=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef=1") + require.Equal(t, 0, txmp.Size()) + + // Nearly-fill the mempool with a low-priority transaction, to show that it + // is evicted even when slots are available for a higher-priority tx. + const bigTx = "big=0123456789abcdef0123456789abcdef0123456789abcdef01234=2" + mustCheckTx(ctx, t, txmp, bigTx) + require.Equal(t, 1, txmp.Size()) // bigTx is the only element + require.True(t, txExists(bigTx)) + require.Equal(t, int64(len(bigTx)), txmp.SizeBytes()) + + // The next transaction should evict bigTx, because it is higher priority + // but does not fit on size. 
+ mustCheckTx(ctx, t, txmp, "key1=0000=25") + require.True(t, txExists("key1=0000=25")) + require.False(t, txExists(bigTx)) + require.False(t, txmp.cache.Has([]byte(bigTx))) + require.Equal(t, int64(len("key1=0000=25")), txmp.SizeBytes()) + + // Now fill up the rest of the slots with other transactions. + mustCheckTx(ctx, t, txmp, "key2=0001=5") + mustCheckTx(ctx, t, txmp, "key3=0002=10") + mustCheckTx(ctx, t, txmp, "key4=0003=3") + mustCheckTx(ctx, t, txmp, "key5=0004=3") + + // A new transaction with low priority should be discarded. + mustCheckTx(ctx, t, txmp, "key6=0005=1") + require.False(t, txExists("key6=0005=1")) + + // A new transaction with higher priority should evict key5, which is the + // newest of the two transactions with lowest priority. + mustCheckTx(ctx, t, txmp, "key7=0006=7") + require.True(t, txExists("key7=0006=7")) // new transaction added + require.False(t, txExists("key5=0004=3")) // newest low-priority tx evicted + require.True(t, txExists("key4=0003=3")) // older low-priority tx retained + + // Another new transaction evicts the other low-priority element. + mustCheckTx(ctx, t, txmp, "key8=0007=20") + require.True(t, txExists("key8=0007=20")) + require.False(t, txExists("key4=0003=3")) + + // Now the lowest-priority tx is 5, so that should be the next to go. + mustCheckTx(ctx, t, txmp, "key9=0008=9") + require.True(t, txExists("key9=0008=9")) + require.False(t, txExists("k3y2=0001=5")) + + // Add a transaction that requires eviction of multiple lower-priority + // entries, in order to fit the size of the element. 
+ mustCheckTx(ctx, t, txmp, "key10=0123456789abcdef=11") // evict 10, 9, 7; keep 25, 20, 11 + require.True(t, txExists("key1=0000=25")) + require.True(t, txExists("key8=0007=20")) + require.True(t, txExists("key10=0123456789abcdef=11")) + require.False(t, txExists("key3=0002=10")) + require.False(t, txExists("key9=0008=9")) + require.False(t, txExists("key7=0006=7")) +} + func TestTxMempool_Flush(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -537,7 +628,6 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { tTxs := checkTxs(ctx, t, txmp, 100, 0) require.Equal(t, len(tTxs), txmp.Size()) - require.Equal(t, 100, txmp.heightIndex.Size()) // reap 5 txs at the next height -- no txs should expire reapedTxs := txmp.ReapMaxTxs(5) @@ -551,12 +641,10 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { txmp.Unlock() require.Equal(t, 95, txmp.Size()) - require.Equal(t, 95, txmp.heightIndex.Size()) // check more txs at height 101 _ = checkTxs(ctx, t, txmp, 50, 1) require.Equal(t, 145, txmp.Size()) - require.Equal(t, 145, txmp.heightIndex.Size()) // Reap 5 txs at a height that would expire all the transactions from before // the previous Update (height 100). @@ -577,7 +665,6 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { txmp.Unlock() require.GreaterOrEqual(t, txmp.Size(), 45) - require.GreaterOrEqual(t, txmp.heightIndex.Size(), 45) } func TestTxMempool_CheckTxPostCheckError(t *testing.T) { diff --git a/internal/mempool/priority_queue.go b/internal/mempool/priority_queue.go deleted file mode 100644 index e31997397e..0000000000 --- a/internal/mempool/priority_queue.go +++ /dev/null @@ -1,158 +0,0 @@ -package mempool - -import ( - "container/heap" - "sort" - "sync" -) - -var _ heap.Interface = (*TxPriorityQueue)(nil) - -// TxPriorityQueue defines a thread-safe priority queue for valid transactions. 
-type TxPriorityQueue struct { - mtx sync.RWMutex - txs []*WrappedTx -} - -func NewTxPriorityQueue() *TxPriorityQueue { - pq := &TxPriorityQueue{ - txs: make([]*WrappedTx, 0), - } - - heap.Init(pq) - - return pq -} - -// GetEvictableTxs attempts to find and return a list of *WrappedTx than can be -// evicted to make room for another *WrappedTx with higher priority. If no such -// list of *WrappedTx exists, nil will be returned. The returned list of *WrappedTx -// indicate that these transactions can be removed due to them being of lower -// priority and that their total sum in size allows room for the incoming -// transaction according to the mempool's configured limits. -func (pq *TxPriorityQueue) GetEvictableTxs(priority, txSize, totalSize, cap int64) []*WrappedTx { - pq.mtx.RLock() - defer pq.mtx.RUnlock() - - txs := make([]*WrappedTx, len(pq.txs)) - copy(txs, pq.txs) - - sort.Slice(txs, func(i, j int) bool { - return txs[i].priority < txs[j].priority - }) - - var ( - toEvict []*WrappedTx - i int - ) - - currSize := totalSize - - // Loop over all transactions in ascending priority order evaluating those - // that are only of less priority than the provided argument. We continue - // evaluating transactions until there is sufficient capacity for the new - // transaction (size) as defined by txSize. - for i < len(txs) && txs[i].priority < priority { - toEvict = append(toEvict, txs[i]) - currSize -= int64(txs[i].Size()) - - if currSize+txSize <= cap { - return toEvict - } - - i++ - } - - return nil -} - -// NumTxs returns the number of transactions in the priority queue. It is -// thread safe. -func (pq *TxPriorityQueue) NumTxs() int { - pq.mtx.RLock() - defer pq.mtx.RUnlock() - - return len(pq.txs) -} - -// RemoveTx removes a specific transaction from the priority queue. 
-func (pq *TxPriorityQueue) RemoveTx(tx *WrappedTx) { - pq.mtx.Lock() - defer pq.mtx.Unlock() - - if tx.heapIndex < len(pq.txs) { - heap.Remove(pq, tx.heapIndex) - } -} - -// PushTx adds a valid transaction to the priority queue. It is thread safe. -func (pq *TxPriorityQueue) PushTx(tx *WrappedTx) { - pq.mtx.Lock() - defer pq.mtx.Unlock() - - heap.Push(pq, tx) -} - -// PopTx removes the top priority transaction from the queue. It is thread safe. -func (pq *TxPriorityQueue) PopTx() *WrappedTx { - pq.mtx.Lock() - defer pq.mtx.Unlock() - - x := heap.Pop(pq) - if x != nil { - return x.(*WrappedTx) - } - - return nil -} - -// Push implements the Heap interface. -// -// NOTE: A caller should never call Push. Use PushTx instead. -func (pq *TxPriorityQueue) Push(x interface{}) { - n := len(pq.txs) - item := x.(*WrappedTx) - item.heapIndex = n - pq.txs = append(pq.txs, item) -} - -// Pop implements the Heap interface. -// -// NOTE: A caller should never call Pop. Use PopTx instead. -func (pq *TxPriorityQueue) Pop() interface{} { - old := pq.txs - n := len(old) - item := old[n-1] - old[n-1] = nil // avoid memory leak - item.heapIndex = -1 // for safety - pq.txs = old[0 : n-1] - return item -} - -// Len implements the Heap interface. -// -// NOTE: A caller should never call Len. Use NumTxs instead. -func (pq *TxPriorityQueue) Len() int { - return len(pq.txs) -} - -// Less implements the Heap interface. It returns true if the transaction at -// position i in the queue is of less priority than the transaction at position j. -func (pq *TxPriorityQueue) Less(i, j int) bool { - // If there exists two transactions with the same priority, consider the one - // that we saw the earliest as the higher priority transaction. - if pq.txs[i].priority == pq.txs[j].priority { - return pq.txs[i].timestamp.Before(pq.txs[j].timestamp) - } - - // We want Pop to give us the highest, not lowest, priority so we use greater - // than here. 
- return pq.txs[i].priority > pq.txs[j].priority -} - -// Swap implements the Heap interface. It swaps two transactions in the queue. -func (pq *TxPriorityQueue) Swap(i, j int) { - pq.txs[i], pq.txs[j] = pq.txs[j], pq.txs[i] - pq.txs[i].heapIndex = i - pq.txs[j].heapIndex = j -} diff --git a/internal/mempool/priority_queue_test.go b/internal/mempool/priority_queue_test.go deleted file mode 100644 index 90f6111625..0000000000 --- a/internal/mempool/priority_queue_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package mempool - -import ( - "math/rand" - "sort" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestTxPriorityQueue(t *testing.T) { - pq := NewTxPriorityQueue() - numTxs := 1000 - - priorities := make([]int, numTxs) - - var wg sync.WaitGroup - for i := 1; i <= numTxs; i++ { - priorities[i-1] = i - wg.Add(1) - - go func(i int) { - pq.PushTx(&WrappedTx{ - priority: int64(i), - timestamp: time.Now(), - }) - - wg.Done() - }(i) - } - - sort.Sort(sort.Reverse(sort.IntSlice(priorities))) - - wg.Wait() - require.Equal(t, numTxs, pq.NumTxs()) - - // Wait a second and push a tx with a duplicate priority - time.Sleep(time.Second) - now := time.Now() - pq.PushTx(&WrappedTx{ - priority: 1000, - timestamp: now, - }) - require.Equal(t, 1001, pq.NumTxs()) - - tx := pq.PopTx() - require.Equal(t, 1000, pq.NumTxs()) - require.Equal(t, int64(1000), tx.priority) - require.NotEqual(t, now, tx.timestamp) - - gotPriorities := make([]int, 0) - for pq.NumTxs() > 0 { - gotPriorities = append(gotPriorities, int(pq.PopTx().priority)) - } - - require.Equal(t, priorities, gotPriorities) -} - -func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) { - pq := NewTxPriorityQueue() - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - - values := make([]int, 1000) - - for i := 0; i < 1000; i++ { - tx := make([]byte, 5) // each tx is 5 bytes - _, err := rng.Read(tx) - require.NoError(t, err) - - x := rng.Intn(100000) - pq.PushTx(&WrappedTx{ - tx: tx, - 
priority: int64(x), - }) - - values[i] = x - } - - sort.Ints(values) - - max := values[len(values)-1] - min := values[0] - totalSize := int64(len(values) * 5) - - testCases := []struct { - name string - priority, txSize, totalSize, cap int64 - expectedLen int - }{ - { - name: "largest priority; single tx", - priority: int64(max + 1), - txSize: 5, - totalSize: totalSize, - cap: totalSize, - expectedLen: 1, - }, - { - name: "largest priority; multi tx", - priority: int64(max + 1), - txSize: 17, - totalSize: totalSize, - cap: totalSize, - expectedLen: 4, - }, - { - name: "largest priority; out of capacity", - priority: int64(max + 1), - txSize: totalSize + 1, - totalSize: totalSize, - cap: totalSize, - expectedLen: 0, - }, - { - name: "smallest priority; no tx", - priority: int64(min - 1), - txSize: 5, - totalSize: totalSize, - cap: totalSize, - expectedLen: 0, - }, - { - name: "small priority; no tx", - priority: int64(min), - txSize: 5, - totalSize: totalSize, - cap: totalSize, - expectedLen: 0, - }, - } - - for _, tc := range testCases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - evictTxs := pq.GetEvictableTxs(tc.priority, tc.txSize, tc.totalSize, tc.cap) - require.Len(t, evictTxs, tc.expectedLen) - }) - } -} - -func TestTxPriorityQueue_RemoveTx(t *testing.T) { - pq := NewTxPriorityQueue() - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - numTxs := 1000 - - values := make([]int, numTxs) - - for i := 0; i < numTxs; i++ { - x := rng.Intn(100000) - pq.PushTx(&WrappedTx{ - priority: int64(x), - }) - - values[i] = x - } - - require.Equal(t, numTxs, pq.NumTxs()) - - sort.Ints(values) - max := values[len(values)-1] - - wtx := pq.txs[pq.NumTxs()/2] - pq.RemoveTx(wtx) - require.Equal(t, numTxs-1, pq.NumTxs()) - require.Equal(t, int64(max), pq.PopTx().priority) - require.Equal(t, numTxs-2, pq.NumTxs()) - - require.NotPanics(t, func() { - pq.RemoveTx(&WrappedTx{heapIndex: numTxs}) - pq.RemoveTx(&WrappedTx{heapIndex: numTxs + 1}) - }) - require.Equal(t, 
numTxs-2, pq.NumTxs()) -} diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index 62cdf386c5..1f4b3f78a4 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -307,8 +307,8 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, m select { case <-ctx.Done(): return - case <-r.mempool.WaitForNextTx(): // wait until a tx is available - if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil { + case <-r.mempool.TxsWaitChan(): // wait until a tx is available + if nextGossipTx = r.mempool.TxsFront(); nextGossipTx == nil { continue } } @@ -318,7 +318,7 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, m // NOTE: Transaction batching was disabled due to: // https://github.com/tendermint/tendermint/issues/5796 - if ok := r.mempool.txStore.TxHasPeer(memTx.hash, peerMempoolID); !ok { + if !memTx.HasPeer(peerMempoolID) { // Send the mempool tx to the corresponding peer. Note, the peer may be // behind and thus would not be able to process the mempool tx correctly. 
if err := mempoolCh.Send(ctx, p2p.Envelope{ diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index ee7fe777f2..bd6ccf8b22 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -68,7 +68,7 @@ func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNode require.NoError(t, client.Start(ctx)) t.Cleanup(client.Wait) - mempool := setup(t, client, 0) + mempool := setup(t, client, 1<<20) rts.mempools[nodeID] = mempool rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf) @@ -170,7 +170,9 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) { secondaryReactor.observePanic = observePanic firstTx := &WrappedTx{} + primaryMempool.Lock() primaryMempool.insertTx(firstTx) + primaryMempool.Unlock() // run the router rts.start(ctx, t) @@ -183,6 +185,8 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) { wg.Add(1) go func() { defer wg.Done() + primaryMempool.Lock() + defer primaryMempool.Unlock() primaryMempool.insertTx(next) }() } diff --git a/internal/mempool/tx.go b/internal/mempool/tx.go index c7113c9513..1a221e2c3c 100644 --- a/internal/mempool/tx.go +++ b/internal/mempool/tx.go @@ -1,11 +1,9 @@ package mempool import ( - "sort" "sync" "time" - "github.com/tendermint/tendermint/internal/libs/clist" "github.com/tendermint/tendermint/types" ) @@ -24,270 +22,78 @@ type TxInfo struct { // WrappedTx defines a wrapper around a raw transaction with additional metadata // that is used for indexing. type WrappedTx struct { - // tx represents the raw binary transaction data - tx types.Tx - - // hash defines the transaction hash and the primary key used in the mempool - hash types.TxKey - - // height defines the height at which the transaction was validated at - height int64 - - // gasWanted defines the amount of gas the transaction sender requires - gasWanted int64 - - // priority defines the transaction's priority as specified by the application - // in the ResponseCheckTx response. 
- priority int64 - - // sender defines the transaction's sender as specified by the application in - // the ResponseCheckTx response. - sender string - - // timestamp is the time at which the node first received the transaction from - // a peer. It is used as a second dimension is prioritizing transactions when - // two transactions have the same priority. - timestamp time.Time - - // peers records a mapping of all peers that sent a given transaction - peers map[uint16]struct{} - - // heapIndex defines the index of the item in the heap - heapIndex int - - // gossipEl references the linked-list element in the gossip index - gossipEl *clist.CElement - - // removed marks the transaction as removed from the mempool. This is set - // during RemoveTx and is needed due to the fact that a given existing - // transaction in the mempool can be evicted when it is simultaneously having - // a reCheckTx callback executed. - removed bool -} - -func (wtx *WrappedTx) Size() int { - return len(wtx.tx) -} - -// TxStore implements a thread-safe mapping of valid transaction(s). -// -// NOTE: -// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative -// access is not allowed. Regardless, it is not expected for the mempool to -// need mutative access. -type TxStore struct { - mtx sync.RWMutex - hashTxs map[types.TxKey]*WrappedTx // primary index - senderTxs map[string]*WrappedTx // sender is defined by the ABCI application -} - -func NewTxStore() *TxStore { - return &TxStore{ - senderTxs: make(map[string]*WrappedTx), - hashTxs: make(map[types.TxKey]*WrappedTx), - } -} - -// Size returns the total number of transactions in the store. -func (txs *TxStore) Size() int { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return len(txs.hashTxs) -} - -// GetAllTxs returns all the transactions currently in the store. 
-func (txs *TxStore) GetAllTxs() []*WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wTxs := make([]*WrappedTx, len(txs.hashTxs)) - i := 0 - for _, wtx := range txs.hashTxs { - wTxs[i] = wtx - i++ - } - - return wTxs -} - -// GetTxBySender returns a *WrappedTx by the transaction's sender property -// defined by the ABCI application. -func (txs *TxStore) GetTxBySender(sender string) *WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return txs.senderTxs[sender] -} - -// GetTxByHash returns a *WrappedTx by the transaction's hash. -func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return txs.hashTxs[hash] -} - -// IsTxRemoved returns true if a transaction by hash is marked as removed and -// false otherwise. -func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wtx, ok := txs.hashTxs[hash] - if ok { - return wtx.removed + tx types.Tx // the original transaction data + hash types.TxKey // the transaction hash + height int64 // height when this transaction was initially checked (for expiry) + timestamp time.Time // time when transaction was entered (for TTL) + + mtx sync.Mutex + gasWanted int64 // app: gas required to execute this transaction + priority int64 // app: priority value for this transaction + sender string // app: assigned sender label + peers map[uint16]bool // peer IDs who have sent us this transaction +} + +// Size reports the size of the raw transaction in bytes. +func (w *WrappedTx) Size() int64 { return int64(len(w.tx)) } + +// SetPeer adds the specified peer ID as a sender of w. +func (w *WrappedTx) SetPeer(id uint16) { + w.mtx.Lock() + defer w.mtx.Unlock() + if w.peers == nil { + w.peers = map[uint16]bool{id: true} + } else { + w.peers[id] = true } - - return false } -// SetTx stores a *WrappedTx by it's hash. 
If the transaction also contains a -// non-empty sender, we additionally store the transaction by the sender as -// defined by the ABCI application. -func (txs *TxStore) SetTx(wtx *WrappedTx) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - if len(wtx.sender) > 0 { - txs.senderTxs[wtx.sender] = wtx - } - - txs.hashTxs[wtx.tx.Key()] = wtx -} - -// RemoveTx removes a *WrappedTx from the transaction store. It deletes all -// indexes of the transaction. -func (txs *TxStore) RemoveTx(wtx *WrappedTx) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - if len(wtx.sender) > 0 { - delete(txs.senderTxs, wtx.sender) - } - - delete(txs.hashTxs, wtx.tx.Key()) - wtx.removed = true -} - -// TxHasPeer returns true if a transaction by hash has a given peer ID and false -// otherwise. If the transaction does not exist, false is returned. -func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wtx := txs.hashTxs[hash] - if wtx == nil { - return false - } - - _, ok := wtx.peers[peerID] +// HasPeer reports whether the specified peer ID is a sender of w. +func (w *WrappedTx) HasPeer(id uint16) bool { + w.mtx.Lock() + defer w.mtx.Unlock() + _, ok := w.peers[id] return ok } -// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the -// given peerID to the WrappedTx's set of peers that sent us this transaction. -// We return true if we've already recorded the given peer for this transaction -// and false otherwise. If the transaction does not exist by hash, we return -// (nil, false). 
-func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - wtx := txs.hashTxs[hash] - if wtx == nil { - return nil, false - } - - if wtx.peers == nil { - wtx.peers = make(map[uint16]struct{}) - } - - if _, ok := wtx.peers[peerID]; ok { - return wtx, true - } - - wtx.peers[peerID] = struct{}{} - return wtx, false -} - -// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be -// used to build generic transaction indexes in the mempool. It accepts a -// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx -// references which is used during Insert in order to determine sorted order. If -// less returns true, a <= b. -type WrappedTxList struct { - mtx sync.RWMutex - txs []*WrappedTx - less func(*WrappedTx, *WrappedTx) bool +// SetGasWanted sets the application-assigned gas requirement of w. +func (w *WrappedTx) SetGasWanted(gas int64) { + w.mtx.Lock() + defer w.mtx.Unlock() + w.gasWanted = gas } -func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList { - return &WrappedTxList{ - txs: make([]*WrappedTx, 0), - less: less, - } +// GasWanted reports the application-assigned gas requirement of w. +func (w *WrappedTx) GasWanted() int64 { + w.mtx.Lock() + defer w.mtx.Unlock() + return w.gasWanted } -// Size returns the number of WrappedTx objects in the list. -func (wtl *WrappedTxList) Size() int { - wtl.mtx.RLock() - defer wtl.mtx.RUnlock() - - return len(wtl.txs) +// SetSender sets the application-assigned sender of w. +func (w *WrappedTx) SetSender(sender string) { + w.mtx.Lock() + defer w.mtx.Unlock() + w.sender = sender } -// Reset resets the list of transactions to an empty list. -func (wtl *WrappedTxList) Reset() { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - wtl.txs = make([]*WrappedTx, 0) +// Sender reports the application-assigned sender of w. 
+func (w *WrappedTx) Sender() string { + w.mtx.Lock() + defer w.mtx.Unlock() + return w.sender } -// Insert inserts a WrappedTx reference into the sorted list based on the list's -// comparator function. -func (wtl *WrappedTxList) Insert(wtx *WrappedTx) { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - i := sort.Search(len(wtl.txs), func(i int) bool { - return wtl.less(wtl.txs[i], wtx) - }) - - if i == len(wtl.txs) { - // insert at the end - wtl.txs = append(wtl.txs, wtx) - return - } - - // Make space for the inserted element by shifting values at the insertion - // index up one index. - // - // NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs). - wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...) - wtl.txs[i] = wtx +// SetPriority sets the application-assigned priority of w. +func (w *WrappedTx) SetPriority(p int64) { + w.mtx.Lock() + defer w.mtx.Unlock() + w.priority = p } -// Remove attempts to remove a WrappedTx from the sorted list. -func (wtl *WrappedTxList) Remove(wtx *WrappedTx) { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - i := sort.Search(len(wtl.txs), func(i int) bool { - return wtl.less(wtl.txs[i], wtx) - }) - - // Since the list is sorted, we evaluate all elements starting at i. Note, if - // the element does not exist, we may potentially evaluate the entire remainder - // of the list. However, a caller should not be expected to call Remove with a - // non-existing element. - for i < len(wtl.txs) { - if wtl.txs[i] == wtx { - wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...) - return - } - - i++ - } +// Priority reports the application-assigned priority of w. 
+func (w *WrappedTx) Priority() int64 { + w.mtx.Lock() + defer w.mtx.Unlock() + return w.priority } diff --git a/internal/mempool/tx_test.go b/internal/mempool/tx_test.go deleted file mode 100644 index c6d494b047..0000000000 --- a/internal/mempool/tx_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package mempool - -import ( - "fmt" - "math/rand" - "sort" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/types" -) - -func TestTxStore_GetTxBySender(t *testing.T) { - txs := NewTxStore() - wtx := &WrappedTx{ - tx: []byte("test_tx"), - sender: "foo", - priority: 1, - timestamp: time.Now(), - } - - res := txs.GetTxBySender(wtx.sender) - require.Nil(t, res) - - txs.SetTx(wtx) - - res = txs.GetTxBySender(wtx.sender) - require.NotNil(t, res) - require.Equal(t, wtx, res) -} - -func TestTxStore_GetTxByHash(t *testing.T) { - txs := NewTxStore() - wtx := &WrappedTx{ - tx: []byte("test_tx"), - sender: "foo", - priority: 1, - timestamp: time.Now(), - } - - key := wtx.tx.Key() - res := txs.GetTxByHash(key) - require.Nil(t, res) - - txs.SetTx(wtx) - - res = txs.GetTxByHash(key) - require.NotNil(t, res) - require.Equal(t, wtx, res) -} - -func TestTxStore_SetTx(t *testing.T) { - txs := NewTxStore() - wtx := &WrappedTx{ - tx: []byte("test_tx"), - priority: 1, - timestamp: time.Now(), - } - - key := wtx.tx.Key() - txs.SetTx(wtx) - - res := txs.GetTxByHash(key) - require.NotNil(t, res) - require.Equal(t, wtx, res) - - wtx.sender = "foo" - txs.SetTx(wtx) - - res = txs.GetTxByHash(key) - require.NotNil(t, res) - require.Equal(t, wtx, res) -} - -func TestTxStore_GetOrSetPeerByTxHash(t *testing.T) { - txs := NewTxStore() - wtx := &WrappedTx{ - tx: []byte("test_tx"), - priority: 1, - timestamp: time.Now(), - } - - key := wtx.tx.Key() - txs.SetTx(wtx) - - res, ok := txs.GetOrSetPeerByTxHash(types.Tx([]byte("test_tx_2")).Key(), 15) - require.Nil(t, res) - require.False(t, ok) - - res, ok = txs.GetOrSetPeerByTxHash(key, 15) - require.NotNil(t, res) - 
require.False(t, ok) - - res, ok = txs.GetOrSetPeerByTxHash(key, 15) - require.NotNil(t, res) - require.True(t, ok) - - require.True(t, txs.TxHasPeer(key, 15)) - require.False(t, txs.TxHasPeer(key, 16)) -} - -func TestTxStore_RemoveTx(t *testing.T) { - txs := NewTxStore() - wtx := &WrappedTx{ - tx: []byte("test_tx"), - priority: 1, - timestamp: time.Now(), - } - - txs.SetTx(wtx) - - key := wtx.tx.Key() - res := txs.GetTxByHash(key) - require.NotNil(t, res) - - txs.RemoveTx(res) - - res = txs.GetTxByHash(key) - require.Nil(t, res) -} - -func TestTxStore_Size(t *testing.T) { - txStore := NewTxStore() - numTxs := 1000 - - for i := 0; i < numTxs; i++ { - txStore.SetTx(&WrappedTx{ - tx: []byte(fmt.Sprintf("test_tx_%d", i)), - priority: int64(i), - timestamp: time.Now(), - }) - } - - require.Equal(t, numTxs, txStore.Size()) -} - -func TestWrappedTxList_Reset(t *testing.T) { - list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }) - - require.Zero(t, list.Size()) - - for i := 0; i < 100; i++ { - list.Insert(&WrappedTx{height: int64(i)}) - } - - require.Equal(t, 100, list.Size()) - - list.Reset() - require.Zero(t, list.Size()) -} - -func TestWrappedTxList_Insert(t *testing.T) { - list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }) - - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - - var expected []int - for i := 0; i < 100; i++ { - height := rng.Int63n(10000) - expected = append(expected, int(height)) - list.Insert(&WrappedTx{height: height}) - - if i%10 == 0 { - list.Insert(&WrappedTx{height: height}) - expected = append(expected, int(height)) - } - } - - got := make([]int, list.Size()) - for i, wtx := range list.txs { - got[i] = int(wtx.height) - } - - sort.Ints(expected) - require.Equal(t, expected, got) -} - -func TestWrappedTxList_Remove(t *testing.T) { - list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }) - - rng := 
rand.New(rand.NewSource(time.Now().UnixNano())) - - var txs []*WrappedTx - for i := 0; i < 100; i++ { - height := rng.Int63n(10000) - tx := &WrappedTx{height: height} - - txs = append(txs, tx) - list.Insert(tx) - - if i%10 == 0 { - tx = &WrappedTx{height: height} - list.Insert(tx) - txs = append(txs, tx) - } - } - - // remove a tx that does not exist - list.Remove(&WrappedTx{height: 20000}) - - // remove a tx that exists (by height) but not referenced - list.Remove(&WrappedTx{height: txs[0].height}) - - // remove a few existing txs - for i := 0; i < 25; i++ { - j := rng.Intn(len(txs)) - list.Remove(txs[j]) - txs = append(txs[:j], txs[j+1:]...) - } - - expected := make([]int, len(txs)) - for i, tx := range txs { - expected[i] = int(tx.height) - } - - got := make([]int, list.Size()) - for i, wtx := range list.txs { - got[i] = int(wtx.height) - } - - sort.Ints(expected) - require.Equal(t, expected, got) -} From e33635a601ce00353e428faf411d2187919bccbe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Jul 2022 17:20:31 -0700 Subject: [PATCH 179/203] build(deps): Bump terser from 4.8.0 to 4.8.1 in /docs (#9051) --- docs/package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index 7240aaa2d0..da9a0c1c27 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -12288,9 +12288,9 @@ } }, "node_modules/terser": { - "version": "4.8.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz", - "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==", + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.1.tgz", + "integrity": "sha512-4GnLC0x667eJG0ewJTa6z/yXrbLGv80D9Ru6HIpCQmO+Q4PfEtBFi0ObSckqwL6VyQv/7ENJieXHo2ANmdQwgw==", "dependencies": { "commander": "^2.20.0", "source-map": "~0.6.1", @@ -23925,9 +23925,9 @@ 
"integrity": "sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==" }, "terser": { - "version": "4.8.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz", - "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==", + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.1.tgz", + "integrity": "sha512-4GnLC0x667eJG0ewJTa6z/yXrbLGv80D9Ru6HIpCQmO+Q4PfEtBFi0ObSckqwL6VyQv/7ENJieXHo2ANmdQwgw==", "requires": { "commander": "^2.20.0", "source-map": "~0.6.1", From d320b3088e57a7c427e94d670f4676f744d031ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Jul 2022 01:50:14 +0000 Subject: [PATCH 180/203] build(deps): Bump github.com/golangci/golangci-lint from 1.47.0 to 1.47.1 (#9044) Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.47.0 to 1.47.1.
Release notes

Sourced from github.com/golangci/golangci-lint's releases.

v1.47.1

Changelog

  • a91463cd build(deps): bump github.com/daixiang0/gci from 0.4.2 to 0.4.3 (#2992)
  • 4c8bdc70 build(deps): bump github.com/sivchari/tenv from 1.6.0 to 1.7.0 (#2988)
  • 4e60e8a8 gci: fix options display (#2989)
  • fd87bd1e gci: remove the use of stdin (#2984)
Changelog

Sourced from github.com/golangci/golangci-lint's changelog.

v1.47.1

  1. updated linters:
    • gci: from 0.4.2 to 0.4.3
    • gci: remove the use of stdin
    • gci: fix options display
    • tenv: from 1.6.0 to 1.7.0
    • unparam: bump to HEAD
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/golangci/golangci-lint&package-manager=go_modules&previous-version=1.47.0&new-version=1.47.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 12 ++++++------ go.sum | 27 +++++++++++++-------------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/go.mod b/go.mod index 3e3ef647ce..cfc3f08aa2 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( github.com/bufbuild/buf v1.4.0 github.com/creachadair/atomicfile v0.2.6 github.com/creachadair/taskgroup v0.3.2 - github.com/golangci/golangci-lint v1.47.0 + github.com/golangci/golangci-lint v1.47.1 github.com/google/go-cmp v0.5.8 github.com/vektra/mockery/v2 v2.14.0 gotest.tools v2.2.0+incompatible @@ -76,7 +76,7 @@ require ( github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect github.com/containerd/continuity v0.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/daixiang0/gci v0.4.2 // indirect + github.com/daixiang0/gci v0.4.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect github.com/dgraph-io/badger/v2 v2.2007.2 // indirect @@ -192,7 +192,7 @@ require ( github.com/sirupsen/logrus v1.8.1 // indirect github.com/sivchari/containedctx v1.0.2 // indirect github.com/sivchari/nosnakecase v1.5.0 // indirect - github.com/sivchari/tenv v1.6.0 // indirect + github.com/sivchari/tenv v1.7.0 // indirect github.com/sonatard/noctx v0.0.1 // indirect github.com/sourcegraph/go-diff v0.6.1 // indirect github.com/spf13/afero v1.8.2 // indirect @@ -221,12 +221,12 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect + golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect golang.org/x/sys v0.0.0-20220702020025-31831981b65f // indirect golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.11 // indirect + golang.org/x/tools 
v0.1.12-0.20220628192153-7743d1d949f1 // indirect google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/ini.v1 v1.66.6 // indirect @@ -236,7 +236,7 @@ require ( mvdan.cc/gofumpt v0.3.1 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect - mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect + mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect ) require ( diff --git a/go.sum b/go.sum index be1ef7f4b5..d273a1edc4 100644 --- a/go.sum +++ b/go.sum @@ -250,8 +250,8 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.4.2 h1:PyT/Y4a265wDhPCZo2ip/YH33M4zEuFA3nDMdAvcKSA= -github.com/daixiang0/gci v0.4.2/go.mod h1:d0f+IJhr9loBtIq+ebwhRoTt1LGbPH96ih8bKlsRT9E= +github.com/daixiang0/gci v0.4.3 h1:wf7x0xRjQqTlA2dzHTI0A/xPyp7VcBatBG9nwGatwbQ= +github.com/daixiang0/gci v0.4.3/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -448,8 +448,8 @@ github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6 github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= 
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.47.0 h1:h2s+ZGGF63fdzUtac+VYUHPsEO0ADTqHouI7Vase+FY= -github.com/golangci/golangci-lint v1.47.0/go.mod h1:3TZhfF5KolbIkXYjUFvER6G9CoxzLEaafr/u/QI1S5A= +github.com/golangci/golangci-lint v1.47.1 h1:hbubHskV2Ppwz4ZZE2lc0/Pw9ZhqLuzm2dT7ZVpLA6Y= +github.com/golangci/golangci-lint v1.47.1/go.mod h1:lpS2pjBZtRyXewUcOY7yUL3K4KfpoWz072yRN8AuhHg= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= @@ -1015,8 +1015,8 @@ github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYI github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= github.com/sivchari/nosnakecase v1.5.0 h1:ZBvAu1H3uteN0KQ0IsLpIFOwYgPEhKLyv2ahrVkub6M= github.com/sivchari/nosnakecase v1.5.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= -github.com/sivchari/tenv v1.6.0 h1:FyE4WysxLwYljKqWhTfOMjgKjBSnmzzg7lWOmpDiAcc= -github.com/sivchari/tenv v1.6.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/sivchari/tenv v1.7.0 h1:d4laZMBK6jpe5PWepxlV9S+LC0yXqvYHiq8E6ceoVVE= +github.com/sivchari/tenv v1.7.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY= @@ -1103,8 +1103,6 @@ github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= 
github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= -github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1253,8 +1251,9 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 h1:FR+oGxGfbQu1d+jglI3rCkjAjUnhRSZcUxr+DqlDLNo= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d h1:+W8Qf4iJtMGKkyAygcKohjxTk4JPsL9DpzApJ22m5Ic= +golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ 
-1487,7 +1486,6 @@ golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1503,6 +1501,7 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220702020025-31831981b65f h1:xdsejrW/0Wf2diT5CPp3XmKUNbr7Xvw8kYilQ+6qjRY= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1629,8 +1628,9 @@ golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlz golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools 
v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= -golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1 h1:NHLFZ56qCjD+0hYY3kE5Wl40Z7q4Gn9Ln/7YU0lsGko= +golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1864,7 +1864,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -1888,8 +1887,8 @@ mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wp mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 
h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio= -mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY= +mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 h1:seuXWbRB1qPrS3NQnHmFKLJLtskWyueeIzmLXghMGgk= +mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2YjFf5CaW0Bw4RL8RfbEf4GRggJk= pgregory.net/rapid v0.4.8 h1:d+5SGZWUbJPbl3ss6tmPFqnNeQR6VDOFly+eTjwPiEw= pgregory.net/rapid v0.4.8/go.mod h1:Z5PbWqjvWR1I3UGjvboUuan4fe4ZYEYNLNQLExzCoUs= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= From 87708b8ae7612093d4c80d54ad9fab9e7f045695 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 21 Jul 2022 01:22:23 -0700 Subject: [PATCH 181/203] Forward-port changelog for v0.35.9 to master. (#9059) --- CHANGELOG.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e6b99cd3a7..127763e3fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,18 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos). +## v0.35.9 + +July 20, 2022 + +This release fixes a deadlock that could occur in some cases when using the +priority mempool with the ABCI socket client. 
+ +### BUG FIXES + +- [mempool] [\#9030](https://github.com/tendermint/tendermint/pull/9030) rework lock discipline to mitigate callback deadlocks (@creachadair) + + ## v0.35.8 July 12, 2022 @@ -735,7 +747,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze, - [light] [\#5347](https://github.com/tendermint/tendermint/pull/5347) `NewClient`, `NewHTTPClient`, `VerifyHeader` and `VerifyLightBlockAtHeight` now accept `context.Context` as 1st param (@melekes) - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) `HashFromByteSlices` and `ProofsFromByteSlices` now return a hash for empty inputs, following RFC6962 (@erikgrinaker) - [proto] [\#5025](https://github.com/tendermint/tendermint/pull/5025) All proto files have been moved to `/proto` directory. (@marbar3778) - - Using the recommended the file layout from buf, [see here for more info](https://buf.build/docs/lint-checkers#file_layout) + - Using the recommended the file layout from buf, [see here for more info](https://docs.buf.build/lint/rules) - [rpc/client] [\#4947](https://github.com/tendermint/tendermint/pull/4947) `Validators`, `TxSearch` `page`/`per_page` params become pointers (@melekes) - `UnconfirmedTxs` `limit` param is a pointer - [rpc/jsonrpc/server] [\#5141](https://github.com/tendermint/tendermint/pull/5141) Remove `WriteRPCResponseArrayHTTP` (use `WriteRPCResponseHTTP` instead) (@melekes) From 65c0fba564a023fe097ec70a1f652664957e6ce3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Jul 2022 09:20:01 -0400 Subject: [PATCH 182/203] build(deps): Bump github.com/BurntSushi/toml from 1.1.0 to 1.2.0 (#9061) Bumps [github.com/BurntSushi/toml](https://github.com/BurntSushi/toml) from 1.1.0 to 1.2.0. 
- [Release notes](https://github.com/BurntSushi/toml/releases) - [Commits](https://github.com/BurntSushi/toml/compare/v1.1.0...v1.2.0) --- updated-dependencies: - dependency-name: github.com/BurntSushi/toml dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index cfc3f08aa2..eb3419b1d2 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/tendermint/tendermint go 1.17 require ( - github.com/BurntSushi/toml v1.1.0 + github.com/BurntSushi/toml v1.2.0 github.com/adlio/schema v1.3.3 github.com/btcsuite/btcd v0.22.1 github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce diff --git a/go.sum b/go.sum index d273a1edc4..cf38631bd2 100644 --- a/go.sum +++ b/go.sum @@ -74,8 +74,9 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOEl github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= +github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= From 
023c21f3074189da5e8dfbe01ce8469dfdc90eb6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Jul 2022 14:16:44 +0000 Subject: [PATCH 183/203] build(deps): Bump github.com/golangci/golangci-lint from 1.47.1 to 1.47.2 (#9070) Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.47.1 to 1.47.2.
Release notes

Sourced from github.com/golangci/golangci-lint's releases.

v1.47.2

Changelog

  • 61673b34 revive: ignore slow rules (#2999)
Changelog

Sourced from github.com/golangci/golangci-lint's changelog.

v1.47.2

  1. updated linters:
    • revive: ignore slow rules
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/golangci/golangci-lint&package-manager=go_modules&previous-version=1.47.1&new-version=1.47.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index eb3419b1d2..704731c325 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( github.com/bufbuild/buf v1.4.0 github.com/creachadair/atomicfile v0.2.6 github.com/creachadair/taskgroup v0.3.2 - github.com/golangci/golangci-lint v1.47.1 + github.com/golangci/golangci-lint v1.47.2 github.com/google/go-cmp v0.5.8 github.com/vektra/mockery/v2 v2.14.0 gotest.tools v2.2.0+incompatible diff --git a/go.sum b/go.sum index cf38631bd2..49f3247760 100644 --- a/go.sum +++ b/go.sum @@ -449,8 +449,8 @@ github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6 github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.47.1 h1:hbubHskV2Ppwz4ZZE2lc0/Pw9ZhqLuzm2dT7ZVpLA6Y= -github.com/golangci/golangci-lint v1.47.1/go.mod h1:lpS2pjBZtRyXewUcOY7yUL3K4KfpoWz072yRN8AuhHg= +github.com/golangci/golangci-lint v1.47.2 h1:qvMDVv49Hrx3PSEXZ0bD/yhwSbhsOihQjFYCKieegIw= +github.com/golangci/golangci-lint v1.47.2/go.mod h1:lpS2pjBZtRyXewUcOY7yUL3K4KfpoWz072yRN8AuhHg= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= From 6c302218e3823535d7a83758843773db1a472e07 Mon Sep 17 00:00:00 2001 From: Steven Ferrer Date: Mon, 25 Jul 2022 16:15:18 +0800 Subject: [PATCH 184/203] Documentation: update go tutorials (#9048) --- docs/tutorials/go-built-in.md | 385 
++++++++++++++++------------------ docs/tutorials/go.md | 237 +++++++++++---------- 2 files changed, 316 insertions(+), 306 deletions(-) diff --git a/docs/tutorials/go-built-in.md b/docs/tutorials/go-built-in.md index 456024ebf6..35f0cbca90 100644 --- a/docs/tutorials/go-built-in.md +++ b/docs/tutorials/go-built-in.md @@ -23,8 +23,6 @@ yourself with the syntax. By following along with this guide, you'll create a Tendermint Core project called kvstore, a (very) simple distributed BFT key-value store. -> Note: please use a released version of Tendermint with this guide. The guides will work with the latest version. Please, do not use master. - ## Built-in app vs external app Running your application inside the same process as Tendermint Core will give @@ -36,28 +34,27 @@ through a TCP, Unix domain socket or gRPC. ## 1.1 Installing Go Please refer to [the official guide for installing -Go](https://golang.org/doc/install). +Go](https://go.dev/doc/install). Verify that you have the latest version of Go installed: -```bash +```sh $ go version -go version go1.16.x darwin/amd64 +go version go1.18.x darwin/amd64 ``` ## 1.2 Creating a new Go project -We'll start by creating a new Go project. +We'll start by creating a new Go project. First, initialize the project folder with `go mod init`. Running this command should create the `go.mod` file. -```bash -mkdir kvstore -cd kvstore -go mod init github.com// +```sh +$ mkdir kvstore +$ cd kvstore +$ go mod init github.com//kvstore +go: creating new go.mod: module github.com//kvstore ``` -Inside the example directory create a `main.go` file with the following content: - -> Note: there is no need to clone or fork Tendermint in this tutorial. +Inside the project directory, create a `main.go` file with the following content: ```go package main @@ -73,7 +70,7 @@ func main() { When run, this should print "Hello, Tendermint Core" to the standard output. 
-```bash +```sh $ go run main.go Hello, Tendermint Core ``` @@ -152,16 +149,43 @@ func (KVStoreApplication) ApplySnapshotChunk(abcitypes.RequestApplySnapshotChunk } ``` -Now I will go through each method explaining when it's called and adding +Now, we will go through each method and explain when it is executed while adding required business logic. -### 1.3.1 CheckTx +### 1.3.1 Key-value store setup + +For the underlying key-value store we'll use the latest version of [badger](https://github.com/dgraph-io/badger), which is an embeddable, persistent and fast key-value (KV) database. -When a new transaction is added to the Tendermint Core, it will ask the -application to check it (validate the format, signatures, etc.). +```sh +$ go get github.com/dgraph-io/badger/v3 +go: added github.com/dgraph-io/badger/v3 v3.2103.2 +``` ```go -import "bytes" +import "github.com/dgraph-io/badger/v3" + +type KVStoreApplication struct { + db *badger.DB + currentBatch *badger.Txn +} + +func NewKVStoreApplication(db *badger.DB) *KVStoreApplication { + return &KVStoreApplication{ + db: db, + } +} +``` + +### 1.3.2 CheckTx + +When a new transaction is added to the Tendermint Core, it will ask the application to check it (validate the format, signatures, etc.). + +```go +import ( + "bytes" + + ... +) func (app *KVStoreApplication) isValid(tx []byte) (code uint32) { // check format @@ -214,26 +238,7 @@ Valid transactions will eventually be committed given they are not too big and have enough gas. To learn more about gas, check out ["the specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas). -For the underlying key-value store we'll use -[badger](https://github.com/dgraph-io/badger), which is an embeddable, -persistent and fast key-value (KV) database. 
- -```go -import "github.com/dgraph-io/badger" - -type KVStoreApplication struct { - db *badger.DB - currentBatch *badger.Txn -} - -func NewKVStoreApplication(db *badger.DB) *KVStoreApplication { - return &KVStoreApplication{ - db: db, - } -} -``` - -### 1.3.2 BeginBlock -> DeliverTx -> EndBlock -> Commit +### 1.3.3 BeginBlock -> DeliverTx -> EndBlock -> Commit When Tendermint Core has decided on the block, it's transfered to the application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and @@ -290,7 +295,7 @@ func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit { } ``` -### 1.3.3 Query +### 1.3.4 Query Now, when the client wants to know whenever a particular key/value exist, it will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call @@ -348,17 +353,15 @@ import ( "path/filepath" "syscall" - "github.com/dgraph-io/badger" + "github.com/dgraph-io/badger/v3" "github.com/spf13/viper" - abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" - tmflags "github.com/tendermint/tendermint/libs/cli/flags" - "github.com/tendermint/tendermint/libs/log" - nm "github.com/tendermint/tendermint/node" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" + abciclient "github.com/tendermint/tendermint/abci/client" + abcitypes "github.com/tendermint/tendermint/abci/types" + tmconfig "github.com/tendermint/tendermint/config" + tmlog "github.com/tendermint/tendermint/libs/log" + tmservice "github.com/tendermint/tendermint/libs/service" + tmnode "github.com/tendermint/tendermint/node" ) var configFile string @@ -395,57 +398,42 @@ func main() { <-c } -func newTendermint(app abci.Application, configFile string) (*nm.Node, error) { - // read config - config := cfg.DefaultValidatorConfig() - config.RootDir = filepath.Dir(filepath.Dir(configFile)) - viper.SetConfigFile(configFile) - if err := viper.ReadInConfig(); err != 
nil { - return nil, fmt.Errorf("viper failed to read config file: %w", err) - } - if err := viper.Unmarshal(config); err != nil { - return nil, fmt.Errorf("viper failed to unmarshal config: %w", err) - } - if err := config.ValidateBasic(); err != nil { - return nil, fmt.Errorf("config is invalid: %w", err) - } - - // create logger - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) - var err error - logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel) - if err != nil { - return nil, fmt.Errorf("failed to parse log level: %w", err) - } - - // read private validator - pv := privval.LoadFilePV( - config.PrivValidatorKeyFile(), - config.PrivValidatorStateFile(), - ) - - // read node key - nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) - if err != nil { - return nil, fmt.Errorf("failed to load node's key: %w", err) - } - - // create node - node, err := nm.NewNode( - config, - pv, - nodeKey, - abcicli.NewLocalClientCreator(app), - nm.DefaultGenesisDocProviderFunc(config), - nm.DefaultDBProvider, - nm.DefaultMetricsProvider(config.Instrumentation), - logger) - if err != nil { - return nil, fmt.Errorf("failed to create new Tendermint node: %w", err) - } - - return node, nil +func newTendermint(app abcitypes.Application, configFile string) (tmservice.Service, error) { + // read config + config := tmconfig.DefaultValidatorConfig() + config.SetRoot(filepath.Dir(filepath.Dir(configFile))) + + viper.SetConfigFile(configFile) + if err := viper.ReadInConfig(); err != nil { + return nil, fmt.Errorf("viper failed to read config file: %w", err) + } + if err := viper.Unmarshal(config); err != nil { + return nil, fmt.Errorf("viper failed to unmarshal config: %w", err) + } + if err := config.ValidateBasic(); err != nil { + return nil, fmt.Errorf("config is invalid: %w", err) + } + + // create logger + logger, err := tmlog.NewDefaultLogger(tmlog.LogFormatPlain, config.LogLevel, false) + if err != nil { + return nil, fmt.Errorf("failed to create 
logger: %w", err) + } + + // create node + node, err := tmnode.New( + config, + logger, + abciclient.NewLocalCreator(app), + nil, + ) + if err != nil { + return nil, fmt.Errorf("failed to create new Tendermint node: %w", err) + } + + return node, nil } + ``` This is a huge blob of code, so let's break it down into pieces. @@ -469,7 +457,7 @@ This can be avoided by setting the truncate option to true, like this: db, err := badger.Open(badger.DefaultOptions("/tmp/badger").WithTruncate(true)) ``` -Then we use it to create a Tendermint Core `Node` instance: +Then we use it to create a Tendermint Core [Service](https://github.com/tendermint/tendermint/blob/v0.35.8/libs/service/service.go#L24) instance: ```go flag.Parse() @@ -483,75 +471,48 @@ if err != nil { ... // create node -node, err := nm.NewNode( - config, - pv, - nodeKey, - abcicli.NewLocalClientCreator(app), - nm.DefaultGenesisDocProviderFunc(config), - nm.DefaultDBProvider, - nm.DefaultMetricsProvider(config.Instrumentation), - logger) +node, err := tmnode.New( + config, + logger, + abciclient.NewLocalCreator(app), + nil, +) if err != nil { - return nil, fmt.Errorf("failed to create new Tendermint node: %w", err) + return nil, fmt.Errorf("failed to create new Tendermint node: %w", err) } ``` -`NewNode` requires a few things including a configuration file, a private -validator, a node key and a few others in order to construct the full node. +[tmnode.New](https://github.com/tendermint/tendermint/blob/v0.35.8/node/public.go#L29) requires a few things including a configuration file, a logger and a few others in order to construct the full node. -Note we use `abcicli.NewLocalClientCreator` here to create a local client instead -of one communicating through a socket or gRPC. +Note that we use [abciclient.NewLocalCreator](https://github.com/tendermint/tendermint/blob/v0.35.8/abci/client/creators.go#L15) here to create a local client instead of one communicating through a socket or gRPC. 
[viper](https://github.com/spf13/viper) is being used for reading the config, which we will generate later using the `tendermint init` command. ```go -config := cfg.DefaultValidatorConfig() -config.RootDir = filepath.Dir(filepath.Dir(configFile)) +// read config +config := tmconfig.DefaultValidatorConfig() +config.SetRoot(filepath.Dir(filepath.Dir(configFile))) viper.SetConfigFile(configFile) if err := viper.ReadInConfig(); err != nil { - return nil, fmt.Errorf("viper failed to read config file: %w", err) + return nil, fmt.Errorf("viper failed to read config file: %w", err) } if err := viper.Unmarshal(config); err != nil { - return nil, fmt.Errorf("viper failed to unmarshal config: %w", err) + return nil, fmt.Errorf("viper failed to unmarshal config: %w", err) } if err := config.ValidateBasic(); err != nil { - return nil, fmt.Errorf("config is invalid: %w", err) -} -``` - -We use `FilePV`, which is a private validator (i.e. thing which signs consensus -messages). Normally, you would use `SignerRemote` to connect to an external -[HSM](https://kb.certus.one/hsm.html). - -```go -pv := privval.LoadFilePV( - config.PrivValidatorKeyFile(), - config.PrivValidatorStateFile(), -) - -``` - -`nodeKey` is needed to identify the node in a p2p network. - -```go -nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) -if err != nil { - return nil, fmt.Errorf("failed to load node's key: %w", err) + return nil, fmt.Errorf("config is invalid: %w", err) } ``` -As for the logger, we use the build-in library, which provides a nice -abstraction over [go-kit's -logger](https://github.com/go-kit/kit/tree/master/log). +As for the logger, we use the built-in library, which provides a nice +abstraction over [zerolog](https://github.com/rs/zerolog). 
```go -logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) -var err error -logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel()) +// create logger +logger, err := tmlog.NewDefaultLogger(tmlog.LogFormatPlain, config.LogLevel, true) if err != nil { - return nil, fmt.Errorf("failed to parse log level: %w", err) + return nil, fmt.Errorf("failed to create logger: %w", err) } ``` @@ -570,40 +531,41 @@ signal.Notify(c, os.Interrupt, syscall.SIGTERM) <-c ``` -## 1.5 Getting Up and Running +## 1.5 Getting up and running -We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for -dependency management. +Make sure to enable [Go modules](https://github.com/golang/go/wiki/Modules). Run `go mod tidy` to download and add dependencies in `go.mod` file. -```bash -export GO111MODULE=on -go mod init github.com/me/example +```sh +$ go mod tidy +... ``` -This should create a `go.mod` file. The current tutorial only works with -the master branch of Tendermint. so let's make sure we're using the latest version: +Let's make sure we're using the latest version of Tendermint (currently `v0.35.8`). ```sh -go get github.com/tendermint/tendermint@master +$ go get github.com/tendermint/tendermint@latest +... ``` This will populate the `go.mod` with a release number followed by a hash for Tendermint. ```go -module github.com/me/example +module github.com//kvstore -go 1.15 +go 1.18 require ( - github.com/dgraph-io/badger v1.6.2 - github.com/tendermint/tendermint + github.com/dgraph-io/badger/v3 v3.2103.2 + github.com/tendermint/tendermint v0.35.8 + ... ) ``` -Now we can build the binary: +Now, we can build the binary: -```bash -go build +```sh +$ go build +... ``` To create a default configuration, nodeKey and private validator files, let's @@ -614,66 +576,87 @@ installing from source, don't forget to checkout the latest release (`git checkout vX.Y.Z`). Don't forget to check that the application uses the same major version. 
-```bash
-$ rm -rf /tmp/example
-$ TMHOME="/tmp/example" tendermint init validator
+```sh
+$ rm -rf /tmp/kvstore /tmp/badger
+$ TMHOME="/tmp/kvstore" tendermint init validator
 
-I[2019-07-16|18:40:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json
-I[2019-07-16|18:40:36.481] Generated node key module=main path=/tmp/example/config/node_key.json
-I[2019-07-16|18:40:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json
-I[2019-07-16|18:40:36.483] Generated config module=main mode=validator
+2022-07-20T17:04:41+08:00 INFO Generated private validator keyFile=/tmp/kvstore/config/priv_validator_key.json module=main stateFile=/tmp/kvstore/data/priv_validator_state.json
+2022-07-20T17:04:41+08:00 INFO Generated node key module=main path=/tmp/kvstore/config/node_key.json
+2022-07-20T17:04:41+08:00 INFO Generated genesis file module=main path=/tmp/kvstore/config/genesis.json
+2022-07-20T17:04:41+08:00 INFO Generated config mode=validator module=main
 ```
 
-We are ready to start our application:
+Feel free to explore the generated files, which can be found in the
+`/tmp/kvstore/config` directory. Documentation on the config can be found
+[here](https://docs.tendermint.com/master/tendermint-core/configuration.html).
-```bash -$ ./example -config "/tmp/example/config/config.toml" +We are ready to start our application: -badger 2019/07/16 18:42:25 INFO: All 0 tables opened in 0s -badger 2019/07/16 18:42:25 INFO: Replaying file id: 0 at offset: 0 -badger 2019/07/16 18:42:25 INFO: Replay took: 695.227s -E[2019-07-16|18:42:25.818] Couldn't connect to any seeds module=p2p -I[2019-07-16|18:42:26.853] Executed block module=state height=1 validTxs=0 invalidTxs=0 -I[2019-07-16|18:42:26.865] Committed state module=state height=1 txs=0 appHash= +```sh +$ ./kvstore -config "/tmp/kvstore/config/config.toml" + +badger 2022/07/16 13:55:59 INFO: All 0 tables opened in 0s +badger 2022/07/16 13:55:59 INFO: Replaying file id: 0 at offset: 0 +badger 2022/07/16 13:55:59 INFO: Replay took: 3.052µs +badger 2022/07/16 13:55:59 DEBUG: Value log discard stats empty +2022-07-16T13:55:59+08:00 INFO starting service impl=multiAppConn module=proxy service=multiAppConn +2022-07-16T13:55:59+08:00 INFO starting service connection=query impl=localClient module=abci-client service=localClient +2022-07-16T13:55:59+08:00 INFO starting service connection=snapshot impl=localClient module=abci-client service=localClient +2022-07-16T13:55:59+08:00 INFO starting service connection=mempool impl=localClient module=abci-client service=localClient +2022-07-16T13:55:59+08:00 INFO starting service connection=consensus impl=localClient module=abci-client service=localClient +2022-07-16T13:55:59+08:00 INFO starting service impl=EventBus module=events service=EventBus +2022-07-16T13:55:59+08:00 INFO starting service impl=PubSub module=pubsub service=PubSub +2022-07-16T13:55:59+08:00 INFO starting service impl=IndexerService module=txindex service=IndexerService +2022-07-16T13:55:59+08:00 INFO ABCI Handshake App Info hash= height=0 module=consensus protocol-version=0 software-version= +2022-07-16T13:55:59+08:00 INFO ABCI Replay Blocks appHeight=0 module=consensus stateHeight=0 storeHeight=0 +2022-07-16T13:55:59+08:00 INFO 
Completed ABCI Handshake - Tendermint and App are synced appHash= appHeight=0 module=consensus +2022-07-16T13:55:59+08:00 INFO Version info block=11 mode=validator p2p=8 tmVersion=0.35.8 ``` -Now open another tab in your terminal and try sending a transaction: +Let's try sending a transaction. Open another terminal and execute the below command. -```bash +```sh $ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' { - "check_tx": { - "gasWanted": "1", - ... - }, - "deliver_tx": { ... }, - "hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6", - "height": "128" + ... + "result": { + "check_tx": { + ... + "gas_wanted": "1", + ... + }, + "deliver_tx": {...}, + "hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6", + "height": "91" + } } ``` Response should contain the height where this transaction was committed. -Now let's check if the given key now exists and its value: +Let's check if the given key now exists and its value: -```json +```sh $ curl -s 'localhost:26657/abci_query?data="tendermint"' { - "response": { - "code": 0, - "log": "exists", - "info": "", - "index": "0", - "key": "dGVuZGVybWludA==", - "value": "cm9ja3M=", - "proofOps": null, - "height": "6", - "codespace": "" + ... + "result": { + "response": { + "code": 0, + "log": "exists", + "info": "", + "index": "0", + "key": "dGVuZGVybWludA==", + "value": "cm9ja3M=", + "proofOps": null, + "height": "0", + "codespace": "" + } } } ``` -"dGVuZGVybWludA==" and "cm9ja3M=" are the base64-encoding of the ASCII of +`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of "tendermint" and "rocks" accordingly. ## Outro diff --git a/docs/tutorials/go.md b/docs/tutorials/go.md index ff85bd0695..908c63c744 100644 --- a/docs/tutorials/go.md +++ b/docs/tutorials/go.md @@ -37,25 +37,27 @@ Core will not have access to application's state. ## 1.1 Installing Go Please refer to [the official guide for installing -Go](https://golang.org/doc/install). 
+Go](https://go.dev/doc/install). Verify that you have the latest version of Go installed: -```bash +```sh $ go version -go version go1.16.x darwin/amd64 +go version go1.18.x darwin/amd64 ``` ## 1.2 Creating a new Go project -We'll start by creating a new Go project. +We'll start by creating a new Go project. Initialize the folder with `go mod init`. Running this command should create the `go.mod` file. -```bash -mkdir kvstore -cd kvstore +```sh +$ mkdir kvstore +$ cd kvstore +$ go mod init github.com//kvstore +go: creating new go.mod: module github.com//kvstore ``` -Inside the example directory create a `main.go` file with the following content: +Inside the project directory, create a `main.go` file with the following content: ```go package main @@ -71,8 +73,8 @@ func main() { When run, this should print "Hello, Tendermint Core" to the standard output. -```bash -go run main.go +```sh +$ go run main.go Hello, Tendermint Core ``` @@ -150,10 +152,34 @@ func (KVStoreApplication) ApplySnapshotChunk(abcitypes.RequestApplySnapshotChunk } ``` -Now I will go through each method explaining when it's called and adding +Now, we will go through each method and explain when it is executed while adding required business logic. -### 1.3.1 CheckTx +### 1.3.1 Key-value store setup + +For the underlying key-value store we'll use the latest version of [badger](https://github.com/dgraph-io/badger), which is an embeddable, persistent and fast key-value (KV) database. + +```sh +$ go get github.com/dgraph-io/badger/v3 +go: added github.com/dgraph-io/badger/v3 v3.2103.2 +``` + +```go +import "github.com/dgraph-io/badger/v3" + +type KVStoreApplication struct { + db *badger.DB + currentBatch *badger.Txn +} + +func NewKVStoreApplication(db *badger.DB) *KVStoreApplication { + return &KVStoreApplication{ + db: db, + } +} +``` + +### 1.3.2 CheckTx When a new transaction is added to the Tendermint Core, it will ask the application to check it (validate the format, signatures, etc.). 
@@ -212,26 +238,8 @@ Valid transactions will eventually be committed given they are not too big and have enough gas. To learn more about gas, check out ["the specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas). -For the underlying key-value store we'll use -[badger](https://github.com/dgraph-io/badger), which is an embeddable, -persistent and fast key-value (KV) database. -```go -import "github.com/dgraph-io/badger" - -type KVStoreApplication struct { - db *badger.DB - currentBatch *badger.Txn -} - -func NewKVStoreApplication(db *badger.DB) *KVStoreApplication { - return &KVStoreApplication{ - db: db, - } -} -``` - -### 1.3.2 BeginBlock -> DeliverTx -> EndBlock -> Commit +### 1.3.3 BeginBlock -> DeliverTx -> EndBlock -> Commit When Tendermint Core has decided on the block, it's transferred to the application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and @@ -287,7 +295,7 @@ func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit { } ``` -### 1.3.3 Query +### 1.3.4 Query Now, when the client wants to know whenever a particular key/value exist, it will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call @@ -344,7 +352,7 @@ import ( "os/signal" "syscall" - "github.com/dgraph-io/badger" + "github.com/dgraph-io/badger/v3" abciserver "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/libs/log" @@ -353,7 +361,7 @@ import ( var socketAddr string func init() { - flag.StringVar(&socketAddr, "socket-addr", "unix://example.sock", "Unix domain socket address") + flag.StringVar(&socketAddr, "socket-addr", "unix://kvstore.sock", "Unix domain socket address") } func main() { @@ -426,40 +434,41 @@ signal.Notify(c, os.Interrupt, syscall.SIGTERM) <-c ``` -## 1.5 Getting Up and Running +## 1.5 Getting up and running -We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for -dependency management. 
+Make sure to enable [Go modules](https://github.com/golang/go/wiki/Modules). Run `go mod tidy` to download and add dependencies in `go.mod` file. -```bash -export GO111MODULE=on -go mod init github.com/me/example +```sh +$ go mod tidy +... ``` -This should create a `go.mod` file. The current tutorial only works with -the master branch of Tendermint, so let's make sure we're using the latest version: +Let's make sure we're using the latest version of Tendermint (currently `v0.35.8`). ```sh -go get github.com/tendermint/tendermint@97a3e44e0724f2017079ce24d36433f03124c09e +$ go get github.com/tendermint/tendermint@latest +... ``` This will populate the `go.mod` with a release number followed by a hash for Tendermint. ```go -module github.com/me/example +module github.com//kvstore -go 1.16 +go 1.18 require ( - github.com/dgraph-io/badger v1.6.2 - github.com/tendermint/tendermint + github.com/dgraph-io/badger/v3 v3.2103.2 + github.com/tendermint/tendermint v0.35.8 + ... ) ``` -Now we can build the binary: +Now, we can build the binary: -```bash -go build +```sh +$ go build +... ``` To create a default configuration, nodeKey and private validator files, let's @@ -470,94 +479,112 @@ installing from source, don't forget to checkout the latest release (`git checkout vX.Y.Z`). Don't forget to check that the application uses the same major version. 
-```bash -rm -rf /tmp/example -TMHOME="/tmp/example" tendermint init validator +```sh +$ rm -rf /tmp/kvstore /tmp/badger +$ TMHOME="/tmp/kvstore" tendermint init validator -I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json -I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json -I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json -I[2019-07-16|18:20:36.483] Generated config module=main mode=validator +2022-07-20T17:04:41+08:00 INFO Generated private validator keyFile=/tmp/kvstore/config/priv_validator_key.json module=main stateFile=/tmp/kvstore/data/priv_validator_state.json +2022-07-20T17:04:41+08:00 INFO Generated node key module=main path=/tmp/kvstore/config/node_key.json +2022-07-20T17:04:41+08:00 INFO Generated genesis file module=main path=/tmp/kvstore/config/genesis.json +2022-07-20T17:04:41+08:00 INFO Generated config mode=validator module=main ``` Feel free to explore the generated files, which can be found at -`/tmp/example/config` directory. Documentation on the config can be found +`/tmp/kvstore/config` directory. Documentation on the config can be found [here](https://docs.tendermint.com/master/tendermint-core/configuration.html). 
We are ready to start our application: -```bash -rm example.sock -./example - -badger 2019/07/16 18:25:11 INFO: All 0 tables opened in 0s -badger 2019/07/16 18:25:11 INFO: Replaying file id: 0 at offset: 0 -badger 2019/07/16 18:25:11 INFO: Replay took: 300.4s -I[2019-07-16|18:25:11.523] Starting ABCIServer impl=ABCIServ +```sh +$ rm kvstore.sock +$ ./kvstore + +badger 2022/07/20 17:07:17 INFO: All 1 tables opened in 9ms +badger 2022/07/20 17:07:17 INFO: Replaying file id: 0 at offset: 256 +badger 2022/07/20 17:07:17 INFO: Replay took: 9.077µs +badger 2022/07/20 17:07:17 DEBUG: Value log discard stats empty +2022-07-20T17:07:17+08:00 INFO starting service impl=ABCIServer service=ABCIServer +2022-07-20T17:07:17+08:00 INFO Waiting for new connection... ``` -Then we need to start Tendermint Core and point it to our application. Staying -within the application directory execute: - -```bash -TMHOME="/tmp/example" tendermint node --proxy-app=unix://example.sock +Then, we need to start Tendermint Core and point it to our application. 
Staying +within the project directory, open another terminal and execute the command below: -I[2019-07-16|18:26:20.362] Version info module=main software=0.32.1 block=10 p2p=7 -I[2019-07-16|18:26:20.383] Starting Node module=main impl=Node -E[2019-07-16|18:26:20.392] Couldn't connect to any seeds module=p2p -I[2019-07-16|18:26:20.394] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:8dab80770ae8e295d4ce905d86af78c4ff634b79 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-nIO96P Version:0.32.1 Channels:4020212223303800 Moniker:app48.fun-box.ru Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}" -I[2019-07-16|18:26:21.440] Executed block module=state height=1 validTxs=0 invalidTxs=0 -I[2019-07-16|18:26:21.446] Committed state module=state height=1 txs=0 appHash= +```sh +$ TMHOME="/tmp/kvstore" tendermint node --proxy-app=unix://kvstore.sock + +2022-07-20T17:10:22+08:00 INFO starting service impl=multiAppConn module=proxy service=multiAppConn +2022-07-20T17:10:22+08:00 INFO starting service connection=query impl=socketClient module=abci-client service=socketClient +2022-07-20T17:10:22+08:00 INFO starting service connection=snapshot impl=socketClient module=abci-client service=socketClient +2022-07-20T17:10:22+08:00 INFO starting service connection=mempool impl=socketClient module=abci-client service=socketClient +2022-07-20T17:10:22+08:00 INFO starting service connection=consensus impl=socketClient module=abci-client service=socketClient +2022-07-20T17:10:22+08:00 INFO starting service impl=EventBus module=events service=EventBus +2022-07-20T17:10:22+08:00 INFO starting service impl=PubSub module=pubsub service=PubSub +2022-07-20T17:10:22+08:00 INFO starting service impl=IndexerService module=txindex service=IndexerService +... 
+2022-07-20T17:10:22+08:00 INFO starting service impl=Node module=main service=Node +2022-07-20T17:10:22+08:00 INFO Starting RPC HTTP server on 127.0.0.1:26657 module=rpc-server +2022-07-20T17:10:22+08:00 INFO p2p service legacy_enabled=false module=main +2022-07-20T17:10:22+08:00 INFO starting service impl=router module=p2p service=router +2022-07-20T17:10:22+08:00 INFO starting router channels=402021222330386061626300 listen_addr=tcp://0.0.0.0:26656 module=p2p net_addr={"id":"715727499e94f8fcaef1763192ebcc8460f44666","ip":"0.0.0.0","port":26656} node_id=715727499e94f8fcaef1763192ebcc8460f44666 +... ``` This should start the full node and connect to our ABCI application. ```sh -I[2019-07-16|18:25:11.525] Waiting for new connection... -I[2019-07-16|18:26:20.329] Accepted a new connection -I[2019-07-16|18:26:20.329] Waiting for new connection... -I[2019-07-16|18:26:20.330] Accepted a new connection -I[2019-07-16|18:26:20.330] Waiting for new connection... -I[2019-07-16|18:26:20.330] Accepted a new connection +2022-07-20T17:09:55+08:00 INFO Waiting for new connection... +2022-07-20T17:10:22+08:00 INFO Accepted a new connection +2022-07-20T17:10:22+08:00 INFO Waiting for new connection... +2022-07-20T17:10:22+08:00 INFO Accepted a new connection +2022-07-20T17:10:22+08:00 INFO Waiting for new connection... +2022-07-20T17:10:22+08:00 INFO Accepted a new connection ``` -Now open another tab in your terminal and try sending a transaction: +Let's try sending a transaction. Open another terminal and execute the below command. -```json +```sh $ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' { - "check_tx": { - "gasWanted": "1", - ... - }, - "deliver_tx": { ... }, - "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB", - "height": "33" + ... + "result": { + "check_tx": { + ... + "gas_wanted": "1", + ... + }, + "deliver_tx": { ... 
}, + "hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6", + "height": "15" + } } ``` Response should contain the height where this transaction was committed. -Now let's check if the given key now exists and its value: +Let's check if the given key now exists and its value: -```json +```sh $ curl -s 'localhost:26657/abci_query?data="tendermint"' { - "response": { - "code": 0, - "log": "exists", - "info": "", - "index": "0", - "key": "dGVuZGVybWludA==", - "value": "cm9ja3M=", - "proofOps": null, - "height": "6", - "codespace": "" + ... + "result": { + "response": { + "code": 0, + "log": "exists", + "info": "", + "index": "0", + "key": "dGVuZGVybWludA==", + "value": "cm9ja3M=", + "proofOps": null, + "height": "0", + "codespace": "" + } } } ``` -"dGVuZGVybWludA==" and "cm9ja3M=" are the base64-encoding of the ASCII of +`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of "tendermint" and "rocks" accordingly. ## Outro From 89246e993af52723a03a963185ce63f5c49fd5d3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Jul 2022 12:07:29 +0200 Subject: [PATCH 185/203] build(deps): Bump docker/build-push-action from 3.0.0 to 3.1.0 (#9082) --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 0a006f9b91..73f682089e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -49,7 +49,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v3.0.0 + uses: docker/build-push-action@v3.1.0 with: context: . 
file: ./DOCKER/Dockerfile From 488e1d4bd240d3715d30fd2d55036e70f9d760c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Jul 2022 12:53:08 +0000 Subject: [PATCH 186/203] build(deps): Bump github.com/creachadair/tomledit from 0.0.22 to 0.0.23 (#9084) Bumps [github.com/creachadair/tomledit](https://github.com/creachadair/tomledit) from 0.0.22 to 0.0.23.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/creachadair/tomledit&package-manager=go_modules&previous-version=0.0.22&new-version=0.0.23)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 704731c325..f238b242ef 100644 --- a/go.mod +++ b/go.mod @@ -240,7 +240,7 @@ require ( ) require ( - github.com/creachadair/tomledit v0.0.22 + github.com/creachadair/tomledit v0.0.23 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.37.0 github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca diff --git a/go.sum b/go.sum index 49f3247760..c95d3a8109 100644 --- a/go.sum +++ b/go.sum @@ -245,8 +245,8 @@ github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2 github.com/creachadair/command v0.0.0-20220426235536-a748effdf6a1/go.mod h1:bAM+qFQb/KwWyCc9MLC4U1jvn3XyakqP5QRkds5T6cY= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= -github.com/creachadair/tomledit v0.0.22 h1:lRtepmrwhzDq+g1gv5ftVn5itgo7CjYbm6abKTToqJ4= -github.com/creachadair/tomledit v0.0.22/go.mod h1:cIu/4x5L855oSRejIqr+WRFh+mv9g4fWLiUFaApYn/Y= +github.com/creachadair/tomledit v0.0.23 h1:ohYJjMsxwzj4dDzKaBWFbWH5J+3LO/8CYnlVY+baBWA= +github.com/creachadair/tomledit v0.0.23/go.mod h1:cIu/4x5L855oSRejIqr+WRFh+mv9g4fWLiUFaApYn/Y= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= From 7a84425aecd0f1c2ec94544d70b91d7a501889d4 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Tue, 26 Jul 2022 12:05:48 +0200 Subject: [PATCH 187/203] update dependabot frequencies (#9087) --- .github/dependabot.yml | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 23e73e7458..cdd7147712 100644 --- a/.github/dependabot.yml +++ 
b/.github/dependabot.yml @@ -4,7 +4,6 @@ updates: directory: "/" schedule: interval: weekly - day: monday target-branch: "master" open-pull-requests-limit: 10 labels: @@ -15,7 +14,6 @@ updates: directory: "/" schedule: interval: weekly - day: monday target-branch: "v0.34.x" open-pull-requests-limit: 10 labels: @@ -26,7 +24,6 @@ updates: directory: "/" schedule: interval: weekly - day: monday target-branch: "v0.35.x" open-pull-requests-limit: 10 labels: @@ -37,7 +34,6 @@ updates: directory: "/" schedule: interval: weekly - day: monday target-branch: "v0.36.x" open-pull-requests-limit: 10 labels: @@ -48,7 +44,6 @@ updates: directory: "/docs" schedule: interval: weekly - day: monday open-pull-requests-limit: 10 ################################### @@ -58,7 +53,7 @@ updates: - package-ecosystem: gomod directory: "/" schedule: - interval: daily + interval: weekly target-branch: "master" open-pull-requests-limit: 10 labels: @@ -68,7 +63,7 @@ updates: - package-ecosystem: gomod directory: "/" schedule: - interval: daily + interval: weekly target-branch: "v0.34.x" open-pull-requests-limit: 10 labels: @@ -78,7 +73,7 @@ updates: - package-ecosystem: gomod directory: "/" schedule: - interval: daily + interval: weekly target-branch: "v0.35.x" open-pull-requests-limit: 10 labels: @@ -88,7 +83,7 @@ updates: - package-ecosystem: gomod directory: "/" schedule: - interval: daily + interval: weekly target-branch: "v0.36.x" open-pull-requests-limit: 10 labels: From b9d6bb4cd193a02c47f1f0e1234c26e293f93505 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Wed, 27 Jul 2022 11:03:36 -0400 Subject: [PATCH 188/203] RELEASES: add a set of pre-release steps for Tendermint versions (#8786) {[rendered](https://github.com/tendermint/tendermint/blob/wb/release-document/RELEASES.md)} This pull request adds a proposed set of steps to perform before each Tendermint minor version release. 
This represents an initial set of ideas that derives from conversations among members of the Tendermint team. If you have additional suggestions for pre-release steps, please leave a comment explaining the specific suggestion and detailing how it would help build confidence in the quality of the release of Tendermint. --- RELEASES.md | 168 +++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 152 insertions(+), 16 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index f3bfd20d5c..803fc0d95a 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,8 +1,9 @@ # Releases -Tendermint uses [semantic versioning](https://semver.org/) with each release following -a `vX.Y.Z` format. The `master` branch is used for active development and thus it's -advisable not to build against it. +Tendermint uses modified [semantic versioning](https://semver.org/) with each +release following a `vX.Y.Z` format. Tendermint is currently on major version +0 and uses the minor version to signal breaking changes. The `master` branch is +used for active development and thus it is not advisable to build against it. The latest changes are always initially merged into `master`. Releases are specified using tags and are built from long-lived "backport" branches @@ -29,8 +30,8 @@ merging the pull request. ### Creating a backport branch -If this is the first release candidate for a major release, you get to have the -honor of creating the backport branch! +If this is the first release candidate for a minor version release, e.g. +v0.25.0, you get to have the honor of creating the backport branch! Note that, after creating the backport branch, you'll also need to update the tags on `master` so that `go mod` is able to order the branches correctly. You @@ -77,7 +78,8 @@ the 0.35.x line. After doing these steps, go back to `master` and do the following: -1. Tag `master` as the dev branch for the _next_ major release and push it up to GitHub. +1. 
Tag `master` as the dev branch for the _next_ minor version release and push + it up to GitHub. For example: ```sh git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36." @@ -99,7 +101,7 @@ After doing these steps, go back to `master` and do the following: ## Release candidates -Before creating an official release, especially a major release, we may want to create a +Before creating an official release, especially a minor release, we may want to create a release candidate (RC) for our friends and partners to test out. We use git tags to create RCs, and we build them off of backport branches. @@ -109,7 +111,7 @@ Tags for RCs should follow the "standard" release naming conventions, with `-rcX (Note that branches and tags _cannot_ have the same names, so it's important that these branches have distinct names from the tags/release names.) -If this is the first RC for a major release, you'll have to make a new backport branch (see above). +If this is the first RC for a minor release, you'll have to make a new backport branch (see above). Otherwise: 1. Start from the backport branch (e.g. `v0.35.x`). @@ -140,11 +142,13 @@ Note that this process should only be used for "true" RCs-- release candidates that, if successful, will be the next release. For more experimental "RCs," create a new, short-lived branch and tag that instead. -## Major release +## Minor release -This major release process assumes that this release was preceded by release candidates. +This minor release process assumes that this release was preceded by release candidates. If there were no release candidates, begin by creating a backport branch, as described above. +Before performing these steps, be sure the [Minor Release Checklist](#minor-release-checklist) has been completed. + 1. Start on the backport branch (e.g. `v0.35.x`) 2. Run integration tests (`make test_integrations`) and the e2e nightlies. 3. 
Prepare the release: @@ -176,16 +180,16 @@ If there were no release candidates, begin by creating a backport branch, as des - Commit these changes to `master` and backport them into the backport branch for this release. -## Minor release (point releases) +## Patch release -Minor releases are done differently from major releases: They are built off of +Patch releases are done differently from minor releases: They are built off of long-lived backport branches, rather than from master. As non-breaking changes land on `master`, they should also be backported into these backport branches. -Minor releases don't have release candidates by default, although any tricky +Patch releases don't have release candidates by default, although any tricky changes may merit a release candidate. -To create a minor release: +To create a patch release: 1. Checkout the long-lived backport branch: `git checkout v0.35.x` 2. Run integration tests (`make test_integrations`) and the nightlies. @@ -197,11 +201,143 @@ To create a minor release: - Bump the TMDefaultVersion in `version.go` - Bump the ABCI version number, if necessary. (Note that ABCI follows semver, and that ABCI versions are the only versions - which can change during minor releases, and only field additions are valid minor changes.) + which can change during patch releases, and only field additions are valid patch changes.) 4. Open a PR with these changes that will land them back on `v0.35.x` 5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag. - `git tag -a v0.35.1 -m 'Release v0.35.1'` - `git push origin v0.35.1` 6. Create a pull request back to master with the CHANGELOG & version changes from the latest release. - - Remove all `R:minor` labels from the pull requests that were included in the release. + - Remove all `R:patch` labels from the pull requests that were included in the release. - Do not merge the backport branch into master. 
+ +## Minor Release Checklist + +The following set of steps is performed on all releases that increment the +_minor_ version, e.g. v0.25 to v0.26. These steps ensure that Tendermint is +well tested, stable, and suitable for adoption by the various diverse projects +that rely on Tendermint. + +### Feature Freeze + +Ahead of any minor version release of Tendermint, the software enters 'Feature +Freeze' for at least two weeks. A feature freeze means that _no_ new features +are added to the code being prepared for release. No code changes should be made +to the code being released that do not directly improve pressing issues of code +quality. The following must not be merged during a feature freeze: + +* Refactors that are not related to specific bug fixes. +* Dependency upgrades. +* New test code that does not test a discovered regression. +* New features of any kind. +* Documentation or spec improvements that are not related to the newly developed +code. + +This period directly follows the creation of the [backport +branch](#creating-a-backport-branch). The Tendermint team instead directs all +attention to ensuring that the existing code is stable and reliable. Broken +tests are fixed, flaky tests are remedied, end-to-end test failures are +thoroughly diagnosed and all efforts of the team are aimed at improving the +quality of the code. During this period, the upgrade harness tests are run +repeatedly and a variety of in-house testnets are run to ensure Tendermint +functions at the scale it will be used by application developers and node +operators. + +### Nightly End-To-End Tests + +The Tendermint team maintains [a set of end-to-end +tests](https://github.com/tendermint/tendermint/blob/master/test/e2e/README.md#L1) +that run each night on the latest commit of the project and on the code in the
These tests start a network of containerized +Tendermint processes and run automated checks that the network functions as +expected in both stable and unstable conditions. During the feature freeze, +these tests are run nightly and must pass consistently for a release of +Tendermint to be considered stable. + +### Upgrade Harness + +> TODO(williambanfield): Change to past tense and clarify this section once +> upgrade harness is complete. + +The Tendermint team is creating an upgrade test harness to exercise the +workflow of stopping an instance of Tendermint running one version of the +software and starting up the same application running the next version. To +support upgrade testing, we will add the ability to terminate the Tendermint +process at specific pre-defined points in its execution so that we can verify +upgrades work in a representative sample of stop conditions. + +### Large Scale Testnets + +The Tendermint end-to-end tests run a small network (~10s of nodes) to exercise +basic consensus interactions. Real world deployments of Tendermint often have over +a hundred nodes just in the validator set, with many others acting as full +nodes and sentry nodes. To gain more assurance before a release, we will also run +larger-scale test networks to shake out emergent behaviors at scale. + +Large-scale test networks are run on a set of virtual machines (VMs). Each VM +is equipped with 4 Gigabytes of RAM and 2 CPU cores. The network runs a very +simple key-value store application. The application adds artificial delays to +different ABCI calls to simulate a slow application. Each testnet is briefly +run with no load being generated to collect a baseline performance. Once +baseline is captured, a consistent load is applied across the network. This +load takes the form of 10% of the running nodes all receiving a consistent +stream of two hundred transactions per minute each. 
+ +During each testnet, the following metrics are monitored and collected on each +node: + +* Consensus rounds per height +* Maximum connected peers, Minimum connected peers, Rate of change of peer connections +* Memory resident set size +* CPU utilization +* Blocks produced per minute +* Seconds for each step of consensus (Propose, Prevote, Precommit, Commit) +* Latency to receive block proposals + +For these tests we intentionally target low-powered host machines (with low core +counts and limited memory) to ensure we observe similar kinds of resource contention +and limitation that real-world deployments of Tendermint experience in production. + +#### 200 Node Testnet + +To test the stability and performance of Tendermint in a real-world scenario, +a 200 node test network is run. The network comprises 5 seed nodes, 100 +validators and 95 non-validating full nodes. All nodes begin by dialing +a subset of the seed nodes to discover peers. The network is run for several +days, with metrics being collected continuously. In cases of changes to performance +critical systems, testnets of larger sizes should be considered. + +#### Rotating Node Testnet + +Real-world deployments of Tendermint frequently see new nodes arrive and old +nodes exit the network. The rotating node testnet ensures that Tendermint is +able to handle this reliably. In this test, a network with 10 validators and +3 seed nodes is started. A rolling set of 25 full nodes is started and each +connects to the network by dialing one of the seed nodes. Once the node is able +to blocksync to the head of the chain and begins producing blocks using +Tendermint consensus it is stopped. Once stopped, a new node is started and +takes its place. This network is run for several days. + +#### Network Partition Testnet + +Tendermint is expected to recover from network partitions. A partition where no +subset of the nodes is left with the super-majority of the stake is expected to +stop making blocks.
Upon alleviation of the partition, the network is expected +to once again become fully connected and capable of producing blocks. The +network partition testnet ensures that Tendermint is able to handle this +reliably at scale. In this test, a network with 100 validators and 95 full +nodes is started. All validators have equal stake. Once the network is +producing blocks, a set of firewall rules is deployed to create a partitioned +network with 50% of the stake on one side and 50% on the other. Once the +network stops producing blocks, the firewall rules are removed and the nodes +are monitored to ensure they reconnect and that the network again begins +producing blocks. + +#### Absent Stake Testnet + +Tendermint networks often run with _some_ portion of the voting power offline. +The absent stake testnet ensures that large networks are able to handle this +reliably. A set of 150 validator nodes and three seed nodes is started. The set +of 150 validators is configured to only possess a cumulative stake of 67% of +the total stake. The remaining 33% of the stake is configured to belong to +a validator that is never actually run in the test network. The network is run +for multiple days, ensuring that it is able to produce blocks without issue. From 48147e1fb9bb3a277b5bc07f7e51a7c28c84b5cb Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 27 Jul 2022 15:16:51 -0400 Subject: [PATCH 189/203] logging: implement lazy sprinting (#8898) shout out to @joeabbey for the inspiration. This makes the lazy functions internal by default to prevent potential misuse by external callers. 
Should backport cleanly into 0.36 and I'll handle a messy merge into 0.35 --- abci/client/grpc_client.go | 3 +- abci/client/socket_client.go | 8 +- internal/blocksync/reactor.go | 6 +- internal/consensus/reactor.go | 3 +- internal/consensus/state.go | 198 +++++++++++++++--------------- internal/libs/strings/string.go | 86 +++++++++++++ internal/mempool/mempool.go | 7 +- internal/mempool/reactor.go | 6 +- internal/p2p/conn/connection.go | 2 +- internal/p2p/pex/reactor.go | 3 +- internal/p2p/pqueue.go | 12 +- internal/p2p/router.go | 3 +- internal/statesync/reactor.go | 30 ++--- internal/statesync/syncer.go | 40 ++++-- libs/log/default.go | 8 +- light/client.go | 14 ++- light/detector.go | 6 +- rpc/jsonrpc/server/http_server.go | 2 +- test/e2e/app/app.go | 6 +- test/e2e/runner/cleanup.go | 3 +- 20 files changed, 269 insertions(+), 177 deletions(-) diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 1e163056d3..bd255bebe4 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -3,7 +3,6 @@ package abciclient import ( "context" "errors" - "fmt" "net" "sync" "time" @@ -65,7 +64,7 @@ RETRY_LOOP: if cli.mustConnect { return err } - cli.logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr), "err", err) + cli.logger.Error("abci.grpcClient failed to connect, Retrying...", "addr", cli.addr, "err", err) timer.Reset(time.Second * dialRetryIntervalSeconds) select { case <-ctx.Done(): diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 7dfcf76cc3..d023e70741 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -67,8 +67,11 @@ func (cli *socketClient) OnStart(ctx context.Context) error { if cli.mustConnect { return err } - cli.logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. 
Retrying after %vs...", - cli.addr, dialRetryIntervalSeconds), "err", err) + + cli.logger.Error("abci.socketClient failed to connect, retrying after", + "retry_after", dialRetryIntervalSeconds, + "target", cli.addr, + "err", err) timer.Reset(time.Second * dialRetryIntervalSeconds) select { @@ -77,7 +80,6 @@ func (cli *socketClient) OnStart(ctx context.Context) error { case <-timer.C: continue } - } cli.conn = conn diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index c1b032b03d..9f0a760894 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -469,12 +469,10 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh lastAdvance = r.pool.LastAdvance() ) - r.logger.Debug( - "consensus ticker", + r.logger.Debug("consensus ticker", "num_pending", numPending, "total", lenRequesters, - "height", height, - ) + "height", height) switch { diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 3ba95c8361..12317a9b4c 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -10,6 +10,7 @@ import ( cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/eventbus" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/bits" @@ -1113,7 +1114,7 @@ func (r *Reactor) handleDataMessage(ctx context.Context, envelope *p2p.Envelope, } if r.WaitSync() { - logger.Info("ignoring message received during sync", "msg", fmt.Sprintf("%T", msgI)) + logger.Info("ignoring message received during sync", "msg", tmstrings.LazySprintf("%T", msgI)) return nil } diff --git a/internal/consensus/state.go b/internal/consensus/state.go index a7a0b8fed6..5b10d4b506 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -21,6 +21,7 @@ import ( 
"github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/jsontypes" "github.com/tendermint/tendermint/internal/libs/autofile" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" sm "github.com/tendermint/tendermint/internal/state" tmevents "github.com/tendermint/tendermint/libs/events" "github.com/tendermint/tendermint/libs/log" @@ -778,11 +779,9 @@ func (cs *State) updateToState(state sm.State) { // signal the new round step, because other services (eg. txNotifier) // depend on having an up-to-date peer state! if state.LastBlockHeight <= cs.state.LastBlockHeight { - cs.logger.Debug( - "ignoring updateToState()", + cs.logger.Debug("ignoring updateToState()", "new_height", state.LastBlockHeight+1, - "old_height", cs.state.LastBlockHeight+1, - ) + "old_height", cs.state.LastBlockHeight+1) cs.newStep() return } @@ -1038,12 +1037,10 @@ func (cs *State) handleMsg(ctx context.Context, mi msgInfo) { } if err != nil && msg.Round != cs.Round { - cs.logger.Debug( - "received block part from wrong round", + cs.logger.Debug("received block part from wrong round", "height", cs.Height, "cs_round", cs.Round, - "block_round", msg.Round, - ) + "block_round", msg.Round) err = nil } @@ -1073,7 +1070,7 @@ func (cs *State) handleMsg(ctx context.Context, mi msgInfo) { // We could make note of this and help filter in broadcastHasVoteMessage(). 
default: - cs.logger.Error("unknown msg type", "type", fmt.Sprintf("%T", msg)) + cs.logger.Error("unknown msg type", "type", tmstrings.LazySprintf("%T", msg)) return } @@ -1184,10 +1181,10 @@ func (cs *State) enterNewRound(ctx context.Context, height int64, round int32) { logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { - logger.Debug( - "entering new round with invalid args", - "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), - ) + logger.Debug("entering new round with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) return } @@ -1195,7 +1192,10 @@ func (cs *State) enterNewRound(ctx context.Context, height int64, round int32) { logger.Debug("need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now) } - logger.Debug("entering new round", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering new round", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) // increment validators if necessary validators := cs.Validators @@ -1274,10 +1274,10 @@ func (cs *State) enterPropose(ctx context.Context, height int64, round int32) { logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { - logger.Debug( - "entering propose step with invalid args", - "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), - ) + logger.Debug("entering propose step with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) return } @@ -1291,7 +1291,10 @@ func (cs *State) enterPropose(ctx context.Context, height int64, round int32) { } } - logger.Debug("entering propose step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering propose step", + "height", cs.Height, + 
"round", cs.Round, + "step", cs.Step) defer func() { // Done enterPropose: @@ -1333,17 +1336,13 @@ func (cs *State) enterPropose(ctx context.Context, height int64, round int32) { } if cs.isProposer(addr) { - logger.Debug( - "propose step; our turn to propose", - "proposer", addr, - ) + logger.Debug("propose step; our turn to propose", + "proposer", addr) cs.decideProposal(ctx, height, round) } else { - logger.Debug( - "propose step; not our turn to propose", - "proposer", cs.Validators.GetProposer().Address, - ) + logger.Debug("propose step; not our turn to propose", + "proposer", cs.Validators.GetProposer().Address) } } @@ -1480,10 +1479,10 @@ func (cs *State) enterPrevote(ctx context.Context, height int64, round int32) { logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { - logger.Debug( - "entering prevote step with invalid args", - "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), - ) + logger.Debug("entering prevote step with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) return } @@ -1493,7 +1492,10 @@ func (cs *State) enterPrevote(ctx context.Context, height int64, round int32) { cs.newStep() }() - logger.Debug("entering prevote step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering prevote step", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) // Sign and broadcast vote as necessary cs.doPrevote(ctx, height, round) @@ -1533,14 +1535,10 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32 //TODO: Remove this temporary fix when the complete solution is ready. 
See #8739 if !cs.replayMode && cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() { logger.Debug("prevote step: Proposal is not timely; prevoting nil", - "proposed", - tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano), - "received", - tmtime.Canonical(cs.ProposalReceiveTime).Format(time.RFC3339Nano), - "msg_delay", - sp.MessageDelay, - "precision", - sp.Precision) + "proposed", tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano), + "received", tmtime.Canonical(cs.ProposalReceiveTime).Format(time.RFC3339Nano), + "msg_delay", sp.MessageDelay, + "precision", sp.Precision) cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) return } @@ -1625,8 +1623,8 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32 blockID, ok := cs.Votes.Prevotes(cs.Proposal.POLRound).TwoThirdsMajority() if ok && cs.ProposalBlock.HashesTo(blockID.Hash) && cs.Proposal.POLRound >= 0 && cs.Proposal.POLRound < cs.Round { if cs.LockedRound <= cs.Proposal.POLRound { - logger.Debug("prevote step: ProposalBlock is valid and received a 2/3" + - "majority in a round later than the locked round; prevoting the proposal") + logger.Debug("prevote step: ProposalBlock is valid and received a 2/3 majority in a round later than the locked round", + "outcome", "prevoting the proposal") cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) return } @@ -1637,8 +1635,8 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32 } } - logger.Debug("prevote step: ProposalBlock is valid but was not our locked block or " + - "did not receive a more recent majority; prevoting nil") + logger.Debug("prevote step: ProposalBlock is valid but was not our locked block or did not receive a more recent majority", + "outcome", "prevoting nil") cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) } @@ -1647,10 +1645,10 @@ func (cs *State) 
enterPrevoteWait(height int64, round int32) { logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) { - logger.Debug( - "entering prevote wait step with invalid args", - "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), - ) + logger.Debug("entering prevote wait step with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) return } @@ -1661,7 +1659,10 @@ func (cs *State) enterPrevoteWait(height int64, round int32) { )) } - logger.Debug("entering prevote wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering prevote wait step", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) defer func() { // Done enterPrevoteWait: @@ -1679,17 +1680,21 @@ func (cs *State) enterPrevoteWait(height int64, round int32) { // Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round) // else, precommit nil otherwise. 
func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32) { - logger := cs.logger.With("height", height, "round", round) + logger := cs.logger.With("new_height", height, "new_round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { - logger.Debug( - "entering precommit step with invalid args", - "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), - ) + logger.Debug("entering precommit step with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) + return } - logger.Debug("entering precommit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering precommit step", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) defer func() { // Done enterPrecommit: @@ -1796,14 +1801,13 @@ func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32) // Enter: any +2/3 precommits for next round. func (cs *State) enterPrecommitWait(height int64, round int32) { - logger := cs.logger.With("height", height, "round", round) + logger := cs.logger.With("new_height", height, "new_round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) { - logger.Debug( - "entering precommit wait step with invalid args", + logger.Debug("entering precommit wait step with invalid args", "triggered_timeout", cs.TriggeredTimeoutPrecommit, - "current", fmt.Sprintf("%v/%v", cs.Height, cs.Round), - ) + "height", cs.Height, + "round", cs.Round) return } @@ -1814,7 +1818,10 @@ func (cs *State) enterPrecommitWait(height int64, round int32) { )) } - logger.Debug("entering precommit wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering precommit wait step", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) defer func() { // Done enterPrecommitWait: @@ -1828,17 +1835,20 @@ func (cs *State) 
enterPrecommitWait(height int64, round int32) { // Enter: +2/3 precommits for block func (cs *State) enterCommit(ctx context.Context, height int64, commitRound int32) { - logger := cs.logger.With("height", height, "commit_round", commitRound) + logger := cs.logger.With("new_height", height, "commit_round", commitRound) if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { - logger.Debug( - "entering commit step with invalid args", - "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), - ) + logger.Debug("entering commit step with invalid args", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) return } - logger.Debug("entering commit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("entering commit step", + "height", cs.Height, + "round", cs.Round, + "step", cs.Step) defer func() { // Done enterCommit: @@ -1892,12 +1902,12 @@ func (cs *State) enterCommit(ctx context.Context, height int64, commitRound int3 // If we have the block AND +2/3 commits for it, finalize. func (cs *State) tryFinalizeCommit(ctx context.Context, height int64) { - logger := cs.logger.With("height", height) - if cs.Height != height { panic(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) } + logger := cs.logger.With("height", height) + blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() if !ok || blockID.IsNil() { logger.Error("failed attempt to finalize commit; there was no +2/3 majority or +2/3 was for nil") @@ -1907,9 +1917,8 @@ func (cs *State) tryFinalizeCommit(ctx context.Context, height int64) { if !cs.ProposalBlock.HashesTo(blockID.Hash) { // TODO: this happens every time if we're not a validator (ugly logs) // TODO: ^^ wait, why does it matter that we're a validator? 
- logger.Debug( - "failed attempt to finalize commit; we do not have the commit block", - "proposal_block", cs.ProposalBlock.Hash(), + logger.Debug("failed attempt to finalize commit; we do not have the commit block", + "proposal_block", tmstrings.LazyBlockHash(cs.ProposalBlock), "commit_block", blockID.Hash, ) return @@ -1951,11 +1960,10 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) { logger.Info( "finalizing commit of block", - "hash", block.Hash(), + "hash", tmstrings.LazyBlockHash(block), "root", block.AppHash, "num_txs", len(block.Txs), ) - logger.Debug(fmt.Sprintf("%v", block)) // Save to blockStore. if cs.blockStore.Height() < block.Height { @@ -2052,8 +2060,11 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) { address types.Address ) if commitSize != valSetLen { - cs.logger.Error(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v", - commitSize, valSetLen, block.Height, block.LastCommit.Signatures, cs.LastValidators.Validators)) + cs.logger.Error("commit size doesn't match valset", + "size", commitSize, + "valset_len", valSetLen, + "height", block.Height, + "extra", tmstrings.LazySprintf("\n%v\n\n%v", block.LastCommit.Signatures, cs.LastValidators.Validators)) return } @@ -2187,8 +2198,7 @@ func (cs *State) addProposalBlockPart( cs.metrics.BlockGossipPartsReceived.With("matches_current", "false").Add(1) // NOTE: this can happen when we've gone to a higher round and // then receive parts from the previous round - not necessarily a bad peer. 
- cs.logger.Debug( - "received a block part when we are not expecting any", + cs.logger.Debug("received a block part when we are not expecting any", "height", height, "round", round, "index", part.Index, @@ -2248,11 +2258,9 @@ func (cs *State) handleCompleteProposal(ctx context.Context, height int64) { blockID, hasTwoThirds := prevotes.TwoThirdsMajority() if hasTwoThirds && !blockID.IsNil() && (cs.ValidRound < cs.Round) { if cs.ProposalBlock.HashesTo(blockID.Hash) { - cs.logger.Debug( - "updating valid block to new proposal block", + cs.logger.Debug("updating valid block to new proposal block", "valid_round", cs.Round, - "valid_block_hash", cs.ProposalBlock.Hash(), - ) + "valid_block_hash", tmstrings.LazyBlockHash(cs.ProposalBlock)) cs.ValidRound = cs.Round cs.ValidBlock = cs.ProposalBlock @@ -2291,23 +2299,19 @@ func (cs *State) tryAddVote(ctx context.Context, vote *types.Vote, peerID types. } if bytes.Equal(vote.ValidatorAddress, cs.privValidatorPubKey.Address()) { - cs.logger.Error( - "found conflicting vote from ourselves; did you unsafe_reset a validator?", + cs.logger.Error("found conflicting vote from ourselves; did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, - "type", vote.Type, - ) + "type", vote.Type) return added, err } // report conflicting votes to the evidence pool cs.evpool.ReportConflictingVotes(voteErr.VoteA, voteErr.VoteB) - cs.logger.Debug( - "found and sent conflicting votes to the evidence pool", + cs.logger.Debug("found and sent conflicting votes to the evidence pool", "vote_a", voteErr.VoteA, - "vote_b", voteErr.VoteB, - ) + "vote_b", voteErr.VoteB) return added, err } else if errors.Is(err, types.ErrVoteNonDeterministicSignature) { @@ -2331,13 +2335,11 @@ func (cs *State) addVote( vote *types.Vote, peerID types.NodeID, ) (added bool, err error) { - cs.logger.Debug( - "adding vote", + cs.logger.Debug("adding vote", "vote_height", vote.Height, "vote_type", vote.Type, "val_index", vote.ValidatorIndex, - 
"cs_height", cs.Height, - ) + "cs_height", cs.Height) if vote.Height < cs.Height || (vote.Height == cs.Height && vote.Round < cs.Round) { cs.metrics.MarkLateVote(vote.Type) @@ -2458,11 +2460,9 @@ func (cs *State) addVote( cs.ValidBlock = cs.ProposalBlock cs.ValidBlockParts = cs.ProposalBlockParts } else { - cs.logger.Debug( - "valid block we do not know about; set ProposalBlock=nil", - "proposal", cs.ProposalBlock.Hash(), - "block_id", blockID.Hash, - ) + cs.logger.Debug("valid block we do not know about; set ProposalBlock=nil", + "proposal", tmstrings.LazyBlockHash(cs.ProposalBlock), + "block_id", blockID.Hash) // we're getting the wrong block cs.ProposalBlock = nil diff --git a/internal/libs/strings/string.go b/internal/libs/strings/string.go index 95ea03b5a6..067f31ffc3 100644 --- a/internal/libs/strings/string.go +++ b/internal/libs/strings/string.go @@ -3,8 +3,94 @@ package strings import ( "fmt" "strings" + + tmbytes "github.com/tendermint/tendermint/libs/bytes" ) +type lazyStringf struct { + tmpl string + args []interface{} + out string +} + +func (s *lazyStringf) String() string { + if s.out == "" && s.tmpl != "" { + s.out = fmt.Sprintf(s.tmpl, s.args) + s.args = nil + s.tmpl = "" + } + return s.out +} + +// LazySprintf creates a fmt.Stringer implementation with similar +// semantics as fmt.Sprintf, *except* that the string is built when +// String() is called on the object. This means that format arguments +// are resolved/captured into string format when String() is called, +// and not, as in fmt.Sprintf when that function returns. +// +// As a result, if you use this type in go routines or defer +// statements it's possible to pass an argument to LazySprintf which +// has one value at the call site and a different value when the +// String() is evaluated, which may lead to unexpected outcomes. In +// these situations, either be *extremely* careful about the arguments +// passed to this function or use fmt.Sprintf. 
+// +// The implementation also caches the output of the underlying +// fmt.Sprintf statement when String() is called, so subsequent calls +// will produce the same result. +func LazySprintf(t string, args ...interface{}) fmt.Stringer { + return &lazyStringf{tmpl: t, args: args} +} + +type lazyStringer struct { + val fmt.Stringer + out string +} + +func (l *lazyStringer) String() string { + if l.out == "" && l.val != nil { + l.out = l.val.String() + l.val = nil + } + return l.out +} + +// LazyStringer captures a fmt.Stringer implementation resolving the +// underlying string *only* when the String() method is called and +// caching the result for future use. +func LazyStringer(v fmt.Stringer) fmt.Stringer { return &lazyStringer{val: v} } + +type lazyBlockHash struct { + block interface{ Hash() tmbytes.HexBytes } + out string +} + +// LazyBlockHash defers block Hash until the Stringer interface is invoked. +// This is particularly useful for avoiding calling Sprintf when debugging is not +// active. +// +// As a result, if you use this type in go routines or defer +// statements it's possible to pass an argument to LazyBlockHash that +// has one value at the call site and a different value when the +// String() is evaluated, which may lead to unexpected outcomes. In +// these situations, either be *extremely* careful about the arguments +// passed to this function or use fmt.Sprintf. +// +// The implementation also caches the output of the string form of the +// block hash when String() is called, so subsequent calls will +// produce the same result. 
+func LazyBlockHash(block interface{ Hash() tmbytes.HexBytes }) fmt.Stringer { + return &lazyBlockHash{block: block} +} + +func (l *lazyBlockHash) String() string { + if l.out == "" && l.block != nil { + l.out = l.block.Hash().String() + l.block = nil + } + return l.out +} + // SplitAndTrimEmpty slices s into all subslices separated by sep and returns a // slice of the string s with all leading and trailing Unicode code points // contained in cutset removed. If sep is empty, SplitAndTrim splits after each diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index 0354eb28a2..c0da7cef20 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -14,6 +14,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -540,7 +541,7 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon } txmp.logger.Debug("evicting lower-priority transactions", - "new_tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "new_tx", tmstrings.LazySprintf("%X", wtx.tx.Hash()), "new_priority", priority, ) @@ -562,7 +563,7 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon txmp.logger.Debug( "evicted valid existing transaction; mempool full", - "old_tx", fmt.Sprintf("%X", w.tx.Hash()), + "old_tx", tmstrings.LazySprintf("%X", w.tx.Hash()), "old_priority", w.priority, ) txmp.removeTxByElement(vic) @@ -588,7 +589,7 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon txmp.logger.Debug( "inserted new valid transaction", "priority", wtx.Priority(), - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "tx", tmstrings.LazySprintf("%X", wtx.tx.Hash()), "height", txmp.height, "num_txs", txmp.Size(), ) diff --git 
a/internal/mempool/reactor.go b/internal/mempool/reactor.go index 1f4b3f78a4..18124f82b9 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -9,6 +9,7 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ -330,9 +331,8 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, m return } - r.logger.Debug( - "gossiped tx to peer", - "tx", fmt.Sprintf("%X", memTx.tx.Hash()), + r.logger.Debug("gossiped tx to peer", + "tx", tmstrings.LazySprintf("%X", memTx.tx.Hash()), "peer", peerID, ) } diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index 8f8453e71a..adc287328f 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -313,7 +313,7 @@ func (c *MConnection) Send(chID ChannelID, msgBytes []byte) bool { // Send message to channel. channel, ok := c.channelsIdx[chID] if !ok { - c.logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) + c.logger.Error("Cannot send bytes to unknown channel", "channel", chID) return false } diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 87677799d0..9618433f4d 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -358,7 +358,8 @@ func (r *Reactor) calculateNextRequestTime(added int) time.Duration { // If the peer store is nearly full, wait the maximum interval. 
if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 { r.logger.Debug("Peer manager is nearly full", - "sleep_period", fullCapacityInterval, "ratio", ratio) + "sleep_period", fullCapacityInterval, + "ratio", ratio) return fullCapacityInterval } diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index 3cd1c897a5..f53c988a6f 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -208,13 +208,11 @@ func (s *pqScheduler) process(ctx context.Context) { } else { pqEnvTmpChIDStr := strconv.Itoa(int(pqEnvTmp.envelope.ChannelID)) s.metrics.PeerQueueDroppedMsgs.With("ch_id", pqEnvTmpChIDStr).Add(1) - s.logger.Debug( - "dropped envelope", + s.logger.Debug("dropped envelope", "ch_id", pqEnvTmpChIDStr, "priority", pqEnvTmp.priority, "msg_size", pqEnvTmp.size, - "capacity", s.capacity, - ) + "capacity", s.capacity) s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnvTmp.envelope.To)).Add(float64(-pqEnvTmp.size)) @@ -238,13 +236,11 @@ func (s *pqScheduler) process(ctx context.Context) { // There is not sufficient capacity to drop lower priority Envelopes, // so we drop the incoming Envelope. 
s.metrics.PeerQueueDroppedMsgs.With("ch_id", chIDStr).Add(1) - s.logger.Debug( - "dropped envelope", + s.logger.Debug("dropped envelope", "ch_id", chIDStr, "priority", pqEnv.priority, "msg_size", pqEnv.size, - "capacity", s.capacity, - ) + "capacity", s.capacity) } } diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 4f3af13465..f106917157 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -13,6 +13,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/tendermint/tendermint/crypto" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" @@ -461,7 +462,7 @@ func (r *Router) acceptPeers(ctx context.Context, transport Transport) { closeErr := conn.Close() r.logger.Debug("rate limiting incoming peer", "err", err, - "ip", incomingIP.String(), + "ip", tmstrings.LazyStringer(incomingIP), "close_err", closeErr, ) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index deed8d0d3e..ffd863dda1 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -686,37 +686,31 @@ func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envel func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope, chunkCh p2p.Channel) error { switch msg := envelope.Message.(type) { case *ssproto.ChunkRequest: - r.logger.Debug( - "received chunk request", + r.logger.Debug("received chunk request", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, - "peer", envelope.From, - ) + "peer", envelope.From) resp, err := r.conn.LoadSnapshotChunk(ctx, &abci.RequestLoadSnapshotChunk{ Height: msg.Height, Format: msg.Format, Chunk: msg.Index, }) if err != nil { - r.logger.Error( - "failed to load chunk", + r.logger.Error("failed to load chunk", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, "err", err, - "peer", 
envelope.From, - ) + "peer", envelope.From) return nil } - r.logger.Debug( - "sending chunk", + r.logger.Debug("sending chunk", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, - "peer", envelope.From, - ) + "peer", envelope.From) if err := chunkCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.ChunkResponse{ @@ -739,13 +733,11 @@ func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope return nil } - r.logger.Debug( - "received chunk; adding to sync", + r.logger.Debug("received chunk; adding to sync", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, - "peer", envelope.From, - ) + "peer", envelope.From) _, err := r.syncer.AddChunk(&chunk{ Height: msg.Height, Format: msg.Format, @@ -754,14 +746,12 @@ func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope Sender: envelope.From, }) if err != nil { - r.logger.Error( - "failed to add chunk", + r.logger.Error("failed to add chunk", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, "err", err, - "peer", envelope.From, - ) + "peer", envelope.From) return nil } diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index a09b558926..591639fcdd 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -84,11 +84,9 @@ func (s *syncer) AddChunk(chunk *chunk) (bool, error) { return false, err } if added { - s.logger.Debug("Added chunk to queue", "height", chunk.Height, "format", chunk.Format, - "chunk", chunk.Index) + s.logger.Debug("Added chunk to queue", "height", chunk.Height, "format", chunk.Format, "chunk", chunk.Index) } else { - s.logger.Debug("Ignoring duplicate chunk in queue", "height", chunk.Height, "format", chunk.Format, - "chunk", chunk.Index) + s.logger.Debug("Ignoring duplicate chunk in queue", "height", chunk.Height, "format", chunk.Format, "chunk", chunk.Index) } return added, nil } @@ -137,12 +135,20 @@ func (s *syncer) SyncAny( discoveryTime = 
minimumDiscoveryTime } + timer := time.NewTimer(discoveryTime) + defer timer.Stop() + if discoveryTime > 0 { if err := requestSnapshots(); err != nil { return sm.State{}, nil, err } - s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) - time.Sleep(discoveryTime) + s.logger.Info("discovering snapshots", + "interval", discoveryTime) + select { + case <-ctx.Done(): + return sm.State{}, nil, ctx.Err() + case <-timer.C: + } } // The app may ask us to retry a snapshot restoration, in which case we need to reuse @@ -151,8 +157,11 @@ func (s *syncer) SyncAny( snapshot *snapshot chunks *chunkQueue err error + iters int ) + for { + iters++ // If not nil, we're going to retry restoration of the same snapshot. if snapshot == nil { snapshot = s.snapshots.Best() @@ -162,9 +171,16 @@ func (s *syncer) SyncAny( if discoveryTime == 0 { return sm.State{}, nil, errNoSnapshots } - s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) - time.Sleep(discoveryTime) - continue + s.logger.Info("discovering snapshots", + "iterations", iters, + "interval", discoveryTime) + timer.Reset(discoveryTime) + select { + case <-ctx.Done(): + return sm.State{}, nil, ctx.Err() + case <-timer.C: + continue + } } if chunks == nil { chunks, err = newChunkQueue(snapshot, s.tempDir) @@ -494,13 +510,11 @@ func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uin return nil } - s.logger.Debug( - "Requesting snapshot chunk", + s.logger.Debug("Requesting snapshot chunk", "height", snapshot.Height, "format", snapshot.Format, "chunk", chunk, - "peer", peer, - ) + "peer", peer) msg := p2p.Envelope{ To: peer, diff --git a/libs/log/default.go b/libs/log/default.go index 557ba6551b..a6e8296396 100644 --- a/libs/log/default.go +++ b/libs/log/default.go @@ -55,9 +55,7 @@ func NewDefaultLogger(format, level string) (Logger, error) { // make the writer thread-safe logWriter = newSyncWriter(logWriter) - return &defaultLogger{ - Logger: 
zerolog.New(logWriter).Level(logLevel).With().Timestamp().Logger(), - }, nil + return &defaultLogger{Logger: zerolog.New(logWriter).Level(logLevel).With().Timestamp().Logger()}, nil } func (l defaultLogger) Info(msg string, keyVals ...interface{}) { @@ -73,9 +71,7 @@ func (l defaultLogger) Debug(msg string, keyVals ...interface{}) { } func (l defaultLogger) With(keyVals ...interface{}) Logger { - return &defaultLogger{ - Logger: l.Logger.With().Fields(keyVals).Logger(), - } + return &defaultLogger{Logger: l.Logger.With().Fields(keyVals).Logger()} } // OverrideWithNewLogger replaces an existing logger's internal with diff --git a/light/client.go b/light/client.go index f38e9d59de..3c32f5c692 100644 --- a/light/client.go +++ b/light/client.go @@ -9,6 +9,7 @@ import ( "sync" "time" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/light/provider" @@ -475,7 +476,8 @@ func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now return fmt.Errorf("existing trusted header %X does not match newHeader %X", l.Hash(), newHeader.Hash()) } c.logger.Debug("header has already been verified", - "height", newHeader.Height, "hash", newHeader.Hash()) + "height", newHeader.Height, + "hash", tmstrings.LazyBlockHash(newHeader)) return nil } @@ -576,7 +578,7 @@ func (c *Client) verifySequential( // 2) Verify them c.logger.Debug("verify adjacent newLightBlock against verifiedBlock", "trustedHeight", verifiedBlock.Height, - "trustedHash", verifiedBlock.Hash(), + "trustedHash", tmstrings.LazyBlockHash(verifiedBlock), "newHeight", interimBlock.Height, "newHash", interimBlock.Hash()) @@ -663,9 +665,9 @@ func (c *Client) verifySkipping( for { c.logger.Debug("verify non-adjacent newHeader against verifiedBlock", "trustedHeight", verifiedBlock.Height, - "trustedHash", verifiedBlock.Hash(), + "trustedHash", 
tmstrings.LazyBlockHash(verifiedBlock), "newHeight", blockCache[depth].Height, - "newHash", blockCache[depth].Hash()) + "newHash", tmstrings.LazyBlockHash(blockCache[depth])) // Verify the untrusted header. This function is equivalent to // ValidAndVerified in the spec @@ -897,9 +899,9 @@ func (c *Client) backwards( interimHeader = interimBlock.Header c.logger.Debug("verify newHeader against verifiedHeader", "trustedHeight", verifiedHeader.Height, - "trustedHash", verifiedHeader.Hash(), + "trustedHash", tmstrings.LazyBlockHash(verifiedHeader), "newHeight", interimHeader.Height, - "newHash", interimHeader.Hash()) + "newHash", tmstrings.LazyBlockHash(interimHeader)) if err := VerifyBackwards(interimHeader, verifiedHeader); err != nil { // verification has failed c.logger.Info("backwards verification failed, replacing primary...", "err", err, "primary", c.primary) diff --git a/light/detector.go b/light/detector.go index a5ac35a02d..a0afdca89d 100644 --- a/light/detector.go +++ b/light/detector.go @@ -39,8 +39,10 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig lastVerifiedHeader = primaryTrace[len(primaryTrace)-1].SignedHeader witnessesToRemove = make([]int, 0) ) - c.logger.Debug("running detector against trace", "finalizeBlockHeight", lastVerifiedHeader.Height, - "finalizeBlockHash", lastVerifiedHeader.Hash, "length", len(primaryTrace)) + c.logger.Debug("running detector against trace", + "finalizeBlockHeight", lastVerifiedHeader.Height, + "finalizeBlockHash", lastVerifiedHeader.Hash, + "length", len(primaryTrace)) // launch one goroutine per witness to retrieve the light block of the target height // and compare it with the header from the primary diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 50a37158ec..fffc002f39 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -58,7 +58,7 @@ func DefaultConfig() *Config { // Serve creates a http.Server and calls 
Serve with the given listener. It // wraps handler to recover panics and limit the request body size. func Serve(ctx context.Context, listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error { - logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listener.Addr())) + logger.Info("Starting RPC HTTP server on", "addr", listener.Addr()) h := recoverAndLogHandler(MaxBytesHandler(handler, config.MaxBodyBytes), logger) s := &http.Server{ Handler: h, diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 1f66e95cce..4a18a54a4a 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -18,6 +18,7 @@ import ( "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" + tmstrings "github.com/tendermint/tendermint/internal/libs/strings" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/version" @@ -484,7 +485,10 @@ func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVot time.Sleep(time.Duration(app.cfg.VoteExtensionDelayMS) * time.Millisecond) } - app.logger.Info("generated vote extension", "num", num, "ext", fmt.Sprintf("%x", ext[:extLen]), "state.Height", app.state.Height) + app.logger.Info("generated vote extension", + "num", num, + "ext", tmstrings.LazySprintf("%x", ext[:extLen]), + "state.Height", app.state.Height) return &abci.ResponseExtendVote{ VoteExtension: ext[:extLen], }, nil diff --git a/test/e2e/runner/cleanup.go b/test/e2e/runner/cleanup.go index 25a1008e6e..5332af29cb 100644 --- a/test/e2e/runner/cleanup.go +++ b/test/e2e/runner/cleanup.go @@ -3,7 +3,6 @@ package main import ( "context" "errors" - "fmt" "os" "github.com/tendermint/tendermint/libs/log" @@ -27,6 +26,6 @@ func Cleanup(ctx context.Context, logger log.Logger, testnetDir string, ti infra return err } - logger.Info(fmt.Sprintf("Removing testnet directory %q", 
testnetDir)) + logger.Info("Removing testnet", "directory", testnetDir) return os.RemoveAll(testnetDir) } From d433ebe68de20457d6cc0fd77a67ac5a7dbf5216 Mon Sep 17 00:00:00 2001 From: Mark Rushakoff Date: Fri, 29 Jul 2022 09:41:54 -0400 Subject: [PATCH 190/203] Improve handling of -short flag in tests (#9075) As a small developer quality of life improvement, I found many individual unit tests that take longer than around a second to complete, and set them to skip when run under `go test -short`. On my machine, the wall timings for tests (with `go test -count=1 ./...` and optionally `-short` and `-race`) are roughly: - Long tests, no race detector: about 1m42s - Short tests, no race detector: about 17s - Long tests, race detector enabled: about 2m1s - Short tests, race detector enabled: about 28s This PR is split into many commits each touching a single package, with commit messages detailing the approximate timing change per package. --- cmd/tendermint/commands/rollback_test.go | 4 +++ internal/consensus/byzantine_test.go | 4 +++ internal/consensus/reactor_test.go | 8 ++++++ internal/consensus/replay_test.go | 4 +++ internal/mempool/mempool_test.go | 8 ++++++ internal/p2p/conn/connection_test.go | 12 ++++++++ internal/p2p/peermanager_test.go | 4 +++ internal/p2p/router_test.go | 27 ++++++++++++++++-- internal/p2p/transport_mconn_test.go | 4 +++ internal/statesync/block_queue_test.go | 8 ++++++ internal/statesync/reactor_test.go | 12 ++++++++ internal/statesync/syncer_test.go | 4 +++ light/detector_test.go | 4 +++ light/example_test.go | 4 +++ light/light_test.go | 12 ++++++++ light/provider/http/http_test.go | 4 +++ node/node_test.go | 12 ++++++++ rpc/client/examples_test.go | 8 ++++++ rpc/client/helpers_test.go | 4 +++ rpc/client/rpc_test.go | 36 ++++++++++++++++++++++++ rpc/jsonrpc/client/ws_client_test.go | 12 ++++++++ rpc/jsonrpc/jsonrpc_test.go | 4 +++ types/part_set_test.go | 16 +++++++++++ types/validator_set_test.go | 4 +++ 24 files changed, 217 
insertions(+), 2 deletions(-) diff --git a/cmd/tendermint/commands/rollback_test.go b/cmd/tendermint/commands/rollback_test.go index 6d1ba818f0..9aae403cb3 100644 --- a/cmd/tendermint/commands/rollback_test.go +++ b/cmd/tendermint/commands/rollback_test.go @@ -15,6 +15,10 @@ import ( ) func TestRollbackIntegration(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + var height int64 dir := t.TempDir() ctx, cancel := context.WithCancel(context.Background()) diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 9c6f4a2954..0c1b47bdc5 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -33,6 +33,10 @@ import ( // Byzantine node sends two different prevotes (nil and blockID) to the same // validator. func TestByzantinePrevoteEquivocation(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // empirically, this test either passes in <1s or hits some // kind of deadlock and hit the larger timeout. 
This timeout // can be extended a bunch if needed, but it's good to avoid diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index d848f53e7a..8536fd0d29 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -779,6 +779,10 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { } func TestReactorVotingPowerChange(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() @@ -885,6 +889,10 @@ func TestReactorVotingPowerChange(t *testing.T) { } func TestReactorValidatorSetChanges(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 328dba040a..2339fa4a57 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -118,6 +118,10 @@ func sendTxs(ctx context.Context, t *testing.T, cs *State) { // TestWALCrash uses crashing WAL to test we can recover from any WAL failure. 
func TestWALCrash(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + testCases := []struct { name string initFn func(dbm.DB, *State, context.Context) diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index 2071d1f057..3505d7040a 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -132,6 +132,10 @@ func convertTex(in []testTx) types.Txs { } func TestTxMempool_TxsAvailable(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -540,6 +544,10 @@ func TestTxMempool_CheckTxSameSender(t *testing.T) { } func TestTxMempool_ConcurrentTxs(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/internal/p2p/conn/connection_test.go b/internal/p2p/conn/connection_test.go index 5a604cd23f..2f497bdd4e 100644 --- a/internal/p2p/conn/connection_test.go +++ b/internal/p2p/conn/connection_test.go @@ -315,6 +315,10 @@ func TestMConnectionMultiplePings(t *testing.T) { } func TestMConnectionPingPongs(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // check that we are not leaking any go-routines t.Cleanup(leaktest.CheckTimeout(t, 10*time.Second)) @@ -558,6 +562,10 @@ func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { } func TestMConnectionTrySend(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + server, client := net.Pipe() t.Cleanup(closeAll(t, client, server)) ctx, cancel := context.WithCancel(context.Background()) @@ -606,6 +614,10 @@ func TestConnVectors(t *testing.T) { } func TestMConnectionChannelOverflow(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + chOnErr := make(chan struct{}) chOnRcv := make(chan struct{}) diff --git 
a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index a1543bf18d..3e72c333b2 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -296,6 +296,10 @@ func TestPeerManager_DialNext(t *testing.T) { } func TestPeerManager_DialNext_Retry(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index dd336510c9..748731f32d 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -41,6 +41,10 @@ func echoReactor(ctx context.Context, channel p2p.Channel) { } func TestRouter_Network(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -162,6 +166,10 @@ func TestRouter_Channel_Basic(t *testing.T) { // Channel tests are hairy to mock, so we use an in-memory network instead. 
func TestRouter_Channel_SendReceive(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -224,6 +232,10 @@ func TestRouter_Channel_SendReceive(t *testing.T) { } func TestRouter_Channel_Broadcast(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) ctx, cancel := context.WithCancel(context.Background()) @@ -255,6 +267,10 @@ func TestRouter_Channel_Broadcast(t *testing.T) { } func TestRouter_Channel_Wrapper(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) ctx, cancel := context.WithCancel(context.Background()) @@ -443,6 +459,11 @@ func TestRouter_AcceptPeers(t *testing.T) { } func TestRouter_AcceptPeers_Errors(t *testing.T) { + if testing.Short() { + // Each subtest takes more than one second due to the time.Sleep call, + // so just skip from the parent test in short mode. 
+ t.Skip("skipping test in short mode") + } for _, err := range []error{io.EOF, context.Canceled, context.DeadlineExceeded} { t.Run(err.Error(), func(t *testing.T) { @@ -480,9 +501,7 @@ func TestRouter_AcceptPeers_Errors(t *testing.T) { router.Stop() mockTransport.AssertExpectations(t) - }) - } } @@ -811,6 +830,10 @@ func TestRouter_EvictPeers(t *testing.T) { } func TestRouter_ChannelCompatability(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/internal/p2p/transport_mconn_test.go b/internal/p2p/transport_mconn_test.go index c478dbe1d2..6fafd01aeb 100644 --- a/internal/p2p/transport_mconn_test.go +++ b/internal/p2p/transport_mconn_test.go @@ -59,6 +59,10 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) { } func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/internal/statesync/block_queue_test.go b/internal/statesync/block_queue_test.go index 364a7f5b29..b088e15eaf 100644 --- a/internal/statesync/block_queue_test.go +++ b/internal/statesync/block_queue_test.go @@ -126,6 +126,10 @@ func TestBlockQueueWithFailures(t *testing.T) { // Test that when all the blocks are retrieved that the queue still holds on to // it's workers and in the event of failure can still fetch the failed block func TestBlockQueueBlocks(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 2) @@ -176,6 +180,10 @@ loop: } func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + peerID, err := 
types.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1) diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index b81c1ac2c8..427f3bbe2a 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -197,6 +197,10 @@ func setup( } func TestReactor_Sync(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() @@ -618,6 +622,10 @@ func TestReactor_StateProviderP2P(t *testing.T) { } func TestReactor_Backfill(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -626,6 +634,10 @@ func TestReactor_Backfill(t *testing.T) { for _, failureRate := range failureRates { failureRate := failureRate t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) { + if testing.Short() && failureRate > 0 { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index 3fc3f0db40..15beef34b1 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -22,6 +22,10 @@ import ( ) func TestSyncer_SyncAny(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/light/detector_test.go b/light/detector_test.go index 4a86b5b872..1a67cf050a 100644 --- a/light/detector_test.go +++ b/light/detector_test.go @@ -235,6 +235,10 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) { } func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // primary performs a lunatic 
attack but changes the time of the header to // something in the future relative to the blockchain var ( diff --git a/light/example_test.go b/light/example_test.go index c735c21a2c..ad097983a9 100644 --- a/light/example_test.go +++ b/light/example_test.go @@ -17,6 +17,10 @@ import ( // Manually getting light blocks and verifying them. func TestExampleClient(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() conf, err := rpctest.CreateConfig(t, "ExampleClient_VerifyLightBlockAtHeight") diff --git a/light/light_test.go b/light/light_test.go index 58fd9f0b19..4c1bd59848 100644 --- a/light/light_test.go +++ b/light/light_test.go @@ -23,6 +23,10 @@ import ( // Automatically getting new headers and verifying them. func TestClientIntegration_Update(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Parallel() ctx, cancel := context.WithCancel(context.Background()) @@ -84,6 +88,10 @@ func TestClientIntegration_Update(t *testing.T) { // Manually getting light blocks and verifying them. 
func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -162,6 +170,10 @@ func waitForBlock(ctx context.Context, p provider.Provider, height int64) (*type } func TestClientStatusRPC(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() conf, err := rpctest.CreateConfig(t, t.Name()) diff --git a/light/provider/http/http_test.go b/light/provider/http/http_test.go index cb443caaf1..3dc425c7ac 100644 --- a/light/provider/http/http_test.go +++ b/light/provider/http/http_test.go @@ -33,6 +33,10 @@ func TestNewProvider(t *testing.T) { } func TestProvider(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() cfg, err := rpctest.CreateConfig(t, t.Name()) diff --git a/node/node_test.go b/node/node_test.go index 245e39b3c2..6103d554ec 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -106,6 +106,10 @@ func getTestNode(ctx context.Context, t *testing.T, conf *config.Config, logger } func TestNodeDelayedStart(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + cfg, err := config.ResetTestRoot(t.TempDir(), "node_delayed_start_test") require.NoError(t, err) @@ -195,6 +199,10 @@ func TestNodeSetPrivValTCP(t *testing.T) { // address without a protocol must result in error func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -441,6 +449,10 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { } func TestMaxProposalBlockSize(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 163093c848..935c141f38 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -18,6 +18,10 @@ import ( ) func TestHTTPSimple(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -68,6 +72,10 @@ func TestHTTPSimple(t *testing.T) { } func TestHTTPBatching(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index a66becbd58..eb13e8d77e 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -15,6 +15,10 @@ import ( ) func TestWaitForHeight(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 7d37264968..3ac884ed7c 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -133,6 +133,10 @@ func TestClientOperations(t *testing.T) { }) t.Run("Batching", func(t *testing.T) { t.Run("JSONRPCCalls", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + logger := log.NewTestingLogger(t) c := getHTTPClient(t, logger, conf) testBatchedJSONRPCCalls(ctx, t, c) @@ -171,6 +175,10 @@ func TestClientOperations(t *testing.T) { require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") }) t.Run("ConcurrentJSONRPC", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + logger := log.NewTestingLogger(t) var wg sync.WaitGroup @@ -291,6 +299,10 @@ func TestClientMethodCalls(t *testing.T) { "first: %+v, doc: %s", first, string(doc)) }) 
t.Run("ABCIQuery", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // write something k, v, tx := MakeTxKV() status, err := c.Status(ctx) @@ -309,6 +321,10 @@ func TestClientMethodCalls(t *testing.T) { } }) t.Run("AppCalls", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // get an offset of height to avoid racing and guessing s, err := c.Status(ctx) require.NoError(t, err) @@ -409,6 +425,10 @@ func TestClientMethodCalls(t *testing.T) { // XXX Test proof }) t.Run("BlockchainInfo", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -439,6 +459,10 @@ func TestClientMethodCalls(t *testing.T) { assert.Contains(t, err.Error(), "can't be greater than max") }) t.Run("BroadcastTxCommit", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(ctx, tx) require.NoError(t, err, "%d: %+v", i, err) @@ -481,6 +505,10 @@ func TestClientMethodCalls(t *testing.T) { // TODO: more checks... 
}) t.Run("Block", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + const subscriber = "TestBlockEvents" eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(types.EventNewBlockValue).String()) @@ -515,6 +543,10 @@ func TestClientMethodCalls(t *testing.T) { }) t.Run("Evidence", func(t *testing.T) { t.Run("BroadcastDuplicateVote", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -722,6 +754,10 @@ func TestClientMethodCallsAdvanced(t *testing.T) { } }) t.Run("TxSearchWithTimeout", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + logger := log.NewTestingLogger(t) timeoutClient := getHTTPClientWithTimeout(t, logger, conf, 10*time.Second) diff --git a/rpc/jsonrpc/client/ws_client_test.go b/rpc/jsonrpc/client/ws_client_test.go index 5bbb5fc25a..0434f6461c 100644 --- a/rpc/jsonrpc/client/ws_client_test.go +++ b/rpc/jsonrpc/client/ws_client_test.go @@ -65,6 +65,10 @@ func (h *myTestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func TestWSClientReconnectsAfterReadFailure(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) // start server @@ -97,6 +101,10 @@ func TestWSClientReconnectsAfterReadFailure(t *testing.T) { } func TestWSClientReconnectsAfterWriteFailure(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) // start server @@ -127,6 +135,10 @@ func TestWSClientReconnectsAfterWriteFailure(t *testing.T) { } func TestWSClientReconnectFailure(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + t.Cleanup(leaktest.Check(t)) // start server diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index 236db9b320..0586e3019f 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ 
b/rpc/jsonrpc/jsonrpc_test.go @@ -340,6 +340,10 @@ func TestRPC(t *testing.T) { } }) t.Run("WSClientPingPong", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // TestWSClientPingPong checks that a client & server exchange pings // & pongs so connection stays alive. t.Cleanup(leaktest.CheckTimeout(t, 4*time.Second)) diff --git a/types/part_set_test.go b/types/part_set_test.go index af65ca8db0..760abe9224 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -16,6 +16,10 @@ const ( ) func TestBasicPartSet(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // Construct random data of size partSize * 100 nParts := 100 data := tmrand.Bytes(testPartSize * nParts) @@ -64,6 +68,10 @@ func TestBasicPartSet(t *testing.T) { } func TestWrongProof(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + // Construct random data of size partSize * 100 data := tmrand.Bytes(testPartSize * 100) partSet := NewPartSetFromData(data, testPartSize) @@ -89,6 +97,10 @@ func TestWrongProof(t *testing.T) { } func TestPartSetHeaderValidateBasic(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + testCases := []struct { testName string malleatePartSetHeader func(*PartSetHeader) @@ -110,6 +122,10 @@ func TestPartSetHeaderValidateBasic(t *testing.T) { } func TestPartValidateBasic(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + testCases := []struct { testName string malleatePart func(*Part) diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 81e81919dd..75eaad34c4 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -1207,6 +1207,10 @@ func applyChangesToValSet(t *testing.T, expErr error, valSet *ValidatorSet, vals } func TestValSetUpdatePriorityOrderTests(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + const nMaxElections 
int32 = 5000 testCases := []testVSetCfg{ From 7fd94a46724f054401f95292e1c2bae50af9b2e6 Mon Sep 17 00:00:00 2001 From: shotonoff Date: Thu, 11 Aug 2022 13:04:50 +0200 Subject: [PATCH 191/203] fix: make a code compilable --- abci/client/mocks/client.go | 4 +- abci/example/counter/counter.go | 4 +- abci/types/mocks/application.go | 4 +- abci/types/types.go | 7 +- abci/types/types.pb.go | 3126 ++++--------------- dash/quorum/validator_conn_executor_test.go | 2 +- internal/blocksync/pool.go | 14 +- internal/blocksync/pool_test.go | 2 +- internal/blocksync/reactor.go | 70 +- internal/blocksync/reactor_test.go | 50 +- internal/consensus/byzantine_test.go | 4 +- internal/consensus/mempool_test.go | 2 +- internal/consensus/metrics.go | 2 - internal/consensus/reactor.go | 18 +- internal/consensus/reactor_test.go | 30 +- internal/consensus/replay_test.go | 38 +- internal/consensus/state.go | 68 +- internal/consensus/state_test.go | 130 - internal/evidence/mocks/block_store.go | 4 +- internal/mempool/mocks/mempool.go | 4 +- internal/p2p/mocks/connection.go | 4 +- internal/p2p/mocks/transport.go | 4 +- internal/p2p/peermanager.go | 10 +- internal/rpc/core/mempool.go | 11 +- internal/state/execution.go | 42 +- internal/state/execution_test.go | 6 +- internal/state/helpers_test.go | 16 +- internal/state/indexer/mocks/event_sink.go | 4 +- internal/state/mocks/block_store.go | 25 +- internal/state/mocks/evidence_pool.go | 4 +- internal/state/mocks/store.go | 4 +- internal/state/services.go | 2 - internal/statesync/mocks/state_provider.go | 4 +- internal/store/store.go | 56 - internal/store/store_test.go | 92 +- internal/test/factory/commit.go | 2 +- libs/time/mocks/source.go | 4 +- light/provider/http/http.go | 48 +- light/provider/mocks/provider.go | 4 +- light/rpc/mocks/light_client.go | 4 +- proto/tendermint/blocksync/types.pb.go | 77 +- proto/tendermint/blocksync/types.proto | 4 +- proto/tendermint/p2p/types.pb.go | 194 +- proto/tendermint/state/types.pb.go | 261 +- 
proto/tendermint/types/types.pb.go | 1910 +++++------ proto/tendermint/types/types.proto | 22 - rpc/client/mocks/abci_client.go | 34 +- rpc/client/mocks/client.go | 4 +- rpc/client/mocks/events_client.go | 11 +- rpc/client/mocks/evidence_client.go | 11 +- rpc/client/mocks/history_client.go | 11 +- rpc/client/mocks/mempool_client.go | 11 +- rpc/client/mocks/network_client.go | 11 +- rpc/client/mocks/remote_client.go | 34 +- rpc/client/mocks/sign_client.go | 11 +- rpc/client/mocks/status_client.go | 11 +- rpc/client/mocks/subscription_client.go | 11 +- types/block.go | 356 --- types/block_test.go | 4 +- types/params.go | 9 - types/test_util.go | 2 +- types/vote.go | 25 +- types/vote_set.go | 19 +- types/vote_set_test.go | 2 +- types/vote_test.go | 45 +- 65 files changed, 2029 insertions(+), 4990 deletions(-) diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index add3c2ae9d..31325d7fec 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -420,13 +420,13 @@ func (_m *Client) Wait() { _m.Called() } -type NewClientT interface { +type mockConstructorTestingTNewClient interface { mock.TestingT Cleanup(func()) } // NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewClient(t NewClientT) *Client { +func NewClient(t mockConstructorTestingTNewClient) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go index 4bb6f5b404..4b041ea9c9 100644 --- a/abci/example/counter/counter.go +++ b/abci/example/counter/counter.go @@ -35,7 +35,6 @@ func (app *Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (* if len(req.Tx) > 8 { return &types.ResponseCheckTx{ Code: code.CodeTypeEncodingError, - Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx)), }, nil } tx8 := make([]byte, 8) @@ -44,7 +43,6 @@ func (app *Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (* if txValue < uint64(app.txCount) { return &types.ResponseCheckTx{ Code: code.CodeTypeBadNonce, - Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue), }, nil } } @@ -60,7 +58,7 @@ func (app *Application) Commit(_ context.Context) (*types.ResponseCommit, error) endHash := make([]byte, 8) binary.BigEndian.PutUint64(endHash, uint64(app.txCount)) hash = append(hash, endHash...) - return &types.ResponseCommit{Data: hash}, nil + return &types.ResponseCommit{}, nil } func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (*types.ResponseQuery, error) { diff --git a/abci/types/mocks/application.go b/abci/types/mocks/application.go index 62cf929057..16cf59d663 100644 --- a/abci/types/mocks/application.go +++ b/abci/types/mocks/application.go @@ -336,13 +336,13 @@ func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.Reque return r0, r1 } -type NewApplicationT interface { +type mockConstructorTestingTNewApplication interface { mock.TestingT Cleanup(func()) } // NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewApplication(t NewApplicationT) *Application { +func NewApplication(t mockConstructorTestingTNewApplication) *Application { mock := &Application{} mock.Mock.Test(t) diff --git a/abci/types/types.go b/abci/types/types.go index 121e721592..2d24b4ee17 100644 --- a/abci/types/types.go +++ b/abci/types/types.go @@ -148,7 +148,10 @@ type validatorUpdateJSON struct { } func (v *ValidatorUpdate) MarshalJSON() ([]byte, error) { - key, err := encoding.PubKeyFromProto(v.PubKey) + if v.PubKey == nil { + return nil, nil + } + key, err := encoding.PubKeyFromProto(*v.PubKey) if err != nil { return nil, err } @@ -175,7 +178,7 @@ func (v *ValidatorUpdate) UnmarshalJSON(data []byte) error { if err != nil { return err } - v.PubKey = pkey + v.PubKey = &pkey v.Power = vu.Power return nil } diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 949309b0f0..fec4ec050b 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -121,7 +121,7 @@ func (x ResponseOfferSnapshot_Result) String() string { } func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33, 0} + return fileDescriptor_252557cfdd89a31a, []int{28, 0} } type ResponseApplySnapshotChunk_Result int32 @@ -158,7 +158,7 @@ func (x ResponseApplySnapshotChunk_Result) String() string { } func (ResponseApplySnapshotChunk_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{35, 0} + return fileDescriptor_252557cfdd89a31a, []int{30, 0} } type ResponseProcessProposal_ProposalStatus int32 @@ -186,7 +186,7 @@ func (x ResponseProcessProposal_ProposalStatus) String() string { } func (ResponseProcessProposal_ProposalStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37, 0} + return fileDescriptor_252557cfdd89a31a, []int{32, 0} } type ResponseVerifyVoteExtension_VerifyStatus int32 @@ -214,7 +214,7 @@ func (x ResponseVerifyVoteExtension_VerifyStatus) String() string { } func 
(ResponseVerifyVoteExtension_VerifyStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{40, 0} + return fileDescriptor_252557cfdd89a31a, []int{35, 0} } // TxAction contains App-provided information on what to do with a transaction that is part of a raw proposal @@ -246,7 +246,7 @@ func (x TxRecord_TxAction) String() string { } func (TxRecord_TxAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{48, 0} + return fileDescriptor_252557cfdd89a31a, []int{43, 0} } type Request struct { @@ -256,10 +256,7 @@ type Request struct { // *Request_Info // *Request_InitChain // *Request_Query - // *Request_BeginBlock // *Request_CheckTx - // *Request_DeliverTx - // *Request_EndBlock // *Request_Commit // *Request_ListSnapshots // *Request_OfferSnapshot @@ -327,18 +324,9 @@ type Request_InitChain struct { type Request_Query struct { Query *RequestQuery `protobuf:"bytes,5,opt,name=query,proto3,oneof" json:"query,omitempty"` } -type Request_BeginBlock struct { - BeginBlock *RequestBeginBlock `protobuf:"bytes,6,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` -} type Request_CheckTx struct { CheckTx *RequestCheckTx `protobuf:"bytes,7,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } -type Request_DeliverTx struct { - DeliverTx *RequestDeliverTx `protobuf:"bytes,8,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` -} -type Request_EndBlock struct { - EndBlock *RequestEndBlock `protobuf:"bytes,9,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` -} type Request_Commit struct { Commit *RequestCommit `protobuf:"bytes,10,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } @@ -375,10 +363,7 @@ func (*Request_Flush) isRequest_Value() {} func (*Request_Info) isRequest_Value() {} func (*Request_InitChain) isRequest_Value() {} func (*Request_Query) isRequest_Value() {} -func (*Request_BeginBlock) 
isRequest_Value() {} func (*Request_CheckTx) isRequest_Value() {} -func (*Request_DeliverTx) isRequest_Value() {} -func (*Request_EndBlock) isRequest_Value() {} func (*Request_Commit) isRequest_Value() {} func (*Request_ListSnapshots) isRequest_Value() {} func (*Request_OfferSnapshot) isRequest_Value() {} @@ -432,14 +417,6 @@ func (m *Request) GetQuery() *RequestQuery { return nil } -// Deprecated: Do not use. -func (m *Request) GetBeginBlock() *RequestBeginBlock { - if x, ok := m.GetValue().(*Request_BeginBlock); ok { - return x.BeginBlock - } - return nil -} - func (m *Request) GetCheckTx() *RequestCheckTx { if x, ok := m.GetValue().(*Request_CheckTx); ok { return x.CheckTx @@ -447,22 +424,6 @@ func (m *Request) GetCheckTx() *RequestCheckTx { return nil } -// Deprecated: Do not use. -func (m *Request) GetDeliverTx() *RequestDeliverTx { - if x, ok := m.GetValue().(*Request_DeliverTx); ok { - return x.DeliverTx - } - return nil -} - -// Deprecated: Do not use. -func (m *Request) GetEndBlock() *RequestEndBlock { - if x, ok := m.GetValue().(*Request_EndBlock); ok { - return x.EndBlock - } - return nil -} - func (m *Request) GetCommit() *RequestCommit { if x, ok := m.GetValue().(*Request_Commit); ok { return x.Commit @@ -541,10 +502,7 @@ func (*Request) XXX_OneofWrappers() []interface{} { (*Request_Info)(nil), (*Request_InitChain)(nil), (*Request_Query)(nil), - (*Request_BeginBlock)(nil), (*Request_CheckTx)(nil), - (*Request_DeliverTx)(nil), - (*Request_EndBlock)(nil), (*Request_Commit)(nil), (*Request_ListSnapshots)(nil), (*Request_OfferSnapshot)(nil), @@ -866,74 +824,6 @@ func (m *RequestQuery) GetProve() bool { return false } -type RequestBeginBlock struct { - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - Header types1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` - LastCommitInfo CommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` - ByzantineValidators 
[]Misbehavior `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` -} - -func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } -func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } -func (*RequestBeginBlock) ProtoMessage() {} -func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{6} -} -func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestBeginBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestBeginBlock.Merge(m, src) -} -func (m *RequestBeginBlock) XXX_Size() int { - return m.Size() -} -func (m *RequestBeginBlock) XXX_DiscardUnknown() { - xxx_messageInfo_RequestBeginBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestBeginBlock proto.InternalMessageInfo - -func (m *RequestBeginBlock) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *RequestBeginBlock) GetHeader() types1.Header { - if m != nil { - return m.Header - } - return types1.Header{} -} - -func (m *RequestBeginBlock) GetLastCommitInfo() CommitInfo { - if m != nil { - return m.LastCommitInfo - } - return CommitInfo{} -} - -func (m *RequestBeginBlock) GetByzantineValidators() []Misbehavior { - if m != nil { - return m.ByzantineValidators - } - return nil -} - type RequestCheckTx struct { Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` Type CheckTxType `protobuf:"varint,2,opt,name=type,proto3,enum=tendermint.abci.CheckTxType" json:"type,omitempty"` @@ -943,7 +833,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m 
*RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{7} + return fileDescriptor_252557cfdd89a31a, []int{6} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -986,94 +876,6 @@ func (m *RequestCheckTx) GetType() CheckTxType { return CheckTxType_New } -type RequestDeliverTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` -} - -func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } -func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } -func (*RequestDeliverTx) ProtoMessage() {} -func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{8} -} -func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestDeliverTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestDeliverTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestDeliverTx.Merge(m, src) -} -func (m *RequestDeliverTx) XXX_Size() int { - return m.Size() -} -func (m *RequestDeliverTx) XXX_DiscardUnknown() { - xxx_messageInfo_RequestDeliverTx.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestDeliverTx proto.InternalMessageInfo - -func (m *RequestDeliverTx) GetTx() []byte { - if m != nil { - return m.Tx - } - return nil -} - -type RequestEndBlock struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` -} - -func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } -func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } -func (*RequestEndBlock) 
ProtoMessage() {} -func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{9} -} -func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestEndBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestEndBlock.Merge(m, src) -} -func (m *RequestEndBlock) XXX_Size() int { - return m.Size() -} -func (m *RequestEndBlock) XXX_DiscardUnknown() { - xxx_messageInfo_RequestEndBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestEndBlock proto.InternalMessageInfo - -func (m *RequestEndBlock) GetHeight() int64 { - if m != nil { - return m.Height - } - return 0 -} - type RequestCommit struct { } @@ -1081,7 +883,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{10} + return fileDescriptor_252557cfdd89a31a, []int{7} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1118,7 +920,7 @@ func (m *RequestListSnapshots) Reset() { *m = RequestListSnapshots{} } func (m *RequestListSnapshots) String() string { return proto.CompactTextString(m) } func (*RequestListSnapshots) ProtoMessage() {} func (*RequestListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{11} + return fileDescriptor_252557cfdd89a31a, []int{8} } func (m *RequestListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1157,7 +959,7 @@ func (m *RequestOfferSnapshot) Reset() { *m = RequestOfferSnapshot{} } 
func (m *RequestOfferSnapshot) String() string { return proto.CompactTextString(m) } func (*RequestOfferSnapshot) ProtoMessage() {} func (*RequestOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{12} + return fileDescriptor_252557cfdd89a31a, []int{9} } func (m *RequestOfferSnapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1211,7 +1013,7 @@ func (m *RequestLoadSnapshotChunk) Reset() { *m = RequestLoadSnapshotChu func (m *RequestLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } func (*RequestLoadSnapshotChunk) ProtoMessage() {} func (*RequestLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{13} + return fileDescriptor_252557cfdd89a31a, []int{10} } func (m *RequestLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1272,7 +1074,7 @@ func (m *RequestApplySnapshotChunk) Reset() { *m = RequestApplySnapshotC func (m *RequestApplySnapshotChunk) String() string { return proto.CompactTextString(m) } func (*RequestApplySnapshotChunk) ProtoMessage() {} func (*RequestApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{14} + return fileDescriptor_252557cfdd89a31a, []int{11} } func (m *RequestApplySnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1327,12 +1129,12 @@ type RequestPrepareProposal struct { MaxTxBytes int64 `protobuf:"varint,1,opt,name=max_tx_bytes,json=maxTxBytes,proto3" json:"max_tx_bytes,omitempty"` // txs is an array of transactions that will be included in a block, // sent to the app for possible modifications. 
- Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` - LocalLastCommit ExtendedCommitInfo `protobuf:"bytes,3,opt,name=local_last_commit,json=localLastCommit,proto3" json:"local_last_commit"` - ByzantineValidators []Misbehavior `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` - Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` - NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` + LocalLastCommit ExtendedCommitInfo `protobuf:"bytes,3,opt,name=local_last_commit,json=localLastCommit,proto3" json:"local_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,4,rep,name=misbehavior,proto3" json:"misbehavior"` + Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` // Dash's fields CoreChainLockedHeight uint32 `protobuf:"varint,100,opt,name=core_chain_locked_height,json=coreChainLockedHeight,proto3" json:"core_chain_locked_height,omitempty"` ProposerProTxHash []byte `protobuf:"bytes,101,opt,name=proposer_pro_tx_hash,json=proposerProTxHash,proto3" json:"proposer_pro_tx_hash,omitempty"` @@ -1344,7 +1146,7 @@ func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } func (*RequestPrepareProposal) ProtoMessage() {} func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{15} + return 
fileDescriptor_252557cfdd89a31a, []int{12} } func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1394,9 +1196,9 @@ func (m *RequestPrepareProposal) GetLocalLastCommit() ExtendedCommitInfo { return ExtendedCommitInfo{} } -func (m *RequestPrepareProposal) GetByzantineValidators() []Misbehavior { +func (m *RequestPrepareProposal) GetMisbehavior() []Misbehavior { if m != nil { - return m.ByzantineValidators + return m.Misbehavior } return nil } @@ -1451,9 +1253,9 @@ func (m *RequestPrepareProposal) GetVersion() *version.Consensus { } type RequestProcessProposal struct { - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` - ProposedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` - ByzantineValidators []Misbehavior `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + ProposedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,3,rep,name=misbehavior,proto3" json:"misbehavior"` // hash is the merkle root hash of the fields of the proposed block. 
Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` @@ -1466,7 +1268,7 @@ func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} func (m *RequestProcessProposal) String() string { return proto.CompactTextString(m) } func (*RequestProcessProposal) ProtoMessage() {} func (*RequestProcessProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{16} + return fileDescriptor_252557cfdd89a31a, []int{13} } func (m *RequestProcessProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1509,9 +1311,9 @@ func (m *RequestProcessProposal) GetProposedLastCommit() CommitInfo { return CommitInfo{} } -func (m *RequestProcessProposal) GetByzantineValidators() []Misbehavior { +func (m *RequestProcessProposal) GetMisbehavior() []Misbehavior { if m != nil { - return m.ByzantineValidators + return m.Misbehavior } return nil } @@ -1561,7 +1363,7 @@ func (m *RequestExtendVote) Reset() { *m = RequestExtendVote{} } func (m *RequestExtendVote) String() string { return proto.CompactTextString(m) } func (*RequestExtendVote) ProtoMessage() {} func (*RequestExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{17} + return fileDescriptor_252557cfdd89a31a, []int{14} } func (m *RequestExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1616,7 +1418,7 @@ func (m *RequestVerifyVoteExtension) Reset() { *m = RequestVerifyVoteExt func (m *RequestVerifyVoteExtension) String() string { return proto.CompactTextString(m) } func (*RequestVerifyVoteExtension) ProtoMessage() {} func (*RequestVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{18} + return fileDescriptor_252557cfdd89a31a, []int{15} } func (m *RequestVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1674,10 +1476,10 @@ func (m 
*RequestVerifyVoteExtension) GetVoteExtensions() []*ExtendVoteExtension } type RequestFinalizeBlock struct { - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` - DecidedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` - ByzantineValidators []Misbehavior `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` - // hash is the merkle root hash of the fields of the proposed block. + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + DecidedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,3,rep,name=misbehavior,proto3" json:"misbehavior"` + // hash is the merkle root hash of the fields of the decided block. Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` @@ -1693,7 +1495,7 @@ func (m *RequestFinalizeBlock) Reset() { *m = RequestFinalizeBlock{} } func (m *RequestFinalizeBlock) String() string { return proto.CompactTextString(m) } func (*RequestFinalizeBlock) ProtoMessage() {} func (*RequestFinalizeBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{19} + return fileDescriptor_252557cfdd89a31a, []int{16} } func (m *RequestFinalizeBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1736,9 +1538,9 @@ func (m *RequestFinalizeBlock) GetDecidedLastCommit() CommitInfo { return CommitInfo{} } -func (m *RequestFinalizeBlock) GetByzantineValidators() []Misbehavior { +func (m *RequestFinalizeBlock) GetMisbehavior() []Misbehavior { if m != nil { - return m.ByzantineValidators + return m.Misbehavior } return nil } @@ -1807,10 +1609,7 @@ type 
Response struct { // *Response_Info // *Response_InitChain // *Response_Query - // *Response_BeginBlock // *Response_CheckTx - // *Response_DeliverTx - // *Response_EndBlock // *Response_Commit // *Response_ListSnapshots // *Response_OfferSnapshot @@ -1828,7 +1627,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{20} + return fileDescriptor_252557cfdd89a31a, []int{17} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1881,18 +1680,9 @@ type Response_InitChain struct { type Response_Query struct { Query *ResponseQuery `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` } -type Response_BeginBlock struct { - BeginBlock *ResponseBeginBlock `protobuf:"bytes,7,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` -} type Response_CheckTx struct { CheckTx *ResponseCheckTx `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } -type Response_DeliverTx struct { - DeliverTx *ResponseDeliverTx `protobuf:"bytes,9,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` -} -type Response_EndBlock struct { - EndBlock *ResponseEndBlock `protobuf:"bytes,10,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` -} type Response_Commit struct { Commit *ResponseCommit `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } @@ -1930,10 +1720,7 @@ func (*Response_Flush) isResponse_Value() {} func (*Response_Info) isResponse_Value() {} func (*Response_InitChain) isResponse_Value() {} func (*Response_Query) isResponse_Value() {} -func (*Response_BeginBlock) isResponse_Value() {} func (*Response_CheckTx) isResponse_Value() {} -func (*Response_DeliverTx) isResponse_Value() {} -func (*Response_EndBlock) 
isResponse_Value() {} func (*Response_Commit) isResponse_Value() {} func (*Response_ListSnapshots) isResponse_Value() {} func (*Response_OfferSnapshot) isResponse_Value() {} @@ -1994,14 +1781,6 @@ func (m *Response) GetQuery() *ResponseQuery { return nil } -// Deprecated: Do not use. -func (m *Response) GetBeginBlock() *ResponseBeginBlock { - if x, ok := m.GetValue().(*Response_BeginBlock); ok { - return x.BeginBlock - } - return nil -} - func (m *Response) GetCheckTx() *ResponseCheckTx { if x, ok := m.GetValue().(*Response_CheckTx); ok { return x.CheckTx @@ -2009,22 +1788,6 @@ func (m *Response) GetCheckTx() *ResponseCheckTx { return nil } -// Deprecated: Do not use. -func (m *Response) GetDeliverTx() *ResponseDeliverTx { - if x, ok := m.GetValue().(*Response_DeliverTx); ok { - return x.DeliverTx - } - return nil -} - -// Deprecated: Do not use. -func (m *Response) GetEndBlock() *ResponseEndBlock { - if x, ok := m.GetValue().(*Response_EndBlock); ok { - return x.EndBlock - } - return nil -} - func (m *Response) GetCommit() *ResponseCommit { if x, ok := m.GetValue().(*Response_Commit); ok { return x.Commit @@ -2104,10 +1867,7 @@ func (*Response) XXX_OneofWrappers() []interface{} { (*Response_Info)(nil), (*Response_InitChain)(nil), (*Response_Query)(nil), - (*Response_BeginBlock)(nil), (*Response_CheckTx)(nil), - (*Response_DeliverTx)(nil), - (*Response_EndBlock)(nil), (*Response_Commit)(nil), (*Response_ListSnapshots)(nil), (*Response_OfferSnapshot)(nil), @@ -2130,7 +1890,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{21} + return fileDescriptor_252557cfdd89a31a, []int{18} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2174,7 +1934,7 @@ func (m *ResponseEcho) Reset() { *m = 
ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{22} + return fileDescriptor_252557cfdd89a31a, []int{19} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2217,7 +1977,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{23} + return fileDescriptor_252557cfdd89a31a, []int{20} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2259,7 +2019,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{24} + return fileDescriptor_252557cfdd89a31a, []int{21} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2335,7 +2095,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{25} + return fileDescriptor_252557cfdd89a31a, []int{22} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2416,7 +2176,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{26} + return 
fileDescriptor_252557cfdd89a31a, []int{23} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2508,70 +2268,20 @@ func (m *ResponseQuery) GetCodespace() string { return "" } -type ResponseBeginBlock struct { - Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` -} - -func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } -func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseBeginBlock) ProtoMessage() {} -func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{27} -} -func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseBeginBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseBeginBlock.Merge(m, src) -} -func (m *ResponseBeginBlock) XXX_Size() int { - return m.Size() -} -func (m *ResponseBeginBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseBeginBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo - -func (m *ResponseBeginBlock) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - type ResponseCheckTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 
`protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` - Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` - Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` - // ABCI applications creating a ResponseCheckTX should not set mempool_error. - MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` + Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` } func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{28} + return fileDescriptor_252557cfdd89a31a, []int{24} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2614,20 +2324,6 @@ func (m *ResponseCheckTx) GetData() []byte { return nil } -func (m *ResponseCheckTx) GetLog() string { - if m != nil { - return m.Log - } - return "" -} - -func (m *ResponseCheckTx) GetInfo() string { - if m != nil { - return m.Info - } - return "" -} - func (m *ResponseCheckTx) GetGasWanted() int64 { if m != nil { return m.GasWanted @@ -2635,20 +2331,6 @@ func (m 
*ResponseCheckTx) GetGasWanted() int64 { return 0 } -func (m *ResponseCheckTx) GetGasUsed() int64 { - if m != nil { - return m.GasUsed - } - return 0 -} - -func (m *ResponseCheckTx) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - func (m *ResponseCheckTx) GetCodespace() string { if m != nil { return m.Codespace @@ -2670,13 +2352,6 @@ func (m *ResponseCheckTx) GetPriority() int64 { return 0 } -func (m *ResponseCheckTx) GetMempoolError() string { - if m != nil { - return m.MempoolError - } - return "" -} - type ResponseDeliverTx struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -2692,7 +2367,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{29} + return fileDescriptor_252557cfdd89a31a, []int{25} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2777,25 +2452,22 @@ func (m *ResponseDeliverTx) GetCodespace() string { return "" } -type ResponseEndBlock struct { - ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` - NextCoreChainLockUpdate *types1.CoreChainLock `protobuf:"bytes,100,opt,name=next_core_chain_lock_update,json=nextCoreChainLockUpdate,proto3" json:"next_core_chain_lock_update,omitempty"` - ValidatorSetUpdate *ValidatorSetUpdate `protobuf:"bytes,101,opt,name=validator_set_update,json=validatorSetUpdate,proto3" json:"validator_set_update,omitempty"` +type ResponseCommit struct { + RetainHeight int64 
`protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` } -func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } -func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseEndBlock) ProtoMessage() {} -func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{30} +func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } +func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } +func (*ResponseCommit) ProtoMessage() {} +func (*ResponseCommit) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{26} } -func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { +func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseEndBlock.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2805,93 +2477,19 @@ func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return b[:n], nil } } -func (m *ResponseEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseEndBlock.Merge(m, src) +func (m *ResponseCommit) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseCommit.Merge(m, src) } -func (m *ResponseEndBlock) XXX_Size() int { +func (m *ResponseCommit) XXX_Size() int { return m.Size() } -func (m *ResponseEndBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseEndBlock.DiscardUnknown(m) +func (m *ResponseCommit) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseCommit.DiscardUnknown(m) } -var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo +var xxx_messageInfo_ResponseCommit 
proto.InternalMessageInfo -func (m *ResponseEndBlock) GetConsensusParamUpdates() *types1.ConsensusParams { - if m != nil { - return m.ConsensusParamUpdates - } - return nil -} - -func (m *ResponseEndBlock) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - -func (m *ResponseEndBlock) GetNextCoreChainLockUpdate() *types1.CoreChainLock { - if m != nil { - return m.NextCoreChainLockUpdate - } - return nil -} - -func (m *ResponseEndBlock) GetValidatorSetUpdate() *ValidatorSetUpdate { - if m != nil { - return m.ValidatorSetUpdate - } - return nil -} - -type ResponseCommit struct { - // reserve 1 - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` -} - -func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } -func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } -func (*ResponseCommit) ProtoMessage() {} -func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{31} -} -func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseCommit) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseCommit.Merge(m, src) -} -func (m *ResponseCommit) XXX_Size() int { - return m.Size() -} -func (m *ResponseCommit) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseCommit.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseCommit proto.InternalMessageInfo - -func (m *ResponseCommit) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *ResponseCommit) GetRetainHeight() 
int64 { +func (m *ResponseCommit) GetRetainHeight() int64 { if m != nil { return m.RetainHeight } @@ -2906,7 +2504,7 @@ func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } func (m *ResponseListSnapshots) String() string { return proto.CompactTextString(m) } func (*ResponseListSnapshots) ProtoMessage() {} func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{32} + return fileDescriptor_252557cfdd89a31a, []int{27} } func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2950,7 +2548,7 @@ func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } func (*ResponseOfferSnapshot) ProtoMessage() {} func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33} + return fileDescriptor_252557cfdd89a31a, []int{28} } func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2994,7 +2592,7 @@ func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotC func (m *ResponseLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } func (*ResponseLoadSnapshotChunk) ProtoMessage() {} func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{34} + return fileDescriptor_252557cfdd89a31a, []int{29} } func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3040,7 +2638,7 @@ func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapsho func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } func (*ResponseApplySnapshotChunk) ProtoMessage() {} func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{35} + return fileDescriptor_252557cfdd89a31a, []int{30} } func (m 
*ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3102,7 +2700,7 @@ func (m *ResponsePrepareProposal) Reset() { *m = ResponsePrepareProposal func (m *ResponsePrepareProposal) String() string { return proto.CompactTextString(m) } func (*ResponsePrepareProposal) ProtoMessage() {} func (*ResponsePrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{36} + return fileDescriptor_252557cfdd89a31a, []int{31} } func (m *ResponsePrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3178,7 +2776,7 @@ func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal func (m *ResponseProcessProposal) String() string { return proto.CompactTextString(m) } func (*ResponseProcessProposal) ProtoMessage() {} func (*ResponseProcessProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37} + return fileDescriptor_252557cfdd89a31a, []int{32} } func (m *ResponseProcessProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3251,7 +2849,7 @@ func (m *ExtendVoteExtension) Reset() { *m = ExtendVoteExtension{} } func (m *ExtendVoteExtension) String() string { return proto.CompactTextString(m) } func (*ExtendVoteExtension) ProtoMessage() {} func (*ExtendVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{38} + return fileDescriptor_252557cfdd89a31a, []int{33} } func (m *ExtendVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3302,7 +2900,7 @@ func (m *ResponseExtendVote) Reset() { *m = ResponseExtendVote{} } func (m *ResponseExtendVote) String() string { return proto.CompactTextString(m) } func (*ResponseExtendVote) ProtoMessage() {} func (*ResponseExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{39} + return fileDescriptor_252557cfdd89a31a, []int{34} } func (m *ResponseExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-3346,7 +2944,7 @@ func (m *ResponseVerifyVoteExtension) Reset() { *m = ResponseVerifyVoteE func (m *ResponseVerifyVoteExtension) String() string { return proto.CompactTextString(m) } func (*ResponseVerifyVoteExtension) ProtoMessage() {} func (*ResponseVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{40} + return fileDescriptor_252557cfdd89a31a, []int{35} } func (m *ResponseVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3387,7 +2985,6 @@ type ResponseFinalizeBlock struct { TxResults []*ExecTxResult `protobuf:"bytes,2,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,4,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` AppHash []byte `protobuf:"bytes,5,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - RetainHeight int64 `protobuf:"varint,6,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` NextCoreChainLockUpdate *types1.CoreChainLock `protobuf:"bytes,100,opt,name=next_core_chain_lock_update,json=nextCoreChainLockUpdate,proto3" json:"next_core_chain_lock_update,omitempty"` ValidatorSetUpdate *ValidatorSetUpdate `protobuf:"bytes,101,opt,name=validator_set_update,json=validatorSetUpdate,proto3" json:"validator_set_update,omitempty"` } @@ -3396,7 +2993,7 @@ func (m *ResponseFinalizeBlock) Reset() { *m = ResponseFinalizeBlock{} } func (m *ResponseFinalizeBlock) String() string { return proto.CompactTextString(m) } func (*ResponseFinalizeBlock) ProtoMessage() {} func (*ResponseFinalizeBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{41} + return fileDescriptor_252557cfdd89a31a, []int{36} } func (m *ResponseFinalizeBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3453,13 +3050,6 @@ func (m *ResponseFinalizeBlock) GetAppHash() []byte { return nil } 
-func (m *ResponseFinalizeBlock) GetRetainHeight() int64 { - if m != nil { - return m.RetainHeight - } - return 0 -} - func (m *ResponseFinalizeBlock) GetNextCoreChainLockUpdate() *types1.CoreChainLock { if m != nil { return m.NextCoreChainLockUpdate @@ -3486,7 +3076,7 @@ func (m *CommitInfo) Reset() { *m = CommitInfo{} } func (m *CommitInfo) String() string { return proto.CompactTextString(m) } func (*CommitInfo) ProtoMessage() {} func (*CommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{42} + return fileDescriptor_252557cfdd89a31a, []int{37} } func (m *CommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3568,7 +3158,7 @@ func (m *ExtendedCommitInfo) Reset() { *m = ExtendedCommitInfo{} } func (m *ExtendedCommitInfo) String() string { return proto.CompactTextString(m) } func (*ExtendedCommitInfo) ProtoMessage() {} func (*ExtendedCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{43} + return fileDescriptor_252557cfdd89a31a, []int{38} } func (m *ExtendedCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3633,7 +3223,7 @@ func (m *ExtendedCommitInfo) GetThresholdVoteExtensions() []*types1.VoteExtensio } // Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// ResponseFinalizeBlock, ResponseDeliverTx, ExecTxResult // Later, transactions may be queried using these events. 
type Event struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` @@ -3644,7 +3234,7 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{44} + return fileDescriptor_252557cfdd89a31a, []int{39} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3698,7 +3288,7 @@ func (m *EventAttribute) Reset() { *m = EventAttribute{} } func (m *EventAttribute) String() string { return proto.CompactTextString(m) } func (*EventAttribute) ProtoMessage() {} func (*EventAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{45} + return fileDescriptor_252557cfdd89a31a, []int{40} } func (m *EventAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3766,7 +3356,7 @@ func (m *ExecTxResult) Reset() { *m = ExecTxResult{} } func (m *ExecTxResult) String() string { return proto.CompactTextString(m) } func (*ExecTxResult) ProtoMessage() {} func (*ExecTxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{46} + return fileDescriptor_252557cfdd89a31a, []int{41} } func (m *ExecTxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3865,7 +3455,7 @@ func (m *TxResult) Reset() { *m = TxResult{} } func (m *TxResult) String() string { return proto.CompactTextString(m) } func (*TxResult) ProtoMessage() {} func (*TxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{47} + return fileDescriptor_252557cfdd89a31a, []int{42} } func (m *TxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3931,7 +3521,7 @@ func (m *TxRecord) Reset() { *m = TxRecord{} } func (m *TxRecord) String() string { return proto.CompactTextString(m) } func (*TxRecord) ProtoMessage() {} func (*TxRecord) Descriptor() ([]byte, []int) { - return 
fileDescriptor_252557cfdd89a31a, []int{48} + return fileDescriptor_252557cfdd89a31a, []int{43} } func (m *TxRecord) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3986,7 +3576,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{49} + return fileDescriptor_252557cfdd89a31a, []int{44} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4041,7 +3631,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{50} + return fileDescriptor_252557cfdd89a31a, []int{45} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4108,7 +3698,7 @@ func (m *ValidatorSetUpdate) Reset() { *m = ValidatorSetUpdate{} } func (m *ValidatorSetUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorSetUpdate) ProtoMessage() {} func (*ValidatorSetUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{51} + return fileDescriptor_252557cfdd89a31a, []int{46} } func (m *ValidatorSetUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4166,7 +3756,7 @@ func (m *ThresholdPublicKeyUpdate) Reset() { *m = ThresholdPublicKeyUpda func (m *ThresholdPublicKeyUpdate) String() string { return proto.CompactTextString(m) } func (*ThresholdPublicKeyUpdate) ProtoMessage() {} func (*ThresholdPublicKeyUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{52} + return fileDescriptor_252557cfdd89a31a, []int{47} } func (m *ThresholdPublicKeyUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4210,7 
+3800,7 @@ func (m *QuorumHashUpdate) Reset() { *m = QuorumHashUpdate{} } func (m *QuorumHashUpdate) String() string { return proto.CompactTextString(m) } func (*QuorumHashUpdate) ProtoMessage() {} func (*QuorumHashUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{53} + return fileDescriptor_252557cfdd89a31a, []int{48} } func (m *QuorumHashUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4256,7 +3846,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{54} + return fileDescriptor_252557cfdd89a31a, []int{49} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4313,7 +3903,7 @@ func (m *ExtendedVoteInfo) Reset() { *m = ExtendedVoteInfo{} } func (m *ExtendedVoteInfo) String() string { return proto.CompactTextString(m) } func (*ExtendedVoteInfo) ProtoMessage() {} func (*ExtendedVoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{55} + return fileDescriptor_252557cfdd89a31a, []int{50} } func (m *ExtendedVoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4381,7 +3971,7 @@ func (m *Misbehavior) Reset() { *m = Misbehavior{} } func (m *Misbehavior) String() string { return proto.CompactTextString(m) } func (*Misbehavior) ProtoMessage() {} func (*Misbehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{56} + return fileDescriptor_252557cfdd89a31a, []int{51} } func (m *Misbehavior) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4458,7 +4048,7 @@ func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{57} + 
return fileDescriptor_252557cfdd89a31a, []int{52} } func (m *Snapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4543,10 +4133,7 @@ func init() { proto.RegisterType((*RequestInfo)(nil), "tendermint.abci.RequestInfo") proto.RegisterType((*RequestInitChain)(nil), "tendermint.abci.RequestInitChain") proto.RegisterType((*RequestQuery)(nil), "tendermint.abci.RequestQuery") - proto.RegisterType((*RequestBeginBlock)(nil), "tendermint.abci.RequestBeginBlock") proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.RequestCheckTx") - proto.RegisterType((*RequestDeliverTx)(nil), "tendermint.abci.RequestDeliverTx") - proto.RegisterType((*RequestEndBlock)(nil), "tendermint.abci.RequestEndBlock") proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.RequestCommit") proto.RegisterType((*RequestListSnapshots)(nil), "tendermint.abci.RequestListSnapshots") proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.RequestOfferSnapshot") @@ -4564,10 +4151,8 @@ func init() { proto.RegisterType((*ResponseInfo)(nil), "tendermint.abci.ResponseInfo") proto.RegisterType((*ResponseInitChain)(nil), "tendermint.abci.ResponseInitChain") proto.RegisterType((*ResponseQuery)(nil), "tendermint.abci.ResponseQuery") - proto.RegisterType((*ResponseBeginBlock)(nil), "tendermint.abci.ResponseBeginBlock") proto.RegisterType((*ResponseCheckTx)(nil), "tendermint.abci.ResponseCheckTx") proto.RegisterType((*ResponseDeliverTx)(nil), "tendermint.abci.ResponseDeliverTx") - proto.RegisterType((*ResponseEndBlock)(nil), "tendermint.abci.ResponseEndBlock") proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.ResponseCommit") proto.RegisterType((*ResponseListSnapshots)(nil), "tendermint.abci.ResponseListSnapshots") proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.ResponseOfferSnapshot") @@ -4600,248 +4185,236 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a 
= []byte{ - // 3850 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x23, 0x47, - 0x76, 0x67, 0xf3, 0x4b, 0xe4, 0xe3, 0xa7, 0x4a, 0x9a, 0x19, 0x0e, 0x67, 0x46, 0x92, 0x7b, 0x62, - 0xcf, 0x78, 0x6c, 0x4b, 0xb6, 0x26, 0xf6, 0x8c, 0x63, 0x27, 0x06, 0x45, 0x71, 0x4c, 0xcd, 0x68, - 0x24, 0x4d, 0x8b, 0x92, 0xe1, 0x38, 0x9e, 0x76, 0x8b, 0x5d, 0x12, 0xdb, 0x43, 0xb2, 0xdb, 0xdd, - 0x4d, 0x99, 0xf2, 0xd5, 0xf1, 0xc5, 0x27, 0xdf, 0x92, 0x8b, 0x91, 0x4b, 0x02, 0xe4, 0x92, 0xbf, - 0x20, 0x40, 0x72, 0x35, 0x72, 0x32, 0x10, 0x20, 0x09, 0x02, 0xc4, 0x31, 0xec, 0xcb, 0x62, 0x8f, - 0xbb, 0xc0, 0x2e, 0xb0, 0x87, 0xdd, 0x45, 0x7d, 0xf4, 0x17, 0xc9, 0xe6, 0x87, 0xe5, 0xfd, 0xf4, - 0xde, 0xba, 0x5e, 0xbd, 0xf7, 0xba, 0xaa, 0xfa, 0xd5, 0x7b, 0xaf, 0x7e, 0xf5, 0x1a, 0xae, 0xd8, - 0xb8, 0xab, 0x62, 0xb3, 0xa3, 0x75, 0xed, 0x35, 0xe5, 0xa8, 0xa9, 0xad, 0xd9, 0x67, 0x06, 0xb6, - 0x56, 0x0d, 0x53, 0xb7, 0x75, 0x54, 0xf0, 0x3a, 0x57, 0x49, 0x67, 0xf9, 0x9a, 0x8f, 0xbb, 0x69, - 0x9e, 0x19, 0xb6, 0xbe, 0x66, 0x98, 0xba, 0x7e, 0xcc, 0xf8, 0xcb, 0x7e, 0x65, 0x54, 0xcf, 0x9a, - 0xaa, 0x58, 0x2d, 0xde, 0x79, 0x75, 0xa8, 0xd3, 0xf7, 0xaa, 0x40, 0x2f, 0xd7, 0xfc, 0x04, 0x9f, - 0x39, 0xbd, 0xd7, 0x86, 0x64, 0x0d, 0xc5, 0x54, 0x3a, 0x4e, 0xf7, 0x92, 0xaf, 0xfb, 0x14, 0x9b, - 0x96, 0xa6, 0x77, 0x03, 0xca, 0x97, 0x4f, 0x74, 0xfd, 0xa4, 0x8d, 0xd7, 0x68, 0xeb, 0xa8, 0x77, - 0xbc, 0x66, 0x6b, 0x1d, 0x6c, 0xd9, 0x4a, 0xc7, 0xe0, 0x0c, 0x8b, 0x27, 0xfa, 0x89, 0x4e, 0x1f, - 0xd7, 0xc8, 0x13, 0xa3, 0x8a, 0xbf, 0x02, 0x98, 0x93, 0xf0, 0x07, 0x3d, 0x6c, 0xd9, 0x68, 0x1d, - 0xe2, 0xb8, 0xd9, 0xd2, 0x4b, 0xc2, 0x8a, 0x70, 0x33, 0xb3, 0x7e, 0x75, 0x75, 0x60, 0x65, 0x56, - 0x39, 0x5f, 0xad, 0xd9, 0xd2, 0xeb, 0x11, 0x89, 0xf2, 0xa2, 0x97, 0x21, 0x71, 0xdc, 0xee, 0x59, - 0xad, 0x52, 0x94, 0x0a, 0x5d, 0x0b, 0x13, 0xba, 0x47, 0x98, 0xea, 0x11, 0x89, 0x71, 0x93, 0x57, - 0x69, 0xdd, 0x63, 0xbd, 0x14, 0x1b, 0xff, 0xaa, 0xad, 0xee, 0x31, 0x7d, 
0x15, 0xe1, 0x45, 0x1b, - 0x00, 0x5a, 0x57, 0xb3, 0xe5, 0x66, 0x4b, 0xd1, 0xba, 0xa5, 0x38, 0x95, 0x7c, 0x2a, 0x5c, 0x52, - 0xb3, 0xab, 0x84, 0xb1, 0x1e, 0x91, 0xd2, 0x9a, 0xd3, 0x20, 0xc3, 0xfd, 0xa0, 0x87, 0xcd, 0xb3, - 0x52, 0x62, 0xfc, 0x70, 0x1f, 0x11, 0x26, 0x32, 0x5c, 0xca, 0x8d, 0xb6, 0x20, 0x73, 0x84, 0x4f, - 0xb4, 0xae, 0x7c, 0xd4, 0xd6, 0x9b, 0x4f, 0x4a, 0x49, 0x2a, 0x2c, 0x86, 0x09, 0x6f, 0x10, 0xd6, - 0x0d, 0xc2, 0xb9, 0x11, 0x2d, 0x09, 0xf5, 0x88, 0x04, 0x47, 0x2e, 0x05, 0xbd, 0x0e, 0xa9, 0x66, - 0x0b, 0x37, 0x9f, 0xc8, 0x76, 0xbf, 0x34, 0x47, 0xf5, 0x2c, 0x87, 0xe9, 0xa9, 0x12, 0xbe, 0x46, - 0xbf, 0x1e, 0x91, 0xe6, 0x9a, 0xec, 0x11, 0xdd, 0x03, 0x50, 0x71, 0x5b, 0x3b, 0xc5, 0x26, 0x91, - 0x4f, 0x8d, 0x5f, 0x83, 0x4d, 0xc6, 0xd9, 0xe8, 0xf3, 0x61, 0xa4, 0x55, 0x87, 0x80, 0xaa, 0x90, - 0xc6, 0x5d, 0x95, 0x4f, 0x27, 0x4d, 0xd5, 0xac, 0x84, 0x7e, 0xef, 0xae, 0xea, 0x9f, 0x4c, 0x0a, - 0xf3, 0x36, 0xba, 0x0b, 0xc9, 0xa6, 0xde, 0xe9, 0x68, 0x76, 0x09, 0xa8, 0x86, 0xa5, 0xd0, 0x89, - 0x50, 0xae, 0x7a, 0x44, 0xe2, 0xfc, 0x68, 0x07, 0xf2, 0x6d, 0xcd, 0xb2, 0x65, 0xab, 0xab, 0x18, - 0x56, 0x4b, 0xb7, 0xad, 0x52, 0x86, 0x6a, 0x78, 0x3a, 0x4c, 0xc3, 0xb6, 0x66, 0xd9, 0xfb, 0x0e, - 0x73, 0x3d, 0x22, 0xe5, 0xda, 0x7e, 0x02, 0xd1, 0xa7, 0x1f, 0x1f, 0x63, 0xd3, 0x55, 0x58, 0xca, - 0x8e, 0xd7, 0xb7, 0x4b, 0xb8, 0x1d, 0x79, 0xa2, 0x4f, 0xf7, 0x13, 0xd0, 0x3b, 0xb0, 0xd0, 0xd6, - 0x15, 0xd5, 0x55, 0x27, 0x37, 0x5b, 0xbd, 0xee, 0x93, 0x52, 0x8e, 0x2a, 0x7d, 0x36, 0x74, 0x90, - 0xba, 0xa2, 0x3a, 0x2a, 0xaa, 0x44, 0xa0, 0x1e, 0x91, 0xe6, 0xdb, 0x83, 0x44, 0xf4, 0x18, 0x16, - 0x15, 0xc3, 0x68, 0x9f, 0x0d, 0x6a, 0xcf, 0x53, 0xed, 0xb7, 0xc2, 0xb4, 0x57, 0x88, 0xcc, 0xa0, - 0x7a, 0xa4, 0x0c, 0x51, 0x51, 0x03, 0x8a, 0x86, 0x89, 0x0d, 0xc5, 0xc4, 0xb2, 0x61, 0xea, 0x86, - 0x6e, 0x29, 0xed, 0x52, 0x81, 0xea, 0xbe, 0x11, 0xa6, 0x7b, 0x8f, 0xf1, 0xef, 0x71, 0xf6, 0x7a, - 0x44, 0x2a, 0x18, 0x41, 0x12, 0xd3, 0xaa, 0x37, 0xb1, 0x65, 0x79, 0x5a, 0x8b, 0x93, 0xb4, 0x52, - 0xfe, 0xa0, 
0xd6, 0x00, 0x09, 0xd5, 0x20, 0x83, 0xfb, 0x44, 0x5c, 0x3e, 0xd5, 0x6d, 0x5c, 0x9a, - 0x1f, 0xbf, 0xb1, 0x6a, 0x94, 0xf5, 0x50, 0xb7, 0x31, 0xd9, 0x54, 0xd8, 0x6d, 0x21, 0x05, 0x2e, - 0x9c, 0x62, 0x53, 0x3b, 0x3e, 0xa3, 0x6a, 0x64, 0xda, 0x43, 0x3c, 0x64, 0x09, 0x51, 0x85, 0xcf, - 0x85, 0x29, 0x3c, 0xa4, 0x42, 0x44, 0x45, 0xcd, 0x11, 0xa9, 0x47, 0xa4, 0x85, 0xd3, 0x61, 0x32, - 0x31, 0xb1, 0x63, 0xad, 0xab, 0xb4, 0xb5, 0x8f, 0x30, 0xdf, 0x36, 0x0b, 0xe3, 0x4d, 0xec, 0x1e, - 0xe7, 0xa6, 0x7b, 0x85, 0x98, 0xd8, 0xb1, 0x9f, 0xb0, 0x31, 0x07, 0x89, 0x53, 0xa5, 0xdd, 0xc3, - 0xe2, 0x0d, 0xc8, 0xf8, 0x1c, 0x2b, 0x2a, 0xc1, 0x5c, 0x07, 0x5b, 0x96, 0x72, 0x82, 0xa9, 0x1f, - 0x4e, 0x4b, 0x4e, 0x53, 0xcc, 0x43, 0xd6, 0xef, 0x4c, 0xc5, 0xcf, 0x04, 0x57, 0x92, 0xf8, 0x49, - 0x22, 0xc9, 0x03, 0x83, 0x23, 0xc9, 0x9b, 0xe8, 0x3a, 0xe4, 0xe8, 0x90, 0x65, 0xa7, 0x9f, 0x38, - 0xeb, 0xb8, 0x94, 0xa5, 0xc4, 0x43, 0xce, 0xb4, 0x0c, 0x19, 0x63, 0xdd, 0x70, 0x59, 0x62, 0x94, - 0x05, 0x8c, 0x75, 0xc3, 0x61, 0x78, 0x0a, 0xb2, 0x64, 0x7e, 0x2e, 0x47, 0x9c, 0xbe, 0x24, 0x43, - 0x68, 0x9c, 0x45, 0xfc, 0xdb, 0x18, 0x14, 0x07, 0x1d, 0x30, 0xba, 0x0b, 0x71, 0x12, 0x8b, 0x78, - 0x58, 0x29, 0xaf, 0xb2, 0x40, 0xb5, 0xea, 0x04, 0xaa, 0xd5, 0x86, 0x13, 0xa8, 0x36, 0x52, 0x5f, - 0x7c, 0xb5, 0x1c, 0xf9, 0xec, 0xff, 0x97, 0x05, 0x89, 0x4a, 0xa0, 0xcb, 0xc4, 0x57, 0x2a, 0x5a, - 0x57, 0xd6, 0x54, 0x3a, 0xe4, 0x34, 0x71, 0x84, 0x8a, 0xd6, 0xdd, 0x52, 0xd1, 0x36, 0x14, 0x9b, - 0x7a, 0xd7, 0xc2, 0x5d, 0xab, 0x67, 0xc9, 0x2c, 0x50, 0xf2, 0x60, 0x12, 0x70, 0x87, 0x2c, 0x42, - 0x56, 0x1d, 0xce, 0x3d, 0xca, 0x28, 0x15, 0x9a, 0x41, 0x02, 0xda, 0x81, 0xdc, 0xa9, 0xd2, 0xd6, - 0x54, 0xc5, 0xd6, 0x4d, 0xd9, 0xc2, 0x36, 0x8f, 0x2e, 0xd7, 0x87, 0xbe, 0xed, 0xa1, 0xc3, 0xb5, - 0x8f, 0xed, 0x03, 0x43, 0x55, 0x6c, 0xbc, 0x11, 0xff, 0xe2, 0xab, 0x65, 0x41, 0xca, 0x9e, 0xfa, - 0x7a, 0xd0, 0x33, 0x50, 0x50, 0x0c, 0x43, 0xb6, 0x6c, 0xc5, 0xc6, 0xf2, 0xd1, 0x99, 0x8d, 0x2d, - 0x1a, 0x70, 0xb2, 0x52, 0x4e, 0x31, 0x8c, 0x7d, 
0x42, 0xdd, 0x20, 0x44, 0xf4, 0x34, 0xe4, 0x49, - 0x6c, 0xd2, 0x94, 0xb6, 0xdc, 0xc2, 0xda, 0x49, 0xcb, 0xa6, 0xa1, 0x25, 0x26, 0xe5, 0x38, 0xb5, - 0x4e, 0x89, 0x68, 0x15, 0x16, 0x1c, 0xb6, 0xa6, 0x6e, 0x62, 0x87, 0x97, 0x84, 0x8f, 0x9c, 0x34, - 0xcf, 0xbb, 0xaa, 0xba, 0x89, 0x19, 0xbf, 0xa8, 0xba, 0x96, 0x42, 0xe3, 0x18, 0x42, 0x10, 0x57, - 0x15, 0x5b, 0xa1, 0x5f, 0x20, 0x2b, 0xd1, 0x67, 0x42, 0x33, 0x14, 0xbb, 0xc5, 0xd7, 0x95, 0x3e, - 0xa3, 0x8b, 0x90, 0xe4, 0xaa, 0x63, 0x74, 0x18, 0xbc, 0x85, 0x16, 0x21, 0x61, 0x98, 0xfa, 0x29, - 0xa6, 0xcb, 0x92, 0x92, 0x58, 0x43, 0xfc, 0x38, 0x0a, 0xf3, 0x43, 0x11, 0x8f, 0xe8, 0x6d, 0x29, - 0x56, 0xcb, 0x79, 0x17, 0x79, 0x46, 0xaf, 0x10, 0xbd, 0x8a, 0x8a, 0x4d, 0x9e, 0x25, 0x94, 0x86, - 0x3f, 0x51, 0x9d, 0xf6, 0xd3, 0xc5, 0x8c, 0x48, 0x9c, 0x1b, 0x3d, 0x80, 0x62, 0x5b, 0xb1, 0x6c, - 0x99, 0x45, 0x0d, 0xd9, 0x97, 0x31, 0x5c, 0x19, 0xfa, 0x32, 0x2c, 0xc6, 0x90, 0x8d, 0xc0, 0x95, - 0xe4, 0x89, 0xa8, 0x47, 0x45, 0x07, 0xb0, 0x78, 0x74, 0xf6, 0x91, 0xd2, 0xb5, 0xb5, 0x2e, 0x96, - 0xdd, 0xaf, 0x65, 0x95, 0xe2, 0x2b, 0xb1, 0x91, 0x29, 0xc8, 0x43, 0xcd, 0x3a, 0xc2, 0x2d, 0xe5, - 0x54, 0xd3, 0x9d, 0x61, 0x2d, 0xb8, 0xf2, 0xae, 0x19, 0x58, 0xa2, 0x04, 0xf9, 0x60, 0xb8, 0x46, - 0x79, 0x88, 0xda, 0x7d, 0x3e, 0xff, 0xa8, 0xdd, 0x47, 0x2f, 0x42, 0x9c, 0xcc, 0x91, 0xce, 0x3d, - 0x3f, 0xe2, 0x45, 0x5c, 0xae, 0x71, 0x66, 0x60, 0x89, 0x72, 0x8a, 0xa2, 0xbb, 0x8b, 0xdc, 0x10, - 0x3e, 0xa8, 0x55, 0x7c, 0x16, 0x0a, 0x03, 0xf1, 0xd9, 0xf7, 0xf9, 0x04, 0xff, 0xe7, 0x13, 0x0b, - 0x90, 0x0b, 0x04, 0x62, 0xf1, 0x22, 0x2c, 0x8e, 0x8a, 0xab, 0x62, 0xcb, 0xa5, 0x07, 0xe2, 0x23, - 0x7a, 0x19, 0x52, 0x6e, 0x60, 0x65, 0xbb, 0xf8, 0xf2, 0xd0, 0x2c, 0x1c, 0x66, 0xc9, 0x65, 0x25, - 0xdb, 0x97, 0xec, 0x02, 0x6a, 0x0e, 0x51, 0x3a, 0xf0, 0x39, 0xc5, 0x30, 0xea, 0x8a, 0xd5, 0x12, - 0xdf, 0x83, 0x52, 0x58, 0xd0, 0x1c, 0x98, 0x46, 0xdc, 0xb5, 0xc2, 0x8b, 0x90, 0x3c, 0xd6, 0xcd, - 0x8e, 0x62, 0x53, 0x65, 0x39, 0x89, 0xb7, 0x88, 0x75, 0xb2, 0x00, 0x1a, 0xa3, 0x64, 
0xd6, 0x10, - 0x65, 0xb8, 0x1c, 0x1a, 0x38, 0x89, 0x88, 0xd6, 0x55, 0x31, 0x5b, 0xcf, 0x9c, 0xc4, 0x1a, 0x9e, - 0x22, 0x36, 0x58, 0xd6, 0x20, 0xaf, 0xb5, 0xe8, 0x5c, 0xa9, 0xfe, 0xb4, 0xc4, 0x5b, 0xe2, 0x7f, - 0xc6, 0xe1, 0xe2, 0xe8, 0xf0, 0x89, 0x56, 0x20, 0xdb, 0x51, 0xfa, 0xb2, 0xdd, 0xe7, 0x7b, 0x9f, - 0x7d, 0x0e, 0xe8, 0x28, 0xfd, 0x46, 0x9f, 0x6d, 0xfc, 0x22, 0xc4, 0xec, 0xbe, 0x55, 0x8a, 0xae, - 0xc4, 0x6e, 0x66, 0x25, 0xf2, 0x88, 0x0e, 0x60, 0xbe, 0xad, 0x37, 0x95, 0xb6, 0xec, 0xb3, 0x78, - 0x6e, 0xec, 0xc3, 0x6e, 0x88, 0x05, 0x42, 0xac, 0x0e, 0x19, 0x7d, 0x81, 0xea, 0xd8, 0x76, 0x2d, - 0xff, 0x37, 0x64, 0xf5, 0xbe, 0x6f, 0x94, 0x08, 0x78, 0x0a, 0xc7, 0xd7, 0x27, 0x67, 0xf6, 0xf5, - 0x2f, 0xc2, 0x62, 0x17, 0xf7, 0x6d, 0xdf, 0x18, 0x99, 0xe1, 0xcc, 0xd1, 0x6f, 0x81, 0x48, 0x9f, - 0xf7, 0x7e, 0x62, 0x43, 0xe8, 0x0e, 0x94, 0xa8, 0x37, 0x64, 0x21, 0x82, 0xec, 0x00, 0xac, 0x3a, - 0xae, 0x51, 0xa5, 0xdf, 0xf5, 0x02, 0xe9, 0xa7, 0x41, 0x68, 0x9b, 0xf6, 0x72, 0x77, 0xba, 0x06, - 0x8b, 0x2c, 0x85, 0xc1, 0x26, 0xc9, 0x65, 0xc8, 0x77, 0xa2, 0xaf, 0xc2, 0xf4, 0x55, 0xf3, 0x4e, - 0xdf, 0x9e, 0xa9, 0x37, 0xfa, 0xf4, 0x4d, 0x2f, 0xba, 0x02, 0xaa, 0x4c, 0x2c, 0xda, 0x89, 0x80, - 0xc7, 0xd4, 0x3e, 0x91, 0xd3, 0x57, 0x31, 0xdc, 0x58, 0x79, 0xc7, 0x8b, 0xc5, 0x27, 0xc3, 0x27, - 0x0d, 0xde, 0xe5, 0xc5, 0x25, 0x37, 0x54, 0x8b, 0xff, 0x10, 0xf3, 0x59, 0x55, 0x30, 0x57, 0xe2, - 0x36, 0x23, 0x78, 0x36, 0xb3, 0xef, 0x1b, 0x97, 0xdf, 0x6c, 0xa2, 0xd3, 0xfa, 0x48, 0x77, 0xe8, - 0x53, 0x58, 0x4c, 0xec, 0x7c, 0x16, 0xe3, 0xc4, 0x85, 0xb8, 0x2f, 0x2e, 0xfc, 0x3e, 0x58, 0x51, - 0x98, 0x31, 0xa4, 0x42, 0x8c, 0x41, 0x7c, 0xc3, 0x8d, 0x7a, 0x5e, 0x3a, 0x3a, 0x32, 0xea, 0x79, - 0xb3, 0x8b, 0x06, 0xdc, 0xf1, 0x7f, 0x08, 0x50, 0x0e, 0xcf, 0x3f, 0x47, 0xaa, 0x7a, 0x09, 0x2e, - 0x78, 0xf9, 0x89, 0x7f, 0x94, 0xcc, 0x53, 0x21, 0xb7, 0xd3, 0xb3, 0xd9, 0xb0, 0x58, 0xfe, 0x10, - 0x0a, 0xc1, 0x1c, 0xd9, 0xf1, 0x05, 0x7f, 0x16, 0xe2, 0x65, 0x02, 0xa3, 0x93, 0xf2, 0xa7, 0xfe, - 0xa6, 0x25, 0xfe, 0x7b, 
0xdc, 0x8d, 0x19, 0x81, 0x84, 0x77, 0x84, 0xb5, 0x3e, 0x82, 0x05, 0x15, - 0x37, 0x35, 0xf5, 0xbb, 0x1a, 0xeb, 0x3c, 0x97, 0xfe, 0xe1, 0xda, 0xea, 0x1f, 0xa5, 0xc7, 0xfb, - 0xef, 0x0c, 0xa4, 0x24, 0x6c, 0x19, 0xa4, 0x07, 0x6d, 0x40, 0x1a, 0xf7, 0x9b, 0xd8, 0xb0, 0x9d, - 0x53, 0xcc, 0xe8, 0xd3, 0x20, 0xe3, 0xae, 0x39, 0x9c, 0xf5, 0x88, 0xe4, 0x89, 0xa1, 0xdb, 0x1c, - 0xc6, 0x0a, 0x47, 0xa4, 0xb8, 0xb8, 0x1f, 0xc7, 0x7a, 0xc5, 0xc1, 0xb1, 0x62, 0xa1, 0x50, 0x06, - 0x93, 0x1a, 0x00, 0xb2, 0x6e, 0x73, 0x20, 0x2b, 0x3e, 0xe1, 0x65, 0x01, 0x24, 0xab, 0x1a, 0x40, - 0xb2, 0x12, 0x13, 0xa6, 0x19, 0x02, 0x65, 0xbd, 0xe2, 0x40, 0x59, 0xc9, 0x09, 0x23, 0x1e, 0xc0, - 0xb2, 0xee, 0x07, 0xb1, 0xac, 0xb9, 0x90, 0x14, 0xc3, 0x91, 0x1e, 0x0b, 0x66, 0xfd, 0xa5, 0x0f, - 0xcc, 0x4a, 0x85, 0xa2, 0x48, 0x4c, 0xd1, 0x08, 0x34, 0xeb, 0xcd, 0x00, 0x9a, 0x95, 0x9e, 0xb0, - 0x0e, 0x63, 0xe0, 0xac, 0x4d, 0x3f, 0x9c, 0x05, 0xa1, 0xa8, 0x18, 0xff, 0xee, 0x61, 0x78, 0xd6, - 0xab, 0x2e, 0x9e, 0x95, 0x09, 0x05, 0xe6, 0xf8, 0x5c, 0x06, 0x01, 0xad, 0xdd, 0x21, 0x40, 0x8b, - 0x01, 0x50, 0xcf, 0x84, 0xaa, 0x98, 0x80, 0x68, 0xed, 0x0e, 0x21, 0x5a, 0xb9, 0x09, 0x0a, 0x27, - 0x40, 0x5a, 0x7f, 0x33, 0x1a, 0xd2, 0x0a, 0x07, 0x9d, 0xf8, 0x30, 0xa7, 0xc3, 0xb4, 0xe4, 0x10, - 0x4c, 0xab, 0x10, 0x8a, 0xbf, 0x30, 0xf5, 0x53, 0x83, 0x5a, 0x07, 0x23, 0x40, 0x2d, 0x06, 0x3f, - 0xdd, 0x0c, 0x55, 0x3e, 0x05, 0xaa, 0x75, 0x30, 0x02, 0xd5, 0x9a, 0x9f, 0xa8, 0x76, 0x22, 0xac, - 0x75, 0x2f, 0x08, 0x6b, 0xa1, 0x09, 0x7b, 0x2c, 0x14, 0xd7, 0x3a, 0x0a, 0xc3, 0xb5, 0x18, 0xf6, - 0xf4, 0x7c, 0xa8, 0xc6, 0x19, 0x80, 0xad, 0xdd, 0x21, 0x60, 0x6b, 0x71, 0x82, 0xa5, 0x4d, 0x8b, - 0x6c, 0x3d, 0x4b, 0x32, 0xa5, 0x01, 0x57, 0x4d, 0x0e, 0x59, 0xd8, 0x34, 0x75, 0x93, 0x63, 0x54, - 0xac, 0x21, 0xde, 0x84, 0xac, 0xdf, 0x2d, 0x8f, 0x41, 0xc1, 0xe8, 0x61, 0xd6, 0xe7, 0x8a, 0xc5, - 0xff, 0x13, 0x3c, 0x59, 0x7a, 0xd0, 0xf7, 0xa3, 0x1d, 0x69, 0x8e, 0x76, 0xf8, 0xb0, 0xb1, 0x68, - 0x10, 0x1b, 0x5b, 0x86, 0x8c, 0x3f, 0xc0, 0x71, 0xd8, 0x4b, 
0xf1, 0x02, 0xdb, 0x2d, 0x98, 0xa7, - 0xe9, 0x0a, 0x43, 0xd0, 0x78, 0xb4, 0x8d, 0xd3, 0x1c, 0xa0, 0x40, 0x3a, 0xd8, 0x2a, 0xb0, 0x38, - 0xfb, 0x02, 0x2c, 0xf8, 0x78, 0xdd, 0xc3, 0x2f, 0xc3, 0x7e, 0x8a, 0x2e, 0x77, 0x85, 0x9d, 0x82, - 0xef, 0xc7, 0x53, 0x6a, 0x11, 0x4b, 0xd7, 0x78, 0x36, 0x34, 0x3a, 0xb0, 0x8b, 0x3f, 0x8a, 0x7a, - 0xcb, 0xe8, 0x81, 0x6a, 0xa3, 0xf0, 0x2f, 0xe1, 0x3b, 0xe3, 0x5f, 0xfe, 0x93, 0x7a, 0x2c, 0x70, - 0x52, 0x47, 0xef, 0xc0, 0x62, 0x00, 0x1a, 0x93, 0x7b, 0x14, 0xf6, 0xa2, 0xf9, 0xc6, 0x0c, 0x08, - 0x59, 0xc4, 0x97, 0xa4, 0xba, 0x3d, 0xe8, 0x5d, 0xb8, 0x42, 0x53, 0xa0, 0x81, 0xc9, 0x3b, 0xef, - 0xc0, 0xc3, 0x6e, 0xd8, 0x99, 0x90, 0x2f, 0xcb, 0x91, 0x2e, 0x11, 0x1d, 0x01, 0x12, 0x57, 0x1f, - 0x82, 0x9b, 0x1d, 0x87, 0xe1, 0x66, 0x3f, 0x17, 0x3c, 0xe3, 0x72, 0x91, 0xb3, 0xa6, 0xae, 0x62, - 0x8e, 0x13, 0xd0, 0x67, 0x92, 0xd9, 0xb6, 0xf5, 0x13, 0x8e, 0x06, 0x90, 0x47, 0xc2, 0xe5, 0x26, - 0x01, 0x69, 0x1e, 0xe3, 0x5d, 0x88, 0x81, 0xa5, 0x8b, 0x1c, 0x62, 0x28, 0x42, 0xec, 0x09, 0x66, - 0x21, 0x3b, 0x2b, 0x91, 0x47, 0xc2, 0x47, 0x77, 0x0b, 0x4f, 0xfb, 0x58, 0x03, 0xdd, 0x85, 0x34, - 0xbd, 0x74, 0x94, 0x75, 0xc3, 0xe2, 0x91, 0x35, 0x90, 0x21, 0xb3, 0xeb, 0xc3, 0xd5, 0x3d, 0xc2, - 0xb3, 0x6b, 0x58, 0x52, 0xca, 0xe0, 0x4f, 0xbe, 0x3c, 0x35, 0x1d, 0xc8, 0x53, 0xaf, 0x42, 0x9a, - 0x8c, 0xde, 0x32, 0x94, 0x26, 0xa6, 0x21, 0x32, 0x2d, 0x79, 0x04, 0xf1, 0x31, 0xa0, 0xe1, 0x80, - 0x8f, 0xea, 0x90, 0xc4, 0xa7, 0xb8, 0x6b, 0xb3, 0x34, 0x3e, 0xb3, 0x7e, 0x71, 0xf8, 0x88, 0x40, - 0xba, 0x37, 0x4a, 0xe4, 0x03, 0xff, 0xf8, 0xab, 0xe5, 0x22, 0xe3, 0x7e, 0x5e, 0xef, 0x68, 0x36, - 0xee, 0x18, 0xf6, 0x99, 0xc4, 0xe5, 0xc5, 0xff, 0x8d, 0x42, 0x61, 0x20, 0x11, 0x18, 0xb9, 0xb6, - 0xce, 0xde, 0x8d, 0xfa, 0x90, 0xca, 0xe9, 0xd6, 0xfb, 0x1a, 0xc0, 0x89, 0x62, 0xc9, 0x1f, 0x2a, - 0x5d, 0x1b, 0xab, 0x7c, 0xd1, 0xd3, 0x27, 0x8a, 0xf5, 0x16, 0x25, 0x10, 0x0b, 0x27, 0xdd, 0x3d, - 0x0b, 0xab, 0x1c, 0x63, 0x9d, 0x3b, 0x51, 0xac, 0x03, 0x0b, 0xab, 0xbe, 0x59, 0xce, 0x9d, 0x6f, - 
0x96, 0xc1, 0x35, 0x4e, 0x0d, 0xac, 0xb1, 0x0f, 0x48, 0x4a, 0xfb, 0x81, 0x24, 0x54, 0x86, 0x94, - 0x61, 0x6a, 0xba, 0xa9, 0xd9, 0x67, 0xf4, 0xc3, 0xc4, 0x24, 0xb7, 0x8d, 0xae, 0x43, 0xae, 0x83, - 0x3b, 0x86, 0xae, 0xb7, 0x65, 0xe6, 0x35, 0x33, 0x54, 0x34, 0xcb, 0x89, 0x35, 0xea, 0x3c, 0x3f, - 0xf1, 0x79, 0x08, 0x0f, 0x30, 0xfc, 0x7e, 0x97, 0x77, 0x69, 0xc4, 0xf2, 0xfa, 0x28, 0x64, 0x12, - 0x03, 0xeb, 0xeb, 0xb6, 0x7f, 0x5b, 0x0b, 0x2c, 0xfe, 0x34, 0x0a, 0xc5, 0xc1, 0x24, 0x0f, 0xbd, - 0x0d, 0x97, 0x06, 0x1c, 0x25, 0xf7, 0x2e, 0x16, 0x3f, 0x20, 0x4c, 0xe1, 0x2f, 0x2f, 0x04, 0xfd, - 0x25, 0xf3, 0x2e, 0x96, 0x6f, 0x5e, 0xb1, 0x73, 0xce, 0x6b, 0x82, 0x1f, 0x54, 0xcf, 0xe9, 0x07, - 0xc3, 0x7c, 0x38, 0x9e, 0xf5, 0x96, 0x63, 0x84, 0x0f, 0x17, 0xb7, 0x20, 0x1f, 0x4c, 0x8b, 0x47, - 0x5a, 0xd9, 0x75, 0xc8, 0x99, 0xd8, 0x26, 0x13, 0x0b, 0xa0, 0x12, 0x59, 0x46, 0xe4, 0xfe, 0x77, - 0x0f, 0x2e, 0x8c, 0x4c, 0x8f, 0xd1, 0x1d, 0x48, 0x7b, 0x99, 0x35, 0xf3, 0x45, 0x63, 0x10, 0x68, - 0x8f, 0x57, 0xfc, 0x37, 0xc1, 0x53, 0x19, 0xc4, 0xb4, 0x6b, 0x90, 0x34, 0xb1, 0xd5, 0x6b, 0x33, - 0x94, 0x39, 0xbf, 0xfe, 0xc2, 0x74, 0x89, 0x35, 0xa1, 0xf6, 0xda, 0xb6, 0xc4, 0x85, 0xc5, 0xc7, - 0x90, 0x64, 0x14, 0x94, 0x81, 0xb9, 0x83, 0x9d, 0x07, 0x3b, 0xbb, 0x6f, 0xed, 0x14, 0x23, 0x08, - 0x20, 0x59, 0xa9, 0x56, 0x6b, 0x7b, 0x8d, 0xa2, 0x80, 0xd2, 0x90, 0xa8, 0x6c, 0xec, 0x4a, 0x8d, - 0x62, 0x94, 0x90, 0xa5, 0xda, 0xfd, 0x5a, 0xb5, 0x51, 0x8c, 0xa1, 0x79, 0xc8, 0xb1, 0x67, 0xf9, - 0xde, 0xae, 0xf4, 0xb0, 0xd2, 0x28, 0xc6, 0x7d, 0xa4, 0xfd, 0xda, 0xce, 0x66, 0x4d, 0x2a, 0x26, - 0xc4, 0x97, 0xe0, 0x72, 0x68, 0x2a, 0xee, 0x01, 0xd6, 0x82, 0x0f, 0xb0, 0x16, 0xff, 0x3e, 0x0a, - 0xe5, 0xf0, 0xfc, 0x1a, 0xdd, 0x1f, 0x98, 0xf8, 0xfa, 0x0c, 0xc9, 0xf9, 0xc0, 0xec, 0xd1, 0xd3, - 0x90, 0x37, 0xf1, 0x31, 0xb6, 0x9b, 0x2d, 0x96, 0xef, 0x33, 0x44, 0x3b, 0x27, 0xe5, 0x38, 0x95, - 0x0a, 0x59, 0x8c, 0xed, 0x7d, 0xdc, 0xb4, 0x65, 0xe6, 0xf2, 0xd8, 0x86, 0x49, 0x13, 0x36, 0x42, - 0xdd, 0x67, 0x44, 0xf1, 0xbd, 0x99, 
0xd6, 0x32, 0x0d, 0x09, 0xa9, 0xd6, 0x90, 0xde, 0x2e, 0xc6, - 0x10, 0x82, 0x3c, 0x7d, 0x94, 0xf7, 0x77, 0x2a, 0x7b, 0xfb, 0xf5, 0x5d, 0xb2, 0x96, 0x0b, 0x50, - 0x70, 0xd6, 0xd2, 0x21, 0x26, 0xc4, 0xff, 0x8a, 0xc2, 0xa5, 0x90, 0xd3, 0x01, 0xba, 0x0b, 0x60, - 0xf7, 0x65, 0x13, 0x37, 0x75, 0x53, 0x0d, 0x37, 0xb2, 0x46, 0x5f, 0xa2, 0x1c, 0x52, 0xda, 0xe6, - 0x4f, 0xd6, 0x98, 0x7b, 0x0e, 0xf4, 0x3a, 0x57, 0x4a, 0x66, 0xe5, 0xb8, 0x89, 0x6b, 0x23, 0x80, - 0x36, 0xdc, 0x24, 0x8a, 0xe9, 0xda, 0x52, 0xc5, 0x94, 0x1f, 0x3d, 0x84, 0x79, 0x6f, 0xdf, 0x3a, - 0x5e, 0x8b, 0xa1, 0x75, 0x2b, 0xe1, 0x9b, 0x96, 0xed, 0x4b, 0xa9, 0x78, 0x1a, 0x24, 0x58, 0xe3, - 0x5c, 0x61, 0xe2, 0x7c, 0xae, 0x50, 0xfc, 0xc7, 0x98, 0x7f, 0x61, 0x83, 0x87, 0xa1, 0x5d, 0x48, - 0x5a, 0xb6, 0x62, 0xf7, 0x2c, 0x6e, 0x70, 0x77, 0xa6, 0x3d, 0x59, 0xad, 0x3a, 0x0f, 0xfb, 0x54, - 0x5c, 0xe2, 0x6a, 0xfe, 0xb4, 0xde, 0x96, 0xf8, 0x32, 0xe4, 0x83, 0x8b, 0x13, 0xbe, 0x65, 0x3c, - 0x9f, 0x13, 0x15, 0xdb, 0xb0, 0x30, 0x02, 0xd4, 0x45, 0x77, 0xf8, 0x0d, 0x25, 0xfb, 0x3e, 0xd7, - 0x87, 0x47, 0x15, 0x60, 0xf7, 0x2e, 0x2a, 0x49, 0x3c, 0xf6, 0xce, 0xa4, 0xec, 0x53, 0x78, 0x04, - 0xb1, 0xe9, 0x25, 0x95, 0x3e, 0xa8, 0x7c, 0x04, 0x00, 0x2d, 0x9c, 0x03, 0x80, 0xfe, 0x27, 0x01, - 0xae, 0x8c, 0x39, 0xf5, 0xa2, 0x47, 0x03, 0xd6, 0xf7, 0xea, 0x2c, 0x67, 0xe6, 0x55, 0x46, 0x0b, - 0xda, 0x9f, 0x78, 0x1b, 0xb2, 0x7e, 0xfa, 0x74, 0x4b, 0xff, 0xb3, 0x98, 0x17, 0x89, 0x82, 0x48, - 0xf9, 0xf7, 0x96, 0x65, 0x0f, 0x58, 0x7f, 0x74, 0x46, 0xeb, 0x1f, 0x63, 0xae, 0xf1, 0x73, 0x66, - 0x4a, 0xfe, 0x1d, 0x9b, 0x08, 0xee, 0xd8, 0xa1, 0xc4, 0x20, 0x39, 0x9c, 0x18, 0xfc, 0x41, 0xe7, - 0x47, 0x3f, 0x11, 0x00, 0x7c, 0x65, 0x08, 0x8b, 0x90, 0x30, 0xf5, 0x5e, 0x57, 0xa5, 0xe6, 0x98, - 0x90, 0x58, 0x03, 0x2d, 0x43, 0xe6, 0x83, 0x9e, 0x6e, 0xf6, 0x3a, 0xfe, 0x33, 0x38, 0x30, 0x12, - 0x5d, 0xa6, 0x1b, 0x50, 0x60, 0xa0, 0x82, 0xa5, 0x9d, 0x74, 0x15, 0xbb, 0x67, 0x62, 0x7e, 0x3b, - 0x91, 0xa7, 0xe4, 0x7d, 0x87, 0x4a, 0x18, 0x59, 0xd9, 0x89, 0xc7, 0xc8, 
0x56, 0x3c, 0x4f, 0xc9, - 0x1e, 0xe3, 0x3b, 0x70, 0xd9, 0x6e, 0x99, 0xd8, 0x6a, 0xe9, 0x6d, 0x55, 0x1e, 0xdc, 0x91, 0x49, - 0x6a, 0x3b, 0xcb, 0x13, 0x3c, 0x81, 0x74, 0xc9, 0xd5, 0x70, 0x18, 0xdc, 0x95, 0xbf, 0x10, 0x00, - 0x0d, 0x5f, 0x52, 0xff, 0x40, 0x26, 0xff, 0x11, 0x24, 0xe8, 0x8e, 0x25, 0x89, 0xb0, 0xeb, 0x57, - 0xd3, 0xdc, 0x65, 0xbe, 0x0b, 0xa0, 0xd8, 0xb6, 0xa9, 0x1d, 0xf5, 0xd8, 0x11, 0x24, 0x36, 0x12, - 0x68, 0xa6, 0xf2, 0x15, 0x87, 0x6f, 0xe3, 0x2a, 0xdf, 0xfa, 0x8b, 0x9e, 0xa8, 0x6f, 0xfb, 0xfb, - 0x14, 0x8a, 0x3b, 0x90, 0x0f, 0xca, 0x3a, 0x90, 0x03, 0x1b, 0x43, 0x10, 0x72, 0x60, 0x50, 0x18, - 0x87, 0x1c, 0x5c, 0xc0, 0x22, 0xc6, 0x8a, 0x7c, 0x68, 0x43, 0xfc, 0xa5, 0x00, 0x59, 0xbf, 0xc3, - 0xf8, 0xa1, 0x9d, 0xda, 0xc5, 0x4f, 0x04, 0x48, 0xb9, 0x93, 0x0f, 0xa9, 0xb0, 0xf1, 0xd6, 0x2e, - 0xea, 0xaf, 0x27, 0x61, 0x25, 0x3b, 0x31, 0xb7, 0x10, 0xe8, 0x35, 0x37, 0xf3, 0x0e, 0xbb, 0x2d, - 0xf2, 0xaf, 0xb4, 0x53, 0x0b, 0xc5, 0x0f, 0x1a, 0x7f, 0xc7, 0xc7, 0x41, 0x52, 0x4e, 0xf4, 0x17, - 0x90, 0x54, 0x9a, 0xee, 0x1d, 0x59, 0x7e, 0xc4, 0xa5, 0x89, 0xc3, 0xba, 0xda, 0xe8, 0x57, 0x28, - 0xa7, 0xc4, 0x25, 0xf8, 0xa8, 0xa2, 0x6e, 0x21, 0xd1, 0x1b, 0x44, 0x2f, 0xe3, 0x09, 0x46, 0xb2, - 0x3c, 0xc0, 0xc1, 0xce, 0xc3, 0xdd, 0xcd, 0xad, 0x7b, 0x5b, 0xb5, 0x4d, 0x9e, 0x7b, 0x6f, 0x6e, - 0xd6, 0x36, 0x8b, 0x51, 0xc2, 0x27, 0xd5, 0x1e, 0xee, 0x1e, 0xd6, 0x36, 0x8b, 0x31, 0xb1, 0x02, - 0x69, 0xd7, 0x21, 0xd2, 0x52, 0x31, 0xfd, 0x43, 0x5e, 0x2c, 0x13, 0x93, 0x58, 0x03, 0x2d, 0x41, - 0xc6, 0x7f, 0xed, 0xc8, 0x36, 0x6f, 0xda, 0x70, 0xef, 0xd4, 0xff, 0x45, 0x80, 0xc2, 0x40, 0x3e, - 0x85, 0x5e, 0x83, 0x39, 0xa3, 0x77, 0x24, 0x3b, 0xb6, 0x3b, 0x70, 0x9d, 0xeb, 0x00, 0x60, 0xbd, - 0xa3, 0xb6, 0xd6, 0x7c, 0x80, 0xcf, 0xb8, 0x03, 0x4e, 0x1a, 0xbd, 0xa3, 0x07, 0xcc, 0xc4, 0xd9, - 0x30, 0xa2, 0x63, 0x86, 0x11, 0x1b, 0x18, 0x06, 0xba, 0x01, 0xd9, 0xae, 0xae, 0x62, 0x59, 0x51, - 0x55, 0x13, 0x5b, 0x2c, 0xec, 0xa5, 0xb9, 0xe6, 0x0c, 0xe9, 0xa9, 0xb0, 0x0e, 0xf1, 0x6b, 0x01, - 0xd0, 0x70, 
0x10, 0x40, 0xfb, 0xa3, 0xf2, 0x47, 0x61, 0xba, 0xfc, 0x91, 0x7f, 0xee, 0xe1, 0x2c, - 0xb2, 0x01, 0x8b, 0x9e, 0xab, 0x32, 0xe8, 0x7c, 0xe9, 0xa2, 0x44, 0xa7, 0x5c, 0x94, 0x88, 0x84, - 0x5c, 0x79, 0xb7, 0x67, 0xa2, 0xcf, 0x15, 0x0d, 0x28, 0x35, 0x86, 0xc4, 0xf8, 0x3c, 0xc3, 0x86, - 0x24, 0x9c, 0x67, 0x48, 0xe2, 0x6d, 0x28, 0x3e, 0x72, 0xdf, 0xcf, 0xdf, 0x34, 0x30, 0x4c, 0x61, - 0x68, 0x98, 0xa7, 0x90, 0x22, 0xde, 0x97, 0x46, 0x97, 0xbf, 0x82, 0xb4, 0xbb, 0x7a, 0x6e, 0xb5, - 0x69, 0xe8, 0xb2, 0xf3, 0x91, 0x78, 0x22, 0xe8, 0x16, 0xcc, 0x93, 0xb8, 0xe1, 0xd4, 0x27, 0xb0, - 0xcb, 0x90, 0x28, 0xf5, 0x86, 0x05, 0xd6, 0xb1, 0xed, 0x20, 0xf8, 0x24, 0xed, 0x2c, 0x3a, 0x01, - 0xee, 0x77, 0x31, 0x00, 0x72, 0xa6, 0x1e, 0xb8, 0x13, 0x62, 0xdf, 0x30, 0x17, 0xc8, 0x8f, 0xc5, - 0x8f, 0xa3, 0x90, 0xf1, 0x55, 0x3d, 0xa0, 0x3f, 0x0f, 0xa4, 0xfa, 0x2b, 0xe3, 0x2a, 0x24, 0x7c, - 0x79, 0x7e, 0x60, 0x62, 0xd1, 0xd9, 0x27, 0x16, 0x56, 0x8c, 0xe2, 0x14, 0x4f, 0xc4, 0x67, 0x2e, - 0x9e, 0x78, 0x1e, 0x90, 0xad, 0xdb, 0x4a, 0x9b, 0x04, 0x6f, 0xad, 0x7b, 0x22, 0xb3, 0xdd, 0xce, - 0x02, 0x48, 0x91, 0xf6, 0x1c, 0xd2, 0x8e, 0x3d, 0x42, 0x17, 0xff, 0x55, 0x80, 0x94, 0x8b, 0xfc, - 0xcc, 0x5a, 0x5f, 0x78, 0x11, 0x92, 0x1c, 0xdc, 0x60, 0x05, 0x86, 0xbc, 0x35, 0xb2, 0x4a, 0xa4, - 0x0c, 0xa9, 0x0e, 0xb6, 0x15, 0x1a, 0x0d, 0x59, 0xe6, 0xe1, 0xb6, 0xbf, 0x73, 0xf5, 0xc6, 0xad, - 0x57, 0x21, 0xe3, 0xab, 0x11, 0x25, 0x91, 0x75, 0xa7, 0xf6, 0x56, 0x31, 0x52, 0x9e, 0xfb, 0xf4, - 0xf3, 0x95, 0xd8, 0x0e, 0xfe, 0x10, 0x95, 0x88, 0x3b, 0xae, 0xd6, 0x6b, 0xd5, 0x07, 0x45, 0xa1, - 0x9c, 0xf9, 0xf4, 0xf3, 0x95, 0x39, 0x09, 0xd3, 0x3b, 0xf6, 0x5b, 0x0f, 0xa0, 0x30, 0xf0, 0x45, - 0x83, 0x3e, 0x1e, 0x41, 0x7e, 0xf3, 0x60, 0x6f, 0x7b, 0xab, 0x5a, 0x69, 0xd4, 0xe4, 0xc3, 0xdd, - 0x46, 0xad, 0x28, 0xa0, 0x4b, 0xb0, 0xb0, 0xbd, 0xf5, 0x66, 0xbd, 0x21, 0x57, 0xb7, 0xb7, 0x6a, - 0x3b, 0x0d, 0xb9, 0xd2, 0x68, 0x54, 0xaa, 0x0f, 0x8a, 0xd1, 0xf5, 0x7f, 0xce, 0x40, 0xa1, 0xb2, - 0x51, 0xdd, 0xaa, 0x18, 0x46, 0x5b, 0x6b, 0x2a, 
0x34, 0x62, 0x54, 0x21, 0x4e, 0x2f, 0xec, 0xc6, - 0xfe, 0x2d, 0x54, 0x1e, 0x5f, 0x84, 0x81, 0xee, 0x41, 0x82, 0xde, 0xe5, 0xa1, 0xf1, 0xbf, 0x0f, - 0x95, 0x27, 0x54, 0x65, 0x90, 0xc1, 0xd0, 0x7d, 0x38, 0xf6, 0x7f, 0xa2, 0xf2, 0xf8, 0x22, 0x0d, - 0xb4, 0x0d, 0x73, 0xce, 0x0d, 0xc5, 0xa4, 0x3f, 0x73, 0xca, 0x13, 0xab, 0x1d, 0xc8, 0xd4, 0xd8, - 0x4d, 0xd2, 0xf8, 0x5f, 0x8d, 0xca, 0x13, 0xca, 0x37, 0xd0, 0x16, 0x24, 0x39, 0xba, 0x3a, 0xe1, - 0x2f, 0x9b, 0xf2, 0xa4, 0xaa, 0x05, 0x24, 0x41, 0xda, 0xbb, 0x47, 0x9c, 0xfc, 0x03, 0x55, 0x79, - 0x8a, 0xca, 0x14, 0xf4, 0x18, 0x72, 0x41, 0xc4, 0x76, 0xba, 0x3f, 0x79, 0xca, 0x53, 0xd6, 0x47, - 0x10, 0xfd, 0x41, 0xf8, 0x76, 0xba, 0x3f, 0x7b, 0xca, 0x53, 0x96, 0x4b, 0xa0, 0xf7, 0x61, 0x7e, - 0x18, 0x5e, 0x9d, 0xfe, 0x47, 0x9f, 0xf2, 0x0c, 0x05, 0x14, 0xa8, 0x03, 0x68, 0x04, 0x2c, 0x3b, - 0xc3, 0x7f, 0x3f, 0xe5, 0x59, 0xea, 0x29, 0x90, 0x0a, 0x85, 0x41, 0xa8, 0x73, 0xda, 0xff, 0x80, - 0xca, 0x53, 0xd7, 0x56, 0xb0, 0xb7, 0x04, 0x71, 0xbf, 0x69, 0xff, 0x0b, 0x2a, 0x4f, 0x5d, 0x6a, - 0x81, 0x0e, 0x00, 0x7c, 0x48, 0xd2, 0x14, 0xff, 0x09, 0x95, 0xa7, 0x29, 0xba, 0x40, 0x06, 0x2c, - 0x8c, 0x82, 0x8e, 0x66, 0xf9, 0x6d, 0xa8, 0x3c, 0x53, 0x2d, 0x06, 0xb1, 0xe7, 0x20, 0x08, 0x34, - 0xdd, 0x6f, 0x44, 0xe5, 0x29, 0x8b, 0x32, 0x36, 0x6a, 0x5f, 0x7c, 0xb3, 0x24, 0x7c, 0xf9, 0xcd, - 0x92, 0xf0, 0xf5, 0x37, 0x4b, 0xc2, 0x67, 0xdf, 0x2e, 0x45, 0xbe, 0xfc, 0x76, 0x29, 0xf2, 0x3f, - 0xdf, 0x2e, 0x45, 0xfe, 0xfa, 0xb9, 0x13, 0xcd, 0x6e, 0xf5, 0x8e, 0x56, 0x9b, 0x7a, 0x67, 0xcd, - 0xff, 0xc7, 0xe9, 0xa8, 0x5f, 0x64, 0x8f, 0x92, 0x34, 0x12, 0xdf, 0xfe, 0x75, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xea, 0x82, 0x7b, 0x89, 0x42, 0x3b, 0x00, 0x00, + // 3664 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4b, 0x70, 0x1b, 0x47, + 0x7a, 0xc6, 0xe0, 0x8d, 0x1f, 0xaf, 0x61, 0x93, 0x92, 0x20, 0x48, 0x22, 0xe9, 0x51, 0x6c, 0xc9, + 0xb2, 0x4d, 0xda, 0x52, 0x6c, 0xc9, 0xb1, 0x13, 0x17, 0x08, 0x42, 0x01, 
0x29, 0x8a, 0xa4, 0x86, + 0x20, 0x5d, 0x8e, 0x63, 0x8f, 0x87, 0x40, 0x93, 0x18, 0x0b, 0xc0, 0x8c, 0x67, 0x06, 0x34, 0xe8, + 0x6b, 0xe2, 0x8b, 0x0f, 0x29, 0xdf, 0x92, 0x8b, 0x6f, 0x49, 0x55, 0x2e, 0xc9, 0x3d, 0x95, 0xaa, + 0x1c, 0x72, 0x52, 0xed, 0xc9, 0x7b, 0xd9, 0xda, 0xcb, 0x7a, 0x5d, 0xf6, 0x65, 0x6b, 0x8f, 0x7b, + 0xd9, 0xaa, 0x3d, 0x6c, 0x6d, 0xf5, 0x63, 0x5e, 0x00, 0x06, 0x0f, 0x69, 0x6b, 0x77, 0x6b, 0xbd, + 0x37, 0xf4, 0xdf, 0xff, 0xff, 0xf7, 0x63, 0xba, 0xff, 0xc7, 0xf7, 0x37, 0xe0, 0x8a, 0x8d, 0x7b, + 0x2d, 0x6c, 0x76, 0xb5, 0x9e, 0xbd, 0xae, 0x1e, 0x37, 0xb5, 0x75, 0xfb, 0xdc, 0xc0, 0xd6, 0x9a, + 0x61, 0xea, 0xb6, 0x8e, 0x8a, 0x5e, 0xe7, 0x1a, 0xe9, 0x2c, 0x5f, 0xf3, 0x71, 0x37, 0xcd, 0x73, + 0xc3, 0xd6, 0xd7, 0x0d, 0x53, 0xd7, 0x4f, 0x18, 0x7f, 0xd9, 0xaf, 0x8c, 0xea, 0x59, 0x6f, 0xa9, + 0x56, 0x9b, 0x77, 0x5e, 0x1d, 0xe9, 0xf4, 0x0d, 0x15, 0xe8, 0xe5, 0x9a, 0x1f, 0xe3, 0x73, 0xa7, + 0xf7, 0xda, 0x88, 0xac, 0xa1, 0x9a, 0x6a, 0xd7, 0xe9, 0x5e, 0xf6, 0x75, 0x9f, 0x61, 0xd3, 0xd2, + 0xf4, 0x5e, 0x40, 0xf9, 0xca, 0xa9, 0xae, 0x9f, 0x76, 0xf0, 0x3a, 0x6d, 0x1d, 0xf7, 0x4f, 0xd6, + 0x6d, 0xad, 0x8b, 0x2d, 0x5b, 0xed, 0x1a, 0x9c, 0x61, 0xe9, 0x54, 0x3f, 0xd5, 0xe9, 0xcf, 0x75, + 0xf2, 0x8b, 0x51, 0xa5, 0xff, 0xc9, 0x40, 0x4a, 0xc6, 0x9f, 0xf4, 0xb1, 0x65, 0xa3, 0xdb, 0x10, + 0xc7, 0xcd, 0xb6, 0x5e, 0x12, 0x56, 0x85, 0x9b, 0xd9, 0xdb, 0x57, 0xd7, 0x86, 0x76, 0x66, 0x8d, + 0xf3, 0xd5, 0x9a, 0x6d, 0xbd, 0x1e, 0x91, 0x29, 0x2f, 0x7a, 0x1d, 0x12, 0x27, 0x9d, 0xbe, 0xd5, + 0x2e, 0x45, 0xa9, 0xd0, 0xb5, 0x30, 0xa1, 0xfb, 0x84, 0xa9, 0x1e, 0x91, 0x19, 0x37, 0x19, 0x4a, + 0xeb, 0x9d, 0xe8, 0xa5, 0xd8, 0xe4, 0xa1, 0xb6, 0x7a, 0x27, 0x74, 0x28, 0xc2, 0x8b, 0x36, 0x00, + 0xb4, 0x9e, 0x66, 0x2b, 0xcd, 0xb6, 0xaa, 0xf5, 0x4a, 0x71, 0x2a, 0xf9, 0x5c, 0xb8, 0xa4, 0x66, + 0x57, 0x09, 0x63, 0x3d, 0x22, 0x67, 0x34, 0xa7, 0x41, 0xa6, 0xfb, 0x49, 0x1f, 0x9b, 0xe7, 0xa5, + 0xc4, 0xe4, 0xe9, 0x3e, 0x22, 0x4c, 0x64, 0xba, 0x94, 0x1b, 0xbd, 0x0d, 0xe9, 0x66, 0x1b, 0x37, + 0x1f, 0x2b, 
0xf6, 0xa0, 0x94, 0xa2, 0x92, 0x2b, 0x61, 0x92, 0x55, 0xc2, 0xd7, 0x18, 0xd4, 0x23, + 0x72, 0xaa, 0xc9, 0x7e, 0xa2, 0x7b, 0x90, 0x6c, 0xea, 0xdd, 0xae, 0x66, 0x97, 0x80, 0xca, 0x2e, + 0x87, 0xca, 0x52, 0xae, 0x7a, 0x44, 0xe6, 0xfc, 0x68, 0x17, 0x0a, 0x1d, 0xcd, 0xb2, 0x15, 0xab, + 0xa7, 0x1a, 0x56, 0x5b, 0xb7, 0xad, 0x52, 0x96, 0x6a, 0x78, 0x3e, 0x4c, 0xc3, 0x8e, 0x66, 0xd9, + 0x07, 0x0e, 0x73, 0x3d, 0x22, 0xe7, 0x3b, 0x7e, 0x02, 0xd1, 0xa7, 0x9f, 0x9c, 0x60, 0xd3, 0x55, + 0x58, 0xca, 0x4d, 0xd6, 0xb7, 0x47, 0xb8, 0x1d, 0x79, 0xa2, 0x4f, 0xf7, 0x13, 0xd0, 0xfb, 0xb0, + 0xd8, 0xd1, 0xd5, 0x96, 0xab, 0x4e, 0x69, 0xb6, 0xfb, 0xbd, 0xc7, 0xa5, 0x3c, 0x55, 0xfa, 0x62, + 0xe8, 0x24, 0x75, 0xb5, 0xe5, 0xa8, 0xa8, 0x12, 0x81, 0x7a, 0x44, 0x5e, 0xe8, 0x0c, 0x13, 0xd1, + 0x87, 0xb0, 0xa4, 0x1a, 0x46, 0xe7, 0x7c, 0x58, 0x7b, 0x81, 0x6a, 0xbf, 0x15, 0xa6, 0xbd, 0x42, + 0x64, 0x86, 0xd5, 0x23, 0x75, 0x84, 0x8a, 0x1a, 0x20, 0x1a, 0x26, 0x36, 0x54, 0x13, 0x2b, 0x86, + 0xa9, 0x1b, 0xba, 0xa5, 0x76, 0x4a, 0x45, 0xaa, 0xfb, 0x46, 0x98, 0xee, 0x7d, 0xc6, 0xbf, 0xcf, + 0xd9, 0xeb, 0x11, 0xb9, 0x68, 0x04, 0x49, 0x4c, 0xab, 0xde, 0xc4, 0x96, 0xe5, 0x69, 0x15, 0xa7, + 0x69, 0xa5, 0xfc, 0x41, 0xad, 0x01, 0x12, 0xaa, 0x41, 0x16, 0x0f, 0x88, 0xb8, 0x72, 0xa6, 0xdb, + 0xb8, 0xb4, 0x40, 0x15, 0x4a, 0xa1, 0x37, 0x94, 0xb2, 0x1e, 0xe9, 0x36, 0xae, 0x47, 0x64, 0xc0, + 0x6e, 0x0b, 0xa9, 0x70, 0xe1, 0x0c, 0x9b, 0xda, 0xc9, 0x39, 0x55, 0xa3, 0xd0, 0x1e, 0x62, 0x49, + 0x4a, 0x88, 0x2a, 0x7c, 0x29, 0x4c, 0xe1, 0x11, 0x15, 0x22, 0x2a, 0x6a, 0x8e, 0x48, 0x3d, 0x22, + 0x2f, 0x9e, 0x8d, 0x92, 0xc9, 0x11, 0x3b, 0xd1, 0x7a, 0x6a, 0x47, 0xfb, 0x0c, 0x2b, 0xc7, 0x1d, + 0xbd, 0xf9, 0xb8, 0xb4, 0x38, 0xf9, 0x88, 0xdd, 0xe7, 0xdc, 0x1b, 0x84, 0x99, 0x1c, 0xb1, 0x13, + 0x3f, 0x61, 0x23, 0x05, 0x89, 0x33, 0xb5, 0xd3, 0xc7, 0xdb, 0xf1, 0x74, 0x52, 0x4c, 0x6d, 0xc7, + 0xd3, 0x69, 0x31, 0xb3, 0x1d, 0x4f, 0x67, 0x44, 0x90, 0x6e, 0x40, 0xd6, 0x67, 0x92, 0x50, 0x09, + 0x52, 0x5d, 0x6c, 0x59, 0xea, 0x29, 0xa6, 0x16, 
0x2c, 0x23, 0x3b, 0x4d, 0xa9, 0x00, 0x39, 0xbf, + 0x19, 0x92, 0xbe, 0x14, 0x5c, 0x49, 0x62, 0x61, 0x88, 0x24, 0x37, 0xa9, 0x8e, 0x24, 0x6f, 0xa2, + 0xeb, 0x90, 0xa7, 0x8b, 0x50, 0x9c, 0x7e, 0x62, 0xe6, 0xe2, 0x72, 0x8e, 0x12, 0x8f, 0x38, 0xd3, + 0x0a, 0x64, 0x8d, 0xdb, 0x86, 0xcb, 0x12, 0xa3, 0x2c, 0x60, 0xdc, 0x36, 0x1c, 0x86, 0xe7, 0x20, + 0x47, 0x56, 0xec, 0x72, 0xc4, 0xe9, 0x20, 0x59, 0x42, 0xe3, 0x2c, 0xd2, 0x3f, 0xc7, 0x40, 0x1c, + 0x36, 0x5d, 0xe8, 0x1e, 0xc4, 0x89, 0x15, 0xe7, 0x06, 0xb9, 0xbc, 0xc6, 0x4c, 0xfc, 0x9a, 0x63, + 0xe2, 0xd7, 0x1a, 0x8e, 0x89, 0xdf, 0x48, 0x3f, 0xf9, 0x66, 0x25, 0xf2, 0xe5, 0xcf, 0x57, 0x04, + 0x99, 0x4a, 0xa0, 0xcb, 0xc4, 0x60, 0xa9, 0x5a, 0x4f, 0xd1, 0x5a, 0x74, 0xca, 0x19, 0x62, 0x8d, + 0x54, 0xad, 0xb7, 0xd5, 0x42, 0x3b, 0x20, 0x36, 0xf5, 0x9e, 0x85, 0x7b, 0x56, 0xdf, 0x52, 0x98, + 0x8b, 0xe1, 0x66, 0x38, 0x60, 0x4c, 0x99, 0x6f, 0xa9, 0x3a, 0x9c, 0xfb, 0x94, 0x51, 0x2e, 0x36, + 0x83, 0x04, 0xb4, 0x0b, 0xf9, 0x33, 0xb5, 0xa3, 0xb5, 0x54, 0x5b, 0x37, 0x15, 0x0b, 0xdb, 0xdc, + 0x2e, 0x5f, 0x1f, 0xf9, 0xda, 0x47, 0x0e, 0xd7, 0x01, 0xb6, 0x0f, 0x8d, 0x96, 0x6a, 0xe3, 0x8d, + 0xf8, 0x93, 0x6f, 0x56, 0x04, 0x39, 0x77, 0xe6, 0xeb, 0x41, 0x2f, 0x40, 0x51, 0x35, 0x0c, 0xc5, + 0xb2, 0x55, 0x1b, 0x2b, 0xc7, 0xe7, 0x36, 0xb6, 0xa8, 0xa9, 0xce, 0xc9, 0x79, 0xd5, 0x30, 0x0e, + 0x08, 0x75, 0x83, 0x10, 0xd1, 0xf3, 0x50, 0x20, 0x56, 0x5d, 0x53, 0x3b, 0x4a, 0x1b, 0x6b, 0xa7, + 0x6d, 0xbb, 0x94, 0x5c, 0x15, 0x6e, 0xc6, 0xe4, 0x3c, 0xa7, 0xd6, 0x29, 0x11, 0xad, 0xc1, 0xa2, + 0xc3, 0xd6, 0xd4, 0x4d, 0xec, 0xf0, 0x12, 0x1b, 0x9e, 0x97, 0x17, 0x78, 0x57, 0x55, 0x37, 0x31, + 0xe3, 0x97, 0x5a, 0xee, 0x49, 0xa1, 0x1e, 0x00, 0x21, 0x88, 0xb7, 0x54, 0x5b, 0xa5, 0x5f, 0x20, + 0x27, 0xd3, 0xdf, 0x84, 0x66, 0xa8, 0x76, 0x9b, 0xef, 0x2b, 0xfd, 0x8d, 0x2e, 0x42, 0x92, 0xab, + 0x8e, 0xd1, 0x69, 0xf0, 0x16, 0x5a, 0x82, 0x84, 0x61, 0xea, 0x67, 0x98, 0x6e, 0x4b, 0x5a, 0x66, + 0x0d, 0x49, 0x86, 0x42, 0xd0, 0x5b, 0xa0, 0x02, 0x44, 0xed, 0x01, 0x1f, 0x25, 0x6a, 
0x0f, 0xd0, + 0xab, 0x10, 0x27, 0x1f, 0x80, 0x8e, 0x51, 0x18, 0xe3, 0x1f, 0xb9, 0x5c, 0xe3, 0xdc, 0xc0, 0x32, + 0xe5, 0x94, 0x8a, 0x90, 0x0f, 0x78, 0x11, 0xe9, 0x22, 0x2c, 0x8d, 0x73, 0x0a, 0x52, 0xdb, 0xa5, + 0x07, 0x8c, 0x3b, 0x7a, 0x1d, 0xd2, 0xae, 0x57, 0x60, 0x07, 0xee, 0xf2, 0xc8, 0xb0, 0x0e, 0xb3, + 0xec, 0xb2, 0x92, 0x93, 0x46, 0x3e, 0x58, 0x5b, 0xe5, 0x31, 0x40, 0x4e, 0x4e, 0xa9, 0x86, 0x51, + 0x57, 0xad, 0xb6, 0xf4, 0x11, 0x94, 0xc2, 0x2c, 0xbe, 0x6f, 0xc3, 0x04, 0x7a, 0x5d, 0x9c, 0x0d, + 0xbb, 0x08, 0xc9, 0x13, 0xdd, 0xec, 0xaa, 0x36, 0x55, 0x96, 0x97, 0x79, 0x8b, 0x6c, 0x24, 0xb3, + 0xfe, 0x31, 0x4a, 0x66, 0x0d, 0x49, 0x81, 0xcb, 0xa1, 0x56, 0x9f, 0x88, 0x68, 0xbd, 0x16, 0x66, + 0xdb, 0x9a, 0x97, 0x59, 0xc3, 0x53, 0xc4, 0x26, 0xcb, 0x1a, 0x64, 0x58, 0x8b, 0xae, 0x95, 0xea, + 0xcf, 0xc8, 0xbc, 0x25, 0xfd, 0x7f, 0x1c, 0x2e, 0x8e, 0xb7, 0xfd, 0x68, 0x15, 0x72, 0x5d, 0x75, + 0xa0, 0xd8, 0x03, 0x7e, 0x4c, 0x05, 0xfa, 0xe1, 0xa1, 0xab, 0x0e, 0x1a, 0x03, 0x76, 0x46, 0x45, + 0x88, 0xd9, 0x03, 0xab, 0x14, 0x5d, 0x8d, 0xdd, 0xcc, 0xc9, 0xe4, 0x27, 0x3a, 0x84, 0x85, 0x8e, + 0xde, 0x54, 0x3b, 0x4a, 0x47, 0xb5, 0x6c, 0x85, 0x07, 0x05, 0xb1, 0x90, 0x1b, 0xc3, 0xac, 0x38, + 0x6e, 0xb1, 0xef, 0x49, 0x0c, 0x15, 0xbd, 0x31, 0x11, 0xb9, 0x48, 0x75, 0xec, 0xa8, 0xce, 0xa7, + 0x46, 0x9b, 0x90, 0xed, 0x6a, 0xd6, 0x31, 0x6e, 0xab, 0x67, 0x9a, 0x6e, 0x96, 0xe2, 0xab, 0xb1, + 0xb1, 0x41, 0xd5, 0x43, 0x8f, 0x87, 0x6b, 0xf2, 0x8b, 0xf9, 0x3e, 0x49, 0x22, 0x70, 0x86, 0x1d, + 0x2b, 0x94, 0x9c, 0xdb, 0x0a, 0xbd, 0x0a, 0x4b, 0x3d, 0x3c, 0xb0, 0x15, 0xf7, 0x86, 0x5b, 0xec, + 0x9c, 0xa4, 0xe8, 0xd6, 0x23, 0xd2, 0xe7, 0x9a, 0x05, 0x8b, 0x1c, 0x19, 0x74, 0x17, 0x4a, 0xf4, + 0x9e, 0x32, 0xe3, 0x45, 0x8c, 0x2c, 0x6e, 0x39, 0x97, 0xb6, 0x45, 0x3f, 0xe3, 0x05, 0xd2, 0x4f, + 0xcd, 0xe3, 0x0e, 0xed, 0xe5, 0x17, 0x7d, 0x1d, 0x96, 0x98, 0xbb, 0xc5, 0x26, 0xf1, 0xbb, 0xe4, + 0xb3, 0xd0, 0xa1, 0x30, 0x1d, 0x6a, 0xc1, 0xe9, 0xdb, 0x37, 0xf5, 0xc6, 0x80, 0x8e, 0xf4, 0xaa, + 0x2b, 0xd0, 0x52, 0xc8, 
0x01, 0x76, 0x6c, 0xf3, 0x09, 0x3d, 0x8e, 0xc8, 0xe9, 0xab, 0x18, 0xae, + 0x15, 0xbf, 0xeb, 0x79, 0x89, 0xd3, 0xd1, 0xe8, 0x91, 0x77, 0x79, 0x16, 0xd3, 0x75, 0x22, 0xd2, + 0xbf, 0xc4, 0x7c, 0x87, 0x28, 0xe8, 0xd7, 0xf9, 0x11, 0x11, 0xbc, 0x23, 0x72, 0xe0, 0x9b, 0x97, + 0xff, 0x94, 0xb0, 0xf8, 0xfa, 0xca, 0xa8, 0x25, 0x18, 0x3e, 0x1d, 0xee, 0xd4, 0xc3, 0x0f, 0x48, + 0xec, 0xe9, 0x0e, 0x08, 0x82, 0x38, 0xdd, 0xd3, 0x38, 0x33, 0x86, 0xe4, 0xf7, 0x9f, 0xc4, 0xa1, + 0x09, 0xfb, 0xf6, 0xe9, 0x90, 0x6f, 0x2f, 0xbd, 0x03, 0x0b, 0x23, 0x91, 0x92, 0xbb, 0x3a, 0x61, + 0xec, 0xea, 0xa2, 0xfe, 0xd5, 0x49, 0x3f, 0x12, 0xa0, 0x1c, 0x1e, 0x1a, 0x8d, 0x55, 0xf5, 0x1a, + 0x5c, 0xf0, 0x1c, 0xa5, 0x7f, 0x96, 0xcc, 0x0e, 0x21, 0xb7, 0xd3, 0x3b, 0xa2, 0x61, 0x4e, 0xe5, + 0x21, 0x14, 0x83, 0xe1, 0x9b, 0xc5, 0xaf, 0xfc, 0x5f, 0x85, 0xd8, 0x90, 0xc0, 0xec, 0xe4, 0xc2, + 0x99, 0xbf, 0x69, 0x49, 0xff, 0x1d, 0x77, 0x3d, 0x42, 0x20, 0x16, 0x1b, 0x73, 0x38, 0x1f, 0xc1, + 0x62, 0x0b, 0x37, 0xb5, 0xd6, 0xd3, 0x9e, 0xcd, 0x05, 0x2e, 0xfd, 0x83, 0x39, 0x9a, 0x7f, 0x9e, + 0xf6, 0x0c, 0x20, 0x2d, 0x63, 0xcb, 0x20, 0x3d, 0x68, 0x03, 0x32, 0x78, 0xd0, 0xc4, 0x86, 0xed, + 0x44, 0xcf, 0xe3, 0xf3, 0x12, 0xc6, 0x5d, 0x73, 0x38, 0x49, 0x56, 0xee, 0x8a, 0xa1, 0x3b, 0x1c, + 0x78, 0x08, 0xc7, 0x10, 0xb8, 0xb8, 0x1f, 0x79, 0x78, 0xc3, 0x41, 0x1e, 0x62, 0xa1, 0x49, 0x35, + 0x93, 0x1a, 0x82, 0x1e, 0xee, 0x70, 0xe8, 0x21, 0x3e, 0x65, 0xb0, 0x00, 0xf6, 0x50, 0x0d, 0x60, + 0x0f, 0x89, 0x29, 0xcb, 0x0c, 0x01, 0x1f, 0xde, 0x70, 0xc0, 0x87, 0xe4, 0x94, 0x19, 0x0f, 0xa1, + 0x0f, 0x7f, 0xeb, 0x43, 0x1f, 0xd2, 0x54, 0x74, 0x35, 0x54, 0x74, 0x0c, 0xfc, 0xf0, 0xa6, 0x0b, + 0x3f, 0x64, 0x43, 0xa1, 0x0b, 0x2e, 0x3c, 0x8c, 0x3f, 0xec, 0x8d, 0xe0, 0x0f, 0x0c, 0x2f, 0x78, + 0x21, 0x54, 0xc5, 0x14, 0x00, 0x62, 0x6f, 0x04, 0x80, 0xc8, 0x4f, 0x51, 0x38, 0x05, 0x81, 0xf8, + 0xc7, 0xf1, 0x08, 0x44, 0x38, 0x46, 0xc0, 0xa7, 0x39, 0x1b, 0x04, 0xa1, 0x84, 0x40, 0x10, 0xc5, + 0xd0, 0x74, 0x99, 0xa9, 0x9f, 0x19, 0x83, 0x38, 0x1c, 0x83, 
0x41, 0x30, 0xb4, 0xe0, 0x66, 0xa8, + 0xf2, 0x19, 0x40, 0x88, 0xc3, 0x31, 0x20, 0xc4, 0xc2, 0x54, 0xb5, 0x53, 0x51, 0x88, 0xfb, 0x41, + 0x14, 0x02, 0x85, 0x04, 0xae, 0xde, 0x6d, 0x0f, 0x81, 0x21, 0x8e, 0xc3, 0x60, 0x08, 0x06, 0x15, + 0xbc, 0x1c, 0xaa, 0x71, 0x0e, 0x1c, 0x62, 0x6f, 0x04, 0x87, 0x58, 0x9a, 0x72, 0xd2, 0x66, 0x07, + 0x22, 0x52, 0x62, 0x9a, 0x41, 0x10, 0xdb, 0xf1, 0x34, 0x88, 0x59, 0xe9, 0x45, 0x12, 0x4f, 0x0c, + 0x59, 0x38, 0x92, 0x68, 0x60, 0xd3, 0xd4, 0x4d, 0x0e, 0x29, 0xb0, 0x86, 0x74, 0x93, 0x24, 0x98, + 0x9e, 0x35, 0x9b, 0x00, 0x5a, 0xd0, 0x84, 0xce, 0x67, 0xc1, 0xa4, 0x9f, 0x09, 0x9e, 0x2c, 0x85, + 0x2d, 0xfc, 0xc9, 0x69, 0x86, 0x27, 0xa7, 0x3e, 0x28, 0x23, 0x1a, 0x84, 0x32, 0x56, 0x20, 0xeb, + 0xf7, 0x0b, 0x1c, 0xa5, 0x50, 0x3d, 0x7f, 0x70, 0x0b, 0x16, 0xa8, 0x53, 0x67, 0x80, 0x07, 0x77, + 0x52, 0x71, 0xea, 0x3a, 0x8b, 0xa4, 0x83, 0xed, 0x0b, 0x73, 0x4f, 0xaf, 0xc0, 0xa2, 0x8f, 0xd7, + 0x4d, 0x00, 0x59, 0xaa, 0x2e, 0xba, 0xdc, 0x15, 0x96, 0x09, 0x6e, 0xc7, 0xd3, 0x2d, 0x11, 0xcb, + 0xd7, 0x78, 0xcc, 0x30, 0xde, 0x1f, 0x4a, 0xbf, 0x88, 0x7a, 0xdb, 0xe8, 0x61, 0x20, 0xe3, 0xe0, + 0x0a, 0xe1, 0xa9, 0xe1, 0x0a, 0x7f, 0xb6, 0x1a, 0x0b, 0x64, 0xab, 0xe8, 0x7d, 0x58, 0x0a, 0x20, + 0x19, 0x4a, 0x9f, 0xa2, 0x14, 0xd4, 0x4d, 0xcf, 0x01, 0x68, 0x44, 0x7c, 0xa1, 0x9c, 0xdb, 0x83, + 0x3e, 0x80, 0x2b, 0x34, 0x72, 0x18, 0x5a, 0xbc, 0x33, 0x06, 0x1e, 0x35, 0xcc, 0xce, 0x82, 0x7c, + 0xc1, 0x81, 0x7c, 0x89, 0xe8, 0x08, 0x90, 0xb8, 0xfa, 0x10, 0x98, 0xe3, 0x24, 0x0c, 0xe6, 0xf8, + 0xb5, 0xe0, 0x1d, 0x2e, 0x17, 0xe8, 0x68, 0xea, 0x2d, 0xcc, 0x73, 0x65, 0xfa, 0x9b, 0xc4, 0x7f, + 0x1d, 0xfd, 0x94, 0x67, 0xc4, 0xe4, 0x27, 0xe1, 0x72, 0x7d, 0x67, 0x86, 0xbb, 0x46, 0x37, 0xcd, + 0x66, 0x51, 0x16, 0x4f, 0xb3, 0x45, 0x88, 0x3d, 0xc6, 0xcc, 0xd3, 0xe5, 0x64, 0xf2, 0x93, 0xf0, + 0xd1, 0xfb, 0xc3, 0xa3, 0x25, 0xd6, 0x40, 0xf7, 0x20, 0x43, 0xab, 0x2b, 0x8a, 0x6e, 0x58, 0xdc, + 0xb9, 0x05, 0xe2, 0x48, 0x56, 0x27, 0x59, 0xdb, 0x27, 0x3c, 0x7b, 0x86, 0x25, 0xa7, 0x0d, 0xfe, + 
0xcb, 0x17, 0xde, 0x65, 0x02, 0xe1, 0xdd, 0x55, 0xc8, 0x90, 0xd9, 0x5b, 0x86, 0xda, 0xc4, 0x14, + 0x70, 0xcf, 0xc8, 0x1e, 0x41, 0x7a, 0x22, 0x40, 0x71, 0xc8, 0x57, 0x8e, 0x5d, 0xbb, 0x73, 0xb7, + 0xa2, 0x3e, 0xe0, 0xe7, 0x1a, 0xc0, 0xa9, 0x6a, 0x29, 0x9f, 0xaa, 0x3d, 0x1b, 0xb7, 0xf8, 0x72, + 0x33, 0xa7, 0xaa, 0xf5, 0x2e, 0x25, 0x04, 0x07, 0x4e, 0x0f, 0x0d, 0xec, 0x43, 0x18, 0x32, 0x7e, + 0x84, 0x01, 0x95, 0x21, 0x6d, 0x98, 0x9a, 0x6e, 0x6a, 0xf6, 0x39, 0x9d, 0x6d, 0x4c, 0x76, 0xdb, + 0xdb, 0xf1, 0x74, 0x4c, 0x8c, 0x6f, 0xc7, 0xd3, 0x71, 0x31, 0xe1, 0xc2, 0x9f, 0xcc, 0xf6, 0x64, + 0xc5, 0x9c, 0xf4, 0xb9, 0xef, 0xbe, 0x6c, 0xe2, 0x8e, 0x76, 0x86, 0xcd, 0x39, 0x16, 0x33, 0xdb, + 0xc7, 0x5d, 0x1e, 0xb3, 0x64, 0x1f, 0x85, 0xcc, 0x9e, 0xb4, 0xfa, 0x16, 0x6e, 0x71, 0x00, 0xce, + 0x6d, 0xa3, 0x3a, 0x24, 0xf1, 0x19, 0xee, 0xd9, 0x56, 0x29, 0x45, 0x83, 0xfa, 0x8b, 0xa3, 0xd9, + 0x09, 0xe9, 0xde, 0x28, 0x91, 0x5b, 0xf3, 0xcb, 0x6f, 0x56, 0x44, 0xc6, 0xfd, 0xb2, 0xde, 0xd5, + 0x6c, 0xdc, 0x35, 0xec, 0x73, 0x99, 0xcb, 0x4f, 0xde, 0x59, 0xa9, 0x02, 0x85, 0x60, 0x00, 0x83, + 0xae, 0x43, 0xde, 0xc4, 0x36, 0xb9, 0x64, 0x81, 0xfc, 0x29, 0xc7, 0x88, 0xec, 0x0e, 0x6c, 0xc7, + 0xd3, 0x82, 0x18, 0xdd, 0x8e, 0xa7, 0xa3, 0x62, 0x4c, 0xda, 0x87, 0x0b, 0x63, 0x03, 0x18, 0x74, + 0x17, 0x32, 0x5e, 0xec, 0x23, 0xd0, 0x65, 0x4c, 0x40, 0xc5, 0x3c, 0x5e, 0xe9, 0xff, 0x04, 0x4f, + 0x65, 0x10, 0x67, 0xab, 0x41, 0xd2, 0xc4, 0x56, 0xbf, 0xc3, 0x90, 0xaf, 0xc2, 0xed, 0x57, 0x66, + 0x0b, 0x7d, 0x08, 0xb5, 0xdf, 0xb1, 0x65, 0x2e, 0x2c, 0x7d, 0x08, 0x49, 0x46, 0x41, 0x59, 0x48, + 0x1d, 0xee, 0x3e, 0xd8, 0xdd, 0x7b, 0x77, 0x57, 0x8c, 0x20, 0x80, 0x64, 0xa5, 0x5a, 0xad, 0xed, + 0x37, 0x44, 0x01, 0x65, 0x20, 0x51, 0xd9, 0xd8, 0x93, 0x1b, 0x62, 0x94, 0x90, 0xe5, 0xda, 0x76, + 0xad, 0xda, 0x10, 0x63, 0x68, 0x01, 0xf2, 0xec, 0xb7, 0x72, 0x7f, 0x4f, 0x7e, 0x58, 0x69, 0x88, + 0x71, 0x1f, 0xe9, 0xa0, 0xb6, 0xbb, 0x59, 0x93, 0xc5, 0x84, 0xf4, 0x1a, 0x5c, 0x0e, 0x0d, 0x96, + 0x3c, 0x10, 0x4d, 0xf0, 0x81, 0x68, 
0xd2, 0xbf, 0x45, 0x49, 0x56, 0x1c, 0x16, 0x01, 0xa1, 0xed, + 0xa1, 0x85, 0xdf, 0x9e, 0x23, 0x7c, 0x1a, 0x5a, 0x3d, 0x7a, 0x1e, 0x0a, 0x26, 0x3e, 0xc1, 0x76, + 0xb3, 0xcd, 0x22, 0x32, 0x86, 0xb2, 0xe5, 0xe5, 0x3c, 0xa7, 0x52, 0x21, 0x8b, 0xb1, 0x7d, 0x8c, + 0x9b, 0xb6, 0xc2, 0x6e, 0x9b, 0x45, 0xf3, 0xcb, 0x0c, 0x61, 0x23, 0xd4, 0x03, 0x46, 0x94, 0x3e, + 0x9a, 0x6b, 0x2f, 0x33, 0x90, 0x90, 0x6b, 0x0d, 0xf9, 0x3d, 0x31, 0x86, 0x10, 0x14, 0xe8, 0x4f, + 0xe5, 0x60, 0xb7, 0xb2, 0x7f, 0x50, 0xdf, 0x23, 0x7b, 0xb9, 0x08, 0x45, 0x67, 0x2f, 0x1d, 0x62, + 0x42, 0xfa, 0x49, 0x14, 0x2e, 0x85, 0xc4, 0x6f, 0xe8, 0x1e, 0x80, 0x3d, 0x50, 0x4c, 0xdc, 0xd4, + 0xcd, 0x56, 0xf8, 0x21, 0x6b, 0x0c, 0x64, 0xca, 0x21, 0x67, 0x6c, 0xfe, 0xcb, 0x9a, 0x80, 0xbd, + 0xa2, 0xb7, 0xb9, 0x52, 0xb2, 0x2a, 0x8b, 0x67, 0xd5, 0xd7, 0xc6, 0xc0, 0x03, 0xb8, 0x49, 0x14, + 0xd3, 0xbd, 0xa5, 0x8a, 0x29, 0x3f, 0x7a, 0x08, 0x0b, 0x9e, 0x2f, 0x64, 0x3e, 0xca, 0xc1, 0x18, + 0x56, 0xc3, 0x1d, 0x21, 0x73, 0x46, 0xb2, 0x78, 0x16, 0x24, 0x58, 0xe8, 0x3d, 0xb8, 0x34, 0xe4, + 0xc3, 0x5d, 0xa5, 0x89, 0x59, 0x5d, 0xf9, 0x85, 0xa0, 0x2b, 0xe7, 0xaa, 0xa5, 0x7f, 0x8f, 0xf9, + 0x37, 0x36, 0x18, 0xae, 0xee, 0x41, 0xd2, 0xb2, 0x55, 0xbb, 0x6f, 0xf1, 0x03, 0x77, 0x77, 0xd6, + 0xd8, 0x77, 0xcd, 0xf9, 0x71, 0x40, 0xc5, 0x65, 0xae, 0xe6, 0x2f, 0xfb, 0x6d, 0x49, 0xaf, 0x43, + 0x21, 0xb8, 0x39, 0xe1, 0x57, 0xc6, 0xb3, 0x39, 0x51, 0xa9, 0x03, 0x8b, 0x63, 0xa0, 0x28, 0x74, + 0x97, 0x97, 0x39, 0xd8, 0xf7, 0xb9, 0x3e, 0x3a, 0xab, 0x00, 0xbb, 0x57, 0xed, 0x20, 0x1e, 0xc1, + 0xcb, 0x1a, 0xd8, 0xa7, 0xf0, 0x08, 0x52, 0x13, 0xd0, 0x68, 0x0e, 0x32, 0x0e, 0x36, 0x13, 0x9e, + 0x01, 0x36, 0xfb, 0x0f, 0x01, 0xae, 0x4c, 0xc8, 0x4b, 0xd0, 0xa3, 0xa1, 0xd3, 0xf7, 0xe6, 0x3c, + 0x59, 0xcd, 0x1a, 0xa3, 0x05, 0xcf, 0x9f, 0x74, 0x07, 0x72, 0x7e, 0xfa, 0x6c, 0x5b, 0xff, 0xe3, + 0x98, 0xe7, 0x89, 0x82, 0xf8, 0x9e, 0xe7, 0xa0, 0x85, 0x67, 0x74, 0xd0, 0xc1, 0xd3, 0x1f, 0x9d, + 0xf3, 0xf4, 0x4f, 0x38, 0xae, 0xf1, 0x67, 0x3b, 0xae, 0x81, 0x1b, 0x9b, 
0x08, 0xde, 0xd8, 0x29, + 0x21, 0x79, 0xeb, 0x19, 0x43, 0xf2, 0xb0, 0x74, 0x02, 0xcf, 0x5b, 0x1f, 0x1d, 0x93, 0x4e, 0x48, + 0xbf, 0x12, 0x00, 0x3c, 0x70, 0x95, 0xb8, 0x63, 0x53, 0xef, 0xf7, 0x5a, 0xf4, 0xa4, 0x25, 0x64, + 0xd6, 0x20, 0x09, 0xdf, 0x27, 0x7d, 0xdd, 0xec, 0x77, 0xfd, 0xe9, 0x0e, 0x30, 0x12, 0xdd, 0x81, + 0x1b, 0x50, 0x64, 0xf9, 0x9b, 0xa5, 0x9d, 0xf6, 0x54, 0xbb, 0x6f, 0x62, 0x8e, 0x9f, 0x16, 0x28, + 0xf9, 0xc0, 0xa1, 0x12, 0x46, 0x56, 0x90, 0xf5, 0x18, 0xd9, 0x66, 0x16, 0x28, 0xd9, 0x63, 0x7c, + 0x1f, 0x2e, 0xdb, 0x6d, 0x13, 0x5b, 0x6d, 0xbd, 0xd3, 0x52, 0x86, 0x2f, 0x5b, 0x92, 0x1e, 0x8b, + 0x95, 0x29, 0x97, 0x5c, 0xbe, 0xe4, 0x6a, 0x38, 0x0a, 0x5e, 0xb8, 0xdf, 0x08, 0x80, 0x46, 0x6b, + 0x62, 0x3f, 0x90, 0xc5, 0x7f, 0x06, 0x09, 0x7a, 0x19, 0x49, 0x94, 0xee, 0x9a, 0xcc, 0x0c, 0xb7, + 0x86, 0x1f, 0x00, 0xa8, 0xb6, 0x6d, 0x6a, 0xc7, 0x7d, 0x72, 0x67, 0xa2, 0xa3, 0x43, 0x79, 0x97, + 0xb9, 0xe2, 0xf0, 0x6d, 0x5c, 0xe5, 0xb7, 0x7a, 0xc9, 0x13, 0xf5, 0xdd, 0x6c, 0x9f, 0x42, 0x69, + 0x17, 0x0a, 0x41, 0x59, 0x27, 0xbb, 0x63, 0x73, 0x08, 0x66, 0x77, 0x0c, 0x75, 0xe0, 0xd9, 0x9d, + 0x9b, 0x1b, 0xc6, 0x58, 0xf9, 0x9b, 0x36, 0xa4, 0xdf, 0x0a, 0x90, 0xf3, 0xdb, 0x82, 0xdf, 0x73, + 0xce, 0x32, 0x25, 0x4d, 0xbb, 0x3c, 0x92, 0xb2, 0xa4, 0x4e, 0x55, 0xeb, 0xf0, 0x0f, 0x99, 0xb1, + 0x7c, 0x2e, 0x40, 0xda, 0x5d, 0x7c, 0xb0, 0x12, 0x1e, 0x78, 0x3a, 0xc0, 0xf6, 0x2e, 0xea, 0x2f, + 0x5f, 0xb3, 0x87, 0x02, 0x31, 0xf7, 0xa1, 0xc0, 0x5b, 0x6e, 0x50, 0x1d, 0x86, 0x67, 0xfb, 0x77, + 0x9a, 0x23, 0x14, 0x4e, 0x0e, 0xf1, 0xaf, 0x7c, 0x1e, 0x24, 0x9a, 0x44, 0x7f, 0x03, 0x49, 0xb5, + 0xe9, 0xa2, 0xf8, 0x85, 0x31, 0xf0, 0xb6, 0xc3, 0xba, 0xd6, 0x18, 0x54, 0x28, 0xa7, 0xcc, 0x25, + 0xf8, 0xac, 0xa2, 0xce, 0xac, 0xa4, 0x77, 0x88, 0x5e, 0xc6, 0x13, 0x74, 0x52, 0x05, 0x80, 0xc3, + 0xdd, 0x87, 0x7b, 0x9b, 0x5b, 0xf7, 0xb7, 0x6a, 0x9b, 0x3c, 0xac, 0xde, 0xdc, 0xac, 0x6d, 0x8a, + 0x51, 0xc2, 0x27, 0xd7, 0x1e, 0xee, 0x1d, 0xd5, 0x36, 0xc5, 0x98, 0x54, 0x81, 0x8c, 0x6b, 0x10, + 0xe9, 0x23, 
0x0a, 0xfd, 0x53, 0x5e, 0x9b, 0x8f, 0xc9, 0xac, 0x81, 0x96, 0x21, 0xeb, 0x2f, 0x8c, + 0xb0, 0xcb, 0x9b, 0x31, 0xdc, 0x22, 0xdf, 0x7f, 0x09, 0x50, 0x1c, 0x0a, 0x95, 0xd0, 0x5b, 0x90, + 0x32, 0xfa, 0xc7, 0x8a, 0x73, 0x76, 0x87, 0x0a, 0x4d, 0x0e, 0xd6, 0xd0, 0x3f, 0xee, 0x68, 0xcd, + 0x07, 0xf8, 0x9c, 0x1b, 0xe0, 0xa4, 0xd1, 0x3f, 0x7e, 0xc0, 0x8e, 0x38, 0x9b, 0x46, 0x74, 0xc2, + 0x34, 0x62, 0x43, 0xd3, 0x40, 0x37, 0x20, 0xd7, 0xd3, 0x5b, 0x58, 0x51, 0x5b, 0x2d, 0x13, 0x5b, + 0xcc, 0xa3, 0x65, 0xb8, 0xe6, 0x2c, 0xe9, 0xa9, 0xb0, 0x0e, 0xe9, 0x5b, 0x01, 0xd0, 0xa8, 0x13, + 0x40, 0x07, 0xe3, 0x42, 0x43, 0x61, 0xb6, 0xd0, 0x90, 0x7f, 0xee, 0xd1, 0x00, 0xb1, 0x01, 0x4b, + 0x9e, 0xa9, 0x32, 0xe8, 0x7a, 0xe9, 0xa6, 0x44, 0x67, 0xdc, 0x94, 0x88, 0x8c, 0x5c, 0x79, 0xb7, + 0x67, 0xaa, 0xcd, 0x95, 0x0c, 0x28, 0x35, 0x46, 0xc4, 0xf8, 0x3a, 0xc3, 0xa6, 0x24, 0x3c, 0xcb, + 0x94, 0xa4, 0x3b, 0x20, 0x3e, 0x72, 0xc7, 0xe7, 0x23, 0x0d, 0x4d, 0x53, 0x18, 0x99, 0xe6, 0x19, + 0xa4, 0x89, 0xf5, 0xa5, 0xde, 0xe5, 0xef, 0x20, 0xe3, 0xee, 0x9e, 0xfb, 0x0e, 0x2b, 0x74, 0xdb, + 0xf9, 0x4c, 0x3c, 0x11, 0x74, 0x0b, 0x16, 0x88, 0xdf, 0x70, 0x0a, 0xa6, 0x0c, 0x89, 0x8e, 0x52, + 0x6b, 0x58, 0x64, 0x1d, 0x3b, 0x0e, 0x58, 0x4a, 0x22, 0x4a, 0xd1, 0x71, 0x70, 0x7f, 0x8c, 0x09, + 0x90, 0x74, 0x79, 0x08, 0x90, 0x67, 0xdf, 0x30, 0x1f, 0x08, 0x7d, 0xa5, 0x7f, 0x8a, 0x42, 0xd6, + 0x57, 0x8f, 0x45, 0x7f, 0x1d, 0x88, 0xe2, 0x57, 0x27, 0xd5, 0x6e, 0x7d, 0x21, 0x7c, 0x60, 0x61, + 0xd1, 0xf9, 0x17, 0x16, 0x56, 0x1d, 0x77, 0xca, 0xbb, 0xf1, 0xb9, 0xcb, 0xbb, 0x2f, 0x03, 0xb2, + 0x75, 0x5b, 0xed, 0x10, 0xe7, 0xad, 0xf5, 0x4e, 0x15, 0x76, 0xdb, 0x99, 0x03, 0x11, 0x69, 0xcf, + 0x11, 0xed, 0xd8, 0x27, 0x74, 0xe9, 0x7f, 0x05, 0x48, 0xbb, 0xa0, 0xce, 0xbc, 0xcf, 0x99, 0x2e, + 0x42, 0x92, 0xe3, 0x16, 0xec, 0x3d, 0x13, 0x6f, 0x8d, 0xad, 0x63, 0x97, 0x21, 0xdd, 0xc5, 0xb6, + 0x4a, 0xbd, 0x21, 0x8b, 0x3c, 0xdc, 0xf6, 0x53, 0xd7, 0x97, 0x6f, 0xbd, 0x09, 0x59, 0xdf, 0x1b, + 0x32, 0xe2, 0x59, 0x77, 0x6b, 0xef, 0x8a, 0x91, 
0x72, 0xea, 0x8b, 0xaf, 0x56, 0x63, 0xbb, 0xf8, + 0x53, 0x54, 0x22, 0xe6, 0xb8, 0x5a, 0xaf, 0x55, 0x1f, 0x88, 0x42, 0x39, 0xfb, 0xc5, 0x57, 0xab, + 0x29, 0x19, 0xd3, 0x8a, 0xe2, 0xad, 0x07, 0x50, 0x1c, 0xfa, 0xa2, 0x41, 0x1b, 0x8f, 0xa0, 0xb0, + 0x79, 0xb8, 0xbf, 0xb3, 0x55, 0xad, 0x34, 0x6a, 0xca, 0xd1, 0x5e, 0xa3, 0x26, 0x0a, 0xe8, 0x12, + 0x2c, 0xee, 0x6c, 0xfd, 0x7d, 0xbd, 0xa1, 0x54, 0x77, 0xb6, 0x6a, 0xbb, 0x0d, 0xa5, 0xd2, 0x68, + 0x54, 0xaa, 0x0f, 0xc4, 0xe8, 0xed, 0xff, 0xcc, 0x42, 0xb1, 0xb2, 0x51, 0xdd, 0xaa, 0x18, 0x46, + 0x47, 0x6b, 0xaa, 0xd4, 0x63, 0x54, 0x21, 0x4e, 0x6b, 0x23, 0x13, 0x5f, 0xa0, 0x97, 0x27, 0x97, + 0x89, 0xd1, 0x7d, 0x48, 0xd0, 0xb2, 0x09, 0x9a, 0xfc, 0x24, 0xbd, 0x3c, 0xa5, 0x6e, 0x4c, 0x26, + 0x43, 0xef, 0xe1, 0xc4, 0x37, 0xea, 0xe5, 0xc9, 0x65, 0x64, 0xb4, 0x03, 0x29, 0x07, 0x6c, 0x9e, + 0xf6, 0x70, 0xbc, 0x3c, 0xb5, 0xb6, 0x4b, 0x96, 0xc6, 0x40, 0xfb, 0xc9, 0xcf, 0xd7, 0xcb, 0x53, + 0x0a, 0xcc, 0x68, 0x0b, 0x92, 0x1c, 0x30, 0x9d, 0xf2, 0x22, 0xbd, 0x3c, 0xad, 0x64, 0x8c, 0x64, + 0xc8, 0x78, 0x25, 0x9b, 0xe9, 0x8f, 0xf2, 0xcb, 0x33, 0xd4, 0xce, 0xd1, 0x87, 0x90, 0x0f, 0x82, + 0xb1, 0xb3, 0xbd, 0x7a, 0x2f, 0xcf, 0x58, 0x9c, 0x26, 0xfa, 0x83, 0xc8, 0xec, 0x6c, 0xaf, 0xe0, + 0xcb, 0x33, 0xd6, 0xaa, 0xd1, 0xc7, 0xb0, 0x30, 0x8a, 0x9c, 0xce, 0xfe, 0x28, 0xbe, 0x3c, 0x47, + 0xf5, 0x1a, 0x75, 0x01, 0x8d, 0x41, 0x5c, 0xe7, 0x78, 0x23, 0x5f, 0x9e, 0xa7, 0x98, 0x8d, 0x5a, + 0x50, 0x1c, 0x46, 0x31, 0x67, 0x7d, 0x33, 0x5f, 0x9e, 0xb9, 0xb0, 0xcd, 0x46, 0x09, 0x42, 0x7a, + 0xb3, 0xbe, 0xa1, 0x2f, 0xcf, 0x5c, 0xe7, 0x46, 0x87, 0x00, 0x3e, 0x90, 0x68, 0x86, 0x37, 0xf5, + 0xe5, 0x59, 0x2a, 0xde, 0xc8, 0x80, 0xc5, 0x71, 0xa8, 0xd0, 0x3c, 0x4f, 0xec, 0xcb, 0x73, 0x15, + 0xc2, 0xc9, 0x79, 0x0e, 0xe2, 0x3b, 0xb3, 0x3d, 0xb9, 0x2f, 0xcf, 0x58, 0x11, 0xdf, 0xa8, 0x3d, + 0xf9, 0x6e, 0x59, 0xf8, 0xfa, 0xbb, 0x65, 0xe1, 0xdb, 0xef, 0x96, 0x85, 0x2f, 0xbf, 0x5f, 0x8e, + 0x7c, 0xfd, 0xfd, 0x72, 0xe4, 0xa7, 0xdf, 0x2f, 0x47, 0xfe, 0xe1, 0xa5, 0x53, 0xcd, 
0x6e, 0xf7, + 0x8f, 0xd7, 0x9a, 0x7a, 0x77, 0xdd, 0xff, 0x2f, 0xa6, 0x71, 0x7f, 0xbb, 0x3a, 0x4e, 0x52, 0x4f, + 0x7c, 0xe7, 0x77, 0x01, 0x00, 0x00, 0xff, 0xff, 0x51, 0xad, 0x4c, 0xbc, 0x96, 0x35, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -5601,27 +5174,6 @@ func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.BeginBlock != nil { - { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -5643,48 +5195,6 @@ func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - return len(dAtA) - i, nil -} -func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - return len(dAtA) - i, nil -} func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -6071,12 +5581,12 @@ func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err22 != nil { - return 0, err22 + n19, err19 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err19 != nil { + return 0, err19 } - i -= n22 - i = encodeVarintTypes(dAtA, i, uint64(n22)) + i -= n19 + i = encodeVarintTypes(dAtA, i, uint64(n19)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -6134,70 +5644,6 @@ func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - { - size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := 
m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6233,7 +5679,7 @@ func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { +func (m *RequestCommit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6243,27 +5689,20 @@ func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestDeliverTx) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestCommit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { +func (m *RequestListSnapshots) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6273,25 +5712,20 @@ func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestEndBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestListSnapshots) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m 
*RequestEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x8 - } return len(dAtA) - i, nil } -func (m *RequestCommit) Marshal() (dAtA []byte, err error) { +func (m *RequestOfferSnapshot) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6301,58 +5735,12 @@ func (m *RequestCommit) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestCommit) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *RequestListSnapshots) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestListSnapshots) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *RequestOfferSnapshot) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestOfferSnapshot) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6523,12 +5911,12 @@ func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x3a } - n27, err27 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err27 != nil { - return 0, err27 + n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err22 != nil { + return 0, err22 } - i -= n27 - i = encodeVarintTypes(dAtA, i, uint64(n27)) + i -= n22 + i = encodeVarintTypes(dAtA, i, uint64(n22)) i-- dAtA[i] = 0x32 if m.Height != 0 { @@ -6536,10 +5924,10 @@ func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x28 } - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6611,12 +5999,12 @@ func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x3a } - n29, err29 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err29 != nil { - return 0, err29 + n24, err24 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err24 != nil { + return 0, err24 } - i -= n29 - i = encodeVarintTypes(dAtA, i, uint64(n29)) + i -= n24 + i = encodeVarintTypes(dAtA, i, uint64(n24)) i-- dAtA[i] = 0x32 if m.Height != 0 { @@ -6631,10 +6019,10 @@ func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x22 } - if len(m.ByzantineValidators) > 0 { - for 
iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6822,12 +6210,12 @@ func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x3a } - n32, err32 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err32 != nil { - return 0, err32 + n27, err27 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err27 != nil { + return 0, err27 } - i -= n32 - i = encodeVarintTypes(dAtA, i, uint64(n32)) + i -= n27 + i = encodeVarintTypes(dAtA, i, uint64(n27)) i-- dAtA[i] = 0x32 if m.Height != 0 { @@ -6842,10 +6230,10 @@ func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7036,27 +6424,6 @@ func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.BeginBlock != nil { - { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - return len(dAtA) - i, nil -} func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -7078,48 +6445,6 @@ func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - return len(dAtA) - i, nil -} -func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - return len(dAtA) - i, nil -} func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -7637,43 +6962,6 @@ func (m *ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - 
_ = i - var l int - _ = l - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7694,13 +6982,6 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.MempoolError) > 0 { - i -= len(m.MempoolError) - copy(dAtA[i:], m.MempoolError) - i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError))) - i-- - dAtA[i] = 0x5a - } if m.Priority != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Priority)) i-- @@ -7720,44 +7001,11 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x42 } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.GasUsed != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) - i-- - dAtA[i] = 0x30 - } if m.GasWanted != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) i-- dAtA[i] = 0x28 } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 - } - if len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) - i-- - dAtA[i] = 0x1a - } if len(m.Data) > 0 { i -= len(m.Data) copy(dAtA[i:], m.Data) @@ -7853,7 +7101,7 @@ func (m *ResponseDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { +func (m *ResponseCommit) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7863,48 +7111,48 @@ func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ValidatorSetUpdate != nil { - { - size, err := m.ValidatorSetUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6 + if m.RetainHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) i-- - dAtA[i] = 0xaa + dAtA[i] = 0x18 } - if m.NextCoreChainLockUpdate != nil { - { - size, err := m.NextCoreChainLockUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xa2 + return len(dAtA) - i, nil +} + +func (m *ResponseListSnapshots) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + return dAtA[:n], nil +} + +func (m *ResponseListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Snapshots) > 0 { + for iNdEx := len(m.Snapshots) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err 
:= m.Snapshots[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7912,97 +7160,13 @@ func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - } - } - if m.ConsensusParamUpdates != nil { - { - size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x12 } return len(dAtA) - i, nil } -func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.RetainHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) - i-- - dAtA[i] = 0x18 - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} - -func (m *ResponseListSnapshots) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseListSnapshots) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Snapshots) > 0 { - for iNdEx := len(m.Snapshots) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Snapshots[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, 
err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResponseOfferSnapshot) Marshal() (dAtA []byte, err error) { +func (m *ResponseOfferSnapshot) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -8090,20 +7254,20 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } } if len(m.RefetchChunks) > 0 { - dAtA62 := make([]byte, len(m.RefetchChunks)*10) - var j61 int + dAtA51 := make([]byte, len(m.RefetchChunks)*10) + var j50 int for _, num := range m.RefetchChunks { for num >= 1<<7 { - dAtA62[j61] = uint8(uint64(num)&0x7f | 0x80) + dAtA51[j50] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j61++ + j50++ } - dAtA62[j61] = uint8(num) - j61++ + dAtA51[j50] = uint8(num) + j50++ } - i -= j61 - copy(dAtA[i:], dAtA62[:j61]) - i = encodeVarintTypes(dAtA, i, uint64(j61)) + i -= j50 + copy(dAtA[i:], dAtA51[:j50]) + i = encodeVarintTypes(dAtA, i, uint64(j50)) i-- dAtA[i] = 0x12 } @@ -8422,11 +7586,6 @@ func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0xa2 } - if m.RetainHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) - i-- - dAtA[i] = 0x30 - } if len(m.AppHash) > 0 { i -= len(m.AppHash) copy(dAtA[i:], m.AppHash) @@ -9183,12 +8342,12 @@ func (m *Misbehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n74, err74 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err74 != nil { - return 0, err74 + n63, err63 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err63 != nil { + return 0, err63 } - i -= n74 - i = encodeVarintTypes(dAtA, i, uint64(n74)) + i -= n63 + i = encodeVarintTypes(dAtA, i, uint64(n63)) i-- dAtA[i] = 0x22 if m.Height != 0 { 
@@ -9356,18 +8515,6 @@ func (m *Request_Query) Size() (n int) { } return n } -func (m *Request_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BeginBlock != nil { - l = m.BeginBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Request_CheckTx) Size() (n int) { if m == nil { return 0 @@ -9380,30 +8527,6 @@ func (m *Request_CheckTx) Size() (n int) { } return n } -func (m *Request_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} -func (m *Request_EndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Request_Commit) Size() (n int) { if m == nil { return 0 @@ -9625,29 +8748,6 @@ func (m *RequestQuery) Size() (n int) { return n } -func (m *RequestBeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = m.Header.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.LastCommitInfo.Size() - n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - func (m *RequestCheckTx) Size() (n int) { if m == nil { return 0 @@ -9664,31 +8764,6 @@ func (m *RequestCheckTx) Size() (n int) { return n } -func (m *RequestDeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func (m *RequestEndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - return n -} - func (m *RequestCommit) Size() (n int) { if m == nil { return 0 @@ -9779,8 +8854,8 @@ func (m 
*RequestPrepareProposal) Size() (n int) { } l = m.LocalLastCommit.Size() n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -9825,8 +8900,8 @@ func (m *RequestProcessProposal) Size() (n int) { } l = m.ProposedLastCommit.Size() n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -9907,8 +8982,8 @@ func (m *RequestFinalizeBlock) Size() (n int) { } l = m.DecidedLastCommit.Size() n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -10027,18 +9102,6 @@ func (m *Response_Query) Size() (n int) { } return n } -func (m *Response_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BeginBlock != nil { - l = m.BeginBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Response_CheckTx) Size() (n int) { if m == nil { return 0 @@ -10051,30 +9114,6 @@ func (m *Response_CheckTx) Size() (n int) { } return n } -func (m *Response_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} -func (m *Response_EndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Response_Commit) Size() (n int) { if m == nil { return 0 @@ -10325,21 +9364,6 @@ func (m *ResponseQuery) Size() (n int) { return n } -func (m *ResponseBeginBlock) Size() (n int) { - if m 
== nil { - return 0 - } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - func (m *ResponseCheckTx) Size() (n int) { if m == nil { return 0 @@ -10353,26 +9377,9 @@ func (m *ResponseCheckTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.Log) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Info) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } if m.GasWanted != 0 { n += 1 + sovTypes(uint64(m.GasWanted)) } - if m.GasUsed != 0 { - n += 1 + sovTypes(uint64(m.GasUsed)) - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } l = len(m.Codespace) if l > 0 { n += 1 + l + sovTypes(uint64(l)) @@ -10384,10 +9391,6 @@ func (m *ResponseCheckTx) Size() (n int) { if m.Priority != 0 { n += 1 + sovTypes(uint64(m.Priority)) } - l = len(m.MempoolError) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } return n } @@ -10431,43 +9434,12 @@ func (m *ResponseDeliverTx) Size() (n int) { return n } -func (m *ResponseEndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ConsensusParamUpdates != nil { - l = m.ConsensusParamUpdates.Size() - n += 1 + l + sovTypes(uint64(l)) - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.NextCoreChainLockUpdate != nil { - l = m.NextCoreChainLockUpdate.Size() - n += 2 + l + sovTypes(uint64(l)) - } - if m.ValidatorSetUpdate != nil { - l = m.ValidatorSetUpdate.Size() - n += 2 + l + sovTypes(uint64(l)) - } - return n -} - func (m *ResponseCommit) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } if m.RetainHeight != 0 { n += 1 + sovTypes(uint64(m.RetainHeight)) } @@ -10675,9 +9647,6 @@ func (m *ResponseFinalizeBlock) Size() (n int) { if l > 0 { n += 1 + l + 
sovTypes(uint64(l)) } - if m.RetainHeight != 0 { - n += 1 + sovTypes(uint64(m.RetainHeight)) - } if m.NextCoreChainLockUpdate != nil { l = m.NextCoreChainLockUpdate.Size() n += 2 + l + sovTypes(uint64(l)) @@ -11243,9 +10212,9 @@ func (m *Request) Unmarshal(dAtA []byte) error { } m.Value = &Request_Query{v} iNdEx = postIndex - case 6: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11272,15 +10241,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestBeginBlock{} + v := &RequestCheckTx{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_BeginBlock{v} + m.Value = &Request_CheckTx{v} iNdEx = postIndex - case 7: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11307,15 +10276,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestCheckTx{} + v := &RequestCommit{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_CheckTx{v} + m.Value = &Request_Commit{v} iNdEx = postIndex - case 8: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11342,112 +10311,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestDeliverTx{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = 
&Request_DeliverTx{v} - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestEndBlock{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_EndBlock{v} - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestCommit{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_Commit{v} - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > 
l { - return io.ErrUnexpectedEOF - } - v := &RequestListSnapshots{} + v := &RequestListSnapshots{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12257,335 +11121,11 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { break } } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialCoreHeight", wireType) - } - m.InitialCoreHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InitialCoreHeight |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestQuery) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Prove = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { 
- return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
- if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialCoreHeight", wireType) } - var msglen int + m.InitialCoreHeight = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12595,26 +11135,11 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.InitialCoreHeight |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if 
msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12636,7 +11161,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { +func (m *RequestQuery) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12659,15 +11184,15 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -12694,16 +11219,48 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} } iNdEx = postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - m.Type = 0 + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12713,11 +11270,31 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= CheckTxType(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.Prove = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12739,7 +11316,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { +func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12762,10 +11339,10 @@ func (m *RequestDeliverTx) 
Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestDeliverTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12802,61 +11379,11 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { m.Tx = []byte{} } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestEndBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Height = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12866,7 +11393,7 @@ func (m *RequestEndBlock) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Type |= CheckTxType(b&0x7F) << shift if b < 0x80 { break } @@ -13469,7 +11996,7 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13496,8 +12023,8 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Misbehavior = append(m.Misbehavior, Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13812,7 +12339,7 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13839,8 +12366,8 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Misbehavior = append(m.Misbehavior, Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14389,7 +12916,7 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14416,8 +12943,8 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Misbehavior = append(m.Misbehavior, Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14732,116 +13259,11 @@ func (m *Response) Unmarshal(dAtA []byte) error { if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Exception{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseEcho{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_Echo{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - 
} - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseFlush{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_Flush{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseInfo{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_Info{v} + m.Value = &Response_Exception{v} iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14868,15 +13290,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseInitChain{} + v := &ResponseEcho{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_InitChain{v} + m.Value = &Response_Echo{v} iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14903,15 +13325,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseQuery{} + v := &ResponseFlush{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Query{v} + m.Value = &Response_Flush{v} iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14938,15 +13360,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseBeginBlock{} + v := &ResponseInfo{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_BeginBlock{v} + m.Value = &Response_Info{v} iNdEx = postIndex - case 8: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14973,15 +13395,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCheckTx{} + v := &ResponseInitChain{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_CheckTx{v} + m.Value = &Response_InitChain{v} iNdEx = postIndex - case 9: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15008,15 +13430,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseDeliverTx{} + v := &ResponseQuery{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_DeliverTx{v} + m.Value = &Response_Query{v} iNdEx = postIndex - 
case 10: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15043,11 +13465,11 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseEndBlock{} + v := &ResponseCheckTx{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_EndBlock{v} + m.Value = &Response_CheckTx{v} iNdEx = postIndex case 11: if wireType != 2 { @@ -16335,90 +14757,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseBeginBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - 
if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -16501,70 +14839,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { m.Data = []byte{} } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Log = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = string(dAtA[iNdEx:postIndex]) - iNdEx = 
postIndex case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) @@ -16584,59 +14858,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { break } } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) - } - m.GasUsed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GasUsed |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) @@ -16715,43 +14936,11 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Priority |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Priority |= int64(b&0x7F) << shift if b < 0x80 { break } } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MempoolError = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -17044,198 +15233,6 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &types1.ConsensusParams{} - } - if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 100: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NextCoreChainLockUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NextCoreChainLockUpdate == nil { - m.NextCoreChainLockUpdate = &types1.CoreChainLock{} - } - if err := m.NextCoreChainLockUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 101: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSetUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ValidatorSetUpdate == nil { - m.ValidatorSetUpdate = &ValidatorSetUpdate{} - } - if err := m.ValidatorSetUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ResponseCommit) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -17265,40 +15262,6 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: ResponseCommit: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) @@ -18605,25 +16568,6 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { m.AppHash = []byte{} } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) - } - m.RetainHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RetainHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } case 100: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NextCoreChainLockUpdate", wireType) diff --git a/dash/quorum/validator_conn_executor_test.go b/dash/quorum/validator_conn_executor_test.go index 376376fa93..c40c058e49 100644 --- a/dash/quorum/validator_conn_executor_test.go +++ b/dash/quorum/validator_conn_executor_test.go @@ -669,7 +669,7 @@ func (app *testApp) Info(context.Context, *abci.RequestInfo) (*abci.ResponseInfo } func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - app.ByzantineValidators = req.ByzantineValidators + app.ByzantineValidators = req.Misbehavior txs := make([]*abci.ExecTxResult, 0, len(req.Txs)) for _, tx := range req.Txs { txs = append(txs, &abci.ExecTxResult{Data: tx}) diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index 64ce54dc62..1c856f22d0 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -207,13 +207,13 @@ func (pool *BlockPool) IsCaughtUp() bool { // as we switch from block sync to consensus mode. // // The caller will verify the commit. 
-func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstExtCommit *types.ExtendedCommit) { +func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstExtCommit *types.Commit) { pool.mtx.RLock() defer pool.mtx.RUnlock() if r := pool.requesters[pool.height]; r != nil { first = r.getBlock() - firstExtCommit = r.getExtendedCommit() + firstExtCommit = r.getCommit() } if r := pool.requesters[pool.height+1]; r != nil { second = r.getBlock() @@ -223,7 +223,7 @@ func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstExtComm // PopRequest pops the first block at pool.height. // It must have been validated by the second Commit from PeekTwoBlocks. -// TODO(thane): (?) and its corresponding ExtendedCommit. +// TODO(thane): (?) and its corresponding Commit. func (pool *BlockPool) PopRequest() { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -276,7 +276,7 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID { // height of the extended commit and the height of the block do not match, we // do not add the block and return an error. // TODO: ensure that blocks come in order for each peer. -func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, extCommit *types.ExtendedCommit, blockSize int) error { +func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, extCommit *types.Commit, blockSize int) error { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -564,7 +564,7 @@ type bpRequester struct { mtx sync.Mutex peerID types.NodeID block *types.Block - extCommit *types.ExtendedCommit + extCommit *types.Commit } func newBPRequester(logger log.Logger, pool *BlockPool, height int64) *bpRequester { @@ -590,7 +590,7 @@ func (bpr *bpRequester) OnStart(ctx context.Context) error { func (*bpRequester) OnStop() {} // Returns true if the peer matches and block doesn't already exist. 
-func (bpr *bpRequester) setBlock(block *types.Block, extCommit *types.ExtendedCommit, peerID types.NodeID) bool { +func (bpr *bpRequester) setBlock(block *types.Block, extCommit *types.Commit, peerID types.NodeID) bool { bpr.mtx.Lock() if bpr.block != nil || bpr.peerID != peerID { bpr.mtx.Unlock() @@ -615,7 +615,7 @@ func (bpr *bpRequester) getBlock() *types.Block { return bpr.block } -func (bpr *bpRequester) getExtendedCommit() *types.ExtendedCommit { +func (bpr *bpRequester) getCommit() *types.Commit { bpr.mtx.Lock() defer bpr.mtx.Unlock() return bpr.extCommit diff --git a/internal/blocksync/pool_test.go b/internal/blocksync/pool_test.go index 3c47b4a647..c12347e6e4 100644 --- a/internal/blocksync/pool_test.go +++ b/internal/blocksync/pool_test.go @@ -43,7 +43,7 @@ func (p testPeer) runInputRoutine() { // Request desired, pretend like we got the block immediately. func (p testPeer) simulateInput(input inputData) { block := &types.Block{Header: types.Header{Height: input.request.Height}} - extCommit := &types.ExtendedCommit{ + extCommit := &types.Commit{ Height: input.request.Height, } _ = input.pool.AddBlock(input.request.PeerID, block, extCommit, 123) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 89c7b813b8..5109194c31 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -198,16 +198,9 @@ func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, }) } - state, err := r.stateStore.Load() - if err != nil { - return fmt.Errorf("loading state: %w", err) - } - var extCommit *types.ExtendedCommit - if state.ConsensusParams.ABCI.VoteExtensionsEnabled(msg.Height) { - extCommit = r.store.LoadBlockExtendedCommit(msg.Height) - if extCommit == nil { - return fmt.Errorf("found block in store with no extended commit: %v", block) - } + commit := r.store.LoadBlockCommit(msg.Height) + if commit == nil { + return fmt.Errorf("found block in store with no extended commit: %v", block) } blockProto, 
err := block.ToProto() @@ -218,8 +211,8 @@ func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, return blockSyncCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &bcproto.BlockResponse{ - Block: blockProto, - ExtCommit: extCommit.ToProto(), + Block: blockProto, + Commit: commit.ToProto(), }, }) @@ -255,10 +248,10 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blo "err", err) return err } - var extCommit *types.ExtendedCommit - if msg.ExtCommit != nil { + var commit *types.Commit + if msg.Commit != nil { var err error - extCommit, err = types.ExtendedCommitFromProto(msg.ExtCommit) + commit, err = types.CommitFromProto(msg.Commit) if err != nil { r.logger.Error("failed to convert extended commit from proto", "peer", envelope.From, @@ -267,7 +260,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blo } } - if err := r.pool.AddBlock(envelope.From, block, extCommit, block.Size()); err != nil { + if err := r.pool.AddBlock(envelope.From, block, commit, block.Size()); err != nil { r.logger.Error("failed to add block", "err", err) } @@ -458,7 +451,7 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh didProcessCh = make(chan struct{}, 1) - initialCommitHasExtensions = (r.initialState.LastBlockHeight > 0 && r.store.LoadBlockExtendedCommit(r.initialState.LastBlockHeight) != nil) + initialCommitHasExtensions = (r.initialState.LastBlockHeight > 0 && r.store.LoadBlockCommit(r.initialState.LastBlockHeight) != nil) ) defer trySyncTicker.Stop() @@ -484,9 +477,6 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh // The case statement below is a bit confusing, so here is a breakdown // of its logic and purpose: // - // If VoteExtensions are enabled we cannot switch to consensus without - // the vote extension data for the previous height, i.e. state.LastBlockHeight. 
- // // If extensions were required during state.LastBlockHeight and we have // sync'd at least one block, then we are guaranteed to have extensions. // BlockSync requires that the blocks it fetches have extensions if @@ -497,8 +487,7 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh // if we already had extensions for the initial height. // If any of these conditions is not met, we continue the loop, looking // for extensions. - case state.ConsensusParams.ABCI.VoteExtensionsEnabled(state.LastBlockHeight) && - (blocksSynced == 0 && !initialCommitHasExtensions): + case blocksSynced == 0 && !initialCommitHasExtensions: r.logger.Info( "no extended commit yet", "height", height, @@ -551,9 +540,8 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh // TODO: Uncouple from request routine. // see if there are any blocks to sync - first, second, extCommit := r.pool.PeekTwoBlocks() - if first != nil && extCommit == nil && - state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { + first, second, commit := r.pool.PeekTwoBlocks() + if first != nil && commit == nil { // See https://github.com/tendermint/tendermint/pull/8433#discussion_r866790631 panic(fmt.Errorf("peeked first block without extended commit at height %d - possible node store corruption", first.Height)) } else if first == nil || second == nil { @@ -594,10 +582,6 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh // validate the block before we persist it err = r.blockExec.ValidateBlock(ctx, state, first) } - if err == nil && state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { - // if vote extensions were required at this height, ensure they exist. 
- err = extCommit.EnsureExtensions() - } // If either of the checks failed we log the error and request for a new block // at that height if err != nil { @@ -633,23 +617,19 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh r.pool.PopRequest() - // TODO: batch saves so we do not persist to disk every block - if state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { - r.store.SaveBlockWithExtendedCommit(first, firstParts, extCommit) - } else { - // We use LastCommit here instead of extCommit. extCommit is not - // guaranteed to be populated by the peer if extensions are not enabled. - // Currently, the peer should provide an extCommit even if the vote extension data are absent - // but this may change so using second.LastCommit is safer. - r.store.SaveBlock(first, firstParts, second.LastCommit) - } + r.store.SaveBlock(first, firstParts, commit) + // We use LastCommit here instead of commit. commit is not + // guaranteed to be populated by the peer if extensions are not enabled. + // Currently, the peer should provide an commit even if the vote extension data are absent + // but this may change so using second.LastCommit is safer. + //r.store.SaveBlock(first, firstParts, second.LastCommit) - // TODO: Same thing for app - but we would need a way to get the hash - // without persisting the state. - state, err = r.blockExec.ApplyBlock(ctx, state, r.nodeProTxHash, firstID, first) - if err != nil { - panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) - } + // TODO: Same thing for app - but we would need a way to get the hash + // without persisting the state. 
+ state, err = r.blockExec.ApplyBlock(ctx, state, r.nodeProTxHash, firstID, first) + if err != nil { + panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) + } r.metrics.RecordConsMetrics(first) diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index ef6596cef7..67deee3b18 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -96,6 +96,7 @@ func setup( func makeReactor( ctx context.Context, t *testing.T, + proTxHash types.ProTxHash, nodeID types.NodeID, genDoc *types.GenesisDoc, privVal types.PrivValidator, @@ -111,7 +112,6 @@ func makeReactor( stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(blockDB) - proTxHash := rts.network.Nodes[nodeID].NodeInfo.ProTxHash state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) @@ -135,7 +135,7 @@ func makeReactor( blockExec := sm.NewBlockExecutor( stateStore, log.NewNopLogger(), - rts.app[nodeID], + app, mp, sm.EmptyEvidencePool{}, blockStore, @@ -182,21 +182,22 @@ func (rts *reactorTestSuite) addNode( return rts.blockSyncChannels[nodeID], nil } + proTxHash := rts.network.Nodes[nodeID].NodeInfo.ProTxHash peerEvents := func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] } - reactor := makeReactor(ctx, t, nodeID, genDoc, privVal, chCreator, peerEvents) + reactor := makeReactor(ctx, t, proTxHash, nodeID, genDoc, privVal, chCreator, peerEvents) - lastExtCommit := &types.ExtendedCommit{} + commit := types.NewCommit(0, 0, types.BlockID{}, types.StateID{}, nil) state, err := reactor.stateStore.Load() require.NoError(t, err) for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - block, blockID, partSet, seenExtCommit := makeNextBlock(ctx, t, state, privVal, blockHeight, lastExtCommit) + block, blockID, partSet, seenCommit := makeNextBlock(ctx, t, state, privVal, blockHeight, commit) - state, err = 
reactor.blockExec.ApplyBlock(ctx, state, blockID, block) + state, err = reactor.blockExec.ApplyBlock(ctx, state, proTxHash, blockID, block) require.NoError(t, err) - reactor.store.SaveBlockWithExtendedCommit(block, partSet, seenExtCommit) - lastExtCommit = seenExtCommit + reactor.store.SaveBlock(block, partSet, seenCommit) + commit = seenCommit } rts.reactors[nodeID] = reactor @@ -209,11 +210,10 @@ func makeNextBlock(ctx context.Context, state sm.State, signer types.PrivValidator, height int64, - lc *types.ExtendedCommit) (*types.Block, types.BlockID, *types.PartSet, *types.ExtendedCommit) { + commit *types.Commit) (*types.Block, types.BlockID, *types.PartSet, *types.Commit) { - lastExtCommit := lc.Clone() - - block := sf.MakeBlock(state, height, lastExtCommit.ToCommit()) + block, err := sf.MakeBlock(state, height, commit, nil, 0) + require.NoError(t, err) partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: partSet.Header()} @@ -222,23 +222,31 @@ func makeNextBlock(ctx context.Context, vote, err := factory.MakeVote( ctx, signer, + state.Validators, block.Header.ChainID, 0, block.Header.Height, 0, 2, blockID, - time.Now(), + state.StateID(), ) require.NoError(t, err) - seenExtCommit := &types.ExtendedCommit{ - Height: vote.Height, - Round: vote.Round, - BlockID: blockID, - ExtendedSignatures: []types.ExtendedCommitSig{vote.ExtendedCommitSig()}, - } - return block, blockID, partSet, seenExtCommit - + seenCommit := types.NewCommit( + vote.Height, + vote.Round, + blockID, + state.StateID(), + &types.CommitSigns{ + QuorumSigns: types.QuorumSigns{ + BlockSign: vote.BlockSignature, + StateSign: vote.StateSignature, + ExtensionSigns: types.MakeThresholdExtensionSigns(vote.VoteExtensions), + }, + QuorumHash: state.Validators.QuorumHash, + }, + ) + return block, blockID, partSet, seenCommit } func (rts *reactorTestSuite) start(ctx context.Context, t *testing.T) { diff --git 
a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 3bf22c3f71..be0672db7a 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -188,9 +188,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // We're creating a proposal for the first block. // The commit is empty, but not nil. commit = types.NewCommit(0, 0, types.BlockID{}, types.StateID{}, nil) - case lazyNodeState.LastCommit.HasTwoThirdsMajority(): + case lazyNodeState.LastCommit != nil: // Make the commit from LastCommit - commit = lazyNodeState.LastCommit.MakeCommit() + commit = lazyNodeState.LastCommit default: // This shouldn't happen. lazyNodeState.logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") return diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index e5c3e68811..fc56de464d 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -346,7 +346,7 @@ func (app *CounterApplication) Commit(context.Context) (*abci.ResponseCommit, er } hash := make([]byte, 32) binary.BigEndian.PutUint64(hash, uint64(app.txCount)) - return &abci.ResponseCommit{Data: hash}, nil + return &abci.ResponseCommit{}, nil } func (app *CounterApplication) PrepareProposal(_ context.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go index fb0ba8bfbd..bdf0eb412c 100644 --- a/internal/consensus/metrics.go +++ b/internal/consensus/metrics.go @@ -6,8 +6,6 @@ import ( "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" cstypes "github.com/tendermint/tendermint/internal/consensus/types" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" diff --git a/internal/consensus/reactor.go 
b/internal/consensus/reactor.go index 8fca457e2d..2c3bb9faf8 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -462,7 +462,7 @@ func (r *Reactor) gossipDataForCatchup(ctx context.Context, rs *cstypes.RoundSta time.Sleep(r.state.config.PeerGossipSleepDuration) } -func (r *Reactor) gossipDataRoutine(ctx context.Context, ps *PeerState, dataCh *p2p.Channel, chans channelBundle) { +func (r *Reactor) gossipDataRoutine(ctx context.Context, ps *PeerState, dataCh p2p.Channel, chans channelBundle) { logger := r.logger.With("peer", ps.peerID) timer := time.NewTimer(0) @@ -580,7 +580,7 @@ OUTER_LOOP: } } -func (r *Reactor) sendProposalBlockPart(ctx context.Context, dataCh *p2p.Channel, ps *PeerState, part *types.Part, height int64, round int32) error { +func (r *Reactor) sendProposalBlockPart(ctx context.Context, dataCh p2p.Channel, ps *PeerState, part *types.Part, height int64, round int32) error { partProto, err := part.ToProto() if err != nil { return fmt.Errorf("failed to convert block part to proto, error: %w", err) @@ -636,7 +636,7 @@ func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.V return true, nil } -func (r *Reactor) sendCommit(ctx context.Context, ps *PeerState, commit *types.Commit, voteCh *p2p.Channel) error { +func (r *Reactor) sendCommit(ctx context.Context, ps *PeerState, commit *types.Commit, voteCh p2p.Channel) error { if commit == nil { return fmt.Errorf("attempt to send nil commit to peer %s", ps.peerID) } @@ -651,7 +651,7 @@ func (r *Reactor) sendCommit(ctx context.Context, ps *PeerState, commit *types.C // send sends a message to provided channel. // If to is nil, message will be broadcasted. 
-func (r *Reactor) send(ctx context.Context, ps *PeerState, channel *p2p.Channel, msg proto.Message) error { +func (r *Reactor) send(ctx context.Context, ps *PeerState, channel p2p.Channel, msg proto.Message) error { select { case <-ctx.Done(): return errReactorClosed @@ -664,7 +664,7 @@ func (r *Reactor) send(ctx context.Context, ps *PeerState, channel *p2p.Channel, } // broadcast sends a broadcast message to all peers connected to the `channel`. -func (r *Reactor) broadcast(ctx context.Context, channel *p2p.Channel, msg proto.Message) error { +func (r *Reactor) broadcast(ctx context.Context, channel p2p.Channel, msg proto.Message) error { select { case <-ctx.Done(): return errReactorClosed @@ -766,7 +766,7 @@ func (r *Reactor) gossipVotesForHeight( } // gossipCommit sends a commit to the peer -func (r *Reactor) gossipCommit(ctx context.Context, voteCh *p2p.Channel, rs *cstypes.RoundState, ps *PeerState, prs *cstypes.PeerRoundState) error { +func (r *Reactor) gossipCommit(ctx context.Context, voteCh p2p.Channel, rs *cstypes.RoundState, ps *PeerState, prs *cstypes.PeerRoundState) error { // logger := r.Logger.With("height", rs.Height, "peer_height", prs.Height, "peer", ps.peerID) var commit *types.Commit blockStoreBase := r.state.blockStore.Base() @@ -797,7 +797,7 @@ func (r *Reactor) gossipCommit(ctx context.Context, voteCh *p2p.Channel, rs *cst return nil // success } -func (r *Reactor) gossipVotesAndCommitRoutine(ctx context.Context, voteCh *p2p.Channel, ps *PeerState) { +func (r *Reactor) gossipVotesAndCommitRoutine(ctx context.Context, voteCh p2p.Channel, ps *PeerState) { logger := r.logger.With("peer", ps.peerID) timer := time.NewTimer(0) @@ -860,7 +860,7 @@ func (r *Reactor) gossipVotesAndCommitRoutine(ctx context.Context, voteCh *p2p.C // NOTE: `queryMaj23Routine` has a simple crude design since it only comes // into play for liveness when there's a signature DDoS attack happening. 
-func (r *Reactor) queryMaj23Routine(ctx context.Context, stateCh *p2p.Channel, ps *PeerState) { +func (r *Reactor) queryMaj23Routine(ctx context.Context, stateCh p2p.Channel, ps *PeerState) { timer := time.NewTimer(0) defer timer.Stop() @@ -1418,7 +1418,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, cha // Any error encountered during message execution will result in a PeerError being sent // on the StateChannel or DataChannel or VoteChannel or VoteSetBitsChannel. // When the reactor is stopped, we will catch the signal and close the p2p Channel gracefully. -func (r *Reactor) processMsgCh(ctx context.Context, msgCh *p2p.Channel, chBundle channelBundle) { +func (r *Reactor) processMsgCh(ctx context.Context, msgCh p2p.Channel, chBundle channelBundle) { iter := msgCh.Receive(ctx) for iter.Next(ctx) { envelope := iter.Envelope() diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index c71b3b6539..cdc9c14460 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -613,42 +613,36 @@ func TestSwitchToConsensusVoteExtensions(t *testing.T) { name string storedHeight int64 initialRequiredHeight int64 - includeExtensions bool shouldPanic bool }{ { name: "no vote extensions but not required", initialRequiredHeight: 0, storedHeight: 2, - includeExtensions: false, shouldPanic: false, }, { name: "no vote extensions but required this height", initialRequiredHeight: 2, storedHeight: 2, - includeExtensions: false, shouldPanic: true, }, { name: "no vote extensions and required in future", initialRequiredHeight: 3, storedHeight: 2, - includeExtensions: false, shouldPanic: false, }, { name: "no vote extensions and required previous height", initialRequiredHeight: 1, storedHeight: 2, - includeExtensions: false, shouldPanic: true, }, { name: "vote extensions and required previous height", initialRequiredHeight: 1, storedHeight: 2, - includeExtensions: true, shouldPanic: false, }, } { 
@@ -672,31 +666,21 @@ func TestSwitchToConsensusVoteExtensions(t *testing.T) { blockParts, err := propBlock.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) - var voteSet *types.VoteSet - if testCase.includeExtensions { - voteSet = types.NewExtendedVoteSet(cs.state.ChainID, testCase.storedHeight, 0, tmproto.PrecommitType, cs.state.Validators) - } else { - voteSet = types.NewVoteSet(cs.state.ChainID, testCase.storedHeight, 0, tmproto.PrecommitType, cs.state.Validators) - } + voteSet := types.NewVoteSet(cs.state.ChainID, testCase.storedHeight, 0, tmproto.PrecommitType, cs.state.Validators, cs.state.StateID()) signedVote := signVote(ctx, t, validator, tmproto.PrecommitType, cs.state.ChainID, types.BlockID{ Hash: propBlock.Hash(), PartSetHeader: blockParts.Header(), - }) - - if !testCase.includeExtensions { - signedVote.Extension = nil - signedVote.ExtensionSignature = nil - } + }, + cs.state.AppHash, + cs.Validators.QuorumType, + cs.Validators.QuorumHash, + ) added, err := voteSet.AddVote(signedVote) require.NoError(t, err) require.True(t, added) - if testCase.includeExtensions { - cs.blockStore.SaveBlockWithExtendedCommit(propBlock, blockParts, voteSet.MakeExtendedCommit()) - } else { - cs.blockStore.SaveBlock(propBlock, blockParts, voteSet.MakeExtendedCommit().ToCommit()) - } + cs.blockStore.SaveBlock(propBlock, blockParts, voteSet.MakeCommit()) reactor := NewReactor( log.NewNopLogger(), cs, diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 07efb845f0..ffce1745b1 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -305,7 +305,7 @@ type simulatorTestSuite struct { GenesisState sm.State Config *config.Config Chain []*types.Block - ExtCommits []*types.ExtendedCommit + Commits []*types.Commit CleanupFunc cleanupFunc Mempool mempool.Mempool @@ -803,10 +803,10 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { ensureNewRound(t, newRoundCh, height+1, 0) 
sim.Chain = []*types.Block{} - sim.ExtCommits = []*types.ExtendedCommit{} + sim.Commits = []*types.Commit{} for i := 1; i <= numBlocks; i++ { sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i))) - sim.ExtCommits = append(sim.ExtCommits, css[0].blockStore.LoadBlockExtendedCommit(int64(i))) + sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i))) } return sim @@ -950,7 +950,7 @@ func testHandshakeReplay( err = wal.Start(ctx) require.NoError(t, err) t.Cleanup(func() { cancel(); wal.Wait() }) - chain, commits = makeBlockchainFromWAL(t, wal) + chain, commits = makeBlockchainFromWAL(t, wal, gdoc) pubKey, err := privVal.GetPubKey(ctx, gdoc.QuorumHash) require.NoError(t, err) stateDB, genesisState, store = stateAndStore(t, cfg, pubKey, kvstore.ProtocolVersion) @@ -1345,10 +1345,10 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL, genDoc *types.GenesisDoc) ([]* // log.Notice("Build a blockchain by reading from the WAL") var ( - blocks []*types.Block - extCommits []*types.ExtendedCommit - thisBlockParts *types.PartSet - thisBlockExtCommit *types.ExtendedCommit + blocks []*types.Block + commits []*types.Commit + thisBlockParts *types.PartSet + thisBlockCommit *types.Commit ) dec := NewWALDecoder(gr) @@ -1380,12 +1380,12 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL, genDoc *types.GenesisDoc) ([]* require.Equal(t, block.Height, height+1, "read bad block from wal. got height %d, expected %d", block.Height, height+1) - commitHeight := thisBlockExtCommit.Height + commitHeight := thisBlockCommit.Height require.Equal(t, commitHeight, height+1, "commit doesnt match. 
got height %d, expected %d", commitHeight, height+1) blocks = append(blocks, block) - extCommits = append(extCommits, thisBlockExtCommit) + commits = append(commits, thisBlockCommit) height++ } case *types.PartSetHeader: @@ -1429,12 +1429,12 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL, genDoc *types.GenesisDoc) ([]* require.NoError(t, err) require.Equal(t, block.Height, height+1, "read bad block from wal. got height %d, expected %d", block.Height, height+1) - commitHeight := thisBlockExtCommit.Height + commitHeight := thisBlockCommit.Height require.Equal(t, commitHeight, height+1, "commit does not match. got height %d, expected %d", commitHeight, height+1) blocks = append(blocks, block) - extCommits = append(extCommits, thisBlockExtCommit) - return blocks, extCommits + commits = append(commits, thisBlockCommit) + return blocks, commits } func readPieceFromWAL(msg *TimedWALMessage) interface{} { @@ -1522,7 +1522,7 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { } } func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } -func (bs *mockBlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { +func (bs *mockBlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { } func (bs *mockBlockStore) SaveBlock( block *types.Block, @@ -1532,20 +1532,20 @@ func (bs *mockBlockStore) SaveBlock( } func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { - return bs.extCommits[height-1].ToCommit() + return bs.commits[height-1] } func (bs *mockBlockStore) LoadSeenCommit() *types.Commit { - return bs.extCommits[len(bs.extCommits)-1].ToCommit() + return bs.commits[len(bs.commits)-1] } -func (bs *mockBlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { - return bs.extCommits[height-1] +func (bs *mockBlockStore) LoadBlockExtendedCommit(height int64) 
*types.Commit { + return bs.commits[height-1] } func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { pruned := uint64(0) for i := int64(0); i < height-1; i++ { bs.chain[i] = nil - bs.extCommits[i] = nil + bs.commits[i] = nil pruned++ } bs.base = height diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 0f85075206..e877d4a221 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -750,15 +750,15 @@ func (cs *State) sendInternalMessage(ctx context.Context, mi msgInfo) { // the method will panic on an absent ExtendedCommit or an ExtendedCommit without // extension data. func (cs *State) reconstructLastCommit(state sm.State) { - votes, err := cs.votesFromSeenCommit(state) + commit, err := cs.votesFromSeenCommit(state) if err != nil { panic(fmt.Sprintf("failed to reconstruct last commit; %s", err)) } - cs.LastCommit = votes + cs.LastCommit = commit return } -func (cs *State) votesFromSeenCommit(state sm.State) (*types.VoteSet, error) { +func (cs *State) votesFromSeenCommit(state sm.State) (*types.Commit, error) { commit := cs.blockStore.LoadSeenCommit() if commit == nil || commit.Height != state.LastBlockHeight { commit = cs.blockStore.LoadBlockCommit(state.LastBlockHeight) @@ -766,12 +766,7 @@ func (cs *State) votesFromSeenCommit(state sm.State) (*types.VoteSet, error) { if commit == nil { return nil, fmt.Errorf("commit for height %v not found", state.LastBlockHeight) } - - vs := commit.ToVoteSet(state.ChainID, state.LastValidators) - if !vs.HasTwoThirdsMajority() { - return nil, errors.New("commit does not have +2/3 majority") - } - return vs, nil + return commit, nil } // Updates State and increments height to match that of state. 
@@ -888,7 +883,7 @@ func (cs *State) updateToState(state sm.State, commit *types.Commit) { cs.ValidBlock = nil cs.ValidBlockParts = nil cs.Commit = nil - cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) + cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, stateID, validators) cs.CommitRound = -1 cs.LastValidators = state.LastValidators cs.TriggeredTimeoutPrecommit = false @@ -2826,45 +2821,24 @@ func (cs *State) addVote( return } - // Check to see if the chain is configured to extend votes. - if cs.state.ConsensusParams.ABCI.VoteExtensionsEnabled(cs.Height) { - // The chain is configured to extend votes, check that the vote is - // not for a nil block and verify the extensions signature against the - // corresponding public key. - - var myAddr []byte - if cs.privValidatorPubKey != nil { - myAddr = cs.privValidatorPubKey.Address() - } - // Verify VoteExtension if precommit and not nil - // https://github.com/tendermint/tendermint/issues/8487 - if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() && - !bytes.Equal(vote.ValidatorAddress, myAddr) { // Skip the VerifyVoteExtension call if the vote was issued by this validator. - - // The core fields of the vote message were already validated in the - // consensus reactor when the vote was received. - // Here, we verify the signature of the vote extension included in the vote - // message. - _, val := cs.state.Validators.GetByIndex(vote.ValidatorIndex) - if err := vote.VerifyExtension(cs.state.ChainID, val.PubKey); err != nil { - return false, err - } + // Verify VoteExtension if precommit and not nil + // https://github.com/tendermint/tendermint/issues/8487 + if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() && + !bytes.Equal(vote.ValidatorProTxHash, cs.privValidatorProTxHash) { // Skip the VerifyVoteExtension call if the vote was issued by this validator. 
- err := cs.blockExec.VerifyVoteExtension(ctx, vote) - cs.metrics.MarkVoteExtensionReceived(err == nil) - if err != nil { - return false, err - } + // The core fields of the vote message were already validated in the + // consensus reactor when the vote was received. + // Here, we verify the signature of the vote extension included in the vote + // message. + _, val := cs.state.Validators.GetByIndex(vote.ValidatorIndex) + if err := vote.VerifyExtension(cs.state.ChainID, val.PubKey); err != nil { + return false, err } - } else { - // Vote extensions are not enabled on the network. - // strip the extension data from the vote in case any is present. - // - // TODO punish a peer if it sent a vote with an extension when the feature - // is disabled on the network. - // https://github.com/tendermint/tendermint/issues/8565 - if stripped := vote.StripExtension(); stripped { - cs.logger.Error("vote included extension data but vote extensions are not enabled", "peer", peerID) + + err := cs.blockExec.VerifyVoteExtension(ctx, vote) + cs.metrics.MarkVoteExtensionReceived(err == nil) + if err != nil { + return false, err } } diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 7e05f963ad..407b541290 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -19,7 +19,6 @@ import ( "github.com/tendermint/tendermint/internal/eventbus" tmpubsub "github.com/tendermint/tendermint/internal/pubsub" tmquery "github.com/tendermint/tendermint/internal/pubsub/query" - "github.com/tendermint/tendermint/internal/test/factory" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -2373,135 +2372,6 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { require.Len(t, rpp.LocalLastCommit.ThresholdVoteExtensions, 1) } -// TestVoteExtensionEnableHeight tests that 'ExtensionRequireHeight' correctly -// enforces that vote 
extensions be present in consensus for heights greater than -// or equal to the configured value. -func TestVoteExtensionEnableHeight(t *testing.T) { - for _, testCase := range []struct { - name string - enableHeight int64 - hasExtension bool - expectExtendCalled bool - expectVerifyCalled bool - expectSuccessfulRound bool - }{ - { - name: "extension present but not enabled", - hasExtension: true, - enableHeight: 0, - expectExtendCalled: false, - expectVerifyCalled: false, - expectSuccessfulRound: true, - }, - { - name: "extension absent but not required", - hasExtension: false, - enableHeight: 0, - expectExtendCalled: false, - expectVerifyCalled: false, - expectSuccessfulRound: true, - }, - { - name: "extension present and required", - hasExtension: true, - enableHeight: 1, - expectExtendCalled: true, - expectVerifyCalled: true, - expectSuccessfulRound: true, - }, - { - name: "extension absent but required", - hasExtension: false, - enableHeight: 1, - expectExtendCalled: true, - expectVerifyCalled: false, - expectSuccessfulRound: false, - }, - { - name: "extension absent but required in future height", - hasExtension: false, - enableHeight: 2, - expectExtendCalled: false, - expectVerifyCalled: false, - expectSuccessfulRound: true, - }, - } { - t.Run(testCase.name, func(t *testing.T) { - config := configSetup(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - numValidators := 3 - m := abcimocks.NewApplication(t) - m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{ - Status: abci.ResponseProcessProposal_ACCEPT, - }, nil) - m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) - if testCase.expectExtendCalled { - m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil) - } - if testCase.expectVerifyCalled { - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: 
abci.ResponseVerifyVoteExtension_ACCEPT, - }, nil).Times(numValidators - 1) - } - r := &abci.ResponseFinalizeBlock{AppHash: []byte("hashyHash")} - m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(r, nil).Maybe() - m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() - c := factory.ConsensusParams() - c.ABCI.VoteExtensionsEnableHeight = testCase.enableHeight - cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m, validators: numValidators, consensusParams: c}) - cs1.state.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testCase.enableHeight - height, round := cs1.Height, cs1.Round - - timeoutCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) - proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(ctx) - require.NoError(t, err) - addr := pv1.Address() - voteCh := subscribeToVoter(ctx, t, cs1, addr) - - startTestRound(ctx, cs1, cs1.Height, round) - ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(t, proposalCh, height, round) - rs := cs1.GetRoundState() - - blockID := types.BlockID{ - Hash: rs.ProposalBlock.Hash(), - PartSetHeader: rs.ProposalBlockParts.Header(), - } - - // sign all of the votes - signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) 
- ensurePrevoteMatch(t, voteCh, height, round, rs.ProposalBlock.Hash()) - - var ext []byte - if testCase.hasExtension { - ext = []byte("extension") - } - - for _, vs := range vss[1:] { - vote, err := vs.signVote(ctx, tmproto.PrecommitType, config.ChainID(), blockID, ext) - if !testCase.hasExtension { - vote.ExtensionSignature = nil - } - require.NoError(t, err) - addVotes(cs1, vote) - } - if testCase.expectSuccessfulRound { - ensurePrecommit(t, voteCh, height, round) - height++ - ensureNewRound(t, newRoundCh, height, round) - } else { - ensureNoNewTimeout(t, timeoutCh, cs1.state.ConsensusParams.Timeout.VoteTimeout(round).Nanoseconds()) - } - - m.AssertExpectations(t) - }) - } -} - // 4 vals, 3 Nil Precommits at P0 // What we want: // P0 waits for timeoutPrecommit before starting next round diff --git a/internal/evidence/mocks/block_store.go b/internal/evidence/mocks/block_store.go index b0c67ff874..e61c4e0aeb 100644 --- a/internal/evidence/mocks/block_store.go +++ b/internal/evidence/mocks/block_store.go @@ -58,13 +58,13 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return r0 } -type NewBlockStoreT interface { +type mockConstructorTestingTNewBlockStore interface { mock.TestingT Cleanup(func()) } // NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewBlockStore(t NewBlockStoreT) *BlockStore { +func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore { mock := &BlockStore{} mock.Mock.Test(t) diff --git a/internal/mempool/mocks/mempool.go b/internal/mempool/mocks/mempool.go index e1f6994d28..b699ef3c9b 100644 --- a/internal/mempool/mocks/mempool.go +++ b/internal/mempool/mocks/mempool.go @@ -171,13 +171,13 @@ func (_m *Mempool) Update(ctx context.Context, blockHeight int64, blockTxs types return r0 } -type NewMempoolT interface { +type mockConstructorTestingTNewMempool interface { mock.TestingT Cleanup(func()) } // NewMempool creates a new instance of Mempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMempool(t NewMempoolT) *Mempool { +func NewMempool(t mockConstructorTestingTNewMempool) *Mempool { mock := &Mempool{} mock.Mock.Test(t) diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 20727f8a61..5a317bcf1c 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -153,13 +153,13 @@ func (_m *Connection) String() string { return r0 } -type NewConnectionT interface { +type mockConstructorTestingTNewConnection interface { mock.TestingT Cleanup(func()) } // NewConnection creates a new instance of Connection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewConnection(t NewConnectionT) *Connection { +func NewConnection(t mockConstructorTestingTNewConnection) *Connection { mock := &Connection{} mock.Mock.Test(t) diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go index 436c961c3f..e89f0e25a3 100644 --- a/internal/p2p/mocks/transport.go +++ b/internal/p2p/mocks/transport.go @@ -149,13 +149,13 @@ func (_m *Transport) String() string { return r0 } -type NewTransportT interface { +type mockConstructorTestingTNewTransport interface { mock.TestingT Cleanup(func()) } // NewTransport creates a new instance of Transport. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTransport(t NewTransportT) *Transport { +func NewTransport(t mockConstructorTestingTNewTransport) *Transport { mock := &Transport{} mock.Mock.Test(t) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 08df95977b..cd0a63cb1f 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -1715,5 +1715,13 @@ func (m *PeerManager) UpdatePeerInfo(nodeID types.NodeID, modifier func(peerInfo func (m *PeerManager) IsDialingOrConnected(nodeID types.NodeID) bool { m.mtx.Lock() defer m.mtx.Unlock() - return m.dialing[nodeID] || m.connected[nodeID] + _, ok := m.connected[nodeID] + return m.dialing[nodeID] || ok +} + +// SetProTxHashToPeerInfo sets a proTxHash in peerInfo.proTxHash to keep this value in a store +func SetProTxHashToPeerInfo(proTxHash types.ProTxHash) func(info *peerInfo) { + return func(info *peerInfo) { + info.ProTxHash = proTxHash.Copy() + } } diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index 4214516d0d..309412baa1 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -58,13 +58,10 @@ func (env *Environment) BroadcastTx(ctx context.Context, req *coretypes.RequestB return nil, fmt.Errorf("broadcast confirmation not received: %w", ctx.Err()) case r := <-resCh: 
return &coretypes.ResultBroadcastTx{ - Code: r.Code, - Data: r.Data, - Log: r.Log, - Codespace: r.Codespace, - MempoolError: r.MempoolError, - Info: r.Info, - Hash: req.Tx.Hash(), + Code: r.Code, + Data: r.Data, + Codespace: r.Codespace, + Hash: req.Tx.Hash(), }, nil } } diff --git a/internal/state/execution.go b/internal/state/execution.go index d84de04cd8..d9d41f51fa 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -5,7 +5,6 @@ import ( "context" "errors" "fmt" - tmbytes "github.com/tendermint/tendermint/libs/bytes" "time" abciclient "github.com/tendermint/tendermint/abci/client" @@ -15,6 +14,7 @@ import ( "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" + tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmtypes "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -127,13 +127,13 @@ func (blockExec *BlockExecutor) CreateProposalBlock( rpp, err := blockExec.appClient.PrepareProposal( ctx, &abci.RequestPrepareProposal{ - MaxTxBytes: maxDataBytes, - Txs: block.Txs.ToSliceOfBytes(), - LocalLastCommit: abci.ExtendedCommitInfo(localLastCommit), - ByzantineValidators: block.Evidence.ToABCI(), - Height: block.Height, - Time: block.Time, - NextValidatorsHash: block.NextValidatorsHash, + MaxTxBytes: maxDataBytes, + Txs: block.Txs.ToSliceOfBytes(), + LocalLastCommit: abci.ExtendedCommitInfo(localLastCommit), + Misbehavior: block.Evidence.ToABCI(), + Height: block.Height, + Time: block.Time, + NextValidatorsHash: block.NextValidatorsHash, // Dash's fields CoreChainLockedHeight: block.CoreChainLockedHeight, @@ -198,6 +198,13 @@ func (blockExec *BlockExecutor) ProcessProposal( if resp.IsStatusUnknown() { panic(fmt.Sprintf("ProcessProposal responded with status %s", resp.Status.String())) } + // we force the abci app to return only 32 byte app hashes 
(set to 20 temporarily) + if resp.AppHash != nil && len(resp.AppHash) != blockExec.appHashSize { + blockExec.logger.Error( + "Client returned invalid app hash size", "bytesLength", len(resp.AppHash), + ) + return false, errors.New("invalid App Hash size") + } return resp.IsAccepted(), nil } @@ -290,11 +297,16 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, ErrProxyAppConn(err) } + numValUpdates := 0 + if fBlockRes.ValidatorSetUpdate != nil { + numValUpdates = len(fBlockRes.ValidatorSetUpdate.ValidatorUpdates) + } + blockExec.logger.Info( "finalized block", "height", block.Height, "num_txs_res", len(fBlockRes.TxResults), - "num_val_updates", len(fBlockRes.ValidatorUpdates), + "num_val_updates", numValUpdates, "block_app_hash", fmt.Sprintf("%X", fBlockRes.AppHash), ) @@ -320,12 +332,12 @@ func (blockExec *BlockExecutor) ApplyBlock( } // The quorum type should not even matter here - validators, thresholdPublicKey, quorumHash, err := types.PB2TM.ValidatorUpdatesFromValidatorSet(finalizeBlockResponse.ValidatorSetUpdate) + validators, thresholdPublicKey, quorumHash, err := types.PB2TM.ValidatorUpdatesFromValidatorSet(fBlockRes.ValidatorSetUpdate) if err != nil { return state, fmt.Errorf("error in chain lock from proto: %v", err) } if len(validators) > 0 { - blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) + blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validators)) blockExec.metrics.ValidatorSetUpdates.Add(1) } if fBlockRes.ConsensusParamUpdates != nil { @@ -455,14 +467,6 @@ func (blockExec *BlockExecutor) Commit( return 0, err } - // we force the abci app to return only 32 byte app hashes (set to 20 temporarily) - if res.Data != nil && len(res.Data) != blockExec.appHashSize { - blockExec.logger.Error( - "Client returned invalid app hash size", "bytesLength", len(res.Data), - ) - return nil, 0, errors.New("invalid App Hash size") - } - // ResponseCommit has no error 
code - just data blockExec.logger.Info( "committed state", diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index 4c138407a7..d799910747 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -748,9 +748,9 @@ func TestPrepareProposalRemoveTxs(t *testing.T) { eventBus, sm.NopMetrics(), ) - pa, _ := state.Validators.GetByIndex(0) - commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) - block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa, votes) + proTxHash, _ := state.Validators.GetByIndex(0) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, types.StateID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, proTxHash, 0) require.NoError(t, err) require.Len(t, block.Data.Txs.ToSliceOfBytes(), len(trs)-2) diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index 69028771a3..c87c7b6adb 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -170,6 +170,20 @@ func makeHeaderPartsResponsesValPowerChange( return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, finalizeBlockResponses } +func makeHeaderPartsResponsesValKeysRegenerate(t *testing.T, state sm.State, regenerate bool) (types.Header, *types.CoreChainLock, types.BlockID, *abci.ResponseFinalizeBlock) { + block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit), nil, 0) + if err != nil { + t.Error(err) + } + fbResp := &abci.ResponseFinalizeBlock{} + if regenerate == true { + proTxHashes := state.Validators.GetProTxHashes() + valUpdates := types.ValidatorUpdatesRegenerateOnProTxHashes(proTxHashes) + fbResp.ValidatorSetUpdate = &valUpdates + } + return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, fbResp +} + func 
makeHeaderPartsResponsesParams( t *testing.T, state sm.State, @@ -256,7 +270,7 @@ func (app *testApp) Info(_ context.Context, req *abci.RequestInfo) (*abci.Respon } func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - app.Misbehavior = req.ByzantineValidators + app.Misbehavior = req.Misbehavior resTxs := make([]*abci.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { diff --git a/internal/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go index a7221a0876..0c7cf86df9 100644 --- a/internal/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -166,13 +166,13 @@ func (_m *EventSink) Type() indexer.EventSinkType { return r0 } -type NewEventSinkT interface { +type mockConstructorTestingTNewEventSink interface { mock.TestingT Cleanup(func()) } // NewEventSink creates a new instance of EventSink. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewEventSink(t NewEventSinkT) *EventSink { +func NewEventSink(t mockConstructorTestingTNewEventSink) *EventSink { mock := &EventSink{} mock.Mock.Test(t) diff --git a/internal/state/mocks/block_store.go b/internal/state/mocks/block_store.go index aeba4882c1..2ee5d2557c 100644 --- a/internal/state/mocks/block_store.go +++ b/internal/state/mocks/block_store.go @@ -119,22 +119,6 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { return r0 } -// LoadBlockExtendedCommit provides a mock function with given fields: height -func (_m *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { - ret := _m.Called(height) - - var r0 *types.ExtendedCommit - if rf, ok := ret.Get(0).(func(int64) *types.ExtendedCommit); ok { - r0 = rf(height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ExtendedCommit) - } - } - - return r0 -} - // LoadBlockMeta provides a mock function with given fields: height func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { ret := _m.Called(height) @@ -225,11 +209,6 @@ func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s _m.Called(block, blockParts, seenCommit) } -// SaveBlockWithExtendedCommit provides a mock function with given fields: block, blockParts, seenCommit -func (_m *BlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { - _m.Called(block, blockParts, seenCommit) -} - // Size provides a mock function with given fields: func (_m *BlockStore) Size() int64 { ret := _m.Called() @@ -244,13 +223,13 @@ func (_m *BlockStore) Size() int64 { return r0 } -type NewBlockStoreT interface { +type mockConstructorTestingTNewBlockStore interface { mock.TestingT Cleanup(func()) } // NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewBlockStore(t NewBlockStoreT) *BlockStore { +func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore { mock := &BlockStore{} mock.Mock.Test(t) diff --git a/internal/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go index 98abb4776c..aa7c7b3364 100644 --- a/internal/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -72,13 +72,13 @@ func (_m *EvidencePool) Update(_a0 context.Context, _a1 state.State, _a2 types.E _m.Called(_a0, _a1, _a2) } -type NewEvidencePoolT interface { +type mockConstructorTestingTNewEvidencePool interface { mock.TestingT Cleanup(func()) } // NewEvidencePool creates a new instance of EvidencePool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEvidencePool(t NewEvidencePoolT) *EvidencePool { +func NewEvidencePool(t mockConstructorTestingTNewEvidencePool) *EvidencePool { mock := &EvidencePool{} mock.Mock.Test(t) diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index 94d5614e6c..1200999ef1 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -188,13 +188,13 @@ func (_m *Store) SaveValidatorSets(_a0 int64, _a1 int64, _a2 *types.ValidatorSet return r0 } -type NewStoreT interface { +type mockConstructorTestingTNewStore interface { mock.TestingT Cleanup(func()) } // NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewStore(t NewStoreT) *Store { +func NewStore(t mockConstructorTestingTNewStore) *Store { mock := &Store{} mock.Mock.Test(t) diff --git a/internal/state/services.go b/internal/state/services.go index 907a53861e..40365f2fbf 100644 --- a/internal/state/services.go +++ b/internal/state/services.go @@ -28,7 +28,6 @@ type BlockStore interface { LoadBlock(height int64) *types.Block SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) - SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) PruneBlocks(height int64) (uint64, error) @@ -38,7 +37,6 @@ type BlockStore interface { LoadBlockCommit(height int64) *types.Commit LoadSeenCommit() *types.Commit - LoadBlockExtendedCommit(height int64) *types.ExtendedCommit } //----------------------------------------------------------------------------- diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index 17ddb54ac0..099588ed12 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -83,13 +83,13 @@ func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, return r0, r1 } -type NewStateProviderT interface { +type mockConstructorTestingTNewStateProvider interface { mock.TestingT Cleanup(func()) } // NewStateProvider creates a new instance of StateProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewStateProvider(t NewStateProviderT) *StateProvider { +func NewStateProvider(t mockConstructorTestingTNewStateProvider) *StateProvider { mock := &StateProvider{} mock.Mock.Test(t) diff --git a/internal/store/store.go b/internal/store/store.go index ec5f6d0c9d..755b719422 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -287,29 +287,6 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { return commit } -// LoadExtendedCommit returns the ExtendedCommit for the given height. -// The extended commit is not guaranteed to contain the same +2/3 precommits data -// as the commit in the block. -func (bs *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { - pbec := new(tmproto.ExtendedCommit) - bz, err := bs.db.Get(extCommitKey(height)) - if err != nil { - panic(fmt.Errorf("fetching extended commit: %w", err)) - } - if len(bz) == 0 { - return nil - } - err = proto.Unmarshal(bz, pbec) - if err != nil { - panic(fmt.Errorf("decoding extended commit: %w", err)) - } - extCommit, err := types.ExtendedCommitFromProto(pbec) - if err != nil { - panic(fmt.Errorf("converting extended commit: %w", err)) - } - return extCommit -} - // LoadSeenCommit returns the last locally seen Commit before being // cannonicalized. This is useful when we've seen a commit, but there // has not yet been a new block at `height + 1` that includes this @@ -496,39 +473,6 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s } } -// SaveBlockWithExtendedCommit persists the given block, blockParts, and -// seenExtendedCommit to the underlying db. seenExtendedCommit is stored under -// two keys in the database: as the seenCommit and as the ExtendedCommit data for the -// height. This allows the vote extension data to be persisted for all blocks -// that are saved. 
-func (bs *BlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenExtendedCommit *types.ExtendedCommit) { - if block == nil { - panic("BlockStore can only save a non-nil block") - } - if err := seenExtendedCommit.EnsureExtensions(); err != nil { - panic(fmt.Errorf("saving block with extensions: %w", err)) - } - batch := bs.db.NewBatch() - if err := bs.saveBlockToBatch(batch, block, blockParts, seenExtendedCommit.ToCommit()); err != nil { - panic(err) - } - height := block.Height - - pbec := seenExtendedCommit.ToProto() - extCommitBytes := mustEncode(pbec) - if err := batch.Set(extCommitKey(height), extCommitBytes); err != nil { - panic(err) - } - - if err := batch.WriteSync(); err != nil { - panic(err) - } - - if err := batch.Close(); err != nil { - panic(err) - } -} - func (bs *BlockStore) saveBlockToBatch(batch dbm.Batch, block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) error { if block == nil { panic("BlockStore can only save a non-nil block") diff --git a/internal/store/store_test.go b/internal/store/store_test.go index d737d296d6..d2617445cc 100644 --- a/internal/store/store_test.go +++ b/internal/store/store_test.go @@ -159,7 +159,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { block: newBlock(header1, commitAtH10), parts: incompletePartSet, wantPanic: "only save complete block", // incomplete parts - seenCommit: makeTestExtCommit(10, tmtime.Now()), + seenCommit: makeTestCommit(state, 10, tmtime.Now()), }, { @@ -219,7 +219,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { bs, db := newInMemoryBlockStore() // SaveBlock res, err, panicErr := doFn(func() (interface{}, error) { - bs.SaveBlockWithExtendedCommit(tuple.block, tuple.parts, tuple.seenCommit) + bs.SaveBlock(tuple.block, tuple.parts, tuple.seenCommit) if tuple.block == nil { return nil, nil } @@ -289,90 +289,6 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } } -// TestSaveBlockWithExtendedCommitPanicOnAbsentExtension tests that 
saving a -// block with an extended commit panics when the extension data is absent. -func TestSaveBlockWithExtendedCommitPanicOnAbsentExtension(t *testing.T) { - for _, testCase := range []struct { - name string - malleateCommit func(*types.ExtendedCommit) - shouldPanic bool - }{ - { - name: "basic save", - malleateCommit: func(_ *types.ExtendedCommit) {}, - shouldPanic: false, - }, - { - name: "save commit with no extensions", - malleateCommit: func(c *types.ExtendedCommit) { - c.StripExtensions() - }, - shouldPanic: true, - }, - } { - t.Run(testCase.name, func(t *testing.T) { - state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir()) - require.NoError(t, err) - defer cleanup() - block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) - seenCommit := makeTestExtCommit(block.Header.Height, tmtime.Now()) - ps, err := block.MakePartSet(2) - require.NoError(t, err) - testCase.malleateCommit(seenCommit) - if testCase.shouldPanic { - require.Panics(t, func() { - bs.SaveBlockWithExtendedCommit(block, ps, seenCommit) - }) - } else { - bs.SaveBlockWithExtendedCommit(block, ps, seenCommit) - } - }) - } -} - -// TestLoadBlockExtendedCommit tests loading the extended commit for a previously -// saved block. The load method should return nil when only a commit was saved and -// return the extended commit otherwise. 
-func TestLoadBlockExtendedCommit(t *testing.T) { - for _, testCase := range []struct { - name string - saveExtended bool - expectResult bool - }{ - { - name: "save commit", - saveExtended: false, - expectResult: false, - }, - { - name: "save extended commit", - saveExtended: true, - expectResult: true, - }, - } { - t.Run(testCase.name, func(t *testing.T) { - state, bs, cleanup, err := makeStateAndBlockStore(t.TempDir()) - require.NoError(t, err) - defer cleanup() - block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) - seenCommit := makeTestExtCommit(block.Header.Height, tmtime.Now()) - ps, err := block.MakePartSet(2) - require.NoError(t, err) - if testCase.saveExtended { - bs.SaveBlockWithExtendedCommit(block, ps, seenCommit) - } else { - bs.SaveBlock(block, ps, seenCommit.ToCommit()) - } - res := bs.LoadBlockExtendedCommit(block.Height) - if testCase.expectResult { - require.Equal(t, seenCommit, res) - } else { - require.Nil(t, res) - } - }) - } -} - func TestLoadBaseMeta(t *testing.T) { cfg, err := config.ResetTestRoot(t.TempDir(), "blockchain_reactor_test") require.NoError(t, err) @@ -598,9 +514,7 @@ func TestBlockFetchAtHeight(t *testing.T) { } func TestSeenAndCanonicalCommit(t *testing.T) { - state, store, cleanup, err := makeStateAndBlockStore(t.TempDir()) - defer cleanup() - require.NoError(t, err) + state, store := makeStateAndBlockStore(t, t.TempDir()) loadCommit := func() (interface{}, error) { meta := store.LoadSeenCommit() diff --git a/internal/test/factory/commit.go b/internal/test/factory/commit.go index 20bc87e86c..c1bb0c3fba 100644 --- a/internal/test/factory/commit.go +++ b/internal/test/factory/commit.go @@ -48,5 +48,5 @@ func MakeCommit( } } - return voteSet.MakeExtendedCommit(), nil + return voteSet.MakeCommit(), nil } diff --git a/libs/time/mocks/source.go b/libs/time/mocks/source.go index 386d20c45f..4146993b31 100644 --- a/libs/time/mocks/source.go +++ b/libs/time/mocks/source.go @@ -27,13 +27,13 @@ func (_m *Source) Now() 
time.Time { return r0 } -type NewSourceT interface { +type mockConstructorTestingTNewSource interface { mock.TestingT Cleanup(func()) } // NewSource creates a new instance of Source. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSource(t NewSourceT) *Source { +func NewSource(t mockConstructorTestingTNewSource) *Source { mock := &Source{} mock.Mock.Test(t) diff --git a/light/provider/http/http.go b/light/provider/http/http.go index e39aa77e40..228e28c473 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -170,13 +170,13 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato const maxPages = 100 var ( - perPage = 100 - vals []*types.Validator - thresholdPublicKey crypto.PubKey - quorumType btcjson.LLMQType - quorumHash crypto.QuorumHash - page = 1 - total = -1 + perPage = 100 + vals []*types.Validator + thresholdPubKey crypto.PubKey + quorumType btcjson.LLMQType + quorumHash crypto.QuorumHash + page = 1 + total = -1 ) for len(vals) != total && page <= maxPages { @@ -184,8 +184,8 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato // is negative we will keep repeating. attempt := uint16(0) for { - requestThresholdPublicKey := attempt == 0 - res, err := p.client.Validators(ctx, height, &page, &perPage, &requestThresholdPublicKey) + reqThresholdPubKey := attempt == 0 + res, err := p.client.Validators(ctx, height, &page, &perPage, &reqThresholdPubKey) switch e := err.(type) { case nil: // success!! 
Now we validate the response if len(res.Validators) == 0 { @@ -213,36 +213,34 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato } return nil, provider.ErrBadLightBlock{Reason: e} - case *rpctypes.RPCError: - // process the rpc error and return the corresponding error to the light client - return nil, p.parseRPCError(e) + case *rpctypes.RPCError: + // process the rpc error and return the corresponding error to the light client + return nil, p.parseRPCError(e) - default: - // check if the error stems from the context - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return nil, err - } - - // If we don't know the error then by default we return an unreliable provider error and - // terminate the connection with the peer. - return nil, provider.ErrUnreliableProvider{Reason: e} + default: + // check if the error stems from the context + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return nil, err } + + // If we don't know the error then by default we return an unreliable provider error and + // terminate the connection with the peer. + return nil, provider.ErrUnreliableProvider{Reason: e} } // update the total and increment the page index so we can fetch the // next page of validators if need be total = res.Total vals = append(vals, res.Validators...) 
- if requestThresholdPublicKey { - thresholdPublicKey = *res.ThresholdPublicKey + if reqThresholdPubKey { + thresholdPubKey = *res.ThresholdPublicKey quorumHash = *res.QuorumHash quorumType = res.QuorumType } break } - } - valSet, err := types.ValidatorSetFromExistingValidators(vals, thresholdPublicKey, quorumType, quorumHash) + valSet, err := types.ValidatorSetFromExistingValidators(vals, thresholdPubKey, quorumType, quorumHash) if err != nil { return nil, provider.ErrBadLightBlock{Reason: err} } diff --git a/light/provider/mocks/provider.go b/light/provider/mocks/provider.go index af2e9c930f..d77418303a 100644 --- a/light/provider/mocks/provider.go +++ b/light/provider/mocks/provider.go @@ -66,13 +66,13 @@ func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 types.Evidence) erro return r0 } -type NewProviderT interface { +type mockConstructorTestingTNewProvider interface { mock.TestingT Cleanup(func()) } // NewProvider creates a new instance of Provider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewProvider(t NewProviderT) *Provider { +func NewProvider(t mockConstructorTestingTNewProvider) *Provider { mock := &Provider{} mock.Mock.Test(t) diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index 4130c9575c..439ce31ea7 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -116,13 +116,13 @@ func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int6 return r0, r1 } -type NewLightClientT interface { +type mockConstructorTestingTNewLightClient interface { mock.TestingT Cleanup(func()) } // NewLightClient creates a new instance of LightClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewLightClient(t NewLightClientT) *LightClient { +func NewLightClient(t mockConstructorTestingTNewLightClient) *LightClient { mock := &LightClient{} mock.Mock.Test(t) diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index 8757f8ab3e..ae85bb2ed0 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -116,8 +116,8 @@ func (m *NoBlockResponse) GetHeight() int64 { // BlockResponse returns block to the requested type BlockResponse struct { - Block *types.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` - ExtCommit *types.ExtendedCommit `protobuf:"bytes,2,opt,name=ext_commit,json=extCommit,proto3" json:"ext_commit,omitempty"` + Block *types.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Commit *types.Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` } func (m *BlockResponse) Reset() { *m = BlockResponse{} } @@ -160,9 +160,9 @@ func (m *BlockResponse) GetBlock() *types.Block { return nil } -func (m *BlockResponse) GetExtCommit() *types.ExtendedCommit { +func (m *BlockResponse) GetCommit() *types.Commit { if m != nil { - return m.ExtCommit + return m.Commit } return nil } @@ -393,33 +393,32 @@ func init() { func init() { proto.RegisterFile("tendermint/blocksync/types.proto", fileDescriptor_19b397c236e0fa07) } var fileDescriptor_19b397c236e0fa07 = []byte{ - // 404 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0xcd, 0x4a, 0xeb, 0x50, - 0x10, 0xc7, 0x93, 0x9b, 0xb6, 0x97, 0x3b, 0xb7, 0x69, 0xb8, 0xe1, 0xa2, 0x45, 0x24, 0x94, 0xf8, - 0x81, 0x2e, 0x4c, 0x40, 0x97, 0x0a, 0x42, 0x45, 0xa8, 0xe0, 0x07, 0xa4, 0xb8, 0x71, 0x53, 0x9a, - 0xf4, 0xd0, 0x06, 0x4d, 0x4e, 0xed, 0x39, 0x81, 0x76, 0xe5, 0x2b, 0xf8, 0x02, 0xbe, 0x8f, 0xcb, - 0x2e, 0x5d, 0x4a, 0xfb, 0x22, 0xd2, 0x39, 0x69, 0x9a, 0xc6, 0x98, 0xdd, 0x64, 0xce, 0x7f, 0x7e, - 0xf9, 0xcf, 0x0c, 
0x03, 0x0d, 0x4e, 0xc2, 0x1e, 0x19, 0x05, 0x7e, 0xc8, 0x6d, 0xf7, 0x89, 0x7a, - 0x8f, 0x6c, 0x12, 0x7a, 0x36, 0x9f, 0x0c, 0x09, 0xb3, 0x86, 0x23, 0xca, 0xa9, 0xfe, 0x7f, 0xa5, - 0xb0, 0x12, 0xc5, 0xd6, 0x76, 0xaa, 0x0e, 0xd5, 0xa2, 0x5a, 0xd4, 0xe4, 0xbc, 0xa6, 0x88, 0xe6, - 0x3e, 0x54, 0x9b, 0x0b, 0xb1, 0x43, 0x9e, 0x23, 0xc2, 0xb8, 0xbe, 0x01, 0x95, 0x01, 0xf1, 0xfb, - 0x03, 0x5e, 0x97, 0x1b, 0xf2, 0x81, 0xe2, 0xc4, 0x5f, 0xe6, 0x21, 0x68, 0xb7, 0x34, 0x56, 0xb2, - 0x21, 0x0d, 0x19, 0xf9, 0x51, 0xfa, 0x02, 0xea, 0xba, 0xf0, 0x08, 0xca, 0x68, 0x08, 0x75, 0x7f, - 0x8f, 0x37, 0xad, 0x54, 0x17, 0xc2, 0x8b, 0xd0, 0x0b, 0x95, 0x7e, 0x0e, 0x40, 0xc6, 0xbc, 0xe3, - 0xd1, 0x20, 0xf0, 0x79, 0xfd, 0x17, 0xd6, 0x34, 0xbe, 0xd7, 0x5c, 0x8e, 0x31, 0xd5, 0xbb, 0x40, - 0x9d, 0xf3, 0x87, 0x8c, 0xb9, 0x08, 0x4d, 0x0d, 0xd4, 0x36, 0xef, 0xf2, 0x88, 0xc5, 0x4d, 0x99, - 0x67, 0x50, 0x5b, 0x26, 0x8a, 0xbd, 0xeb, 0x3a, 0x94, 0xdc, 0x2e, 0x23, 0xf8, 0x57, 0xc5, 0xc1, - 0xd8, 0x7c, 0x53, 0xe0, 0xf7, 0x0d, 0x61, 0xac, 0xdb, 0x27, 0xfa, 0x15, 0xa8, 0x68, 0xb2, 0x33, - 0x12, 0xe8, 0xb8, 0x25, 0xd3, 0xca, 0x5b, 0x8c, 0x95, 0x9e, 0x6c, 0x4b, 0x72, 0xaa, 0x6e, 0x7a, - 0xd2, 0x6d, 0xf8, 0x17, 0xd2, 0xce, 0x92, 0x26, 0x7c, 0xc5, 0xdd, 0xee, 0xe5, 0xe3, 0x32, 0x0b, - 0x68, 0x49, 0x8e, 0x16, 0x66, 0x76, 0x72, 0x0d, 0xb5, 0x0c, 0x51, 0x41, 0xe2, 0x4e, 0xa1, 0xc1, - 0x84, 0xa7, 0xba, 0x59, 0x1a, 0xc3, 0xb9, 0x25, 0xed, 0x96, 0x8a, 0x68, 0x6b, 0x43, 0x5f, 0xd0, - 0x58, 0x3a, 0xa1, 0xdf, 0x81, 0x96, 0xd0, 0x62, 0x73, 0x65, 0xc4, 0xed, 0x16, 0xe3, 0x12, 0x77, - 0x35, 0xb6, 0x96, 0x69, 0x96, 0x41, 0x61, 0x51, 0xd0, 0xbc, 0x7f, 0x9f, 0x19, 0xf2, 0x74, 0x66, - 0xc8, 0x9f, 0x33, 0x43, 0x7e, 0x9d, 0x1b, 0xd2, 0x74, 0x6e, 0x48, 0x1f, 0x73, 0x43, 0x7a, 0x38, - 0xed, 0xfb, 0x7c, 0x10, 0xb9, 0x96, 0x47, 0x03, 0x3b, 0x7d, 0x05, 0xab, 0x10, 0x8f, 0xc0, 0xce, - 0xbb, 0x3b, 0xb7, 0x82, 0x6f, 0x27, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x13, 0x4f, 0x42, - 0x96, 0x03, 0x00, 0x00, + // 391 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0xcd, 0x4e, 0xea, 0x40, + 0x14, 0xc7, 0xdb, 0x5b, 0xe0, 0x26, 0xe7, 0x52, 0x9a, 0x3b, 0xb9, 0xb9, 0x12, 0x63, 0x1a, 0x52, + 0x3f, 0xa2, 0x0b, 0x5b, 0xa3, 0x4b, 0x5d, 0xe1, 0x06, 0x13, 0x3f, 0x92, 0x12, 0x37, 0x6e, 0x08, + 0xad, 0x13, 0x68, 0xb4, 0x9d, 0xca, 0x4c, 0x17, 0xbc, 0x85, 0x2f, 0xe0, 0xfb, 0xb8, 0x64, 0xe9, + 0xd2, 0xc0, 0x8b, 0x18, 0x66, 0x86, 0x32, 0xd4, 0xda, 0xdd, 0x30, 0xf3, 0x3b, 0x3f, 0xfe, 0xe7, + 0xcc, 0x14, 0x3a, 0x0c, 0x27, 0x8f, 0x78, 0x12, 0x47, 0x09, 0xf3, 0x82, 0x67, 0x12, 0x3e, 0xd1, + 0x69, 0x12, 0x7a, 0x6c, 0x9a, 0x62, 0xea, 0xa6, 0x13, 0xc2, 0x08, 0xfa, 0xb7, 0x26, 0xdc, 0x9c, + 0xd8, 0xde, 0x51, 0xea, 0x38, 0x2d, 0xaa, 0x45, 0x4d, 0xc9, 0xa9, 0x62, 0x74, 0x0e, 0xa0, 0xd9, + 0x5d, 0xc2, 0x3e, 0x7e, 0xc9, 0x30, 0x65, 0xe8, 0x3f, 0x34, 0xc6, 0x38, 0x1a, 0x8d, 0x59, 0x5b, + 0xef, 0xe8, 0x87, 0x86, 0x2f, 0x7f, 0x39, 0x47, 0x60, 0xdd, 0x12, 0x49, 0xd2, 0x94, 0x24, 0x14, + 0xff, 0x88, 0xa6, 0x60, 0x6e, 0x82, 0xc7, 0x50, 0xe7, 0x81, 0x38, 0xf7, 0xe7, 0x74, 0xcb, 0x55, + 0xba, 0x10, 0x59, 0x04, 0x2f, 0x28, 0x74, 0x02, 0x8d, 0x90, 0xc4, 0x71, 0xc4, 0xda, 0xbf, 0x38, + 0xdf, 0xfe, 0xce, 0x5f, 0xf2, 0x73, 0x5f, 0x72, 0x8e, 0x05, 0x66, 0x9f, 0x0d, 0x59, 0x46, 0x65, + 0x17, 0xce, 0x05, 0xb4, 0x56, 0x1b, 0xd5, 0x61, 0x11, 0x82, 0x5a, 0x30, 0xa4, 0x98, 0xff, 0x95, + 0xe1, 0xf3, 0xb5, 0xf3, 0x66, 0xc0, 0xef, 0x1b, 0x4c, 0xe9, 0x70, 0x84, 0xd1, 0x15, 0x98, 0x3c, + 0xd5, 0x60, 0x22, 0xd4, 0xb2, 0x07, 0xc7, 0x2d, 0xbb, 0x09, 0x57, 0x1d, 0x65, 0x4f, 0xf3, 0x9b, + 0x81, 0x3a, 0xda, 0x3e, 0xfc, 0x4d, 0xc8, 0x60, 0x65, 0x13, 0xb9, 0x64, 0x8b, 0xfb, 0xe5, 0xba, + 0xc2, 0xc4, 0x7b, 0x9a, 0x6f, 0x25, 0x85, 0x4b, 0xb8, 0x86, 0x56, 0xc1, 0x68, 0x70, 0xe3, 0x6e, + 0x65, 0xc0, 0xdc, 0x67, 0x06, 0x45, 0x1b, 0xe5, 0x73, 0xcb, 0xdb, 0xad, 0x55, 0xd9, 0x36, 0x86, + 0xbe, 0xb4, 0x51, 0x75, 0x03, 0xdd, 0x81, 0x95, 0xdb, 0x64, 0xb8, 0x3a, 0xd7, 0xed, 0x55, 0xeb, + 0xf2, 0x74, 0x2d, 
0xba, 0xb1, 0xd3, 0xad, 0x83, 0x41, 0xb3, 0xb8, 0x7b, 0xff, 0x3e, 0xb7, 0xf5, + 0xd9, 0xdc, 0xd6, 0x3f, 0xe7, 0xb6, 0xfe, 0xba, 0xb0, 0xb5, 0xd9, 0xc2, 0xd6, 0x3e, 0x16, 0xb6, + 0xf6, 0x70, 0x3e, 0x8a, 0xd8, 0x38, 0x0b, 0xdc, 0x90, 0xc4, 0x9e, 0xfa, 0xec, 0xd7, 0x4b, 0xfe, + 0xea, 0xbd, 0xb2, 0x0f, 0x2d, 0x68, 0xf0, 0xb3, 0xb3, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x83, + 0x34, 0xe6, 0xd2, 0x87, 0x03, 0x00, 0x00, } func (m *BlockRequest) Marshal() (dAtA []byte, err error) { @@ -498,9 +497,9 @@ func (m *BlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.ExtCommit != nil { + if m.Commit != nil { { - size, err := m.ExtCommit.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -763,8 +762,8 @@ func (m *BlockResponse) Size() (n int) { l = m.Block.Size() n += 1 + l + sovTypes(uint64(l)) } - if m.ExtCommit != nil { - l = m.ExtCommit.Size() + if m.Commit != nil { + l = m.Commit.Size() n += 1 + l + sovTypes(uint64(l)) } return n @@ -1078,7 +1077,7 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtCommit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1105,10 +1104,10 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ExtCommit == nil { - m.ExtCommit = &types.ExtendedCommit{} + if m.Commit == nil { + m.Commit = &types.Commit{} } - if err := m.ExtCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/proto/tendermint/blocksync/types.proto b/proto/tendermint/blocksync/types.proto index dca81db2b6..af09429368 100644 --- a/proto/tendermint/blocksync/types.proto +++ 
b/proto/tendermint/blocksync/types.proto @@ -19,8 +19,8 @@ message NoBlockResponse { // BlockResponse returns block to the requested message BlockResponse { - tendermint.types.Block block = 1; - tendermint.types.ExtendedCommit ext_commit = 2; + tendermint.types.Block block = 1; + tendermint.types.Commit commit = 2; } // StatusRequest requests the status of a peer. diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go index 7965b668bb..5bfab3fa6a 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -96,6 +96,8 @@ type NodeInfo struct { Channels []byte `protobuf:"bytes,6,opt,name=channels,proto3" json:"channels,omitempty"` Moniker string `protobuf:"bytes,7,opt,name=moniker,proto3" json:"moniker,omitempty"` Other NodeInfoOther `protobuf:"bytes,8,opt,name=other,proto3" json:"other"` + // dash's fields + ProTxHash []byte `protobuf:"bytes,100,opt,name=pro_tx_hash,json=proTxHash,proto3" json:"pro_tx_hash,omitempty"` } func (m *NodeInfo) Reset() { *m = NodeInfo{} } @@ -187,6 +189,13 @@ func (m *NodeInfo) GetOther() NodeInfoOther { return NodeInfoOther{} } +func (m *NodeInfo) GetProTxHash() []byte { + if m != nil { + return m.ProTxHash + } + return nil +} + type NodeInfoOther struct { TxIndex string `protobuf:"bytes,1,opt,name=tx_index,json=txIndex,proto3" json:"tx_index,omitempty"` RPCAddress string `protobuf:"bytes,2,opt,name=rpc_address,json=rpcAddress,proto3" json:"rpc_address,omitempty"` @@ -244,6 +253,8 @@ type PeerInfo struct { AddressInfo []*PeerAddressInfo `protobuf:"bytes,2,rep,name=address_info,json=addressInfo,proto3" json:"address_info,omitempty"` LastConnected *time.Time `protobuf:"bytes,3,opt,name=last_connected,json=lastConnected,proto3,stdtime" json:"last_connected,omitempty"` Inactive bool `protobuf:"varint,4,opt,name=inactive,proto3" json:"inactive,omitempty"` + // dash's fields + ProTxHash []byte `protobuf:"bytes,100,opt,name=pro_tx_hash,json=proTxHash,proto3" json:"pro_tx_hash,omitempty"` } 
func (m *PeerInfo) Reset() { *m = PeerInfo{} } @@ -307,6 +318,13 @@ func (m *PeerInfo) GetInactive() bool { return false } +func (m *PeerInfo) GetProTxHash() []byte { + if m != nil { + return m.ProTxHash + } + return nil +} + type PeerAddressInfo struct { Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` LastDialSuccess *time.Time `protobuf:"bytes,2,opt,name=last_dial_success,json=lastDialSuccess,proto3,stdtime" json:"last_dial_success,omitempty"` @@ -386,46 +404,48 @@ func init() { func init() { proto.RegisterFile("tendermint/p2p/types.proto", fileDescriptor_c8a29e659aeca578) } var fileDescriptor_c8a29e659aeca578 = []byte{ - // 621 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x41, 0x4f, 0xdb, 0x30, - 0x14, 0x6e, 0xda, 0xd2, 0x96, 0x57, 0x4a, 0x99, 0x85, 0xa6, 0x50, 0x69, 0x0d, 0x2a, 0x17, 0x4e, - 0x89, 0xd4, 0x69, 0x87, 0x1d, 0x09, 0x68, 0x53, 0xa5, 0x69, 0x54, 0x1e, 0xda, 0x61, 0x3b, 0x44, - 0x69, 0xec, 0x16, 0x8b, 0xd4, 0xb6, 0x12, 0x97, 0xb1, 0x7f, 0xc1, 0xbf, 0x1a, 0xd2, 0x2e, 0x1c, - 0x77, 0xea, 0xa6, 0x70, 0xdd, 0x8f, 0x98, 0xec, 0x24, 0xd0, 0x56, 0x3b, 0x70, 0xf3, 0xf7, 0x9e, - 0xbf, 0xcf, 0xdf, 0x7b, 0xcf, 0x7a, 0xd0, 0x53, 0x94, 0x13, 0x9a, 0xcc, 0x19, 0x57, 0x9e, 0x1c, - 0x4a, 0x4f, 0x7d, 0x97, 0x34, 0x75, 0x65, 0x22, 0x94, 0x40, 0xbb, 0x4f, 0x39, 0x57, 0x0e, 0x65, - 0x6f, 0x7f, 0x26, 0x66, 0xc2, 0xa4, 0x3c, 0x7d, 0xca, 0x6f, 0xf5, 0x9c, 0x99, 0x10, 0xb3, 0x98, - 0x7a, 0x06, 0x4d, 0x16, 0x53, 0x4f, 0xb1, 0x39, 0x4d, 0x55, 0x38, 0x97, 0xf9, 0x85, 0xc1, 0x05, - 0x74, 0xc7, 0xfa, 0x10, 0x89, 0xf8, 0x33, 0x4d, 0x52, 0x26, 0x38, 0x3a, 0x80, 0x9a, 0x1c, 0x4a, - 0xdb, 0x3a, 0xb4, 0x8e, 0xeb, 0x7e, 0x33, 0x5b, 0x3a, 0xb5, 0xf1, 0x70, 0x8c, 0x75, 0x0c, 0xed, - 0xc3, 0xd6, 0x24, 0x16, 0xd1, 0x95, 0x5d, 0xd5, 0x49, 0x9c, 0x03, 0xb4, 0x07, 0xb5, 0x50, 0x4a, - 0xbb, 0x66, 0x62, 0xfa, 0x38, 0xf8, 0x51, 0x85, 0xd6, 0x47, 0x41, 0xe8, 0x88, 0x4f, 0x05, 0x1a, - 0xc3, 
0x9e, 0x2c, 0x9e, 0x08, 0xae, 0xf3, 0x37, 0x8c, 0x78, 0x7b, 0xe8, 0xb8, 0xeb, 0x45, 0xb8, - 0x1b, 0x56, 0xfc, 0xfa, 0xdd, 0xd2, 0xa9, 0xe0, 0xae, 0xdc, 0x70, 0x78, 0x04, 0x4d, 0x2e, 0x08, - 0x0d, 0x18, 0x31, 0x46, 0xb6, 0x7d, 0xc8, 0x96, 0x4e, 0xc3, 0x3c, 0x78, 0x86, 0x1b, 0x3a, 0x35, - 0x22, 0xc8, 0x81, 0x76, 0xcc, 0x52, 0x45, 0x79, 0x10, 0x12, 0x92, 0x18, 0x77, 0xdb, 0x18, 0xf2, - 0xd0, 0x09, 0x21, 0x09, 0xb2, 0xa1, 0xc9, 0xa9, 0xfa, 0x26, 0x92, 0x2b, 0xbb, 0x6e, 0x92, 0x25, - 0xd4, 0x99, 0xd2, 0xe8, 0x56, 0x9e, 0x29, 0x20, 0xea, 0x41, 0x2b, 0xba, 0x0c, 0x39, 0xa7, 0x71, - 0x6a, 0x37, 0x0e, 0xad, 0xe3, 0x1d, 0xfc, 0x88, 0x35, 0x6b, 0x2e, 0x38, 0xbb, 0xa2, 0x89, 0xdd, - 0xcc, 0x59, 0x05, 0x44, 0x6f, 0x61, 0x4b, 0xa8, 0x4b, 0x9a, 0xd8, 0x2d, 0x53, 0xf6, 0xab, 0xcd, - 0xb2, 0xcb, 0x56, 0x9d, 0xeb, 0x4b, 0x45, 0xd1, 0x39, 0x63, 0xf0, 0x15, 0x3a, 0x6b, 0x59, 0x74, - 0x00, 0x2d, 0x75, 0x13, 0x30, 0x4e, 0xe8, 0x8d, 0xe9, 0xe2, 0x36, 0x6e, 0xaa, 0x9b, 0x91, 0x86, - 0xc8, 0x83, 0x76, 0x22, 0x23, 0x53, 0x2e, 0x4d, 0xd3, 0xa2, 0x35, 0xbb, 0xd9, 0xd2, 0x01, 0x3c, - 0x3e, 0x3d, 0xc9, 0xa3, 0x18, 0x12, 0x19, 0x15, 0xe7, 0xc1, 0x4f, 0x0b, 0x5a, 0x63, 0x4a, 0x13, - 0x33, 0xa6, 0x97, 0x50, 0x65, 0x24, 0x97, 0xf4, 0x1b, 0xd9, 0xd2, 0xa9, 0x8e, 0xce, 0x70, 0x95, - 0x11, 0xe4, 0xc3, 0x4e, 0xa1, 0x18, 0x30, 0x3e, 0x15, 0x76, 0xf5, 0xb0, 0xf6, 0xdf, 0xd1, 0x51, - 0x9a, 0x14, 0xba, 0x5a, 0x0e, 0xb7, 0xc3, 0x27, 0x80, 0xde, 0xc3, 0x6e, 0x1c, 0xa6, 0x2a, 0x88, - 0x04, 0xe7, 0x34, 0x52, 0x94, 0x98, 0x71, 0xb4, 0x87, 0x3d, 0x37, 0xff, 0x9f, 0x6e, 0xf9, 0x3f, - 0xdd, 0x8b, 0xf2, 0x7f, 0xfa, 0xf5, 0xdb, 0xdf, 0x8e, 0x85, 0x3b, 0x9a, 0x77, 0x5a, 0xd2, 0x74, - 0xff, 0x19, 0x0f, 0x23, 0xc5, 0xae, 0xa9, 0x19, 0x5a, 0x0b, 0x3f, 0xe2, 0xc1, 0x5f, 0x0b, 0xba, - 0x1b, 0x2e, 0xf4, 0x4c, 0xca, 0x76, 0x14, 0xcd, 0x2a, 0x20, 0xfa, 0x00, 0x2f, 0x8c, 0x25, 0xc2, - 0xc2, 0x38, 0x48, 0x17, 0x51, 0x54, 0xb6, 0xec, 0x39, 0xae, 0xba, 0x9a, 0x7a, 0xc6, 0xc2, 0xf8, - 0x53, 0x4e, 0x5c, 0x57, 0x9b, 0x86, 0x2c, 
0x5e, 0x24, 0xf4, 0xd9, 0x35, 0x3e, 0xaa, 0xbd, 0xcb, - 0x89, 0xe8, 0x08, 0x3a, 0xab, 0x42, 0xa9, 0x29, 0xb5, 0x83, 0x77, 0xc8, 0xd3, 0x9d, 0xd4, 0x3f, - 0xbf, 0xcb, 0xfa, 0xd6, 0x7d, 0xd6, 0xb7, 0xfe, 0x64, 0x7d, 0xeb, 0xf6, 0xa1, 0x5f, 0xb9, 0x7f, - 0xe8, 0x57, 0x7e, 0x3d, 0xf4, 0x2b, 0x5f, 0xde, 0xcc, 0x98, 0xba, 0x5c, 0x4c, 0xdc, 0x48, 0xcc, - 0xbd, 0x95, 0x0d, 0xb2, 0xba, 0x4c, 0xcc, 0x9e, 0x58, 0xdf, 0x2e, 0x93, 0x86, 0x89, 0xbe, 0xfe, - 0x17, 0x00, 0x00, 0xff, 0xff, 0x42, 0xcb, 0x37, 0x26, 0x76, 0x04, 0x00, 0x00, + // 648 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xbd, 0x6e, 0xdb, 0x3a, + 0x18, 0xb5, 0x6c, 0xc7, 0x3f, 0x74, 0x1c, 0xe7, 0x12, 0xc1, 0x85, 0x62, 0xe0, 0x4a, 0x81, 0xb3, + 0x64, 0x92, 0x00, 0x5f, 0x74, 0xe8, 0x18, 0x25, 0x68, 0x6b, 0xa0, 0x68, 0x0c, 0x36, 0xe8, 0xd0, + 0x0e, 0x82, 0x2c, 0xd2, 0x36, 0x11, 0x99, 0x24, 0x28, 0x3a, 0x75, 0xdf, 0x22, 0x8f, 0x95, 0x31, + 0x63, 0x27, 0xb7, 0x50, 0x86, 0x0e, 0xed, 0x43, 0x14, 0xa4, 0xa4, 0x24, 0x36, 0x0a, 0x34, 0x1b, + 0xcf, 0xf7, 0xf1, 0x1c, 0x1e, 0x9e, 0x8f, 0x20, 0xe8, 0x2b, 0xc2, 0x30, 0x91, 0x0b, 0xca, 0x94, + 0x2f, 0x86, 0xc2, 0x57, 0x5f, 0x04, 0x49, 0x3d, 0x21, 0xb9, 0xe2, 0x70, 0xef, 0xb1, 0xe7, 0x89, + 0xa1, 0xe8, 0x1f, 0xcc, 0xf8, 0x8c, 0x9b, 0x96, 0xaf, 0x57, 0xf9, 0xae, 0xbe, 0x3b, 0xe3, 0x7c, + 0x96, 0x10, 0xdf, 0xa0, 0xc9, 0x72, 0xea, 0x2b, 0xba, 0x20, 0xa9, 0x8a, 0x16, 0x22, 0xdf, 0x30, + 0xb8, 0x04, 0xbd, 0xb1, 0x5e, 0xc4, 0x3c, 0xf9, 0x40, 0x64, 0x4a, 0x39, 0x83, 0x87, 0xa0, 0x26, + 0x86, 0xc2, 0xb6, 0x8e, 0xac, 0x93, 0x7a, 0xd0, 0xcc, 0xd6, 0x6e, 0x6d, 0x3c, 0x1c, 0x23, 0x5d, + 0x83, 0x07, 0x60, 0x67, 0x92, 0xf0, 0xf8, 0xca, 0xae, 0xea, 0x26, 0xca, 0x01, 0xdc, 0x07, 0xb5, + 0x48, 0x08, 0xbb, 0x66, 0x6a, 0x7a, 0x39, 0xf8, 0x51, 0x05, 0xad, 0x77, 0x1c, 0x93, 0x11, 0x9b, + 0x72, 0x38, 0x06, 0xfb, 0xa2, 0x38, 0x22, 0xbc, 0xce, 0xcf, 0x30, 0xe2, 0x9d, 0xa1, 0xeb, 0x6d, + 0x5e, 0xc2, 0xdb, 0xb2, 0x12, 0xd4, 0x6f, 0xd7, 
0x6e, 0x05, 0xf5, 0xc4, 0x96, 0xc3, 0x63, 0xd0, + 0x64, 0x1c, 0x93, 0x90, 0x62, 0x63, 0xa4, 0x1d, 0x80, 0x6c, 0xed, 0x36, 0xcc, 0x81, 0xe7, 0xa8, + 0xa1, 0x5b, 0x23, 0x0c, 0x5d, 0xd0, 0x49, 0x68, 0xaa, 0x08, 0x0b, 0x23, 0x8c, 0xa5, 0x71, 0xd7, + 0x46, 0x20, 0x2f, 0x9d, 0x62, 0x2c, 0xa1, 0x0d, 0x9a, 0x8c, 0xa8, 0xcf, 0x5c, 0x5e, 0xd9, 0x75, + 0xd3, 0x2c, 0xa1, 0xee, 0x94, 0x46, 0x77, 0xf2, 0x4e, 0x01, 0x61, 0x1f, 0xb4, 0xe2, 0x79, 0xc4, + 0x18, 0x49, 0x52, 0xbb, 0x71, 0x64, 0x9d, 0xec, 0xa2, 0x07, 0xac, 0x59, 0x0b, 0xce, 0xe8, 0x15, + 0x91, 0x76, 0x33, 0x67, 0x15, 0x10, 0xbe, 0x04, 0x3b, 0x5c, 0xcd, 0x89, 0xb4, 0x5b, 0xe6, 0xda, + 0xff, 0x6d, 0x5f, 0xbb, 0x8c, 0xea, 0x42, 0x6f, 0x2a, 0x2e, 0x9d, 0x33, 0xa0, 0x03, 0x3a, 0x42, + 0xf2, 0x50, 0xad, 0xc2, 0x79, 0x94, 0xce, 0x6d, 0x6c, 0xce, 0x6c, 0x0b, 0xc9, 0x2f, 0x57, 0x6f, + 0xa2, 0x74, 0x3e, 0xf8, 0x04, 0xba, 0x1b, 0x6c, 0x78, 0x08, 0x5a, 0x6a, 0x15, 0x52, 0x86, 0xc9, + 0xca, 0xa4, 0xdc, 0x46, 0x4d, 0xb5, 0x1a, 0x69, 0x08, 0x7d, 0xd0, 0x91, 0x22, 0x36, 0x71, 0x90, + 0x34, 0x2d, 0xa2, 0xdb, 0xcb, 0xd6, 0x2e, 0x40, 0xe3, 0xb3, 0xd3, 0xbc, 0x8a, 0x80, 0x14, 0x71, + 0xb1, 0x1e, 0xfc, 0xb4, 0x40, 0x6b, 0x4c, 0x88, 0x34, 0x63, 0xfc, 0x17, 0x54, 0x29, 0xce, 0x25, + 0x83, 0x46, 0xb6, 0x76, 0xab, 0xa3, 0x73, 0x54, 0xa5, 0x18, 0x06, 0x60, 0xb7, 0x50, 0x0c, 0x29, + 0x9b, 0x72, 0xbb, 0x7a, 0x54, 0xfb, 0xe3, 0x68, 0x09, 0x91, 0x85, 0xae, 0x96, 0x43, 0x9d, 0xe8, + 0x11, 0xc0, 0xd7, 0x60, 0x2f, 0x89, 0x52, 0x15, 0xc6, 0x9c, 0x31, 0x12, 0x2b, 0x82, 0xcd, 0xb8, + 0x3a, 0xc3, 0xbe, 0x97, 0xbf, 0x5f, 0xaf, 0x7c, 0xbf, 0xde, 0x65, 0xf9, 0x7e, 0x83, 0xfa, 0xcd, + 0x37, 0xd7, 0x42, 0x5d, 0xcd, 0x3b, 0x2b, 0x69, 0x7a, 0x3e, 0x94, 0x45, 0xb1, 0xa2, 0xd7, 0xc4, + 0x0c, 0xb5, 0x85, 0x1e, 0xf0, 0x5f, 0xa3, 0xfc, 0x65, 0x81, 0xde, 0x96, 0x4b, 0x3d, 0xd3, 0x32, + 0xae, 0x22, 0xcc, 0x02, 0xc2, 0xb7, 0xe0, 0x1f, 0x63, 0x19, 0xd3, 0x28, 0x09, 0xd3, 0x65, 0x1c, + 0x97, 0x91, 0x3e, 0xc7, 0x75, 0x4f, 0x53, 0xcf, 0x69, 0x94, 0xbc, 0xcf, 0x89, 0x9b, 
0x6a, 0xd3, + 0x88, 0x26, 0x4b, 0x49, 0x9e, 0x9d, 0xc1, 0x83, 0xda, 0xab, 0x9c, 0x08, 0x8f, 0x41, 0xf7, 0xa9, + 0x50, 0x6a, 0xa2, 0xe8, 0xa2, 0x5d, 0xfc, 0xb8, 0x27, 0x0d, 0x2e, 0x6e, 0x33, 0xc7, 0xba, 0xcb, + 0x1c, 0xeb, 0x7b, 0xe6, 0x58, 0x37, 0xf7, 0x4e, 0xe5, 0xee, 0xde, 0xa9, 0x7c, 0xbd, 0x77, 0x2a, + 0x1f, 0x5f, 0xcc, 0xa8, 0x9a, 0x2f, 0x27, 0x5e, 0xcc, 0x17, 0xfe, 0x93, 0x1f, 0xe8, 0xe9, 0x67, + 0x64, 0xfe, 0x99, 0xcd, 0xdf, 0x69, 0xd2, 0x30, 0xd5, 0xff, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, + 0xc4, 0x51, 0xcb, 0xf4, 0xb6, 0x04, 0x00, 0x00, } func (m *ProtocolVersion) Marshal() (dAtA []byte, err error) { @@ -486,6 +506,15 @@ func (m *NodeInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ProTxHash) > 0 { + i -= len(m.ProTxHash) + copy(dAtA[i:], m.ProTxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProTxHash))) + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa2 + } { size, err := m.Other.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -608,6 +637,15 @@ func (m *PeerInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ProTxHash) > 0 { + i -= len(m.ProTxHash) + copy(dAtA[i:], m.ProTxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProTxHash))) + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa2 + } if m.Inactive { i-- if m.Inactive { @@ -770,6 +808,10 @@ func (m *NodeInfo) Size() (n int) { } l = m.Other.Size() n += 1 + l + sovTypes(uint64(l)) + l = len(m.ProTxHash) + if l > 0 { + n += 2 + l + sovTypes(uint64(l)) + } return n } @@ -813,6 +855,10 @@ func (m *PeerInfo) Size() (n int) { if m.Inactive { n += 2 } + l = len(m.ProTxHash) + if l > 0 { + n += 2 + l + sovTypes(uint64(l)) + } return n } @@ -1242,6 +1288,40 @@ func (m *NodeInfo) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProTxHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProTxHash = append(m.ProTxHash[:0], dAtA[iNdEx:postIndex]...) + if m.ProTxHash == nil { + m.ProTxHash = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -1528,6 +1608,40 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error { } } m.Inactive = bool(v != 0) + case 100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProTxHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProTxHash = append(m.ProTxHash[:0], dAtA[iNdEx:postIndex]...) + if m.ProTxHash == nil { + m.ProTxHash = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index 7d86d936f2..f2fb79cb17 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -193,9 +193,12 @@ type State struct { ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` InitialHeight int64 `protobuf:"varint,14,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` // LastBlockHeight=0 at genesis (ie. 
block(H=0) does not exist) - LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockID types.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` - LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` + LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockID types.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` + LastStateID types.StateID `protobuf:"bytes,102,opt,name=last_state_id,json=lastStateId,proto3" json:"last_state_id"` + LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` + LastCoreChainLockedBlockHeight uint32 `protobuf:"varint,100,opt,name=last_core_chain_locked_block_height,json=lastCoreChainLockedBlockHeight,proto3" json:"last_core_chain_locked_block_height,omitempty"` + NextCoreChainLock *types.CoreChainLock `protobuf:"bytes,101,opt,name=next_core_chain_lock,json=nextCoreChainLock,proto3" json:"next_core_chain_lock,omitempty"` // LastValidators is used to validate block.LastCommit. // Validators are persisted to the database separately every time they change, // so we can query for historical validator sets. 
@@ -284,6 +287,13 @@ func (m *State) GetLastBlockID() types.BlockID { return types.BlockID{} } +func (m *State) GetLastStateID() types.StateID { + if m != nil { + return m.LastStateID + } + return types.StateID{} +} + func (m *State) GetLastBlockTime() time.Time { if m != nil { return m.LastBlockTime @@ -291,6 +301,20 @@ func (m *State) GetLastBlockTime() time.Time { return time.Time{} } +func (m *State) GetLastCoreChainLockedBlockHeight() uint32 { + if m != nil { + return m.LastCoreChainLockedBlockHeight + } + return 0 +} + +func (m *State) GetNextCoreChainLock() *types.CoreChainLock { + if m != nil { + return m.NextCoreChainLock + } + return nil +} + func (m *State) GetNextValidators() *types.ValidatorSet { if m != nil { return m.NextValidators @@ -357,49 +381,54 @@ func init() { func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } var fileDescriptor_ccfacf933f22bf93 = []byte{ - // 662 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x6f, 0xd3, 0x30, - 0x1c, 0x6d, 0xd8, 0x9f, 0xb6, 0xee, 0xda, 0x0e, 0x8f, 0x43, 0x56, 0x58, 0x5a, 0x26, 0x40, 0x13, - 0x87, 0x54, 0x82, 0x03, 0xe2, 0x82, 0x44, 0x3b, 0x89, 0x55, 0x9a, 0x10, 0x64, 0x68, 0x07, 0x2e, - 0x91, 0xdb, 0x78, 0x89, 0x45, 0x1a, 0x47, 0xb1, 0x3b, 0xe0, 0x03, 0x70, 0xdf, 0x95, 0x6f, 0xb4, - 0xe3, 0x8e, 0x9c, 0x06, 0x74, 0x5f, 0x04, 0xf9, 0x4f, 0x12, 0xb7, 0xe5, 0x30, 0xc4, 0xad, 0xfe, - 0xbd, 0xf7, 0x7b, 0xbf, 0x67, 0xfb, 0x39, 0x05, 0x0f, 0x38, 0x4e, 0x02, 0x9c, 0x4d, 0x49, 0xc2, - 0xfb, 0x8c, 0x23, 0x8e, 0xfb, 0xfc, 0x6b, 0x8a, 0x99, 0x9b, 0x66, 0x94, 0x53, 0xb8, 0x5d, 0xa2, - 0xae, 0x44, 0x3b, 0xf7, 0x42, 0x1a, 0x52, 0x09, 0xf6, 0xc5, 0x2f, 0xc5, 0xeb, 0x98, 0x2a, 0xb2, - 0xdf, 0x54, 0xe9, 0xf4, 0x56, 0xd0, 0x73, 0x14, 0x93, 0x00, 0x71, 0x9a, 0x69, 0xc6, 0xde, 0x0a, - 0x23, 0x45, 0x19, 0x9a, 0xe6, 0x02, 0x8e, 0x01, 0x9f, 0xe3, 0x8c, 0x11, 0x9a, 0x2c, 0x0c, 0xe8, - 0x86, 0x94, 0x86, 0x31, 0xee, 
0xcb, 0xd5, 0x78, 0x76, 0xd6, 0xe7, 0x64, 0x8a, 0x19, 0x47, 0xd3, - 0x54, 0x11, 0xf6, 0xbf, 0x59, 0xa0, 0x75, 0x9a, 0xcf, 0x64, 0xa3, 0xe4, 0x8c, 0xc2, 0x21, 0x68, - 0x16, 0x2e, 0x7c, 0x86, 0xb9, 0x6d, 0xf5, 0xac, 0x83, 0xc6, 0x33, 0xc7, 0x35, 0xb6, 0xac, 0x66, - 0x14, 0x8d, 0x27, 0x98, 0x7b, 0x5b, 0xe7, 0xc6, 0x0a, 0xba, 0x60, 0x27, 0x46, 0x8c, 0xfb, 0x11, - 0x26, 0x61, 0xc4, 0xfd, 0x49, 0x84, 0x92, 0x10, 0x07, 0xf6, 0x9d, 0x9e, 0x75, 0xb0, 0xe6, 0xdd, - 0x15, 0xd0, 0x91, 0x44, 0x86, 0x0a, 0xd8, 0xff, 0x6e, 0x81, 0x9d, 0x21, 0x4d, 0x18, 0x4e, 0xd8, - 0x8c, 0xbd, 0x93, 0x5b, 0x94, 0x66, 0x3c, 0xb0, 0x3d, 0xc9, 0xcb, 0xbe, 0xda, 0xba, 0xf6, 0xf3, - 0x70, 0xd5, 0xcf, 0x92, 0xc0, 0x60, 0xfd, 0xf2, 0xba, 0x5b, 0xf1, 0xda, 0x93, 0xc5, 0xf2, 0x3f, - 0x7b, 0x8b, 0x40, 0xf5, 0x54, 0x9d, 0x2d, 0x7c, 0x0d, 0xea, 0x85, 0x9a, 0xf6, 0xb1, 0x67, 0xfa, - 0xd0, 0x77, 0x50, 0x3a, 0xd1, 0x1e, 0xca, 0x2e, 0xd8, 0x01, 0x35, 0x46, 0xcf, 0xf8, 0x67, 0x94, - 0x61, 0x39, 0xb2, 0xee, 0x15, 0xeb, 0xfd, 0xdf, 0x9b, 0x60, 0xe3, 0x44, 0xa4, 0x09, 0xbe, 0x04, - 0x55, 0xad, 0xa5, 0xc7, 0xec, 0xba, 0xcb, 0x89, 0x73, 0xb5, 0x29, 0x3d, 0x22, 0xe7, 0xc3, 0x27, - 0xa0, 0x36, 0x89, 0x10, 0x49, 0x7c, 0xa2, 0xf6, 0x54, 0x1f, 0x34, 0xe6, 0xd7, 0xdd, 0xea, 0x50, - 0xd4, 0x46, 0x87, 0x5e, 0x55, 0x82, 0xa3, 0x00, 0x3e, 0x06, 0x2d, 0x92, 0x10, 0x4e, 0x50, 0xac, - 0x4f, 0xc2, 0x6e, 0xc9, 0x13, 0x68, 0xea, 0xaa, 0x3a, 0x04, 0xf8, 0x14, 0xc8, 0x23, 0xf1, 0xc7, - 0x31, 0x9d, 0x7c, 0xca, 0x99, 0x6b, 0x92, 0xd9, 0x16, 0xc0, 0x40, 0xd4, 0x35, 0xd7, 0x03, 0x4d, - 0x83, 0x4b, 0x02, 0x7b, 0x7d, 0xd5, 0xbb, 0xba, 0x2a, 0xd9, 0x35, 0x3a, 0x1c, 0xec, 0x08, 0xef, - 0xf3, 0xeb, 0x6e, 0xe3, 0x38, 0x97, 0x1a, 0x1d, 0x7a, 0x8d, 0x42, 0x77, 0x14, 0xc0, 0x63, 0xd0, - 0x36, 0x34, 0x45, 0x7e, 0xed, 0x0d, 0xa9, 0xda, 0x71, 0x55, 0xb8, 0xdd, 0x3c, 0xdc, 0xee, 0x87, - 0x3c, 0xdc, 0x83, 0x9a, 0x90, 0xbd, 0xf8, 0xd9, 0xb5, 0xbc, 0x66, 0xa1, 0x25, 0x50, 0xf8, 0x06, - 0xb4, 0x13, 0xfc, 0x85, 0xfb, 0x45, 0x58, 0x99, 0xbd, 0x79, 0xab, 
0x78, 0xb7, 0x44, 0x5b, 0xf9, - 0x52, 0xe0, 0x2b, 0x00, 0x0c, 0x8d, 0xea, 0xad, 0x34, 0x8c, 0x0e, 0x61, 0x44, 0x6e, 0xcb, 0x10, - 0xa9, 0xdd, 0xce, 0x88, 0x68, 0x33, 0x8c, 0x0c, 0x81, 0x63, 0xa6, 0xb9, 0xd4, 0x2b, 0x82, 0x5d, - 0x97, 0x97, 0x75, 0xbf, 0x0c, 0x76, 0xd9, 0xad, 0x23, 0xfe, 0xd7, 0x67, 0x06, 0xfe, 0xf3, 0x99, - 0xbd, 0x05, 0x8f, 0x16, 0x9e, 0xd9, 0x92, 0x7e, 0x61, 0xaf, 0x21, 0xed, 0xf5, 0x8c, 0x77, 0xb7, - 0x28, 0x94, 0x7b, 0xcc, 0x83, 0x98, 0x61, 0x36, 0x8b, 0x39, 0xf3, 0x23, 0xc4, 0x22, 0x7b, 0xab, - 0x67, 0x1d, 0x6c, 0xa9, 0x20, 0x7a, 0xaa, 0x7e, 0x84, 0x58, 0x04, 0x77, 0x41, 0x0d, 0xa5, 0xa9, - 0xa2, 0x34, 0x25, 0xa5, 0x8a, 0xd2, 0x54, 0x40, 0x83, 0xf7, 0x97, 0x73, 0xc7, 0xba, 0x9a, 0x3b, - 0xd6, 0xaf, 0xb9, 0x63, 0x5d, 0xdc, 0x38, 0x95, 0xab, 0x1b, 0xa7, 0xf2, 0xe3, 0xc6, 0xa9, 0x7c, - 0x7c, 0x11, 0x12, 0x1e, 0xcd, 0xc6, 0xee, 0x84, 0x4e, 0xfb, 0xe6, 0x67, 0xb7, 0xfc, 0xa9, 0x3e, - 0xef, 0xcb, 0x7f, 0x0c, 0xe3, 0x4d, 0x59, 0x7f, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x3f, 0xca, - 0x73, 0xb6, 0x33, 0x06, 0x00, 0x00, + // 749 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0xcd, 0x6e, 0xd3, 0x4a, + 0x14, 0xc7, 0xe3, 0xdb, 0x8f, 0x24, 0x93, 0x26, 0x69, 0xa7, 0x5d, 0xb8, 0xe9, 0xad, 0x93, 0xdb, + 0x7b, 0x2f, 0x8a, 0x58, 0x38, 0x12, 0x2c, 0x10, 0x1b, 0x24, 0x92, 0x4a, 0x34, 0xa2, 0x42, 0xe0, + 0xa2, 0x2e, 0xd8, 0x58, 0x13, 0x7b, 0x62, 0x5b, 0x24, 0x1e, 0xcb, 0x33, 0x29, 0xf0, 0x00, 0xec, + 0xbb, 0xe5, 0x8d, 0xba, 0xec, 0x12, 0xb1, 0x28, 0x28, 0x7d, 0x11, 0x34, 0x1f, 0xb6, 0x27, 0x49, + 0x91, 0x8a, 0xd8, 0xd9, 0xe7, 0x7f, 0xce, 0x6f, 0xfe, 0x73, 0x3c, 0x67, 0x0c, 0xfe, 0x66, 0x38, + 0xf6, 0x71, 0x3a, 0x8d, 0x62, 0xd6, 0xa3, 0x0c, 0x31, 0xdc, 0x63, 0x9f, 0x12, 0x4c, 0xed, 0x24, + 0x25, 0x8c, 0xc0, 0xed, 0x42, 0xb5, 0x85, 0xda, 0xda, 0x0b, 0x48, 0x40, 0x84, 0xd8, 0xe3, 0x4f, + 0x32, 0xaf, 0xa5, 0x53, 0x44, 0xbd, 0x4e, 0x69, 0x1d, 0xac, 0xa8, 0x3e, 0xa2, 0xa1, 0x12, 0x3b, + 0x2b, 0xe2, 0x05, 
0x9a, 0x44, 0x3e, 0x62, 0x24, 0x55, 0x19, 0x87, 0x2b, 0x19, 0x09, 0x4a, 0xd1, + 0x34, 0xa3, 0x5b, 0x9a, 0x7c, 0x81, 0x53, 0x1a, 0x91, 0x78, 0x61, 0xf5, 0x76, 0x40, 0x48, 0x30, + 0xc1, 0x3d, 0xf1, 0x36, 0x9a, 0x8d, 0x7b, 0x2c, 0x9a, 0x62, 0xca, 0xd0, 0x34, 0x91, 0x09, 0x47, + 0x9f, 0x0d, 0xd0, 0x38, 0xcf, 0xd6, 0xa4, 0xc3, 0x78, 0x4c, 0xe0, 0x00, 0xd4, 0x73, 0x17, 0x2e, + 0xc5, 0xcc, 0x34, 0x3a, 0x46, 0xb7, 0xf6, 0xc8, 0xb2, 0xb5, 0x7e, 0xc8, 0x35, 0xf2, 0xc2, 0x33, + 0xcc, 0x9c, 0xad, 0x0b, 0xed, 0x0d, 0xda, 0x60, 0x77, 0x82, 0x28, 0x73, 0x43, 0x1c, 0x05, 0x21, + 0x73, 0xbd, 0x10, 0xc5, 0x01, 0xf6, 0xcd, 0xbf, 0x3a, 0x46, 0x77, 0xcd, 0xd9, 0xe1, 0xd2, 0x89, + 0x50, 0x06, 0x52, 0x38, 0xfa, 0x62, 0x80, 0xdd, 0x01, 0x89, 0x29, 0x8e, 0xe9, 0x8c, 0xbe, 0x16, + 0x5b, 0x14, 0x66, 0x1c, 0xb0, 0xed, 0x65, 0x61, 0x57, 0x6e, 0x5d, 0xf9, 0xf9, 0x67, 0xd5, 0xcf, + 0x12, 0xa0, 0xbf, 0x7e, 0x75, 0xd3, 0x2e, 0x39, 0x4d, 0x6f, 0x31, 0xfc, 0xdb, 0xde, 0x42, 0x50, + 0x3e, 0x97, 0xbd, 0x85, 0xcf, 0x41, 0x35, 0xa7, 0x29, 0x1f, 0x87, 0xba, 0x0f, 0xf5, 0x0d, 0x0a, + 0x27, 0xca, 0x43, 0x51, 0x05, 0x5b, 0xa0, 0x42, 0xc9, 0x98, 0x7d, 0x40, 0x29, 0x16, 0x4b, 0x56, + 0x9d, 0xfc, 0xfd, 0xe8, 0x5b, 0x05, 0x6c, 0x9c, 0xf1, 0xa3, 0x06, 0x9f, 0x82, 0xb2, 0x62, 0xa9, + 0x65, 0xf6, 0xed, 0xe5, 0xe3, 0x68, 0x2b, 0x53, 0x6a, 0x89, 0x2c, 0x1f, 0x3e, 0x00, 0x15, 0x2f, + 0x44, 0x51, 0xec, 0x46, 0x72, 0x4f, 0xd5, 0x7e, 0x6d, 0x7e, 0xd3, 0x2e, 0x0f, 0x78, 0x6c, 0x78, + 0xec, 0x94, 0x85, 0x38, 0xf4, 0xe1, 0xff, 0xa0, 0x11, 0xc5, 0x11, 0x8b, 0xd0, 0x44, 0x75, 0xc2, + 0x6c, 0x88, 0x0e, 0xd4, 0x55, 0x54, 0x36, 0x01, 0x3e, 0x04, 0xa2, 0x25, 0xee, 0x68, 0x42, 0xbc, + 0xf7, 0x59, 0xe6, 0x9a, 0xc8, 0x6c, 0x72, 0xa1, 0xcf, 0xe3, 0x2a, 0xd7, 0x01, 0x75, 0x2d, 0x37, + 0xf2, 0xcd, 0xf5, 0x55, 0xef, 0xf2, 0x53, 0x89, 0xaa, 0xe1, 0x71, 0x7f, 0x97, 0x7b, 0x9f, 0xdf, + 0xb4, 0x6b, 0xa7, 0x19, 0x6a, 0x78, 0xec, 0xd4, 0x72, 0xee, 0xd0, 0xcf, 0x99, 0x62, 0xcf, 0x9c, + 0x39, 0xfe, 0x15, 0x53, 0x74, 0x6e, 0x99, 0xa9, 0x82, 
0x92, 0x29, 0x5f, 0x7c, 0x78, 0x0a, 0x9a, + 0x9a, 0x4f, 0x3e, 0x13, 0xe6, 0x86, 0xa0, 0xb6, 0x6c, 0x39, 0x30, 0x76, 0x36, 0x30, 0xf6, 0xdb, + 0x6c, 0x60, 0xfa, 0x15, 0x8e, 0xbd, 0xfc, 0xde, 0x36, 0x9c, 0x7a, 0xee, 0x8f, 0xab, 0xf0, 0x25, + 0xf8, 0x57, 0xd0, 0x3c, 0x92, 0x62, 0x57, 0xb6, 0x9e, 0x6b, 0xd8, 0x5f, 0xec, 0x99, 0xdf, 0x31, + 0xba, 0x75, 0xc7, 0xe2, 0xa9, 0x03, 0x92, 0x62, 0xf1, 0x3d, 0x4e, 0x45, 0x9e, 0xde, 0xc2, 0x73, + 0xb0, 0x17, 0xe3, 0x8f, 0x2b, 0x30, 0x13, 0x0b, 0x7f, 0xed, 0xbb, 0x0e, 0xbd, 0xc6, 0x12, 0x67, + 0xc1, 0x70, 0x76, 0x38, 0x62, 0x41, 0x80, 0x2f, 0x40, 0x53, 0x70, 0xf3, 0x29, 0xa5, 0xe6, 0xe6, + 0xbd, 0xe6, 0xba, 0xc1, 0xcb, 0x8a, 0x2b, 0x02, 0x3e, 0x03, 0x40, 0x63, 0x94, 0xef, 0xc5, 0xd0, + 0x2a, 0xb8, 0x11, 0xd1, 0x2d, 0x0d, 0x52, 0xb9, 0x9f, 0x11, 0x5e, 0xa6, 0x19, 0x19, 0x00, 0x4b, + 0x1f, 0xe3, 0x82, 0x97, 0x4f, 0x74, 0x55, 0x9c, 0xd2, 0x83, 0x62, 0xa2, 0x8b, 0x6a, 0x35, 0xdb, + 0x77, 0xde, 0x2f, 0xe0, 0x0f, 0xef, 0x97, 0x57, 0xe0, 0xbf, 0x85, 0xfb, 0x65, 0x89, 0x9f, 0xdb, + 0xab, 0x09, 0x7b, 0x1d, 0xed, 0xc2, 0x59, 0x04, 0x65, 0x1e, 0xb3, 0x09, 0x4c, 0x31, 0x9d, 0x4d, + 0x18, 0x75, 0x43, 0x44, 0x43, 0x73, 0xab, 0x63, 0x74, 0xb7, 0xe4, 0x04, 0x3a, 0x32, 0x7e, 0x82, + 0x68, 0x08, 0xf7, 0x41, 0x05, 0x25, 0x89, 0x4c, 0xa9, 0x8b, 0x94, 0x32, 0x4a, 0x12, 0x2e, 0xf5, + 0xdf, 0x5c, 0xcd, 0x2d, 0xe3, 0x7a, 0x6e, 0x19, 0x3f, 0xe6, 0x96, 0x71, 0x79, 0x6b, 0x95, 0xae, + 0x6f, 0xad, 0xd2, 0xd7, 0x5b, 0xab, 0xf4, 0xee, 0x49, 0x10, 0xb1, 0x70, 0x36, 0xb2, 0x3d, 0x32, + 0xed, 0xe9, 0xff, 0x9b, 0xe2, 0x51, 0xfe, 0xf4, 0x96, 0x7f, 0x97, 0xa3, 0x4d, 0x11, 0x7f, 0xfc, + 0x33, 0x00, 0x00, 0xff, 0xff, 0x69, 0xc8, 0x1f, 0xf3, 0x49, 0x07, 0x00, 0x00, } func (m *ValidatorsInfo) Marshal() (dAtA []byte, err error) { @@ -540,6 +569,39 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.LastStateID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xb2 + if m.NextCoreChainLock != nil { + { + size, err := m.NextCoreChainLock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xaa + } + if m.LastCoreChainLockedBlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastCoreChainLockedBlockHeight)) + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa0 + } if m.InitialHeight != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) i-- @@ -615,12 +677,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) - if err8 != nil { - return 0, err8 + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) + if err10 != nil { + return 0, err10 } - i -= n8 - i = encodeVarintTypes(dAtA, i, uint64(n8)) + i -= n10 + i = encodeVarintTypes(dAtA, i, uint64(n10)) i-- dAtA[i] = 0x2a { @@ -764,6 +826,15 @@ func (m *State) Size() (n int) { if m.InitialHeight != 0 { n += 1 + sovTypes(uint64(m.InitialHeight)) } + if m.LastCoreChainLockedBlockHeight != 0 { + n += 2 + sovTypes(uint64(m.LastCoreChainLockedBlockHeight)) + } + if m.NextCoreChainLock != nil { + l = m.NextCoreChainLock.Size() + n += 2 + l + sovTypes(uint64(l)) + } + l = m.LastStateID.Size() + n += 2 + l + sovTypes(uint64(l)) return n } @@ -1540,6 +1611,94 @@ func (m *State) Unmarshal(dAtA []byte) error { break } } + case 100: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCoreChainLockedBlockHeight", wireType) + } + m.LastCoreChainLockedBlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastCoreChainLockedBlockHeight |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 101: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextCoreChainLock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NextCoreChainLock == nil { + m.NextCoreChainLock = &types.CoreChainLock{} + } + if err := m.NextCoreChainLock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 102: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastStateID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastStateID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index fcfbc01f54..ff371cd909 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -69,6 +69,7 @@ const ( // Votes PrevoteType SignedMsgType = 1 PrecommitType SignedMsgType = 2 + CommitType SignedMsgType = 
3 // Proposals ProposalType SignedMsgType = 32 ) @@ -77,6 +78,7 @@ var SignedMsgType_name = map[int32]string{ 0: "SIGNED_MSG_TYPE_UNKNOWN", 1: "SIGNED_MSG_TYPE_PREVOTE", 2: "SIGNED_MSG_TYPE_PRECOMMIT", + 3: "SIGNED_MSG_TYPE_COMMIT", 32: "SIGNED_MSG_TYPE_PROPOSAL", } @@ -84,6 +86,7 @@ var SignedMsgType_value = map[string]int32{ "SIGNED_MSG_TYPE_UNKNOWN": 0, "SIGNED_MSG_TYPE_PREVOTE": 1, "SIGNED_MSG_TYPE_PRECOMMIT": 2, + "SIGNED_MSG_TYPE_COMMIT": 3, "SIGNED_MSG_TYPE_PROPOSAL": 32, } @@ -261,13 +264,67 @@ func (m *BlockID) GetPartSetHeader() PartSetHeader { return PartSetHeader{} } +// StateID +type StateID struct { + LastAppHash []byte `protobuf:"bytes,1,opt,name=last_app_hash,json=lastAppHash,proto3" json:"last_app_hash,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *StateID) Reset() { *m = StateID{} } +func (m *StateID) String() string { return proto.CompactTextString(m) } +func (*StateID) ProtoMessage() {} +func (*StateID) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{3} +} +func (m *StateID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StateID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StateID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StateID) XXX_Merge(src proto.Message) { + xxx_messageInfo_StateID.Merge(m, src) +} +func (m *StateID) XXX_Size() int { + return m.Size() +} +func (m *StateID) XXX_DiscardUnknown() { + xxx_messageInfo_StateID.DiscardUnknown(m) +} + +var xxx_messageInfo_StateID proto.InternalMessageInfo + +func (m *StateID) GetLastAppHash() []byte { + if m != nil { + return m.LastAppHash + } + return nil +} + +func (m *StateID) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + // Header defines the structure of a 
Tendermint block header. type Header struct { // basic block info - Version version.Consensus `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` - ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` + Version version.Consensus `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` + ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + CoreChainLockedHeight uint32 `protobuf:"varint,100,opt,name=core_chain_locked_height,json=coreChainLockedHeight,proto3" json:"core_chain_locked_height,omitempty"` + Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` // prev block info LastBlockId BlockID `protobuf:"bytes,5,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` // hashes of block data @@ -280,15 +337,16 @@ type Header struct { AppHash []byte `protobuf:"bytes,11,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` // consensus info - EvidenceHash []byte `protobuf:"bytes,13,opt,name=evidence_hash,json=evidenceHash,proto3" json:"evidence_hash,omitempty"` - ProposerAddress []byte `protobuf:"bytes,14,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` + EvidenceHash []byte `protobuf:"bytes,13,opt,name=evidence_hash,json=evidenceHash,proto3" json:"evidence_hash,omitempty"` + ProposerProTxHash []byte `protobuf:"bytes,101,opt,name=proposer_pro_tx_hash,json=proposerProTxHash,proto3" json:"proposer_pro_tx_hash,omitempty"` + ProposedAppVersion uint64 
`protobuf:"varint,102,opt,name=proposed_app_version,json=proposedAppVersion,proto3" json:"proposed_app_version,omitempty"` } func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{3} + return fileDescriptor_d3a6e55e2345de56, []int{4} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -338,6 +396,13 @@ func (m *Header) GetHeight() int64 { return 0 } +func (m *Header) GetCoreChainLockedHeight() uint32 { + if m != nil { + return m.CoreChainLockedHeight + } + return 0 +} + func (m *Header) GetTime() time.Time { if m != nil { return m.Time @@ -408,13 +473,20 @@ func (m *Header) GetEvidenceHash() []byte { return nil } -func (m *Header) GetProposerAddress() []byte { +func (m *Header) GetProposerProTxHash() []byte { if m != nil { - return m.ProposerAddress + return m.ProposerProTxHash } return nil } +func (m *Header) GetProposedAppVersion() uint64 { + if m != nil { + return m.ProposedAppVersion + } + return 0 +} + // Data contains the set of transactions included in the block type Data struct { // Txs that will be applied by state @ block.Height+1. @@ -427,7 +499,7 @@ func (m *Data) Reset() { *m = Data{} } func (m *Data) String() string { return proto.CompactTextString(m) } func (*Data) ProtoMessage() {} func (*Data) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{4} + return fileDescriptor_d3a6e55e2345de56, []int{5} } func (m *Data) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -466,29 +538,26 @@ func (m *Data) GetTxs() [][]byte { // Vote represents a prevote, precommit, or commit vote from validators for // consensus. 
type Vote struct { - Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` - Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` - Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` - BlockID BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` - Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - ValidatorAddress []byte `protobuf:"bytes,6,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` - ValidatorIndex int32 `protobuf:"varint,7,opt,name=validator_index,json=validatorIndex,proto3" json:"validator_index,omitempty"` - // Vote signature by the validator if they participated in consensus for the - // associated block. - Signature []byte `protobuf:"bytes,8,opt,name=signature,proto3" json:"signature,omitempty"` + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` + BlockID BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` + ValidatorProTxHash []byte `protobuf:"bytes,6,opt,name=validator_pro_tx_hash,json=validatorProTxHash,proto3" json:"validator_pro_tx_hash,omitempty"` + ValidatorIndex int32 `protobuf:"varint,7,opt,name=validator_index,json=validatorIndex,proto3" json:"validator_index,omitempty"` + BlockSignature []byte `protobuf:"bytes,8,opt,name=block_signature,json=blockSignature,proto3" json:"block_signature,omitempty"` + StateSignature []byte `protobuf:"bytes,10,opt,name=state_signature,json=stateSignature,proto3" json:"state_signature,omitempty"` // Vote extension provided by the application. Only valid for precommit // messages. 
- Extension []byte `protobuf:"bytes,9,opt,name=extension,proto3" json:"extension,omitempty"` // Vote extension signature by the validator if they participated in // consensus for the associated block. Only valid for precommit messages. - ExtensionSignature []byte `protobuf:"bytes,10,opt,name=extension_signature,json=extensionSignature,proto3" json:"extension_signature,omitempty"` + VoteExtensions []*VoteExtension `protobuf:"bytes,11,rep,name=vote_extensions,json=voteExtensions,proto3" json:"vote_extensions,omitempty"` } func (m *Vote) Reset() { *m = Vote{} } func (m *Vote) String() string { return proto.CompactTextString(m) } func (*Vote) ProtoMessage() {} func (*Vote) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{5} + return fileDescriptor_d3a6e55e2345de56, []int{6} } func (m *Vote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -545,16 +614,9 @@ func (m *Vote) GetBlockID() BlockID { return BlockID{} } -func (m *Vote) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - -func (m *Vote) GetValidatorAddress() []byte { +func (m *Vote) GetValidatorProTxHash() []byte { if m != nil { - return m.ValidatorAddress + return m.ValidatorProTxHash } return nil } @@ -566,23 +628,23 @@ func (m *Vote) GetValidatorIndex() int32 { return 0 } -func (m *Vote) GetSignature() []byte { +func (m *Vote) GetBlockSignature() []byte { if m != nil { - return m.Signature + return m.BlockSignature } return nil } -func (m *Vote) GetExtension() []byte { +func (m *Vote) GetStateSignature() []byte { if m != nil { - return m.Extension + return m.StateSignature } return nil } -func (m *Vote) GetExtensionSignature() []byte { +func (m *Vote) GetVoteExtensions() []*VoteExtension { if m != nil { - return m.ExtensionSignature + return m.VoteExtensions } return nil } @@ -590,17 +652,21 @@ func (m *Vote) GetExtensionSignature() []byte { // Commit contains the evidence that a block was committed by a set of // validators. 
type Commit struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` - BlockID BlockID `protobuf:"bytes,3,opt,name=block_id,json=blockId,proto3" json:"block_id"` - Signatures []CommitSig `protobuf:"bytes,4,rep,name=signatures,proto3" json:"signatures"` + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + BlockID BlockID `protobuf:"bytes,3,opt,name=block_id,json=blockId,proto3" json:"block_id"` + StateID StateID `protobuf:"bytes,100,opt,name=state_id,json=stateId,proto3" json:"state_id"` + QuorumHash []byte `protobuf:"bytes,101,opt,name=quorum_hash,json=quorumHash,proto3" json:"quorum_hash,omitempty"` + ThresholdBlockSignature []byte `protobuf:"bytes,102,opt,name=threshold_block_signature,json=thresholdBlockSignature,proto3" json:"threshold_block_signature,omitempty"` + ThresholdStateSignature []byte `protobuf:"bytes,103,opt,name=threshold_state_signature,json=thresholdStateSignature,proto3" json:"threshold_state_signature,omitempty"` + ThresholdVoteExtensions []*VoteExtension `protobuf:"bytes,104,rep,name=threshold_vote_extensions,json=thresholdVoteExtensions,proto3" json:"threshold_vote_extensions,omitempty"` } func (m *Commit) Reset() { *m = Commit{} } func (m *Commit) String() string { return proto.CompactTextString(m) } func (*Commit) ProtoMessage() {} func (*Commit) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{6} + return fileDescriptor_d3a6e55e2345de56, []int{7} } func (m *Commit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -650,253 +716,57 @@ func (m *Commit) GetBlockID() BlockID { return BlockID{} } -func (m *Commit) GetSignatures() []CommitSig { - if m != nil { - return m.Signatures - } - return nil -} - -// CommitSig is a part of the Vote included in a Commit. 
-type CommitSig struct { - BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=tendermint.types.BlockIDFlag" json:"block_id_flag,omitempty"` - ValidatorAddress []byte `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` - Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (m *CommitSig) Reset() { *m = CommitSig{} } -func (m *CommitSig) String() string { return proto.CompactTextString(m) } -func (*CommitSig) ProtoMessage() {} -func (*CommitSig) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{7} -} -func (m *CommitSig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CommitSig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CommitSig.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CommitSig) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitSig.Merge(m, src) -} -func (m *CommitSig) XXX_Size() int { - return m.Size() -} -func (m *CommitSig) XXX_DiscardUnknown() { - xxx_messageInfo_CommitSig.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitSig proto.InternalMessageInfo - -func (m *CommitSig) GetBlockIdFlag() BlockIDFlag { - if m != nil { - return m.BlockIdFlag - } - return BlockIDFlagUnknown -} - -func (m *CommitSig) GetValidatorAddress() []byte { - if m != nil { - return m.ValidatorAddress - } - return nil -} - -func (m *CommitSig) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - -func (m *CommitSig) GetSignature() []byte { - if m != nil { - return m.Signature - } - return nil -} - -type ExtendedCommit struct { - Height int64 
`protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` - BlockID BlockID `protobuf:"bytes,3,opt,name=block_id,json=blockId,proto3" json:"block_id"` - ExtendedSignatures []ExtendedCommitSig `protobuf:"bytes,4,rep,name=extended_signatures,json=extendedSignatures,proto3" json:"extended_signatures"` -} - -func (m *ExtendedCommit) Reset() { *m = ExtendedCommit{} } -func (m *ExtendedCommit) String() string { return proto.CompactTextString(m) } -func (*ExtendedCommit) ProtoMessage() {} -func (*ExtendedCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{8} -} -func (m *ExtendedCommit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExtendedCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExtendedCommit.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExtendedCommit) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtendedCommit.Merge(m, src) -} -func (m *ExtendedCommit) XXX_Size() int { - return m.Size() -} -func (m *ExtendedCommit) XXX_DiscardUnknown() { - xxx_messageInfo_ExtendedCommit.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtendedCommit proto.InternalMessageInfo - -func (m *ExtendedCommit) GetHeight() int64 { - if m != nil { - return m.Height - } - return 0 -} - -func (m *ExtendedCommit) GetRound() int32 { - if m != nil { - return m.Round - } - return 0 -} - -func (m *ExtendedCommit) GetBlockID() BlockID { - if m != nil { - return m.BlockID - } - return BlockID{} -} - -func (m *ExtendedCommit) GetExtendedSignatures() []ExtendedCommitSig { - if m != nil { - return m.ExtendedSignatures - } - return nil -} - -// ExtendedCommitSig retains all the same fields as CommitSig but adds vote -// extension-related fields. 
-type ExtendedCommitSig struct { - BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=tendermint.types.BlockIDFlag" json:"block_id_flag,omitempty"` - ValidatorAddress []byte `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` - Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` - // Vote extension data - Extension []byte `protobuf:"bytes,5,opt,name=extension,proto3" json:"extension,omitempty"` - // Vote extension signature - ExtensionSignature []byte `protobuf:"bytes,6,opt,name=extension_signature,json=extensionSignature,proto3" json:"extension_signature,omitempty"` -} - -func (m *ExtendedCommitSig) Reset() { *m = ExtendedCommitSig{} } -func (m *ExtendedCommitSig) String() string { return proto.CompactTextString(m) } -func (*ExtendedCommitSig) ProtoMessage() {} -func (*ExtendedCommitSig) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{9} -} -func (m *ExtendedCommitSig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExtendedCommitSig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExtendedCommitSig.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExtendedCommitSig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtendedCommitSig.Merge(m, src) -} -func (m *ExtendedCommitSig) XXX_Size() int { - return m.Size() -} -func (m *ExtendedCommitSig) XXX_DiscardUnknown() { - xxx_messageInfo_ExtendedCommitSig.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtendedCommitSig proto.InternalMessageInfo - -func (m *ExtendedCommitSig) GetBlockIdFlag() BlockIDFlag { +func (m *Commit) GetStateID() StateID { if m != nil { 
- return m.BlockIdFlag + return m.StateID } - return BlockIDFlagUnknown + return StateID{} } -func (m *ExtendedCommitSig) GetValidatorAddress() []byte { +func (m *Commit) GetQuorumHash() []byte { if m != nil { - return m.ValidatorAddress + return m.QuorumHash } return nil } -func (m *ExtendedCommitSig) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - -func (m *ExtendedCommitSig) GetSignature() []byte { +func (m *Commit) GetThresholdBlockSignature() []byte { if m != nil { - return m.Signature + return m.ThresholdBlockSignature } return nil } -func (m *ExtendedCommitSig) GetExtension() []byte { +func (m *Commit) GetThresholdStateSignature() []byte { if m != nil { - return m.Extension + return m.ThresholdStateSignature } return nil } -func (m *ExtendedCommitSig) GetExtensionSignature() []byte { +func (m *Commit) GetThresholdVoteExtensions() []*VoteExtension { if m != nil { - return m.ExtensionSignature + return m.ThresholdVoteExtensions } return nil } type Proposal struct { - Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` - Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` - Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` - PolRound int32 `protobuf:"varint,4,opt,name=pol_round,json=polRound,proto3" json:"pol_round,omitempty"` - BlockID BlockID `protobuf:"bytes,5,opt,name=block_id,json=blockId,proto3" json:"block_id"` - Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - Signature []byte `protobuf:"bytes,7,opt,name=signature,proto3" json:"signature,omitempty"` + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + CoreChainLockedHeight uint32 
`protobuf:"varint,100,opt,name=core_chain_locked_height,json=coreChainLockedHeight,proto3" json:"core_chain_locked_height,omitempty"` + Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` + PolRound int32 `protobuf:"varint,4,opt,name=pol_round,json=polRound,proto3" json:"pol_round,omitempty"` + BlockID BlockID `protobuf:"bytes,5,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Signature []byte `protobuf:"bytes,7,opt,name=signature,proto3" json:"signature,omitempty"` } func (m *Proposal) Reset() { *m = Proposal{} } func (m *Proposal) String() string { return proto.CompactTextString(m) } func (*Proposal) ProtoMessage() {} func (*Proposal) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{10} + return fileDescriptor_d3a6e55e2345de56, []int{8} } func (m *Proposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -939,6 +809,13 @@ func (m *Proposal) GetHeight() int64 { return 0 } +func (m *Proposal) GetCoreChainLockedHeight() uint32 { + if m != nil { + return m.CoreChainLockedHeight + } + return 0 +} + func (m *Proposal) GetRound() int32 { if m != nil { return m.Round @@ -983,7 +860,7 @@ func (m *SignedHeader) Reset() { *m = SignedHeader{} } func (m *SignedHeader) String() string { return proto.CompactTextString(m) } func (*SignedHeader) ProtoMessage() {} func (*SignedHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{11} + return fileDescriptor_d3a6e55e2345de56, []int{9} } func (m *SignedHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1035,7 +912,7 @@ func (m *LightBlock) Reset() { *m = LightBlock{} } func (m *LightBlock) String() string { return proto.CompactTextString(m) } func (*LightBlock) ProtoMessage() {} func (*LightBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{12} + return fileDescriptor_d3a6e55e2345de56, 
[]int{10} } func (m *LightBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1079,17 +956,18 @@ func (m *LightBlock) GetValidatorSet() *ValidatorSet { } type BlockMeta struct { - BlockID BlockID `protobuf:"bytes,1,opt,name=block_id,json=blockId,proto3" json:"block_id"` - BlockSize int64 `protobuf:"varint,2,opt,name=block_size,json=blockSize,proto3" json:"block_size,omitempty"` - Header Header `protobuf:"bytes,3,opt,name=header,proto3" json:"header"` - NumTxs int64 `protobuf:"varint,4,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"` + BlockID BlockID `protobuf:"bytes,1,opt,name=block_id,json=blockId,proto3" json:"block_id"` + BlockSize int64 `protobuf:"varint,2,opt,name=block_size,json=blockSize,proto3" json:"block_size,omitempty"` + Header Header `protobuf:"bytes,3,opt,name=header,proto3" json:"header"` + NumTxs int64 `protobuf:"varint,4,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"` + HasCoreChainLock bool `protobuf:"varint,100,opt,name=has_core_chain_lock,json=hasCoreChainLock,proto3" json:"has_core_chain_lock,omitempty"` } func (m *BlockMeta) Reset() { *m = BlockMeta{} } func (m *BlockMeta) String() string { return proto.CompactTextString(m) } func (*BlockMeta) ProtoMessage() {} func (*BlockMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{13} + return fileDescriptor_d3a6e55e2345de56, []int{11} } func (m *BlockMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1146,6 +1024,13 @@ func (m *BlockMeta) GetNumTxs() int64 { return 0 } +func (m *BlockMeta) GetHasCoreChainLock() bool { + if m != nil { + return m.HasCoreChainLock + } + return false +} + // TxProof represents a Merkle proof of the presence of a transaction in the // Merkle tree. 
type TxProof struct { @@ -1158,7 +1043,7 @@ func (m *TxProof) Reset() { *m = TxProof{} } func (m *TxProof) String() string { return proto.CompactTextString(m) } func (*TxProof) ProtoMessage() {} func (*TxProof) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{14} + return fileDescriptor_d3a6e55e2345de56, []int{12} } func (m *TxProof) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1214,13 +1099,11 @@ func init() { proto.RegisterType((*PartSetHeader)(nil), "tendermint.types.PartSetHeader") proto.RegisterType((*Part)(nil), "tendermint.types.Part") proto.RegisterType((*BlockID)(nil), "tendermint.types.BlockID") + proto.RegisterType((*StateID)(nil), "tendermint.types.StateID") proto.RegisterType((*Header)(nil), "tendermint.types.Header") proto.RegisterType((*Data)(nil), "tendermint.types.Data") proto.RegisterType((*Vote)(nil), "tendermint.types.Vote") proto.RegisterType((*Commit)(nil), "tendermint.types.Commit") - proto.RegisterType((*CommitSig)(nil), "tendermint.types.CommitSig") - proto.RegisterType((*ExtendedCommit)(nil), "tendermint.types.ExtendedCommit") - proto.RegisterType((*ExtendedCommitSig)(nil), "tendermint.types.ExtendedCommitSig") proto.RegisterType((*Proposal)(nil), "tendermint.types.Proposal") proto.RegisterType((*SignedHeader)(nil), "tendermint.types.SignedHeader") proto.RegisterType((*LightBlock)(nil), "tendermint.types.LightBlock") @@ -1231,95 +1114,101 @@ func init() { func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } var fileDescriptor_d3a6e55e2345de56 = []byte{ - // 1396 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x4b, 0x6f, 0x1b, 0xd5, - 0x17, 0xcf, 0xd8, 0xe3, 0xd7, 0xb1, 0x9d, 0x38, 0xf7, 0x9f, 0xb6, 0xae, 0xdb, 0x38, 0x96, 0xab, - 0x3f, 0xa4, 0x05, 0x39, 0x25, 0x45, 0x3c, 0x16, 0x2c, 0x6c, 0xc7, 0x6d, 0xad, 0x26, 0x8e, 0x19, - 0xbb, 0x45, 0x74, 0x33, 0x1a, 0x7b, 0x6e, 0xed, 0xa1, 0xf6, 
0xcc, 0x68, 0xe6, 0x3a, 0x38, 0xfd, - 0x04, 0x28, 0xab, 0xae, 0xd8, 0x65, 0x05, 0x0b, 0xf6, 0x20, 0xb1, 0x45, 0xac, 0xba, 0xec, 0x0e, - 0x36, 0x14, 0x48, 0x25, 0x3e, 0x07, 0xba, 0x8f, 0x19, 0xcf, 0xc4, 0x31, 0x54, 0x51, 0x05, 0x12, - 0x9b, 0x68, 0xee, 0x39, 0xbf, 0x73, 0xee, 0x79, 0xfc, 0xee, 0xc9, 0x31, 0x5c, 0x25, 0xd8, 0xd4, - 0xb1, 0x33, 0x36, 0x4c, 0xb2, 0x45, 0x0e, 0x6d, 0xec, 0xf2, 0xbf, 0x15, 0xdb, 0xb1, 0x88, 0x85, - 0x72, 0x33, 0x6d, 0x85, 0xc9, 0x0b, 0x6b, 0x03, 0x6b, 0x60, 0x31, 0xe5, 0x16, 0xfd, 0xe2, 0xb8, - 0xc2, 0xc6, 0xc0, 0xb2, 0x06, 0x23, 0xbc, 0xc5, 0x4e, 0xbd, 0xc9, 0xa3, 0x2d, 0x62, 0x8c, 0xb1, - 0x4b, 0xb4, 0xb1, 0x2d, 0x00, 0xeb, 0x81, 0x6b, 0xfa, 0xce, 0xa1, 0x4d, 0x2c, 0x8a, 0xb5, 0x1e, - 0x09, 0x75, 0x31, 0xa0, 0x3e, 0xc0, 0x8e, 0x6b, 0x58, 0x66, 0x30, 0x8e, 0x42, 0x69, 0x2e, 0xca, - 0x03, 0x6d, 0x64, 0xe8, 0x1a, 0xb1, 0x1c, 0x8e, 0x28, 0x7f, 0x08, 0xd9, 0xb6, 0xe6, 0x90, 0x0e, - 0x26, 0x77, 0xb1, 0xa6, 0x63, 0x07, 0xad, 0x41, 0x8c, 0x58, 0x44, 0x1b, 0xe5, 0xa5, 0x92, 0xb4, - 0x99, 0x55, 0xf8, 0x01, 0x21, 0x90, 0x87, 0x9a, 0x3b, 0xcc, 0x47, 0x4a, 0xd2, 0x66, 0x46, 0x61, - 0xdf, 0xe5, 0x21, 0xc8, 0xd4, 0x94, 0x5a, 0x18, 0xa6, 0x8e, 0xa7, 0x9e, 0x05, 0x3b, 0x50, 0x69, - 0xef, 0x90, 0x60, 0x57, 0x98, 0xf0, 0x03, 0x7a, 0x17, 0x62, 0x2c, 0xfe, 0x7c, 0xb4, 0x24, 0x6d, - 0xa6, 0xb7, 0xf3, 0x95, 0x40, 0xa1, 0x78, 0x7e, 0x95, 0x36, 0xd5, 0xd7, 0xe4, 0x67, 0x2f, 0x36, - 0x96, 0x14, 0x0e, 0x2e, 0x8f, 0x20, 0x51, 0x1b, 0x59, 0xfd, 0xc7, 0xcd, 0x1d, 0x3f, 0x10, 0x69, - 0x16, 0x08, 0xda, 0x83, 0x15, 0x5b, 0x73, 0x88, 0xea, 0x62, 0xa2, 0x0e, 0x59, 0x16, 0xec, 0xd2, - 0xf4, 0xf6, 0x46, 0xe5, 0x74, 0x1f, 0x2a, 0xa1, 0x64, 0xc5, 0x2d, 0x59, 0x3b, 0x28, 0x2c, 0xff, - 0x21, 0x43, 0x5c, 0x14, 0xe3, 0x23, 0x48, 0x88, 0xb2, 0xb2, 0x0b, 0xd3, 0xdb, 0xeb, 0x41, 0x8f, - 0x42, 0x55, 0xa9, 0x5b, 0xa6, 0x8b, 0x4d, 0x77, 0xe2, 0x0a, 0x7f, 0x9e, 0x0d, 0x7a, 0x03, 0x92, - 0xfd, 0xa1, 0x66, 0x98, 0xaa, 0xa1, 0xb3, 0x88, 0x52, 0xb5, 0xf4, 0xc9, 0x8b, 0x8d, 0x44, 0x9d, - 
0xca, 0x9a, 0x3b, 0x4a, 0x82, 0x29, 0x9b, 0x3a, 0xba, 0x08, 0xf1, 0x21, 0x36, 0x06, 0x43, 0xc2, - 0xca, 0x12, 0x55, 0xc4, 0x09, 0x7d, 0x00, 0x32, 0x25, 0x44, 0x5e, 0x66, 0x77, 0x17, 0x2a, 0x9c, - 0x2d, 0x15, 0x8f, 0x2d, 0x95, 0xae, 0xc7, 0x96, 0x5a, 0x92, 0x5e, 0xfc, 0xf4, 0xd7, 0x0d, 0x49, - 0x61, 0x16, 0xa8, 0x0e, 0xd9, 0x91, 0xe6, 0x12, 0xb5, 0x47, 0xcb, 0x46, 0xaf, 0x8f, 0x31, 0x17, - 0x97, 0xe7, 0x0b, 0x22, 0x0a, 0x2b, 0x42, 0x4f, 0x53, 0x2b, 0x2e, 0xd2, 0xd1, 0x26, 0xe4, 0x98, - 0x93, 0xbe, 0x35, 0x1e, 0x1b, 0x44, 0x65, 0x75, 0x8f, 0xb3, 0xba, 0x2f, 0x53, 0x79, 0x9d, 0x89, - 0xef, 0xd2, 0x0e, 0x5c, 0x81, 0x94, 0xae, 0x11, 0x8d, 0x43, 0x12, 0x0c, 0x92, 0xa4, 0x02, 0xa6, - 0x7c, 0x13, 0x56, 0x7c, 0xd6, 0xb9, 0x1c, 0x92, 0xe4, 0x5e, 0x66, 0x62, 0x06, 0xbc, 0x09, 0x6b, - 0x26, 0x9e, 0x12, 0xf5, 0x34, 0x3a, 0xc5, 0xd0, 0x88, 0xea, 0x1e, 0x84, 0x2d, 0xfe, 0x0f, 0xcb, - 0x7d, 0xaf, 0xf8, 0x1c, 0x0b, 0x0c, 0x9b, 0xf5, 0xa5, 0x0c, 0x76, 0x19, 0x92, 0x9a, 0x6d, 0x73, - 0x40, 0x9a, 0x01, 0x12, 0x9a, 0x6d, 0x33, 0xd5, 0x0d, 0x58, 0x65, 0x39, 0x3a, 0xd8, 0x9d, 0x8c, - 0x88, 0x70, 0x92, 0x61, 0x98, 0x15, 0xaa, 0x50, 0xb8, 0x9c, 0x61, 0xaf, 0x41, 0x16, 0x1f, 0x18, - 0x3a, 0x36, 0xfb, 0x98, 0xe3, 0xb2, 0x0c, 0x97, 0xf1, 0x84, 0x0c, 0x74, 0x1d, 0x72, 0xb6, 0x63, - 0xd9, 0x96, 0x8b, 0x1d, 0x55, 0xd3, 0x75, 0x07, 0xbb, 0x6e, 0x7e, 0x99, 0xfb, 0xf3, 0xe4, 0x55, - 0x2e, 0x2e, 0xe7, 0x41, 0xde, 0xd1, 0x88, 0x86, 0x72, 0x10, 0x25, 0x53, 0x37, 0x2f, 0x95, 0xa2, - 0x9b, 0x19, 0x85, 0x7e, 0x96, 0xbf, 0x8f, 0x82, 0xfc, 0xc0, 0x22, 0x18, 0xdd, 0x02, 0x99, 0xb6, - 0x89, 0xb1, 0x6f, 0xf9, 0x2c, 0x3e, 0x77, 0x8c, 0x81, 0x89, 0xf5, 0x3d, 0x77, 0xd0, 0x3d, 0xb4, - 0xb1, 0xc2, 0xc0, 0x01, 0x3a, 0x45, 0x42, 0x74, 0x5a, 0x83, 0x98, 0x63, 0x4d, 0x4c, 0x9d, 0xb1, - 0x2c, 0xa6, 0xf0, 0x03, 0x6a, 0x40, 0xd2, 0x67, 0x89, 0xfc, 0x77, 0x2c, 0x59, 0xa1, 0x2c, 0xa1, - 0x1c, 0x16, 0x02, 0x25, 0xd1, 0x13, 0x64, 0xa9, 0x41, 0xca, 0x1f, 0x5e, 0x82, 0x6d, 0xaf, 0x46, - 0xd8, 0x99, 0x19, 0x7a, 0x0b, 0x56, 
0xfd, 0xde, 0xfb, 0xc5, 0xe3, 0x8c, 0xcb, 0xf9, 0x0a, 0x51, - 0xbd, 0x10, 0xad, 0x54, 0x3e, 0x80, 0x12, 0x2c, 0xaf, 0x19, 0xad, 0x9a, 0x6c, 0x12, 0x5d, 0x85, - 0x94, 0x6b, 0x0c, 0x4c, 0x8d, 0x4c, 0x1c, 0x2c, 0x98, 0x37, 0x13, 0x50, 0x2d, 0x9e, 0x12, 0x6c, - 0xb2, 0x47, 0xce, 0x99, 0x36, 0x13, 0xa0, 0x2d, 0xf8, 0x9f, 0x7f, 0x50, 0x67, 0x5e, 0x38, 0xcb, - 0x90, 0xaf, 0xea, 0x78, 0x9a, 0xf2, 0x0f, 0x12, 0xc4, 0xf9, 0xc3, 0x08, 0xb4, 0x41, 0x3a, 0xbb, - 0x0d, 0x91, 0x45, 0x6d, 0x88, 0x9e, 0xbf, 0x0d, 0x55, 0x00, 0x3f, 0x4c, 0x37, 0x2f, 0x97, 0xa2, - 0x9b, 0xe9, 0xed, 0x2b, 0xf3, 0x8e, 0x78, 0x88, 0x1d, 0x63, 0x20, 0xde, 0x7d, 0xc0, 0xa8, 0xfc, - 0x8b, 0x04, 0x29, 0x5f, 0x8f, 0xaa, 0x90, 0xf5, 0xe2, 0x52, 0x1f, 0x8d, 0xb4, 0x81, 0xa0, 0xe2, - 0xfa, 0xc2, 0xe0, 0x6e, 0x8f, 0xb4, 0x81, 0x92, 0x16, 0xf1, 0xd0, 0xc3, 0xd9, 0x6d, 0x8d, 0x2c, - 0x68, 0x6b, 0x88, 0x47, 0xd1, 0xf3, 0xf1, 0x28, 0xd4, 0x71, 0xf9, 0x54, 0xc7, 0xcb, 0xbf, 0x4b, - 0xb0, 0xdc, 0x98, 0xb2, 0xf0, 0xf5, 0x7f, 0xb3, 0x55, 0x0f, 0x05, 0xb7, 0x74, 0xac, 0xab, 0x73, - 0x3d, 0xbb, 0x36, 0xef, 0x31, 0x1c, 0xf3, 0xac, 0x77, 0xc8, 0xf3, 0xd2, 0x99, 0xf5, 0xf0, 0xbb, - 0x08, 0xac, 0xce, 0xe1, 0xff, 0x7b, 0xbd, 0x0c, 0xbf, 0xde, 0xd8, 0x2b, 0xbe, 0xde, 0xf8, 0xc2, - 0xd7, 0xfb, 0x6d, 0x04, 0x92, 0x6d, 0x36, 0xa5, 0xb5, 0xd1, 0x3f, 0x31, 0x7b, 0xaf, 0x40, 0xca, - 0xb6, 0x46, 0x2a, 0xd7, 0xc8, 0x4c, 0x93, 0xb4, 0xad, 0x91, 0x32, 0x47, 0xb3, 0xd8, 0x6b, 0x1a, - 0xcc, 0xf1, 0xd7, 0xd0, 0x84, 0xc4, 0xe9, 0x07, 0xe5, 0x40, 0x86, 0x97, 0x42, 0x6c, 0x4d, 0x37, - 0x69, 0x0d, 0xd8, 0x1a, 0x26, 0xcd, 0x6f, 0x79, 0x3c, 0x6c, 0x8e, 0x54, 0x04, 0x8e, 0x5a, 0xf0, - 0x25, 0x43, 0x2c, 0x6e, 0xf9, 0x45, 0x13, 0x4b, 0x11, 0xb8, 0xf2, 0x97, 0x12, 0xc0, 0x2e, 0xad, - 0x2c, 0xcb, 0x97, 0xee, 0x3b, 0x2e, 0x0b, 0x41, 0x0d, 0xdd, 0x5c, 0x5c, 0xd4, 0x34, 0x71, 0x7f, - 0xc6, 0x0d, 0xc6, 0x5d, 0x87, 0xec, 0x8c, 0xdb, 0x2e, 0xf6, 0x82, 0x39, 0xc3, 0x89, 0xbf, 0x86, - 0x74, 0x30, 0x51, 0x32, 0x07, 0x81, 0x53, 0xf9, 0x47, 0x09, 0x52, 0x2c, 
0xa6, 0x3d, 0x4c, 0xb4, - 0x50, 0x0f, 0xa5, 0xf3, 0xf7, 0x70, 0x1d, 0x80, 0xbb, 0x71, 0x8d, 0x27, 0x58, 0x30, 0x2b, 0xc5, - 0x24, 0x1d, 0xe3, 0x09, 0x46, 0xef, 0xf9, 0x05, 0x8f, 0xfe, 0x75, 0xc1, 0xc5, 0xc4, 0xf0, 0xca, - 0x7e, 0x09, 0x12, 0xe6, 0x64, 0xac, 0xd2, 0xe5, 0x43, 0xe6, 0x6c, 0x35, 0x27, 0xe3, 0xee, 0xd4, - 0x2d, 0x7f, 0x06, 0x89, 0xee, 0x94, 0x2d, 0xe2, 0x94, 0xa2, 0x8e, 0x65, 0x89, 0xed, 0x8f, 0x6f, - 0xdd, 0x49, 0x2a, 0x60, 0xcb, 0x0e, 0x02, 0x99, 0xae, 0x79, 0xde, 0xcf, 0x02, 0xfa, 0x8d, 0x2a, - 0xaf, 0xb8, 0xe2, 0x8b, 0xe5, 0xfe, 0xc6, 0x4f, 0x12, 0xa4, 0x03, 0xe3, 0x06, 0xbd, 0x03, 0x17, - 0x6a, 0xbb, 0xfb, 0xf5, 0x7b, 0x6a, 0x73, 0x47, 0xbd, 0xbd, 0x5b, 0xbd, 0xa3, 0xde, 0x6f, 0xdd, - 0x6b, 0xed, 0x7f, 0xd2, 0xca, 0x2d, 0x15, 0x2e, 0x1e, 0x1d, 0x97, 0x50, 0x00, 0x7b, 0xdf, 0x7c, - 0x6c, 0x5a, 0x9f, 0xd3, 0x77, 0xbe, 0x16, 0x36, 0xa9, 0xd6, 0x3a, 0x8d, 0x56, 0x37, 0x27, 0x15, - 0x2e, 0x1c, 0x1d, 0x97, 0x56, 0x03, 0x16, 0xd5, 0x9e, 0x8b, 0x4d, 0x32, 0x6f, 0x50, 0xdf, 0xdf, - 0xdb, 0x6b, 0x76, 0x73, 0x91, 0x39, 0x03, 0xf1, 0x0f, 0xe2, 0x3a, 0xac, 0x86, 0x0d, 0x5a, 0xcd, - 0xdd, 0x5c, 0xb4, 0x80, 0x8e, 0x8e, 0x4b, 0xcb, 0x01, 0x74, 0xcb, 0x18, 0x15, 0x92, 0x5f, 0x7c, - 0x55, 0x5c, 0xfa, 0xe6, 0xeb, 0xa2, 0x44, 0x33, 0xcb, 0x86, 0x66, 0x04, 0x7a, 0x1b, 0x2e, 0x75, - 0x9a, 0x77, 0x5a, 0x8d, 0x1d, 0x75, 0xaf, 0x73, 0x47, 0xed, 0x7e, 0xda, 0x6e, 0x04, 0xb2, 0x5b, - 0x39, 0x3a, 0x2e, 0xa5, 0x45, 0x4a, 0x8b, 0xd0, 0x6d, 0xa5, 0xf1, 0x60, 0xbf, 0xdb, 0xc8, 0x49, - 0x1c, 0xdd, 0x76, 0xf0, 0x81, 0x45, 0x30, 0x43, 0xdf, 0x84, 0xcb, 0x67, 0xa0, 0xfd, 0xc4, 0x56, - 0x8f, 0x8e, 0x4b, 0xd9, 0xb6, 0x83, 0xf9, 0xfb, 0x61, 0x16, 0x15, 0xc8, 0xcf, 0x5b, 0xec, 0xb7, - 0xf7, 0x3b, 0xd5, 0xdd, 0x5c, 0xa9, 0x90, 0x3b, 0x3a, 0x2e, 0x65, 0xbc, 0x61, 0x48, 0xf1, 0xb3, - 0xcc, 0x6a, 0x1f, 0x3f, 0x3b, 0x29, 0x4a, 0xcf, 0x4f, 0x8a, 0xd2, 0x6f, 0x27, 0x45, 0xe9, 0xe9, - 0xcb, 0xe2, 0xd2, 0xf3, 0x97, 0xc5, 0xa5, 0x9f, 0x5f, 0x16, 0x97, 0x1e, 0xbe, 0x3f, 0x30, 0xc8, - 0x70, 0xd2, 
0xab, 0xf4, 0xad, 0xf1, 0x56, 0xf0, 0xc7, 0xe7, 0xec, 0x93, 0xff, 0x08, 0x3e, 0xfd, - 0xc3, 0xb4, 0x17, 0x67, 0xf2, 0x5b, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0x46, 0xcf, 0x37, 0x28, - 0x59, 0x0f, 0x00, 0x00, + // 1498 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x73, 0x1a, 0x47, + 0x16, 0xd7, 0x08, 0x24, 0xe0, 0x01, 0x12, 0x9a, 0x95, 0x6d, 0x84, 0x6d, 0x34, 0xc5, 0xd6, 0xae, + 0xb5, 0xaa, 0x5d, 0xe4, 0xb5, 0xb7, 0xd6, 0xbb, 0x5b, 0xb5, 0x07, 0x81, 0xb0, 0x45, 0x59, 0x7f, + 0xc8, 0x80, 0x95, 0x4a, 0x72, 0x98, 0x1a, 0x31, 0x2d, 0x98, 0x18, 0xa6, 0x27, 0x33, 0x8d, 0x82, + 0x7c, 0xcd, 0x25, 0xa5, 0x93, 0x4f, 0xb9, 0xe9, 0x94, 0x1c, 0xfc, 0x19, 0xf2, 0x09, 0x7c, 0xf4, + 0xcd, 0x39, 0x39, 0x29, 0xf9, 0x9a, 0x4b, 0xbe, 0x41, 0xaa, 0x5f, 0xf7, 0x30, 0xc3, 0x1f, 0x25, + 0x2e, 0x55, 0x2e, 0xd4, 0xf4, 0x7b, 0xbf, 0xd7, 0xfd, 0xfa, 0xfd, 0x7e, 0xfd, 0xba, 0x81, 0x3b, + 0x8c, 0x38, 0x16, 0xf1, 0xfa, 0xb6, 0xc3, 0xb6, 0xd8, 0x99, 0x4b, 0x7c, 0xf1, 0x5b, 0x76, 0x3d, + 0xca, 0xa8, 0x9a, 0x0b, 0xbd, 0x65, 0xb4, 0x17, 0x56, 0x3b, 0xb4, 0x43, 0xd1, 0xb9, 0xc5, 0xbf, + 0x04, 0xae, 0xb0, 0xde, 0xa1, 0xb4, 0xd3, 0x23, 0x5b, 0x38, 0x3a, 0x1e, 0x9c, 0x6c, 0x31, 0xbb, + 0x4f, 0x7c, 0x66, 0xf6, 0x5d, 0x09, 0xb8, 0x1b, 0x59, 0xa6, 0xed, 0x9d, 0xb9, 0x8c, 0x72, 0x2c, + 0x3d, 0x91, 0xee, 0x62, 0xc4, 0x7d, 0x4a, 0x3c, 0xdf, 0xa6, 0x4e, 0x34, 0x8f, 0x82, 0x36, 0x95, + 0xe5, 0xa9, 0xd9, 0xb3, 0x2d, 0x93, 0x51, 0x4f, 0x22, 0x6e, 0x4f, 0x21, 0x2c, 0xd3, 0xef, 0x0a, + 0x67, 0xe9, 0xbf, 0x90, 0x6d, 0x98, 0x1e, 0x6b, 0x12, 0xb6, 0x4b, 0x4c, 0x8b, 0x78, 0xea, 0x2a, + 0x2c, 0x30, 0xca, 0xcc, 0x5e, 0x5e, 0xd1, 0x94, 0x8d, 0xac, 0x2e, 0x06, 0xaa, 0x0a, 0xf1, 0xae, + 0xe9, 0x77, 0xf3, 0xf3, 0x9a, 0xb2, 0x91, 0xd1, 0xf1, 0xbb, 0xd4, 0x85, 0x38, 0x0f, 0xe5, 0x11, + 0xb6, 0x63, 0x91, 0x61, 0x10, 0x81, 0x03, 0x6e, 0x3d, 0x3e, 0x63, 0xc4, 0x97, 0x21, 0x62, 0xa0, + 0xfe, 0x0b, 0x16, 0x70, 0x73, 0xf9, 0x98, 0xa6, 0x6c, 0xa4, 0x1f, 0xe4, 
0xcb, 0x91, 0x2a, 0x8a, + 0xcd, 0x97, 0x1b, 0xdc, 0x5f, 0x89, 0xbf, 0x7e, 0xb7, 0x3e, 0xa7, 0x0b, 0x70, 0xa9, 0x07, 0x89, + 0x4a, 0x8f, 0xb6, 0x9f, 0xd7, 0x77, 0x46, 0x89, 0x28, 0x61, 0x22, 0xea, 0x3e, 0x2c, 0xbb, 0xa6, + 0xc7, 0x0c, 0x9f, 0x30, 0xa3, 0x8b, 0xbb, 0xc0, 0x45, 0xd3, 0x0f, 0xd6, 0xcb, 0x93, 0x24, 0x95, + 0xc7, 0x36, 0x2b, 0x57, 0xc9, 0xba, 0x51, 0x63, 0xa9, 0x06, 0x89, 0x26, 0x33, 0x19, 0xa9, 0xef, + 0xa8, 0x25, 0xc8, 0xf6, 0x4c, 0x9f, 0x19, 0xa6, 0xeb, 0x1a, 0x91, 0x65, 0xd3, 0xdc, 0xb8, 0xed, + 0xba, 0xbb, 0x7c, 0xf5, 0x9b, 0xb0, 0xd8, 0x25, 0x76, 0xa7, 0xcb, 0x70, 0xd1, 0x98, 0x2e, 0x47, + 0xa5, 0xb7, 0x0b, 0xb0, 0x28, 0x6b, 0xfa, 0x7f, 0x48, 0x48, 0xea, 0x70, 0x82, 0xf4, 0x83, 0xbb, + 0xd1, 0xc4, 0xa4, 0xab, 0x5c, 0xa5, 0x8e, 0x4f, 0x1c, 0x7f, 0xe0, 0xcb, 0xb4, 0x82, 0x18, 0xf5, + 0xaf, 0x90, 0x6c, 0x77, 0x4d, 0xdb, 0x31, 0x6c, 0x0b, 0xd7, 0x48, 0x55, 0xd2, 0x97, 0xef, 0xd6, + 0x13, 0x55, 0x6e, 0xab, 0xef, 0xe8, 0x09, 0x74, 0xd6, 0xad, 0x48, 0x26, 0xb1, 0x68, 0x26, 0xea, + 0x23, 0xc8, 0xb7, 0xa9, 0x47, 0x0c, 0x31, 0x09, 0x2f, 0x24, 0xb1, 0x0c, 0x89, 0xb4, 0x90, 0xb3, + 0x1b, 0xdc, 0x8f, 0xf3, 0xed, 0xa1, 0x77, 0x57, 0x04, 0xfe, 0x07, 0xe2, 0x5c, 0xad, 0xf9, 0x38, + 0x26, 0x5d, 0x28, 0x0b, 0x29, 0x97, 0x03, 0x29, 0x97, 0x5b, 0x81, 0x94, 0x2b, 0x49, 0x9e, 0xf1, + 0xcb, 0x1f, 0xd7, 0x15, 0x1d, 0x23, 0xd4, 0xaa, 0x2c, 0xdc, 0x31, 0x5f, 0x8d, 0xe7, 0xbd, 0x80, + 0x53, 0xac, 0x4d, 0x13, 0x22, 0x89, 0x95, 0x7b, 0xc6, 0xca, 0x0a, 0x93, 0xa5, 0x6e, 0x40, 0x0e, + 0x27, 0x69, 0xd3, 0x7e, 0xdf, 0x66, 0x82, 0x80, 0x45, 0x24, 0x60, 0x89, 0xdb, 0xab, 0x68, 0x46, + 0x0e, 0x6e, 0x43, 0xca, 0x32, 0x99, 0x29, 0x20, 0x09, 0x84, 0x24, 0xb9, 0x01, 0x9d, 0xf7, 0x60, + 0x79, 0x74, 0x24, 0x7c, 0x01, 0x49, 0x8a, 0x59, 0x42, 0x33, 0x02, 0xef, 0xc3, 0xaa, 0x43, 0x86, + 0xcc, 0x98, 0x44, 0xa7, 0x10, 0xad, 0x72, 0xdf, 0xd1, 0x78, 0xc4, 0x5f, 0x60, 0xa9, 0x1d, 0xb0, + 0x26, 0xb0, 0x80, 0xd8, 0xec, 0xc8, 0x8a, 0xb0, 0x35, 0x48, 0x8e, 0x14, 0x94, 0x46, 0x40, 0xc2, + 0x94, 0xea, 
0xd9, 0x84, 0x15, 0xdc, 0xa3, 0x47, 0xfc, 0x41, 0x8f, 0xc9, 0x49, 0x32, 0x88, 0x59, + 0xe6, 0x0e, 0x5d, 0xd8, 0x11, 0xfb, 0x67, 0xc8, 0x92, 0x53, 0xdb, 0x22, 0x4e, 0x9b, 0x08, 0x5c, + 0x16, 0x71, 0x99, 0xc0, 0x88, 0xa0, 0x2d, 0x58, 0x75, 0x3d, 0xea, 0x52, 0x9f, 0x78, 0x86, 0xeb, + 0x51, 0x83, 0x0d, 0x05, 0x96, 0x20, 0x76, 0x25, 0xf0, 0x35, 0x3c, 0xda, 0x1a, 0x06, 0xbb, 0x96, + 0x46, 0x0b, 0x75, 0x1e, 0x28, 0xf5, 0x44, 0x53, 0x36, 0xe2, 0xba, 0x1a, 0xf8, 0xb6, 0x5d, 0xf7, + 0x48, 0x78, 0x4a, 0x79, 0x88, 0xef, 0x98, 0xcc, 0x54, 0x73, 0x10, 0x63, 0x43, 0x3f, 0xaf, 0x68, + 0xb1, 0x8d, 0x8c, 0xce, 0x3f, 0x4b, 0xaf, 0x62, 0x10, 0x3f, 0xa2, 0x8c, 0xa8, 0x0f, 0x21, 0xce, + 0xe9, 0x45, 0xb9, 0x2f, 0xcd, 0x3a, 0x87, 0x4d, 0xbb, 0xe3, 0x10, 0x6b, 0xdf, 0xef, 0xb4, 0xce, + 0x5c, 0xa2, 0x23, 0xf8, 0xaa, 0x93, 0xc4, 0x5b, 0x89, 0x47, 0x07, 0x8e, 0x85, 0xb2, 0x5e, 0xd0, + 0xc5, 0x40, 0xad, 0x41, 0x72, 0xa4, 0xae, 0xf8, 0xef, 0xa9, 0x6b, 0x99, 0xab, 0x8b, 0x1f, 0x1a, + 0x69, 0xd0, 0x13, 0xc7, 0x52, 0x64, 0xff, 0x84, 0x1b, 0x23, 0xbe, 0xc7, 0x0a, 0x26, 0x94, 0xa6, + 0x8e, 0x9c, 0x61, 0xc5, 0xa2, 0x82, 0x32, 0x44, 0xeb, 0x4b, 0x60, 0x66, 0xa1, 0xa0, 0xea, 0xd8, + 0x03, 0xef, 0xc1, 0xb2, 0x48, 0xd1, 0xb7, 0x3b, 0x8e, 0xc9, 0x06, 0x1e, 0x09, 0x94, 0x87, 0xe6, + 0x66, 0x60, 0xe5, 0x40, 0x9f, 0xb7, 0x9c, 0x08, 0x50, 0x08, 0x69, 0x09, 0xcd, 0x21, 0x70, 0x17, + 0x96, 0x4f, 0x29, 0x23, 0x06, 0x19, 0x32, 0xe2, 0x70, 0x32, 0xfc, 0x7c, 0x5a, 0x8b, 0xcd, 0x6e, + 0x75, 0x9c, 0x88, 0x5a, 0x80, 0xd3, 0x97, 0x4e, 0xa3, 0x43, 0xbf, 0xf4, 0x7d, 0x0c, 0x16, 0xc5, + 0x09, 0x8a, 0xd4, 0x5d, 0x99, 0x5d, 0xf7, 0xf9, 0xab, 0xea, 0x1e, 0xbb, 0x7e, 0xdd, 0x6b, 0x90, + 0x14, 0x5b, 0xb6, 0x2d, 0x6c, 0x42, 0x33, 0xa7, 0x91, 0x7d, 0x38, 0x9c, 0x46, 0x1a, 0xf4, 0x04, + 0xc6, 0xd6, 0x2d, 0x75, 0x1d, 0xd2, 0x5f, 0x0c, 0xa8, 0x37, 0xe8, 0x47, 0x55, 0x0e, 0xc2, 0x84, + 0x64, 0xfd, 0x0f, 0xd6, 0x58, 0xd7, 0x23, 0x7e, 0x97, 0xf6, 0x2c, 0x63, 0x92, 0x8d, 0x13, 0x84, + 0xdf, 0x1a, 0x01, 0x2a, 0xe3, 0xb4, 0x8c, 0xc5, 
0x4e, 0x12, 0xd4, 0x99, 0x88, 0x6d, 0x8e, 0x33, + 0xf5, 0x59, 0x34, 0x76, 0x92, 0xb3, 0xee, 0x87, 0x71, 0x16, 0x4e, 0x7e, 0x34, 0x4e, 0xde, 0xcf, + 0xf3, 0x90, 0x6c, 0xe0, 0xc1, 0x34, 0x7b, 0x7f, 0xec, 0x59, 0xbb, 0xf6, 0x5d, 0x31, 0xfb, 0x90, + 0xde, 0x86, 0x94, 0x4b, 0x7b, 0x86, 0xf0, 0xc4, 0xd1, 0x93, 0x74, 0x69, 0x4f, 0x9f, 0x52, 0xd2, + 0xc2, 0xf5, 0x95, 0x54, 0x81, 0xd4, 0xe8, 0x4d, 0x85, 0xa7, 0xf6, 0x43, 0xaf, 0xaa, 0x30, 0x4c, + 0xbd, 0x03, 0xa9, 0x90, 0x59, 0x71, 0x81, 0x84, 0x86, 0x92, 0x07, 0x19, 0x51, 0x43, 0x79, 0x9f, + 0xdf, 0xe7, 0xc5, 0xc3, 0x77, 0x86, 0x32, 0xfd, 0x8c, 0x11, 0x69, 0x0b, 0xa4, 0x2e, 0x71, 0x3c, + 0x42, 0xdc, 0x62, 0xf2, 0x65, 0x32, 0x23, 0x42, 0x1c, 0x46, 0x5d, 0xe2, 0x4a, 0xdf, 0x28, 0x00, + 0x7b, 0xbc, 0xb2, 0xb8, 0x5f, 0x7e, 0xa1, 0xfa, 0x98, 0x82, 0x31, 0xb6, 0x72, 0xf1, 0x2a, 0xb6, + 0xe5, 0xfa, 0x19, 0x3f, 0x9a, 0x77, 0x15, 0xb2, 0x61, 0xe3, 0xf2, 0x49, 0x90, 0xcc, 0x8c, 0x49, + 0x46, 0xf7, 0x5c, 0x93, 0x30, 0x3d, 0x73, 0x1a, 0x19, 0x95, 0x7e, 0x51, 0x20, 0x85, 0x39, 0xed, + 0x13, 0x66, 0x8e, 0x71, 0xa8, 0x5c, 0x9f, 0xc3, 0xbb, 0x00, 0xc1, 0xd9, 0x7c, 0x41, 0xa4, 0x24, + 0x53, 0xb2, 0x49, 0xbe, 0x20, 0xea, 0xbf, 0x47, 0x05, 0x8f, 0xfd, 0x76, 0xc1, 0xe5, 0x33, 0x22, + 0x28, 0xfb, 0x2d, 0x48, 0x38, 0x83, 0xbe, 0xc1, 0x6f, 0xa9, 0xb8, 0x90, 0xb9, 0x33, 0xe8, 0xb7, + 0x86, 0xbe, 0xfa, 0x0f, 0xf8, 0x53, 0xd7, 0xf4, 0x8d, 0x09, 0xa9, 0xa3, 0xc2, 0x93, 0x7a, 0xae, + 0x6b, 0xfa, 0xd5, 0xa8, 0xc8, 0x4b, 0x9f, 0x43, 0xa2, 0x35, 0xc4, 0x87, 0x29, 0x57, 0xb4, 0x47, + 0x29, 0x8b, 0x3e, 0x07, 0x93, 0xdc, 0x80, 0xcd, 0x46, 0x85, 0x38, 0x7f, 0x76, 0x04, 0xcf, 0x64, + 0xfe, 0xad, 0x96, 0x3f, 0xf0, 0xc9, 0x2b, 0x1f, 0xbb, 0x9b, 0x6f, 0x15, 0x48, 0xcb, 0xfa, 0x3c, + 0xee, 0x99, 0x1d, 0x7e, 0x41, 0x55, 0xf6, 0x0e, 0xab, 0x4f, 0x8d, 0xfa, 0x8e, 0xf1, 0x78, 0x6f, + 0xfb, 0x89, 0xf1, 0xec, 0xe0, 0xe9, 0xc1, 0xe1, 0xc7, 0x07, 0xb9, 0xb9, 0xc2, 0xcd, 0xf3, 0x0b, + 0x4d, 0x8d, 0x60, 0x9f, 0x39, 0xcf, 0x1d, 0xfa, 0xa5, 0xc3, 0xdf, 0x00, 0xe3, 0x21, 
0xdb, 0x95, + 0x66, 0xed, 0xa0, 0x95, 0x53, 0x0a, 0x37, 0xce, 0x2f, 0xb4, 0x95, 0x48, 0xc4, 0xf6, 0xb1, 0x4f, + 0x1c, 0x36, 0x1d, 0x50, 0x3d, 0xdc, 0xdf, 0xaf, 0xb7, 0x72, 0xf3, 0x53, 0x01, 0xf2, 0xca, 0xf8, + 0x1b, 0xac, 0x8c, 0x07, 0x1c, 0xd4, 0xf7, 0x72, 0xb1, 0x82, 0x7a, 0x7e, 0xa1, 0x2d, 0x45, 0xd0, + 0x07, 0x76, 0xaf, 0x90, 0xfc, 0xfa, 0xdb, 0xe2, 0xdc, 0xab, 0xef, 0x8a, 0xca, 0xe6, 0x57, 0xf3, + 0x90, 0x1d, 0xeb, 0x45, 0xea, 0xdf, 0xe1, 0x56, 0xb3, 0xfe, 0xe4, 0xa0, 0xb6, 0x63, 0xec, 0x37, + 0x9f, 0x18, 0xad, 0x4f, 0x1a, 0xb5, 0xc8, 0xee, 0x96, 0xcf, 0x2f, 0xb4, 0xb4, 0xdc, 0xd2, 0x55, + 0xe8, 0x86, 0x5e, 0x3b, 0x3a, 0x6c, 0xd5, 0x72, 0x8a, 0x40, 0x37, 0x3c, 0xc2, 0x5b, 0x2d, 0xa2, + 0xef, 0xc3, 0xda, 0x0c, 0xf4, 0x68, 0x63, 0x2b, 0xe7, 0x17, 0x5a, 0xb6, 0xe1, 0x11, 0x71, 0xdc, + 0x30, 0x62, 0x13, 0x6e, 0x4e, 0x46, 0x48, 0x78, 0xac, 0xb0, 0x74, 0x7e, 0xa1, 0x41, 0x35, 0xc4, + 0x96, 0x21, 0x3f, 0x3d, 0xfb, 0x61, 0xe3, 0xb0, 0xb9, 0xbd, 0x97, 0xd3, 0x0a, 0xb9, 0xf3, 0x0b, + 0x2d, 0x13, 0x34, 0x68, 0x8e, 0x0f, 0xab, 0x50, 0xf9, 0xe8, 0xf5, 0x65, 0x51, 0x79, 0x73, 0x59, + 0x54, 0x7e, 0xba, 0x2c, 0x2a, 0x2f, 0xdf, 0x17, 0xe7, 0xde, 0xbc, 0x2f, 0xce, 0xfd, 0xf0, 0xbe, + 0x38, 0xf7, 0xe9, 0xa3, 0x8e, 0xcd, 0xba, 0x83, 0xe3, 0x72, 0x9b, 0xf6, 0xb7, 0xa2, 0xff, 0xd9, + 0xc2, 0x4f, 0xf1, 0xef, 0x72, 0xf2, 0xff, 0xdc, 0xf1, 0x22, 0xda, 0x1f, 0xfe, 0x1a, 0x00, 0x00, + 0xff, 0xff, 0xc4, 0x68, 0x3a, 0xa3, 0xb2, 0x0e, 0x00, 0x00, } func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { @@ -1442,6 +1331,41 @@ func (m *BlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StateID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StateID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StateID) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if len(m.LastAppHash) > 0 { + i -= len(m.LastAppHash) + copy(dAtA[i:], m.LastAppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastAppHash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Header) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1462,12 +1386,28 @@ func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.ProposerAddress) > 0 { - i -= len(m.ProposerAddress) - copy(dAtA[i:], m.ProposerAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + if m.ProposedAppVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ProposedAppVersion)) + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xb0 + } + if len(m.ProposerProTxHash) > 0 { + i -= len(m.ProposerProTxHash) + copy(dAtA[i:], m.ProposerProTxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerProTxHash))) + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xaa + } + if m.CoreChainLockedHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.CoreChainLockedHeight)) + i-- + dAtA[i] = 0x6 i-- - dAtA[i] = 0x72 + dAtA[i] = 0xa0 } if len(m.EvidenceHash) > 0 { i -= len(m.EvidenceHash) @@ -1620,24 +1560,31 @@ func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.ExtensionSignature) > 0 { - i -= len(m.ExtensionSignature) - copy(dAtA[i:], m.ExtensionSignature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ExtensionSignature))) - i-- - dAtA[i] = 0x52 + if len(m.VoteExtensions) > 0 { + for iNdEx := len(m.VoteExtensions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VoteExtensions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } } - if len(m.Extension) > 0 { 
- i -= len(m.Extension) - copy(dAtA[i:], m.Extension) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Extension))) + if len(m.StateSignature) > 0 { + i -= len(m.StateSignature) + copy(dAtA[i:], m.StateSignature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.StateSignature))) i-- - dAtA[i] = 0x4a + dAtA[i] = 0x52 } - if len(m.Signature) > 0 { - i -= len(m.Signature) - copy(dAtA[i:], m.Signature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + if len(m.BlockSignature) > 0 { + i -= len(m.BlockSignature) + copy(dAtA[i:], m.BlockSignature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.BlockSignature))) i-- dAtA[i] = 0x42 } @@ -1646,21 +1593,13 @@ func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x38 } - if len(m.ValidatorAddress) > 0 { - i -= len(m.ValidatorAddress) - copy(dAtA[i:], m.ValidatorAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + if len(m.ValidatorProTxHash) > 0 { + i -= len(m.ValidatorProTxHash) + copy(dAtA[i:], m.ValidatorProTxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorProTxHash))) i-- dAtA[i] = 0x32 } - n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err6 != nil { - return 0, err6 - } - i -= n6 - i = encodeVarintTypes(dAtA, i, uint64(n6)) - i-- - dAtA[i] = 0x2a { size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1709,10 +1648,10 @@ func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.Signatures) > 0 { - for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- { + if len(m.ThresholdVoteExtensions) > 0 { + for iNdEx := len(m.ThresholdVoteExtensions) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ThresholdVoteExtensions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1720,116 +1659,50 @@ func (m *Commit) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xc2 } } - { - size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.Round != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + if len(m.ThresholdStateSignature) > 0 { + i -= len(m.ThresholdStateSignature) + copy(dAtA[i:], m.ThresholdStateSignature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ThresholdStateSignature))) i-- - dAtA[i] = 0x10 - } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + dAtA[i] = 0x6 i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CommitSig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + dAtA[i] = 0xba } - return dAtA[:n], nil -} - -func (m *CommitSig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Signature) > 0 { - i -= len(m.Signature) - copy(dAtA[i:], m.Signature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + if len(m.ThresholdBlockSignature) > 0 { + i -= len(m.ThresholdBlockSignature) + copy(dAtA[i:], m.ThresholdBlockSignature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ThresholdBlockSignature))) i-- - dAtA[i] = 0x22 - } - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err9 != nil { - return 0, err9 - } - i -= n9 - i = encodeVarintTypes(dAtA, i, uint64(n9)) - i-- - dAtA[i] = 0x1a - if len(m.ValidatorAddress) > 0 { - i -= len(m.ValidatorAddress) - copy(dAtA[i:], m.ValidatorAddress) - i = 
encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + dAtA[i] = 0x6 i-- - dAtA[i] = 0x12 + dAtA[i] = 0xb2 } - if m.BlockIdFlag != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) + if len(m.QuorumHash) > 0 { + i -= len(m.QuorumHash) + copy(dAtA[i:], m.QuorumHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.QuorumHash))) i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ExtendedCommit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xaa } - return dAtA[:n], nil -} - -func (m *ExtendedCommit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExtendedCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ExtendedSignatures) > 0 { - for iNdEx := len(m.ExtendedSignatures) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ExtendedSignatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 + { + size, err := m.StateID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa2 { size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1853,70 +1726,6 @@ func (m *ExtendedCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ExtendedCommitSig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExtendedCommitSig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*ExtendedCommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ExtensionSignature) > 0 { - i -= len(m.ExtensionSignature) - copy(dAtA[i:], m.ExtensionSignature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ExtensionSignature))) - i-- - dAtA[i] = 0x32 - } - if len(m.Extension) > 0 { - i -= len(m.Extension) - copy(dAtA[i:], m.Extension) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Extension))) - i-- - dAtA[i] = 0x2a - } - if len(m.Signature) > 0 { - i -= len(m.Signature) - copy(dAtA[i:], m.Signature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) - i-- - dAtA[i] = 0x22 - } - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err11 != nil { - return 0, err11 - } - i -= n11 - i = encodeVarintTypes(dAtA, i, uint64(n11)) - i-- - dAtA[i] = 0x1a - if len(m.ValidatorAddress) > 0 { - i -= len(m.ValidatorAddress) - copy(dAtA[i:], m.ValidatorAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) - i-- - dAtA[i] = 0x12 - } - if m.BlockIdFlag != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func (m *Proposal) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1937,6 +1746,13 @@ func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.CoreChainLockedHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.CoreChainLockedHeight)) + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa0 + } if len(m.Signature) > 0 { i -= len(m.Signature) copy(dAtA[i:], m.Signature) @@ -1944,12 +1760,12 @@ func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x3a } - n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err12 != nil { - return 0, err12 + n9, 
err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err9 != nil { + return 0, err9 } - i -= n12 - i = encodeVarintTypes(dAtA, i, uint64(n12)) + i -= n9 + i = encodeVarintTypes(dAtA, i, uint64(n9)) i-- dAtA[i] = 0x32 { @@ -2099,6 +1915,18 @@ func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.HasCoreChainLock { + i-- + if m.HasCoreChainLock { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa0 + } if m.NumTxs != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) i-- @@ -2241,6 +2069,22 @@ func (m *BlockID) Size() (n int) { return n } +func (m *StateID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LastAppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + func (m *Header) Size() (n int) { if m == nil { return 0 @@ -2292,9 +2136,15 @@ func (m *Header) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.ProposerAddress) + if m.CoreChainLockedHeight != 0 { + n += 2 + sovTypes(uint64(m.CoreChainLockedHeight)) + } + l = len(m.ProposerProTxHash) if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + n += 2 + l + sovTypes(uint64(l)) + } + if m.ProposedAppVersion != 0 { + n += 2 + sovTypes(uint64(m.ProposedAppVersion)) } return n } @@ -2331,46 +2181,23 @@ func (m *Vote) Size() (n int) { } l = m.BlockID.Size() n += 1 + l + sovTypes(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.ValidatorAddress) + l = len(m.ValidatorProTxHash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } if m.ValidatorIndex != 0 { n += 1 + sovTypes(uint64(m.ValidatorIndex)) } - l = len(m.Signature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Extension) + l = len(m.BlockSignature) if l > 0 { n += 1 + l + 
sovTypes(uint64(l)) } - l = len(m.ExtensionSignature) + l = len(m.StateSignature) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - return n -} - -func (m *Commit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - if m.Round != 0 { - n += 1 + sovTypes(uint64(m.Round)) - } - l = m.BlockID.Size() - n += 1 + l + sovTypes(uint64(l)) - if len(m.Signatures) > 0 { - for _, e := range m.Signatures { + if len(m.VoteExtensions) > 0 { + for _, e := range m.VoteExtensions { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -2378,29 +2205,7 @@ func (m *Commit) Size() (n int) { return n } -func (m *CommitSig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BlockIdFlag != 0 { - n += 1 + sovTypes(uint64(m.BlockIdFlag)) - } - l = len(m.ValidatorAddress) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.Signature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func (m *ExtendedCommit) Size() (n int) { +func (m *Commit) Size() (n int) { if m == nil { return 0 } @@ -2414,41 +2219,25 @@ func (m *ExtendedCommit) Size() (n int) { } l = m.BlockID.Size() n += 1 + l + sovTypes(uint64(l)) - if len(m.ExtendedSignatures) > 0 { - for _, e := range m.ExtendedSignatures { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - -func (m *ExtendedCommitSig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BlockIdFlag != 0 { - n += 1 + sovTypes(uint64(m.BlockIdFlag)) - } - l = len(m.ValidatorAddress) + l = m.StateID.Size() + n += 2 + l + sovTypes(uint64(l)) + l = len(m.QuorumHash) if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + n += 2 + l + sovTypes(uint64(l)) } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.Signature) + l = 
len(m.ThresholdBlockSignature) if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + n += 2 + l + sovTypes(uint64(l)) } - l = len(m.Extension) + l = len(m.ThresholdStateSignature) if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + n += 2 + l + sovTypes(uint64(l)) } - l = len(m.ExtensionSignature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.ThresholdVoteExtensions) > 0 { + for _, e := range m.ThresholdVoteExtensions { + l = e.Size() + n += 2 + l + sovTypes(uint64(l)) + } } return n } @@ -2479,6 +2268,9 @@ func (m *Proposal) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.CoreChainLockedHeight != 0 { + n += 2 + sovTypes(uint64(m.CoreChainLockedHeight)) + } return n } @@ -2532,6 +2324,9 @@ func (m *BlockMeta) Size() (n int) { if m.NumTxs != 0 { n += 1 + sovTypes(uint64(m.NumTxs)) } + if m.HasCoreChainLock { + n += 3 + } return n } @@ -2918,7 +2713,7 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { } return nil } -func (m *Header) Unmarshal(dAtA []byte) error { +func (m *StateID) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2941,17 +2736,17 @@ func (m *Header) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Header: wiretype end group for non-group") + return fmt.Errorf("proto: StateID: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StateID: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastAppHash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2961,30 +2756,31 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ 
- msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.LastAppHash = append(m.LastAppHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastAppHash == nil { + m.LastAppHash = []byte{} } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var stringLen uint64 + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2994,20 +2790,122 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } - if postIndex > l { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 
0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } m.ChainID = string(dAtA[iNdEx:postIndex]) @@ -3369,9 +3267,28 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.EvidenceHash = []byte{} } iNdEx = postIndex - case 14: + case 100: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CoreChainLockedHeight", wireType) + } + m.CoreChainLockedHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.CoreChainLockedHeight |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 101: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProposerProTxHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -3398,11 +3315,30 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) - if m.ProposerAddress == nil { - m.ProposerAddress = []byte{} + m.ProposerProTxHash = append(m.ProposerProTxHash[:0], dAtA[iNdEx:postIndex]...) + if m.ProposerProTxHash == nil { + m.ProposerProTxHash = []byte{} } iNdEx = postIndex + case 102: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposedAppVersion", wireType) + } + m.ProposedAppVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProposedAppVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -3554,316 +3490,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) - } - m.Round = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
m.Round |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) - if m.ValidatorAddress == nil { - m.ValidatorAddress = []byte{} - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorIndex", wireType) - } - m.ValidatorIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ValidatorIndex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) - if m.Signature == nil { - m.Signature = []byte{} - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Extension = append(m.Extension[:0], dAtA[iNdEx:postIndex]...) 
- if m.Extension == nil { - m.Extension = []byte{} - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtensionSignature", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExtensionSignature = append(m.ExtensionSignature[:0], dAtA[iNdEx:postIndex]...) - if m.ExtensionSignature == nil { - m.ExtensionSignature = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Commit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Commit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Commit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } @@ -3882,7 +3509,7 @@ func (m *Commit) Unmarshal(dAtA 
[]byte) error { break } } - case 2: + case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) } @@ -3901,7 +3528,7 @@ func (m *Commit) Unmarshal(dAtA []byte) error { break } } - case 3: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) } @@ -3934,11 +3561,11 @@ func (m *Commit) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorProTxHash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3948,81 +3575,31 @@ func (m *Commit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Signatures = append(m.Signatures, CommitSig{}) - if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.ValidatorProTxHash = append(m.ValidatorProTxHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ValidatorProTxHash == nil { + m.ValidatorProTxHash = []byte{} } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CommitSig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CommitSig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CommitSig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorIndex", wireType) } - m.BlockIdFlag = 0 + m.ValidatorIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4032,14 +3609,14 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift + m.ValidatorIndex |= int32(b&0x7F) << shift if b < 0x80 { break } } - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BlockSignature", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -4066,16 +3643,16 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { if postIndex 
> l { return io.ErrUnexpectedEOF } - m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) - if m.ValidatorAddress == nil { - m.ValidatorAddress = []byte{} + m.BlockSignature = append(m.BlockSignature[:0], dAtA[iNdEx:postIndex]...) + if m.BlockSignature == nil { + m.BlockSignature = []byte{} } iNdEx = postIndex - case 3: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StateSignature", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4085,30 +3662,31 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err + m.StateSignature = append(m.StateSignature[:0], dAtA[iNdEx:postIndex]...) 
+ if m.StateSignature == nil { + m.StateSignature = []byte{} } iNdEx = postIndex - case 4: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtensions", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4118,24 +3696,24 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) - if m.Signature == nil { - m.Signature = []byte{} + m.VoteExtensions = append(m.VoteExtensions, &VoteExtension{}) + if err := m.VoteExtensions[len(m.VoteExtensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -4159,7 +3737,7 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { +func (m *Commit) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4182,10 +3760,10 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExtendedCommit: wiretype end group for non-group") + return fmt.Errorf("proto: Commit: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExtendedCommit: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Commit: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4259,9 +3837,9 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { return 
err } iNdEx = postIndex - case 4: + case 100: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtendedSignatures", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StateID", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4288,83 +3866,13 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExtendedSignatures = append(m.ExtendedSignatures, ExtendedCommitSig{}) - if err := m.ExtendedSignatures[len(m.ExtendedSignatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StateID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExtendedCommitSig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExtendedCommitSig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) - } - m.BlockIdFlag = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: + case 101: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field QuorumHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -4391,47 +3899,14 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) - if m.ValidatorAddress == nil { - m.ValidatorAddress = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err + m.QuorumHash = append(m.QuorumHash[:0], dAtA[iNdEx:postIndex]...) + if m.QuorumHash == nil { + m.QuorumHash = []byte{} } iNdEx = postIndex - case 4: + case 102: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ThresholdBlockSignature", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -4458,14 +3933,14 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
- if m.Signature == nil { - m.Signature = []byte{} + m.ThresholdBlockSignature = append(m.ThresholdBlockSignature[:0], dAtA[iNdEx:postIndex]...) + if m.ThresholdBlockSignature == nil { + m.ThresholdBlockSignature = []byte{} } iNdEx = postIndex - case 5: + case 103: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ThresholdStateSignature", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -4492,16 +3967,16 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Extension = append(m.Extension[:0], dAtA[iNdEx:postIndex]...) - if m.Extension == nil { - m.Extension = []byte{} + m.ThresholdStateSignature = append(m.ThresholdStateSignature[:0], dAtA[iNdEx:postIndex]...) + if m.ThresholdStateSignature == nil { + m.ThresholdStateSignature = []byte{} } iNdEx = postIndex - case 6: + case 104: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtensionSignature", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ThresholdVoteExtensions", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4511,24 +3986,24 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExtensionSignature = append(m.ExtensionSignature[:0], dAtA[iNdEx:postIndex]...) 
- if m.ExtensionSignature == nil { - m.ExtensionSignature = []byte{} + m.ThresholdVoteExtensions = append(m.ThresholdVoteExtensions, &VoteExtension{}) + if err := m.ThresholdVoteExtensions[len(m.ThresholdVoteExtensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -4757,6 +4232,25 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { m.Signature = []byte{} } iNdEx = postIndex + case 100: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CoreChainLockedHeight", wireType) + } + m.CoreChainLockedHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CoreChainLockedHeight |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -5155,6 +4649,26 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { break } } + case 100: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasCoreChainLock", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasCoreChainLock = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto index 4085623960..0a75ee2180 100644 --- a/proto/tendermint/types/types.proto +++ b/proto/tendermint/types/types.proto @@ -147,28 +147,6 @@ message Commit { repeated VoteExtension threshold_vote_extensions = 104; } -message ExtendedCommit { - int64 height = 1; - int32 round = 2; - BlockID block_id = 3 - [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; - repeated ExtendedCommitSig extended_signatures = 4 [(gogoproto.nullable) = 
false]; -} - -// ExtendedCommitSig retains all the same fields as CommitSig but adds vote -// extension-related fields. -message ExtendedCommitSig { - BlockIDFlag block_id_flag = 1; - bytes validator_address = 2; - google.protobuf.Timestamp timestamp = 3 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - bytes signature = 4; - // Vote extension data - bytes extension = 5; - // Vote extension signature - bytes extension_signature = 6; -} - message Proposal { SignedMsgType type = 1; int64 height = 2; diff --git a/rpc/client/mocks/abci_client.go b/rpc/client/mocks/abci_client.go index 07683bc261..76587a0775 100644 --- a/rpc/client/mocks/abci_client.go +++ b/rpc/client/mocks/abci_client.go @@ -12,8 +12,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -91,6 +89,29 @@ func (_m *ABCIClient) ABCIQueryWithOptions(ctx context.Context, path string, dat return r0, r1 } +// BroadcastTx provides a mock function with given fields: _a0, _a1 +func (_m *ABCIClient) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // BroadcastTxAsync provides a mock function with given fields: _a0, _a1 func (_m *ABCIClient) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) @@ -160,8 +181,13 @@ func (_m *ABCIClient) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coret return r0, r1 } -// NewABCIClient creates a new instance of ABCIClient. 
It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewABCIClient(t testing.TB) *ABCIClient { +type mockConstructorTestingTNewABCIClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewABCIClient creates a new instance of ABCIClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewABCIClient(t mockConstructorTestingTNewABCIClient) *ABCIClient { mock := &ABCIClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index c093b03472..7c485c425d 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -819,13 +819,13 @@ func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perP return r0, r1 } -type NewClientT interface { +type mockConstructorTestingTNewClient interface { mock.TestingT Cleanup(func()) } // NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewClient(t NewClientT) *Client { +func NewClient(t mockConstructorTestingTNewClient) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/events_client.go b/rpc/client/mocks/events_client.go index eba096284c..19b882552f 100644 --- a/rpc/client/mocks/events_client.go +++ b/rpc/client/mocks/events_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - - testing "testing" ) // EventsClient is an autogenerated mock type for the EventsClient type @@ -39,8 +37,13 @@ func (_m *EventsClient) Events(ctx context.Context, req *coretypes.RequestEvents return r0, r1 } -// NewEventsClient creates a new instance of EventsClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewEventsClient(t testing.TB) *EventsClient { +type mockConstructorTestingTNewEventsClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewEventsClient creates a new instance of EventsClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewEventsClient(t mockConstructorTestingTNewEventsClient) *EventsClient { mock := &EventsClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/evidence_client.go b/rpc/client/mocks/evidence_client.go index 7824a2ae4e..3b8d3cf2ed 100644 --- a/rpc/client/mocks/evidence_client.go +++ b/rpc/client/mocks/evidence_client.go @@ -8,8 +8,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -41,8 +39,13 @@ func (_m *EvidenceClient) BroadcastEvidence(_a0 context.Context, _a1 types.Evide return r0, r1 } -// NewEvidenceClient creates a new instance of EvidenceClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewEvidenceClient(t testing.TB) *EvidenceClient { +type mockConstructorTestingTNewEvidenceClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewEvidenceClient creates a new instance of EvidenceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewEvidenceClient(t mockConstructorTestingTNewEvidenceClient) *EvidenceClient { mock := &EvidenceClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/history_client.go b/rpc/client/mocks/history_client.go index ecd0190504..ae28fe69b2 100644 --- a/rpc/client/mocks/history_client.go +++ b/rpc/client/mocks/history_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - - testing "testing" ) // HistoryClient is an autogenerated mock type for the HistoryClient type @@ -85,8 +83,13 @@ func (_m *HistoryClient) GenesisChunked(_a0 context.Context, _a1 uint) (*coretyp return r0, r1 } -// NewHistoryClient creates a new instance of HistoryClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewHistoryClient(t testing.TB) *HistoryClient { +type mockConstructorTestingTNewHistoryClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewHistoryClient creates a new instance of HistoryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewHistoryClient(t mockConstructorTestingTNewHistoryClient) *HistoryClient { mock := &HistoryClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/mempool_client.go b/rpc/client/mocks/mempool_client.go index 0dfea703fe..714c66c6d4 100644 --- a/rpc/client/mocks/mempool_client.go +++ b/rpc/client/mocks/mempool_client.go @@ -8,8 +8,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -101,8 +99,13 @@ func (_m *MempoolClient) UnconfirmedTxs(ctx context.Context, page *int, perPage return r0, r1 } -// NewMempoolClient creates a new instance of MempoolClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewMempoolClient(t testing.TB) *MempoolClient { +type mockConstructorTestingTNewMempoolClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewMempoolClient creates a new instance of MempoolClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewMempoolClient(t mockConstructorTestingTNewMempoolClient) *MempoolClient { mock := &MempoolClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/network_client.go b/rpc/client/mocks/network_client.go index 73bb11d612..d0f7eaa559 100644 --- a/rpc/client/mocks/network_client.go +++ b/rpc/client/mocks/network_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - - testing "testing" ) // NetworkClient is an autogenerated mock type for the NetworkClient type @@ -131,8 +129,13 @@ func (_m *NetworkClient) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, return r0, r1 } -// NewNetworkClient creates a new instance of NetworkClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewNetworkClient(t testing.TB) *NetworkClient { +type mockConstructorTestingTNewNetworkClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewNetworkClient creates a new instance of NetworkClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewNetworkClient(t mockConstructorTestingTNewNetworkClient) *NetworkClient { mock := &NetworkClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/remote_client.go b/rpc/client/mocks/remote_client.go index b4271dceff..029c712034 100644 --- a/rpc/client/mocks/remote_client.go +++ b/rpc/client/mocks/remote_client.go @@ -12,8 +12,6 @@ import ( mock "github.com/stretchr/testify/mock" - testing "testing" - types "github.com/tendermint/tendermint/types" ) @@ -229,6 +227,29 @@ func (_m *RemoteClient) BroadcastEvidence(_a0 context.Context, _a1 types.Evidenc return r0, r1 } +// BroadcastTx provides a mock function with given fields: _a0, _a1 +func (_m *RemoteClient) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // BroadcastTxAsync provides a mock function with given fields: _a0, _a1 func (_m *RemoteClient) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) @@ -812,8 +833,13 @@ func (_m *RemoteClient) Validators(ctx context.Context, height *int64, page *int return r0, r1 } -// NewRemoteClient creates a new instance of RemoteClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewRemoteClient(t testing.TB) *RemoteClient { +type mockConstructorTestingTNewRemoteClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewRemoteClient creates a new instance of RemoteClient. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewRemoteClient(t mockConstructorTestingTNewRemoteClient) *RemoteClient { mock := &RemoteClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/sign_client.go b/rpc/client/mocks/sign_client.go index 6c1e674476..008176295a 100644 --- a/rpc/client/mocks/sign_client.go +++ b/rpc/client/mocks/sign_client.go @@ -10,8 +10,6 @@ import ( coretypes "github.com/tendermint/tendermint/rpc/coretypes" mock "github.com/stretchr/testify/mock" - - testing "testing" ) // SignClient is an autogenerated mock type for the SignClient type @@ -249,8 +247,13 @@ func (_m *SignClient) Validators(ctx context.Context, height *int64, page *int, return r0, r1 } -// NewSignClient creates a new instance of SignClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewSignClient(t testing.TB) *SignClient { +type mockConstructorTestingTNewSignClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewSignClient creates a new instance of SignClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewSignClient(t mockConstructorTestingTNewSignClient) *SignClient { mock := &SignClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/status_client.go b/rpc/client/mocks/status_client.go index eee3a471f6..bf22efa207 100644 --- a/rpc/client/mocks/status_client.go +++ b/rpc/client/mocks/status_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - - testing "testing" ) // StatusClient is an autogenerated mock type for the StatusClient type @@ -39,8 +37,13 @@ func (_m *StatusClient) Status(_a0 context.Context) (*coretypes.ResultStatus, er return r0, r1 } -// NewStatusClient creates a new instance of StatusClient. 
It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewStatusClient(t testing.TB) *StatusClient { +type mockConstructorTestingTNewStatusClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewStatusClient creates a new instance of StatusClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewStatusClient(t mockConstructorTestingTNewStatusClient) *StatusClient { mock := &StatusClient{} mock.Mock.Test(t) diff --git a/rpc/client/mocks/subscription_client.go b/rpc/client/mocks/subscription_client.go index 4a520063d5..c476a4fc7a 100644 --- a/rpc/client/mocks/subscription_client.go +++ b/rpc/client/mocks/subscription_client.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" coretypes "github.com/tendermint/tendermint/rpc/coretypes" - - testing "testing" ) // SubscriptionClient is an autogenerated mock type for the SubscriptionClient type @@ -74,8 +72,13 @@ func (_m *SubscriptionClient) UnsubscribeAll(ctx context.Context, subscriber str return r0 } -// NewSubscriptionClient creates a new instance of SubscriptionClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewSubscriptionClient(t testing.TB) *SubscriptionClient { +type mockConstructorTestingTNewSubscriptionClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewSubscriptionClient creates a new instance of SubscriptionClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewSubscriptionClient(t mockConstructorTestingTNewSubscriptionClient) *SubscriptionClient { mock := &SubscriptionClient{} mock.Mock.Test(t) diff --git a/types/block.go b/types/block.go index 02e502035f..f887ebcccd 100644 --- a/types/block.go +++ b/types/block.go @@ -659,96 +659,6 @@ const ( //------------------------------------- -// ExtendedCommitSig contains a commit signature along with its corresponding -// vote extension and vote extension signature. -type ExtendedCommitSig struct { - CommitSig // Commit signature - Extension []byte // Vote extension - ExtensionSignature []byte // Vote extension signature -} - -// NewExtendedCommitSigAbsent returns new ExtendedCommitSig with -// BlockIDFlagAbsent. Other fields are all empty. -func NewExtendedCommitSigAbsent() ExtendedCommitSig { - return ExtendedCommitSig{CommitSig: NewCommitSigAbsent()} -} - -// String returns a string representation of an ExtendedCommitSig. -// -// 1. commit sig -// 2. first 6 bytes of vote extension -// 3. first 6 bytes of vote extension signature -func (ecs ExtendedCommitSig) String() string { - return fmt.Sprintf("ExtendedCommitSig{%s with %X %X}", - ecs.CommitSig, - tmbytes.Fingerprint(ecs.Extension), - tmbytes.Fingerprint(ecs.ExtensionSignature), - ) -} - -// ValidateBasic checks whether the structure is well-formed. 
-func (ecs ExtendedCommitSig) ValidateBasic() error { - if err := ecs.CommitSig.ValidateBasic(); err != nil { - return err - } - - if ecs.BlockIDFlag == BlockIDFlagCommit { - if len(ecs.Extension) > MaxVoteExtensionSize { - return fmt.Errorf("vote extension is too big (max: %d)", MaxVoteExtensionSize) - } - if len(ecs.ExtensionSignature) > MaxSignatureSize { - return fmt.Errorf("vote extension signature is too big (max: %d)", MaxSignatureSize) - } - return nil - } - - if len(ecs.ExtensionSignature) == 0 && len(ecs.Extension) != 0 { - return errors.New("vote extension signature absent on vote with extension") - } - return nil -} - -// EnsureExtensions validates that a vote extensions signature is present for -// this ExtendedCommitSig. -func (ecs ExtendedCommitSig) EnsureExtension() error { - if ecs.BlockIDFlag == BlockIDFlagCommit && len(ecs.ExtensionSignature) == 0 { - return errors.New("vote extension data is missing") - } - return nil -} - -// ToProto converts the ExtendedCommitSig to its Protobuf representation. -func (ecs *ExtendedCommitSig) ToProto() *tmproto.ExtendedCommitSig { - if ecs == nil { - return nil - } - - return &tmproto.ExtendedCommitSig{ - BlockIdFlag: tmproto.BlockIDFlag(ecs.BlockIDFlag), - ValidatorAddress: ecs.ValidatorAddress, - Timestamp: ecs.Timestamp, - Signature: ecs.Signature, - Extension: ecs.Extension, - ExtensionSignature: ecs.ExtensionSignature, - } -} - -// FromProto populates the ExtendedCommitSig with values from the given -// Protobuf representation. Returns an error if the ExtendedCommitSig is -// invalid. 
-func (ecs *ExtendedCommitSig) FromProto(ecsp tmproto.ExtendedCommitSig) error { - ecs.BlockIDFlag = BlockIDFlag(ecsp.BlockIdFlag) - ecs.ValidatorAddress = ecsp.ValidatorAddress - ecs.Timestamp = ecsp.Timestamp - ecs.Signature = ecsp.Signature - ecs.Extension = ecsp.Extension - ecs.ExtensionSignature = ecsp.ExtensionSignature - - return ecs.ValidateBasic() -} - -//------------------------------------- - // Commit contains the evidence that a block was committed by a set of validators. // NOTE: Commit is empty for height 1, but never nil. type Commit struct { @@ -1020,272 +930,6 @@ func CommitFromProto(cp *tmproto.Commit) (*Commit, error) { return commit, commit.ValidateBasic() } -//------------------------------------- - -// ExtendedCommit is similar to Commit, except that its signatures also retain -// their corresponding vote extensions and vote extension signatures. -type ExtendedCommit struct { - Height int64 - Round int32 - BlockID BlockID - ExtendedSignatures []ExtendedCommitSig - - bitArray *bits.BitArray -} - -// Clone creates a deep copy of this extended commit. -func (ec *ExtendedCommit) Clone() *ExtendedCommit { - sigs := make([]ExtendedCommitSig, len(ec.ExtendedSignatures)) - copy(sigs, ec.ExtendedSignatures) - ecc := *ec - ecc.ExtendedSignatures = sigs - return &ecc -} - -// ToExtendedVoteSet constructs a VoteSet from the Commit and validator set. -// Panics if signatures from the ExtendedCommit can't be added to the voteset. -// Panics if any of the votes have invalid or absent vote extension data. -// Inverse of VoteSet.MakeExtendedCommit(). -func (ec *ExtendedCommit) ToExtendedVoteSet(chainID string, vals *ValidatorSet) *VoteSet { - voteSet := NewExtendedVoteSet(chainID, ec.Height, ec.Round, tmproto.PrecommitType, vals) - ec.addSigsToVoteSet(voteSet) - return voteSet -} - -// ToVoteSet constructs a VoteSet from the Commit and validator set. -// Panics if signatures from the ExtendedCommit can't be added to the voteset. 
-// Inverse of VoteSet.MakeExtendedCommit(). -func (ec *ExtendedCommit) ToVoteSet(chainID string, vals *ValidatorSet) *VoteSet { - voteSet := NewVoteSet(chainID, ec.Height, ec.Round, tmproto.PrecommitType, vals) - ec.addSigsToVoteSet(voteSet) - return voteSet -} - -// addSigsToVoteSet adds all of the signature to voteSet. -func (ec *ExtendedCommit) addSigsToVoteSet(voteSet *VoteSet) { - for idx, ecs := range ec.ExtendedSignatures { - if ecs.BlockIDFlag == BlockIDFlagAbsent { - continue // OK, some precommits can be missing. - } - vote := ec.GetExtendedVote(int32(idx)) - if err := vote.ValidateBasic(); err != nil { - panic(fmt.Errorf("failed to validate vote reconstructed from LastCommit: %w", err)) - } - added, err := voteSet.AddVote(vote) - if !added || err != nil { - panic(fmt.Errorf("failed to reconstruct vote set from extended commit: %w", err)) - } - } -} - -// ToVoteSet constructs a VoteSet from the Commit and validator set. -// Panics if signatures from the commit can't be added to the voteset. -// Inverse of VoteSet.MakeCommit(). -func (commit *Commit) ToVoteSet(chainID string, vals *ValidatorSet) *VoteSet { - voteSet := NewVoteSet(chainID, commit.Height, commit.Round, tmproto.PrecommitType, vals) - for idx, cs := range commit.Signatures { - if cs.BlockIDFlag == BlockIDFlagAbsent { - continue // OK, some precommits can be missing. - } - vote := commit.GetVote(int32(idx)) - if err := vote.ValidateBasic(); err != nil { - panic(fmt.Errorf("failed to validate vote reconstructed from commit: %w", err)) - } - added, err := voteSet.AddVote(vote) - if !added || err != nil { - panic(fmt.Errorf("failed to reconstruct vote set from commit: %w", err)) - } - } - return voteSet -} - -// EnsureExtensions validates that a vote extensions signature is present for -// every ExtendedCommitSig in the ExtendedCommit. 
-func (ec *ExtendedCommit) EnsureExtensions() error { - for _, ecs := range ec.ExtendedSignatures { - if err := ecs.EnsureExtension(); err != nil { - return err - } - } - return nil -} - -// StripExtensions removes all VoteExtension data from an ExtendedCommit. This -// is useful when dealing with an ExendedCommit but vote extension data is -// expected to be absent. -func (ec *ExtendedCommit) StripExtensions() bool { - stripped := false - for idx := range ec.ExtendedSignatures { - if len(ec.ExtendedSignatures[idx].Extension) > 0 || len(ec.ExtendedSignatures[idx].ExtensionSignature) > 0 { - stripped = true - } - ec.ExtendedSignatures[idx].Extension = nil - ec.ExtendedSignatures[idx].ExtensionSignature = nil - } - return stripped -} - -// ToCommit converts an ExtendedCommit to a Commit by removing all vote -// extension-related fields. -func (ec *ExtendedCommit) ToCommit() *Commit { - cs := make([]CommitSig, len(ec.ExtendedSignatures)) - for idx, ecs := range ec.ExtendedSignatures { - cs[idx] = ecs.CommitSig - } - return &Commit{ - Height: ec.Height, - Round: ec.Round, - BlockID: ec.BlockID, - Signatures: cs, - } -} - -// GetExtendedVote converts the ExtendedCommitSig for the given validator -// index to a Vote with a vote extensions. -// It panics if valIndex is out of range. -func (ec *ExtendedCommit) GetExtendedVote(valIndex int32) *Vote { - ecs := ec.ExtendedSignatures[valIndex] - return &Vote{ - Type: tmproto.PrecommitType, - Height: ec.Height, - Round: ec.Round, - BlockID: ecs.BlockID(ec.BlockID), - Timestamp: ecs.Timestamp, - ValidatorAddress: ecs.ValidatorAddress, - ValidatorIndex: valIndex, - Signature: ecs.Signature, - Extension: ecs.Extension, - ExtensionSignature: ecs.ExtensionSignature, - } -} - -// Type returns the vote type of the extended commit, which is always -// VoteTypePrecommit -// Implements VoteSetReader. -func (ec *ExtendedCommit) Type() byte { return byte(tmproto.PrecommitType) } - -// GetHeight returns height of the extended commit. 
-// Implements VoteSetReader. -func (ec *ExtendedCommit) GetHeight() int64 { return ec.Height } - -// GetRound returns height of the extended commit. -// Implements VoteSetReader. -func (ec *ExtendedCommit) GetRound() int32 { return ec.Round } - -// Size returns the number of signatures in the extended commit. -// Implements VoteSetReader. -func (ec *ExtendedCommit) Size() int { - if ec == nil { - return 0 - } - return len(ec.ExtendedSignatures) -} - -// BitArray returns a BitArray of which validators voted for BlockID or nil in -// this extended commit. -// Implements VoteSetReader. -func (ec *ExtendedCommit) BitArray() *bits.BitArray { - if ec.bitArray == nil { - ec.bitArray = bits.NewBitArray(len(ec.ExtendedSignatures)) - for i, extCommitSig := range ec.ExtendedSignatures { - // TODO: need to check the BlockID otherwise we could be counting conflicts, - // not just the one with +2/3 ! - ec.bitArray.SetIndex(i, extCommitSig.BlockIDFlag != BlockIDFlagAbsent) - } - } - return ec.bitArray -} - -// GetByIndex returns the vote corresponding to a given validator index. -// Panics if `index >= extCommit.Size()`. -// Implements VoteSetReader. -func (ec *ExtendedCommit) GetByIndex(valIdx int32) *Vote { - return ec.GetExtendedVote(valIdx) -} - -// IsCommit returns true if there is at least one signature. -// Implements VoteSetReader. -func (ec *ExtendedCommit) IsCommit() bool { - return len(ec.ExtendedSignatures) != 0 -} - -// ValidateBasic checks whether the extended commit is well-formed. Does not -// actually check the cryptographic signatures. 
-func (ec *ExtendedCommit) ValidateBasic() error { - if ec.Height < 0 { - return errors.New("negative Height") - } - if ec.Round < 0 { - return errors.New("negative Round") - } - - if ec.Height >= 1 { - if ec.BlockID.IsNil() { - return errors.New("commit cannot be for nil block") - } - - if len(ec.ExtendedSignatures) == 0 { - return errors.New("no signatures in commit") - } - for i, extCommitSig := range ec.ExtendedSignatures { - if err := extCommitSig.ValidateBasic(); err != nil { - return fmt.Errorf("wrong ExtendedCommitSig #%d: %v", i, err) - } - } - } - return nil -} - -// ToProto converts ExtendedCommit to protobuf -func (ec *ExtendedCommit) ToProto() *tmproto.ExtendedCommit { - if ec == nil { - return nil - } - - c := new(tmproto.ExtendedCommit) - sigs := make([]tmproto.ExtendedCommitSig, len(ec.ExtendedSignatures)) - for i := range ec.ExtendedSignatures { - sigs[i] = *ec.ExtendedSignatures[i].ToProto() - } - c.ExtendedSignatures = sigs - - c.Height = ec.Height - c.Round = ec.Round - c.BlockID = ec.BlockID.ToProto() - - return c -} - -// ExtendedCommitFromProto constructs an ExtendedCommit from the given Protobuf -// representation. It returns an error if the extended commit is invalid. 
-func ExtendedCommitFromProto(ecp *tmproto.ExtendedCommit) (*ExtendedCommit, error) { - if ecp == nil { - return nil, errors.New("nil ExtendedCommit") - } - - extCommit := new(ExtendedCommit) - - bi, err := BlockIDFromProto(&ecp.BlockID) - if err != nil { - return nil, err - } - - sigs := make([]ExtendedCommitSig, len(ecp.ExtendedSignatures)) - for i := range ecp.ExtendedSignatures { - if err := sigs[i].FromProto(ecp.ExtendedSignatures[i]); err != nil { - return nil, err - } - } - extCommit.ExtendedSignatures = sigs - extCommit.Height = ecp.Height - extCommit.Round = ecp.Round - extCommit.BlockID = *bi - - return extCommit, extCommit.ValidateBasic() -} - -//------------------------------------- - // Data contains the set of transactions included in the block type Data struct { diff --git a/types/block_test.go b/types/block_test.go index 2783c2dd09..5b5ddb917b 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -492,7 +492,7 @@ func randCommit(ctx context.Context, t *testing.T, stateID StateID) *Commit { require.NoError(t, err) - return commit.ToCommit() + return commit } func hexBytesFromString(t *testing.T, s string) tmbytes.HexBytes { @@ -631,7 +631,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { err := valSet.VerifyCommit(voteSet.ChainID(), blockID, stateID, height-1, commit) assert.NoError(t, err) } else { - assert.Panics(t, func() { voteSet.MakeExtendedCommit() }) + assert.Panics(t, func() { voteSet.MakeCommit() }) } } } diff --git a/types/params.go b/types/params.go index 3ca26e6d42..d507807318 100644 --- a/types/params.go +++ b/types/params.go @@ -100,15 +100,6 @@ type ABCIParams struct { RecheckTx bool `json:"recheck_tx"` } -// VoteExtensionsEnabled returns true if vote extensions are enabled at height h -// and false otherwise. 
-func (a ABCIParams) VoteExtensionsEnabled(h int64) bool { - if a.VoteExtensionsEnableHeight == 0 { - return false - } - return a.VoteExtensionsEnableHeight <= h -} - // DefaultConsensusParams returns a default ConsensusParams. func DefaultConsensusParams() *ConsensusParams { return &ConsensusParams{ diff --git a/types/test_util.go b/types/test_util.go index c282456235..a68b6ef2ca 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -52,7 +52,7 @@ func makeCommit( } } - return voteSet.MakeExtendedCommit(), nil + return voteSet.MakeCommit(), nil } // signAddVote signs a vote using StateID configured inside voteSet, and adds it to that voteSet diff --git a/types/vote.go b/types/vote.go index 22996a177b..7cc9df7ca3 100644 --- a/types/vote.go +++ b/types/vote.go @@ -1,6 +1,7 @@ package types import ( + "bytes" "encoding/binary" "errors" "fmt" @@ -270,9 +271,11 @@ func (vote *Vote) VerifyExtension(chainID string, pubKey crypto.PubKey) error { return nil } v := vote.ToProto() - extSignBytes := VoteExtensionSignBytes(chainID, v) - if !pubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) { - return ErrVoteInvalidSignature + for _, ve := range v.VoteExtensions { + extSignBytes := VoteExtensionSignBytes(chainID, vote.Height, vote.Round, ve) + if !pubKey.VerifySignature(extSignBytes, ve.Signature) { + return ErrVoteInvalidSignature + } } return nil } @@ -380,22 +383,6 @@ func (vote *Vote) ValidateWithExtension() error { return nil } -// EnsureExtension checks for the presence of extensions signature data -// on precommit vote types. 
-func (vote *Vote) EnsureExtension() error { - // We should always see vote extension signatures in non-nil precommits - if vote.Type != tmproto.PrecommitType { - return nil - } - if vote.BlockID.IsNil() { - return nil - } - if len(vote.ExtensionSignature) > 0 { - return nil - } - return ErrVoteExtensionAbsent -} - // ToProto converts the handwritten type to proto generated type // return type, nil if everything converts safely, otherwise nil, error func (vote *Vote) ToProto() *tmproto.Vote { diff --git a/types/vote_set.go b/types/vote_set.go index 429f475e50..bdb7ce1783 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -3,7 +3,6 @@ package types import ( "bytes" "encoding/json" - "errors" "fmt" "strings" "sync" @@ -102,16 +101,6 @@ func NewVoteSet(chainID string, height int64, round int32, } } -// NewExtendedVoteSet constructs a vote set with additional vote verification logic. -// The VoteSet constructed with NewExtendedVoteSet verifies the vote extension -// data for every vote added to the set. -func NewExtendedVoteSet(chainID string, height int64, round int32, - signedMsgType tmproto.SignedMsgType, valSet *ValidatorSet) *VoteSet { - vs := NewVoteSet(chainID, height, round, signedMsgType, valSet) - vs.extensionsEnabled = true - return vs -} - func (voteSet *VoteSet) ChainID() string { return voteSet.chainID } @@ -702,21 +691,21 @@ func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { //-------------------------------------------------------------------------------- // Commit -// MakeExtendedCommit constructs a Commit from the VoteSet. It only includes +// MakeCommit constructs a Commit from the VoteSet. It only includes // precommits for the block, which has 2/3+ majority, and nil. // // Panics if the vote type is not PrecommitType or if there's no +2/3 votes for // a single block. 
-func (voteSet *VoteSet) MakeExtendedCommit() *Commit { +func (voteSet *VoteSet) MakeCommit() *Commit { if voteSet.signedMsgType != tmproto.PrecommitType { - panic("Cannot MakeExtendCommit() unless VoteSet.Type is PrecommitType") + panic("Cannot MakeCommit() unless VoteSet.Type is PrecommitType") } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() // Make sure we have a 2/3 majority if voteSet.maj23 == nil { - panic("Cannot MakeExtendCommit() unless a blockhash has +2/3") + panic("Cannot MakeCommit() unless a blockhash has +2/3") } if voteSet.thresholdBlockSig == nil { diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 1c74f02f63..206e7c0966 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -505,7 +505,7 @@ func TestVoteSet_MakeCommit(t *testing.T) { } // MakeCommit should fail. - assert.Panics(t, func() { voteSet.MakeExtendedCommit() }, "Doesn't have +2/3 majority") + assert.Panics(t, func() { voteSet.MakeCommit() }, "Doesn't have +2/3 majority") // 7th voted for some other block. 
{ diff --git a/types/vote_test.go b/types/vote_test.go index 166f18d521..c01912860c 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -272,8 +272,15 @@ func TestVoteExtension(t *testing.T) { expectError: false, }, { - name: "no extension signature", - extension: []byte("extension"), + name: "no extension signature", + extensions: VoteExtensions{ + tmproto.VoteExtensionType_DEFAULT: []VoteExtension{ + { + Extension: []byte("extension"), + Signature: nil, + }, + }, + }, includeSignature: false, expectError: true, }, @@ -468,7 +475,6 @@ func TestValidVotes(t *testing.T) { signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, tc.vote, stateID, nil) tc.malleateVote(tc.vote) require.NoError(t, tc.vote.ValidateBasic(), "ValidateBasic for %s", tc.name) - require.NoError(t, tc.vote.EnsureExtension(), "EnsureExtension for %s", tc.name) } } @@ -497,13 +503,11 @@ func TestInvalidVotes(t *testing.T) { signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, prevote, stateID, nil) tc.malleateVote(prevote) require.Error(t, prevote.ValidateBasic(), "ValidateBasic for %s in invalid prevote", tc.name) - require.NoError(t, prevote.EnsureExtension(), "EnsureExtension for %s in invalid prevote", tc.name) precommit := examplePrecommit(t) signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, precommit, stateID, nil) tc.malleateVote(precommit) require.Error(t, precommit.ValidateBasic(), "ValidateBasic for %s in invalid precommit", tc.name) - require.NoError(t, precommit.EnsureExtension(), "EnsureExtension for %s in invalid precommit", tc.name) } } @@ -538,7 +542,6 @@ func TestInvalidPrevotes(t *testing.T) { signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, prevote, stateID, nil) tc.malleateVote(prevote) require.Error(t, prevote.ValidateBasic(), "ValidateBasic for %s", tc.name) - require.NoError(t, prevote.EnsureExtension(), "EnsureExtension for %s", tc.name) } } @@ -582,36 +585,6 @@ func TestInvalidPrecommitExtensions(t *testing.T) { } } -func 
TestEnsureVoteExtension(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - privVal := NewMockPV() - - testCases := []struct { - name string - malleateVote func(*Vote) - expectError bool - }{ - {"vote extension signature absent", func(v *Vote) { - v.Extension = nil - v.ExtensionSignature = nil - }, true}, - {"vote extension signature present", func(v *Vote) { - v.ExtensionSignature = []byte("extension signature") - }, false}, - } - for _, tc := range testCases { - precommit := examplePrecommit(t) - signVote(ctx, t, privVal, "test_chain_id", precommit) - tc.malleateVote(precommit) - if tc.expectError { - require.Error(t, precommit.EnsureExtension(), "EnsureExtension for %s", tc.name) - } else { - require.NoError(t, precommit.EnsureExtension(), "EnsureExtension for %s", tc.name) - } - } -} - func TestVoteProtobuf(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() From 467e268e6ac5d4eee45d42a1d9213f51740ba00a Mon Sep 17 00:00:00 2001 From: shotonoff Date: Thu, 11 Aug 2022 15:41:46 +0200 Subject: [PATCH 192/203] fix: blocksync tests --- internal/blocksync/reactor.go | 4 ++-- internal/store/store.go | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 5109194c31..7a9b205f4e 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -198,9 +198,9 @@ func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, }) } - commit := r.store.LoadBlockCommit(msg.Height) + commit := r.store.LoadBlockSeenCommitAt(msg.Height) if commit == nil { - return fmt.Errorf("found block in store with no extended commit: %v", block) + return fmt.Errorf("found block in store with no commit: %v", block) } blockProto, err := block.ToProto() diff --git a/internal/store/store.go b/internal/store/store.go index 755b719422..5a3b36f742 100644 --- a/internal/store/store.go +++ 
b/internal/store/store.go @@ -528,6 +528,11 @@ func (bs *BlockStore) saveBlockToBatch(batch dbm.Batch, block *types.Block, bloc return err } + // stores seen-commit with a height, because tendermint does the same but only for extended commit + if err := batch.Set(blockCommitKey(height), seenCommitBytes); err != nil { + return err + } + return nil } From 03a1415e52212fc3a5aad39680340eeaf685d262 Mon Sep 17 00:00:00 2001 From: shotonoff Date: Fri, 12 Aug 2022 17:57:24 +0200 Subject: [PATCH 193/203] fix: some missed stuff during merging --- abci/example/kvstore/kvstore.go | 1 + abci/types/types.go | 41 ++++++ internal/blocksync/reactor.go | 2 +- internal/consensus/reactor.go | 4 +- internal/consensus/replay_test.go | 4 +- internal/consensus/state.go | 4 +- internal/consensus/types/height_vote_set.go | 24 +--- internal/state/execution.go | 5 - internal/state/mocks/block_store.go | 16 +++ internal/state/services.go | 1 + internal/state/state_test.go | 19 ++- internal/state/validation_test.go | 4 +- internal/store/store.go | 54 +++++--- internal/store/store_test.go | 6 +- internal/test/factory/params.go | 1 - proto/tendermint/types/params.pb.go | 143 +++++++------------- proto/tendermint/types/params.proto | 11 -- rpc/client/rpc_test.go | 2 +- rpc/coretypes/responses.go | 3 +- rpc/coretypes/responses_test.go | 51 ++++--- types/params.go | 37 +---- types/params_test.go | 95 +------------ types/validator_set_test.go | 2 - types/vote.go | 9 +- types/vote_test.go | 44 +++--- 25 files changed, 231 insertions(+), 352 deletions(-) diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 7ba0b3bb60..f7ec4b5430 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -206,6 +206,7 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal return &types.ResponseFinalizeBlock{ TxResults: respTxs, ValidatorSetUpdate: proto.Clone(&app.valSetUpdate).(*types.ValidatorSetUpdate), + AppHash: appHash, }, nil 
} diff --git a/abci/types/types.go b/abci/types/types.go index 2d24b4ee17..78cb13ed42 100644 --- a/abci/types/types.go +++ b/abci/types/types.go @@ -183,6 +183,47 @@ func (v *ValidatorUpdate) UnmarshalJSON(data []byte) error { return nil } +type validatorSetUpdateJSON struct { + ValidatorUpdates []ValidatorUpdate `json:"validator_updates"` + ThresholdPubKey json.RawMessage `json:"threshold_public_key"` + QuorumHash []byte `json:"quorum_hash,omitempty"` +} + +func (m *ValidatorSetUpdate) MarshalJSON() ([]byte, error) { + key, err := encoding.PubKeyFromProto(m.ThresholdPublicKey) + if err != nil { + return nil, err + } + jkey, err := jsontypes.Marshal(key) + if err != nil { + return nil, err + } + return json.Marshal(validatorSetUpdateJSON{ + ValidatorUpdates: m.ValidatorUpdates, + ThresholdPubKey: jkey, + QuorumHash: m.QuorumHash, + }) +} + +func (m *ValidatorSetUpdate) UnmarshalJSON(data []byte) error { + var vsu validatorSetUpdateJSON + if err := json.Unmarshal(data, &vsu); err != nil { + return err + } + var key crypto.PubKey + if err := jsontypes.Unmarshal(vsu.ThresholdPubKey, &key); err != nil { + return err + } + pkey, err := encoding.PubKeyToProto(key) + if err != nil { + return err + } + m.ValidatorUpdates = vsu.ValidatorUpdates + m.ThresholdPublicKey = pkey + m.QuorumHash = vsu.QuorumHash + return nil +} + // Some compile time assertions to ensure we don't // have accidental runtime surprises later on. 
diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 7a9b205f4e..7bd9f9a0c8 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -198,7 +198,7 @@ func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, }) } - commit := r.store.LoadBlockSeenCommitAt(msg.Height) + commit := r.store.LoadSeenCommitAt(msg.Height) if commit == nil { return fmt.Errorf("found block in store with no commit: %v", block) } diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 2c3bb9faf8..a090c32329 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -253,7 +253,7 @@ func (r *Reactor) WaitSync() bool { func (r *Reactor) SwitchToConsensus(ctx context.Context, state sm.State, skipWAL bool) { r.logger.Info("switching to consensus") - // We have no votes, so reconstruct LastPrecommits from SeenCommit. + // we have no votes, so reconstruct LastCommit from SeenCommit if state.LastBlockHeight > 0 { r.state.reconstructLastCommit(state) } @@ -697,7 +697,7 @@ func (r *Reactor) gossipVotesForHeight( ) (bool, error) { logger := r.logger.With("height", prs.Height).With("peer", ps.peerID) - // If there are lastPrecommits to send... + // if there are lastPrecommits to send... 
if prs.Step == cstypes.RoundStepNewHeight { if ok, err := r.pickSendVote(ctx, ps, rs.LastPrecommits, voteCh); err != nil { logger.Debug("picked previous precommit vote to send") diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index ffce1745b1..acbee3e133 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -1522,8 +1522,6 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { } } func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } -func (bs *mockBlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { -} func (bs *mockBlockStore) SaveBlock( block *types.Block, blockParts *types.PartSet, @@ -1537,7 +1535,7 @@ func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { func (bs *mockBlockStore) LoadSeenCommit() *types.Commit { return bs.commits[len(bs.commits)-1] } -func (bs *mockBlockStore) LoadBlockExtendedCommit(height int64) *types.Commit { +func (bs *mockBlockStore) LoadSeenCommitAt(height int64) *types.Commit { return bs.commits[height-1] } diff --git a/internal/consensus/state.go b/internal/consensus/state.go index e877d4a221..76e5fbf745 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -2152,10 +2152,12 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) { // but may differ from the LastPrecommits included in the next block precommits := cs.Votes.Precommits(cs.CommitRound) seenCommit := precommits.MakeCommit() - cs.blockStore.SaveBlock(block, blockParts, seenCommit) + cs.applyCommit(ctx, seenCommit, logger) } else { // Happens during replay if we already saved the block but didn't commit logger.Debug("calling finalizeCommit on already stored block", "height", block.Height) + // Todo: do we need this? 
+ cs.applyCommit(ctx, nil, logger) } } diff --git a/internal/consensus/types/height_vote_set.go b/internal/consensus/types/height_vote_set.go index b2d03fb1eb..dcae57a7b8 100644 --- a/internal/consensus/types/height_vote_set.go +++ b/internal/consensus/types/height_vote_set.go @@ -39,11 +39,10 @@ We let each peer provide us with up to 2 unexpected "catchup" rounds. One for their LastPrecommits round, and another for the official commit round. */ type HeightVoteSet struct { - chainID string - height int64 - valSet *types.ValidatorSet - stateID types.StateID // State ID describing current state (eg. previous height and previous app hash) - extensionsEnabled bool + chainID string + height int64 + valSet *types.ValidatorSet + stateID types.StateID // State ID describing current state (eg. previous height and previous app hash) mtx sync.Mutex round int32 // max tracked round @@ -53,19 +52,8 @@ type HeightVoteSet struct { func NewHeightVoteSet(chainID string, height int64, stateID types.StateID, valSet *types.ValidatorSet) *HeightVoteSet { hvs := &HeightVoteSet{ - chainID: chainID, - stateID: stateID, - extensionsEnabled: false, - } - hvs.Reset(height, valSet) - return hvs -} - -func NewExtendedHeightVoteSet(chainID string, height int64, stateID types.StateID, valSet *types.ValidatorSet) *HeightVoteSet { - hvs := &HeightVoteSet{ - chainID: chainID, - stateID: stateID, - extensionsEnabled: true, + chainID: chainID, + stateID: stateID, } hvs.Reset(height, valSet) return hvs diff --git a/internal/state/execution.go b/internal/state/execution.go index d9d41f51fa..4ed1ed20a5 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -624,11 +624,6 @@ func (state State) Update( return state, fmt.Errorf("updating consensus params: %w", err) } - err = state.ConsensusParams.ValidateUpdate(consensusParamUpdates, header.Height) - if err != nil { - return state, fmt.Errorf("updating consensus params: %w", err) - } - state.Version.Consensus.App = 
nextParams.Version.AppVersion // Change results from this height but only applies to the next height. diff --git a/internal/state/mocks/block_store.go b/internal/state/mocks/block_store.go index 2ee5d2557c..cf61a8587e 100644 --- a/internal/state/mocks/block_store.go +++ b/internal/state/mocks/block_store.go @@ -167,6 +167,22 @@ func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { return r0 } +// LoadBlockSeenCommitAt provides a mock function with given fields: height +func (_m *BlockStore) LoadSeenCommitAt(height int64) *types.Commit { + ret := _m.Called(height) + + var r0 *types.Commit + if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { + r0 = rf(height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Commit) + } + } + + return r0 +} + // LoadSeenCommit provides a mock function with given fields: func (_m *BlockStore) LoadSeenCommit() *types.Commit { ret := _m.Called() diff --git a/internal/state/services.go b/internal/state/services.go index 40365f2fbf..c8f4ffe5ee 100644 --- a/internal/state/services.go +++ b/internal/state/services.go @@ -37,6 +37,7 @@ type BlockStore interface { LoadBlockCommit(height int64) *types.Commit LoadSeenCommit() *types.Commit + LoadSeenCommitAt(height int64) *types.Commit } //----------------------------------------------------------------------------- diff --git a/internal/state/state_test.go b/internal/state/state_test.go index c0635b07f5..867bb41be0 100644 --- a/internal/state/state_test.go +++ b/internal/state/state_test.go @@ -23,7 +23,6 @@ import ( sm "github.com/tendermint/tendermint/internal/state" statefactory "github.com/tendermint/tendermint/internal/state/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -1018,16 +1017,16 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { changeIndex++ cp = params[changeIndex] } - header, _, blockID, responses 
:= makeHeaderPartsResponsesParams(t, state, &cp) - validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err = types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.FinalizeBlock.ValidatorSetUpdate) + header, _, blockID, fpResp := makeHeaderPartsResponsesParams(t, state, &cp) + validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err = types.PB2TM.ValidatorUpdatesFromValidatorSet(fpResp.ValidatorSetUpdate) require.NoError(t, err) - rs, err := abci.MarshalTxResults(responses.TxResults) + rs, err := abci.MarshalTxResults(fpResp.TxResults) require.NoError(t, err) h := merkle.HashFromByteSlices(rs) // Any node pro tx hash should do firstNodeProTxHash, _ := state.Validators.GetByIndex(0) - state, err = state.Update(firstNodeProTxHash, blockID, &header, h, responses.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) + state, err = state.Update(firstNodeProTxHash, blockID, &header, h, fpResp.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) require.NoError(t, err) err = stateStore.Save(state) @@ -1111,20 +1110,18 @@ func TestState_StateID(t *testing.T) { func blockExecutorFunc(t *testing.T, firstProTxHash crypto.ProTxHash) func(prevState, state sm.State, vsu *abci.ValidatorSetUpdate) sm.State { return func(prevState, state sm.State, vsu *abci.ValidatorSetUpdate) sm.State { t.Helper() - resp := &tmstate.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorSetUpdate: vsu}, - } + fpResp := &abci.ResponseFinalizeBlock{ValidatorSetUpdate: vsu} validatorUpdates, thresholdPubKey, quorumHash, err := - types.PB2TM.ValidatorUpdatesFromValidatorSet(resp.FinalizeBlock.ValidatorSetUpdate) + types.PB2TM.ValidatorUpdatesFromValidatorSet(fpResp.ValidatorSetUpdate) require.NoError(t, err) block, err := statefactory.MakeBlock(prevState, prevState.LastBlockHeight+1, new(types.Commit), nil, 0) require.NoError(t, err) blockID, err := block.BlockID() require.NoError(t, err) - rs, err := 
abci.MarshalTxResults(resp.FinalizeBlock.TxResults) + rs, err := abci.MarshalTxResults(fpResp.TxResults) require.NoError(t, err) h := merkle.HashFromByteSlices(rs) - state, err = state.Update(firstProTxHash, blockID, &block.Header, h, resp.FinalizeBlock.ConsensusParamUpdates, + state, err = state.Update(firstProTxHash, blockID, &block.Header, h, fpResp.ConsensusParamUpdates, validatorUpdates, thresholdPubKey, quorumHash) require.NoError(t, err) return state diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index 6ee1c78d89..303a729dfb 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -465,7 +465,7 @@ func TestValidateBlockEvidence(t *testing.T) { evidence = append(evidence, newEv) } - state, _, lastExtCommit = makeAndCommitGoodBlock( + state, _, lastCommit = makeAndCommitGoodBlock( ctx, t, state, @@ -478,7 +478,5 @@ func TestValidateBlockEvidence(t *testing.T) { evidence, 0, ) - lastCommit = lastExtCommit.ToCommit() - } } diff --git a/internal/store/store.go b/internal/store/store.go index 5a3b36f742..74cbaef4b0 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -268,23 +268,19 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { // and it comes from the block.LastCommit for `height+1`. // If no commit is found for the given height, it returns nil. 
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { - var pbc = new(tmproto.Commit) bz, err := bs.db.Get(blockCommitKey(height)) if err != nil { panic(err) } - if len(bz) == 0 { - return nil - } - err = proto.Unmarshal(bz, pbc) - if err != nil { - panic(fmt.Errorf("error reading block commit: %w", err)) - } - commit, err := types.CommitFromProto(pbc) + return mustDecodeCommit(bz) +} + +func (bs *BlockStore) LoadSeenCommitAt(height int64) *types.Commit { + bz, err := bs.db.Get(seenCommitAtKey(height)) if err != nil { - panic(fmt.Errorf("converting commit to proto: %w", err)) + panic(err) } - return commit + return mustDecodeCommit(bz) } // LoadSeenCommit returns the last locally seen Commit before being @@ -528,8 +524,8 @@ func (bs *BlockStore) saveBlockToBatch(batch dbm.Batch, block *types.Block, bloc return err } - // stores seen-commit with a height, because tendermint does the same but only for extended commit - if err := batch.Set(blockCommitKey(height), seenCommitBytes); err != nil { + // stores seen-commit at height, because tendermint does the same but only for extended commit + if err := batch.Set(seenCommitAtKey(height), seenCommitBytes); err != nil { return err } @@ -617,12 +613,12 @@ func (bs *BlockStore) Close() error { // * internal/p2p/peermanager.go [1] const ( // prefixes are unique across all tm db's - prefixBlockMeta = int64(0) - prefixBlockPart = int64(1) - prefixBlockCommit = int64(2) - prefixSeenCommit = int64(3) - prefixBlockHash = int64(4) - prefixExtCommit = int64(13) + prefixBlockMeta = int64(0) + prefixBlockPart = int64(1) + prefixBlockCommit = int64(2) + prefixSeenCommit = int64(3) + prefixBlockHash = int64(4) + prefixSeenCommitAt = int64(13) ) func blockMetaKey(height int64) []byte { @@ -672,8 +668,8 @@ func seenCommitKey() []byte { return key } -func extCommitKey(height int64) []byte { - key, err := orderedcode.Append(nil, prefixExtCommit, height) +func seenCommitAtKey(height int64) []byte { + key, err := 
orderedcode.Append(nil, prefixSeenCommitAt, height) if err != nil { panic(err) } @@ -698,3 +694,19 @@ func mustEncode(pb proto.Message) []byte { } return bz } + +func mustDecodeCommit(bz []byte) *types.Commit { + if len(bz) == 0 { + return nil + } + var pbc = new(tmproto.Commit) + err := proto.Unmarshal(bz, pbc) + if err != nil { + panic(fmt.Errorf("error reading block commit: %w", err)) + } + commit, err := types.CommitFromProto(pbc) + if err != nil { + panic(fmt.Errorf("converting commit to proto: %w", err)) + } + return commit +} diff --git a/internal/store/store_test.go b/internal/store/store_test.go index d2617445cc..da7d5b6619 100644 --- a/internal/store/store_test.go +++ b/internal/store/store_test.go @@ -97,7 +97,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { require.NoError(t, err) part2 := validPartSet.GetPart(1) - seenCommit := makeTestCommit(state, 10, tmtime.Now()) + seenCommit := makeTestCommit(state, block.Header.Height, tmtime.Now()) bs.SaveBlock(block, validPartSet, seenCommit) require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed") require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed") @@ -492,7 +492,7 @@ func TestBlockFetchAtHeight(t *testing.T) { partSet, err := block.MakePartSet(2) require.NoError(t, err) - seenCommit := makeTestCommit(state, 10, tmtime.Now()) + seenCommit := makeTestCommit(state, block.Header.Height, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") @@ -546,6 +546,8 @@ func TestSeenAndCanonicalCommit(t *testing.T) { require.Nil(t, c5) c6 := store.LoadBlockCommit(h - 1) require.Equal(t, blockCommit.Hash(), c6.Hash()) + c7 := store.LoadSeenCommitAt(h) + require.Equal(t, seenCommit.Hash(), c7.Hash()) } } diff --git a/internal/test/factory/params.go b/internal/test/factory/params.go index c6fa3f9fca..dda8e2b3ca 100644 --- a/internal/test/factory/params.go +++ 
b/internal/test/factory/params.go @@ -18,6 +18,5 @@ func ConsensusParams() *types.ConsensusParams { VoteDelta: 1 * time.Millisecond, BypassCommitTimeout: true, } - c.ABCI.VoteExtensionsEnableHeight = 1 return c } diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index 89ca14f957..5a2b3824f0 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -576,16 +576,6 @@ func (m *TimeoutParams) GetBypassCommitTimeout() bool { // ABCIParams configure functionality specific to the Application Blockchain Interface. type ABCIParams struct { - // vote_extensions_enable_height configures the first height during which - // vote extensions will be enabled. During this specified height, and for all - // subsequent heights, precommit messages that do not contain valid extension data - // will be considered invalid. Prior to this height, vote extensions will not - // be used or accepted by validators on the network. - // - // Once enabled, vote extensions will be created by the application in ExtendVote, - // passed to the application for validation in VerifyVoteExtension and given - // to the application to use when proposing a block during PrepareProposal. - VoteExtensionsEnableHeight int64 `protobuf:"varint,1,opt,name=vote_extensions_enable_height,json=voteExtensionsEnableHeight,proto3" json:"vote_extensions_enable_height,omitempty"` // Indicates if CheckTx should be called on all the transactions // remaining in the mempool after a block is executed. 
RecheckTx bool `protobuf:"varint,2,opt,name=recheck_tx,json=recheckTx,proto3" json:"recheck_tx,omitempty"` @@ -624,13 +614,6 @@ func (m *ABCIParams) XXX_DiscardUnknown() { var xxx_messageInfo_ABCIParams proto.InternalMessageInfo -func (m *ABCIParams) GetVoteExtensionsEnableHeight() int64 { - if m != nil { - return m.VoteExtensionsEnableHeight - } - return 0 -} - func (m *ABCIParams) GetRecheckTx() bool { if m != nil { return m.RecheckTx @@ -653,55 +636,53 @@ func init() { func init() { proto.RegisterFile("tendermint/types/params.proto", fileDescriptor_e12598271a686f57) } var fileDescriptor_e12598271a686f57 = []byte{ - // 762 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xdf, 0x6e, 0xdb, 0x36, - 0x14, 0xc6, 0xad, 0xd8, 0x71, 0xec, 0xe3, 0x38, 0x0e, 0xb8, 0x0d, 0xd3, 0xb2, 0x59, 0xce, 0x74, - 0x31, 0x04, 0x18, 0x20, 0x07, 0x09, 0x86, 0x60, 0xc0, 0xfe, 0x20, 0xb6, 0x83, 0x65, 0x18, 0x32, - 0x0c, 0x5a, 0xda, 0x8b, 0xdc, 0x08, 0x94, 0xcc, 0xca, 0x42, 0x2c, 0x51, 0x10, 0x29, 0xc3, 0x7a, - 0x8b, 0x5e, 0x15, 0x7d, 0x84, 0xf6, 0xa6, 0xcf, 0x91, 0xcb, 0x5c, 0xf6, 0xaa, 0x2d, 0x9c, 0x37, - 0xe8, 0x13, 0x14, 0xa4, 0xa8, 0x38, 0x76, 0x9a, 0xc6, 0x57, 0xa6, 0x79, 0xbe, 0x1f, 0x0f, 0xf9, - 0x9d, 0x23, 0x12, 0xda, 0x9c, 0x44, 0x43, 0x92, 0x84, 0x41, 0xc4, 0xbb, 0x3c, 0x8b, 0x09, 0xeb, - 0xc6, 0x38, 0xc1, 0x21, 0xb3, 0xe2, 0x84, 0x72, 0x8a, 0xb6, 0xe7, 0x61, 0x4b, 0x86, 0x77, 0xbe, - 0xf6, 0xa9, 0x4f, 0x65, 0xb0, 0x2b, 0x46, 0xb9, 0x6e, 0xc7, 0xf0, 0x29, 0xf5, 0xc7, 0xa4, 0x2b, - 0xff, 0xb9, 0xe9, 0xb3, 0xee, 0x30, 0x4d, 0x30, 0x0f, 0x68, 0x94, 0xc7, 0xcd, 0x37, 0x65, 0x68, - 0xf5, 0x69, 0xc4, 0x48, 0xc4, 0x52, 0xf6, 0x9f, 0xcc, 0x80, 0x0e, 0x61, 0xdd, 0x1d, 0x53, 0xef, - 0x52, 0xd7, 0x76, 0xb5, 0xbd, 0xc6, 0x41, 0xdb, 0x5a, 0xce, 0x65, 0xf5, 0x44, 0x38, 0x57, 0xdb, - 0xb9, 0x16, 0xfd, 0x06, 0x35, 0x32, 0x09, 0x86, 0x24, 0xf2, 0x88, 0xbe, 0x26, 0xb9, 0xdd, 0xfb, - 0xdc, 0x89, 0x52, 0x28, 0xf4, 0x96, 0x40, 0x7f, 
0x42, 0x7d, 0x82, 0xc7, 0xc1, 0x10, 0x73, 0x9a, - 0xe8, 0x65, 0x89, 0xff, 0x78, 0x1f, 0x7f, 0x5a, 0x48, 0x14, 0x3f, 0x67, 0xd0, 0xaf, 0xb0, 0x31, - 0x21, 0x09, 0x0b, 0x68, 0xa4, 0x57, 0x24, 0xde, 0xf9, 0x0c, 0x9e, 0x0b, 0x14, 0x5c, 0xe8, 0x45, - 0x6e, 0x96, 0x45, 0xde, 0x28, 0xa1, 0x51, 0xa6, 0xaf, 0x3f, 0x94, 0xfb, 0xff, 0x42, 0x52, 0xe4, - 0xbe, 0x65, 0x44, 0x6e, 0x1e, 0x84, 0x84, 0xa6, 0x5c, 0xaf, 0x3e, 0x94, 0xfb, 0x3c, 0x17, 0x14, - 0xb9, 0x95, 0x1e, 0xed, 0x43, 0x05, 0xbb, 0x5e, 0xa0, 0x6f, 0x48, 0xee, 0x87, 0xfb, 0xdc, 0x71, - 0xaf, 0xff, 0xb7, 0x82, 0xa4, 0xd2, 0xec, 0x43, 0xe3, 0x8e, 0xfb, 0xe8, 0x7b, 0xa8, 0x87, 0x78, - 0xea, 0xb8, 0x19, 0x27, 0x4c, 0xd6, 0xab, 0x6c, 0xd7, 0x42, 0x3c, 0xed, 0x89, 0xff, 0xe8, 0x5b, - 0xd8, 0x10, 0x41, 0x1f, 0x33, 0x59, 0x92, 0xb2, 0x5d, 0x0d, 0xf1, 0xf4, 0x2f, 0xcc, 0xcc, 0xd7, - 0x1a, 0x6c, 0x2d, 0xd6, 0x02, 0xfd, 0x0c, 0x48, 0x68, 0xb1, 0x4f, 0x9c, 0x28, 0x0d, 0x1d, 0x59, - 0xd4, 0x62, 0xc5, 0x56, 0x88, 0xa7, 0xc7, 0x3e, 0xf9, 0x37, 0x0d, 0x65, 0x6a, 0x86, 0xce, 0x60, - 0xbb, 0x10, 0x17, 0xfd, 0xa4, 0x8a, 0xfe, 0x9d, 0x95, 0x37, 0x9c, 0x55, 0x34, 0x9c, 0x35, 0x50, - 0x82, 0x5e, 0xed, 0xea, 0x5d, 0xa7, 0xf4, 0xf2, 0x7d, 0x47, 0xb3, 0xb7, 0xf2, 0xf5, 0x8a, 0xc8, - 0xe2, 0x21, 0xca, 0x8b, 0x87, 0x30, 0x7f, 0x81, 0xd6, 0x52, 0xdd, 0x91, 0x09, 0xcd, 0x38, 0x75, - 0x9d, 0x4b, 0x92, 0x39, 0xd2, 0x25, 0x5d, 0xdb, 0x2d, 0xef, 0xd5, 0xed, 0x46, 0x9c, 0xba, 0xff, - 0x90, 0xec, 0x5c, 0x4c, 0x99, 0xfb, 0xd0, 0x5c, 0xa8, 0x37, 0xea, 0x40, 0x03, 0xc7, 0xb1, 0x53, - 0x74, 0x89, 0x38, 0x59, 0xc5, 0x06, 0x1c, 0xc7, 0x4a, 0x66, 0x5e, 0xc0, 0xe6, 0x29, 0x66, 0x23, - 0x32, 0x54, 0xc0, 0x4f, 0xd0, 0x92, 0x2e, 0x38, 0xcb, 0x06, 0x37, 0xe5, 0xf4, 0x59, 0xe1, 0xb2, - 0x09, 0xcd, 0xb9, 0x6e, 0xee, 0x75, 0xa3, 0x50, 0x09, 0xc3, 0x5f, 0x68, 0xd0, 0x5a, 0xea, 0x20, - 0x34, 0x80, 0x66, 0x48, 0x18, 0x93, 0x26, 0x92, 0x31, 0xce, 0xd4, 0xe7, 0xf6, 0x05, 0x07, 0x2b, - 0xd2, 0xbd, 0x4d, 0x45, 0x0d, 0x04, 0x84, 0x7e, 0x87, 0x7a, 0x9c, 0x10, 0x2f, 0x60, 
0x2b, 0xd5, - 0x20, 0x5f, 0x61, 0x4e, 0x98, 0x1f, 0xd7, 0xa0, 0xb9, 0xd0, 0x9b, 0xa2, 0x9b, 0xe3, 0x84, 0xc6, - 0x94, 0x91, 0x55, 0x37, 0x54, 0xe8, 0xc5, 0x89, 0xd4, 0x50, 0x9c, 0x88, 0xe3, 0x55, 0xf7, 0xb3, - 0xa9, 0xa8, 0x81, 0x80, 0xd0, 0x21, 0x54, 0x26, 0x94, 0x13, 0x75, 0x0d, 0x3c, 0x0a, 0x4b, 0x31, - 0xfa, 0x03, 0x40, 0xfc, 0xaa, 0xbc, 0x95, 0x15, 0x7d, 0x10, 0x48, 0x9e, 0xf4, 0x08, 0xaa, 0x1e, - 0x0d, 0xc3, 0x80, 0xab, 0x1b, 0xe0, 0x51, 0x56, 0xc9, 0xd1, 0x01, 0x7c, 0xe3, 0x66, 0x31, 0x66, - 0xcc, 0xc9, 0x27, 0x9c, 0xbb, 0x57, 0x41, 0xcd, 0xfe, 0x2a, 0x0f, 0xf6, 0x65, 0x4c, 0x19, 0x6d, - 0x46, 0x00, 0xf3, 0xef, 0x1a, 0x1d, 0x43, 0x5b, 0x6e, 0x9d, 0x4c, 0x39, 0x89, 0x44, 0x51, 0x98, - 0x43, 0x22, 0xec, 0x8e, 0x89, 0x33, 0x22, 0x81, 0x3f, 0xe2, 0xaa, 0xeb, 0x76, 0x84, 0xe8, 0xe4, - 0x56, 0x73, 0x22, 0x25, 0xa7, 0x52, 0x81, 0xda, 0x00, 0x09, 0xf1, 0x46, 0xc4, 0xbb, 0x74, 0xf8, - 0x54, 0xba, 0x5e, 0xb3, 0xeb, 0x6a, 0xe6, 0x7c, 0xda, 0x7b, 0xf2, 0x6a, 0x66, 0x68, 0x57, 0x33, - 0x43, 0xbb, 0x9e, 0x19, 0xda, 0x87, 0x99, 0xa1, 0x3d, 0xbf, 0x31, 0x4a, 0xd7, 0x37, 0x46, 0xe9, - 0xed, 0x8d, 0x51, 0xba, 0x38, 0xf2, 0x03, 0x3e, 0x4a, 0x5d, 0xcb, 0xa3, 0x61, 0xf7, 0xee, 0xa3, - 0x33, 0x1f, 0xe6, 0xaf, 0xca, 0xf2, 0x83, 0xe4, 0x56, 0xe5, 0xfc, 0xe1, 0xa7, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xf0, 0x06, 0x54, 0xd3, 0xab, 0x06, 0x00, 0x00, + // 723 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xdf, 0x6e, 0xd3, 0x30, + 0x14, 0xc6, 0x9b, 0xb5, 0xeb, 0xda, 0xd3, 0x75, 0x9d, 0x0c, 0x88, 0x30, 0x58, 0x3a, 0x72, 0x81, + 0x26, 0x4d, 0x4a, 0xa7, 0x4d, 0x68, 0x42, 0xe2, 0x8f, 0xd6, 0x16, 0x01, 0x42, 0x43, 0x28, 0x0c, + 0x2e, 0x76, 0x13, 0x39, 0xa9, 0x49, 0xa3, 0x35, 0x71, 0x14, 0x27, 0x55, 0xf3, 0x16, 0x5c, 0x21, + 0x1e, 0x01, 0x6e, 0x78, 0x8e, 0x5d, 0xee, 0x92, 0x2b, 0x40, 0xdd, 0x1b, 0xf0, 0x04, 0xc8, 0x8e, + 0xb3, 0xae, 0x1d, 0x63, 0xbd, 0x8a, 0xe3, 0xf3, 0xfd, 0x7c, 0xe2, 0xef, 0x9c, 0xd8, 0xb0, 0x1e, + 0x93, 0xa0, 
0x47, 0x22, 0xdf, 0x0b, 0xe2, 0x56, 0x9c, 0x86, 0x84, 0xb5, 0x42, 0x1c, 0x61, 0x9f, + 0x19, 0x61, 0x44, 0x63, 0x8a, 0x56, 0x27, 0x61, 0x43, 0x84, 0xd7, 0x6e, 0xba, 0xd4, 0xa5, 0x22, + 0xd8, 0xe2, 0xa3, 0x4c, 0xb7, 0xa6, 0xb9, 0x94, 0xba, 0x03, 0xd2, 0x12, 0x6f, 0x76, 0xf2, 0xb1, + 0xd5, 0x4b, 0x22, 0x1c, 0x7b, 0x34, 0xc8, 0xe2, 0xfa, 0xf7, 0x22, 0x34, 0x3a, 0x34, 0x60, 0x24, + 0x60, 0x09, 0x7b, 0x2b, 0x32, 0xa0, 0x5d, 0x58, 0xb4, 0x07, 0xd4, 0x39, 0x56, 0x95, 0x0d, 0x65, + 0xb3, 0xb6, 0xb3, 0x6e, 0xcc, 0xe6, 0x32, 0xda, 0x3c, 0x9c, 0xa9, 0xcd, 0x4c, 0x8b, 0x1e, 0x43, + 0x85, 0x0c, 0xbd, 0x1e, 0x09, 0x1c, 0xa2, 0x2e, 0x08, 0x6e, 0xe3, 0x32, 0xf7, 0x5c, 0x2a, 0x24, + 0x7a, 0x4e, 0xa0, 0x67, 0x50, 0x1d, 0xe2, 0x81, 0xd7, 0xc3, 0x31, 0x8d, 0xd4, 0xa2, 0xc0, 0xef, + 0x5f, 0xc6, 0x3f, 0xe4, 0x12, 0xc9, 0x4f, 0x18, 0xf4, 0x08, 0x96, 0x86, 0x24, 0x62, 0x1e, 0x0d, + 0xd4, 0x92, 0xc0, 0x9b, 0xff, 0xc0, 0x33, 0x81, 0x84, 0x73, 0x3d, 0xcf, 0xcd, 0xd2, 0xc0, 0xe9, + 0x47, 0x34, 0x48, 0xd5, 0xc5, 0xab, 0x72, 0xbf, 0xcb, 0x25, 0x79, 0xee, 0x73, 0x86, 0xe7, 0x8e, + 0x3d, 0x9f, 0xd0, 0x24, 0x56, 0xcb, 0x57, 0xe5, 0x3e, 0xcc, 0x04, 0x79, 0x6e, 0xa9, 0x47, 0xdb, + 0x50, 0xc2, 0xb6, 0xe3, 0xa9, 0x4b, 0x82, 0xbb, 0x77, 0x99, 0xdb, 0x6f, 0x77, 0x5e, 0x49, 0x48, + 0x28, 0xf5, 0x0e, 0xd4, 0x2e, 0xb8, 0x8f, 0xee, 0x42, 0xd5, 0xc7, 0x23, 0xcb, 0x4e, 0x63, 0xc2, + 0x44, 0xbd, 0x8a, 0x66, 0xc5, 0xc7, 0xa3, 0x36, 0x7f, 0x47, 0xb7, 0x61, 0x89, 0x07, 0x5d, 0xcc, + 0x44, 0x49, 0x8a, 0x66, 0xd9, 0xc7, 0xa3, 0x17, 0x98, 0xe9, 0xdf, 0x14, 0x58, 0x99, 0xae, 0x05, + 0xda, 0x02, 0xc4, 0xb5, 0xd8, 0x25, 0x56, 0x90, 0xf8, 0x96, 0x28, 0x6a, 0xbe, 0x62, 0xc3, 0xc7, + 0xa3, 0x7d, 0x97, 0xbc, 0x49, 0x7c, 0x91, 0x9a, 0xa1, 0x03, 0x58, 0xcd, 0xc5, 0x79, 0x3f, 0xc9, + 0xa2, 0xdf, 0x31, 0xb2, 0x86, 0x33, 0xf2, 0x86, 0x33, 0xba, 0x52, 0xd0, 0xae, 0x9c, 0xfc, 0x6c, + 0x16, 0xbe, 0xfc, 0x6a, 0x2a, 0xe6, 0x4a, 0xb6, 0x5e, 0x1e, 0x99, 0xde, 0x44, 0x71, 0x7a, 0x13, + 0xfa, 0x43, 0x68, 0xcc, 0xd4, 0x1d, 0xe9, 0x50, 
0x0f, 0x13, 0xdb, 0x3a, 0x26, 0xa9, 0x25, 0x5c, + 0x52, 0x95, 0x8d, 0xe2, 0x66, 0xd5, 0xac, 0x85, 0x89, 0xfd, 0x9a, 0xa4, 0x87, 0x7c, 0x4a, 0xdf, + 0x86, 0xfa, 0x54, 0xbd, 0x51, 0x13, 0x6a, 0x38, 0x0c, 0xad, 0xbc, 0x4b, 0xf8, 0xce, 0x4a, 0x26, + 0xe0, 0x30, 0x94, 0x32, 0xfd, 0x08, 0x96, 0x5f, 0x62, 0xd6, 0x27, 0x3d, 0x09, 0x3c, 0x80, 0x86, + 0x70, 0xc1, 0x9a, 0x35, 0xb8, 0x2e, 0xa6, 0x0f, 0x72, 0x97, 0x75, 0xa8, 0x4f, 0x74, 0x13, 0xaf, + 0x6b, 0xb9, 0x8a, 0x1b, 0xfe, 0x59, 0x81, 0xc6, 0x4c, 0x07, 0xa1, 0x2e, 0xd4, 0x7d, 0xc2, 0x98, + 0x30, 0x91, 0x0c, 0x70, 0x2a, 0x7f, 0xb7, 0xff, 0x38, 0x58, 0x12, 0xee, 0x2d, 0x4b, 0xaa, 0xcb, + 0x21, 0xf4, 0x04, 0xaa, 0x61, 0x44, 0x1c, 0x8f, 0xcd, 0x55, 0x83, 0x6c, 0x85, 0x09, 0xa1, 0xff, + 0x59, 0x80, 0xfa, 0x54, 0x6f, 0xf2, 0x6e, 0x0e, 0x23, 0x1a, 0x52, 0x46, 0xe6, 0xfd, 0xa0, 0x5c, + 0xcf, 0x77, 0x24, 0x87, 0x7c, 0x47, 0x31, 0x9e, 0xf7, 0x7b, 0x96, 0x25, 0xd5, 0xe5, 0x10, 0xda, + 0x85, 0xd2, 0x90, 0xc6, 0x44, 0x1e, 0x03, 0xd7, 0xc2, 0x42, 0x8c, 0x9e, 0x02, 0xf0, 0xa7, 0xcc, + 0x5b, 0x9a, 0xd3, 0x07, 0x8e, 0x64, 0x49, 0xf7, 0xa0, 0xec, 0x50, 0xdf, 0xf7, 0x62, 0x79, 0x02, + 0x5c, 0xcb, 0x4a, 0x39, 0xda, 0x81, 0x5b, 0x76, 0x1a, 0x62, 0xc6, 0xac, 0x6c, 0xc2, 0xba, 0x78, + 0x14, 0x54, 0xcc, 0x1b, 0x59, 0xb0, 0x23, 0x62, 0xd2, 0x68, 0x7d, 0x0b, 0x60, 0xf2, 0x5f, 0xa3, + 0x75, 0x80, 0x88, 0x38, 0x7d, 0xe2, 0x1c, 0x5b, 0xf1, 0x48, 0x58, 0x56, 0x31, 0xab, 0x72, 0xe6, + 0x70, 0xd4, 0x7e, 0xff, 0x75, 0xac, 0x29, 0x27, 0x63, 0x4d, 0x39, 0x1d, 0x6b, 0xca, 0xef, 0xb1, + 0xa6, 0x7c, 0x3a, 0xd3, 0x0a, 0xa7, 0x67, 0x5a, 0xe1, 0xc7, 0x99, 0x56, 0x38, 0xda, 0x73, 0xbd, + 0xb8, 0x9f, 0xd8, 0x86, 0x43, 0xfd, 0xd6, 0xc5, 0x1b, 0x63, 0x32, 0xcc, 0xae, 0x84, 0xd9, 0xdb, + 0xc4, 0x2e, 0x8b, 0xf9, 0xdd, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0x18, 0xb7, 0x2e, 0x68, + 0x06, 0x00, 0x00, } func (this *ConsensusParams) Equal(that interface{}) bool { @@ -1010,9 +991,6 @@ func (this *ABCIParams) Equal(that interface{}) bool { } else if this == nil { 
return false } - if this.VoteExtensionsEnableHeight != that1.VoteExtensionsEnableHeight { - return false - } if this.RecheckTx != that1.RecheckTx { return false } @@ -1448,11 +1426,6 @@ func (m *ABCIParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x10 } - if m.VoteExtensionsEnableHeight != 0 { - i = encodeVarintParams(dAtA, i, uint64(m.VoteExtensionsEnableHeight)) - i-- - dAtA[i] = 0x8 - } return len(dAtA) - i, nil } @@ -1633,9 +1606,6 @@ func (m *ABCIParams) Size() (n int) { } var l int _ = l - if m.VoteExtensionsEnableHeight != 0 { - n += 1 + sovParams(uint64(m.VoteExtensionsEnableHeight)) - } if m.RecheckTx { n += 2 } @@ -2799,25 +2769,6 @@ func (m *ABCIParams) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: ABCIParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VoteExtensionsEnableHeight", wireType) - } - m.VoteExtensionsEnableHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.VoteExtensionsEnableHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RecheckTx", wireType) diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto index 058e30155f..6b30b415ee 100644 --- a/proto/tendermint/types/params.proto +++ b/proto/tendermint/types/params.proto @@ -131,17 +131,6 @@ message TimeoutParams { // ABCIParams configure functionality specific to the Application Blockchain Interface. message ABCIParams { - // vote_extensions_enable_height configures the first height during which - // vote extensions will be enabled. 
During this specified height, and for all - // subsequent heights, precommit messages that do not contain valid extension data - // will be considered invalid. Prior to this height, vote extensions will not - // be used or accepted by validators on the network. - // - // Once enabled, vote extensions will be created by the application in ExtendVote, - // passed to the application for validation in VerifyVoteExtension and given - // to the application to use when proposing a block during PrepareProposal. - int64 vote_extensions_enable_height = 1; - // Indicates if CheckTx should be called on all the transactions // remaining in the mempool after a block is executed. bool recheck_tx = 2; diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 5ebea5c3ad..ac8d3c00d1 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -16,6 +16,7 @@ import ( "github.com/dashevo/dashd-go/btcjson" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/privval" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" @@ -24,7 +25,6 @@ import ( "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpclocal "github.com/tendermint/tendermint/rpc/client/local" diff --git a/rpc/coretypes/responses.go b/rpc/coretypes/responses.go index e3895b08ce..28ecd79405 100644 --- a/rpc/coretypes/responses.go +++ b/rpc/coretypes/responses.go @@ -70,7 +70,6 @@ type ResultBlockResults struct { TxsResults []*abci.ExecTxResult `json:"txs_results"` TotalGasUsed int64 `json:"total_gas_used,string"` FinalizeBlockEvents []abci.Event `json:"finalize_block_events"` - ValidatorUpdates []abci.ValidatorUpdate `json:"validator_updates"` ValidatorSetUpdate 
*abci.ValidatorSetUpdate `json:"validator_set_updates"` ConsensusParamUpdates *types.ConsensusParams `json:"consensus_param_updates"` } @@ -268,7 +267,7 @@ type ResultBroadcastTx struct { Codespace string `json:"codespace"` Hash bytes.HexBytes `json:"hash"` - Info string `json:"info"` + Info string `json:"info"` } // CheckTx and DeliverTx results diff --git a/rpc/coretypes/responses_test.go b/rpc/coretypes/responses_test.go index bf66db0c95..1b9be7e0bf 100644 --- a/rpc/coretypes/responses_test.go +++ b/rpc/coretypes/responses_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto" pbcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" "github.com/tendermint/tendermint/types" ) @@ -44,36 +45,54 @@ func TestStatusIndexer(t *testing.T) { // A regression test for https://github.com/tendermint/tendermint/issues/8583. func TestResultBlockResults_regression8583(t *testing.T) { - const keyData = "0123456789abcdef0123456789abcdef" // 32 bytes - wantKey := base64.StdEncoding.EncodeToString([]byte(keyData)) + const keyData = "0123456789abcdef0123456789abcdef0123456789abcdef" // 48 bytes + quorumHash := crypto.RandQuorumHash() + quorumHashB64 := base64.StdEncoding.EncodeToString(quorumHash.Bytes()) rsp := &ResultBlockResults{ - ValidatorUpdates: []abci.ValidatorUpdate{{ - PubKey: pbcrypto.PublicKey{ - Sum: &pbcrypto.PublicKey_Ed25519{Ed25519: []byte(keyData)}, + ValidatorSetUpdate: &abci.ValidatorSetUpdate{ + ValidatorUpdates: []abci.ValidatorUpdate{ + { + PubKey: &pbcrypto.PublicKey{ + Sum: &pbcrypto.PublicKey_Bls12381{Bls12381: []byte(keyData)}, + }, + Power: 400, + }, }, - Power: 400, - }}, + ThresholdPublicKey: pbcrypto.PublicKey{ + Sum: &pbcrypto.PublicKey_Bls12381{Bls12381: []byte(keyData)}, + }, + QuorumHash: quorumHash, + }, } // Use compact here so the test data remain legible. 
The output from the // marshaler will have whitespace folded out so we need to do that too for // the comparison to be valid. var buf bytes.Buffer - require.NoError(t, json.Compact(&buf, []byte(fmt.Sprintf(` -{ + require.NoError(t, json.Compact(&buf, []byte(fmt.Sprintf(`{ "height": "0", "txs_results": null, "total_gas_used": "0", "finalize_block_events": null, - "validator_updates": [ - { - "pub_key":{"type": "tendermint/PubKeyEd25519", "value": "%s"}, - "power": "400" - } - ], + "validator_set_updates": { + "validator_updates": [ + { + "pub_key": { + "type": "tendermint/PubKeyBLS12381", + "value": "MDEyMzQ1Njc4OWFiY2RlZjAxMjM0NTY3ODlhYmNkZWYwMTIzNDU2Nzg5YWJjZGVm" + }, + "power": "400" + } + ], + "threshold_public_key": { + "type": "tendermint/PubKeyBLS12381", + "value": "MDEyMzQ1Njc4OWFiY2RlZjAxMjM0NTY3ODlhYmNkZWYwMTIzNDU2Nzg5YWJjZGVm" + }, + "quorum_hash": "%s" + }, "consensus_param_updates": null -}`, wantKey)))) +}`, quorumHashB64)))) bits, err := json.Marshal(rsp) if err != nil { diff --git a/types/params.go b/types/params.go index d507807318..19a1e72299 100644 --- a/types/params.go +++ b/types/params.go @@ -96,8 +96,7 @@ type TimeoutParams struct { // ABCIParams configure ABCI functionality specific to the Application Blockchain // Interface. type ABCIParams struct { - VoteExtensionsEnableHeight int64 `json:"vote_extensions_enable_height"` - RecheckTx bool `json:"recheck_tx"` + RecheckTx bool `json:"recheck_tx"` } // DefaultConsensusParams returns a default ConsensusParams. @@ -183,8 +182,6 @@ func DefaultTimeoutParams() TimeoutParams { func DefaultABCIParams() ABCIParams { return ABCIParams{ - // When set to 0, vote extensions are not required. - VoteExtensionsEnableHeight: 0, // When true, run CheckTx on each transaction in the mempool after each height. RecheckTx: true, } @@ -320,9 +317,6 @@ func (params ConsensusParams) ValidateConsensusParams() error { if params.Timeout.Commit <= 0 { return fmt.Errorf("timeout.Commit must be greater than 0. 
Got: %d", params.Timeout.Commit) } - if params.ABCI.VoteExtensionsEnableHeight < 0 { - return fmt.Errorf("ABCI.VoteExtensionsEnableHeight cannot be negative. Got: %d", params.ABCI.VoteExtensionsEnableHeight) - } if len(params.Validator.PubKeyTypes) == 0 { return errors.New("len(Validator.PubKeyTypes) must be greater than 0") @@ -340,30 +334,6 @@ func (params ConsensusParams) ValidateConsensusParams() error { return nil } -func (params ConsensusParams) ValidateUpdate(updated *tmproto.ConsensusParams, h int64) error { - if updated.Abci == nil { - return nil - } - if params.ABCI.VoteExtensionsEnableHeight == updated.Abci.VoteExtensionsEnableHeight { - return nil - } - if params.ABCI.VoteExtensionsEnableHeight != 0 && updated.Abci.VoteExtensionsEnableHeight == 0 { - return errors.New("vote extensions cannot be disabled once enabled") - } - if updated.Abci.VoteExtensionsEnableHeight <= h { - return fmt.Errorf("VoteExtensionsEnableHeight cannot be updated to a past height, "+ - "initial height: %d, current height %d", - params.ABCI.VoteExtensionsEnableHeight, h) - } - if params.ABCI.VoteExtensionsEnableHeight <= h { - return fmt.Errorf("VoteExtensionsEnableHeight cannot be updated modified once"+ - "the initial height has occurred, "+ - "initial height: %d, current height %d", - params.ABCI.VoteExtensionsEnableHeight, h) - } - return nil -} - // Hash returns a hash of a subset of the parameters to store in the block header. // Only the Block.MaxBytes and Block.MaxGas are included in the hash. 
// This allows the ConsensusParams to evolve more without breaking the block @@ -449,7 +419,6 @@ func (params ConsensusParams) UpdateConsensusParams(params2 *tmproto.ConsensusPa res.Timeout.BypassCommitTimeout = params2.Timeout.GetBypassCommitTimeout() } if params2.Abci != nil { - res.ABCI.VoteExtensionsEnableHeight = params2.Abci.GetVoteExtensionsEnableHeight() res.ABCI.RecheckTx = params2.Abci.GetRecheckTx() } return res @@ -485,8 +454,7 @@ func (params *ConsensusParams) ToProto() tmproto.ConsensusParams { BypassCommitTimeout: params.Timeout.BypassCommitTimeout, }, Abci: &tmproto.ABCIParams{ - VoteExtensionsEnableHeight: params.ABCI.VoteExtensionsEnableHeight, - RecheckTx: params.ABCI.RecheckTx, + RecheckTx: params.ABCI.RecheckTx, }, } } @@ -536,7 +504,6 @@ func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams c.Timeout.BypassCommitTimeout = pbParams.Timeout.BypassCommitTimeout } if pbParams.Abci != nil { - c.ABCI.VoteExtensionsEnableHeight = pbParams.Abci.GetVoteExtensionsEnableHeight() c.ABCI.RecheckTx = pbParams.Abci.GetRecheckTx() } return c diff --git a/types/params_test.go b/types/params_test.go index b40c2b7c26..37a334bbb6 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -7,8 +7,6 @@ import ( "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -189,8 +187,6 @@ type makeParamsArgs struct { vote *time.Duration voteDelta *time.Duration commit *time.Duration - - abciExtensionHeight int64 } func makeParams(args makeParamsArgs) ConsensusParams { @@ -238,8 +234,7 @@ func makeParams(args makeParamsArgs) ConsensusParams { BypassCommitTimeout: args.bypassCommitTimeout, }, ABCI: ABCIParams{ - VoteExtensionsEnableHeight: args.abciExtensionHeight, - RecheckTx: args.recheck, + RecheckTx: args.recheck, }, } } @@ -294,20 +289,6 @@ func TestConsensusParamsUpdate(t *testing.T) { }, updatedParams: 
makeParams(makeParamsArgs{evidenceAge: 3, precision: 2 * time.Second, messageDelay: 4 * time.Second}), }, - { - // update timeout params - initialParams: makeParams(makeParamsArgs{ - abciExtensionHeight: 1, - }), - updates: &tmproto.ConsensusParams{ - Abci: &tmproto.ABCIParams{ - VoteExtensionsEnableHeight: 10, - }, - }, - updatedParams: makeParams(makeParamsArgs{ - abciExtensionHeight: 10, - }), - }, { // update timeout params initialParams: makeParams(makeParamsArgs{ @@ -401,78 +382,6 @@ func TestConsensusParamsUpdate_AppVersion(t *testing.T) { assert.EqualValues(t, 1, updated.Version.AppVersion) } -func TestConsensusParamsUpdate_VoteExtensionsEnableHeight(t *testing.T) { - t.Run("set to height but initial height already run", func(*testing.T) { - initialParams := makeParams(makeParamsArgs{ - abciExtensionHeight: 1, - }) - update := &tmproto.ConsensusParams{ - Abci: &tmproto.ABCIParams{ - VoteExtensionsEnableHeight: 10, - }, - } - require.Error(t, initialParams.ValidateUpdate(update, 1)) - require.Error(t, initialParams.ValidateUpdate(update, 5)) - }) - t.Run("reset to 0", func(t *testing.T) { - initialParams := makeParams(makeParamsArgs{ - abciExtensionHeight: 1, - }) - update := &tmproto.ConsensusParams{ - Abci: &tmproto.ABCIParams{ - VoteExtensionsEnableHeight: 0, - }, - } - require.Error(t, initialParams.ValidateUpdate(update, 1)) - }) - t.Run("set to height before current height run", func(*testing.T) { - initialParams := makeParams(makeParamsArgs{ - abciExtensionHeight: 100, - }) - update := &tmproto.ConsensusParams{ - Abci: &tmproto.ABCIParams{ - VoteExtensionsEnableHeight: 10, - }, - } - require.Error(t, initialParams.ValidateUpdate(update, 11)) - require.Error(t, initialParams.ValidateUpdate(update, 99)) - }) - t.Run("set to height after current height run", func(*testing.T) { - initialParams := makeParams(makeParamsArgs{ - abciExtensionHeight: 300, - }) - update := &tmproto.ConsensusParams{ - Abci: &tmproto.ABCIParams{ - VoteExtensionsEnableHeight: 99, 
- }, - } - require.NoError(t, initialParams.ValidateUpdate(update, 11)) - require.NoError(t, initialParams.ValidateUpdate(update, 98)) - }) - t.Run("no error when unchanged", func(*testing.T) { - initialParams := makeParams(makeParamsArgs{ - abciExtensionHeight: 100, - }) - update := &tmproto.ConsensusParams{ - Abci: &tmproto.ABCIParams{ - VoteExtensionsEnableHeight: 100, - }, - } - require.NoError(t, initialParams.ValidateUpdate(update, 500)) - }) - t.Run("updated from 0 to 0", func(t *testing.T) { - initialParams := makeParams(makeParamsArgs{ - abciExtensionHeight: 0, - }) - update := &tmproto.ConsensusParams{ - Abci: &tmproto.ABCIParams{ - VoteExtensionsEnableHeight: 0, - }, - } - require.NoError(t, initialParams.ValidateUpdate(update, 100)) - }) -} - func TestProto(t *testing.T) { params := []ConsensusParams{ makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1}), @@ -485,8 +394,6 @@ func TestProto(t *testing.T) { makeParams(makeParamsArgs{blockBytes: 4, blockGas: 6, evidenceAge: 5, maxEvidenceBytes: 1}), makeParams(makeParamsArgs{precision: time.Second, messageDelay: time.Minute}), makeParams(makeParamsArgs{precision: time.Nanosecond, messageDelay: time.Millisecond}), - makeParams(makeParamsArgs{abciExtensionHeight: 100}), - makeParams(makeParamsArgs{abciExtensionHeight: 100}), makeParams(makeParamsArgs{ propose: durationPtr(2 * time.Second), proposeDelta: durationPtr(400 * time.Millisecond), diff --git a/types/validator_set_test.go b/types/validator_set_test.go index e467e15f37..5f753af62d 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -1422,8 +1422,6 @@ func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) { // create a commit with n validators commit, err := makeCommit(ctx, blockID, stateID, h, 0, voteSet, vals) require.NoError(b, err) - commit := extCommit.ToCommit() - for i := 0; i < b.N/n; i++ { err = valSet.VerifyCommit(chainID, blockID, stateID, h, commit) assert.NoError(b, 
err) diff --git a/types/vote.go b/types/vote.go index 7cc9df7ca3..84b1767cc4 100644 --- a/types/vote.go +++ b/types/vote.go @@ -355,12 +355,19 @@ func (vote *Vote) ValidateBasic() error { } // We should only ever see vote extensions in precommits. - if vote.Type != tmproto.PrecommitType { + if vote.Type != tmproto.PrecommitType || (vote.Type == tmproto.PrecommitType && vote.BlockID.IsNil()) { if !vote.VoteExtensions.IsEmpty() { return errors.New("unexpected vote extensions") } } + if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() { + err := vote.VoteExtensions.Validate() + if err != nil { + return err + } + } + return nil } diff --git a/types/vote_test.go b/types/vote_test.go index c01912860c..3a9af89bd3 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -19,14 +19,14 @@ import ( const ( //nolint: lll - preCommitTestStr = `Vote{56789:6AF1F4111082 12345/2 Precommit 8B01023386C3 000000000000 0 @ 2017-12-25T03:00:01.234Z}` + preCommitTestStr = `Vote{56789:959A8F5EF2BE 12345/02/Precommit(8B01023386C3) 000000000000 000000000000 000000000000}` //nolint: lll - preVoteTestStr = `Vote{56789:6AF1F4111082 12345/2 Prevote 8B01023386C3 000000000000 0 @ 2017-12-25T03:00:01.234Z}` + preVoteTestStr = `Vote{56789:959A8F5EF2BE 12345/02/Prevote(8B01023386C3) 000000000000 000000000000 000000000000}` ) var ( // nolint: lll - nilVoteTestStr = fmt.Sprintf(`Vote{56789:6AF1F4111082 12345/2 Precommit %s 000000000000 0 @ 2017-12-25T03:00:01.234Z}`, nilVoteStr) + nilVoteTestStr = fmt.Sprintf(`Vote{56789:959A8F5EF2BE 12345/02/Precommit(%s) 000000000000 000000000000 000000000000}`, nilVoteStr) formatNonEmptyVoteExtensionFn = func(voteExtensionLength int) string { // nolint: lll return fmt.Sprintf(`Vote{56789:6AF1F4111082 12345/2 Precommit 8B01023386C3 000000000000 %d @ 2017-12-25T03:00:01.234Z}`, voteExtensionLength) @@ -266,7 +266,7 @@ func TestVoteExtension(t *testing.T) { { name: "all fields present", extensions: VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: 
[]VoteExtension{{Extension: []byte("extension")}}, + tmproto.VoteExtensionType_THRESHOLD_RECOVER: []VoteExtension{{Extension: []byte("extension")}}, }, includeSignature: true, expectError: false, @@ -274,12 +274,7 @@ func TestVoteExtension(t *testing.T) { { name: "no extension signature", extensions: VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []VoteExtension{ - { - Extension: []byte("extension"), - Signature: nil, - }, - }, + tmproto.VoteExtensionType_THRESHOLD_RECOVER: []VoteExtension{{Extension: []byte("extension")}}, }, includeSignature: false, expectError: true, @@ -289,11 +284,6 @@ func TestVoteExtension(t *testing.T) { includeSignature: true, expectError: false, }, - { - name: "no extension and no signature", - includeSignature: false, - expectError: true, - }, } logger := log.NewTestingLogger(t) @@ -320,7 +310,6 @@ func TestVoteExtension(t *testing.T) { BlockID: blockID, VoteExtensions: tc.extensions, } - v := vote.ToProto() err = privVal.SignVote(ctx, "test_chain_id", btcjson.LLMQType_5_60, quorumHash, v, stateID, logger) require.NoError(t, err) @@ -559,7 +548,7 @@ func TestInvalidPrecommitExtensions(t *testing.T) { { "vote extension present without signature", func(v *Vote) { v.VoteExtensions = VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: {{Extension: []byte("extension")}}, + tmproto.VoteExtensionType_THRESHOLD_RECOVER: {{Extension: []byte("extension")}}, } }, }, @@ -569,19 +558,22 @@ func TestInvalidPrecommitExtensions(t *testing.T) { "oversized vote extension signature", func(v *Vote) { v.VoteExtensions = VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []VoteExtension{{Signature: make([]byte, SignatureSize+1)}}, + tmproto.VoteExtensionType_THRESHOLD_RECOVER: []VoteExtension{{Signature: make([]byte, SignatureSize+1)}}, } }, }, } - for _, tc := range testCases { - precommit := examplePrecommit(t) - v := precommit.ToProto() - stateID := RandStateID().WithHeight(v.Height - 1) - signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, 
precommit, stateID, nil) - tc.malleateVote(precommit) - // ValidateBasic ensures that vote extensions, if present, are well formed - require.Error(t, precommit.ValidateBasic(), "ValidateBasic for %s", tc.name) + for i, tc := range testCases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + precommit := examplePrecommit(t) + v := precommit.ToProto() + stateID := RandStateID().WithHeight(v.Height - 1) + signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, precommit, stateID, nil) + tc.malleateVote(precommit) + // ValidateBasic ensures that vote extensions, if present, are well formed + require.Error(t, precommit.ValidateBasic(), "ValidateBasic for %s", tc.name) + require.Error(t, precommit.ValidateWithExtension(), "ValidateWithExtension for %s", tc.name) + }) } } From 5f3a1bb581aabc3b4f33d59d7af2b1a3e07170c9 Mon Sep 17 00:00:00 2001 From: shotonoff Date: Thu, 18 Aug 2022 10:10:28 +0200 Subject: [PATCH 194/203] fix: stability fixes --- abci/types/types.go | 36 ++++++----- internal/blocksync/pool.go | 32 ++++----- internal/blocksync/reactor.go | 26 -------- internal/consensus/reactor_test.go | 97 ---------------------------- internal/consensus/state.go | 3 +- node/node_test.go | 1 - test/e2e/node/main.go | 6 +- test/e2e/pkg/infra/docker/compose.go | 36 +++++++++-- test/e2e/pkg/infra/docker/infra.go | 2 +- test/e2e/pkg/testnet.go | 2 +- test/e2e/runner/setup.go | 86 +----------------------- test/e2e/tests/app_test.go | 27 -------- types/quorum.go | 12 ++++ types/vote.go | 18 +++--- 14 files changed, 98 insertions(+), 286 deletions(-) diff --git a/abci/types/types.go b/abci/types/types.go index 78cb13ed42..a63279df42 100644 --- a/abci/types/types.go +++ b/abci/types/types.go @@ -190,36 +190,40 @@ type validatorSetUpdateJSON struct { } func (m *ValidatorSetUpdate) MarshalJSON() ([]byte, error) { - key, err := encoding.PubKeyFromProto(m.ThresholdPublicKey) - if err != nil { - return nil, err - } - jkey, err := jsontypes.Marshal(key) - if err != nil { - return 
nil, err - } - return json.Marshal(validatorSetUpdateJSON{ + ret := validatorSetUpdateJSON{ ValidatorUpdates: m.ValidatorUpdates, - ThresholdPubKey: jkey, QuorumHash: m.QuorumHash, - }) + } + if m.ThresholdPublicKey.Sum != nil { + key, err := encoding.PubKeyFromProto(m.ThresholdPublicKey) + if err != nil { + return nil, err + } + ret.ThresholdPubKey, err = jsontypes.Marshal(key) + if err != nil { + return nil, err + } + } + return json.Marshal(ret) } func (m *ValidatorSetUpdate) UnmarshalJSON(data []byte) error { var vsu validatorSetUpdateJSON - if err := json.Unmarshal(data, &vsu); err != nil { + err := json.Unmarshal(data, &vsu) + if err != nil { return err } var key crypto.PubKey if err := jsontypes.Unmarshal(vsu.ThresholdPubKey, &key); err != nil { return err } - pkey, err := encoding.PubKeyToProto(key) - if err != nil { - return err + if key != nil { + m.ThresholdPublicKey, err = encoding.PubKeyToProto(key) + if err != nil { + return err + } } m.ValidatorUpdates = vsu.ValidatorUpdates - m.ThresholdPublicKey = pkey m.QuorumHash = vsu.QuorumHash return nil } diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index 1c856f22d0..0ac9a124f4 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -207,13 +207,13 @@ func (pool *BlockPool) IsCaughtUp() bool { // as we switch from block sync to consensus mode. // // The caller will verify the commit. 
-func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstExtCommit *types.Commit) { +func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstCommit *types.Commit) { pool.mtx.RLock() defer pool.mtx.RUnlock() if r := pool.requesters[pool.height]; r != nil { first = r.getBlock() - firstExtCommit = r.getCommit() + firstCommit = r.getCommit() } if r := pool.requesters[pool.height+1]; r != nil { second = r.getBlock() @@ -276,12 +276,12 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID { // height of the extended commit and the height of the block do not match, we // do not add the block and return an error. // TODO: ensure that blocks come in order for each peer. -func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, extCommit *types.Commit, blockSize int) error { +func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, commit *types.Commit, blockSize int) error { pool.mtx.Lock() defer pool.mtx.Unlock() - if extCommit != nil && block.Height != extCommit.Height { - return fmt.Errorf("heights don't match, not adding block (block height: %d, commit height: %d)", block.Height, extCommit.Height) + if commit != nil && block.Height != commit.Height { + return fmt.Errorf("heights don't match, not adding block (block height: %d, commit height: %d)", block.Height, commit.Height) } requester := pool.requesters[block.Height] @@ -296,7 +296,7 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, extComm return fmt.Errorf("peer sent us a block we didn't expect (peer: %s, current height: %d, block height: %d)", peerID, pool.height, block.Height) } - if requester.setBlock(block, extCommit, peerID) { + if requester.setBlock(block, commit, peerID) { atomic.AddInt32(&pool.numPending, -1) peer := pool.peers[peerID] if peer != nil { @@ -472,7 +472,7 @@ func (pool *BlockPool) debug() string { } else { str += fmt.Sprintf("H(%v):", h) str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block 
!= nil) - str += fmt.Sprintf("C?(%v) ", pool.requesters[h].extCommit != nil) + str += fmt.Sprintf("C?(%v) ", pool.requesters[h].commit != nil) } } return str @@ -561,10 +561,10 @@ type bpRequester struct { gotBlockCh chan struct{} redoCh chan types.NodeID // redo may send multitime, add peerId to identify repeat - mtx sync.Mutex - peerID types.NodeID - block *types.Block - extCommit *types.Commit + mtx sync.Mutex + peerID types.NodeID + block *types.Block + commit *types.Commit } func newBPRequester(logger log.Logger, pool *BlockPool, height int64) *bpRequester { @@ -590,15 +590,15 @@ func (bpr *bpRequester) OnStart(ctx context.Context) error { func (*bpRequester) OnStop() {} // Returns true if the peer matches and block doesn't already exist. -func (bpr *bpRequester) setBlock(block *types.Block, extCommit *types.Commit, peerID types.NodeID) bool { +func (bpr *bpRequester) setBlock(block *types.Block, commit *types.Commit, peerID types.NodeID) bool { bpr.mtx.Lock() if bpr.block != nil || bpr.peerID != peerID { bpr.mtx.Unlock() return false } bpr.block = block - if extCommit != nil { - bpr.extCommit = extCommit + if commit != nil { + bpr.commit = commit } bpr.mtx.Unlock() @@ -618,7 +618,7 @@ func (bpr *bpRequester) getBlock() *types.Block { func (bpr *bpRequester) getCommit() *types.Commit { bpr.mtx.Lock() defer bpr.mtx.Unlock() - return bpr.extCommit + return bpr.commit } func (bpr *bpRequester) getPeerID() types.NodeID { @@ -638,7 +638,7 @@ func (bpr *bpRequester) reset() { bpr.peerID = "" bpr.block = nil - bpr.extCommit = nil + bpr.commit = nil } // Tells bpRequester to pick another peer and try again. 
diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 7bd9f9a0c8..448dbbd7e1 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -450,8 +450,6 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh lastRate = 0.0 didProcessCh = make(chan struct{}, 1) - - initialCommitHasExtensions = (r.initialState.LastBlockHeight > 0 && r.store.LoadBlockCommit(r.initialState.LastBlockHeight) != nil) ) defer trySyncTicker.Stop() @@ -474,30 +472,6 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh switch { - // The case statement below is a bit confusing, so here is a breakdown - // of its logic and purpose: - // - // If extensions were required during state.LastBlockHeight and we have - // sync'd at least one block, then we are guaranteed to have extensions. - // BlockSync requires that the blocks it fetches have extensions if - // extensions were enabled during the height. - // - // If extensions were required during state.LastBlockHeight and we have - // not sync'd any blocks, then we can only transition to Consensus - // if we already had extensions for the initial height. - // If any of these conditions is not met, we continue the loop, looking - // for extensions. 
- case blocksSynced == 0 && !initialCommitHasExtensions: - r.logger.Info( - "no extended commit yet", - "height", height, - "last_block_height", state.LastBlockHeight, - "initial_height", state.InitialHeight, - "max_peer_height", r.pool.MaxPeerHeight(), - "timeout_in", syncTimeout-time.Since(lastAdvance), - ) - continue - case r.pool.IsCaughtUp(): r.logger.Info("switching to consensus reactor", "height", height) diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index cdc9c14460..466b864bb5 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -33,7 +33,6 @@ import ( "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -606,102 +605,6 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { wg.Wait() } -// TestSwitchToConsensusVoteExtensions tests that the SwitchToConsensus correctly -// checks for vote extension data when required. 
-func TestSwitchToConsensusVoteExtensions(t *testing.T) { - for _, testCase := range []struct { - name string - storedHeight int64 - initialRequiredHeight int64 - shouldPanic bool - }{ - { - name: "no vote extensions but not required", - initialRequiredHeight: 0, - storedHeight: 2, - shouldPanic: false, - }, - { - name: "no vote extensions but required this height", - initialRequiredHeight: 2, - storedHeight: 2, - shouldPanic: true, - }, - { - name: "no vote extensions and required in future", - initialRequiredHeight: 3, - storedHeight: 2, - shouldPanic: false, - }, - { - name: "no vote extensions and required previous height", - initialRequiredHeight: 1, - storedHeight: 2, - shouldPanic: true, - }, - { - name: "vote extensions and required previous height", - initialRequiredHeight: 1, - storedHeight: 2, - shouldPanic: false, - }, - } { - t.Run(testCase.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) - defer cancel() - cs, vs := makeState(ctx, t, makeStateArgs{validators: 1}) - validator := vs[0] - validator.Height = testCase.storedHeight - - cs.state.LastBlockHeight = testCase.storedHeight - cs.state.LastValidators = cs.state.Validators.Copy() - cs.state.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testCase.initialRequiredHeight - - propBlock, err := cs.createProposalBlock(ctx) - require.NoError(t, err) - - // Consensus is preparing to do the next height after the stored height. 
- cs.Height = testCase.storedHeight + 1 - propBlock.Height = testCase.storedHeight - blockParts, err := propBlock.MakePartSet(types.BlockPartSizeBytes) - require.NoError(t, err) - - voteSet := types.NewVoteSet(cs.state.ChainID, testCase.storedHeight, 0, tmproto.PrecommitType, cs.state.Validators, cs.state.StateID()) - signedVote := signVote(ctx, t, validator, tmproto.PrecommitType, cs.state.ChainID, types.BlockID{ - Hash: propBlock.Hash(), - PartSetHeader: blockParts.Header(), - }, - cs.state.AppHash, - cs.Validators.QuorumType, - cs.Validators.QuorumHash, - ) - - added, err := voteSet.AddVote(signedVote) - require.NoError(t, err) - require.True(t, added) - - cs.blockStore.SaveBlock(propBlock, blockParts, voteSet.MakeCommit()) - reactor := NewReactor( - log.NewNopLogger(), - cs, - nil, - nil, - cs.eventBus, - true, - NopMetrics(), - ) - - if testCase.shouldPanic { - assert.Panics(t, func() { - reactor.SwitchToConsensus(ctx, cs.state, false) - }) - } else { - reactor.SwitchToConsensus(ctx, cs.state, false) - } - }) - } -} - func TestReactorRecordsVotesAndBlockParts(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 76e5fbf745..0b36914499 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -2833,7 +2833,8 @@ func (cs *State) addVote( // Here, we verify the signature of the vote extension included in the vote // message. 
_, val := cs.state.Validators.GetByIndex(vote.ValidatorIndex) - if err := vote.VerifyExtension(cs.state.ChainID, val.PubKey); err != nil { + qt, qh := cs.state.Validators.QuorumType, cs.state.Validators.QuorumHash + if err := vote.VerifyExtension(cs.state.ChainID, val.PubKey, qt, qh); err != nil { return false, err } diff --git a/node/node_test.go b/node/node_test.go index fffcd5c335..5bbb5e08ab 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -37,7 +37,6 @@ import ( "github.com/tendermint/tendermint/libs/service" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index e72bbea626..6c36a005c1 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -104,9 +104,6 @@ func run(ctx context.Context, configFile string) error { } func startAppServer(ctx context.Context, cfg *Config, logger log.Logger) error { - if cfg.Mode == string(e2e.ModeLight) { - return startLightNode(ctx, logger, cfg) - } // Start remote signer (must start before node if running builtin). 
if cfg.PrivValServer != "" { err := startRemoteSigner(ctx, cfg, logger) @@ -114,6 +111,9 @@ func startAppServer(ctx context.Context, cfg *Config, logger log.Logger) error { return err } } + if cfg.Mode == string(e2e.ModeLight) { + return startLightNode(ctx, logger, cfg) + } switch cfg.Protocol { case "socket", "grpc": return startApp(ctx, logger, cfg) diff --git a/test/e2e/pkg/infra/docker/compose.go b/test/e2e/pkg/infra/docker/compose.go index 3ea5845ee7..8291d73961 100644 --- a/test/e2e/pkg/infra/docker/compose.go +++ b/test/e2e/pkg/infra/docker/compose.go @@ -2,6 +2,7 @@ package docker import ( "bytes" + "os" "text/template" e2e "github.com/tendermint/tendermint/test/e2e/pkg" @@ -17,6 +18,9 @@ func makeDockerCompose(testnet *e2e.Testnet) ([]byte, error) { "isBuiltin": func(protocol e2e.Protocol, mode e2e.Mode) bool { return mode == e2e.ModeLight || protocol == e2e.ProtocolBuiltin }, + "debugPort": func(index int) int { + return 40000 + index + 1 + }, }).Parse(`version: '2.4' networks: @@ -33,25 +37,40 @@ networks: - subnet: {{ .IP }} services: -{{- range .Nodes }} +{{- range $index, $node := .Nodes }} {{ .Name }}: labels: e2e: true container_name: {{ .Name }} - image: tendermint/e2e-node + image: tenderdash/e2e-node {{- if isBuiltin $.ABCIProtocol .Mode }} entrypoint: /usr/bin/entrypoint-builtin {{- else if .LogLevel }} command: start --log-level {{ .LogLevel }} {{- end }} init: true +{{- if $.Debug }} + environment: + - DEBUG=1 + - DEBUG_PORT={{ debugPort $index }} +{{- end }} ports: - 26656 - {{ if .ProxyPort }}{{ addUint32 .ProxyPort 1000 }}:{{ end }}26660 - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657 - 6060 +{{- if $.Debug }} + - {{ debugPort $index }}:{{ debugPort $index }} + security_opt: + - "seccomp:unconfined" + cap_add: + - SYS_PTRACE +{{- end }} volumes: - - ./{{ .Name }}:/tendermint + - ./{{ .Name }}:/tenderdash +{{- if ne $.PreCompiledAppPath "" }} + - {{ $.PreCompiledAppPath }}:/usr/bin/app +{{- end }} networks: {{ $.Name }}: ipv{{ if $.IPv6 
}}6{{ else }}4{{ end}}_address: {{ .IP }} @@ -60,8 +79,17 @@ services: if err != nil { return nil, err } + data := &struct { + *e2e.Testnet + PreCompiledAppPath string + Debug bool + }{ + Testnet: testnet, + PreCompiledAppPath: os.Getenv("PRE_COMPILED_APP_PATH"), + Debug: os.Getenv("DEBUG") != "", + } var buf bytes.Buffer - err = tmpl.Execute(&buf, testnet) + err = tmpl.Execute(&buf, data) if err != nil { return nil, err } diff --git a/test/e2e/pkg/infra/docker/infra.go b/test/e2e/pkg/infra/docker/infra.go index 9827be2414..382576c91d 100644 --- a/test/e2e/pkg/infra/docker/infra.go +++ b/test/e2e/pkg/infra/docker/infra.go @@ -131,7 +131,7 @@ func (ti *testnetInfra) Cleanup(ctx context.Context) error { return err } err = execDocker(ctx, "run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir), - "tendermint/e2e-node", "sh", "-c", "rm -rf /network/*/") + "tenderdash/e2e-node", "sh", "-c", "rm -rf /network/*/") if err != nil { return err } diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 794948ac12..61cc466165 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -630,7 +630,7 @@ func (n Node) AddressRPC() string { // Client returns an RPC client for a node. 
func (n Node) Client() (*rpchttp.HTTP, error) { - return rpchttp.New(fmt.Sprintf("http://%s", n.AddressRPC())) + return rpchttp.New(fmt.Sprintf("http://127.0.0.1:%v", n.ProxyPort)) } // Stateless returns true if the node is either a seed node or a light node diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 06a80d73bf..aa1f2e4eb5 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -22,6 +22,7 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" e2e "github.com/tendermint/tendermint/test/e2e/pkg" @@ -156,91 +157,6 @@ func Setup(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, ti infr return nil } -// MakeDockerCompose generates a Docker Compose config for a testnet. -func MakeDockerCompose(testnet *e2e.Testnet) ([]byte, error) { - // Must use version 2 Docker Compose format, to support IPv6. 
- tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{ - "debugPort": func(index int) int { - return 40000 + index + 1 - }, - "addUint32": func(x, y uint32) uint32 { - return x + y - }, - }).Parse(`version: '2.4' - -networks: - {{ .Name }}: - labels: - e2e: true - driver: bridge -{{- if .IPv6 }} - enable_ipv6: true -{{- end }} - ipam: - driver: default - config: - - subnet: {{ .IP }} - -services: -{{- range $index, $node := .Nodes }} - {{ .Name }}: - labels: - e2e: true - container_name: {{ .Name }} - image: tenderdash/e2e-node -{{- if eq .ABCIProtocol "builtin" }} - entrypoint: /usr/bin/entrypoint-builtin -{{- else if .LogLevel }} - command: start --log-level {{ .LogLevel }} -{{- end }} - init: true -{{- if $.Debug }} - environment: - - DEBUG=1 - - DEBUG_PORT={{ debugPort $index }} -{{- end }} - ports: - - 26656 - - {{ if .ProxyPort }}{{ addUint32 .ProxyPort 1000 }}:{{ end }}26660 - - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657 - - 6060 -{{- if $.Debug }} - - {{ debugPort $index }}:{{ debugPort $index }} - security_opt: - - "seccomp:unconfined" - cap_add: - - SYS_PTRACE -{{- end }} - volumes: - - ./{{ .Name }}:/tenderdash -{{- if ne $.PreCompiledAppPath "" }} - - {{ $.PreCompiledAppPath }}:/usr/bin/app -{{- end }} - networks: - {{ $.Name }}: - ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }} - -{{end}}`) - if err != nil { - return nil, err - } - var buf bytes.Buffer - data := &struct { - *e2e.Testnet - PreCompiledAppPath string - Debug bool - }{ - Testnet: testnet, - PreCompiledAppPath: os.Getenv("PRE_COMPILED_APP_PATH"), - Debug: os.Getenv("DEBUG") != "", - } - err = tmpl.Execute(&buf, data) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - // MakeGenesis generates a genesis document. 
func MakeGenesis(testnet *e2e.Testnet, genesisTime time.Time) (types.GenesisDoc, error) { genesis := types.GenesisDoc{ diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index 6b378225a1..8cae2efade 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -3,10 +3,8 @@ package e2e_test import ( "bytes" "context" - "errors" "fmt" "math/rand" - "strconv" "testing" "time" @@ -188,28 +186,3 @@ func TestApp_Tx(t *testing.T) { } } - -func TestApp_VoteExtensions(t *testing.T) { - testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { - client, err := node.Client() - require.NoError(t, err) - info, err := client.ABCIInfo(ctx) - require.NoError(t, err) - - // This special value should have been created by way of vote extensions - resp, err := client.ABCIQuery(ctx, "", []byte("extensionSum")) - require.NoError(t, err) - - extSum, err := strconv.Atoi(string(resp.Response.Value)) - // if extensions are not enabled on the network, we should not expect - // the app to have any extension value set. - if node.Testnet.VoteExtensionsEnableHeight == 0 || - info.Response.LastBlockHeight < node.Testnet.VoteExtensionsEnableHeight+1 { - target := &strconv.NumError{} - require.True(t, errors.As(err, &target)) - } else { - require.NoError(t, err) - require.GreaterOrEqual(t, extSum, 0) - } - }) -} diff --git a/types/quorum.go b/types/quorum.go index 56b20350b7..3d53b00985 100644 --- a/types/quorum.go +++ b/types/quorum.go @@ -110,6 +110,7 @@ func MakeThresholdVoteExtensions(extensions []VoteExtension, thresholdSigs [][]b // QuorumSingsVerifier ... 
type QuorumSingsVerifier struct { QuorumSignData + shouldVerifyBlock bool shouldVerifyState bool shouldVerifyVoteExtensions bool } @@ -121,6 +122,13 @@ func WithVerifyExtensions(shouldVerify bool) func(*QuorumSingsVerifier) { } } +// WithVerifyBlock sets a flag that tells QuorumSingsVerifier to verify block signature or not +func WithVerifyBlock(shouldVerify bool) func(*QuorumSingsVerifier) { + return func(verifier *QuorumSingsVerifier) { + verifier.shouldVerifyBlock = shouldVerify + } +} + // WithVerifyState sets a flag that tells QuorumSingsVerifier to verify stateID signature or not func WithVerifyState(shouldVerify bool) func(*QuorumSingsVerifier) { return func(verifier *QuorumSingsVerifier) { @@ -142,6 +150,7 @@ func WithVerifyReachedQuorum(quorumReached bool) func(*QuorumSingsVerifier) { func NewQuorumSingsVerifier(quorumData QuorumSignData, opts ...func(*QuorumSingsVerifier)) *QuorumSingsVerifier { verifier := &QuorumSingsVerifier{ QuorumSignData: quorumData, + shouldVerifyBlock: true, shouldVerifyState: true, shouldVerifyVoteExtensions: true, } @@ -165,6 +174,9 @@ func (q *QuorumSingsVerifier) Verify(pubKey crypto.PubKey, signs QuorumSigns) er } func (q *QuorumSingsVerifier) verifyBlock(pubKey crypto.PubKey, signs QuorumSigns) error { + if !q.shouldVerifyBlock { + return nil + } if !pubKey.VerifySignatureDigest(q.Block.ID, signs.BlockSign) { return fmt.Errorf( "threshold block signature is invalid: (%X) signID=%X: %w", diff --git a/types/vote.go b/types/vote.go index 84b1767cc4..d3b91fc1a1 100644 --- a/types/vote.go +++ b/types/vote.go @@ -266,18 +266,20 @@ func (vote *Vote) verifyBasic(proTxHash ProTxHash, pubKey crypto.PubKey) error { // VerifyExtension checks whether the vote extension signature corresponds to the // given chain ID and public key. 
-func (vote *Vote) VerifyExtension(chainID string, pubKey crypto.PubKey) error { +func (vote *Vote) VerifyExtension(chainID string, pubKey crypto.PubKey, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash) error { if vote.Type != tmproto.PrecommitType || vote.BlockID.IsNil() { return nil } - v := vote.ToProto() - for _, ve := range v.VoteExtensions { - extSignBytes := VoteExtensionSignBytes(chainID, vote.Height, vote.Round, ve) - if !pubKey.VerifySignature(extSignBytes, ve.Signature) { - return ErrVoteInvalidSignature - } + quorumSignData, err := MakeQuorumSigns(chainID, quorumType, quorumHash, vote.ToProto(), StateID{}) + if err != nil { + return err } - return nil + verifier := NewQuorumSingsVerifier( + quorumSignData, + WithVerifyBlock(false), + WithVerifyState(false), + ) + return verifier.Verify(pubKey, vote.makeQuorumSigns()) } func (vote *Vote) verifySign( From ca2c31e52fb652de3aeb98c9405763e4659719c8 Mon Sep 17 00:00:00 2001 From: shotonoff Date: Thu, 18 Aug 2022 11:14:05 +0200 Subject: [PATCH 195/203] fix: lint issues, code style, abci test --- abci/example/counter/counter.go | 6 ++---- abci/example/example_test.go | 4 ++++ abci/tests/test_cli/ex1.abci | 2 +- abci/tests/test_cli/ex1.abci.out | 2 +- abci/tests/test_cli/ex2.abci.out | 14 +++++++------- abci/types/types.go | 1 + .../commands/rollback_test.go | 2 +- internal/consensus/state.go | 1 - internal/consensus/state_test.go | 1 - libs/log/default.go | 8 ++++---- proto/tendermint/abci/types.proto | 1 - test/e2e/node/main.go | 3 +-- types/vote_test.go | 6 +----- 13 files changed, 23 insertions(+), 28 deletions(-) rename cmd/{tendermint => tenderdash}/commands/rollback_test.go (97%) diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go index 4b041ea9c9..8edf5543c6 100644 --- a/abci/example/counter/counter.go +++ b/abci/example/counter/counter.go @@ -54,10 +54,8 @@ func (app *Application) Commit(_ context.Context) (*types.ResponseCommit, error) if app.txCount == 0 { return 
&types.ResponseCommit{}, nil } - hash := make([]byte, 24) - endHash := make([]byte, 8) - binary.BigEndian.PutUint64(endHash, uint64(app.txCount)) - hash = append(hash, endHash...) + hash := make([]byte, 32) + binary.BigEndian.PutUint64(hash[24:], uint64(app.txCount)) return &types.ResponseCommit{}, nil } diff --git a/abci/example/example_test.go b/abci/example/example_test.go index 066d4071d7..4b4fdf1d67 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -28,6 +28,10 @@ func init() { rand.Seed(time.Now().UnixNano()) } +func TestPop(t *testing.T) { + +} + func TestKVStore(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/abci/tests/test_cli/ex1.abci b/abci/tests/test_cli/ex1.abci index eba06028a5..aa6acc3272 100644 --- a/abci/tests/test_cli/ex1.abci +++ b/abci/tests/test_cli/ex1.abci @@ -15,4 +15,4 @@ process_proposal "preparedef" prepare_proposal process_proposal finalize_block -commit \ No newline at end of file +commit diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index b41b1af82f..b95343a70d 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -78,7 +78,7 @@ > finalize_block -> code: OK --> data.hex: 0x0600000000000000 +-> data.hex: 0x0600000000000000000000000000000000000000000000000000000000000000 > commit -> code: OK diff --git a/abci/tests/test_cli/ex2.abci.out b/abci/tests/test_cli/ex2.abci.out index e29a353682..39340dde5e 100644 --- a/abci/tests/test_cli/ex2.abci.out +++ b/abci/tests/test_cli/ex2.abci.out @@ -7,9 +7,9 @@ > finalize_block 0x00 -> code: OK -> code: OK --> data.hex: 0x0200000000000000 +-> data.hex: 0x0200000000000000000000000000000000000000000000000000000000000000 -> commit +> commit -> code: OK > check_tx 0x00 @@ -18,20 +18,20 @@ > finalize_block 0x01 -> code: OK -> code: OK --> data.hex: 0x0400000000000000 +-> data.hex: 0x0400000000000000000000000000000000000000000000000000000000000000 -> commit 
+> commit -> code: OK > finalize_block 0x04 -> code: OK -> code: OK --> data.hex: 0x0600000000000000 +-> data.hex: 0x0600000000000000000000000000000000000000000000000000000000000000 -> commit +> commit -> code: OK -> info +> info -> code: OK -> data: {"size":3} -> data.hex: 0x7B2273697A65223A337D diff --git a/abci/types/types.go b/abci/types/types.go index a63279df42..d02d9551d6 100644 --- a/abci/types/types.go +++ b/abci/types/types.go @@ -5,6 +5,7 @@ import ( "encoding/json" "github.com/gogo/protobuf/jsonpb" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/jsontypes" diff --git a/cmd/tendermint/commands/rollback_test.go b/cmd/tenderdash/commands/rollback_test.go similarity index 97% rename from cmd/tendermint/commands/rollback_test.go rename to cmd/tenderdash/commands/rollback_test.go index 9aae403cb3..8e2ba87bfc 100644 --- a/cmd/tendermint/commands/rollback_test.go +++ b/cmd/tenderdash/commands/rollback_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/cmd/tendermint/commands" + "github.com/tendermint/tendermint/cmd/tenderdash/commands" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/rpc/client/local" rpctest "github.com/tendermint/tendermint/rpc/test" diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 0b36914499..8b63f3f6b5 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -755,7 +755,6 @@ func (cs *State) reconstructLastCommit(state sm.State) { panic(fmt.Sprintf("failed to reconstruct last commit; %s", err)) } cs.LastCommit = commit - return } func (cs *State) votesFromSeenCommit(state sm.State) (*types.Commit, error) { diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 407b541290..c0fc8267fd 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -2215,7 +2215,6 @@ 
func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) height, round := cs1.Height, cs1.Round - cs1.state.ConsensusParams.ABCI.VoteExtensionsEnableHeight = cs1.Height proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) diff --git a/libs/log/default.go b/libs/log/default.go index 99b045b2ab..7bd35de18b 100644 --- a/libs/log/default.go +++ b/libs/log/default.go @@ -63,19 +63,19 @@ func NewLogger(level string, logWriter io.Writer) (Logger, error) { } func (l defaultLogger) Info(msg string, keyVals ...interface{}) { - l.Logger.Info().Fields(keyVals).Msg(msg) + l.Logger.Info().Fields(getLogFields(keyVals...)).Msg(msg) } func (l defaultLogger) Error(msg string, keyVals ...interface{}) { - l.Logger.Error().Fields(keyVals).Msg(msg) + l.Logger.Error().Fields(getLogFields(keyVals...)).Msg(msg) } func (l defaultLogger) Debug(msg string, keyVals ...interface{}) { - l.Logger.Debug().Fields(keyVals).Msg(msg) + l.Logger.Debug().Fields(getLogFields(keyVals...)).Msg(msg) } func (l defaultLogger) With(keyVals ...interface{}) Logger { - return &defaultLogger{Logger: l.Logger.With().Fields(keyVals).Logger()} + return &defaultLogger{Logger: l.Logger.With().Fields(getLogFields(keyVals...)).Logger()} } // OverrideWithNewLogger replaces an existing logger's internal with diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 7483a12b54..735dc1ecb9 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -5,7 +5,6 @@ option go_package = "github.com/tendermint/tendermint/abci/types"; import "tendermint/crypto/proof.proto"; import "tendermint/types/dash.proto"; -import "tendermint/types/types.proto"; import 
"tendermint/crypto/keys.proto"; import "tendermint/types/params.proto"; import "tendermint/version/types.proto"; diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index 6c36a005c1..160719dda9 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -120,9 +120,8 @@ func startAppServer(ctx context.Context, cfg *Config, logger log.Logger) error { case "builtin": if cfg.Mode == string(e2e.ModeSeed) { return startSeedNode(ctx) - } else { - return startNode(ctx, cfg) } + return startNode(ctx, cfg) } return fmt.Errorf("invalid protocol %q", cfg.Protocol) } diff --git a/types/vote_test.go b/types/vote_test.go index 3a9af89bd3..a51b1f04ee 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -26,11 +26,7 @@ const ( var ( // nolint: lll - nilVoteTestStr = fmt.Sprintf(`Vote{56789:959A8F5EF2BE 12345/02/Precommit(%s) 000000000000 000000000000 000000000000}`, nilVoteStr) - formatNonEmptyVoteExtensionFn = func(voteExtensionLength int) string { - // nolint: lll - return fmt.Sprintf(`Vote{56789:6AF1F4111082 12345/2 Precommit 8B01023386C3 000000000000 %d @ 2017-12-25T03:00:01.234Z}`, voteExtensionLength) - } + nilVoteTestStr = fmt.Sprintf(`Vote{56789:959A8F5EF2BE 12345/02/Precommit(%s) 000000000000 000000000000 000000000000}`, nilVoteStr) ) func examplePrevote(t *testing.T) *Vote { From 10fc6bead277f2ee50087fbf074bc52bda7e5007 Mon Sep 17 00:00:00 2001 From: shotonoff Date: Thu, 18 Aug 2022 11:39:43 +0200 Subject: [PATCH 196/203] fix: validator_conn_executor_test.go TestFinalizeBlock --- dash/quorum/validator_conn_executor_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/dash/quorum/validator_conn_executor_test.go b/dash/quorum/validator_conn_executor_test.go index c40c058e49..6bf50f26e8 100644 --- a/dash/quorum/validator_conn_executor_test.go +++ b/dash/quorum/validator_conn_executor_test.go @@ -374,6 +374,7 @@ func TestFinalizeBlock(t *testing.T) { testifymock.Anything, testifymock.Anything, testifymock.Anything, + testifymock.Anything, 
testifymock.Anything).Return(nil) blockExec := sm.NewBlockExecutor( From aea1fe3291f3d74066adeca59a7bc37d0c488294 Mon Sep 17 00:00:00 2001 From: shotonoff Date: Thu, 18 Aug 2022 13:25:31 +0200 Subject: [PATCH 197/203] fix: rollback.go --- internal/state/rollback.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/state/rollback.go b/internal/state/rollback.go index 6fdc01e03f..546768aff2 100644 --- a/internal/state/rollback.go +++ b/internal/state/rollback.go @@ -3,7 +3,6 @@ package state import ( "errors" "fmt" - "github.com/tendermint/tendermint/version" ) @@ -38,6 +37,8 @@ func Rollback(bs BlockStore, ss Store) (int64, []byte, error) { // state store height is equal to blockstore height. We're good to proceed with rolling back state rollbackHeight := invalidState.LastBlockHeight - 1 rollbackBlock := bs.LoadBlockMeta(rollbackHeight) + commit := bs.LoadBlockCommit(rollbackHeight) + if rollbackBlock == nil { return -1, nil, fmt.Errorf("block at height %d not found", rollbackHeight) } @@ -85,6 +86,9 @@ func Rollback(bs BlockStore, ss Store) (int64, []byte, error) { LastBlockHeight: rollbackBlock.Header.Height, LastBlockID: rollbackBlock.BlockID, LastBlockTime: rollbackBlock.Header.Time, + LastStateID: commit.StateID, + + LastCoreChainLockedBlockHeight: rollbackBlock.Header.CoreChainLockedHeight, NextValidators: invalidState.Validators, Validators: invalidState.LastValidators, From 5424e5ecfb3523a47735b3804a3e2c53c01d07ab Mon Sep 17 00:00:00 2001 From: shotonoff Date: Thu, 18 Aug 2022 13:55:45 +0200 Subject: [PATCH 198/203] fix: rollback.go --- internal/state/rollback.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/state/rollback.go b/internal/state/rollback.go index 546768aff2..bc8b428c55 100644 --- a/internal/state/rollback.go +++ b/internal/state/rollback.go @@ -3,6 +3,7 @@ package state import ( "errors" "fmt" + "github.com/tendermint/tendermint/version" ) From ce86b72c752d5ab2714efb7dba293272962f14f3 Mon Sep 17 
00:00:00 2001 From: shotonoff Date: Thu, 18 Aug 2022 16:48:39 +0200 Subject: [PATCH 199/203] fix: unit tests --- internal/consensus/common_test.go | 2 + internal/consensus/mempool_test.go | 4 +- internal/consensus/state_test.go | 103 ++++++++++++----------------- internal/state/rollback_test.go | 3 + 4 files changed, 50 insertions(+), 62 deletions(-) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 1b1757fa13..d377a6a21b 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -111,6 +111,8 @@ func (vs *validatorStub) signVote( return nil, fmt.Errorf("can't get proTxHash: %w", err) } + fmt.Printf("proTxHash = = %s\n", proTxHash.ShortString()) + vote := &types.Vote{ Type: voteType, Height: vs.Height, diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index fc56de464d..2bc515b61b 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -309,8 +309,8 @@ func (app *CounterApplication) FinalizeBlock(_ context.Context, req *abci.Reques res := &abci.ResponseFinalizeBlock{TxResults: respTxs} if app.txCount > 0 { - res.AppHash = make([]byte, 8) - binary.BigEndian.PutUint64(res.AppHash, uint64(app.txCount)) + res.AppHash = make([]byte, 32) + binary.BigEndian.PutUint64(res.AppHash[24:], uint64(app.txCount)) } return res, nil diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index c0fc8267fd..381da1733f 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -2103,25 +2103,25 @@ func TestFinalizeBlockCalled(t *testing.T) { } } -// TestExtendVoteCalledWhenEnabled tests that the vote extension methods are called at the -// correct point in the consensus algorithm when vote extensions are enabled. 
-func TestExtendVoteCalledWhenEnabled(t *testing.T) { +func TestExtendVote(t *testing.T) { config := configSetup(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() + voteExtensions := []*abci.ExtendVoteExtension{ + { + Type: tmproto.VoteExtensionType_DEFAULT, + Extension: []byte("extension"), + }, + } + m := abcimocks.NewApplication(t) m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ - VoteExtensions: []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - }, + VoteExtensions: voteExtensions, }, nil) - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ + verifyVoteExtensionCall := m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ Status: abci.ResponseVerifyVoteExtension_ACCEPT, }, nil) m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() @@ -2129,6 +2129,12 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) height, round := cs1.Height, cs1.Round + proTxHashMap := make(map[string]struct{}) + for _, vs := range vss { + pth, _ := vs.GetProTxHash(ctx) + proTxHashMap[pth.String()] = struct{}{} + } + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) proTxHash, err := cs1.privValidator.GetProTxHash(ctx) @@ -2152,43 +2158,18 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { ensurePrecommit(t, voteCh, height, round) - m.AssertCalled(t, "ExtendVote", ctx, &abci.RequestExtendVote{ - Height: height, - Hash: blockID.Hash, 
- }) - - m.AssertCalled(t, "VerifyVoteExtension", ctx, &abci.RequestVerifyVoteExtension{ - Hash: blockID.Hash, - ValidatorProTxHash: proTxHash, - Height: height, - VoteExtensions: []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - }, + verifyVoteExtensionCall.Arguments[1] = mock.MatchedBy(func(req *abci.RequestVerifyVoteExtension) bool { + _, ok := proTxHashMap[types.ProTxHash(req.ValidatorProTxHash).String()] + return assert.Equal(t, req.Hash, blockID.Hash.Bytes()) && + assert.Equal(t, req.Height, height) && + assert.Equal(t, req.VoteExtensions, voteExtensions) && + assert.True(t, ok) }) signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...) ensureNewRound(t, newRoundCh, height+1, 0) m.AssertExpectations(t) - // Only 3 of the vote extensions are seen, as consensus proceeds as soon as the +2/3 threshold - // is observed by the consensus engine. - for _, pv := range vss[:3] { - proTxHash, err := pv.GetProTxHash(ctx) - require.NoError(t, err) - m.AssertCalled(t, "VerifyVoteExtension", ctx, &abci.RequestVerifyVoteExtension{ - Hash: blockID.Hash, - ValidatorProTxHash: proTxHash, - Height: height, - VoteExtensions: []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - }, - }) - } + mock.AssertExpectationsForObjects(t, m) } // TestVerifyVoteExtensionNotCalledOnAbsentPrecommit tests that the VerifyVoteExtension @@ -2197,25 +2178,31 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { config := configSetup(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - + voteExtensions := []*abci.ExtendVoteExtension{ + { + Type: tmproto.VoteExtensionType_DEFAULT, + Extension: []byte("extension"), + }, + } m := abcimocks.NewApplication(t) m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) 
m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ - VoteExtensions: []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - }, + VoteExtensions: voteExtensions, }, nil) - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ + verifyVoteExtensionCall := m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ Status: abci.ResponseVerifyVoteExtension_ACCEPT, }, nil) m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) height, round := cs1.Height, cs1.Round + proTxHashMap := make(map[string]struct{}) + for _, vs := range vss { + pth, _ := vs.GetProTxHash(ctx) + proTxHashMap[pth.String()] = struct{}{} + } + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) proTxHash, err := cs1.privValidator.GetProTxHash(ctx) @@ -2241,16 +2228,12 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { Hash: blockID.Hash, }) - m.AssertCalled(t, "VerifyVoteExtension", mock.Anything, &abci.RequestVerifyVoteExtension{ - Hash: blockID.Hash, - ValidatorProTxHash: proTxHash, - Height: height, - VoteExtensions: []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - }, + verifyVoteExtensionCall.Arguments[1] = mock.MatchedBy(func(req *abci.RequestVerifyVoteExtension) bool { + _, ok := proTxHashMap[types.ProTxHash(req.ValidatorProTxHash).String()] + return assert.Equal(t, req.Hash, blockID.Hash.Bytes()) && + assert.Equal(t, req.Height, height) && + assert.Equal(t, req.VoteExtensions, voteExtensions) && + 
assert.True(t, ok) }) m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() diff --git a/internal/state/rollback_test.go b/internal/state/rollback_test.go index ddd51b224e..2d3a4defb1 100644 --- a/internal/state/rollback_test.go +++ b/internal/state/rollback_test.go @@ -52,6 +52,7 @@ func TestRollback(t *testing.T) { LastResultsHash: initialState.LastResultsHash, }, } + commit := &types.Commit{} nextBlock := &types.BlockMeta{ BlockID: initialState.LastBlockID, Header: types.Header{ @@ -62,6 +63,7 @@ func TestRollback(t *testing.T) { }, } blockStore.On("LoadBlockMeta", height).Return(block) + blockStore.On("LoadBlockCommit", height).Return(commit) blockStore.On("LoadBlockMeta", nextHeight).Return(nextBlock) blockStore.On("Height").Return(nextHeight) @@ -94,6 +96,7 @@ func TestRollbackNoBlocks(t *testing.T) { blockStore := &mocks.BlockStore{} blockStore.On("Height").Return(height) blockStore.On("LoadBlockMeta", height).Return(nil) + blockStore.On("LoadBlockCommit", height-1).Return(&types.Commit{}) blockStore.On("LoadBlockMeta", height-1).Return(nil) _, _, err := state.Rollback(blockStore, stateStore) From 3f18c31882cfb8872a676eda955986f58006f29e Mon Sep 17 00:00:00 2001 From: shotonoff Date: Thu, 18 Aug 2022 17:03:59 +0200 Subject: [PATCH 200/203] refactor: remove redundant log --- internal/consensus/common_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index d377a6a21b..1b1757fa13 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -111,8 +111,6 @@ func (vs *validatorStub) signVote( return nil, fmt.Errorf("can't get proTxHash: %w", err) } - fmt.Printf("proTxHash = = %s\n", proTxHash.ShortString()) - vote := &types.Vote{ Type: voteType, Height: vs.Height, From 6e86d154fb699aae0998d050b32758a8d525add1 Mon Sep 17 00:00:00 2001 From: shotonoff Date: Fri, 19 Aug 2022 10:21:55 +0200 Subject: [PATCH 201/203] fix: mempool_test.go 
TestMempoolTxConcurrentWithCommit --- internal/consensus/mempool_test.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 2bc515b61b..b7b9152c3f 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -338,14 +338,6 @@ func txAsUint64(tx []byte) uint64 { } func (app *CounterApplication) Commit(context.Context) (*abci.ResponseCommit, error) { - app.mu.Lock() - defer app.mu.Unlock() - app.mempoolTxCount = app.txCount - if app.txCount == 0 { - return &abci.ResponseCommit{}, nil - } - hash := make([]byte, 32) - binary.BigEndian.PutUint64(hash, uint64(app.txCount)) return &abci.ResponseCommit{}, nil } From ddf18b11b5a42463eb8aa5ff927bed28ad855eb0 Mon Sep 17 00:00:00 2001 From: shotonoff Date: Fri, 19 Aug 2022 11:02:48 +0200 Subject: [PATCH 202/203] refactor: remove redundant TestPop --- abci/example/example_test.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/abci/example/example_test.go b/abci/example/example_test.go index 4b4fdf1d67..066d4071d7 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -28,10 +28,6 @@ func init() { rand.Seed(time.Now().UnixNano()) } -func TestPop(t *testing.T) { - -} - func TestKVStore(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() From 80e5a640ddd8899e8ddb1cc8e788bbbe888b936d Mon Sep 17 00:00:00 2001 From: shotonoff Date: Fri, 2 Sep 2022 11:05:49 +0200 Subject: [PATCH 203/203] refactor: changes by feedback --- internal/consensus/state.go | 6 ++--- internal/consensus/state_test.go | 38 ++++++++++++++++++-------------- types/quorum.go | 4 ++-- types/quorum_sign_data.go | 2 +- types/vote.go | 8 +++---- types/vote_set.go | 2 +- 6 files changed, 32 insertions(+), 28 deletions(-) diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 8b63f3f6b5..f5acf4d84c 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go 
@@ -750,14 +750,14 @@ func (cs *State) sendInternalMessage(ctx context.Context, mi msgInfo) { // the method will panic on an absent ExtendedCommit or an ExtendedCommit without // extension data. func (cs *State) reconstructLastCommit(state sm.State) { - commit, err := cs.votesFromSeenCommit(state) + commit, err := cs.loadLastCommit(state) if err != nil { panic(fmt.Sprintf("failed to reconstruct last commit; %s", err)) } cs.LastCommit = commit } -func (cs *State) votesFromSeenCommit(state sm.State) (*types.Commit, error) { +func (cs *State) loadLastCommit(state sm.State) (*types.Commit, error) { commit := cs.blockStore.LoadSeenCommit() if commit == nil || commit.Height != state.LastBlockHeight { commit = cs.blockStore.LoadBlockCommit(state.LastBlockHeight) @@ -2833,7 +2833,7 @@ func (cs *State) addVote( // message. _, val := cs.state.Validators.GetByIndex(vote.ValidatorIndex) qt, qh := cs.state.Validators.QuorumType, cs.state.Validators.QuorumHash - if err := vote.VerifyExtension(cs.state.ChainID, val.PubKey, qt, qh); err != nil { + if err := vote.VerifyExtensionSign(cs.state.ChainID, val.PubKey, qt, qh); err != nil { return false, err } diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 381da1733f..e8ef52c21b 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -2118,12 +2118,6 @@ func TestExtendVote(t *testing.T) { m := abcimocks.NewApplication(t) m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) - m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ - VoteExtensions: voteExtensions, - }, nil) - verifyVoteExtensionCall := m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, - }, 
nil) m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) @@ -2153,18 +2147,28 @@ func TestExtendVote(t *testing.T) { Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header(), } - signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) - ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) - - ensurePrecommit(t, voteCh, height, round) - - verifyVoteExtensionCall.Arguments[1] = mock.MatchedBy(func(req *abci.RequestVerifyVoteExtension) bool { + reqExtendVoteFunc := mock.MatchedBy(func(req *abci.RequestExtendVote) bool { + return assert.Equal(t, req.Height, height) && assert.Equal(t, []byte(blockID.Hash), req.Hash) + }) + m.On("ExtendVote", mock.Anything, reqExtendVoteFunc).Return(&abci.ResponseExtendVote{ + VoteExtensions: voteExtensions, + }, nil) + reqVerifyVoteExtFunc := mock.MatchedBy(func(req *abci.RequestVerifyVoteExtension) bool { _, ok := proTxHashMap[types.ProTxHash(req.ValidatorProTxHash).String()] return assert.Equal(t, req.Hash, blockID.Hash.Bytes()) && assert.Equal(t, req.Height, height) && assert.Equal(t, req.VoteExtensions, voteExtensions) && assert.True(t, ok) }) + m.On("VerifyVoteExtension", mock.Anything, reqVerifyVoteExtFunc). + Return(&abci.ResponseVerifyVoteExtension{ + Status: abci.ResponseVerifyVoteExtension_ACCEPT, + }, nil) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) + ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) + + ensurePrecommit(t, voteCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...) 
ensureNewRound(t, newRoundCh, height+1, 0) m.AssertExpectations(t) @@ -2190,9 +2194,6 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ VoteExtensions: voteExtensions, }, nil) - verifyVoteExtensionCall := m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, - }, nil) m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) height, round := cs1.Height, cs1.Round @@ -2227,14 +2228,17 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { Height: height, Hash: blockID.Hash, }) - - verifyVoteExtensionCall.Arguments[1] = mock.MatchedBy(func(req *abci.RequestVerifyVoteExtension) bool { + reqVerifyVoteExtFunc := mock.MatchedBy(func(req *abci.RequestVerifyVoteExtension) bool { _, ok := proTxHashMap[types.ProTxHash(req.ValidatorProTxHash).String()] return assert.Equal(t, req.Hash, blockID.Hash.Bytes()) && assert.Equal(t, req.Height, height) && assert.Equal(t, req.VoteExtensions, voteExtensions) && assert.True(t, ok) }) + m.On("VerifyVoteExtension", mock.Anything, reqVerifyVoteExtFunc). + Return(&abci.ResponseVerifyVoteExtension{ + Status: abci.ResponseVerifyVoteExtension_ACCEPT, + }, nil) m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[2:]...) 
diff --git a/types/quorum.go b/types/quorum.go index 3d53b00985..777fe618d8 100644 --- a/types/quorum.go +++ b/types/quorum.go @@ -145,9 +145,9 @@ func WithVerifyReachedQuorum(quorumReached bool) func(*QuorumSingsVerifier) { } } -// NewQuorumSingsVerifier creates and returns an instance of QuorumSingsVerifier that is used for verification +// NewQuorumSignsVerifier creates and returns an instance of QuorumSingsVerifier that is used for verification // quorum signatures -func NewQuorumSingsVerifier(quorumData QuorumSignData, opts ...func(*QuorumSingsVerifier)) *QuorumSingsVerifier { +func NewQuorumSignsVerifier(quorumData QuorumSignData, opts ...func(*QuorumSingsVerifier)) *QuorumSingsVerifier { verifier := &QuorumSingsVerifier{ QuorumSignData: quorumData, shouldVerifyBlock: true, diff --git a/types/quorum_sign_data.go b/types/quorum_sign_data.go index 1b3a57e865..d224ba9724 100644 --- a/types/quorum_sign_data.go +++ b/types/quorum_sign_data.go @@ -24,7 +24,7 @@ type QuorumSignData struct { // Verify verifies a quorum signatures: block, state and vote-extensions func (q QuorumSignData) Verify(pubKey crypto.PubKey, signs QuorumSigns) error { - return NewQuorumSingsVerifier(q).Verify(pubKey, signs) + return NewQuorumSignsVerifier(q).Verify(pubKey, signs) } // SignItem represents quorum sign data, like a request id, message bytes, sha256 hash of message and signID diff --git a/types/vote.go b/types/vote.go index d3b91fc1a1..c4c45181c3 100644 --- a/types/vote.go +++ b/types/vote.go @@ -264,9 +264,9 @@ func (vote *Vote) verifyBasic(proTxHash ProTxHash, pubKey crypto.PubKey) error { return nil } -// VerifyExtension checks whether the vote extension signature corresponds to the +// VerifyExtensionSign checks whether the vote extension signature corresponds to the // given chain ID and public key. 
-func (vote *Vote) VerifyExtension(chainID string, pubKey crypto.PubKey, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash) error { +func (vote *Vote) VerifyExtensionSign(chainID string, pubKey crypto.PubKey, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash) error { if vote.Type != tmproto.PrecommitType || vote.BlockID.IsNil() { return nil } @@ -274,7 +274,7 @@ func (vote *Vote) VerifyExtension(chainID string, pubKey crypto.PubKey, quorumTy if err != nil { return err } - verifier := NewQuorumSingsVerifier( + verifier := NewQuorumSignsVerifier( quorumSignData, WithVerifyBlock(false), WithVerifyState(false), @@ -287,7 +287,7 @@ func (vote *Vote) verifySign( quorumSignData QuorumSignData, opts ...func(verifier *QuorumSingsVerifier), ) error { - verifier := NewQuorumSingsVerifier( + verifier := NewQuorumSignsVerifier( quorumSignData, append(opts, WithVerifyState(vote.BlockID.Hash != nil))..., ) diff --git a/types/vote_set.go b/types/vote_set.go index bdb7ce1783..fd6372bfb5 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -355,7 +355,7 @@ func (voteSet *VoteSet) recoverThresholdSignsAndVerify(blockVotes *blockVotes, q if err != nil { return err } - verifier := NewQuorumSingsVerifier( + verifier := NewQuorumSignsVerifier( quorumDataSigns, WithVerifyReachedQuorum(voteSet.IsQuorumReached()), )