diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c31db2770..c1d2fbac5 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -31,13 +31,9 @@ jobs:
         with:
           submodules: recursive
           fetch-depth: 0 # fetch git tags for "git describe"
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
         with:
-          go-version: 1.18.x
-      - uses: actions/cache@v3
-        with:
-          path: ~/go/pkg/mod
-          key: ${{ matrix.os }}-go-${{ hashFiles('**/go.sum') }}
+          go-version: '1.19'
 
       - name: Install deps
         if: matrix.os == 'ubuntu-20.04'
@@ -53,9 +49,11 @@
         if: matrix.os == 'ubuntu-20.04'
         uses: golangci/golangci-lint-action@v3
         with:
-          version: v1.50
-          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
+          version: v1.52
 
       - name: Test win
         if: matrix.os == 'windows-2022'
         run: make test-no-fuzz
+      - name: Test
+        if: matrix.os != 'windows-2022'
+        run: make test
diff --git a/.golangci.yml b/.golangci.yml
index 6d594c6be..3717f3d40 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -2,28 +2,28 @@ run:
   deadline: 10m
 
 linters:
-  disable-all: true
+  presets:
+    - bugs
+    - error
+    - unused
+    - performance
+  disable:
+    - exhaustive
+    - musttag
+    - contextcheck
+    - wrapcheck
+    - goerr113
+    - unparam
+    - makezero
   enable:
-    - errorlint
     - unconvert
     - predeclared
-#    - wastedassign # go1.18
+    - wastedassign
    - thelper
     - gofmt
-    - errcheck
-    - gosimple
-    - govet
-    - ineffassign
-    - staticcheck
-    - unused
-#    - gocritic
-    - bodyclose # go1.18
-    - gosec
-#    - forcetypeassert
-    - prealloc
-#    - contextcheck
-#    - goerr113
+    - gocritic
 #    - revive
+#    - forcetypeassert
 #    - stylecheck
 
 linters-settings:
diff --git a/Makefile b/Makefile
index a3b0cd4e9..1208ada87 100644
--- a/Makefile
+++ b/Makefile
@@ -29,7 +29,7 @@ $(GOBINREL):
 
 $(GOBINREL)/protoc: | $(GOBINREL)
 	$(eval PROTOC_TMP := $(shell mktemp -d))
-	curl -sSL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protoc-21.12-$(PROTOC_OS)-$(ARCH).zip -o "$(PROTOC_TMP)/protoc.zip"
+	curl -sSL https://github.com/protocolbuffers/protobuf/releases/download/v22.2/protoc-22.2-$(PROTOC_OS)-$(ARCH).zip -o "$(PROTOC_TMP)/protoc.zip"
 	cd "$(PROTOC_TMP)" && unzip protoc.zip
 	cp "$(PROTOC_TMP)/bin/protoc" "$(GOBIN)"
 	mkdir -p "$(PROTOC_INCLUDE)"
@@ -67,7 +67,8 @@ $(GOBINREL)/moq: | $(GOBINREL)
 	$(GOBUILD) -o "$(GOBIN)/moq" github.com/matryer/moq
 
 mocks: $(GOBINREL)/moq
-	rm gointerfaces/remote/mocks.go
+	rm -f gointerfaces/remote/mocks.go
+	rm -f gointerfaces/sentry/mocks.go
 	PATH="$(GOBIN):$(PATH)" go generate ./...
 
 lint: $(GOBINREL)/golangci-lint
@@ -79,7 +80,7 @@ lintci-deps-clean: golangci-lint-clean
 
 # download and build golangci-lint (https://golangci-lint.run)
$(GOBINREL)/golangci-lint: | $(GOBINREL)
-	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b "$(GOBIN)" v1.50.1
+	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b "$(GOBIN)" v1.52.1
 
 golangci-lint-clean:
 	rm -f "$(GOBIN)/golangci-lint"
diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go
deleted file mode 100644
index 8849a451a..000000000
--- a/aggregator/aggregator.go
+++ /dev/null
@@ -1,3302 +0,0 @@
-/*
-   Copyright 2022 Erigon contributors
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package aggregator
-
-import (
-	"bufio"
-	"bytes"
-	"container/heap"
-	"context"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"hash"
-	"io"
-	"io/fs"
-	"math"
-	"os"
-	"path"
-	"path/filepath"
-	"regexp"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/RoaringBitmap/roaring/roaring64"
-	"github.com/google/btree"
-	"github.com/ledgerwatch/log/v3"
-	"github.com/spaolacci/murmur3"
-	"golang.org/x/crypto/sha3"
-	"golang.org/x/exp/slices"
-
-	"github.com/ledgerwatch/erigon-lib/common"
-	"github.com/ledgerwatch/erigon-lib/etl"
-
-	"github.com/ledgerwatch/erigon-lib/commitment"
-	"github.com/ledgerwatch/erigon-lib/common/length"
-	"github.com/ledgerwatch/erigon-lib/compress"
-	"github.com/ledgerwatch/erigon-lib/kv"
-	"github.com/ledgerwatch/erigon-lib/recsplit"
-	"github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32"
-)
-
-// Aggregator of multiple state files to support state reader and state writer
-// The convention for the file names is as follows
-// State is composed of three types of files:
-// 1. Accounts. Keys are addresses (20 bytes), values are encodings of accounts
-// 2. Contract storage. Keys are concatenations of addresses (20 bytes) and storage locations (32 bytes), values have their leading zeroes removed
-// 3. Contract codes. Keys are addresses (20 bytes), values are bytecodes
-// Within each type, any file can cover an interval of block numbers, for example, `accounts.1-16` represents changes in accounts
-// that were effected by the blocks from 1 to 16, inclusively. The second component of the interval will be called "end block" for the file.
-// Finally, for each type and interval, there are two files - one with the compressed data (extension `dat`),
-// and another with the index (extension `idx`) consisting of the minimal perfect hash table mapping keys to the offsets of the corresponding keys
-// in the data file
-// Aggregator consists (apart from the files it is aggregating) of 4 parts:
-// 1. Persistent table of expiration times for each of the files. Key - name of the file, value - timestamp at which the file can be removed
-// 2. Transient (in-memory) mapping from the "end block" of each file to the objects required for accessing the file (compress.Decompressor and recsplit.Index)
-// 3. Persistent tables (one for accounts, one for contract storage, and one for contract code) summarising all the 1-block state diff files
-//    that were not yet merged together to form larger files. In these tables, keys are the same as keys in the state diff files, but values are also
-//    augmented by the number of state diff files this key is present in. This number gets decremented every time a 1-block state diff file is removed
-//    from the summary table (due to being merged). And when this number gets to 0, the record is deleted from the summary table.
-//    This number is encoded into the first 4 bytes of the value
-// 4. Aggregating persistent hash table. Maps state keys to block numbers for use in part 2 (this is not necessarily the block number where
-//    the item last changed, but it is guaranteed to find the correct element in the transient mapping of part 2)
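The naming convention above can be made concrete with a short sketch. This is illustrative only: `parseStateFileName` is a hypothetical helper (the real scanning lives in `scanStateFiles` further down in the deleted file), and it assumes the type names produced by `FileType.String()` below:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Hypothetical helper mirroring the convention described above:
// "<type>.<startBlock>-<endBlock>.(dat|idx)".
var stateFileRe = regexp.MustCompile(`^(account|storage|code|commitment)\.([0-9]+)-([0-9]+)\.(dat|idx)$`)

func parseStateFileName(name string) (fType string, startBlock, endBlock uint64, ok bool) {
	subs := stateFileRe.FindStringSubmatch(name)
	if len(subs) != 5 {
		return "", 0, 0, false
	}
	startBlock, err1 := strconv.ParseUint(subs[2], 10, 64)
	endBlock, err2 := strconv.ParseUint(subs[3], 10, 64)
	if err1 != nil || err2 != nil || startBlock > endBlock {
		return "", 0, 0, false
	}
	return subs[1], startBlock, endBlock, true
}

func main() {
	fType, from, to, ok := parseStateFileName("account.1-16.dat")
	fmt.Println(fType, from, to, ok) // account 1 16 true
}
```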
-
-type FileType int
-
-const (
-	Account FileType = iota
-	Storage
-	Code
-	Commitment
-	AccountHistory
-	StorageHistory
-	CodeHistory
-	AccountBitmap
-	StorageBitmap
-	CodeBitmap
-	NumberOfTypes
-)
-
-const (
-	FirstType                   = Account
-	NumberOfAccountStorageTypes = Code
-	NumberOfStateTypes          = AccountHistory
-)
-
-func (ft FileType) String() string {
-	switch ft {
-	case Account:
-		return "account"
-	case Storage:
-		return "storage"
-	case Code:
-		return "code"
-	case Commitment:
-		return "commitment"
-	case AccountHistory:
-		return "ahistory"
-	case CodeHistory:
-		return "chistory"
-	case StorageHistory:
-		return "shistory"
-	case AccountBitmap:
-		return "abitmap"
-	case CodeBitmap:
-		return "cbitmap"
-	case StorageBitmap:
-		return "sbitmap"
-	default:
-		panic(fmt.Sprintf("unknown file type: %d", ft))
-	}
-}
-
-func (ft FileType) Table() string {
-	switch ft {
-	case Account:
-		return kv.StateAccounts
-	case Storage:
-		return kv.StateStorage
-	case Code:
-		return kv.StateCode
-	case Commitment:
-		return kv.StateCommitment
-	default:
-		panic(fmt.Sprintf("unknown file type: %d", ft))
-	}
-}
-
-func ParseFileType(s string) (FileType, bool) {
-	switch s {
-	case "account":
-		return Account, true
-	case "storage":
-		return Storage, true
-	case "code":
-		return Code, true
-	case "commitment":
-		return Commitment, true
-	case "ahistory":
-		return AccountHistory, true
-	case "chistory":
-		return CodeHistory, true
-	case "shistory":
-		return StorageHistory, true
-	case "abitmap":
-		return AccountBitmap, true
-	case "cbitmap":
-		return CodeBitmap, true
-	case "sbitmap":
-		return StorageBitmap, true
-	default:
-		return NumberOfTypes, false
-	}
-}
-
-type Aggregator struct {
-	files           [NumberOfTypes]*btree.BTree
-	hph             commitment.Trie //*commitment.HexPatriciaHashed
-	archHasher      murmur3.Hash128
-	keccak          hash.Hash
-	historyChannel  chan struct{}
-	mergeChannel    chan struct{}
-	tracedKeys      map[string]struct{} // Set of keys being traced during aggregations
-	changesBtree    *btree.BTree        // btree of ChangesItem
-	historyError    chan error
-	mergeError      chan error
-	aggChannel      chan *AggregationTask
-	aggError        chan error
-	diffDir         string                       // Directory where the state diff files are stored
-	arches          [NumberOfStateTypes][]uint32 // Over-arching hash tables containing the block number of last aggregation
-	historyWg       sync.WaitGroup
-	aggWg           sync.WaitGroup
-	mergeWg         sync.WaitGroup
-	unwindLimit     uint64 // How far the chain may unwind
-	aggregationStep uint64 // How many items (blocks, but later perhaps txs or changes) are required to form one state diff file
-	fileHits        uint64 // Counter for state file hit ratio
-	fileMisses      uint64 // Counter for state file hit ratio
-	fileLocks       [NumberOfTypes]sync.RWMutex
-	commitments     bool // Whether to calculate commitments
-	changesets      bool // Whether to generate changesets (off by default)
-	trace           bool // Turns on tracing for specific accounts and locations
-}
-
-type ChangeFile struct {
-	r           *bufio.Reader
-	rTx         *bufio.Reader
-	w           *bufio.Writer
-	fileTx      *os.File
-	wTx         *bufio.Writer
-	file        *os.File
-	pathTx      string
-	path        string
-	dir         string
-	namebase    string
-	words       []byte // Words pending for the next block record, in the same slice
-	wordOffsets []int  // Offsets of words in the `words` slice
-	step        uint64
-	txNum       uint64 // Currently read transaction number
-	txRemaining uint64 // Remaining number of bytes to read in the current transaction
-}
-
-func (cf *ChangeFile) closeFile() error {
-	if len(cf.wordOffsets) > 0 {
-		return fmt.Errorf("closeFile without finish")
-	}
-	if cf.w != nil {
-		if err := cf.w.Flush(); err != nil {
-			return err
-		}
-		cf.w = nil
-	}
-	if cf.file != nil {
-		if err := cf.file.Close(); err != nil {
-			return err
-		}
-		cf.file = nil
-	}
-	if cf.wTx != nil {
-		if err := cf.wTx.Flush(); err != nil {
-			return err
-		}
-		cf.wTx = nil
-	}
-	if cf.fileTx != nil {
-		if err := cf.fileTx.Close(); err != nil {
-			return err
-		}
-		cf.fileTx = nil
-	}
-	return nil
-}
-
-func (cf *ChangeFile) openFile(blockNum uint64, write bool) error {
-	if len(cf.wordOffsets) > 0 {
-		return fmt.Errorf("openFile without finish")
-	}
-	rem := blockNum % cf.step
-	startBlock := blockNum - rem
-	endBlock := startBlock + cf.step - 1
-	if cf.w == nil {
-		cf.path = filepath.Join(cf.dir, fmt.Sprintf("%s.%d-%d.chg", cf.namebase, startBlock, endBlock))
-		cf.pathTx = filepath.Join(cf.dir, fmt.Sprintf("%s.%d-%d.ctx", cf.namebase, startBlock, endBlock))
-		var err error
-		if write {
-			if cf.file, err = os.OpenFile(cf.path, os.O_RDWR|os.O_CREATE, 0755); err != nil {
-				return err
-			}
-			if cf.fileTx, err = os.OpenFile(cf.pathTx, os.O_RDWR|os.O_CREATE, 0755); err != nil {
-				return err
-			}
-			if _, err = cf.file.Seek(0, 2 /* relative to the end of the file */); err != nil {
-				return err
-			}
-			if _, err = cf.fileTx.Seek(0, 2 /* relative to the end of the file */); err != nil {
-				return err
-			}
-		} else {
-			if cf.file, err = os.Open(cf.path); err != nil {
-				return err
-			}
-			if cf.fileTx, err = os.Open(cf.pathTx); err != nil {
-				return err
-			}
-		}
-		if write {
-			cf.w = bufio.NewWriter(cf.file)
-			cf.wTx = bufio.NewWriter(cf.fileTx)
-		}
-		cf.r = bufio.NewReader(cf.file)
-		cf.rTx = bufio.NewReader(cf.fileTx)
-	}
-	return nil
-}
-
-func (cf *ChangeFile) rewind() error {
-	var err error
-	if _, err = cf.file.Seek(0, 0); err != nil {
-		return err
-	}
-	cf.r = bufio.NewReader(cf.file)
-	if _, err = cf.fileTx.Seek(0, 0); err != nil {
-		return err
-	}
-	cf.rTx = bufio.NewReader(cf.fileTx)
-	return nil
-}
-
-func (cf *ChangeFile) add(word []byte) {
-	cf.words = append(cf.words, word...)
-	cf.wordOffsets = append(cf.wordOffsets, len(cf.words))
-}
-
-func (cf *ChangeFile) finish(txNum uint64) error {
-	var numBuf [10]byte
-	// Write out words
-	lastOffset := 0
-	var size uint64
-	for _, offset := range cf.wordOffsets {
-		word := cf.words[lastOffset:offset]
-		n := binary.PutUvarint(numBuf[:], uint64(len(word)))
-		if _, err := cf.w.Write(numBuf[:n]); err != nil {
-			return err
-		}
-		if len(word) > 0 {
-			if _, err := cf.w.Write(word); err != nil {
-				return err
-			}
-		}
-		size += uint64(n + len(word))
-		lastOffset = offset
-	}
-	cf.words = cf.words[:0]
-	cf.wordOffsets = cf.wordOffsets[:0]
-	n := binary.PutUvarint(numBuf[:], txNum)
-	if _, err := cf.wTx.Write(numBuf[:n]); err != nil {
-		return err
-	}
-	n = binary.PutUvarint(numBuf[:], size)
-	if _, err := cf.wTx.Write(numBuf[:n]); err != nil {
-		return err
-	}
-	return nil
-}
-
-// nextTx positions the reader to the beginning
-// of the next transaction
-func (cf *ChangeFile) nextTx() (bool, error) {
-	var err error
-	if cf.txNum, err = binary.ReadUvarint(cf.rTx); err != nil {
-		if errors.Is(err, io.EOF) {
-			return false, nil
-		}
-		return false, err
-	}
-	if cf.txRemaining, err = binary.ReadUvarint(cf.rTx); err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
-func (cf *ChangeFile) nextWord(wordBuf []byte) ([]byte, bool, error) {
-	if cf.txRemaining == 0 {
-		return wordBuf, false, nil
-	}
-	ws, err := binary.ReadUvarint(cf.r)
-	if err != nil {
-		return wordBuf, false, fmt.Errorf("word size: %w", err)
-	}
-	var buf []byte
-	if total := len(wordBuf) + int(ws); cap(wordBuf) >= total {
-		buf = wordBuf[:total] // Reuse the space in wordBuf, if it has enough capacity
-	} else {
-		buf = make([]byte, total)
-		copy(buf, wordBuf)
-	}
-	if _, err = io.ReadFull(cf.r, buf[len(wordBuf):]); err != nil {
-		return wordBuf, false, fmt.Errorf("read word (%d %d): %w", ws, len(buf[len(wordBuf):]), err)
-	}
-	var numBuf [10]byte
-	n := binary.PutUvarint(numBuf[:], ws)
-	cf.txRemaining -= uint64(n) + ws
-	return buf, true, nil
-}
-
-func (cf *ChangeFile) deleteFile() error {
-	if err := os.Remove(cf.path); err != nil {
-		return err
-	}
-	if err := os.Remove(cf.pathTx); err != nil {
-		return err
-	}
-	return nil
-}
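The record layout written by `finish` (and consumed by `nextTx`/`nextWord`) is simple enough to demonstrate in isolation. A minimal standard-library sketch, under the assumption taken from the code above that the `.chg` file stores `uvarint(len(word)) || word` per word and the `.ctx` file stores `uvarint(txNum) || uvarint(totalWordBytes)` per transaction:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	words := [][]byte{[]byte("key1"), []byte("key2")}
	var chg, ctx bytes.Buffer
	var num [binary.MaxVarintLen64]byte

	// Write the words into the .chg stream, tracking their total size.
	var size uint64
	for _, w := range words {
		n := binary.PutUvarint(num[:], uint64(len(w)))
		chg.Write(num[:n])
		chg.Write(w)
		size += uint64(n + len(w))
	}
	// Write the transaction record into the .ctx stream.
	txNum := uint64(42)
	n := binary.PutUvarint(num[:], txNum)
	ctx.Write(num[:n])
	n = binary.PutUvarint(num[:], size)
	ctx.Write(num[:n])

	// Reading back mirrors nextTx: txNum, then the byte budget for nextWord.
	r := bytes.NewReader(ctx.Bytes())
	gotTx, _ := binary.ReadUvarint(r)
	remaining, _ := binary.ReadUvarint(r)
	fmt.Println(gotTx, remaining) // 42 10
}
```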
-
-type Changes struct {
-	namebase string
-	dir      string
-	keys     ChangeFile
-	before   ChangeFile
-	after    ChangeFile
-	step     uint64
-	beforeOn bool
-}
-
-func (c *Changes) Init(namebase string, step uint64, dir string, beforeOn bool) {
-	c.namebase = namebase
-	c.step = step
-	c.dir = dir
-	c.keys.namebase = namebase + ".keys"
-	c.keys.dir = dir
-	c.keys.step = step
-	c.before.namebase = namebase + ".before"
-	c.before.dir = dir
-	c.before.step = step
-	c.after.namebase = namebase + ".after"
-	c.after.dir = dir
-	c.after.step = step
-	c.beforeOn = beforeOn
-}
-
-func (c *Changes) closeFiles() error {
-	if err := c.keys.closeFile(); err != nil {
-		return err
-	}
-	if c.beforeOn {
-		if err := c.before.closeFile(); err != nil {
-			return err
-		}
-	}
-	if err := c.after.closeFile(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (c *Changes) openFiles(blockNum uint64, write bool) error {
-	if err := c.keys.openFile(blockNum, write); err != nil {
-		return err
-	}
-	if c.beforeOn {
-		if err := c.before.openFile(blockNum, write); err != nil {
-			return err
-		}
-	}
-	if err := c.after.openFile(blockNum, write); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (c *Changes) insert(key, after []byte) {
-	c.keys.add(key)
-	if c.beforeOn {
-		c.before.add(nil)
-	}
-	c.after.add(after)
-}
-
-func (c *Changes) update(key, before, after []byte) {
-	c.keys.add(key)
-	if c.beforeOn {
-		c.before.add(before)
-	}
-	c.after.add(after)
-}
-
-func (c *Changes) delete(key, before []byte) {
-	c.keys.add(key)
-	if c.beforeOn {
-		c.before.add(before)
-	}
-	c.after.add(nil)
-}
-
-func (c *Changes) finish(txNum uint64) error {
-	if err := c.keys.finish(txNum); err != nil {
-		return err
-	}
-	if c.beforeOn {
-		if err := c.before.finish(txNum); err != nil {
-			return err
-		}
-	}
-	if err := c.after.finish(txNum); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (c *Changes) nextTx() (bool, uint64, error) {
-	bkeys, err := c.keys.nextTx()
-	if err != nil {
-		return false, 0, err
-	}
-	var bbefore, bafter bool
-	if c.beforeOn {
-		if bbefore, err = c.before.nextTx(); err != nil {
-			return false, 0, err
-		}
-	}
-	if bafter, err = c.after.nextTx(); err != nil {
-		return false, 0, err
-	}
-	if c.beforeOn && bkeys != bbefore {
-		return false, 0, fmt.Errorf("inconsistent tx iteration")
-	}
-	if bkeys != bafter {
-		return false, 0, fmt.Errorf("inconsistent tx iteration")
-	}
-	txNum := c.keys.txNum
-	if c.beforeOn {
-		if txNum != c.before.txNum {
-			return false, 0, fmt.Errorf("inconsistent txNum, keys: %d, before: %d", txNum, c.before.txNum)
-		}
-	}
-	if txNum != c.after.txNum {
-		return false, 0, fmt.Errorf("inconsistent txNum, keys: %d, after: %d", txNum, c.after.txNum)
-	}
-	return bkeys, txNum, nil
-}
-
-func (c *Changes) rewind() error {
-	if err := c.keys.rewind(); err != nil {
-		return err
-	}
-	if c.beforeOn {
-		if err := c.before.rewind(); err != nil {
-			return err
-		}
-	}
-	if err := c.after.rewind(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (c *Changes) nextTriple(keyBuf, beforeBuf, afterBuf []byte) ([]byte, []byte, []byte, bool, error) {
-	key, bkeys, err := c.keys.nextWord(keyBuf)
-	if err != nil {
-		return keyBuf, beforeBuf, afterBuf, false, fmt.Errorf("next key: %w", err)
-	}
-	var before, after []byte
-	var bbefore, bafter bool
-	if c.beforeOn {
-		if before, bbefore, err = c.before.nextWord(beforeBuf); err != nil {
-			return keyBuf, beforeBuf, afterBuf, false, fmt.Errorf("next before: %w", err)
-		}
-	}
-	if c.beforeOn && bkeys != bbefore {
-		return keyBuf, beforeBuf, afterBuf, false, fmt.Errorf("inconsistent word iteration")
-	}
-	if after, bafter, err = c.after.nextWord(afterBuf); err != nil {
-		return keyBuf, beforeBuf, afterBuf, false, fmt.Errorf("next after: %w", err)
-	}
-	if bkeys != bafter {
-		return keyBuf, beforeBuf, afterBuf, false, fmt.Errorf("inconsistent word iteration")
-	}
-	return key, before, after, bkeys, nil
-}
-
-func (c *Changes) deleteFiles() error {
-	if err := c.keys.deleteFile(); err != nil {
-		return err
-	}
-	if c.beforeOn {
-		if err := c.before.deleteFile(); err != nil {
-			return err
-		}
-	}
-	if err := c.after.deleteFile(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func buildIndex(d *compress.Decompressor, idxPath, tmpDir string, count int) (*recsplit.Index, error) {
-	var rs *recsplit.RecSplit
-	var err error
-	if rs, err = recsplit.NewRecSplit(recsplit.RecSplitArgs{
-		KeyCount:    count,
-		Enums:       false,
-		BucketSize:  2000,
-		LeafSize:    8,
-		TmpDir:      tmpDir,
-		IndexFile:   idxPath,
-		EtlBufLimit: etl.BufferOptimalSize / 2,
-	}); err != nil {
-		return nil, err
-	}
-	defer rs.Close()
-	rs.LogLvl(log.LvlDebug)
-
-	word := make([]byte, 0, 256)
-	var pos uint64
-	g := d.MakeGetter()
-	for {
-		g.Reset(0)
-		for g.HasNext() {
-			word, _ = g.Next(word[:0])
-			if err = rs.AddKey(word, pos); err != nil {
-				return nil, err
-			}
-			// Skip value
-			pos = g.Skip()
-		}
-		if err = rs.Build(); err != nil {
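For orientation, this is the lookup pattern the rest of the file uses against a `.dat`/`.idx` pair produced by `buildIndex`: the recsplit index maps a key to an offset in the compressed data file, and a getter is positioned there to verify the key and read the value. A sketch with signatures inferred from the calls in this file (erigon-lib's `compress` and `recsplit` packages), not a verbatim API reference:

```go
// Sketch, assuming d and idx come from compress.NewDecompressor and
// recsplit.OpenIndex respectively, as elsewhere in this file.
func lookup(d *compress.Decompressor, idx *recsplit.Index, key []byte) ([]byte, bool) {
	reader := recsplit.NewIndexReader(idx)
	offset := reader.Lookup(key)
	g := d.MakeGetter()
	g.Reset(offset)
	if !g.HasNext() {
		return nil, false
	}
	// A minimal perfect hash maps absent keys to arbitrary offsets,
	// so the key at the offset must be compared before trusting it.
	if keyMatch, _ := g.Match(key); !keyMatch {
		return nil, false
	}
	value, _ := g.Next(nil)
	return value, true
}
```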
-			if rs.Collision() {
-				log.Info("Building recsplit. Collision happened. It's ok. Restarting...")
-				rs.ResetNextSalt()
-			} else {
-				return nil, err
-			}
-		} else {
-			break
-		}
-	}
-	var idx *recsplit.Index
-	if idx, err = recsplit.OpenIndex(idxPath); err != nil {
-		return nil, err
-	}
-	return idx, nil
-}
-
-// aggregate gathers changes from the changefiles into a B-tree, and "removes" them from the database.
-// This function is time-critical because it needs to be run in the same go-routine (thread) as the general
-// execution (due to read-write tx). After that, we can optimistically execute the rest in the background
-func (c *Changes) aggregate(blockFrom, blockTo uint64, prefixLen int, tx kv.RwTx, table string, commitMerger commitmentMerger) (*btree.BTreeG[*AggregateItem], error) {
-	if err := c.openFiles(blockTo, false /* write */); err != nil {
-		return nil, fmt.Errorf("open files: %w", err)
-	}
-	bt := btree.NewG[*AggregateItem](32, AggregateItemLess)
-	err := c.aggregateToBtree(bt, prefixLen, commitMerger)
-	if err != nil {
-		return nil, fmt.Errorf("aggregateToBtree: %w", err)
-	}
-	// Clean up the DB table
-	var e error
-	bt.Ascend(func(item *AggregateItem) bool {
-		if item.count == 0 {
-			return true
-		}
-		dbPrefix := item.k
-		prevV, err := tx.GetOne(table, dbPrefix)
-		if err != nil {
-			e = err
-			return false
-		}
-		if prevV == nil {
-			e = fmt.Errorf("record not found in db for %s key %x", table, dbPrefix)
-			return false
-		}
-
-		prevNum := binary.BigEndian.Uint32(prevV[:4])
-		if prevNum < item.count {
-			e = fmt.Errorf("record count too low for %s key %s count %d, subtracting %d", table, dbPrefix, prevNum, item.count)
-			return false
-		}
-		if prevNum == item.count {
-			if e = tx.Delete(table, dbPrefix); e != nil {
-				return false
-			}
-		} else {
-			v := make([]byte, len(prevV))
-			binary.BigEndian.PutUint32(v[:4], prevNum-item.count)
-			copy(v[4:], prevV[4:])
-
-			if e = tx.Put(table, dbPrefix, v); e != nil {
-				return false
-			}
-		}
-		return true
-	})
-	if e != nil {
-		return nil, fmt.Errorf("clean up table %s after aggregation: %w", table, e)
-	}
-	return bt, nil
-}
-
-func (a *Aggregator) updateArch(bt *btree.BTreeG[*AggregateItem], fType FileType, blockNum32 uint32) {
-	arch := a.arches[fType]
-	h := a.archHasher
-	n := uint64(len(arch))
-	if n == 0 {
-		return
-	}
-	bt.Ascend(func(item *AggregateItem) bool {
-		if item.count == 0 {
-			return true
-		}
-		h.Reset()
-		h.Write(item.k) //nolint:errcheck
-		p, _ := h.Sum128()
-		p = p % n
-		v := atomic.LoadUint32(&arch[p])
-		if v < blockNum32 {
-			//fmt.Printf("Updated %s arch [%x]=%d %d\n", fType.String(), item.k, p, blockNum32)
-			atomic.StoreUint32(&arch[p], blockNum32)
-		}
-		return true
-	})
-}
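The "arch" update above can be read as a tiny standalone program: a key is hashed with murmur3, reduced modulo the table size, and the slot keeps the highest block number seen, so hash collisions only make later lookups conservative, never wrong. A sketch assuming only `github.com/spaolacci/murmur3` from the import list above; `touch` is a hypothetical name:

```go
package main

import (
	"fmt"
	"sync/atomic"

	"github.com/spaolacci/murmur3"
)

// touch records that key changed at blockNum, mirroring updateArch.
func touch(arch []uint32, key []byte, blockNum uint32) {
	h := murmur3.New128WithSeed(0) // the aggregator plans to randomise this seed
	h.Write(key)                   //nolint:errcheck
	p, _ := h.Sum128()
	slot := &arch[p%uint64(len(arch))]
	if atomic.LoadUint32(slot) < blockNum {
		atomic.StoreUint32(slot, blockNum)
	}
}

func main() {
	arch := make([]uint32, 1024)
	touch(arch, []byte("some-key"), 7)
	touch(arch, []byte("some-key"), 5) // lower block numbers never overwrite
	fmt.Println("arch updated")
}
```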
-
-type AggregateItem struct {
-	k, v  []byte
-	count uint32
-}
-
-func AggregateItemLess(a, than *AggregateItem) bool { return bytes.Compare(a.k, than.k) < 0 }
-func (i *AggregateItem) Less(than btree.Item) bool {
-	return bytes.Compare(i.k, than.(*AggregateItem).k) < 0
-}
-
-func (c *Changes) produceChangeSets(blockFrom, blockTo uint64, historyType, bitmapType FileType) (*compress.Decompressor, *recsplit.Index, *compress.Decompressor, *recsplit.Index, error) {
-	chsetDatPath := filepath.Join(c.dir, fmt.Sprintf("%s.%d-%d.dat", historyType.String(), blockFrom, blockTo))
-	chsetIdxPath := filepath.Join(c.dir, fmt.Sprintf("%s.%d-%d.idx", historyType.String(), blockFrom, blockTo))
-	bitmapDatPath := filepath.Join(c.dir, fmt.Sprintf("%s.%d-%d.dat", bitmapType.String(), blockFrom, blockTo))
-	bitmapIdxPath := filepath.Join(c.dir, fmt.Sprintf("%s.%d-%d.idx", bitmapType.String(), blockFrom, blockTo))
-	var blockSuffix [8]byte
-	binary.BigEndian.PutUint64(blockSuffix[:], blockTo)
-	bitmaps := map[string]*roaring64.Bitmap{}
-	comp, err := compress.NewCompressor(context.Background(), AggregatorPrefix, chsetDatPath, c.dir, compress.MinPatternScore, 1, log.LvlDebug)
-	if err != nil {
-		return nil, nil, nil, nil, fmt.Errorf("produceChangeSets NewCompressor: %w", err)
-	}
-	defer func() {
-		if comp != nil {
-			comp.Close()
-		}
-	}()
-	var totalRecords int
-	var b bool
-	var e error
-	var txNum uint64
-	var key, before, after []byte
-	if err = c.rewind(); err != nil {
-		return nil, nil, nil, nil, fmt.Errorf("produceChangeSets rewind: %w", err)
-	}
-	var txKey = make([]byte, 8, 60)
-	for b, txNum, e = c.nextTx(); b && e == nil; b, txNum, e = c.nextTx() {
-		binary.BigEndian.PutUint64(txKey[:8], txNum)
-		for key, before, after, b, e = c.nextTriple(key[:0], before[:0], after[:0]); b && e == nil; key, before, after, b, e = c.nextTriple(key[:0], before[:0], after[:0]) {
-			totalRecords++
-			txKey = append(txKey[:8], key...)
-			// In the initial files and most merged files, the txKey is added to the file, but it gets removed in the final merge
-			if err = comp.AddUncompressedWord(txKey); err != nil {
-				return nil, nil, nil, nil, fmt.Errorf("produceChangeSets AddWord key: %w", err)
-			}
-			if err = comp.AddUncompressedWord(before); err != nil {
-				return nil, nil, nil, nil, fmt.Errorf("produceChangeSets AddWord before: %w", err)
-			}
-			//if historyType == AccountHistory {
-			//	fmt.Printf("produce %s.%d-%d [%x]=>[%x]\n", historyType.String(), blockFrom, blockTo, txKey, before)
-			//}
-			var bitmap *roaring64.Bitmap
-			var ok bool
-			if bitmap, ok = bitmaps[string(key)]; !ok {
-				bitmap = roaring64.New()
-				bitmaps[string(key)] = bitmap
-			}
-			bitmap.Add(txNum)
-		}
-		if e != nil {
-			return nil, nil, nil, nil, fmt.Errorf("produceChangeSets nextTriple: %w", e)
-		}
-	}
-	if e != nil {
-		return nil, nil, nil, nil, fmt.Errorf("produceChangeSets prevTx: %w", e)
-	}
-	if err = comp.Compress(); err != nil {
-		return nil, nil, nil, nil, fmt.Errorf("produceChangeSets Compress: %w", err)
-	}
-	comp.Close()
-	comp = nil
-	var d *compress.Decompressor
-	var index *recsplit.Index
-	if d, err = compress.NewDecompressor(chsetDatPath); err != nil {
-		return nil, nil, nil, nil, fmt.Errorf("produceChangeSets changeset decompressor: %w", err)
-	}
-	if index, err = buildIndex(d, chsetIdxPath, c.dir, totalRecords); err != nil {
-		return nil, nil, nil, nil, fmt.Errorf("produceChangeSets changeset buildIndex: %w", err)
-	}
-	// Create bitmap files
-	bitmapC, err := compress.NewCompressor(context.Background(), AggregatorPrefix, bitmapDatPath, c.dir, compress.MinPatternScore, 1, log.LvlDebug)
-	if err != nil {
-		return nil, nil, nil, nil, fmt.Errorf("produceChangeSets bitmap NewCompressor: %w", err)
-	}
-	defer func() {
-		if bitmapC != nil {
-			bitmapC.Close()
-		}
-	}()
-	idxKeys := make([]string, len(bitmaps))
-	i := 0
-	var buf []byte
-	for key := range bitmaps {
-		idxKeys[i] = key
-		i++
-	}
-	slices.Sort(idxKeys)
-	for _, key := range idxKeys {
-		if err = bitmapC.AddUncompressedWord([]byte(key)); err != nil {
-			return nil, nil, nil, nil, fmt.Errorf("produceChangeSets bitmap add key: %w", err)
-		}
-		bitmap := bitmaps[key]
-		ef := eliasfano32.NewEliasFano(bitmap.GetCardinality(), bitmap.Maximum())
-		it := bitmap.Iterator()
-		for it.HasNext() {
-			v := it.Next()
-			ef.AddOffset(v)
-		}
-		ef.Build()
-		buf = ef.AppendBytes(buf[:0])
-		if err = bitmapC.AddUncompressedWord(buf); err != nil {
-			return nil, nil, nil, nil, fmt.Errorf("produceChangeSets bitmap add val: %w", err)
-		}
-	}
-	if err = bitmapC.Compress(); err != nil {
-		return nil, nil, nil, nil, fmt.Errorf("produceChangeSets bitmap Compress: %w", err)
-	}
-	bitmapC.Close()
-	bitmapC = nil
-	bitmapD, err := compress.NewDecompressor(bitmapDatPath)
-	if err != nil {
-		return nil, nil, nil, nil, fmt.Errorf("produceChangeSets bitmap decompressor: %w", err)
-	}
-
-	bitmapI, err := buildIndex(bitmapD, bitmapIdxPath, c.dir, len(idxKeys))
-	if err != nil {
-		return nil, nil, nil, nil, fmt.Errorf("produceChangeSets bitmap buildIndex: %w", err)
-	}
-	return d, index, bitmapD, bitmapI, nil
-}
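The Elias-Fano step above, in isolation: a sorted set of tx numbers is packed, serialised, and read back. A sketch reusing only the `eliasfano32` calls that already appear in this file (the encode path mirrors `produceChangeSets`, the decode path mirrors `mergeBitmaps` further down):

```go
// Sketch: round-trip a sorted list of tx numbers through eliasfano32.
func roundTripEF(txNums []uint64) []uint64 {
	ef := eliasfano32.NewEliasFano(uint64(len(txNums)), txNums[len(txNums)-1])
	for _, v := range txNums {
		ef.AddOffset(v) // offsets must be added in ascending order
	}
	ef.Build()
	buf := ef.AppendBytes(nil)

	ef2, _ := eliasfano32.ReadEliasFano(buf)
	out := make([]uint64, 0, len(txNums))
	it := ef2.Iterator()
	for it.HasNext() {
		v, _ := it.Next()
		out = append(out, v)
	}
	return out
}
```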
-
-// aggregateToBtree iterates over all available changes in the change files covered by this instance `c`
-// (there are 3 of them, one for "keys", one for values "before" every change, and one for values "after" every change)
-// and creates a B-tree where each key is only represented once, with the value corresponding to the "after" value
-// of the latest change.
-func (c *Changes) aggregateToBtree(bt *btree.BTreeG[*AggregateItem], prefixLen int, commitMerge commitmentMerger) error {
-	var b bool
-	var e error
-	var key, before, after []byte
-	var ai AggregateItem
-	var prefix []byte
-	// Note that the following loop iterates over transactions forwards, therefore it replaces entries in the B-tree
-	for b, _, e = c.nextTx(); b && e == nil; b, _, e = c.nextTx() {
-		// Within each transaction, keys are unique, but they can appear in any order
-		for key, before, after, b, e = c.nextTriple(key[:0], before[:0], after[:0]); b && e == nil; key, before, after, b, e = c.nextTriple(key[:0], before[:0], after[:0]) {
-			if prefixLen > 0 && !bytes.Equal(prefix, key[:prefixLen]) {
-				prefix = common.Copy(key[:prefixLen])
-				item := &AggregateItem{k: prefix, count: 0}
-				bt.ReplaceOrInsert(item)
-			}
-
-			ai.k = key
-			i, ok := bt.Get(&ai)
-			if !ok || i == nil {
-				item := &AggregateItem{k: common.Copy(key), v: common.Copy(after), count: 1}
-				bt.ReplaceOrInsert(item)
-				continue
-			}
-
-			item := i
-			if commitMerge != nil {
-				mergedVal, err := commitMerge(item.v, after, nil)
-				if err != nil {
-					return fmt.Errorf("merge branches (%T) : %w", commitMerge, err)
-				}
-				//fmt.Printf("aggregateToBtree prefix [%x], [%x]+[%x]=>[%x]\n", commitment.CompactToHex(key), after, item.v, mergedVal)
-				item.v = mergedVal
-			} else {
-				item.v = common.Copy(after)
-			}
-			item.count++
-		}
-		if e != nil {
-			return fmt.Errorf("aggregateToBtree nextTriple: %w", e)
-		}
-	}
-	if e != nil {
-		return fmt.Errorf("aggregateToBtree prevTx: %w", e)
-	}
-	return nil
-}
-
-const AggregatorPrefix = "aggregator"
-
-func btreeToFile(bt *btree.BTreeG[*AggregateItem], datPath, tmpdir string, trace bool, workers int) (int, error) {
-	comp, err := compress.NewCompressor(context.Background(), AggregatorPrefix, datPath, tmpdir, compress.MinPatternScore, workers, log.LvlDebug)
-	if err != nil {
-		return 0, err
-	}
-	defer comp.Close()
-	comp.SetTrace(trace)
-	count := 0
-	bt.Ascend(func(item *AggregateItem) bool {
-		//fmt.Printf("btreeToFile %s [%x]=>[%x]\n", datPath, item.k, item.v)
-		if err = comp.AddUncompressedWord(item.k); err != nil {
-			return false
-		}
-		count++ // Only counting keys, not values
-		if err = comp.AddUncompressedWord(item.v); err != nil {
-			return false
-		}
-		return true
-	})
-	if err != nil {
-		return 0, err
-	}
-	if err = comp.Compress(); err != nil {
-		return 0, err
-	}
-	return count, nil
-}
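Since `aggregateToBtree` and `btreeToFile` both revolve around the same `btree.BTreeG[*AggregateItem]` bookkeeping, here is the pattern reduced to its core: the latest "after" value for a key replaces earlier ones, and `count` tracks how many change entries contributed. An in-package sketch (it touches the unexported `k`, `v`, `count` fields); `put` is a hypothetical helper:

```go
func exampleAggregate() {
	bt := btree.NewG[*AggregateItem](32, AggregateItemLess)

	put := func(key, after []byte) {
		if item, ok := bt.Get(&AggregateItem{k: key}); ok && item != nil {
			item.v = common.Copy(after) // the later change wins
			item.count++
			return
		}
		bt.ReplaceOrInsert(&AggregateItem{k: common.Copy(key), v: common.Copy(after), count: 1})
	}

	put([]byte("addr1"), []byte("v1"))
	put([]byte("addr1"), []byte("v2")) // "addr1" now maps to "v2" with count == 2
}
```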
-
-type ChangesItem struct {
-	endBlock   uint64
-	startBlock uint64
-	fileCount  int
-}
-
-func (i *ChangesItem) Less(than btree.Item) bool {
-	if i.endBlock == than.(*ChangesItem).endBlock {
-		// Larger intervals will come last
-		return i.startBlock > than.(*ChangesItem).startBlock
-	}
-	return i.endBlock < than.(*ChangesItem).endBlock
-}
-
-type byEndBlockItem struct {
-	decompressor *compress.Decompressor
-	getter       *compress.Getter // reader for the decompressor
-	getterMerge  *compress.Getter // reader for the decompressor used in the background merge thread
-	index        *recsplit.Index
-	indexReader  *recsplit.IndexReader          // reader for the index
-	readerMerge  *recsplit.IndexReader          // index reader for the background merge thread
-	tree         *btree.BTreeG[*AggregateItem] // Substitute for decompressor+index combination
-	startBlock   uint64
-	endBlock     uint64
-}
-
-func ByEndBlockItemLess(i, than *byEndBlockItem) bool {
-	if i.endBlock == than.endBlock {
-		return i.startBlock > than.startBlock
-	}
-	return i.endBlock < than.endBlock
-}
-
-func (i *byEndBlockItem) Less(than btree.Item) bool {
-	if i.endBlock == than.(*byEndBlockItem).endBlock {
-		return i.startBlock > than.(*byEndBlockItem).startBlock
-	}
-	return i.endBlock < than.(*byEndBlockItem).endBlock
-}
-
-func (a *Aggregator) scanStateFiles(files []fs.DirEntry) {
-	typeStrings := make([]string, NumberOfTypes)
-	for fType := FileType(0); fType < NumberOfTypes; fType++ {
-		typeStrings[fType] = fType.String()
-	}
-	re := regexp.MustCompile("^(" + strings.Join(typeStrings, "|") + ").([0-9]+)-([0-9]+).(dat|idx)$")
-	var err error
-	for _, f := range files {
-		name := f.Name()
-		subs := re.FindStringSubmatch(name)
-		if len(subs) != 5 {
-			if len(subs) != 0 {
-				log.Warn("File ignored by aggregator, more than 4 submatches", "name", name, "submatches", len(subs))
-			}
-			continue
-		}
-		var startBlock, endBlock uint64
-		if startBlock, err = strconv.ParseUint(subs[2], 10, 64); err != nil {
-			log.Warn("File ignored by aggregator, parsing startBlock", "error", err, "name", name)
-			continue
-		}
-		if endBlock, err = strconv.ParseUint(subs[3], 10, 64); err != nil {
-			log.Warn("File ignored by aggregator, parsing endBlock", "error", err, "name", name)
-			continue
-		}
-		if startBlock > endBlock {
-			log.Warn("File ignored by aggregator, startBlock > endBlock", "name", name)
-			continue
-		}
-		fType, ok := ParseFileType(subs[1])
-		if !ok {
-			log.Warn("File ignored by aggregator, type unknown", "type", subs[1])
-		}
-		var item = &byEndBlockItem{startBlock: startBlock, endBlock: endBlock}
-		var foundI *byEndBlockItem
-		a.files[fType].AscendGreaterOrEqual(&byEndBlockItem{startBlock: endBlock, endBlock: endBlock}, func(i btree.Item) bool {
-			it := i.(*byEndBlockItem)
-			if it.endBlock == endBlock {
-				foundI = it
-			}
-			return false
-		})
-		if foundI == nil || foundI.startBlock > startBlock {
-			log.Info("Load state file", "name", name, "type", fType.String(), "startBlock", startBlock, "endBlock", endBlock)
-			a.files[fType].ReplaceOrInsert(item)
-		}
-	}
-}
-
-func NewAggregator(diffDir string, unwindLimit uint64, aggregationStep uint64, changesets, commitments bool, minArch uint64, trie commitment.Trie, tx kv.RwTx) (*Aggregator, error) {
-	a := &Aggregator{
-		diffDir:         diffDir,
-		unwindLimit:     unwindLimit,
-		aggregationStep: aggregationStep,
-		tracedKeys:      map[string]struct{}{},
-		keccak:          sha3.NewLegacyKeccak256(),
-		hph:             trie,
-		aggChannel:      make(chan *AggregationTask, 1024),
-		aggError:        make(chan error, 1),
-		mergeChannel:    make(chan struct{}, 1),
-		mergeError:      make(chan error, 1),
-		historyChannel:  make(chan struct{}, 1),
-		historyError:    make(chan error, 1),
-		changesets:      changesets,
-		commitments:     commitments,
-		archHasher:      murmur3.New128WithSeed(0), // TODO: Randomise salt
-	}
-	for fType := FirstType; fType < NumberOfTypes; fType++ {
-		a.files[fType] = btree.New(32)
-	}
-	var closeStateFiles = true // It will be set to false in case of success at the end of the function
-	defer func() {
-		// Clean up all decompressors and indices upon error
-		if closeStateFiles {
-			a.Close()
-		}
-	}()
-	// Scan the diff directory and create the mapping of end blocks to files
-	files, err := os.ReadDir(diffDir)
-	if err != nil {
-		return nil, err
-	}
-	a.scanStateFiles(files)
-	// Check for overlaps and holes
-	for fType := FirstType; fType < NumberOfTypes; fType++ {
-		if err := checkOverlaps(fType.String(), a.files[fType]); err != nil {
-			return nil, err
-		}
-	}
-	// Open decompressor and index files for all items in state trees
-	for fType := FirstType; fType < NumberOfTypes; fType++ {
-		if err := a.openFiles(fType, minArch); err != nil {
-			return nil, fmt.Errorf("opening %s state files: %w", fType.String(), err)
-		}
-	}
-	a.changesBtree = btree.New(32)
-	re := regexp.MustCompile(`^(account|storage|code|commitment).(keys|before|after).([0-9]+)-([0-9]+).chg$`)
-	for _, f := range files {
-		name := f.Name()
-		subs := re.FindStringSubmatch(name)
-		if len(subs) != 5 {
-			if len(subs) != 0 {
-				log.Warn("File ignored by changes scan, more than 4 submatches", "name", name, "submatches", len(subs))
-			}
-			continue
-		}
-		var startBlock, endBlock uint64
-		if startBlock, err = strconv.ParseUint(subs[3], 10, 64); err != nil {
-			log.Warn("File ignored by changes scan, parsing startBlock", "error", err, "name", name)
-			continue
-		}
-		if endBlock, err = strconv.ParseUint(subs[4], 10, 64); err != nil {
-			log.Warn("File ignored by changes scan, parsing endBlock", "error", err, "name", name)
-			continue
-		}
-		if startBlock > endBlock {
-			log.Warn("File ignored by changes scan, startBlock > endBlock", "name", name)
-			continue
-		}
-		if endBlock != startBlock+aggregationStep-1 {
-			log.Warn("File ignored by changes scan, endBlock != startBlock+aggregationStep-1", "name", name)
-			continue
-		}
-		var item = &ChangesItem{fileCount: 1, startBlock: startBlock, endBlock: endBlock}
-		i := a.changesBtree.Get(item)
-		if i == nil {
-			a.changesBtree.ReplaceOrInsert(item)
-		} else {
-			item = i.(*ChangesItem)
-			if item.startBlock == startBlock {
-				item.fileCount++
-			} else {
-				return nil, fmt.Errorf("change files overlap [%d-%d] with [%d-%d]", item.startBlock, item.endBlock, startBlock, endBlock)
-			}
-		}
-	}
-	// Check for holes in change files
-	minStart := uint64(math.MaxUint64)
-	a.changesBtree.Descend(func(i btree.Item) bool {
-		item := i.(*ChangesItem)
-		if item.startBlock < minStart {
-			if item.endBlock >= minStart {
-				err = fmt.Errorf("overlap of change files [%d-%d] with %d", item.startBlock, item.endBlock, minStart)
-				return false
-			}
-			if minStart != math.MaxUint64 && item.endBlock+1 != minStart {
-				err = fmt.Errorf("hole in change files [%d-%d]", item.endBlock, minStart)
-				return false
-			}
-			minStart = item.startBlock
-		} else {
-			err = fmt.Errorf("overlap of change files [%d-%d] with %d", item.startBlock, item.endBlock, minStart)
-			return false
-		}
-		return true
-	})
-	if err != nil {
-		return nil, err
-	}
-	for fType := FirstType; fType < NumberOfStateTypes; fType++ {
-		if err = checkOverlapWithMinStart(fType.String(), a.files[fType], minStart); err != nil {
-			return nil, err
-		}
-	}
-	if err = a.rebuildRecentState(tx); err != nil {
-		return nil, fmt.Errorf("rebuilding recent state from change files: %w", err)
-	}
-	closeStateFiles = false
-	a.aggWg.Add(1)
-	go a.backgroundAggregation()
-	a.mergeWg.Add(1)
-	go a.backgroundMerge()
-	if a.changesets {
-		a.historyWg.Add(1)
-		go a.backgroundHistoryMerge()
-	}
-	return a, nil
-}
-
-// rebuildRecentState reads change files and reconstructs the recent state
-func (a *Aggregator) rebuildRecentState(tx kv.RwTx) error {
-	t := time.Now()
-	var err error
-	trees := map[FileType]*btree.BTreeG[*AggregateItem]{}
-
-	a.changesBtree.Ascend(func(i btree.Item) bool {
-		item := i.(*ChangesItem)
-		for fType := FirstType; fType < NumberOfStateTypes; fType++ {
-			tree, ok := trees[fType]
-			if !ok {
-				tree = btree.NewG[*AggregateItem](32, AggregateItemLess)
-				trees[fType] = tree
-			}
-			var changes Changes
-			changes.Init(fType.String(), a.aggregationStep, a.diffDir, false /* beforeOn */)
-			if err = changes.openFiles(item.startBlock, false /* write */); err != nil {
-				return false
-			}
-			var prefixLen int
-			if fType == Storage {
-				prefixLen = length.Addr
-			}
-
-			var commitMerger commitmentMerger
-			if fType == Commitment {
-				commitMerger = mergeCommitments
-			}
-
-			if err = changes.aggregateToBtree(tree, prefixLen, commitMerger); err != nil {
-				return false
-			}
-			if err = changes.closeFiles(); err != nil {
-				return false
-			}
-		}
-		return true
-	})
-	if err != nil {
-		return err
-	}
-	for fType, tree := range trees {
-		table := fType.Table()
-		tree.Ascend(func(item *AggregateItem) bool {
-			if len(item.v) == 0 {
-				return true
-			}
-			var v []byte
-			if v, err = tx.GetOne(table, item.k); err != nil {
-				return false
-			}
-			if item.count != binary.BigEndian.Uint32(v[:4]) {
-				err = fmt.Errorf("mismatched count for %x: change file %d, db: %d", item.k, item.count, binary.BigEndian.Uint32(v[:4]))
-				return false
-			}
-			if !bytes.Equal(item.v, v[4:]) {
-				err = fmt.Errorf("mismatched v for %x: change file [%x], db: [%x]", item.k, item.v, v[4:])
-				return false
-			}
-			return true
-		})
-	}
-	if err != nil {
-		return err
-	}
-	log.Info("reconstructed recent state", "in", time.Since(t))
-	return nil
-}
-
-type AggregationTask struct {
-	bt        [NumberOfStateTypes]*btree.BTreeG[*AggregateItem]
-	changes   [NumberOfStateTypes]Changes
-	blockFrom uint64
-	blockTo   uint64
-}
-
-func (a *Aggregator) removeLocked(fType FileType, toRemove []*byEndBlockItem, item *byEndBlockItem) {
-	a.fileLocks[fType].Lock()
-	defer a.fileLocks[fType].Unlock()
-	if len(toRemove) > 1 {
-		for _, ag := range toRemove {
-			a.files[fType].Delete(ag)
-		}
-		a.files[fType].ReplaceOrInsert(item)
-	}
-}
-
-func (a *Aggregator) removeLockedState(
-	accountsToRemove []*byEndBlockItem, accountsItem *byEndBlockItem,
-	codeToRemove []*byEndBlockItem, codeItem *byEndBlockItem,
-	storageToRemove []*byEndBlockItem, storageItem *byEndBlockItem,
-	commitmentToRemove []*byEndBlockItem, commitmentItem *byEndBlockItem,
-) {
-	for fType := FirstType; fType < NumberOfStateTypes; fType++ {
-		a.fileLocks[fType].Lock()
-		defer a.fileLocks[fType].Unlock()
-	}
-	if len(accountsToRemove) > 1 {
-		for _, ag := range accountsToRemove {
-			a.files[Account].Delete(ag)
-		}
-		a.files[Account].ReplaceOrInsert(accountsItem)
-	}
-	if len(codeToRemove) > 1 {
-		for _, ag := range codeToRemove {
-			a.files[Code].Delete(ag)
-		}
-		a.files[Code].ReplaceOrInsert(codeItem)
-	}
-	if len(storageToRemove) > 1 {
-		for _, ag := range storageToRemove {
-			a.files[Storage].Delete(ag)
-		}
-		a.files[Storage].ReplaceOrInsert(storageItem)
-	}
-	if len(commitmentToRemove) > 1 {
-		for _, ag := range commitmentToRemove {
-			a.files[Commitment].Delete(ag)
-		}
-		a.files[Commitment].ReplaceOrInsert(commitmentItem)
-	}
-}
-
-func removeFiles(fType FileType, diffDir string, toRemove []*byEndBlockItem) error {
-	// Close all the memory maps etc
-	for _, ag := range toRemove {
-		if err := ag.index.Close(); err != nil {
-			return fmt.Errorf("close index: %w", err)
-		}
-		if err := ag.decompressor.Close(); err != nil {
-			return fmt.Errorf("close decompressor: %w", err)
-		}
-	}
-	// Delete files
-	// TODO: in a non-test version, this is delayed to allow other participants to roll over to the next file
-	for _, ag := range toRemove {
-		if err := os.Remove(path.Join(diffDir, fmt.Sprintf("%s.%d-%d.dat", fType.String(), ag.startBlock, ag.endBlock))); err != nil {
-			return fmt.Errorf("remove decompressor file %s.%d-%d.dat: %w", fType.String(), ag.startBlock, ag.endBlock, err)
-		}
-		if err := os.Remove(path.Join(diffDir, fmt.Sprintf("%s.%d-%d.idx", fType.String(), ag.startBlock, ag.endBlock))); err != nil {
-			return fmt.Errorf("remove index file %s.%d-%d.idx: %w", fType.String(), ag.startBlock, ag.endBlock, err)
-		}
-	}
-	return nil
-}
-
-// backgroundAggregation is the function that runs in a background go-routine and performs creation of initial state files
-// allowing the main goroutine to proceed
-func (a *Aggregator) backgroundAggregation() {
-	defer a.aggWg.Done()
-	for aggTask := range a.aggChannel {
-		if a.changesets {
-			if historyD, historyI, bitmapD, bitmapI, err := aggTask.changes[Account].produceChangeSets(aggTask.blockFrom, aggTask.blockTo, AccountHistory, AccountBitmap); err == nil {
-				var historyItem = &byEndBlockItem{startBlock: aggTask.blockFrom, endBlock: aggTask.blockTo}
-				historyItem.decompressor = historyD
-				historyItem.index = historyI
-				historyItem.getter = historyItem.decompressor.MakeGetter()
-				historyItem.getterMerge = historyItem.decompressor.MakeGetter()
-				historyItem.indexReader = recsplit.NewIndexReader(historyItem.index)
-				historyItem.readerMerge = recsplit.NewIndexReader(historyItem.index)
-				a.addLocked(AccountHistory, historyItem)
-				var bitmapItem = &byEndBlockItem{startBlock: aggTask.blockFrom, endBlock: aggTask.blockTo}
-				bitmapItem.decompressor = bitmapD
-				bitmapItem.index = bitmapI
-				bitmapItem.getter = bitmapItem.decompressor.MakeGetter()
-				bitmapItem.getterMerge = bitmapItem.decompressor.MakeGetter()
-				bitmapItem.indexReader = recsplit.NewIndexReader(bitmapItem.index)
-				bitmapItem.readerMerge = recsplit.NewIndexReader(bitmapItem.index)
-				a.addLocked(AccountBitmap, bitmapItem)
-			} else {
-				a.aggError <- fmt.Errorf("produceChangeSets %s: %w", Account.String(), err)
-				return
-			}
-			if historyD, historyI, bitmapD, bitmapI, err := aggTask.changes[Storage].produceChangeSets(aggTask.blockFrom, aggTask.blockTo, StorageHistory, StorageBitmap); err == nil {
-				var historyItem = &byEndBlockItem{startBlock: aggTask.blockFrom, endBlock: aggTask.blockTo}
-				historyItem.decompressor = historyD
-				historyItem.index = historyI
-				historyItem.getter = historyItem.decompressor.MakeGetter()
-				historyItem.getterMerge = historyItem.decompressor.MakeGetter()
-				historyItem.indexReader = recsplit.NewIndexReader(historyItem.index)
-				historyItem.readerMerge = recsplit.NewIndexReader(historyItem.index)
-				a.addLocked(StorageHistory, historyItem)
-				var bitmapItem = &byEndBlockItem{startBlock: aggTask.blockFrom, endBlock: aggTask.blockTo}
-				bitmapItem.decompressor = bitmapD
-				bitmapItem.index = bitmapI
-				bitmapItem.getter = bitmapItem.decompressor.MakeGetter()
-				bitmapItem.getterMerge = bitmapItem.decompressor.MakeGetter()
-				bitmapItem.indexReader = recsplit.NewIndexReader(bitmapItem.index)
-				bitmapItem.readerMerge = recsplit.NewIndexReader(bitmapItem.index)
-				a.addLocked(StorageBitmap, bitmapItem)
-			} else {
-				a.aggError <- fmt.Errorf("produceChangeSets %s: %w", Storage.String(), err)
-				return
-			}
-			if historyD, historyI, bitmapD, bitmapI, err := aggTask.changes[Code].produceChangeSets(aggTask.blockFrom, aggTask.blockTo, CodeHistory, CodeBitmap); err == nil {
-				var historyItem = &byEndBlockItem{startBlock: aggTask.blockFrom, endBlock: aggTask.blockTo}
-				historyItem.decompressor = historyD
-				historyItem.index = historyI
-				historyItem.getter = historyItem.decompressor.MakeGetter()
-				historyItem.getterMerge = historyItem.decompressor.MakeGetter()
-				historyItem.indexReader = recsplit.NewIndexReader(historyItem.index)
-				historyItem.readerMerge = recsplit.NewIndexReader(historyItem.index)
-				a.addLocked(CodeHistory, historyItem)
-				var bitmapItem = &byEndBlockItem{startBlock: aggTask.blockFrom, endBlock: aggTask.blockTo}
-				bitmapItem.decompressor = bitmapD
-				bitmapItem.index = bitmapI
-				bitmapItem.getter = bitmapItem.decompressor.MakeGetter()
-				bitmapItem.getterMerge = bitmapItem.decompressor.MakeGetter()
-				bitmapItem.indexReader = recsplit.NewIndexReader(bitmapItem.index)
-				bitmapItem.readerMerge = recsplit.NewIndexReader(bitmapItem.index)
-				a.addLocked(CodeBitmap, bitmapItem)
-			} else {
-				a.aggError <- fmt.Errorf("produceChangeSets %s: %w", Code.String(), err)
-				return
-			}
-		}
-		typesLimit := Commitment
-		if a.commitments {
-			typesLimit = AccountHistory
-		}
-		for fType := FirstType; fType < typesLimit; fType++ {
-			var err error
-			if err = aggTask.changes[fType].closeFiles(); err != nil {
-				a.aggError <- fmt.Errorf("close %sChanges: %w", fType.String(), err)
-				return
-			}
-			var item = &byEndBlockItem{startBlock: aggTask.blockFrom, endBlock: aggTask.blockTo}
-			if item.decompressor, item.index, err = createDatAndIndex(fType.String(), a.diffDir, aggTask.bt[fType], aggTask.blockFrom, aggTask.blockTo); err != nil {
-				a.aggError <- fmt.Errorf("createDatAndIndex %s: %w", fType.String(), err)
-				return
-			}
-			item.getter = item.decompressor.MakeGetter()
-			item.getterMerge = item.decompressor.MakeGetter()
-			item.indexReader = recsplit.NewIndexReader(item.index)
-			item.readerMerge = recsplit.NewIndexReader(item.index)
-			if err = aggTask.changes[fType].deleteFiles(); err != nil {
-				a.aggError <- fmt.Errorf("delete %sChanges: %w", fType.String(), err)
-				return
-			}
-			a.addLocked(fType, item)
-		}
-		// At this point, 3 new state files (containing the latest changes) have been created for accounts, code, and storage
-		// Corresponding items have been added to the registry of state files, the B-trees are not necessary anymore, and change files can be removed
-		// What follows can be performed by the 2nd background goroutine
-		select {
-		case a.mergeChannel <- struct{}{}:
-		default:
-		}
-		select {
-		case a.historyChannel <- struct{}{}:
-		default:
-		}
-	}
-}
-
-type CommitmentValTransform struct {
-	pre  [NumberOfAccountStorageTypes][]*byEndBlockItem // List of state files before the merge
-	post [NumberOfAccountStorageTypes][]*byEndBlockItem // List of state files after the merge
-}
-
-func decodeU64(from []byte) uint64 {
-	var i uint64
-	for _, b := range from {
-		i = (i << 8) | uint64(b)
-	}
-	return i
-}
-
-// encodeU64 writes i to `to` in big-endian byte order, using the least number of bytes needed to represent i.
-func encodeU64(i uint64, to []byte) []byte {
-	switch {
-	case i < (1 << 8):
-		return append(to, byte(i))
-	case i < (1 << 16):
-		return append(to, byte(i>>8), byte(i))
-	case i < (1 << 24):
-		return append(to, byte(i>>16), byte(i>>8), byte(i))
-	case i < (1 << 32):
-		return append(to, byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
-	case i < (1 << 40):
-		return append(to, byte(i>>32), byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
-	case i < (1 << 48):
-		return append(to, byte(i>>40), byte(i>>32), byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
-	case i < (1 << 56):
-		return append(to, byte(i>>48), byte(i>>40), byte(i>>32), byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
-	default:
-		return append(to, byte(i>>56), byte(i>>48), byte(i>>40), byte(i>>32), byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
-	}
-}
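A round-trip sketch for the two helpers above: `encodeU64` drops leading zero bytes so that offsets embedded in "optimised" plain keys stay compact, and `decodeU64` restores the integer. The `main` wrapper is only for illustration and assumes the `fmt` import:

```go
func main() {
	for _, v := range []uint64{0x5, 0x1234, 0xdeadbeef} {
		enc := encodeU64(v, nil)
		if decodeU64(enc) != v {
			panic("encodeU64/decodeU64 round trip failed")
		}
		fmt.Printf("%#x -> %x (%d byte(s))\n", v, enc, len(enc))
	}
}
```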
-
-// commitmentValTransform parses the value of the commitment record to extract references
-// to accounts and storage items, then looks them up in the new, merged files, and replaces them with
-// the updated references
-func (cvt *CommitmentValTransform) commitmentValTransform(val, transValBuf commitment.BranchData) ([]byte, error) {
-	if len(val) == 0 {
-		return transValBuf, nil
-	}
-
-	accountPlainKeys, storagePlainKeys, err := val.ExtractPlainKeys()
-	if err != nil {
-		return nil, err
-	}
-	transAccountPks := make([][]byte, 0, len(accountPlainKeys))
-	var apkBuf, spkBuf []byte
-	for _, accountPlainKey := range accountPlainKeys {
-		if len(accountPlainKey) == length.Addr {
-			// Non-optimised key originating from a database record
-			apkBuf = append(apkBuf[:0], accountPlainKey...)
-		} else {
-			// Optimised key referencing a state file record (file number and offset within the file)
-			fileI := int(accountPlainKey[0])
-			offset := decodeU64(accountPlainKey[1:])
-			g := cvt.pre[Account][fileI].getterMerge
-			g.Reset(offset)
-			apkBuf, _ = g.Next(apkBuf[:0])
-			//fmt.Printf("replacing account [%x] from [%x]\n", apkBuf, accountPlainKey)
-		}
-		// Look up apkBuf in the post account files
-		for j := len(cvt.post[Account]); j > 0; j-- {
-			item := cvt.post[Account][j-1]
-			if item.index.Empty() {
-				continue
-			}
-			offset := item.readerMerge.Lookup(apkBuf)
-			g := item.getterMerge
-			g.Reset(offset)
-			if g.HasNext() {
-				if keyMatch, _ := g.Match(apkBuf); keyMatch {
-					accountPlainKey = encodeU64(offset, []byte{byte(j - 1)})
-					//fmt.Printf("replaced account [%x]=>[%x] for file [%d-%d]\n", apkBuf, accountPlainKey, item.startBlock, item.endBlock)
-					break
-				} else if j == 0 {
-					fmt.Printf("could not find replacement key [%x], file=%s.%d-%d]\n\n", apkBuf, Account.String(), item.startBlock, item.endBlock)
-				}
-			}
-		}
-		transAccountPks = append(transAccountPks, accountPlainKey)
-	}
-
-	transStoragePks := make([][]byte, 0, len(storagePlainKeys))
-	for _, storagePlainKey := range storagePlainKeys {
-		if len(storagePlainKey) == length.Addr+length.Hash {
-			// Non-optimised key originating from a database record
-			spkBuf = append(spkBuf[:0], storagePlainKey...)
-		} else {
-			// Optimised key referencing a state file record (file number and offset within the file)
-			fileI := int(storagePlainKey[0])
-			offset := decodeU64(storagePlainKey[1:])
-			g := cvt.pre[Storage][fileI].getterMerge
-			g.Reset(offset)
-			//fmt.Printf("offsetToKey storage [%x] offset=%d, file=%d-%d\n", storagePlainKey, offset, cvt.pre[Storage][fileI].startBlock, cvt.pre[Storage][fileI].endBlock)
-			spkBuf, _ = g.Next(spkBuf[:0])
-		}
-		// Look up spkBuf in the post storage files
-		for j := len(cvt.post[Storage]); j > 0; j-- {
-			item := cvt.post[Storage][j-1]
-			if item.index.Empty() {
-				continue
-			}
-			offset := item.readerMerge.Lookup(spkBuf)
-			g := item.getterMerge
-			g.Reset(offset)
-			if g.HasNext() {
-				if keyMatch, _ := g.Match(spkBuf); keyMatch {
-					storagePlainKey = encodeU64(offset, []byte{byte(j - 1)})
-					//fmt.Printf("replacing storage [%x] => [fileI=%d, offset=%d, file=%s.%d-%d]\n", spkBuf, j-1, offset, Storage.String(), item.startBlock, item.endBlock)
-					break
-				} else if j == 0 {
-					fmt.Printf("could not find replacement key [%x], file=%s.%d-%d]\n\n", spkBuf, Storage.String(), item.startBlock, item.endBlock)
-				}
-			}
-		}
-		transStoragePks = append(transStoragePks, storagePlainKey)
-	}
-	if transValBuf, err = val.ReplacePlainKeys(transAccountPks, transStoragePks, transValBuf); err != nil {
-		return nil, err
-	}
-	return transValBuf, nil
-}
-
-func (a *Aggregator) backgroundMerge() {
-	defer a.mergeWg.Done()
-	for range a.mergeChannel {
-		t := time.Now()
-		var err error
-		var cvt CommitmentValTransform
-		var toRemove [NumberOfStateTypes][]*byEndBlockItem
-		var newItems [NumberOfStateTypes]*byEndBlockItem
-		var blockFrom, blockTo uint64
-		lastType := Code
-		typesLimit := Commitment
-		if a.commitments {
-			lastType = Commitment
-			typesLimit = AccountHistory
-		}
-		// Lock the set of commitment (or code if commitments are off) files - those are the smallest, because account, storage and code files may be added by the aggregation thread first
-		toRemove[lastType], _, _, blockFrom, blockTo = a.findLargestMerge(lastType, uint64(math.MaxUint64) /* maxBlockTo */, uint64(math.MaxUint64) /* maxSpan */)
-
-		for fType := FirstType; fType < typesLimit; fType++ {
-			var pre, post []*byEndBlockItem
-			var from, to uint64
-			if fType == lastType {
-				from = blockFrom
-				to = blockTo
-			} else {
-				toRemove[fType], pre, post, from, to = a.findLargestMerge(fType, blockTo, uint64(math.MaxUint64) /* maxSpan */)
-				if from != blockFrom {
-					a.mergeError <- fmt.Errorf("%sFrom %d != blockFrom %d", fType.String(), from, blockFrom)
-					return
-				}
-				if to != blockTo {
-					a.mergeError <- fmt.Errorf("%sTo %d != blockTo %d", fType.String(), to, blockTo)
-					return
-				}
-			}
-			if len(toRemove[fType]) > 1 {
-				var valTransform func(commitment.BranchData, commitment.BranchData) ([]byte, error)
-				var mergeFunc commitmentMerger
-				if fType == Commitment {
-					valTransform = cvt.commitmentValTransform
-					mergeFunc = mergeCommitments
-				} else {
-					mergeFunc = mergeReplace
-				}
-				var prefixLen int
-				if fType == Storage {
-					prefixLen = length.Addr
-				}
-				if newItems[fType], err = a.computeAggregation(fType, toRemove[fType], from, to, valTransform, mergeFunc, true /* valCompressed */, true /* withIndex */, prefixLen); err != nil {
-					a.mergeError <- fmt.Errorf("computeAggregation %s: %w", fType.String(), err)
-					return
-				}
-				post = append(post, newItems[fType])
-			}
-			if fType < NumberOfAccountStorageTypes {
-				cvt.pre[fType] = pre
-				cvt.post[fType] = post
-			}
-		}
-		// Switch aggregator to new state files, close and remove old files
-		a.removeLockedState(toRemove[Account], newItems[Account], toRemove[Code], newItems[Code], toRemove[Storage], newItems[Storage], toRemove[Commitment], newItems[Commitment])
-		removed := 0
-		for fType := FirstType; fType < typesLimit; fType++ {
-			if len(toRemove[fType]) > 1 {
-				removeFiles(fType, a.diffDir, toRemove[fType])
-				removed += len(toRemove[fType]) - 1
-			}
-		}
-		mergeTime := time.Since(t)
-		if mergeTime > time.Minute {
-			log.Info("Long merge", "from", blockFrom, "to", blockTo, "files", removed, "time", time.Since(t))
-		}
-	}
-}
-
-func (a *Aggregator) reduceHistoryFiles(fType FileType, item *byEndBlockItem) error {
-	datTmpPath := filepath.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.dat.tmp", fType.String(), item.startBlock, item.endBlock))
-	datPath := filepath.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.dat", fType.String(), item.startBlock, item.endBlock))
-	idxPath := filepath.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.idx", fType.String(), item.startBlock, item.endBlock))
-	comp, err := compress.NewCompressor(context.Background(), AggregatorPrefix, datTmpPath, a.diffDir, compress.MinPatternScore, 1, log.LvlDebug)
-	if err != nil {
-		return fmt.Errorf("reduceHistoryFiles create compressor %s: %w", datPath, err)
-	}
-	defer comp.Close()
-	g := item.getter
-	var val []byte
-	var count int
-	g.Reset(0)
-	var key []byte
-	for g.HasNext() {
-		g.Skip() // Skip key on the first pass
-		val, _ = g.Next(val[:0])
-		//fmt.Printf("reduce1 [%s.%d-%d] [%x]=>[%x]\n", fType.String(), item.startBlock, item.endBlock, key, val)
-		if err = comp.AddWord(val); err != nil {
-			return fmt.Errorf("reduceHistoryFiles AddWord: %w", err)
-		}
-		count++
-	}
-	if err = comp.Compress(); err != nil {
-		return fmt.Errorf("reduceHistoryFiles compress: %w", err)
-	}
-	var d *compress.Decompressor
-	if d, err = compress.NewDecompressor(datTmpPath); err != nil {
-		return fmt.Errorf("reduceHistoryFiles create decompressor: %w", err)
-	}
-	var rs *recsplit.RecSplit
-	if rs, err = recsplit.NewRecSplit(recsplit.RecSplitArgs{
-		KeyCount:   count,
-		Enums:      false,
-		BucketSize: 2000,
-		LeafSize:   8,
-		TmpDir:     a.diffDir,
-		IndexFile:  idxPath,
-	}); err != nil {
-		return fmt.Errorf("reduceHistoryFiles NewRecSplit: %w", err)
-	}
-	rs.LogLvl(log.LvlDebug)
-
-	g1 := d.MakeGetter()
-	for {
-		g.Reset(0)
-		g1.Reset(0)
-		var lastOffset uint64
-		for g.HasNext() {
-			key, _ = g.Next(key[:0])
-			g.Skip() // Skip value
-			_, pos := g1.Next(nil)
-			//fmt.Printf("reduce2 [%s.%d-%d] [%x]==>%d\n", fType.String(), item.startBlock, item.endBlock, key, lastOffset)
-			if err = rs.AddKey(key, lastOffset); err != nil {
-				return fmt.Errorf("reduceHistoryFiles %p AddKey: %w", rs, err)
-			}
-			lastOffset = pos
-		}
-		if err = rs.Build(); err != nil {
-			if rs.Collision() {
-				log.Info("Building reduceHistoryFiles. Collision happened. It's ok. Restarting...")
-				rs.ResetNextSalt()
-			} else {
-				return fmt.Errorf("reduceHistoryFiles Build: %w", err)
-			}
-		} else {
-			break
-		}
-	}
Restarting...") - rs.ResetNextSalt() - } else { - return fmt.Errorf("reduceHistoryFiles Build: %w", err) - } - } else { - break - } - } - if err = item.decompressor.Close(); err != nil { - return fmt.Errorf("reduceHistoryFiles close decompressor: %w", err) - } - if err = os.Remove(datPath); err != nil { - return fmt.Errorf("reduceHistoryFiles remove: %w", err) - } - if err = os.Rename(datTmpPath, datPath); err != nil { - return fmt.Errorf("reduceHistoryFiles rename: %w", err) - } - if item.decompressor, err = compress.NewDecompressor(datPath); err != nil { - return fmt.Errorf("reduceHistoryFiles create new decompressor: %w", err) - } - item.getter = item.decompressor.MakeGetter() - item.getterMerge = item.decompressor.MakeGetter() - if item.index, err = recsplit.OpenIndex(idxPath); err != nil { - return fmt.Errorf("reduceHistoryFiles open index: %w", err) - } - item.indexReader = recsplit.NewIndexReader(item.index) - item.readerMerge = recsplit.NewIndexReader(item.index) - return nil -} - -type commitmentMerger func(prev, current, target commitment.BranchData) (commitment.BranchData, error) - -func mergeReplace(preval, val, buf commitment.BranchData) (commitment.BranchData, error) { - return append(buf, val...), nil -} - -func mergeBitmaps(preval, val, buf commitment.BranchData) (commitment.BranchData, error) { - preef, _ := eliasfano32.ReadEliasFano(preval) - ef, _ := eliasfano32.ReadEliasFano(val) - //fmt.Printf("mergeBitmaps [%x] (count=%d,max=%d) + [%x] (count=%d,max=%d)\n", preval, preef.Count(), preef.Max(), val, ef.Count(), ef.Max()) - preIt := preef.Iterator() - efIt := ef.Iterator() - newEf := eliasfano32.NewEliasFano(preef.Count()+ef.Count(), ef.Max()) - for preIt.HasNext() { - v, _ := preIt.Next() - newEf.AddOffset(v) - } - for efIt.HasNext() { - v, _ := efIt.Next() - newEf.AddOffset(v) - } - newEf.Build() - return newEf.AppendBytes(buf), nil -} - -func mergeCommitments(preval, val, buf commitment.BranchData) (commitment.BranchData, error) { - return preval.MergeHexBranches(val, buf) -} - -func (a *Aggregator) backgroundHistoryMerge() { - defer a.historyWg.Done() - for range a.historyChannel { - t := time.Now() - var err error - var toRemove [NumberOfTypes][]*byEndBlockItem - var newItems [NumberOfTypes]*byEndBlockItem - var blockFrom, blockTo uint64 - // Lock the set of commitment files - those are the smallest, because account, storage and code files may be added by the aggregation thread first - toRemove[CodeBitmap], _, _, blockFrom, blockTo = a.findLargestMerge(CodeBitmap, uint64(math.MaxUint64) /* maxBlockTo */, 500_000 /* maxSpan */) - - finalMerge := blockTo-blockFrom+1 == 500_000 - for fType := AccountHistory; fType < NumberOfTypes; fType++ { - var from, to uint64 - if fType == CodeBitmap { - from = blockFrom - to = blockTo - } else { - toRemove[fType], _, _, from, to = a.findLargestMerge(fType, blockTo, 500_000 /* maxSpan */) - if from != blockFrom { - a.historyError <- fmt.Errorf("%sFrom %d != blockFrom %d", fType.String(), from, blockFrom) - return - } - if to != blockTo { - a.historyError <- fmt.Errorf("%sTo %d != blockTo %d", fType.String(), to, blockTo) - return - } - } - if len(toRemove[fType]) > 1 { - isBitmap := fType == AccountBitmap || fType == StorageBitmap || fType == CodeBitmap - - var mergeFunc commitmentMerger - switch { - case isBitmap: - mergeFunc = mergeBitmaps - case fType == Commitment: - mergeFunc = mergeCommitments - default: - mergeFunc = mergeReplace - } - - if newItems[fType], err = a.computeAggregation(fType, toRemove[fType], from, to, nil /* 
valTransform */, mergeFunc, - !isBitmap /* valCompressed */, !finalMerge || isBitmap /* withIndex */, 0 /* prefixLen */); err != nil { - a.historyError <- fmt.Errorf("computeAggreation %s: %w", fType.String(), err) - return - } - } - } - if finalMerge { - // Special aggregation for blockTo - blockFrom + 1 == 500_000 - // Remove keys from the .dat files assuming that they will only be used after querying the bitmap index - // and therefore, there is no situation where non-existent key is queried. - if err = a.reduceHistoryFiles(AccountHistory, newItems[AccountHistory]); err != nil { - a.historyError <- fmt.Errorf("reduceHistoryFiles %s: %w", AccountHistory.String(), err) - return - } - if err = a.reduceHistoryFiles(StorageHistory, newItems[StorageHistory]); err != nil { - a.historyError <- fmt.Errorf("reduceHistoryFiles %s: %w", StorageHistory.String(), err) - return - } - if err = a.reduceHistoryFiles(CodeHistory, newItems[CodeHistory]); err != nil { - a.historyError <- fmt.Errorf("reduceHistoryFiles %s: %w", CodeHistory.String(), err) - return - } - } - for fType := AccountHistory; fType < NumberOfTypes; fType++ { - a.removeLocked(fType, toRemove[fType], newItems[fType]) - } - removed := 0 - for fType := AccountHistory; fType < NumberOfTypes; fType++ { - if len(toRemove[fType]) > 1 { - removeFiles(fType, a.diffDir, toRemove[fType]) - removed += len(toRemove[fType]) - 1 - } - } - mergeTime := time.Since(t) - if mergeTime > time.Minute { - log.Info("Long history merge", "from", blockFrom, "to", blockTo, "files", removed, "time", time.Since(t)) - } - } -} - -// checkOverlaps does not lock tree, because it is only called from the constructor of aggregator -func checkOverlaps(treeName string, tree *btree.BTree) error { - var minStart uint64 = math.MaxUint64 - var err error - tree.Descend(func(i btree.Item) bool { - item := i.(*byEndBlockItem) - if item.startBlock < minStart { - if item.endBlock >= minStart { - err = fmt.Errorf("overlap of %s state files [%d-%d] with %d", treeName, item.startBlock, item.endBlock, minStart) - return false - } - if minStart != math.MaxUint64 && item.endBlock+1 != minStart { - err = fmt.Errorf("hole in %s state files [%d-%d]", treeName, item.endBlock, minStart) - return false - } - minStart = item.startBlock - } - return true - }) - return err -} - -func (a *Aggregator) openFiles(fType FileType, minArch uint64) error { - var err error - var totalKeys uint64 - a.files[fType].Ascend(func(i btree.Item) bool { - item := i.(*byEndBlockItem) - if item.decompressor, err = compress.NewDecompressor(path.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.dat", fType.String(), item.startBlock, item.endBlock))); err != nil { - return false - } - if item.index, err = recsplit.OpenIndex(path.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.idx", fType.String(), item.startBlock, item.endBlock))); err != nil { - return false - } - totalKeys += item.index.KeyCount() - item.getter = item.decompressor.MakeGetter() - item.getterMerge = item.decompressor.MakeGetter() - item.indexReader = recsplit.NewIndexReader(item.index) - item.readerMerge = recsplit.NewIndexReader(item.index) - return true - }) - if fType >= NumberOfStateTypes { - return nil - } - log.Info("Creating arch...", "type", fType.String(), "total keys in all state files", totalKeys) - // Allocate arch of double of total keys - n := totalKeys * 2 - if n < minArch { - n = minArch - } - a.arches[fType] = make([]uint32, n) - arch := a.arches[fType] - var key []byte - h := a.archHasher - collisions := 0 - a.files[fType].Ascend(func(i btree.Item) bool 
{ - item := i.(*byEndBlockItem) - g := item.getter - g.Reset(0) - blockNum := uint32(item.endBlock) - for g.HasNext() { - key, _ = g.Next(key[:0]) - h.Reset() - h.Write(key) //nolint:errcheck - p, _ := h.Sum128() - p = p % n - if arch[p] != 0 { - collisions++ - } - arch[p] = blockNum - g.Skip() - } - return true - }) - log.Info("Created arch", "type", fType.String(), "collisions", collisions) - return err -} - -func (a *Aggregator) closeFiles(fType FileType) { - a.fileLocks[fType].Lock() - defer a.fileLocks[fType].Unlock() - a.files[fType].Ascend(func(i btree.Item) bool { - item := i.(*byEndBlockItem) - if item.decompressor != nil { - item.decompressor.Close() - } - if item.index != nil { - item.index.Close() - } - return true - }) -} - -func (a *Aggregator) Close() { - close(a.aggChannel) - a.aggWg.Wait() // Need to wait for the background aggregation to finish because it sends to merge channels - // Drain channel before closing - select { - case <-a.mergeChannel: - default: - } - close(a.mergeChannel) - if a.changesets { - // Drain channel before closing - select { - case <-a.historyChannel: - default: - } - close(a.historyChannel) - a.historyWg.Wait() - } - a.mergeWg.Wait() - // Closing state files only after background aggregation goroutine is finished - for fType := FirstType; fType < NumberOfTypes; fType++ { - a.closeFiles(fType) - } -} - -// checkOverlapWithMinStart does not need to lock tree lock, because it is only used in the constructor of Aggregator -func checkOverlapWithMinStart(treeName string, tree *btree.BTree, minStart uint64) error { - if lastStateI := tree.Max(); lastStateI != nil { - item := lastStateI.(*byEndBlockItem) - if minStart != math.MaxUint64 && item.endBlock+1 != minStart { - return fmt.Errorf("hole or overlap between %s state files and change files [%d-%d]", treeName, item.endBlock, minStart) - } - } - return nil -} - -func (a *Aggregator) readFromFiles(fType FileType, lock bool, blockNum uint64, filekey []byte, trace bool) ([]byte, uint64) { - if lock { - if fType == Commitment { - for lockFType := FirstType; lockFType < NumberOfStateTypes; lockFType++ { - a.fileLocks[lockFType].RLock() - defer a.fileLocks[lockFType].RUnlock() - } - } else { - a.fileLocks[fType].RLock() - defer a.fileLocks[fType].RUnlock() - } - } - h := a.archHasher - arch := a.arches[fType] - n := uint64(len(arch)) - if n > 0 { - h.Reset() - h.Write(filekey) //nolint:errcheck - p, _ := h.Sum128() - p = p % n - v := uint64(atomic.LoadUint32(&arch[p])) - //fmt.Printf("Reading from %s arch key [%x]=%d, %d\n", fType.String(), filekey, p, arch[p]) - if v == 0 { - return nil, 0 - } - a.files[fType].AscendGreaterOrEqual(&byEndBlockItem{startBlock: v, endBlock: v}, func(i btree.Item) bool { - item := i.(*byEndBlockItem) - if item.endBlock < blockNum { - blockNum = item.endBlock - } - return false - }) - } - var val []byte - var startBlock uint64 - a.files[fType].DescendLessOrEqual(&byEndBlockItem{endBlock: blockNum}, func(i btree.Item) bool { - item := i.(*byEndBlockItem) - if trace { - fmt.Printf("read %s %x: search in file [%d-%d]\n", fType.String(), filekey, item.startBlock, item.endBlock) - } - if item.tree != nil { - ai, ok := item.tree.Get(&AggregateItem{k: filekey}) - if !ok { - return true - } - if ai == nil { - return true - } - val = ai.v - startBlock = item.startBlock - - return false - } - if item.index.Empty() { - return true - } - offset := item.indexReader.Lookup(filekey) - g := item.getter - g.Reset(offset) - if g.HasNext() { - if keyMatch, _ := g.Match(filekey); keyMatch { - val, 
_ = g.Next(nil) - if trace { - fmt.Printf("read %s %x: found [%x] in file [%d-%d]\n", fType.String(), filekey, val, item.startBlock, item.endBlock) - } - startBlock = item.startBlock - atomic.AddUint64(&a.fileHits, 1) - return false - } - } - atomic.AddUint64(&a.fileMisses, 1) - return true - }) - - if fType == Commitment { - // Transform references - if len(val) > 0 { - accountPlainKeys, storagePlainKeys, err := commitment.BranchData(val).ExtractPlainKeys() - if err != nil { - panic(fmt.Errorf("value %x: %w", val, err)) - } - var transAccountPks [][]byte - var transStoragePks [][]byte - for _, accountPlainKey := range accountPlainKeys { - var apkBuf []byte - if len(accountPlainKey) == length.Addr { - // Non-optimised key originating from a database record - apkBuf = accountPlainKey - } else { - // Optimised key referencing a state file record (file number and offset within the file) - fileI := int(accountPlainKey[0]) - offset := decodeU64(accountPlainKey[1:]) - apkBuf, _ = a.readByOffset(Account, fileI, offset) - } - transAccountPks = append(transAccountPks, apkBuf) - } - for _, storagePlainKey := range storagePlainKeys { - var spkBuf []byte - if len(storagePlainKey) == length.Addr+length.Hash { - // Non-optimised key originating from a database record - spkBuf = storagePlainKey - } else { - // Optimised key referencing a state file record (file number and offset within the file) - fileI := int(storagePlainKey[0]) - offset := decodeU64(storagePlainKey[1:]) - //fmt.Printf("readbyOffset(comm file %d-%d) file=%d offset=%d\n", ii.startBlock, ii.endBlock, fileI, offset) - spkBuf, _ = a.readByOffset(Storage, fileI, offset) - } - transStoragePks = append(transStoragePks, spkBuf) - } - if val, err = commitment.BranchData(val).ReplacePlainKeys(transAccountPks, transStoragePks, nil); err != nil { - panic(err) - } - } - } - return val, startBlock -} - -// readByOffset is assumed to be invoked under a read lock -func (a *Aggregator) readByOffset(fType FileType, fileI int, offset uint64) ([]byte, []byte) { - var key, val []byte - fi := 0 - a.files[fType].Ascend(func(i btree.Item) bool { - if fi < fileI { - fi++ - return true - } - item := i.(*byEndBlockItem) - //fmt.Printf("fileI=%d, file=%s.%d-%d\n", fileI, fType.String(), item.startBlock, item.endBlock) - g := item.getter - g.Reset(offset) - key, _ = g.Next(nil) - val, _ = g.Next(nil) - - return false - }) - return key, val -} - -func (a *Aggregator) MakeStateReader(blockNum uint64, tx kv.Tx) *Reader { - r := &Reader{ - a: a, - blockNum: blockNum, - tx: tx, - } - return r -} - -type Reader struct { - a *Aggregator - tx kv.Getter - blockNum uint64 -} - -func (r *Reader) ReadAccountData(addr []byte, trace bool) ([]byte, error) { - v, err := r.tx.GetOne(kv.StateAccounts, addr) - if err != nil { - return nil, err - } - if v != nil { - return v[4:], nil - } - v, _ = r.a.readFromFiles(Account, true /* lock */, r.blockNum, addr, trace) - return v, nil -} - -func (r *Reader) ReadAccountStorage(addr []byte, loc []byte, trace bool) ([]byte, error) { - // Look in the summary table first - dbkey := make([]byte, len(addr)+len(loc)) - copy(dbkey[0:], addr) - copy(dbkey[len(addr):], loc) - v, err := r.tx.GetOne(kv.StateStorage, dbkey) - if err != nil { - return nil, err - } - if v != nil { - if len(v) == 4 { - return nil, nil - } - return v[4:], nil - } - v, _ = r.a.readFromFiles(Storage, true /* lock */, r.blockNum, dbkey, trace) - return v, nil -} - -func (r *Reader) ReadAccountCode(addr []byte, trace bool) ([]byte, error) { - // Look in the summary table first 
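
// The Reader methods in this block share one value layout in the summary
// tables, inferred from their usage here: a 4-byte big-endian modification
// counter precedes the payload, and a value of exactly 4 bytes (empty
// payload) marks a deletion. A minimal sketch, assuming only encoding/binary
// (splitSummaryValue is a hypothetical helper, not defined in this package):

func splitSummaryValue(v []byte) (count uint32, payload []byte, deleted bool) {
	count = binary.BigEndian.Uint32(v[:4]) // how many times the key was modified
	payload = v[4:]
	return count, payload, len(payload) == 0
}
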
- v, err := r.tx.GetOne(kv.StateCode, addr) - if err != nil { - return nil, err - } - if v != nil { - if len(v) == 4 { - return nil, nil - } - return v[4:], nil - } - // Look in the files - v, _ = r.a.readFromFiles(Code, true /* lock */, r.blockNum, addr, trace) - return v, nil -} - -func (r *Reader) ReadAccountCodeSize(addr []byte, trace bool) (int, error) { - // Look in the summary table first - v, err := r.tx.GetOne(kv.StateCode, addr) - if err != nil { - return 0, err - } - if v != nil { - return len(v) - 4, nil - } - // Look in the files. TODO - use specialised function to only lookup size - v, _ = r.a.readFromFiles(Code, true /* lock */, r.blockNum, addr, trace) - return len(v), nil -} - -type Writer struct { - tx kv.RwTx - a *Aggregator - commTree *btree.BTreeG[*CommitmentItem] // BTree used for gathering commitment data - changes [NumberOfStateTypes]Changes - blockNum uint64 - changeFileNum uint64 // Block number associated with the current change files. It is the last block number whose changes will go into that file -} - -func (a *Aggregator) MakeStateWriter(beforeOn bool) *Writer { - w := &Writer{ - a: a, - commTree: btree.NewG[*CommitmentItem](32, commitmentItemLess), - } - for fType := FirstType; fType < NumberOfStateTypes; fType++ { - w.changes[fType].Init(fType.String(), a.aggregationStep, a.diffDir, w.a.changesets && fType != Commitment /* we do not unwind commitment ? */) - } - return w -} - -func (w *Writer) Close() { - typesLimit := Commitment - if w.a.commitments { - typesLimit = AccountHistory - } - for fType := FirstType; fType < typesLimit; fType++ { - w.changes[fType].closeFiles() - } -} - -func (w *Writer) Reset(blockNum uint64, tx kv.RwTx) error { - w.tx = tx - w.blockNum = blockNum - typesLimit := Commitment - if w.a.commitments { - typesLimit = AccountHistory - } - if blockNum > w.changeFileNum { - for fType := FirstType; fType < typesLimit; fType++ { - if err := w.changes[fType].closeFiles(); err != nil { - return err - } - } - if w.changeFileNum != 0 { - w.a.changesBtree.ReplaceOrInsert(&ChangesItem{startBlock: w.changeFileNum + 1 - w.a.aggregationStep, endBlock: w.changeFileNum, fileCount: 12}) - } - } - if w.changeFileNum == 0 || blockNum > w.changeFileNum { - for fType := FirstType; fType < typesLimit; fType++ { - if err := w.changes[fType].openFiles(blockNum, true /* write */); err != nil { - return err - } - } - w.changeFileNum = blockNum - (blockNum % w.a.aggregationStep) + w.a.aggregationStep - 1 - } - return nil -} - -type CommitmentItem struct { - plainKey []byte - hashedKey []byte - u commitment.Update -} - -func commitmentItemLess(i, j *CommitmentItem) bool { - return bytes.Compare(i.hashedKey, j.hashedKey) < 0 -} -func (i *CommitmentItem) Less(than btree.Item) bool { - return bytes.Compare(i.hashedKey, than.(*CommitmentItem).hashedKey) < 0 -} - -func (w *Writer) branchFn(prefix []byte) ([]byte, error) { - for lockFType := FirstType; lockFType < NumberOfStateTypes; lockFType++ { - w.a.fileLocks[lockFType].RLock() - defer w.a.fileLocks[lockFType].RUnlock() - } - // Look in the summary table first - mergedVal, err := w.tx.GetOne(kv.StateCommitment, prefix) - if err != nil { - return nil, err - } - if mergedVal != nil { - mergedVal = mergedVal[4:] - } - // Look in the files and merge, while it becomes complete - var startBlock = w.blockNum + 1 - for mergedVal == nil || !commitment.BranchData(mergedVal).IsComplete() { - if startBlock == 0 { - panic(fmt.Sprintf("Incomplete branch data prefix [%x], mergeVal=[%x], startBlock=%d\n", 
commitment.CompactedKeyToHex(prefix), mergedVal, startBlock)) - } - var val commitment.BranchData - val, startBlock = w.a.readFromFiles(Commitment, false /* lock */, startBlock-1, prefix, false /* trace */) - if val == nil { - if mergedVal == nil { - return nil, nil - } - panic(fmt.Sprintf("Incomplete branch data prefix [%x], mergeVal=[%x], startBlock=%d\n", commitment.CompactedKeyToHex(prefix), mergedVal, startBlock)) - } - var err error - //fmt.Printf("Pre-merge prefix [%x] [%x]+[%x], startBlock %d\n", commitment.CompactToHex(prefix), val, mergedVal, startBlock) - if mergedVal == nil { - mergedVal = val - } else if mergedVal, err = val.MergeHexBranches(mergedVal, nil); err != nil { - return nil, err - } - //fmt.Printf("Post-merge prefix [%x] [%x], startBlock %d\n", commitment.CompactToHex(prefix), mergedVal, startBlock) - } - if mergedVal == nil { - return nil, nil - } - //fmt.Printf("Returning branch data prefix [%x], mergeVal=[%x], startBlock=%d\n", commitment.CompactToHex(prefix), mergedVal, startBlock) - return mergedVal[2:], nil // Skip touchMap but keep afterMap -} - -func bytesToUint64(buf []byte) (x uint64) { - for i, b := range buf { - x = x<<8 + uint64(b) - if i == 7 { - return - } - } - return -} - -func (w *Writer) accountFn(plainKey []byte, cell *commitment.Cell) error { - // Look in the summary table first - enc, err := w.tx.GetOne(kv.StateAccounts, plainKey) - if err != nil { - return err - } - if enc != nil { - enc = enc[4:] - } else { - // Look in the files - enc, _ = w.a.readFromFiles(Account, true /* lock */, w.blockNum, plainKey, false /* trace */) - } - cell.Nonce = 0 - cell.Balance.Clear() - copy(cell.CodeHash[:], commitment.EmptyCodeHash) - - if len(enc) > 0 { - pos := 0 - nonceBytes := int(enc[pos]) - pos++ - if nonceBytes > 0 { - cell.Nonce = bytesToUint64(enc[pos : pos+nonceBytes]) - pos += nonceBytes - } - balanceBytes := int(enc[pos]) - pos++ - if balanceBytes > 0 { - cell.Balance.SetBytes(enc[pos : pos+balanceBytes]) - } - } - enc, err = w.tx.GetOne(kv.StateCode, plainKey) - if err != nil { - return err - } - if enc != nil { - enc = enc[4:] - } else { - // Look in the files - enc, _ = w.a.readFromFiles(Code, true /* lock */, w.blockNum, plainKey, false /* trace */) - } - if len(enc) > 0 { - w.a.keccak.Reset() - w.a.keccak.Write(enc) - w.a.keccak.(io.Reader).Read(cell.CodeHash[:]) - } - return nil -} - -func (w *Writer) storageFn(plainKey []byte, cell *commitment.Cell) error { - // Look in the summary table first - enc, err := w.tx.GetOne(kv.StateStorage, plainKey) - if err != nil { - return err - } - if enc != nil { - enc = enc[4:] - } else { - // Look in the files - enc, _ = w.a.readFromFiles(Storage, true /* lock */, w.blockNum, plainKey, false /* trace */) - } - cell.StorageLen = len(enc) - copy(cell.Storage[:], enc) - return nil -} - -func (w *Writer) captureCommitmentType(fType FileType, trace bool, f func(commTree *btree.BTreeG[*CommitmentItem], h hash.Hash, key, val []byte)) { - lastOffsetKey := 0 - lastOffsetVal := 0 - for i, offsetKey := range w.changes[fType].keys.wordOffsets { - offsetVal := w.changes[fType].after.wordOffsets[i] - key := w.changes[fType].keys.words[lastOffsetKey:offsetKey] - val := w.changes[fType].after.words[lastOffsetVal:offsetVal] - if trace { - fmt.Printf("captureCommitmentData %s [%x]=>[%x]\n", fType.String(), key, val) - } - f(w.commTree, w.a.keccak, key, val) - lastOffsetKey = offsetKey - lastOffsetVal = offsetVal - } -} - -func (w *Writer) captureCommitmentData(trace bool) { - if trace { - fmt.Printf("captureCommitmentData 
start w.commTree.Len()=%d\n", w.commTree.Len()) - } - w.captureCommitmentType(Code, trace, func(commTree *btree.BTreeG[*CommitmentItem], h hash.Hash, key, val []byte) { - h.Reset() - h.Write(key) - hashedKey := h.Sum(nil) - var c = &CommitmentItem{plainKey: common.Copy(key), hashedKey: make([]byte, len(hashedKey)*2)} - for i, b := range hashedKey { - c.hashedKey[i*2] = (b >> 4) & 0xf - c.hashedKey[i*2+1] = b & 0xf - } - c.u.Flags = commitment.CODE_UPDATE - item, found := commTree.Get(&CommitmentItem{hashedKey: c.hashedKey}) - if found && item != nil { - if item.u.Flags&commitment.BALANCE_UPDATE != 0 { - c.u.Flags |= commitment.BALANCE_UPDATE - c.u.Balance.Set(&item.u.Balance) - } - if item.u.Flags&commitment.NONCE_UPDATE != 0 { - c.u.Flags |= commitment.NONCE_UPDATE - c.u.Nonce = item.u.Nonce - } - if item.u.Flags == commitment.DELETE_UPDATE && len(val) == 0 { - c.u.Flags = commitment.DELETE_UPDATE - } else { - h.Reset() - h.Write(val) - h.(io.Reader).Read(c.u.CodeHashOrStorage[:]) - } - } else { - h.Reset() - h.Write(val) - h.(io.Reader).Read(c.u.CodeHashOrStorage[:]) - } - commTree.ReplaceOrInsert(c) - }) - w.captureCommitmentType(Account, trace, func(commTree *btree.BTreeG[*CommitmentItem], h hash.Hash, key, val []byte) { - h.Reset() - h.Write(key) - hashedKey := h.Sum(nil) - var c = &CommitmentItem{plainKey: common.Copy(key), hashedKey: make([]byte, len(hashedKey)*2)} - for i, b := range hashedKey { - c.hashedKey[i*2] = (b >> 4) & 0xf - c.hashedKey[i*2+1] = b & 0xf - } - if len(val) == 0 { - c.u.Flags = commitment.DELETE_UPDATE - } else { - c.u.DecodeForStorage(val) - c.u.Flags = commitment.BALANCE_UPDATE | commitment.NONCE_UPDATE - item, found := commTree.Get(&CommitmentItem{hashedKey: c.hashedKey}) - - if found && item != nil { - if item.u.Flags&commitment.CODE_UPDATE != 0 { - c.u.Flags |= commitment.CODE_UPDATE - copy(c.u.CodeHashOrStorage[:], item.u.CodeHashOrStorage[:]) - } - } - } - commTree.ReplaceOrInsert(c) - }) - w.captureCommitmentType(Storage, trace, func(commTree *btree.BTreeG[*CommitmentItem], h hash.Hash, key, val []byte) { - hashedKey := make([]byte, 2*length.Hash) - h.Reset() - h.Write(key[:length.Addr]) - h.(io.Reader).Read(hashedKey[:length.Hash]) - h.Reset() - h.Write(key[length.Addr:]) - h.(io.Reader).Read(hashedKey[length.Hash:]) - var c = &CommitmentItem{plainKey: common.Copy(key), hashedKey: make([]byte, len(hashedKey)*2)} - for i, b := range hashedKey { - c.hashedKey[i*2] = (b >> 4) & 0xf - c.hashedKey[i*2+1] = b & 0xf - } - c.u.ValLength = len(val) - if len(val) > 0 { - copy(c.u.CodeHashOrStorage[:], val) - } - if len(val) == 0 { - c.u.Flags = commitment.DELETE_UPDATE - } else { - c.u.Flags = commitment.STORAGE_UPDATE - } - commTree.ReplaceOrInsert(c) - }) - if trace { - fmt.Printf("captureCommitmentData end w.commTree.Len()=%d\n", w.commTree.Len()) - } -} - -// computeCommitment is computing the commitment to the state after -// the change would have been applied. 
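
// Each capture callback above expands a hashed key into half-byte nibbles,
// the alphabet the hex Patricia trie operates on. The same expansion as a
// stand-alone illustration (hashedKeyToNibbles is a hypothetical name, not
// part of this package):

func hashedKeyToNibbles(hashed []byte) []byte {
	nibbles := make([]byte, len(hashed)*2)
	for i, b := range hashed {
		nibbles[i*2] = b >> 4     // high nibble first
		nibbles[i*2+1] = b & 0x0f // low nibble second
	}
	return nibbles
}
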
-// It assumes that the state accessible via the aggregator has already been -// modified with the new values -// At the moment, it is specific version for hex merkle patricia tree commitment -// but it will be extended to support other types of commitments -func (w *Writer) computeCommitment(trace bool) ([]byte, error) { - if trace { - fmt.Printf("computeCommitment w.commTree.Len()=%d\n", w.commTree.Len()) - } - - plainKeys := make([][]byte, w.commTree.Len()) - hashedKeys := make([][]byte, w.commTree.Len()) - updates := make([]commitment.Update, w.commTree.Len()) - j := 0 - w.commTree.Ascend(func(item *CommitmentItem) bool { - plainKeys[j] = item.plainKey - hashedKeys[j] = item.hashedKey - updates[j] = item.u - j++ - return true - }) - - if len(plainKeys) == 0 { - return w.a.hph.RootHash() - } - - w.a.hph.Reset() - w.a.hph.ResetFns(w.branchFn, w.accountFn, w.storageFn) - w.a.hph.SetTrace(trace) - - rootHash, branchNodeUpdates, err := w.a.hph.ProcessUpdates(plainKeys, hashedKeys, updates) - if err != nil { - return nil, err - } - - for prefixStr, branchNodeUpdate := range branchNodeUpdates { - if branchNodeUpdate == nil { - continue - } - prefix := []byte(prefixStr) - var prevV []byte - var prevNum uint32 - if prevV, err = w.tx.GetOne(kv.StateCommitment, prefix); err != nil { - return nil, err - } - if prevV != nil { - prevNum = binary.BigEndian.Uint32(prevV[:4]) - } - - var original commitment.BranchData - if prevV == nil { - original, _ = w.a.readFromFiles(Commitment, true /* lock */, w.blockNum, prefix, false) - } else { - original = prevV[4:] - } - if original != nil { - // try to merge previous (original) and current (branchNodeUpdate) into one update - mergedVal, err := original.MergeHexBranches(branchNodeUpdate, nil) - if err != nil { - return nil, err - } - if w.a.trace { - fmt.Printf("computeCommitment merge [%x] [%x]+[%x]=>[%x]\n", commitment.CompactedKeyToHex(prefix), original, branchNodeUpdate, mergedVal) - } - branchNodeUpdate = mergedVal - } - - //fmt.Printf("computeCommitment set [%x] [%x]\n", commitment.CompactToHex(prefix), branchNodeUpdate) - v := make([]byte, 4+len(branchNodeUpdate)) - binary.BigEndian.PutUint32(v[:4], prevNum+1) - copy(v[4:], branchNodeUpdate) - - if err = w.tx.Put(kv.StateCommitment, prefix, v); err != nil { - return nil, err - } - if len(branchNodeUpdate) == 0 { - w.changes[Commitment].delete(prefix, original) - } else { - if prevV == nil && len(original) == 0 { - w.changes[Commitment].insert(prefix, branchNodeUpdate) - } else { - w.changes[Commitment].update(prefix, original, branchNodeUpdate) - } - } - } - - return rootHash, nil -} - -func (w *Writer) FinishTx(txNum uint64, trace bool) error { - if w.a.commitments { - w.captureCommitmentData(trace) - } - var err error - for fType := FirstType; fType < Commitment; fType++ { - if err = w.changes[fType].finish(txNum); err != nil { - return fmt.Errorf("finish %sChanges: %w", fType.String(), err) - } - } - return nil -} - -func (w *Writer) ComputeCommitment(trace bool) ([]byte, error) { - if !w.a.commitments { - return nil, fmt.Errorf("commitments turned off") - } - comm, err := w.computeCommitment(trace) - if err != nil { - return nil, fmt.Errorf("compute commitment: %w", err) - } - w.commTree.Clear(true) - if err = w.changes[Commitment].finish(w.blockNum); err != nil { - return nil, fmt.Errorf("finish commChanges: %w", err) - } - return comm, nil -} - -// Aggegate should be called to check if the aggregation is required, and -// if it is required, perform it -func (w *Writer) Aggregate(trace bool) 
error { - if w.blockNum < w.a.unwindLimit+w.a.aggregationStep-1 { - return nil - } - diff := w.blockNum - w.a.unwindLimit - if (diff+1)%w.a.aggregationStep != 0 { - return nil - } - if err := w.aggregateUpto(diff+1-w.a.aggregationStep, diff); err != nil { - return fmt.Errorf("aggregateUpto(%d, %d): %w", diff+1-w.a.aggregationStep, diff, err) - } - return nil -} - -func (w *Writer) UpdateAccountData(addr []byte, account []byte, trace bool) error { - var prevNum uint32 - prevV, err := w.tx.GetOne(kv.StateAccounts, addr) - if err != nil { - return err - } - if prevV != nil { - prevNum = binary.BigEndian.Uint32(prevV[:4]) - } - var original []byte - if prevV == nil { - original, _ = w.a.readFromFiles(Account, true /* lock */, w.blockNum, addr, trace) - } else { - original = prevV[4:] - } - if bytes.Equal(account, original) { - // No change - return nil - } - v := make([]byte, 4+len(account)) - binary.BigEndian.PutUint32(v[:4], prevNum+1) - copy(v[4:], account) - if err = w.tx.Put(kv.StateAccounts, addr, v); err != nil { - return err - } - if prevV == nil && len(original) == 0 { - w.changes[Account].insert(addr, account) - } else { - w.changes[Account].update(addr, original, account) - } - if trace { - w.a.trace = true - w.a.tracedKeys[string(addr)] = struct{}{} - } - return nil -} - -func (w *Writer) UpdateAccountCode(addr []byte, code []byte, trace bool) error { - var prevNum uint32 - prevV, err := w.tx.GetOne(kv.StateCode, addr) - if err != nil { - return err - } - if prevV != nil { - prevNum = binary.BigEndian.Uint32(prevV[:4]) - } - var original []byte - if prevV == nil { - original, _ = w.a.readFromFiles(Code, true /* lock */, w.blockNum, addr, trace) - } else { - original = prevV[4:] - } - v := make([]byte, 4+len(code)) - binary.BigEndian.PutUint32(v[:4], prevNum+1) - copy(v[4:], code) - if err = w.tx.Put(kv.StateCode, addr, v); err != nil { - return err - } - if prevV == nil && len(original) == 0 { - w.changes[Code].insert(addr, code) - } else { - w.changes[Code].update(addr, original, code) - } - if trace { - w.a.trace = true - w.a.tracedKeys[string(addr)] = struct{}{} - } - return nil -} - -type CursorType uint8 - -const ( - FILE_CURSOR CursorType = iota - DB_CURSOR - TREE_CURSOR -) - -// CursorItem is the item in the priority queue used to do merge interation -// over storage of a given account -type CursorItem struct { - c kv.Cursor - dg *compress.Getter - tree *btree.BTreeG[*AggregateItem] - key []byte - val []byte - endBlock uint64 - t CursorType // Whether this item represents state file or DB record, or tree -} - -type CursorHeap []*CursorItem - -func (ch CursorHeap) Len() int { - return len(ch) -} - -func (ch CursorHeap) Less(i, j int) bool { - cmp := bytes.Compare(ch[i].key, ch[j].key) - if cmp == 0 { - // when keys match, the items with later blocks are preferred - return ch[i].endBlock > ch[j].endBlock - } - return cmp < 0 -} - -func (ch *CursorHeap) Swap(i, j int) { - (*ch)[i], (*ch)[j] = (*ch)[j], (*ch)[i] -} - -func (ch *CursorHeap) Push(x interface{}) { - *ch = append(*ch, x.(*CursorItem)) -} - -func (ch *CursorHeap) Pop() interface{} { - old := *ch - n := len(old) - x := old[n-1] - old[n-1] = nil - *ch = old[0 : n-1] - return x -} - -func (w *Writer) deleteAccount(addr []byte, trace bool) (bool, error) { - prevV, err := w.tx.GetOne(kv.StateAccounts, addr) - if err != nil { - return false, err - } - var prevNum uint32 - if prevV != nil { - prevNum = binary.BigEndian.Uint32(prevV[:4]) - } - var original []byte - if prevV == nil { - original, _ = w.a.readFromFiles(Account, 
true /* lock */, w.blockNum, addr, trace) - if original == nil { - return false, nil - } - } else { - original = prevV[4:] - } - v := make([]byte, 4) - binary.BigEndian.PutUint32(v[:4], prevNum+1) - if err = w.tx.Put(kv.StateAccounts, addr, v); err != nil { - return false, err - } - w.changes[Account].delete(addr, original) - return true, nil -} - -func (w *Writer) deleteCode(addr []byte, trace bool) error { - prevV, err := w.tx.GetOne(kv.StateCode, addr) - if err != nil { - return err - } - var prevNum uint32 - if prevV != nil { - prevNum = binary.BigEndian.Uint32(prevV[:4]) - } - var original []byte - if prevV == nil { - original, _ = w.a.readFromFiles(Code, true /* lock */, w.blockNum, addr, trace) - if original == nil { - // Nothing to do - return nil - } - } else { - original = prevV[4:] - } - v := make([]byte, 4) - binary.BigEndian.PutUint32(v[:4], prevNum+1) - if err = w.tx.Put(kv.StateCode, addr, v); err != nil { - return err - } - w.changes[Code].delete(addr, original) - return nil -} - -func (w *Writer) DeleteAccount(addr []byte, trace bool) error { - deleted, err := w.deleteAccount(addr, trace) - if err != nil { - return err - } - if !deleted { - return nil - } - w.a.fileLocks[Storage].RLock() - defer w.a.fileLocks[Storage].RUnlock() - w.deleteCode(addr, trace) - // Find all storage items for this address - var cp CursorHeap - heap.Init(&cp) - var c kv.Cursor - if c, err = w.tx.Cursor(kv.StateStorage); err != nil { - return err - } - defer c.Close() - var k, v []byte - if k, v, err = c.Seek(addr); err != nil { - return err - } - if k != nil && bytes.HasPrefix(k, addr) { - heap.Push(&cp, &CursorItem{t: DB_CURSOR, key: common.Copy(k), val: common.Copy(v), c: c, endBlock: w.blockNum}) - } - w.a.files[Storage].Ascend(func(i btree.Item) bool { - item := i.(*byEndBlockItem) - if item.tree != nil { - item.tree.AscendGreaterOrEqual(&AggregateItem{k: addr}, func(aitem *AggregateItem) bool { - if !bytes.HasPrefix(aitem.k, addr) { - return false - } - if len(aitem.k) == len(addr) { - return true - } - heap.Push(&cp, &CursorItem{t: TREE_CURSOR, key: aitem.k, val: aitem.v, tree: item.tree, endBlock: item.endBlock}) - return false - }) - return true - } - if item.index.Empty() { - return true - } - offset := item.indexReader.Lookup(addr) - g := item.getter - g.Reset(offset) - if g.HasNext() { - if keyMatch, _ := g.Match(addr); !keyMatch { - //fmt.Printf("DeleteAccount %x - not found anchor in file [%d-%d]\n", addr, item.startBlock, item.endBlock) - return true - } - g.Skip() - } - if g.HasNext() { - key, _ := g.Next(nil) - if bytes.HasPrefix(key, addr) { - val, _ := g.Next(nil) - heap.Push(&cp, &CursorItem{t: FILE_CURSOR, key: key, val: val, dg: g, endBlock: item.endBlock}) - } - } - return true - }) - for cp.Len() > 0 { - lastKey := common.Copy(cp[0].key) - lastVal := common.Copy(cp[0].val) - // Advance all the items that have this key (including the top) - for cp.Len() > 0 && bytes.Equal(cp[0].key, lastKey) { - ci1 := cp[0] - switch ci1.t { - case FILE_CURSOR: - if ci1.dg.HasNext() { - ci1.key, _ = ci1.dg.Next(ci1.key[:0]) - if bytes.HasPrefix(ci1.key, addr) { - ci1.val, _ = ci1.dg.Next(ci1.val[:0]) - heap.Fix(&cp, 0) - } else { - heap.Pop(&cp) - } - } else { - heap.Pop(&cp) - } - case DB_CURSOR: - k, v, err = ci1.c.Next() - if err != nil { - return err - } - if k != nil && bytes.HasPrefix(k, addr) { - ci1.key = common.Copy(k) - ci1.val = common.Copy(v) - heap.Fix(&cp, 0) - } else { - heap.Pop(&cp) - } - case TREE_CURSOR: - skip := true - var aitem *AggregateItem - 
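
// A self-contained miniature of the multi-way merge pattern that this loop
// and mergeIntoStateFile (further down) both follow, assuming only
// container/heap from the standard library. The real CursorHeap additionally
// breaks ties by preferring the cursor with the later endBlock; that detail
// is omitted here. All names below are illustrative, not part of the package.

type miniSrc struct {
	keys []string // pre-sorted and non-empty, standing in for one state file
	pos  int
}

type miniHeap []*miniSrc

func (h miniHeap) Len() int            { return len(h) }
func (h miniHeap) Less(i, j int) bool  { return h[i].keys[h[i].pos] < h[j].keys[h[j].pos] }
func (h miniHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *miniHeap) Push(x interface{}) { *h = append(*h, x.(*miniSrc)) }
func (h *miniHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// mergeDistinct visits every distinct key exactly once, smallest first.
func mergeDistinct(sources ...*miniSrc) (out []string) {
	h := miniHeap(sources)
	heap.Init(&h)
	for h.Len() > 0 {
		key := h[0].keys[h[0].pos]
		// advance every source currently positioned on this key
		for h.Len() > 0 && h[0].keys[h[0].pos] == key {
			h[0].pos++
			if h[0].pos < len(h[0].keys) {
				heap.Fix(&h, 0)
			} else {
				heap.Pop(&h)
			}
		}
		out = append(out, key)
	}
	return out
}
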
ci1.tree.AscendGreaterOrEqual(&AggregateItem{k: ci1.key}, func(ai *AggregateItem) bool { - if skip { - skip = false - return true - } - aitem = ai - return false - }) - if aitem != nil && bytes.HasPrefix(aitem.k, addr) { - ci1.key = aitem.k - ci1.val = aitem.v - heap.Fix(&cp, 0) - } else { - heap.Pop(&cp) - } - } - } - var prevV []byte - prevV, err = w.tx.GetOne(kv.StateStorage, lastKey) - if err != nil { - return err - } - var prevNum uint32 - if prevV != nil { - prevNum = binary.BigEndian.Uint32(prevV[:4]) - } - v = make([]byte, 4) - binary.BigEndian.PutUint32(v[:4], prevNum+1) - if err = w.tx.Put(kv.StateStorage, lastKey, v); err != nil { - return err - } - w.changes[Storage].delete(lastKey, lastVal) - } - if trace { - w.a.trace = true - w.a.tracedKeys[string(addr)] = struct{}{} - } - return nil -} - -func (w *Writer) WriteAccountStorage(addr, loc []byte, value []byte, trace bool) error { - dbkey := make([]byte, len(addr)+len(loc)) - copy(dbkey[0:], addr) - copy(dbkey[len(addr):], loc) - prevV, err := w.tx.GetOne(kv.StateStorage, dbkey) - if err != nil { - return err - } - var prevNum uint32 - if prevV != nil { - prevNum = binary.BigEndian.Uint32(prevV[:4]) - } - var original []byte - if prevV == nil { - original, _ = w.a.readFromFiles(Storage, true /* lock */, w.blockNum, dbkey, trace) - } else { - original = prevV[4:] - } - if bytes.Equal(value, original) { - // No change - return nil - } - v := make([]byte, 4+len(value)) - binary.BigEndian.PutUint32(v[:4], prevNum+1) - copy(v[4:], value) - if err = w.tx.Put(kv.StateStorage, dbkey, v); err != nil { - return err - } - if prevV == nil && len(original) == 0 { - w.changes[Storage].insert(dbkey, value) - } else { - w.changes[Storage].update(dbkey, original, value) - } - if trace { - w.a.trace = true - w.a.tracedKeys[string(dbkey)] = struct{}{} - } - return nil -} - -// findLargestMerge looks through the state files of the speficied type and determines the largest merge that can be undertaken -// a state file block [a; b] is valid if its length is a divisor of its starting block, or `(b-a+1) = 0 mod a` -func (a *Aggregator) findLargestMerge(fType FileType, maxTo uint64, maxSpan uint64) (toAggregate []*byEndBlockItem, pre []*byEndBlockItem, post []*byEndBlockItem, aggFrom uint64, aggTo uint64) { - a.fileLocks[fType].RLock() - defer a.fileLocks[fType].RUnlock() - var maxEndBlock uint64 - a.files[fType].DescendLessOrEqual(&byEndBlockItem{endBlock: maxTo}, func(i btree.Item) bool { - item := i.(*byEndBlockItem) - if item.decompressor == nil { - return true - } - maxEndBlock = item.endBlock - return false - }) - if maxEndBlock == 0 { - return - } - a.files[fType].Ascend(func(i btree.Item) bool { - item := i.(*byEndBlockItem) - if item.decompressor == nil { - return true // Skip B-tree based items - } - pre = append(pre, item) - if aggTo == 0 { - var doubleEnd uint64 - nextDouble := item.endBlock - for nextDouble <= maxEndBlock && nextDouble-item.startBlock < maxSpan { - doubleEnd = nextDouble - nextDouble = doubleEnd + (doubleEnd - item.startBlock) + 1 - } - if doubleEnd != item.endBlock { - aggFrom = item.startBlock - aggTo = doubleEnd - } else { - post = append(post, item) - return true - } - } - toAggregate = append(toAggregate, item) - return item.endBlock < aggTo - }) - return -} - -func (a *Aggregator) computeAggregation(fType FileType, - toAggregate []*byEndBlockItem, aggFrom uint64, aggTo uint64, - valTransform func(val, transValBuf commitment.BranchData) ([]byte, error), - mergeFunc commitmentMerger, - valCompressed bool, - withIndex 
bool, prefixLen int) (*byEndBlockItem, error) { - var item2 = &byEndBlockItem{startBlock: aggFrom, endBlock: aggTo} - var cp CursorHeap - heap.Init(&cp) - for _, ag := range toAggregate { - g := ag.decompressor.MakeGetter() - g.Reset(0) - if g.HasNext() { - key, _ := g.Next(nil) - val, _ := g.Next(nil) - heap.Push(&cp, &CursorItem{t: FILE_CURSOR, dg: g, key: key, val: val, endBlock: ag.endBlock}) - } - } - var err error - var count int - if item2.decompressor, count, err = a.mergeIntoStateFile(&cp, prefixLen, fType, aggFrom, aggTo, a.diffDir, valTransform, mergeFunc, valCompressed); err != nil { - return nil, fmt.Errorf("mergeIntoStateFile %s [%d-%d]: %w", fType.String(), aggFrom, aggTo, err) - } - item2.getter = item2.decompressor.MakeGetter() - item2.getterMerge = item2.decompressor.MakeGetter() - if withIndex { - idxPath := filepath.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.idx", fType.String(), aggFrom, aggTo)) - if item2.index, err = buildIndex(item2.decompressor, idxPath, a.diffDir, count); err != nil { - return nil, fmt.Errorf("mergeIntoStateFile buildIndex %s [%d-%d]: %w", fType.String(), aggFrom, aggTo, err) - } - item2.indexReader = recsplit.NewIndexReader(item2.index) - item2.readerMerge = recsplit.NewIndexReader(item2.index) - } - return item2, nil -} - -func createDatAndIndex(treeName string, diffDir string, bt *btree.BTreeG[*AggregateItem], blockFrom uint64, blockTo uint64) (*compress.Decompressor, *recsplit.Index, error) { - datPath := filepath.Join(diffDir, fmt.Sprintf("%s.%d-%d.dat", treeName, blockFrom, blockTo)) - idxPath := filepath.Join(diffDir, fmt.Sprintf("%s.%d-%d.idx", treeName, blockFrom, blockTo)) - count, err := btreeToFile(bt, datPath, diffDir, false /* trace */, 1 /* workers */) - if err != nil { - return nil, nil, fmt.Errorf("createDatAndIndex %s build btree: %w", treeName, err) - } - var d *compress.Decompressor - if d, err = compress.NewDecompressor(datPath); err != nil { - return nil, nil, fmt.Errorf("createDatAndIndex %s decompressor: %w", treeName, err) - } - var index *recsplit.Index - if index, err = buildIndex(d, idxPath, diffDir, count); err != nil { - return nil, nil, fmt.Errorf("createDatAndIndex %s buildIndex: %w", treeName, err) - } - return d, index, nil -} - -func (a *Aggregator) addLocked(fType FileType, item *byEndBlockItem) { - a.fileLocks[fType].Lock() - defer a.fileLocks[fType].Unlock() - a.files[fType].ReplaceOrInsert(item) -} - -func (w *Writer) aggregateUpto(blockFrom, blockTo uint64) error { - // React on any previous error of aggregation or merge - select { - case err := <-w.a.aggError: - return err - case err := <-w.a.mergeError: - return err - case err := <-w.a.historyError: - return err - default: - } - typesLimit := Commitment - if w.a.commitments { - typesLimit = AccountHistory - } - t0 := time.Now() - t := time.Now() - i := w.a.changesBtree.Get(&ChangesItem{startBlock: blockFrom, endBlock: blockTo}) - if i == nil { - return fmt.Errorf("did not find change files for [%d-%d], w.a.changesBtree.Len() = %d", blockFrom, blockTo, w.a.changesBtree.Len()) - } - item := i.(*ChangesItem) - if item.startBlock != blockFrom { - return fmt.Errorf("expected change files[%d-%d], got [%d-%d]", blockFrom, blockTo, item.startBlock, item.endBlock) - } - w.a.changesBtree.Delete(i) - var aggTask AggregationTask - for fType := FirstType; fType < typesLimit; fType++ { - aggTask.changes[fType].Init(fType.String(), w.a.aggregationStep, w.a.diffDir, w.a.changesets && fType != Commitment) - } - var err error - for fType := FirstType; fType < typesLimit; fType++ 
{ - var prefixLen int - if fType == Storage { - prefixLen = length.Addr - } - - var commitMerger commitmentMerger - if fType == Commitment { - commitMerger = mergeCommitments - } - - if aggTask.bt[fType], err = aggTask.changes[fType].aggregate(blockFrom, blockTo, prefixLen, w.tx, fType.Table(), commitMerger); err != nil { - return fmt.Errorf("aggregate %sChanges: %w", fType.String(), err) - } - } - aggTask.blockFrom = blockFrom - aggTask.blockTo = blockTo - aggTime := time.Since(t) - t = time.Now() - // At this point, all the changes are gathered in 4 B-trees (accounts, code, storage and commitment) and removed from the database - // What follows can be done in the 1st background goroutine - for fType := FirstType; fType < typesLimit; fType++ { - if fType < NumberOfStateTypes { - w.a.updateArch(aggTask.bt[fType], fType, uint32(aggTask.blockTo)) - } - } - updateArchTime := time.Since(t) - t = time.Now() - for fType := FirstType; fType < typesLimit; fType++ { - w.a.addLocked(fType, &byEndBlockItem{startBlock: aggTask.blockFrom, endBlock: aggTask.blockTo, tree: aggTask.bt[fType]}) - } - switchTime := time.Since(t) - w.a.aggChannel <- &aggTask - handoverTime := time.Since(t0) - if handoverTime > time.Second { - log.Info("Long handover to background aggregation", "from", blockFrom, "to", blockTo, "composition", aggTime, "arch update", updateArchTime, "switch", switchTime) - } - return nil -} - -// mergeIntoStateFile assumes that all entries in the cp heap have type FILE_CURSOR -func (a *Aggregator) mergeIntoStateFile(cp *CursorHeap, prefixLen int, - fType FileType, startBlock, endBlock uint64, dir string, - valTransform func(val, transValBuf commitment.BranchData) ([]byte, error), - mergeFunc commitmentMerger, - valCompressed bool, -) (*compress.Decompressor, int, error) { - datPath := filepath.Join(dir, fmt.Sprintf("%s.%d-%d.dat", fType.String(), startBlock, endBlock)) - comp, err := compress.NewCompressor(context.Background(), AggregatorPrefix, datPath, dir, compress.MinPatternScore, 1, log.LvlDebug) - if err != nil { - return nil, 0, fmt.Errorf("compressor %s: %w", datPath, err) - } - defer comp.Close() - count := 0 - // In the loop below, the pair `keyBuf=>valBuf` is always 1 item behind `lastKey=>lastVal`. - // `lastKey` and `lastVal` are taken from the top of the multi-way merge (assisted by the CursorHeap cp), but not processed right away - // instead, the pair from the previous iteration is processed first - `keyBuf=>valBuf`. After that, `keyBuf` and `valBuf` are assigned - // to `lastKey` and `lastVal` correspondingly, and the next step of multi-way merge happens. 
Therefore, after the multi-way merge loop - // (when CursorHeap cp is empty), there is a need to process the last pair `keyBuf=>valBuf`, because it was one step behind - var keyBuf, valBuf, transValBuf []byte - for cp.Len() > 0 { - lastKey := common.Copy((*cp)[0].key) - lastVal := common.Copy((*cp)[0].val) - var mergedOnce bool - if a.trace { - if _, ok := a.tracedKeys[string(lastKey)]; ok { - fmt.Printf("looking at key %x val [%x] endBlock %d to merge into [%d-%d]\n", lastKey, lastVal, (*cp)[0].endBlock, startBlock, endBlock) - } - } - // Advance all the items that have this key (including the top) - for cp.Len() > 0 && bytes.Equal((*cp)[0].key, lastKey) { - ci1 := (*cp)[0] - if a.trace { - if _, ok := a.tracedKeys[string(ci1.key)]; ok { - fmt.Printf("skipping same key %x val [%x] endBlock %d to merge into [%d-%d]\n", ci1.key, ci1.val, ci1.endBlock, startBlock, endBlock) - } - } - if ci1.t != FILE_CURSOR { - return nil, 0, fmt.Errorf("mergeIntoStateFile: cursor of unexpected type: %d", ci1.t) - } - if mergedOnce { - //fmt.Printf("mergeIntoStateFile pre-merge prefix [%x], [%x]+[%x]\n", commitment.CompactToHex(lastKey), ci1.val, lastVal) - if lastVal, err = mergeFunc(ci1.val, lastVal, nil); err != nil { - return nil, 0, fmt.Errorf("mergeIntoStateFile: merge values: %w", err) - } - //fmt.Printf("mergeIntoStateFile post-merge prefix [%x], [%x]\n", commitment.CompactToHex(lastKey), lastVal) - } else { - mergedOnce = true - } - if ci1.dg.HasNext() { - ci1.key, _ = ci1.dg.Next(ci1.key[:0]) - if valCompressed { - ci1.val, _ = ci1.dg.Next(ci1.val[:0]) - } else { - ci1.val, _ = ci1.dg.NextUncompressed() - } - - heap.Fix(cp, 0) - } else { - heap.Pop(cp) - } - } - var skip bool - switch fType { - case Storage: - // Inside storage files, there is a special item with empty value, and the key equal to the contract's address - // This special item is inserted before the contract storage items, in order to find them using un-ordered index - // (for the purposes of SELF-DESTRUCT and some RPC methods that require enumeration of contract storage) - // We will only skip this special item if there are no more corresponding storage items left - // (this is checked further down with `bytes.HasPrefix(lastKey, keyBuf)`) - skip = startBlock == 0 && len(lastVal) == 0 && len(lastKey) != prefixLen - case Commitment: - // For commitments, the 3rd and 4th bytes of the value (zero-based 2 and 3) contain so-called `afterMap` - // Its bit are set for children that are present in the tree, and unset for those that are not (deleted, for example) - // If all bits are zero (check below), this branch can be skipped, since it is empty - skip = startBlock == 0 && len(lastVal) >= 4 && lastVal[2] == 0 && lastVal[3] == 0 - case AccountHistory, StorageHistory, CodeHistory: - skip = false - default: - // For the rest of types, empty value means deletion - skip = startBlock == 0 && len(lastVal) == 0 - } - if skip { // Deleted marker can be skipped if we merge into the first file, except for the storage addr marker - if _, ok := a.tracedKeys[string(keyBuf)]; ok { - fmt.Printf("skipped key %x for [%d-%d]\n", keyBuf, startBlock, endBlock) - } - } else { - // The check `bytes.HasPrefix(lastKey, keyBuf)` is checking whether the `lastKey` is the first item - // of some contract's storage, and `keyBuf` (the item just before that) is the special item with the - // key being contract's address. 
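
// The Commitment case of the skip switch above can be read as a tiny
// predicate (an illustrative restatement only): bytes 2 and 3 of a branch
// value hold the afterMap, one bit per child still present in the tree, so
// an all-zero afterMap means the branch is empty and need not be carried
// into the first file.

func branchIsEmpty(branchVal []byte) bool { // hypothetical helper
	return len(branchVal) >= 4 && branchVal[2] == 0 && branchVal[3] == 0
}
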
If so, the special item (keyBuf => []) needs to be preserved - if keyBuf != nil && (prefixLen == 0 || len(keyBuf) != prefixLen || bytes.HasPrefix(lastKey, keyBuf)) { - if err = comp.AddWord(keyBuf); err != nil { - return nil, 0, err - } - if a.trace { - if _, ok := a.tracedKeys[string(keyBuf)]; ok { - fmt.Printf("merge key %x val [%x] into [%d-%d]\n", keyBuf, valBuf, startBlock, endBlock) - } - } - count++ // Only counting keys, not values - if valTransform != nil { - if transValBuf, err = valTransform(valBuf, transValBuf[:0]); err != nil { - return nil, 0, fmt.Errorf("mergeIntoStateFile -valTransform [%x]: %w", valBuf, err) - } - - if err = comp.AddWord(transValBuf); err != nil { - return nil, 0, err - } - } else if valCompressed { - if err = comp.AddWord(valBuf); err != nil { - return nil, 0, err - } - } else { - if err = comp.AddUncompressedWord(valBuf); err != nil { - return nil, 0, err - } - } - //if fType == Storage { - // fmt.Printf("merge %s.%d-%d [%x]=>[%x]\n", fType.String(), startBlock, endBlock, keyBuf, valBuf) - //} - } - - keyBuf = append(keyBuf[:0], lastKey...) - valBuf = append(valBuf[:0], lastVal...) - } - } - if keyBuf != nil { - if err = comp.AddWord(keyBuf); err != nil { - return nil, 0, err - } - if a.trace { - if _, ok := a.tracedKeys[string(keyBuf)]; ok { - fmt.Printf("merge key %x val [%x] into [%d-%d]\n", keyBuf, valBuf, startBlock, endBlock) - } - } - count++ // Only counting keys, not values - if valTransform != nil { - if transValBuf, err = valTransform(valBuf, transValBuf[:0]); err != nil { - return nil, 0, fmt.Errorf("mergeIntoStateFile valTransform [%x]: %w", valBuf, err) - } - if err = comp.AddWord(transValBuf); err != nil { - return nil, 0, err - } - } else if valCompressed { - if err = comp.AddWord(valBuf); err != nil { - return nil, 0, err - } - } else { - if err = comp.AddUncompressedWord(valBuf); err != nil { - return nil, 0, err - } - } - //if fType == Storage { - // fmt.Printf("merge %s.%d-%d [%x]=>[%x]\n", fType.String(), startBlock, endBlock, keyBuf, valBuf) - //} - } - if err = comp.Compress(); err != nil { - return nil, 0, err - } - var d *compress.Decompressor - if d, err = compress.NewDecompressor(datPath); err != nil { - return nil, 0, fmt.Errorf("decompressor: %w", err) - } - return d, count, nil -} - -func (a *Aggregator) stats(fType FileType) (count int, datSize, idxSize int64) { - a.fileLocks[fType].RLock() - defer a.fileLocks[fType].RUnlock() - count = 0 - datSize = 0 - idxSize = 0 - a.files[fType].Ascend(func(i btree.Item) bool { - item := i.(*byEndBlockItem) - if item.decompressor != nil { - count++ - datSize += item.decompressor.Size() - count++ - idxSize += item.index.Size() - } - return true - }) - return -} - -type FilesStats struct { - AccountsCount int - AccountsDatSize int64 - AccountsIdxSize int64 - CodeCount int - CodeDatSize int64 - CodeIdxSize int64 - StorageCount int - StorageDatSize int64 - StorageIdxSize int64 - CommitmentCount int - CommitmentDatSize int64 - CommitmentIdxSize int64 - Hits uint64 - Misses uint64 -} - -func (a *Aggregator) Stats() FilesStats { - var fs FilesStats - fs.AccountsCount, fs.AccountsDatSize, fs.AccountsIdxSize = a.stats(Account) - fs.CodeCount, fs.CodeDatSize, fs.CodeIdxSize = a.stats(Code) - fs.StorageCount, fs.StorageDatSize, fs.StorageIdxSize = a.stats(Storage) - fs.CommitmentCount, fs.CommitmentDatSize, fs.CommitmentIdxSize = a.stats(Commitment) - fs.Hits = atomic.LoadUint64(&a.fileHits) - fs.Misses = atomic.LoadUint64(&a.fileMisses) - return fs -} diff --git a/aggregator/aggregator_test.go 
b/aggregator/aggregator_test.go deleted file mode 100644 index 6a7f396bf..000000000 --- a/aggregator/aggregator_test.go +++ /dev/null @@ -1,314 +0,0 @@ -/* - Copyright 2022 Erigon contributors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package aggregator - -import ( - "bytes" - "encoding/binary" - "testing" - - "github.com/holiman/uint256" - - "github.com/ledgerwatch/erigon-lib/commitment" - "github.com/ledgerwatch/erigon-lib/kv/memdb" -) - -func int160(i uint64) []byte { - b := make([]byte, 20) - binary.BigEndian.PutUint64(b[12:], i) - return b -} - -func int256(i uint64) []byte { - b := make([]byte, 32) - binary.BigEndian.PutUint64(b[24:], i) - return b -} - -func accountWithBalance(i uint64) []byte { - balance := uint256.NewInt(i) - var l int - l++ - l++ - if i > 0 { - l += balance.ByteLen() - } - l++ - l++ - value := make([]byte, l) - pos := 0 - value[pos] = 0 - pos++ - if balance.IsZero() { - value[pos] = 0 - pos++ - } else { - balanceBytes := balance.ByteLen() - value[pos] = byte(balanceBytes) - pos++ - balance.WriteToSlice(value[pos : pos+balanceBytes]) - pos += balanceBytes - } - value[pos] = 0 - pos++ - value[pos] = 0 - return value -} - -func TestSimpleAggregator(t *testing.T) { - _, rwTx := memdb.NewTestTx(t) - - tmpDir := t.TempDir() - trie := commitment.InitializeTrie(commitment.VariantHexPatriciaTrie) - a, err := NewAggregator(tmpDir, 16, 4, true, true, 1000, trie, rwTx) - if err != nil { - t.Fatal(err) - } - defer a.Close() - w := a.MakeStateWriter(true /* beforeOn */) - if err = w.Reset(0, rwTx); err != nil { - t.Fatal(err) - } - defer w.Close() - var account1 = accountWithBalance(1) - w.UpdateAccountData(int160(1), account1, false /* trace */) - if err = w.FinishTx(0, false); err != nil { - t.Fatal(err) - } - if err = w.Aggregate(false /* trace */); err != nil { - t.Fatal(err) - } - r := a.MakeStateReader(2, rwTx) - acc, err := r.ReadAccountData(int160(1), false /* trace */) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(acc, account1) { - t.Errorf("read account %x, expected account %x", acc, account1) - } - if err = rwTx.Commit(); err != nil { - t.Fatal(err) - } -} - -func TestLoopAggregator(t *testing.T) { - _, rwTx := memdb.NewTestTx(t) - - tmpDir := t.TempDir() - trie := commitment.InitializeTrie(commitment.VariantHexPatriciaTrie) - a, err := NewAggregator(tmpDir, 16, 4, true, true, 1000, trie, rwTx) - if err != nil { - t.Fatal(err) - } - defer a.Close() - var account1 = accountWithBalance(1) - w := a.MakeStateWriter(true /* beforeOn */) - defer w.Close() - for blockNum := uint64(0); blockNum < 1000; blockNum++ { - accountKey := int160(blockNum/10 + 1) - //fmt.Printf("blockNum = %d\n", blockNum) - if err = w.Reset(blockNum, rwTx); err != nil { - t.Fatal(err) - } - w.UpdateAccountData(accountKey, account1, false /* trace */) - if err = w.FinishTx(blockNum, false /* trace */); err != nil { - t.Fatal(err) - } - if err = w.Aggregate(false /* trace */); err != nil { - t.Fatal(err) - } - r := a.MakeStateReader(blockNum+1, rwTx) - acc, err := r.ReadAccountData(accountKey, 
false /* trace */) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(acc, account1) { - t.Errorf("read account %x, expected account %x for block %d", acc, account1, blockNum) - } - account1 = accountWithBalance(blockNum + 2) - } - if err = rwTx.Commit(); err != nil { - t.Fatal(err) - } -} - -func TestRecreateAccountWithStorage(t *testing.T) { - _, rwTx := memdb.NewTestTx(t) - tmpDir := t.TempDir() - - trie := commitment.InitializeTrie(commitment.VariantHexPatriciaTrie) - a, err := NewAggregator(tmpDir, 16, 4, true, true, 1000, trie, rwTx) - if err != nil { - t.Fatal(err) - } - defer a.Close() - accountKey := int160(1) - var account1 = accountWithBalance(1) - var account2 = accountWithBalance(2) - w := a.MakeStateWriter(true /* beforeOn */) - defer w.Close() - for blockNum := uint64(0); blockNum < 100; blockNum++ { - if err = w.Reset(blockNum, rwTx); err != nil { - t.Fatal(err) - } - switch blockNum { - case 1: - w.UpdateAccountData(accountKey, account1, false /* trace */) - for s := uint64(0); s < 100; s++ { - w.WriteAccountStorage(accountKey, int256(s), uint256.NewInt(s+1).Bytes(), false /* trace */) - } - case 22: - w.DeleteAccount(accountKey, false /* trace */) - case 45: - w.UpdateAccountData(accountKey, account2, false /* trace */) - for s := uint64(50); s < 150; s++ { - w.WriteAccountStorage(accountKey, int256(s), uint256.NewInt(2*s+1).Bytes(), false /* trace */) - } - } - if err = w.FinishTx(blockNum, false /* trace */); err != nil { - t.Fatal(err) - } - if err = w.Aggregate(false /* trace */); err != nil { - t.Fatal(err) - } - r := a.MakeStateReader(blockNum+1, rwTx) - switch blockNum { - case 1: - acc, err := r.ReadAccountData(accountKey, false /* trace */) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(account1, acc) { - t.Errorf("wrong account after block %d, expected %x, got %x", blockNum, account1, acc) - } - for s := uint64(0); s < 100; s++ { - v, err := r.ReadAccountStorage(accountKey, int256(s), false /* trace */) - if err != nil { - t.Fatal(err) - } - if !uint256.NewInt(s + 1).Eq(uint256.NewInt(0).SetBytes(v)) { - t.Errorf("wrong storage value after block %d, expected %d, got %d", blockNum, s+1, uint256.NewInt(0).SetBytes(v)) - } - } - case 22, 44: - acc, err := r.ReadAccountData(accountKey, false /* trace */) - if err != nil { - t.Fatal(err) - } - if len(acc) > 0 { - t.Errorf("wrong account after block %d, expected nil, got %x", blockNum, acc) - } - for s := uint64(0); s < 100; s++ { - v, err := r.ReadAccountStorage(accountKey, int256(s), false /* trace */) - if err != nil { - t.Fatal(err) - } - if v != nil { - t.Errorf("wrong storage value after block %d, expected nil, got %d", blockNum, uint256.NewInt(0).SetBytes(v)) - } - } - case 66: - acc, err := r.ReadAccountData(accountKey, false /* trace */) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(account2, acc) { - t.Errorf("wrong account after block %d, expected %x, got %x", blockNum, account1, acc) - } - for s := uint64(0); s < 150; s++ { - v, err := r.ReadAccountStorage(accountKey, int256(s), false /* trace */) - if err != nil { - t.Fatal(err) - } - if s < 50 { - if v != nil { - t.Errorf("wrong storage value after block %d, expected nil, got %d", blockNum, uint256.NewInt(0).SetBytes(v)) - } - } else if v == nil || !uint256.NewInt(2*s+1).Eq(uint256.NewInt(0).SetBytes(v)) { - t.Errorf("wrong storage value after block %d, expected %d, got %d", blockNum, 2*s+1, uint256.NewInt(0).SetBytes(v)) - } - } - } - } - if err = rwTx.Commit(); err != nil { - t.Fatal(err) - } -} - -func TestChangeCode(t *testing.T) 
{ - _, rwTx := memdb.NewTestTx(t) - - tmpDir := t.TempDir() - trie := commitment.InitializeTrie(commitment.VariantHexPatriciaTrie) - a, err := NewAggregator(tmpDir, 16, 4, true, true, 1000, trie, rwTx) - if err != nil { - t.Fatal(err) - } - defer a.Close() - accountKey := int160(1) - var account1 = accountWithBalance(1) - var code1 = []byte("This is the code number 1") - w := a.MakeStateWriter(true /* beforeOn */) - defer w.Close() - for blockNum := uint64(0); blockNum < 100; blockNum++ { - if err = w.Reset(blockNum, rwTx); err != nil { - t.Fatal(err) - } - switch blockNum { - case 1: - w.UpdateAccountData(accountKey, account1, false /* trace */) - w.UpdateAccountCode(accountKey, code1, false /* trace */) - case 25: - w.DeleteAccount(accountKey, false /* trace */) - } - if err = w.FinishTx(blockNum, false /* trace */); err != nil { - t.Fatal(err) - } - if err = w.Aggregate(false /* trace */); err != nil { - t.Fatal(err) - } - r := a.MakeStateReader(blockNum+1, rwTx) - switch blockNum { - case 22: - acc, err := r.ReadAccountData(accountKey, false /* trace */) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(account1, acc) { - t.Errorf("wrong account after block %d, expected %x, got %x", blockNum, account1, acc) - } - code, err := r.ReadAccountCode(accountKey, false /* trace */) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(code1, code) { - t.Errorf("wrong code after block %d, expected %x, got %x", blockNum, code1, code) - } - case 47: - code, err := r.ReadAccountCode(accountKey, false /* trace */) - if err != nil { - t.Fatal(err) - } - if code != nil { - t.Errorf("wrong code after block %d, expected nil, got %x", blockNum, code) - } - } - } - if err = rwTx.Commit(); err != nil { - t.Fatal(err) - } -} diff --git a/aggregator/history.go b/aggregator/history.go deleted file mode 100644 index b45c83462..000000000 --- a/aggregator/history.go +++ /dev/null @@ -1,354 +0,0 @@ -/* - Copyright 2022 Erigon contributors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package aggregator - -import ( - "encoding/binary" - "fmt" - "io/fs" - "os" - "path" - "regexp" - "strconv" - "strings" - - "github.com/google/btree" - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/compress" - "github.com/ledgerwatch/erigon-lib/recsplit" - "github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32" - "github.com/ledgerwatch/log/v3" -) - -// History is a utility class that allows reading history of state -// from state files, history files, and bitmap files produced by an Aggregator -type History struct { - files [NumberOfTypes]*btree.BTreeG[*byEndBlockItem] - diffDir string // Directory where the state diff files are stored - aggregationStep uint64 -} - -func NewHistory(diffDir string, blockTo uint64, aggregationStep uint64) (*History, error) { - h := &History{ - diffDir: diffDir, - aggregationStep: aggregationStep, - } - for fType := FirstType; fType < NumberOfTypes; fType++ { - h.files[fType] = btree.NewG(32, ByEndBlockItemLess) - } - var closeStateFiles = true // It will be set to false in case of success at the end of the function - defer func() { - // Clean up all decompressor and indices upon error - if closeStateFiles { - h.Close() - } - }() - // Scan the diff directory and create the mapping of end blocks to files - files, err := os.ReadDir(diffDir) - if err != nil { - return nil, err - } - h.scanStateFiles(files, blockTo) - for fType := FirstType; fType < NumberOfTypes; fType++ { - if err := h.openFiles(fType); err != nil { - return nil, fmt.Errorf("opening %s state files: %w", fType.String(), err) - } - } - closeStateFiles = false - return h, nil -} - -func (h *History) scanStateFiles(files []fs.DirEntry, blockTo uint64) { - typeStrings := make([]string, NumberOfTypes) - for fType := FileType(0); fType < NumberOfTypes; fType++ { - typeStrings[fType] = fType.String() - } - re := regexp.MustCompile("^(" + strings.Join(typeStrings, "|") + ").([0-9]+)-([0-9]+).(dat|idx)$") - var err error - for _, f := range files { - name := f.Name() - subs := re.FindStringSubmatch(name) - if len(subs) != 5 { - if len(subs) != 0 { - log.Warn("File ignored by history, more than 4 submatches", "name", name, "submatches", len(subs)) - } - continue - } - var startBlock, endBlock uint64 - if startBlock, err = strconv.ParseUint(subs[2], 10, 64); err != nil { - log.Warn("File ignored by history, parsing startBlock", "error", err, "name", name) - continue - } - if endBlock, err = strconv.ParseUint(subs[3], 10, 64); err != nil { - log.Warn("File ignored by history, parsing endBlock", "error", err, "name", name) - continue - } - if startBlock > endBlock { - log.Warn("File ignored by history, startBlock > endBlock", "name", name) - continue - } - if endBlock > blockTo { - // Only load files up to specified block - continue - } - fType, ok := ParseFileType(subs[1]) - if !ok { - log.Warn("File ignored by history, type unknown", "type", subs[1]) - } - var item = &byEndBlockItem{startBlock: startBlock, endBlock: endBlock} - var foundI *byEndBlockItem - h.files[fType].AscendGreaterOrEqual(&byEndBlockItem{startBlock: endBlock, endBlock: endBlock}, func(it *byEndBlockItem) bool { - if it.endBlock == endBlock { - foundI = it - } - return false - }) - if foundI == nil || foundI.startBlock > startBlock { - h.files[fType].ReplaceOrInsert(item) - log.Info("Load file", "name", name, "type", fType.String(), "endBlock", item.endBlock) - } - } -} - -func (h *History) openFiles(fType FileType) error { - var err error - h.files[fType].Ascend(func(item *byEndBlockItem) bool { - if 
item.decompressor, err = compress.NewDecompressor(path.Join(h.diffDir, fmt.Sprintf("%s.%d-%d.dat", fType.String(), item.startBlock, item.endBlock))); err != nil { - return false - } - if item.index, err = recsplit.OpenIndex(path.Join(h.diffDir, fmt.Sprintf("%s.%d-%d.idx", fType.String(), item.startBlock, item.endBlock))); err != nil { - return false - } - item.getter = item.decompressor.MakeGetter() - item.getterMerge = item.decompressor.MakeGetter() - item.indexReader = recsplit.NewIndexReader(item.index) - item.readerMerge = recsplit.NewIndexReader(item.index) - return true - }) - return err -} - -func (h *History) closeFiles(fType FileType) { - h.files[fType].Ascend(func(item *byEndBlockItem) bool { - if item.decompressor != nil { - item.decompressor.Close() - } - if item.index != nil { - item.index.Close() - } - return true - }) -} - -func (h *History) Close() { - // Closing state files only after background aggregation goroutine is finished - for fType := FirstType; fType < NumberOfTypes; fType++ { - h.closeFiles(fType) - } -} - -func (h *History) MakeHistoryReader() *HistoryReader { - r := &HistoryReader{ - h: h, - } - return r -} - -type HistoryReader struct { - h *History - search byEndBlockItem - blockNum uint64 - txNum uint64 - lastTx bool // Whether it is the last transaction in the block -} - -func (hr *HistoryReader) SetNums(blockNum, txNum uint64, lastTx bool) { - hr.blockNum = blockNum - hr.txNum = txNum - hr.lastTx = lastTx -} - -func (hr *HistoryReader) searchInHistory(bitmapType, historyType FileType, key []byte, trace bool) (bool, []byte, error) { - if trace { - fmt.Printf("searchInHistory %s %s [%x] blockNum %d, txNum %d\n", bitmapType.String(), historyType.String(), key, hr.blockNum, hr.txNum) - } - searchBlock := hr.blockNum - if hr.lastTx { - searchBlock++ - } - searchTx := hr.txNum - hr.search.endBlock = searchBlock - hr.search.startBlock = searchBlock - (searchBlock % 500_000) - var eliasVal []byte - var err error - var found bool - var foundTxNum uint64 - var foundEndBlock uint64 - hr.h.files[bitmapType].AscendGreaterOrEqual(&hr.search, func(item *byEndBlockItem) bool { - offset := item.indexReader.Lookup(key) - g := item.getter - g.Reset(offset) - if keyMatch, _ := g.Match(key); keyMatch { - if trace { - fmt.Printf("Found bitmap for [%x] in %s.[%d-%d]\n", key, bitmapType.String(), item.startBlock, item.endBlock) - } - eliasVal, _ = g.NextUncompressed() - ef, _ := eliasfano32.ReadEliasFano(eliasVal) - it := ef.Iterator() - if trace { - for it.HasNext() { - v, _ := it.Next() - fmt.Printf(" %d", v) - } - fmt.Printf("\n") - } - foundTxNum, found = ef.Search(searchTx) - if found { - foundEndBlock = item.endBlock - return false - } - } - // Not found, next - return true - }) - if err != nil { - return false, nil, err - } - if !found { - return false, nil, nil - } - if trace { - fmt.Printf("found in tx %d, endBlock %d\n", foundTxNum, foundEndBlock) - } - var lookupKey = make([]byte, len(key)+8) - binary.BigEndian.PutUint64(lookupKey, foundTxNum) - copy(lookupKey[8:], key) - var historyItem *byEndBlockItem - hr.search.endBlock = foundEndBlock - hr.search.startBlock = foundEndBlock - 499_999 - var ok bool - historyItem, ok = hr.h.files[historyType].Get(&hr.search) - if !ok || historyItem == nil { - return false, nil, fmt.Errorf("no %s file found for %d", historyType.String(), foundEndBlock) - } - offset := historyItem.indexReader.Lookup(lookupKey) - if trace { - fmt.Printf("Lookup [%x] in %s.[%d-%d].idx = %d\n", lookupKey, historyType.String(), historyItem.startBlock, 
historyItem.endBlock, offset) - } - historyItem.getter.Reset(offset) - v, _ := historyItem.getter.Next(nil) - return true, v, nil -} - -func (hr *HistoryReader) ReadAccountData(addr []byte, trace bool) ([]byte, error) { - // Look in the history first - hOk, v, err := hr.searchInHistory(AccountBitmap, AccountHistory, addr, trace) - if err != nil { - return nil, err - } - if hOk { - if trace { - fmt.Printf("ReadAccountData %x, found in history [%x]\n", addr, v) - } - return v, nil - } - if trace { - fmt.Printf("ReadAccountData %x, not found in history, get from the state\n", addr) - } - // Not found in history - look in the state files - return hr.h.readFromFiles(Account, addr, trace), nil -} - -func (hr *HistoryReader) ReadAccountStorage(addr []byte, loc []byte, trace bool) (*uint256.Int, error) { - // Look in the history first - dbkey := make([]byte, len(addr)+len(loc)) - copy(dbkey[0:], addr) - copy(dbkey[len(addr):], loc) - hOk, v, err := hr.searchInHistory(StorageBitmap, StorageHistory, dbkey, trace) - if err != nil { - return nil, err - } - if hOk { - return new(uint256.Int).SetBytes(v), nil - } - // Not found in history, look in the state files - v = hr.h.readFromFiles(Storage, dbkey, trace) - if v != nil { - return new(uint256.Int).SetBytes(v), nil - } - return nil, nil -} - -func (hr *HistoryReader) ReadAccountCode(addr []byte, trace bool) ([]byte, error) { - // Look in the history first - hOk, v, err := hr.searchInHistory(CodeBitmap, CodeHistory, addr, false) - if err != nil { - return nil, err - } - if hOk { - return v, err - } - // Not found in history, look in the history files - return hr.h.readFromFiles(Code, addr, trace), nil -} - -func (hr *HistoryReader) ReadAccountCodeSize(addr []byte, trace bool) (int, error) { - // Look in the history first - hOk, v, err := hr.searchInHistory(CodeBitmap, CodeHistory, addr, false) - if err != nil { - return 0, err - } - if hOk { - return len(v), err - } - // Not found in history, look in the history files - return len(hr.h.readFromFiles(Code, addr, trace)), nil -} - -func (h *History) readFromFiles(fType FileType, filekey []byte, trace bool) []byte { - var val []byte - h.files[fType].Descend(func(item *byEndBlockItem) bool { - if trace { - fmt.Printf("read %s %x: search in file [%d-%d]\n", fType.String(), filekey, item.startBlock, item.endBlock) - } - if item.tree != nil { - ai, ok := item.tree.Get(&AggregateItem{k: filekey}) - if !ok || ai == nil { - return true - } - val = ai.v - return false - } - if item.index.Empty() { - return true - } - offset := item.indexReader.Lookup(filekey) - g := item.getter - g.Reset(offset) - if g.HasNext() { - if keyMatch, _ := g.Match(filekey); keyMatch { - val, _ = g.Next(nil) - if trace { - fmt.Printf("read %s %x: found [%x] in file [%d-%d]\n", fType.String(), filekey, val, item.startBlock, item.endBlock) - } - return false - } - } - return true - }) - return val -} diff --git a/chain/aura_config.go b/chain/aura_config.go new file mode 100644 index 000000000..a8fa90190 --- /dev/null +++ b/chain/aura_config.go @@ -0,0 +1,93 @@ +/* + Copyright 2023 The Erigon contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package chain + +import ( + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/hexutility" +) + +// Different ways of specifying validators. +type ValidatorSetJson struct { + // A simple list of authorities. + List []common.Address `json:"list"` + // Address of a contract that indicates the list of authorities. + SafeContract *common.Address `json:"safeContract"` + // Address of a contract that indicates the list of authorities and enables reporting of their misbehaviour using transactions. + Contract *common.Address `json:"contract"` + // A map of starting blocks for each validator set. + Multi map[uint64]*ValidatorSetJson `json:"multi"` +} + +// AuRaConfig is the consensus engine configs for proof-of-authority based sealing. +type AuRaConfig struct { + StepDuration *uint64 `json:"stepDuration"` // Block duration, in seconds. + Validators *ValidatorSetJson `json:"validators"` // Valid authorities + + // Starting step. Determined automatically if not specified. + // To be used for testing only. + StartStep *uint64 `json:"startStep"` + ValidateScoreTransition *uint64 `json:"validateScoreTransition"` // Block at which score validation should start. + ValidateStepTransition *uint64 `json:"validateStepTransition"` // Block from which monotonic steps start. + ImmediateTransitions *bool `json:"immediateTransitions"` // Whether transitions should be immediate. + BlockReward *uint64 `json:"blockReward"` // Reward per block in wei. + // Block at which the block reward contract should start being used. This option allows one to + // add a single block reward contract transition and is compatible with the multiple address + // option `block_reward_contract_transitions` below. + BlockRewardContractTransition *uint64 `json:"blockRewardContractTransition"` + /// Block reward contract address which overrides the `block_reward` setting. This option allows + /// one to add a single block reward contract address and is compatible with the multiple + /// address option `block_reward_contract_transitions` below. + BlockRewardContractAddress *common.Address `json:"blockRewardContractAddress"` + // Block reward contract addresses with their associated starting block numbers. + // + // Setting the block reward contract overrides `block_reward`. If the single block reward + // contract address is also present then it is added into the map at the block number stored in + // `block_reward_contract_transition` or 0 if that block number is not provided. Therefore both + // a single block reward contract transition and a map of reward contract transitions can be + // used simultaneously in the same configuration. In such a case the code requires that the + // block number of the single transition is strictly less than any of the block numbers in the + // map. + BlockRewardContractTransitions map[uint]common.Address `json:"blockRewardContractTransitions"` + // Block at which maximum uncle count should be considered. + MaximumUncleCountTransition *uint64 `json:"maximumUncleCountTransition"` + // Maximum number of accepted uncles. + MaximumUncleCount *uint `json:"maximumUncleCount"` + // Strict validation of empty steps transition block. + StrictEmptyStepsTransition *uint `json:"strictEmptyStepsTransition"` + // The random number contract's address, or a map of contract transitions. 
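+	// For example (illustrative, hypothetical values): a map {0: addrA, 10_000_000: addrB}
+	// would use contract addrA from genesis and switch to addrB at block 10,000,000.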
+ RandomnessContractAddress map[uint64]common.Address `json:"randomnessContractAddress"` + // The addresses of contracts that determine the block gas limit starting from the block number + // associated with each of those contracts. + BlockGasLimitContractTransitions map[uint64]common.Address `json:"blockGasLimitContractTransitions"` + // The block number at which the consensus engine switches from AuRa to AuRa with POSDAO + // modifications. + PosdaoTransition *uint64 `json:"PosdaoTransition"` + // Stores human-readable keys associated with addresses, like DNS information. + // This contract is primarily required to store the address of the Certifier contract. + Registrar *common.Address `json:"registrar"` + + // See https://github.com/gnosischain/specs/blob/master/execution/withdrawals.md + WithdrawalContractAddress *common.Address `json:"withdrawalContractAddress"` + + RewriteBytecode map[uint64]map[common.Address]hexutility.Bytes `json:"rewriteBytecode"` +} + +// String implements the stringer interface, returning the consensus engine details. +func (c *AuRaConfig) String() string { + return "aura" +} diff --git a/chain/chain_config.go b/chain/chain_config.go index 657c170e8..ffda7c544 100644 --- a/chain/chain_config.go +++ b/chain/chain_config.go @@ -36,48 +36,37 @@ type Config struct { Consensus ConsensusName `json:"consensus,omitempty"` // aura, ethash or clique - HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"` // Homestead switch block (nil = no fork, 0 = already homestead) - - DAOForkBlock *big.Int `json:"daoForkBlock,omitempty"` // TheDAO hard-fork switch block (nil = no fork) - DAOForkSupport bool `json:"daoForkSupport,omitempty"` // Whether the nodes supports or opposes the DAO hard-fork - - // Tangerine Whistle (EIP150) implements the Gas price changes (https://github.com/ethereum/EIPs/issues/150) - TangerineWhistleBlock *big.Int `json:"eip150Block,omitempty"` // EIP150 HF block (nil = no fork) - TangerineWhistleHash common.Hash `json:"eip150Hash,omitempty"` // EIP150 HF hash (needed for header only clients as only gas pricing changed) - - SpuriousDragonBlock *big.Int `json:"eip155Block,omitempty"` // Spurious Dragon HF block - - ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium) - ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated) - PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"` // Petersburg switch block (nil = same as Constantinople) - IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` // Istanbul switch block (nil = no fork, 0 = already on istanbul) - MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // EIP-2384 (bomb delay) switch block (nil = no fork, 0 = already activated) - BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin) - LondonBlock *big.Int `json:"londonBlock,omitempty"` // London switch block (nil = no fork, 0 = already on london) - ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"` // EIP-4345 (bomb delay) switch block (nil = no fork, 0 = already activated) - GrayGlacierBlock *big.Int `json:"grayGlacierBlock,omitempty"` // EIP-5133 (bomb delay) switch block (nil = no fork, 0 = already activated) - - // EIP-3675: Upgrade consensus to Proof-of-Stake + // *Block fields activate the corresponding hard fork at a certain block number, + // while *Time fields do so based on the block's time 
stamp. + nil means that the hard-fork is not scheduled, + while 0 means that it's already activated from genesis. + + // ETH mainnet upgrades + // See https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades + HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"` + DAOForkBlock *big.Int `json:"daoForkBlock,omitempty"` + TangerineWhistleBlock *big.Int `json:"eip150Block,omitempty"` + SpuriousDragonBlock *big.Int `json:"eip155Block,omitempty"` + ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` + ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` + PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"` + IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` + MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` + BerlinBlock *big.Int `json:"berlinBlock,omitempty"` + LondonBlock *big.Int `json:"londonBlock,omitempty"` + ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"` + GrayGlacierBlock *big.Int `json:"grayGlacierBlock,omitempty"` + + // EIP-3675: Upgrade consensus to Proof-of-Stake (a.k.a. "Paris", "The Merge") TerminalTotalDifficulty *big.Int `json:"terminalTotalDifficulty,omitempty"` // The merge happens when terminal total difficulty is reached TerminalTotalDifficultyPassed bool `json:"terminalTotalDifficultyPassed,omitempty"` // Disable PoW sync for networks that have already passed through the Merge MergeNetsplitBlock *big.Int `json:"mergeNetsplitBlock,omitempty"` // Virtual fork after The Merge to use as a network splitter; see FORK_NEXT_VALUE in EIP-3675 - ShanghaiTime *big.Int `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already activated) - CancunTime *big.Int `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already activated) - ShardingForkTime *big.Int `json:"shardingForkTime,omitempty"` // Mini-Danksharding switch block (nil = no fork, 0 = already activated) - - // Parlia fork blocks - RamanujanBlock *big.Int `json:"ramanujanBlock,omitempty" toml:",omitempty"` // ramanujanBlock switch block (nil = no fork, 0 = already activated) - NielsBlock *big.Int `json:"nielsBlock,omitempty" toml:",omitempty"` // nielsBlock switch block (nil = no fork, 0 = already activated) - MirrorSyncBlock *big.Int `json:"mirrorSyncBlock,omitempty" toml:",omitempty"` // mirrorSyncBlock switch block (nil = no fork, 0 = already activated) - BrunoBlock *big.Int `json:"brunoBlock,omitempty" toml:",omitempty"` // brunoBlock switch block (nil = no fork, 0 = already activated) - EulerBlock *big.Int `json:"eulerBlock,omitempty" toml:",omitempty"` // eulerBlock switch block (nil = no fork, 0 = already activated) - GibbsBlock *big.Int `json:"gibbsBlock,omitempty" toml:",omitempty"` // gibbsBlock switch block (nil = no fork, 0 = already activated) - NanoBlock *big.Int `json:"nanoBlock,omitempty" toml:",omitempty"` // nanoBlock switch block (nil = no fork, 0 = already activated) - MoranBlock *big.Int `json:"moranBlock,omitempty" toml:",omitempty"` // moranBlock switch block (nil = no fork, 0 = already activated) - - // Gnosis Chain fork blocks - PosdaoBlock *big.Int `json:"posdaoBlock,omitempty"` + // Mainnet fork scheduling switched from block numbers to timestamps after The Merge + ShanghaiTime *big.Int `json:"shanghaiTime,omitempty"` + ShardingForkTime *big.Int `json:"shardingForkTime,omitempty"` + CancunTime *big.Int `json:"cancunTime,omitempty"` + PragueTime *big.Int `json:"pragueTime,omitempty"` Eip1559FeeCollector *common.Address
`json:"eip1559FeeCollector,omitempty"` // (Optional) Address where burnt EIP-1559 fees go to Eip1559FeeCollectorTransition *big.Int `json:"eip1559FeeCollectorTransition,omitempty"` // (Optional) Block from which burnt EIP-1559 fees go to the Eip1559FeeCollector @@ -86,34 +75,16 @@ type Config struct { Ethash *EthashConfig `json:"ethash,omitempty"` Clique *CliqueConfig `json:"clique,omitempty"` Aura *AuRaConfig `json:"aura,omitempty"` - Parlia *ParliaConfig `json:"parlia,omitempty" toml:",omitempty"` Bor *BorConfig `json:"bor,omitempty"` } func (c *Config) String() string { engine := c.getEngine() - if c.Consensus == ParliaConsensus { - return fmt.Sprintf("{ChainID: %v Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Gibbs: %v, Engine: %v}", - c.ChainID, - c.RamanujanBlock, - c.NielsBlock, - c.MirrorSyncBlock, - c.BrunoBlock, - c.EulerBlock, - c.GibbsBlock, - c.NanoBlock, - c.MoranBlock, - c.GibbsBlock, - engine, - ) - } - - return fmt.Sprintf("{ChainID: %v, Homestead: %v, DAO: %v, DAO Support: %v, Tangerine Whistle: %v, Spurious Dragon: %v, Byzantium: %v, Constantinople: %v, Petersburg: %v, Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Gray Glacier: %v, Terminal Total Difficulty: %v, Merge Netsplit: %v, Shanghai: %v, Sharding: %v, Cancun: %v, Engine: %v}", + return fmt.Sprintf("{ChainID: %v, Homestead: %v, DAO: %v, Tangerine Whistle: %v, Spurious Dragon: %v, Byzantium: %v, Constantinople: %v, Petersburg: %v, Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Gray Glacier: %v, Terminal Total Difficulty: %v, Merge Netsplit: %v, Shanghai: %v, Sharding: %v, Cancun: %v, Prague: %v, Engine: %v}", c.ChainID, c.HomesteadBlock, c.DAOForkBlock, - c.DAOForkSupport, c.TangerineWhistleBlock, c.SpuriousDragonBlock, c.ByzantiumBlock, @@ -130,6 +101,7 @@ func (c *Config) String() string { c.ShanghaiTime, c.ShardingForkTime, c.CancunTime, + c.PragueTime, engine, ) } @@ -140,8 +112,6 @@ func (c *Config) getEngine() string { return c.Ethash.String() case c.Clique != nil: return c.Clique.String() - case c.Parlia != nil: - return c.Parlia.String() case c.Bor != nil: return c.Bor.String() case c.Aura != nil: @@ -181,81 +151,6 @@ func (c *Config) IsConstantinople(num uint64) bool { return isForked(c.ConstantinopleBlock, num) } -// IsRamanujan returns whether num is either equal to the IsRamanujan fork block or greater. -func (c *Config) IsRamanujan(num uint64) bool { - return isForked(c.RamanujanBlock, num) -} - -// IsOnRamanujan returns whether num is equal to the Ramanujan fork block -func (c *Config) IsOnRamanujan(num *big.Int) bool { - return numEqual(c.RamanujanBlock, num) -} - -// IsNiels returns whether num is either equal to the Niels fork block or greater. -func (c *Config) IsNiels(num uint64) bool { - return isForked(c.NielsBlock, num) -} - -// IsOnNiels returns whether num is equal to the IsNiels fork block -func (c *Config) IsOnNiels(num *big.Int) bool { - return numEqual(c.NielsBlock, num) -} - -// IsMirrorSync returns whether num is either equal to the MirrorSync fork block or greater. -func (c *Config) IsMirrorSync(num uint64) bool { - return isForked(c.MirrorSyncBlock, num) -} - -// IsOnMirrorSync returns whether num is equal to the MirrorSync fork block -func (c *Config) IsOnMirrorSync(num *big.Int) bool { - return numEqual(c.MirrorSyncBlock, num) -} - -// IsBruno returns whether num is either equal to the Burn fork block or greater. 
-func (c *Config) IsBruno(num uint64) bool { - return isForked(c.BrunoBlock, num) -} - -// IsOnBruno returns whether num is equal to the Burn fork block -func (c *Config) IsOnBruno(num *big.Int) bool { - return numEqual(c.BrunoBlock, num) -} - -// IsEuler returns whether num is either equal to the euler fork block or greater. -func (c *Config) IsEuler(num *big.Int) bool { - return isForked(c.EulerBlock, num.Uint64()) -} - -func (c *Config) IsOnEuler(num *big.Int) bool { - return numEqual(c.EulerBlock, num) -} - -// IsGibbs returns whether num is either equal to the euler fork block or greater. -func (c *Config) IsGibbs(num *big.Int) bool { - return isForked(c.GibbsBlock, num.Uint64()) -} - -func (c *Config) IsOnGibbs(num *big.Int) bool { - return numEqual(c.GibbsBlock, num) -} - -func (c *Config) IsMoran(num uint64) bool { - return isForked(c.MoranBlock, num) -} - -func (c *Config) IsOnMoran(num *big.Int) bool { - return numEqual(c.MoranBlock, num) -} - -// IsNano returns whether num is either equal to the euler fork block or greater. -func (c *Config) IsNano(num uint64) bool { - return isForked(c.NanoBlock, num) -} - -func (c *Config) IsOnNano(num *big.Int) bool { - return numEqual(c.NanoBlock, num) -} - // IsMuirGlacier returns whether num is either equal to the Muir Glacier (EIP-2384) fork block or greater. func (c *Config) IsMuirGlacier(num uint64) bool { return isForked(c.MuirGlacierBlock, num) @@ -298,9 +193,10 @@ func (c *Config) IsShanghai(time uint64) bool { return isForked(c.ShanghaiTime, time) } -// IsSharding returns whether time is either equal to the Mini-Danksharding fork time or greater. -func (c *Config) IsSharding(time uint64) bool { - return isForked(c.ShardingForkTime, time) +// IsSharding returns whether time is either equal to the Sharding fork time or greater. +// Ultimately this should be part of Cancun, but we keep it separate for devnet testing. +func (c *Config) IsSharding(time uint64) bool { + return isForked(c.ShardingForkTime, time) } // IsCancun returns whether time is either equal to the Cancun fork time or greater. @@ -308,6 +204,11 @@ func (c *Config) IsCancun(time uint64) bool { return isForked(c.CancunTime, time) } +// IsPrague returns whether time is either equal to the Prague fork time or greater.
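+// For example (illustrative, hypothetical timestamp): with PragueTime = big.NewInt(1_800_000_000),
+// IsPrague(1_799_999_999) returns false, while IsPrague(1_800_000_000) returns true,
+// since the fork activates at exactly the fork time.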
+func (c *Config) IsPrague(time uint64) bool { + return isForked(c.PragueTime, time) +} + func (c *Config) IsEip1559FeeCollector(num uint64) bool { return c.Eip1559FeeCollector != nil && isForked(c.Eip1559FeeCollectorTransition, num) } @@ -330,32 +231,28 @@ func (c *Config) CheckCompatible(newcfg *Config, height uint64) *ConfigCompatErr return lasterr } -type forkPoint struct { - name string - block *big.Int - canSkip bool // if true, the fork may be nil and next fork is still allowed -} - -func (c *Config) forkPoints() []forkPoint { - return []forkPoint{ - {name: "homesteadBlock", block: c.HomesteadBlock}, - {name: "daoForkBlock", block: c.DAOForkBlock, canSkip: true}, - {name: "eip150Block", block: c.TangerineWhistleBlock}, - {name: "eip155Block", block: c.SpuriousDragonBlock}, - {name: "byzantiumBlock", block: c.ByzantiumBlock}, - {name: "constantinopleBlock", block: c.ConstantinopleBlock}, - {name: "petersburgBlock", block: c.PetersburgBlock}, - {name: "istanbulBlock", block: c.IstanbulBlock}, - {name: "muirGlacierBlock", block: c.MuirGlacierBlock, canSkip: true}, - {name: "eulerBlock", block: c.EulerBlock, canSkip: true}, - {name: "gibbsBlock", block: c.GibbsBlock, canSkip: true}, - {name: "berlinBlock", block: c.BerlinBlock}, - {name: "londonBlock", block: c.LondonBlock}, - {name: "arrowGlacierBlock", block: c.ArrowGlacierBlock, canSkip: true}, - {name: "grayGlacierBlock", block: c.GrayGlacierBlock, canSkip: true}, - {name: "mergeNetsplitBlock", block: c.MergeNetsplitBlock, canSkip: true}, - // {name: "shanghaiTime", timestamp: c.ShanghaiTime}, - // {name: "shardingForkTime", timestamp: c.ShardingForkTime}, +type forkBlockNumber struct { + name string + blockNumber *big.Int + optional bool // if true, the fork may be nil and next fork is still allowed +} + +func (c *Config) forkBlockNumbers() []forkBlockNumber { + return []forkBlockNumber{ + {name: "homesteadBlock", blockNumber: c.HomesteadBlock}, + {name: "daoForkBlock", blockNumber: c.DAOForkBlock, optional: true}, + {name: "eip150Block", blockNumber: c.TangerineWhistleBlock}, + {name: "eip155Block", blockNumber: c.SpuriousDragonBlock}, + {name: "byzantiumBlock", blockNumber: c.ByzantiumBlock}, + {name: "constantinopleBlock", blockNumber: c.ConstantinopleBlock}, + {name: "petersburgBlock", blockNumber: c.PetersburgBlock}, + {name: "istanbulBlock", blockNumber: c.IstanbulBlock}, + {name: "muirGlacierBlock", blockNumber: c.MuirGlacierBlock, optional: true}, + {name: "berlinBlock", blockNumber: c.BerlinBlock}, + {name: "londonBlock", blockNumber: c.LondonBlock}, + {name: "arrowGlacierBlock", blockNumber: c.ArrowGlacierBlock, optional: true}, + {name: "grayGlacierBlock", blockNumber: c.GrayGlacierBlock, optional: true}, + {name: "mergeNetsplitBlock", blockNumber: c.MergeNetsplitBlock, optional: true}, } } @@ -365,24 +262,24 @@ func (c *Config) CheckConfigForkOrder() error { return nil } - var lastFork forkPoint + var lastFork forkBlockNumber - for _, fork := range c.forkPoints() { + for _, fork := range c.forkBlockNumbers() { if lastFork.name != "" { // Next one must be higher number - if lastFork.block == nil && fork.block != nil { + if lastFork.blockNumber == nil && fork.blockNumber != nil { return fmt.Errorf("unsupported fork ordering: %v not enabled, but %v enabled at %v", - lastFork.name, fork.name, fork.block) + lastFork.name, fork.name, fork.blockNumber) } - if lastFork.block != nil && fork.block != nil { - if lastFork.block.Cmp(fork.block) > 0 { + if lastFork.blockNumber != nil && fork.blockNumber != nil { + if 
lastFork.blockNumber.Cmp(fork.blockNumber) > 0 { return fmt.Errorf("unsupported fork ordering: %v enabled at %v, but %v enabled at %v", - lastFork.name, lastFork.block, fork.name, fork.block) + lastFork.name, lastFork.blockNumber, fork.name, fork.blockNumber) } } // If it was optional and not set, then ignore it } - if !fork.canSkip || fork.block != nil { + if !fork.optional || fork.blockNumber != nil { lastFork = fork } } @@ -402,9 +299,6 @@ func (c *Config) checkCompatible(newcfg *Config, head uint64) *ConfigCompatError if incompatible(c.DAOForkBlock, newcfg.DAOForkBlock, head) { return newCompatError("DAO fork block", c.DAOForkBlock, newcfg.DAOForkBlock) } - if c.IsDAOFork(head) && c.DAOForkSupport != newcfg.DAOForkSupport { - return newCompatError("DAO fork support flag", c.DAOForkBlock, newcfg.DAOForkBlock) - } if incompatible(c.TangerineWhistleBlock, newcfg.TangerineWhistleBlock, head) { return newCompatError("Tangerine Whistle fork block", c.TangerineWhistleBlock, newcfg.TangerineWhistleBlock) } @@ -449,31 +343,6 @@ func (c *Config) checkCompatible(newcfg *Config, head uint64) *ConfigCompatError return newCompatError("Merge netsplit block", c.MergeNetsplitBlock, newcfg.MergeNetsplitBlock) } - // Parlia forks - if incompatible(c.RamanujanBlock, newcfg.RamanujanBlock, head) { - return newCompatError("Ramanujan fork block", c.RamanujanBlock, newcfg.RamanujanBlock) - } - if incompatible(c.NielsBlock, newcfg.NielsBlock, head) { - return newCompatError("Niels fork block", c.NielsBlock, newcfg.NielsBlock) - } - if incompatible(c.MirrorSyncBlock, newcfg.MirrorSyncBlock, head) { - return newCompatError("MirrorSync fork block", c.MirrorSyncBlock, newcfg.MirrorSyncBlock) - } - if incompatible(c.BrunoBlock, newcfg.BrunoBlock, head) { - return newCompatError("Bruno fork block", c.BrunoBlock, newcfg.BrunoBlock) - } - if incompatible(c.EulerBlock, newcfg.EulerBlock, head) { - return newCompatError("Euler fork block", c.EulerBlock, newcfg.EulerBlock) - } - if incompatible(c.GibbsBlock, newcfg.GibbsBlock, head) { - return newCompatError("Gibbs fork block", c.GibbsBlock, newcfg.GibbsBlock) - } - if incompatible(c.NanoBlock, newcfg.NanoBlock, head) { - return newCompatError("Nano fork block", c.NanoBlock, newcfg.NanoBlock) - } - if incompatible(c.MoranBlock, newcfg.MoranBlock, head) { - return newCompatError("moran fork block", c.MoranBlock, newcfg.MoranBlock) - } return nil } @@ -537,30 +406,6 @@ func (c *CliqueConfig) String() string { return "clique" } -// AuRaConfig is the consensus engine configs for proof-of-authority based sealing. -type AuRaConfig struct { - DBPath string - InMemory bool - Etherbase common.Address // same as miner etherbase -} - -// String implements the stringer interface, returning the consensus engine details. -func (c *AuRaConfig) String() string { - return "aura" -} - -type ParliaConfig struct { - DBPath string - InMemory bool - Period uint64 `json:"period"` // Number of seconds between blocks to enforce - Epoch uint64 `json:"epoch"` // Epoch length to update validatorSet -} - -// String implements the stringer interface, returning the consensus engine details. -func (b *ParliaConfig) String() string { - return "parlia" -} - // BorConfig is the consensus engine configs for Matic bor based sealing. 
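+// An illustrative (entirely hypothetical) chain-spec fragment for the fields below:
+//
+//	"bor": {"period": {"0": 2}, "jaipurBlock": 100, "delhiBlock": 200, "calcuttaBlock": 300}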
type BorConfig struct { Period map[string]uint64 `json:"period"` // Number of seconds between blocks to enforce @@ -573,8 +418,9 @@ type BorConfig struct { OverrideStateSyncRecords map[string]int `json:"overrideStateSyncRecords"` // override state records count BlockAlloc map[string]interface{} `json:"blockAlloc"` - JaipurBlock *big.Int `json:"jaipurBlock"` // Jaipur switch block (nil = no fork, 0 = already on jaipur) - DelhiBlock *big.Int `json:"delhiBlock"` // Delhi switch block (nil = no fork, 0 = already on delhi) + CalcuttaBlock *big.Int `json:"calcuttaBlock"` // Calcutta switch block (nil = no fork, 0 = already on calcutta) + JaipurBlock *big.Int `json:"jaipurBlock"` // Jaipur switch block (nil = no fork, 0 = already on jaipur) + DelhiBlock *big.Int `json:"delhiBlock"` // Delhi switch block (nil = no fork, 0 = already on delhi) } // String implements the stringer interface, returning the consensus engine details. @@ -606,6 +452,14 @@ func (c *BorConfig) IsDelhi(number uint64) bool { return isForked(c.DelhiBlock, number) } +func (c *BorConfig) IsCalcutta(number uint64) bool { + return isForked(c.CalcuttaBlock, number) +} + +func (c *BorConfig) IsOnCalcutta(number *big.Int) bool { + return numEqual(c.CalcuttaBlock, number) +} + func (c *BorConfig) calcConfig(field map[string]uint64, number uint64) uint64 { keys := sortMapKeys(field) for i := 0; i < len(keys)-1; i++ { @@ -648,14 +502,11 @@ func sortMapKeys(m map[string]uint64) []string { // Rules is a one time interface meaning that it shouldn't be used in between transition // phases. type Rules struct { - ChainID *big.Int - IsHomestead, IsTangerineWhistle, IsSpuriousDragon bool - IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool - IsBerlin, IsLondon, IsShanghai, IsCancun bool - IsSharding bool - IsNano, IsMoran, IsGibbs bool - IsEip1559FeeCollector bool - IsParlia, IsAura bool + ChainID *big.Int + IsHomestead, IsTangerineWhistle, IsSpuriousDragon bool + IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool + IsBerlin, IsLondon, IsShanghai, IsSharding, IsCancun, IsPrague bool + IsEip1559FeeCollector, IsAura bool } // Rules ensures c's ChainID is not nil and returns a new Rules instance @@ -679,10 +530,8 @@ func (c *Config) Rules(num uint64, time uint64) *Rules { IsShanghai: c.IsShanghai(time), IsSharding: c.IsSharding(time), IsCancun: c.IsCancun(time), - IsNano: c.IsNano(num), - IsMoran: c.IsMoran(num), + IsPrague: c.IsPrague(time), IsEip1559FeeCollector: c.IsEip1559FeeCollector(num), - IsParlia: c.Parlia != nil, IsAura: c.Aura != nil, } } diff --git a/chain/consensus.go b/chain/consensus.go index 76c012415..bf7ec9fba 100644 --- a/chain/consensus.go +++ b/chain/consensus.go @@ -6,6 +6,5 @@ const ( AuRaConsensus ConsensusName = "aura" EtHashConsensus ConsensusName = "ethash" CliqueConsensus ConsensusName = "clique" - ParliaConsensus ConsensusName = "parlia" BorConsensus ConsensusName = "bor" ) diff --git a/commitment/bin_patricia_hashed.go b/commitment/bin_patricia_hashed.go index 837e866ca..deb3fd7a3 100644 --- a/commitment/bin_patricia_hashed.go +++ b/commitment/bin_patricia_hashed.go @@ -1542,7 +1542,7 @@ func (bph *BinPatriciaHashed) ProcessUpdates(plainKeys, hashedKeys [][]byte, upd update := updates[i] // Update the cell - if update.Flags == DELETE_UPDATE { + if update.Flags == DeleteUpdate { bph.deleteBinaryCell(hashedKey) if bph.trace { fmt.Printf("key %x deleted\n", plainKey) @@ -1552,19 +1552,19 @@ func (bph *BinPatriciaHashed) ProcessUpdates(plainKeys, hashedKeys [][]byte, upd if bph.trace { 
fmt.Printf("accountFn updated key %x =>", plainKey) } - if update.Flags&BALANCE_UPDATE != 0 { + if update.Flags&BalanceUpdate != 0 { if bph.trace { fmt.Printf(" balance=%d", update.Balance.Uint64()) } cell.Balance.Set(&update.Balance) } - if update.Flags&NONCE_UPDATE != 0 { + if update.Flags&NonceUpdate != 0 { if bph.trace { fmt.Printf(" nonce=%d", update.Nonce) } cell.Nonce = update.Nonce } - if update.Flags&CODE_UPDATE != 0 { + if update.Flags&CodeUpdate != 0 { if bph.trace { fmt.Printf(" codeHash=%x", update.CodeHashOrStorage) } @@ -1573,7 +1573,7 @@ func (bph *BinPatriciaHashed) ProcessUpdates(plainKeys, hashedKeys [][]byte, upd if bph.trace { fmt.Printf("\n") } - if update.Flags&STORAGE_UPDATE != 0 { + if update.Flags&StorageUpdate != 0 { cell.setStorage(update.CodeHashOrStorage[:update.ValLength]) if bph.trace { fmt.Printf("\rstorageFn filled key %x => %x\n", plainKey, update.CodeHashOrStorage[:update.ValLength]) @@ -1715,7 +1715,7 @@ func (s *binState) Encode(buf []byte) ([]byte, error) { if err := binary.Write(ee, binary.BigEndian, uint16(len(s.Root))); err != nil { return nil, fmt.Errorf("encode root len: %w", err) } - if n, err := ee.Write(s.Root[:]); err != nil || n != len(s.Root) { + if n, err := ee.Write(s.Root); err != nil || n != len(s.Root) { return nil, fmt.Errorf("encode root: %w", err) } d := make([]byte, len(s.Depths)) diff --git a/commitment/commitment.go b/commitment/commitment.go index cb7a36bc1..a51cfcb59 100644 --- a/commitment/commitment.go +++ b/commitment/commitment.go @@ -571,3 +571,16 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData copy(target, m.buf.Bytes()) return target, nil } + +func ParseTrieVariant(s string) TrieVariant { + var trieVariant TrieVariant + switch s { + case "bin": + trieVariant = VariantBinPatriciaTrie + case "hex": + fallthrough + default: + trieVariant = VariantHexPatriciaTrie + } + return trieVariant +} diff --git a/commitment/hex_patricia_hashed.go b/commitment/hex_patricia_hashed.go index 9a0d11809..0fc7d63f8 100644 --- a/commitment/hex_patricia_hashed.go +++ b/commitment/hex_patricia_hashed.go @@ -1743,7 +1743,7 @@ func (hph *HexPatriciaHashed) ProcessUpdates(plainKeys, hashedKeys [][]byte, upd update := updates[i] // Update the cell - if update.Flags == DELETE_UPDATE { + if update.Flags == DeleteUpdate { hph.deleteCell(hashedKey) if hph.trace { fmt.Printf("key %x deleted\n", plainKey) @@ -1753,19 +1753,19 @@ func (hph *HexPatriciaHashed) ProcessUpdates(plainKeys, hashedKeys [][]byte, upd if hph.trace { fmt.Printf("accountFn updated key %x =>", plainKey) } - if update.Flags&BALANCE_UPDATE != 0 { + if update.Flags&BalanceUpdate != 0 { if hph.trace { fmt.Printf(" balance=%d", update.Balance.Uint64()) } cell.Balance.Set(&update.Balance) } - if update.Flags&NONCE_UPDATE != 0 { + if update.Flags&NonceUpdate != 0 { if hph.trace { fmt.Printf(" nonce=%d", update.Nonce) } cell.Nonce = update.Nonce } - if update.Flags&CODE_UPDATE != 0 { + if update.Flags&CodeUpdate != 0 { if hph.trace { fmt.Printf(" codeHash=%x", update.CodeHashOrStorage) } @@ -1774,7 +1774,7 @@ func (hph *HexPatriciaHashed) ProcessUpdates(plainKeys, hashedKeys [][]byte, upd if hph.trace { fmt.Printf("\n") } - if update.Flags&STORAGE_UPDATE != 0 { + if update.Flags&StorageUpdate != 0 { cell.setStorage(update.CodeHashOrStorage[:update.ValLength]) if hph.trace { fmt.Printf("\rstorageFn filled key %x => %x\n", plainKey, update.CodeHashOrStorage[:update.ValLength]) @@ -1825,28 +1825,28 @@ func (hph *HexPatriciaHashed) hashAndNibblizeKey(key 
[]byte) []byte { type UpdateFlags uint8 const ( - CODE_UPDATE UpdateFlags = 1 - DELETE_UPDATE UpdateFlags = 2 - BALANCE_UPDATE UpdateFlags = 4 - NONCE_UPDATE UpdateFlags = 8 - STORAGE_UPDATE UpdateFlags = 16 + CodeUpdate UpdateFlags = 1 + DeleteUpdate UpdateFlags = 2 + BalanceUpdate UpdateFlags = 4 + NonceUpdate UpdateFlags = 8 + StorageUpdate UpdateFlags = 16 ) func (uf UpdateFlags) String() string { var sb strings.Builder - if uf == DELETE_UPDATE { + if uf == DeleteUpdate { sb.WriteString("Delete") } else { - if uf&BALANCE_UPDATE != 0 { + if uf&BalanceUpdate != 0 { sb.WriteString("+Balance") } - if uf&NONCE_UPDATE != 0 { + if uf&NonceUpdate != 0 { sb.WriteString("+Nonce") } - if uf&CODE_UPDATE != 0 { + if uf&CodeUpdate != 0 { sb.WriteString("+Code") } - if uf&STORAGE_UPDATE != 0 { + if uf&StorageUpdate != 0 { sb.WriteString("+Storage") } } @@ -1888,18 +1888,18 @@ func (u *Update) DecodeForStorage(enc []byte) { func (u *Update) Encode(buf []byte, numBuf []byte) []byte { buf = append(buf, byte(u.Flags)) - if u.Flags&BALANCE_UPDATE != 0 { + if u.Flags&BalanceUpdate != 0 { buf = append(buf, byte(u.Balance.ByteLen())) buf = append(buf, u.Balance.Bytes()...) } - if u.Flags&NONCE_UPDATE != 0 { + if u.Flags&NonceUpdate != 0 { n := binary.PutUvarint(numBuf, u.Nonce) buf = append(buf, numBuf[:n]...) } - if u.Flags&CODE_UPDATE != 0 { + if u.Flags&CodeUpdate != 0 { buf = append(buf, u.CodeHashOrStorage[:]...) } - if u.Flags&STORAGE_UPDATE != 0 { + if u.Flags&StorageUpdate != 0 { n := binary.PutUvarint(numBuf, uint64(u.ValLength)) buf = append(buf, numBuf[:n]...) if u.ValLength > 0 { @@ -1915,7 +1915,7 @@ func (u *Update) Decode(buf []byte, pos int) (int, error) { } u.Flags = UpdateFlags(buf[pos]) pos++ - if u.Flags&BALANCE_UPDATE != 0 { + if u.Flags&BalanceUpdate != 0 { if len(buf) < pos+1 { return 0, fmt.Errorf("decode Update: buffer too small for balance len") } @@ -1927,7 +1927,7 @@ func (u *Update) Decode(buf []byte, pos int) (int, error) { u.Balance.SetBytes(buf[pos : pos+balanceLen]) pos += balanceLen } - if u.Flags&NONCE_UPDATE != 0 { + if u.Flags&NonceUpdate != 0 { var n int u.Nonce, n = binary.Uvarint(buf[pos:]) if n == 0 { @@ -1938,14 +1938,14 @@ func (u *Update) Decode(buf []byte, pos int) (int, error) { } pos += n } - if u.Flags&CODE_UPDATE != 0 { + if u.Flags&CodeUpdate != 0 { if len(buf) < pos+32 { return 0, fmt.Errorf("decode Update: buffer too small for codeHash") } copy(u.CodeHashOrStorage[:], buf[pos:pos+32]) pos += 32 } - if u.Flags&STORAGE_UPDATE != 0 { + if u.Flags&StorageUpdate != 0 { l, n := binary.Uvarint(buf[pos:]) if n == 0 { return 0, fmt.Errorf("decode Update: buffer too small for storage len") @@ -1967,16 +1967,16 @@ func (u *Update) Decode(buf []byte, pos int) (int, error) { func (u *Update) String() string { var sb strings.Builder sb.WriteString(fmt.Sprintf("Flags: [%s]", u.Flags)) - if u.Flags&BALANCE_UPDATE != 0 { + if u.Flags&BalanceUpdate != 0 { sb.WriteString(fmt.Sprintf(", Balance: [%d]", &u.Balance)) } - if u.Flags&NONCE_UPDATE != 0 { + if u.Flags&NonceUpdate != 0 { sb.WriteString(fmt.Sprintf(", Nonce: [%d]", u.Nonce)) } - if u.Flags&CODE_UPDATE != 0 { + if u.Flags&CodeUpdate != 0 { sb.WriteString(fmt.Sprintf(", CodeHash: [%x]", u.CodeHashOrStorage)) } - if u.Flags&STORAGE_UPDATE != 0 { + if u.Flags&StorageUpdate != 0 { sb.WriteString(fmt.Sprintf(", Storage: [%x]", u.CodeHashOrStorage[:u.ValLength])) } return sb.String() diff --git a/commitment/hex_patricia_hashed_fuzz_test.go b/commitment/hex_patricia_hashed_fuzz_test.go index 53432f83f..d97b84522 100644 --- 
a/commitment/hex_patricia_hashed_fuzz_test.go +++ b/commitment/hex_patricia_hashed_fuzz_test.go @@ -105,12 +105,12 @@ func Fuzz_ProcessUpdates_ArbitraryUpdateCount(f *testing.F) { aux := make([]byte, 32) - flg := UpdateFlags(updateSeed.Intn(int(CODE_UPDATE | DELETE_UPDATE | STORAGE_UPDATE | NONCE_UPDATE | BALANCE_UPDATE))) + flg := UpdateFlags(updateSeed.Intn(int(CodeUpdate | DeleteUpdate | StorageUpdate | NonceUpdate | BalanceUpdate))) switch { - case flg&BALANCE_UPDATE != 0: + case flg&BalanceUpdate != 0: builder.Balance(pkey, updateSeed.Uint64()).Nonce(pkey, updateSeed.Uint64()) continue - case flg&CODE_UPDATE != 0: + case flg&CodeUpdate != 0: keccak := sha3.NewLegacyKeccak256().(keccakState) var s [8]byte n, err := updateSeed.Read(s[:]) @@ -121,7 +121,7 @@ func Fuzz_ProcessUpdates_ArbitraryUpdateCount(f *testing.F) { builder.CodeHash(pkey, hex.EncodeToString(aux)) continue - case flg&STORAGE_UPDATE != 0: + case flg&StorageUpdate != 0: sz := updateSeed.Intn(length.Hash) n, err = updateSeed.Read(aux[:sz]) require.NoError(t, err) @@ -131,7 +131,7 @@ func Fuzz_ProcessUpdates_ArbitraryUpdateCount(f *testing.F) { keysSeed.Read(loc) builder.Storage(pkey, hex.EncodeToString(loc), hex.EncodeToString(aux[:sz])) continue - case flg&DELETE_UPDATE != 0: + case flg&DeleteUpdate != 0: continue default: continue diff --git a/commitment/patricia_state_mock_test.go b/commitment/patricia_state_mock_test.go index 499c6e9b8..82dc932a2 100644 --- a/commitment/patricia_state_mock_test.go +++ b/commitment/patricia_state_mock_test.go @@ -55,25 +55,25 @@ func (ms MockState) accountFn(plainKey []byte, cell *Cell) error { ms.t.Fatalf("accountFn key [%x] leftover bytes in [%x], consumed %x", plainKey, exBytes, pos) return nil } - if ex.Flags&STORAGE_UPDATE != 0 { + if ex.Flags&StorageUpdate != 0 { ms.t.Logf("accountFn reading storage item for key [%x]", plainKey) return fmt.Errorf("storage read by accountFn") } - if ex.Flags&DELETE_UPDATE != 0 { + if ex.Flags&DeleteUpdate != 0 { ms.t.Fatalf("accountFn reading deleted account for key [%x]", plainKey) return nil } - if ex.Flags&BALANCE_UPDATE != 0 { + if ex.Flags&BalanceUpdate != 0 { cell.Balance.Set(&ex.Balance) } else { cell.Balance.Clear() } - if ex.Flags&NONCE_UPDATE != 0 { + if ex.Flags&NonceUpdate != 0 { cell.Nonce = ex.Nonce } else { cell.Nonce = 0 } - if ex.Flags&CODE_UPDATE != 0 { + if ex.Flags&CodeUpdate != 0 { copy(cell.CodeHash[:], ex.CodeHashOrStorage[:]) } else { copy(cell.CodeHash[:], EmptyCodeHash) @@ -98,23 +98,23 @@ func (ms MockState) storageFn(plainKey []byte, cell *Cell) error { ms.t.Fatalf("storageFn key [%x] leftover bytes in [%x], consumed %x", plainKey, exBytes, pos) return nil } - if ex.Flags&BALANCE_UPDATE != 0 { + if ex.Flags&BalanceUpdate != 0 { ms.t.Logf("storageFn reading balance for key [%x]", plainKey) return nil } - if ex.Flags&NONCE_UPDATE != 0 { + if ex.Flags&NonceUpdate != 0 { ms.t.Fatalf("storageFn reading nonce for key [%x]", plainKey) return nil } - if ex.Flags&CODE_UPDATE != 0 { + if ex.Flags&CodeUpdate != 0 { ms.t.Fatalf("storageFn reading codeHash for key [%x]", plainKey) return nil } - if ex.Flags&DELETE_UPDATE != 0 { + if ex.Flags&DeleteUpdate != 0 { ms.t.Fatalf("storageFn reading deleted item for key [%x]", plainKey) return nil } - if ex.Flags&STORAGE_UPDATE != 0 { + if ex.Flags&StorageUpdate != 0 { copy(cell.Storage[:], ex.CodeHashOrStorage[:]) cell.StorageLen = len(ex.CodeHashOrStorage) } else { @@ -127,7 +127,7 @@ func (ms MockState) storageFn(plainKey []byte, cell *Cell) error { func (ms *MockState)
applyPlainUpdates(plainKeys [][]byte, updates []Update) error { for i, key := range plainKeys { update := updates[i] - if update.Flags&DELETE_UPDATE != 0 { + if update.Flags&DeleteUpdate != 0 { delete(ms.sm, string(key)) } else { if exBytes, ok := ms.sm[string(key)]; ok { @@ -139,20 +139,20 @@ func (ms *MockState) applyPlainUpdates(plainKeys [][]byte, updates []Update) err if pos != len(exBytes) { return fmt.Errorf("applyPlainUpdates key [%x] leftover bytes in [%x], consumed %x", key, exBytes, pos) } - if update.Flags&BALANCE_UPDATE != 0 { - ex.Flags |= BALANCE_UPDATE + if update.Flags&BalanceUpdate != 0 { + ex.Flags |= BalanceUpdate ex.Balance.Set(&update.Balance) } - if update.Flags&NONCE_UPDATE != 0 { - ex.Flags |= NONCE_UPDATE + if update.Flags&NonceUpdate != 0 { + ex.Flags |= NonceUpdate ex.Nonce = update.Nonce } - if update.Flags&CODE_UPDATE != 0 { - ex.Flags |= CODE_UPDATE + if update.Flags&CodeUpdate != 0 { + ex.Flags |= CodeUpdate copy(ex.CodeHashOrStorage[:], update.CodeHashOrStorage[:]) } - if update.Flags&STORAGE_UPDATE != 0 { - ex.Flags |= STORAGE_UPDATE + if update.Flags&StorageUpdate != 0 { + ex.Flags |= StorageUpdate copy(ex.CodeHashOrStorage[:], update.CodeHashOrStorage[:]) } ms.sm[string(key)] = ex.Encode(nil, ms.numBuf[:]) @@ -386,31 +386,31 @@ func (ub *UpdateBuilder) Build() (plainKeys, hashedKeys [][]byte, updates []Upda u := &updates[i] if key2 == nil { if balance, ok := ub.balances[string(key)]; ok { - u.Flags |= BALANCE_UPDATE + u.Flags |= BalanceUpdate u.Balance.Set(balance) } if nonce, ok := ub.nonces[string(key)]; ok { - u.Flags |= NONCE_UPDATE + u.Flags |= NonceUpdate u.Nonce = nonce } if codeHash, ok := ub.codeHashes[string(key)]; ok { - u.Flags |= CODE_UPDATE + u.Flags |= CodeUpdate copy(u.CodeHashOrStorage[:], codeHash[:]) } if _, del := ub.deletes[string(key)]; del { - u.Flags = DELETE_UPDATE + u.Flags = DeleteUpdate continue } } else { if dm, ok1 := ub.deletes2[string(key)]; ok1 { if _, ok2 := dm[string(key2)]; ok2 { - u.Flags = DELETE_UPDATE + u.Flags = DeleteUpdate continue } } if sm, ok1 := ub.storages[string(key)]; ok1 { if storage, ok2 := sm[string(key2)]; ok2 { - u.Flags |= STORAGE_UPDATE + u.Flags |= StorageUpdate u.CodeHashOrStorage = [length.Hash]byte{} u.ValLength = len(storage) copy(u.CodeHashOrStorage[:], storage) diff --git a/common/background/progress.go b/common/background/progress.go index 342c9ec7f..283bd1175 100644 --- a/common/background/progress.go +++ b/common/background/progress.go @@ -20,14 +20,14 @@ import ( "fmt" "strings" "sync" + "sync/atomic" btree2 "github.com/tidwall/btree" - "go.uber.org/atomic" ) // Progress - tracks background job progress type Progress struct { - Name atomic.String + Name atomic.Pointer[string] Processed, Total atomic.Uint64 i int } @@ -48,7 +48,13 @@ type ProgressSet struct { func NewProgressSet() *ProgressSet { return &ProgressSet{list: btree2.NewMap[int, *Progress](128)} } - +func (s *ProgressSet) AddNew(fName string, total uint64) *Progress { + p := &Progress{} + p.Name.Store(&fName) + p.Total.Store(total) + s.Add(p) + return p +} func (s *ProgressSet) Add(p *Progress) { s.lock.Lock() defer s.lock.Unlock() @@ -62,6 +68,11 @@ func (s *ProgressSet) Delete(p *Progress) { defer s.lock.Unlock() s.list.Delete(p.i) } +func (s *ProgressSet) Has() bool { + s.lock.Lock() + defer s.lock.Unlock() + return s.list.Len() > 0 +} func (s *ProgressSet) String() string { s.lock.RLock() @@ -69,7 +80,7 @@ func (s *ProgressSet) String() string { var sb strings.Builder var i int s.list.Scan(func(_ int, p *Progress) bool { -
sb.WriteString(fmt.Sprintf("%s=%d%%", p.Name.Load(), p.percent())) + sb.WriteString(fmt.Sprintf("%s=%d%%", *p.Name.Load(), p.percent())) i++ if i != s.list.Len() { sb.WriteString(", ") diff --git a/common/hash.go b/common/hash.go index ca620b920..72c4b570f 100644 --- a/common/hash.go +++ b/common/hash.go @@ -173,3 +173,7 @@ type CodeRecord struct { TxNumber uint64 CodeHash Hash } + +func FromHex(in string) []byte { + return hexutility.MustDecodeHex(in) +} diff --git a/common/hexutility/bytes.go b/common/hexutility/bytes.go new file mode 100644 index 000000000..fe40256c2 --- /dev/null +++ b/common/hexutility/bytes.go @@ -0,0 +1,66 @@ +/* + Copyright 2023 The Erigon contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package hexutility + +import ( + "encoding/hex" + "encoding/json" + "reflect" +) + +var bytesT = reflect.TypeOf(Bytes(nil)) + +// Bytes marshals/unmarshals as a JSON string with 0x prefix. +// The empty slice marshals as "0x". +type Bytes []byte + +const hexPrefix = `0x` + +// MarshalText implements encoding.TextMarshaler +func (b Bytes) MarshalText() ([]byte, error) { + result := make([]byte, len(b)*2+2) + copy(result, hexPrefix) + hex.Encode(result[2:], b) + return result, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (b *Bytes) UnmarshalJSON(input []byte) error { + if !isString(input) { + return &json.UnmarshalTypeError{Value: "non-string", Type: bytesT} + } + return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), bytesT) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (b *Bytes) UnmarshalText(input []byte) error { + raw, err := checkText(input, true) + if err != nil { + return err + } + dec := make([]byte, len(raw)/2) + _, err = hex.Decode(dec, raw) + if err == nil { + *b = dec + } + return err +} + +// String returns the hex encoding of b. +func (b Bytes) String() string { + return Encode(b) +} diff --git a/common/hexutility/text.go b/common/hexutility/text.go index 888b2aad6..0c51aeec1 100644 --- a/common/hexutility/text.go +++ b/common/hexutility/text.go @@ -28,13 +28,13 @@ const ( // UnmarshalFixedText decodes the input as a string with 0x prefix. The length of out // determines the required input length. This function is commonly used to implement the // UnmarshalText method for fixed-size types. -func UnmarshalFixedText(typname string, input, out []byte) error { +func UnmarshalFixedText(typeName string, input, out []byte) error { raw, err := checkText(input, true) if err != nil { return err } if len(raw)/2 != len(out) { - return fmt.Errorf("hex string has length %d, want %d for %s", len(raw), len(out)*2, typname) + return fmt.Errorf("hex string has length %d, want %d for %s", len(raw), len(out)*2, typeName) } // Pre-verify syntax before modifying out. for _, b := range raw { diff --git a/common/length/length.go b/common/length/length.go index 07836a690..c3794b6fa 100644 --- a/common/length/length.go +++ b/common/length/length.go @@ -18,6 +18,7 @@ package length // Lengths of hashes and addresses in bytes. 
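+// PeerID below is, presumably, the devp2p node ID length: a 64-byte uncompressed
+// secp256k1 public key without its 0x04 prefix (the format used in enode URLs).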
const ( + PeerID = 64 // Hash is the expected length of the hash (in bytes) Hash = 32 // Addr is the expected length of the address (in bytes) diff --git a/common/metrics/metrics_enabled.go b/common/metrics/metrics_enabled.go index cfbe739c4..dff515439 100644 --- a/common/metrics/metrics_enabled.go +++ b/common/metrics/metrics_enabled.go @@ -16,36 +16,6 @@ package metrics -import ( - "os" - "strings" -) - -// Enabled is checked by the constructor functions for all of the -// standard metrics. If it is true, the metric returned is a stub. -// -// This global kill-switch helps quantify the observer effect and makes -// for less cluttered pprof profiles. -var Enabled = false - -// Init enables or disables the metrics system. Since we need this to run before -// any other code gets to create meters and timers, we'll actually do an ugly hack -// and peek into the command line args for the metrics flag. -func init() { - for _, arg := range os.Args { - flag := strings.TrimLeft(arg, "-") - - for _, enabler := range enablerFlags { - if !Enabled && flag == enabler { - Enabled = true - } - } - } -} - -// enablerFlags is the CLI flag names to use to enable metrics collections. -var enablerFlags = []string{"metrics"} - // Config contains the configuration for the metric collection. type Config struct { //nolint:maligned Enabled bool `toml:",omitempty"` diff --git a/compress/compress.go b/compress/compress.go index 63ec5716d..7a3c64ba1 100644 --- a/compress/compress.go +++ b/compress/compress.go @@ -129,6 +129,12 @@ func (c *Compressor) SetTrace(trace bool) { func (c *Compressor) Count() int { return int(c.wordsCount) } func (c *Compressor) AddWord(word []byte) error { + select { + case <-c.ctx.Done(): + return c.ctx.Err() + default: + } + c.wordsCount++ l := 2*len(word) + 2 if c.superstringLen+l > superstringLimit { @@ -152,6 +158,12 @@ func (c *Compressor) AddWord(word []byte) error { } func (c *Compressor) AddUncompressedWord(word []byte) error { + select { + case <-c.ctx.Done(): + return c.ctx.Err() + default: + } + c.wordsCount++ return c.uncompressedFile.AppendUncompressed(word) } @@ -729,11 +741,11 @@ func (r CompressionRatio) String() string { return fmt.Sprintf("%.2f", r) } func Ratio(f1, f2 string) (CompressionRatio, error) { s1, err := os.Stat(f1) if err != nil { - return 0, nil + return 0, err } s2, err := os.Stat(f2) if err != nil { - return 0, nil + return 0, err } return CompressionRatio(float64(s1.Size()) / float64(s2.Size())), nil } diff --git a/compress/decompress.go b/compress/decompress.go index 3d0b2bf11..0770b3961 100644 --- a/compress/decompress.go +++ b/compress/decompress.go @@ -187,7 +187,7 @@ func NewDecompressor(compressedFilePath string) (*Decompressor, error) { for i < dictSize { d, ns := binary.Uvarint(data[i:]) - if d > 2048 { + if d > 64 { // mainnet has maxDepth 31 return nil, fmt.Errorf("dictionary is invalid: patternMaxDepth=%d", d) } depths = append(depths, d) diff --git a/compress/decompress_test.go b/compress/decompress_test.go index 9d903abd8..209ad9992 100644 --- a/compress/decompress_test.go +++ b/compress/decompress_test.go @@ -98,6 +98,57 @@ func TestDecompressMatchOK(t *testing.T) { } } +func prepareStupidDict(t *testing.T, size int) *Decompressor { + t.Helper() + tmpDir := t.TempDir() + file := filepath.Join(tmpDir, "compressed2") + t.Name() + c, err := NewCompressor(context.Background(), t.Name(), file, tmpDir, 1, 2, log.LvlDebug) + if err != nil { + t.Fatal(err) + } + defer c.Close() + for i := 0; i < size; i++ { + if err = 
c.AddWord([]byte(fmt.Sprintf("word-%d", i))); err != nil { + t.Fatal(err) + } + } + if err = c.Compress(); err != nil { + t.Fatal(err) + } + var d *Decompressor + if d, err = NewDecompressor(file); err != nil { + t.Fatal(err) + } + return d +} + +func TestDecompressMatchOKCondensed(t *testing.T) { + condensePatternTableBitThreshold = 4 + d := prepareStupidDict(t, 10000) + defer func() { condensePatternTableBitThreshold = 9 }() + defer d.Close() + + g := d.MakeGetter() + i := 0 + for g.HasNext() { + if i%2 != 0 { + expected := fmt.Sprintf("word-%d", i) + ok, _ := g.Match([]byte(expected)) + if !ok { + t.Errorf("expected match with %s", expected) + } + } else { + word, _ := g.Next(nil) + expected := fmt.Sprintf("word-%d", i) + if string(word) != expected { + t.Errorf("expected %s, got (hex) %x", expected, word) + } + } + i++ + } +} + func TestDecompressMatchNotOK(t *testing.T) { d := prepareLoremDict(t) defer d.Close() diff --git a/compress/parallel_compress.go b/compress/parallel_compress.go index 76251fbcc..1cd3752a9 100644 --- a/compress/parallel_compress.go +++ b/compress/parallel_compress.go @@ -35,7 +35,6 @@ import ( "github.com/ledgerwatch/erigon-lib/patricia" "github.com/ledgerwatch/erigon-lib/sais" "github.com/ledgerwatch/log/v3" - atomic2 "go.uber.org/atomic" "golang.org/x/exp/slices" ) @@ -181,7 +180,7 @@ func optimiseCluster(trace bool, input []byte, mf2 *patricia.MatchFinder2, outpu return output, patterns, uncovered } -func reduceDictWorker(trace bool, inputCh chan *CompressionWord, outCh chan *CompressionWord, completion *sync.WaitGroup, trie *patricia.PatriciaTree, inputSize, outputSize *atomic2.Uint64, posMap map[uint64]uint64) { +func reduceDictWorker(trace bool, inputCh chan *CompressionWord, outCh chan *CompressionWord, completion *sync.WaitGroup, trie *patricia.PatriciaTree, inputSize, outputSize *atomic.Uint64, posMap map[uint64]uint64) { defer completion.Done() var output = make([]byte, 0, 256) var uncovered = make([]int, 256) @@ -262,7 +261,7 @@ func reducedict(ctx context.Context, trace bool, logPrefix, segmentFilePath stri log.Log(lvl, fmt.Sprintf("[%s] dictionary file parsed", logPrefix), "entries", len(code2pattern)) } ch := make(chan *CompressionWord, 10_000) - inputSize, outputSize := atomic2.NewUint64(0), atomic2.NewUint64(0) + inputSize, outputSize := &atomic.Uint64{}, &atomic.Uint64{} var collectors []*etl.Collector defer func() { diff --git a/direct/sentry_client.go b/direct/sentry_client.go index 9fe9114fa..5b06195c7 100644 --- a/direct/sentry_client.go +++ b/direct/sentry_client.go @@ -206,10 +206,6 @@ func (c *SentryClientDirect) PeerMinBlock(ctx context.Context, in *sentry.PeerMi return c.server.PeerMinBlock(ctx, in) } -func (c *SentryClientDirect) PeerUseless(ctx context.Context, in *sentry.PeerUselessRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - return c.server.PeerUseless(ctx, in) -} - func (c *SentryClientDirect) SendMessageByMinBlock(ctx context.Context, in *sentry.SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*sentry.SentPeers, error) { return c.server.SendMessageByMinBlock(ctx, in) } diff --git a/downloader/downloader.go b/downloader/downloader.go index 8a64d0b89..15ccb18af 100644 --- a/downloader/downloader.go +++ b/downloader/downloader.go @@ -23,6 +23,7 @@ import ( "os" "path/filepath" "sync" + "sync/atomic" "time" "github.com/anacrolix/torrent" @@ -34,7 +35,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/log/v3" - "go.uber.org/atomic" 
"golang.org/x/sync/semaphore" ) @@ -50,7 +50,9 @@ type Downloader struct { statsLock *sync.RWMutex stats AggStats - folder storage.ClientImplCloser + folder storage.ClientImplCloser + stopMainLoop context.CancelFunc + wg sync.WaitGroup } type AggStats struct { @@ -83,7 +85,7 @@ func New(ctx context.Context, cfg *downloadercfg.Cfg) (*Downloader, error) { return nil, err } - db, c, m, torrentClient, err := openClient(ctx, cfg.ClientConfig) + db, c, m, torrentClient, err := openClient(cfg.ClientConfig) if err != nil { return nil, fmt.Errorf("openClient: %w", err) } @@ -115,6 +117,104 @@ func New(ctx context.Context, cfg *downloadercfg.Cfg) (*Downloader, error) { return d, nil } +func (d *Downloader) MainLoopInBackground(ctx context.Context, silent bool) { + ctx, d.stopMainLoop = context.WithCancel(ctx) + d.wg.Add(1) + go func() { + defer d.wg.Done() + d.mainLoop(ctx, silent) + }() +} + +func (d *Downloader) mainLoop(ctx context.Context, silent bool) { + var sem = semaphore.NewWeighted(int64(d.cfg.DownloadSlots)) + + go func() { + for { + torrents := d.Torrent().Torrents() + for _, t := range torrents { + <-t.GotInfo() + if t.Complete.Bool() { + continue + } + if err := sem.Acquire(ctx, 1); err != nil { + return + } + t.AllowDataDownload() + t.DownloadAll() + go func(t *torrent.Torrent) { + defer sem.Release(1) + //r := t.NewReader() + //r.SetReadahead(t.Length()) + //_, _ = io.Copy(io.Discard, r) // enable streaming - it will prioritize sequential download + + <-t.Complete.On() + }(t) + } + time.Sleep(30 * time.Second) + } + }() + + logEvery := time.NewTicker(20 * time.Second) + defer logEvery.Stop() + + statInterval := 20 * time.Second + statEvery := time.NewTicker(statInterval) + defer statEvery.Stop() + + justCompleted := true + for { + select { + case <-ctx.Done(): + return + case <-statEvery.C: + d.ReCalcStats(statInterval) + + case <-logEvery.C: + if silent { + continue + } + + stats := d.Stats() + + if stats.MetadataReady < stats.FilesTotal { + log.Info(fmt.Sprintf("[snapshots] Waiting for torrents metadata: %d/%d", stats.MetadataReady, stats.FilesTotal)) + continue + } + + if stats.Completed { + if justCompleted { + justCompleted = false + // force fsync of db. 
to not lose results of downloading on power-off + _ = d.db.Update(ctx, func(tx kv.RwTx) error { return nil }) + } + + log.Info("[snapshots] Seeding", + "up", common2.ByteCount(stats.UploadRate)+"/s", + "peers", stats.PeersUnique, + "conns", stats.ConnectionsTotal, + "files", stats.FilesTotal) + continue + } + + log.Info("[snapshots] Downloading", + "progress", fmt.Sprintf("%.2f%% %s/%s", stats.Progress, common2.ByteCount(stats.BytesCompleted), common2.ByteCount(stats.BytesTotal)), + "download", common2.ByteCount(stats.DownloadRate)+"/s", + "upload", common2.ByteCount(stats.UploadRate)+"/s", + "peers", stats.PeersUnique, + "conns", stats.ConnectionsTotal, + "files", stats.FilesTotal) + + if stats.PeersUnique == 0 { + ips := d.Torrent().BadPeerIPs() + if len(ips) > 0 { + log.Info("[snapshots] Stats", "banned", ips) + } + } + } + } +} + func (d *Downloader) SnapDir() string { d.clientLock.RLock() defer d.clientLock.RUnlock() @@ -217,7 +317,7 @@ func (d *Downloader) verify() error { defer logEvery.Stop() wg := &sync.WaitGroup{} - j := atomic.NewInt64(0) + j := atomic.Int64{} for _, t := range d.torrentClient.Torrents() { wg.Add(1) @@ -225,7 +325,7 @@ func (d *Downloader) verify() error { defer wg.Done() <-t.GotInfo() for i := 0; i < t.NumPieces(); i++ { - j.Inc() + j.Add(1) t.Piece(i).VerifyData() select { @@ -260,7 +360,7 @@ func (d *Downloader) addSegments() error { } files = append(files, files2...) wg := &sync.WaitGroup{} - i := atomic.NewInt64(0) + i := atomic.Int64{} for _, f := range files { wg.Add(1) go func(f string) { @@ -271,10 +371,10 @@ func (d *Downloader) addSegments() error { return } - i.Inc() + i.Add(1) select { case <-logEvery.C: - log.Info("[snpshots] initializing", "files", fmt.Sprintf("%s/%d", i.String(), len(files))) + log.Info("[snapshots] initializing", "files", fmt.Sprintf("%d/%d", i.Load(), len(files))) default: } }(f) @@ -290,12 +390,14 @@ func (d *Downloader) Stats() AggStats { } func (d *Downloader) Close() { + d.stopMainLoop() + d.wg.Wait() d.torrentClient.Close() if err := d.folder.Close(); err != nil { - log.Warn("[Snapshots] folder.close", "err", err) + log.Warn("[snapshots] folder.close", "err", err) } if err := d.pieceCompletionDB.Close(); err != nil { - log.Warn("[Snapshots] pieceCompletionDB.close", "err", err) + log.Warn("[snapshots] pieceCompletionDB.close", "err", err) } d.db.Close() } @@ -322,7 +424,7 @@ func (d *Downloader) Torrent() *torrent.Client { return d.torrentClient } -func openClient(ctx context.Context, cfg *torrent.ClientConfig) (db kv.RwDB, c storage.PieceCompletion, m storage.ClientImplCloser, torrentClient *torrent.Client, err error) { +func openClient(cfg *torrent.ClientConfig) (db kv.RwDB, c storage.PieceCompletion, m storage.ClientImplCloser, torrentClient *torrent.Client, err error) { snapDir := cfg.DataDir db, err = mdbx.NewMDBX(log.New()). Label(kv.DownloaderDB). 
@@ -333,7 +435,7 @@ func openClient(ctx context.Context, cfg *torrent.ClientConfig) (db kv.RwDB, c s if err != nil { return nil, nil, nil, nil, err } - c, err = NewMdbxPieceCompletion(ctx, db) + c, err = NewMdbxPieceCompletion(db) if err != nil { return nil, nil, nil, nil, fmt.Errorf("torrentcfg.NewMdbxPieceCompletion: %w", err) } @@ -353,92 +455,3 @@ func openClient(ctx context.Context, cfg *torrent.ClientConfig) (db kv.RwDB, c s return db, c, m, torrentClient, nil } - -func MainLoop(ctx context.Context, d *Downloader, silent bool) { - var sem = semaphore.NewWeighted(int64(d.cfg.DownloadSlots)) - - go func() { - for { - torrents := d.Torrent().Torrents() - for _, t := range torrents { - <-t.GotInfo() - if t.Complete.Bool() { - continue - } - if err := sem.Acquire(ctx, 1); err != nil { - return - } - t.AllowDataDownload() - t.DownloadAll() - go func(t *torrent.Torrent) { - defer sem.Release(1) - //r := t.NewReader() - //r.SetReadahead(t.Length()) - //_, _ = io.Copy(io.Discard, r) // enable streaming - it will prioritize sequential download - - <-t.Complete.On() - }(t) - } - time.Sleep(30 * time.Second) - } - }() - - logEvery := time.NewTicker(20 * time.Second) - defer logEvery.Stop() - - statInterval := 20 * time.Second - statEvery := time.NewTicker(statInterval) - defer statEvery.Stop() - - justCompleted := true - for { - select { - case <-ctx.Done(): - return - case <-statEvery.C: - d.ReCalcStats(statInterval) - - case <-logEvery.C: - if silent { - continue - } - - stats := d.Stats() - - if stats.MetadataReady < stats.FilesTotal { - log.Info(fmt.Sprintf("[Snapshots] Waiting for torrents metadata: %d/%d", stats.MetadataReady, stats.FilesTotal)) - continue - } - - if stats.Completed { - if justCompleted { - justCompleted = false - // force fsync of db. 
to not loose results of downloading on power-off - _ = d.db.Update(ctx, func(tx kv.RwTx) error { return nil }) - } - - log.Info("[Snapshots] Seeding", - "up", common2.ByteCount(stats.UploadRate)+"/s", - "peers", stats.PeersUnique, - "connections", stats.ConnectionsTotal, - "files", stats.FilesTotal) - continue - } - - log.Info("[Snapshots] Downloading", - "progress", fmt.Sprintf("%.2f%% %s/%s", stats.Progress, common2.ByteCount(stats.BytesCompleted), common2.ByteCount(stats.BytesTotal)), - "download", common2.ByteCount(stats.DownloadRate)+"/s", - "upload", common2.ByteCount(stats.UploadRate)+"/s", - "peers", stats.PeersUnique, - "connections", stats.ConnectionsTotal, - "files", stats.FilesTotal) - - if stats.PeersUnique == 0 { - ips := d.Torrent().BadPeerIPs() - if len(ips) > 0 { - log.Info("[Snapshots] Stats", "banned", ips) - } - } - } - } -} diff --git a/downloader/downloadercfg/downloadercfg.go b/downloader/downloadercfg/downloadercfg.go index 7df81a7df..aa5dbf241 100644 --- a/downloader/downloadercfg/downloadercfg.go +++ b/downloader/downloadercfg/downloadercfg.go @@ -18,9 +18,11 @@ package downloadercfg import ( "io/ioutil" + "net" "runtime" "strings" + "github.com/anacrolix/dht/v2" lg "github.com/anacrolix/log" "github.com/anacrolix/torrent" "github.com/c2h5oh/datasize" @@ -68,7 +70,7 @@ func Default() *torrent.ClientConfig { return torrentConfig } -func New(snapDir string, version string, verbosity lg.Level, downloadRate, uploadRate datasize.ByteSize, port, connsPerFile, downloadSlots int) (*Cfg, error) { +func New(snapDir string, version string, verbosity lg.Level, downloadRate, uploadRate datasize.ByteSize, port, connsPerFile, downloadSlots int, staticPeers []string) (*Cfg, error) { torrentConfig := Default() torrentConfig.ExtendedHandshakeClientVersion = version @@ -95,6 +97,42 @@ func New(snapDir string, version string, verbosity lg.Level, downloadRate, uploa torrentConfig.Logger = lg.Default.FilterLevel(verbosity) torrentConfig.Logger.Handlers = []lg.Handler{adapterHandler{}} + if len(staticPeers) > 0 { + torrentConfig.NoDHT = false + //defaultNodes := torrentConfig.DhtStartingNodes + torrentConfig.DhtStartingNodes = func(network string) dht.StartingNodesGetter { + return func() ([]dht.Addr, error) { + addrs, err := dht.GlobalBootstrapAddrs(network) + if err != nil { + return nil, err + } + + for _, seed := range staticPeers { + if network == "udp" { + var addr *net.UDPAddr + addr, err := net.ResolveUDPAddr(network, seed+":80") + if err != nil { + log.Warn("[downloader] Cannot UDP resolve address", "network", network, "addr", seed) + continue + } + addrs = append(addrs, dht.NewAddr(addr)) + } + if network == "tcp" { + var addr *net.TCPAddr + addr, err := net.ResolveTCPAddr(network, seed+":80") + if err != nil { + log.Warn("[downloader] Cannot TCP resolve address", "network", network, "addr", seed) + continue + } + addrs = append(addrs, dht.NewAddr(addr)) + } + } + return addrs, nil + } + } + //staticPeers + } + return &Cfg{ClientConfig: torrentConfig, DownloadSlots: downloadSlots}, nil } diff --git a/downloader/downloadercfg/logger.go b/downloader/downloadercfg/logger.go index 30b8905b4..59bff4811 100644 --- a/downloader/downloadercfg/logger.go +++ b/downloader/downloadercfg/logger.go @@ -91,6 +91,12 @@ func (b adapterHandler) Handle(r lg.Record) { if strings.Contains(str, "requested chunk too long") { // suppress useless errors break } + if strings.Contains(str, "reservation cancelled") { // suppress useless errors + break + } + if strings.Contains(str, "received invalid 
reject") { // suppress useless errors + break + } log.Warn(str) case lg.Error: diff --git a/downloader/mdbx_piece_completion.go b/downloader/mdbx_piece_completion.go index 89eb56e1b..6f209cd19 100644 --- a/downloader/mdbx_piece_completion.go +++ b/downloader/mdbx_piece_completion.go @@ -32,19 +32,18 @@ const ( ) type mdbxPieceCompletion struct { - db kv.RwDB - ctx context.Context + db kv.RwDB } var _ storage.PieceCompletion = (*mdbxPieceCompletion)(nil) -func NewMdbxPieceCompletion(ctx context.Context, db kv.RwDB) (ret storage.PieceCompletion, err error) { - ret = &mdbxPieceCompletion{ctx: ctx, db: db} +func NewMdbxPieceCompletion(db kv.RwDB) (ret storage.PieceCompletion, err error) { + ret = &mdbxPieceCompletion{db: db} return } func (m mdbxPieceCompletion) Get(pk metainfo.PieceKey) (cn storage.Completion, err error) { - err = m.db.View(m.ctx, func(tx kv.Tx) error { + err = m.db.View(context.Background(), func(tx kv.Tx) error { var key [infohash.Size + 4]byte copy(key[:], pk.InfoHash[:]) binary.BigEndian.PutUint32(key[infohash.Size:], uint32(pk.Index)) @@ -84,12 +83,12 @@ func (m mdbxPieceCompletion) Set(pk metainfo.PieceKey, b bool) error { // 1K fsyncs/2minutes it's quite expensive, but even on cloud (high latency) drive it allow download 100mb/s // and Erigon doesn't do anything when downloading snapshots if b { - tx, err = m.db.BeginRwNosync(m.ctx) + tx, err = m.db.BeginRwNosync(context.Background()) if err != nil { return err } } else { - tx, err = m.db.BeginRw(m.ctx) + tx, err = m.db.BeginRw(context.Background()) if err != nil { return err } diff --git a/downloader/mdbx_piece_completion_test.go b/downloader/mdbx_piece_completion_test.go index 683ae402d..c2035501a 100644 --- a/downloader/mdbx_piece_completion_test.go +++ b/downloader/mdbx_piece_completion_test.go @@ -17,7 +17,6 @@ package downloader import ( - "context" "testing" "github.com/ledgerwatch/erigon-lib/kv/memdb" @@ -30,7 +29,7 @@ import ( func TestMdbxPieceCompletion(t *testing.T) { db := memdb.NewTestDownloaderDB(t) - pc, err := NewMdbxPieceCompletion(context.Background(), db) + pc, err := NewMdbxPieceCompletion(db) require.NoError(t, err) defer pc.Close() diff --git a/downloader/util.go b/downloader/util.go index f478e05e6..92e282111 100644 --- a/downloader/util.go +++ b/downloader/util.go @@ -30,6 +30,7 @@ import ( "runtime" "strconv" "sync" + "sync/atomic" "time" "github.com/anacrolix/torrent" @@ -45,7 +46,7 @@ import ( "github.com/ledgerwatch/erigon-lib/downloader/trackers" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/log/v3" - atomic2 "go.uber.org/atomic" + "golang.org/x/sync/semaphore" ) @@ -235,14 +236,14 @@ func BuildTorrentFilesIfNeed(ctx context.Context, snapDir string) ([]string, err wg := &sync.WaitGroup{} workers := cmp.Max(1, runtime.GOMAXPROCS(-1)-1) * 2 var sem = semaphore.NewWeighted(int64(workers)) - i := atomic2.NewInt32(0) + i := atomic.Int32{} for _, f := range files { wg.Add(1) if err := sem.Acquire(ctx, 1); err != nil { return nil, err } go func(f string) { - defer i.Inc() + defer i.Add(1) defer sem.Release(1) defer wg.Done() if err := buildTorrentIfNeed(f, snapDir); err != nil { @@ -254,7 +255,7 @@ func BuildTorrentFilesIfNeed(ctx context.Context, snapDir string) ([]string, err case <-ctx.Done(): errs <- ctx.Err() case <-logEvery.C: - log.Info("[Snapshots] Creating .torrent files", "Progress", fmt.Sprintf("%d/%d", i.Load(), len(files))) + log.Info("[snapshots] Creating .torrent files", "Progress", fmt.Sprintf("%d/%d", i.Load(), len(files))) } }(f) } @@ -432,12 +433,12 @@ func 
VerifyDtaFiles(ctx context.Context, snapDir string) error { j++ if !good { failsAmount++ - log.Error("[Snapshots] Verify hash mismatch", "at piece", i, "file", info.Name) + log.Error("[snapshots] Verify hash mismatch", "at piece", i, "file", info.Name) return ErrSkip } select { case <-logEvery.C: - log.Info("[Snapshots] Verify", "Progress", fmt.Sprintf("%.2f%%", 100*float64(j)/float64(totalPieces))) + log.Info("[snapshots] Verify", "Progress", fmt.Sprintf("%.2f%%", 100*float64(j)/float64(totalPieces))) case <-ctx.Done(): return ctx.Err() default: @@ -453,7 +454,7 @@ func VerifyDtaFiles(ctx context.Context, snapDir string) error { if failsAmount > 0 { return fmt.Errorf("not all files are valid") } - log.Info("[Snapshots] Verify done") + log.Info("[snapshots] Verify done") return nil } diff --git a/etl/README.md b/etl/README.md index c26a7ccc2..9a97c2706 100644 --- a/etl/README.md +++ b/etl/README.md @@ -152,7 +152,7 @@ To avoid that, the ETL framework allows storing progress by setting `OnLoadCommi Then we can use this data to know the progress the ETL transformation made. -You can also specify `ExtractStartKey` and `ExtractEndKey` to limit the nubmer +You can also specify `ExtractStartKey` and `ExtractEndKey` to limit the number of items transformed. ## Ways to work with ETL framework diff --git a/etl/buffers.go b/etl/buffers.go index 09597c59c..b73ecb5f4 100644 --- a/etl/buffers.go +++ b/etl/buffers.go @@ -80,16 +80,19 @@ type sortableBuffer struct { // Put adds key and value to the buffer. These slices will not be accessed later, // so no copying is necessary func (b *sortableBuffer) Put(k, v []byte) { - b.offsets = append(b.offsets, len(b.data)) - b.lens = append(b.lens, len(k)) - if len(k) > 0 { - b.data = append(b.data, k...) + lk, lv := len(k), len(v) + if k == nil { + lk = -1 } - b.offsets = append(b.offsets, len(b.data)) - b.lens = append(b.lens, len(v)) - if len(v) > 0 { - b.data = append(b.data, v...) + if v == nil { + lv = -1 } + b.lens = append(b.lens, lk, lv) + + b.offsets = append(b.offsets, len(b.data)) + b.data = append(b.data, k...) + b.offsets = append(b.offsets, len(b.data)) + b.data = append(b.data, v...) } func (b *sortableBuffer) Size() int { @@ -121,9 +124,25 @@ func (b *sortableBuffer) Get(i int, keyBuf, valBuf []byte) ([]byte, []byte) { keyLen, valLen := b.lens[i2], b.lens[i2+1] if keyLen > 0 { keyBuf = append(keyBuf, b.data[keyOffset:keyOffset+keyLen]...) + } else if keyLen == 0 { + if keyBuf != nil { + keyBuf = keyBuf[:0] + } else { + keyBuf = []byte{} + } + } else { + keyBuf = nil } if valLen > 0 { valBuf = append(valBuf, b.data[valOffset:valOffset+valLen]...) 
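+		// lens encodes -1 for a nil slice and 0 for a non-nil empty slice (see Put above),
+		// so the branches below restore exactly the nil-ness the caller stored.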
+ } else if valLen == 0 { + if valBuf != nil { + valBuf = valBuf[:0] + } else { + valBuf = []byte{} + } + } else { + valBuf = nil } return keyBuf, valBuf } @@ -148,10 +167,13 @@ func (b *sortableBuffer) Write(w io.Writer) error { var numBuf [binary.MaxVarintLen64]byte for i, offset := range b.offsets { l := b.lens[i] - n := binary.PutUvarint(numBuf[:], uint64(l)) + n := binary.PutVarint(numBuf[:], int64(l)) if _, err := w.Write(numBuf[:n]); err != nil { return err } + if l <= 0 { + continue + } if _, err := w.Write(b.data[offset : offset+l]); err != nil { return err } @@ -221,14 +243,22 @@ func (b *appendSortableBuffer) Write(w io.Writer) error { var numBuf [binary.MaxVarintLen64]byte entries := b.sortedBuf for _, entry := range entries { - n := binary.PutUvarint(numBuf[:], uint64(len(entry.key))) + lk := int64(len(entry.key)) + if entry.key == nil { + lk = -1 + } + n := binary.PutVarint(numBuf[:], lk) if _, err := w.Write(numBuf[:n]); err != nil { return err } if _, err := w.Write(entry.key); err != nil { return err } - n = binary.PutUvarint(numBuf[:], uint64(len(entry.value))) + lv := int64(len(entry.value)) + if entry.value == nil { + lv = -1 + } + n = binary.PutVarint(numBuf[:], lv) if _, err := w.Write(numBuf[:n]); err != nil { return err } @@ -307,14 +337,22 @@ func (b *oldestEntrySortableBuffer) Write(w io.Writer) error { var numBuf [binary.MaxVarintLen64]byte entries := b.sortedBuf for _, entry := range entries { - n := binary.PutUvarint(numBuf[:], uint64(len(entry.key))) + lk := int64(len(entry.key)) + if entry.key == nil { + lk = -1 + } + n := binary.PutVarint(numBuf[:], lk) if _, err := w.Write(numBuf[:n]); err != nil { return err } if _, err := w.Write(entry.key); err != nil { return err } - n = binary.PutUvarint(numBuf[:], uint64(len(entry.value))) + lv := int64(len(entry.value)) + if entry.value == nil { + lv = -1 + } + n = binary.PutVarint(numBuf[:], lv) if _, err := w.Write(numBuf[:n]); err != nil { return err } diff --git a/etl/collector.go b/etl/collector.go index 457b565d6..8ad886bde 100644 --- a/etl/collector.go +++ b/etl/collector.go @@ -108,17 +108,17 @@ func (c *Collector) flushBuffer(canStoreInRam bool) error { return nil } var provider dataProvider - var err error c.buf.Sort() if canStoreInRam && len(c.dataProviders) == 0 { provider = KeepInRAM(c.buf) c.allFlushed = true } else { doFsync := !c.autoClean /* is critical collector */ + var err error provider, err = FlushToDisk(c.logPrefix, c.buf, c.tmpdir, doFsync, c.logLvl) - } - if err != nil { - return err + if err != nil { + return err + } } if provider != nil { c.dataProviders = append(c.dataProviders, provider) @@ -163,7 +163,7 @@ func (c *Collector) Load(db kv.RwTx, toBucket string, loadFunc LoadFunc, args Tr i := 0 var prevK []byte - loadNextFunc := func(originalK, k, v []byte) error { + loadNextFunc := func(_, k, v []byte) error { if i == 0 { isEndOfBucket := lastKey == nil || bytes.Compare(lastKey, k) == -1 canUseAppend = haveSortingGuaranties && isEndOfBucket @@ -192,13 +192,16 @@ func (c *Collector) Load(db kv.RwTx, toBucket string, loadFunc LoadFunc, args Tr logArs = append(logArs, "current_prefix", makeCurrentKeyStr(k)) } - log.Info(fmt.Sprintf("[%s] ETL [2/2] Loading", c.logPrefix), logArs...) + log.Log(c.logLvl, fmt.Sprintf("[%s] ETL [2/2] Loading", c.logPrefix), logArs...) 
} - if canUseAppend && len(v) == 0 { - return nil // nothing to delete after end of bucket - } - if len(v) == 0 { + isNil := (c.bufType == SortableSliceBuffer && v == nil) || + (c.bufType == SortableAppendBuffer && len(v) == 0) || //backward compatibility + (c.bufType == SortableOldestAppearedBuffer && len(v) == 0) + if isNil { + if canUseAppend { + return nil // nothing to delete after end of bucket + } if err := cursor.Delete(k); err != nil { return err } diff --git a/etl/dataprovider.go b/etl/dataprovider.go index 28cd41985..baab747b9 100644 --- a/etl/dataprovider.go +++ b/etl/dataprovider.go @@ -103,13 +103,13 @@ func (p *fileDataProvider) String() string { } func readElementFromDisk(r io.Reader, br io.ByteReader, keyBuf, valBuf []byte) ([]byte, []byte, error) { - n, err := binary.ReadUvarint(br) + n, err := binary.ReadVarint(br) if err != nil { return nil, nil, err } - if n > 0 { + if n >= 0 { // Reallocate the slice or extend it if there is enough capacity - if len(keyBuf)+int(n) > cap(keyBuf) { + if keyBuf == nil || len(keyBuf)+int(n) > cap(keyBuf) { newKeyBuf := make([]byte, len(keyBuf)+int(n)) copy(newKeyBuf, keyBuf) keyBuf = newKeyBuf @@ -119,13 +119,15 @@ func readElementFromDisk(r io.Reader, br io.ByteReader, keyBuf, valBuf []byte) ( if _, err = io.ReadFull(r, keyBuf[len(keyBuf)-int(n):]); err != nil { return nil, nil, err } + } else { + keyBuf = nil } - if n, err = binary.ReadUvarint(br); err != nil { + if n, err = binary.ReadVarint(br); err != nil { return nil, nil, err } - if n > 0 { + if n >= 0 { // Reallocate the slice or extend it if there is enough capacity - if len(valBuf)+int(n) > cap(valBuf) { + if valBuf == nil || len(valBuf)+int(n) > cap(valBuf) { newValBuf := make([]byte, len(valBuf)+int(n)) copy(newValBuf, valBuf) valBuf = newValBuf @@ -135,6 +137,8 @@ func readElementFromDisk(r io.Reader, br io.ByteReader, keyBuf, valBuf []byte) ( if _, err = io.ReadFull(r, valBuf[len(valBuf)-int(n):]); err != nil { return nil, nil, err } + } else { + valBuf = nil } return keyBuf, valBuf, err } diff --git a/etl/etl_test.go b/etl/etl_test.go index d549b0e22..863f391a9 100644 --- a/etl/etl_test.go +++ b/etl/etl_test.go @@ -39,6 +39,76 @@ func decodeHex(in string) []byte { return payload } +func TestEmptyValueIsNotANil(t *testing.T) { + t.Run("sortable", func(t *testing.T) { + collector := NewCollector(t.Name(), "", NewSortableBuffer(1)) + defer collector.Close() + require := require.New(t) + require.NoError(collector.Collect([]byte{1}, []byte{})) + require.NoError(collector.Collect([]byte{2}, nil)) + require.NoError(collector.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error { + if k[0] == 1 { + require.Equal([]byte{}, v) + } else { + require.Nil(v) + } + return nil + }, TransformArgs{})) + }) + t.Run("append", func(t *testing.T) { + // append buffer doesn't support nil values + collector := NewCollector(t.Name(), "", NewAppendBuffer(1)) + defer collector.Close() + require := require.New(t) + require.NoError(collector.Collect([]byte{1}, []byte{})) + require.NoError(collector.Collect([]byte{2}, nil)) + require.NoError(collector.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error { + require.Nil(v) + return nil + }, TransformArgs{})) + }) + t.Run("oldest", func(t *testing.T) { + collector := NewCollector(t.Name(), "", NewOldestEntryBuffer(1)) + defer collector.Close() + require := require.New(t) + require.NoError(collector.Collect([]byte{1}, []byte{})) + require.NoError(collector.Collect([]byte{2}, nil)) + 
require.NoError(collector.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error { + if k[0] == 1 { + require.Equal([]byte{}, v) + } else { + require.Nil(v) + } + return nil + }, TransformArgs{})) + }) +} + +func TestEmptyKeyValue(t *testing.T) { + _, tx := memdb.NewTestTx(t) + require := require.New(t) + table := kv.ChaindataTables[0] + collector := NewCollector(t.Name(), "", NewSortableBuffer(1)) + defer collector.Close() + require.NoError(collector.Collect([]byte{2}, []byte{})) + require.NoError(collector.Collect([]byte{1}, []byte{1})) + require.NoError(collector.Load(tx, table, IdentityLoadFunc, TransformArgs{})) + v, err := tx.GetOne(table, []byte{2}) + require.NoError(err) + require.Equal([]byte{}, v) + v, err = tx.GetOne(table, []byte{1}) + require.NoError(err) + require.Equal([]byte{1}, v) + + collector = NewCollector(t.Name(), "", NewSortableBuffer(1)) + defer collector.Close() + require.NoError(collector.Collect([]byte{}, nil)) + require.NoError(collector.Load(tx, table, IdentityLoadFunc, TransformArgs{})) + v, err = tx.GetOne(table, []byte{}) + require.NoError(err) + require.Nil(v) +} + func TestWriteAndReadBufferEntry(t *testing.T) { b := NewSortableBuffer(128) buffer := bytes.NewBuffer(make([]byte, 0)) @@ -281,17 +351,24 @@ func generateTestData(t *testing.T, db kv.Putter, bucket string, count int) { func testExtractToMapFunc(k, v []byte, next ExtractNextFunc) error { valueMap := make(map[string][]byte) valueMap["value"] = v - out, _ := json.Marshal(valueMap) + out, err := json.Marshal(valueMap) + if err != nil { + return err + } return next(k, k, out) } func testExtractDoubleToMapFunc(k, v []byte, next ExtractNextFunc) error { + var err error valueMap := make(map[string][]byte) valueMap["value"] = append(v, 0xAA) k1 := append(k, 0xAA) - out, _ := json.Marshal(valueMap) + out, err := json.Marshal(valueMap) + if err != nil { + panic(err) + } - err := next(k, k1, out) + err = next(k, k1, out) if err != nil { return err } @@ -299,7 +376,10 @@ func testExtractDoubleToMapFunc(k, v []byte, next ExtractNextFunc) error { valueMap = make(map[string][]byte) valueMap["value"] = append(v, 0xBB) k2 := append(k, 0xBB) - out, _ = json.Marshal(valueMap) + out, err = json.Marshal(valueMap) + if err != nil { + panic(err) + } return next(k, k2, out) } diff --git a/go.mod b/go.mod index c1ed95188..cb83b743e 100644 --- a/go.mod +++ b/go.mod @@ -1,45 +1,45 @@ module github.com/ledgerwatch/erigon-lib -go 1.18 +go 1.19 require ( - github.com/ledgerwatch/interfaces v0.0.0-20230210062155-539b8171d9f0 + github.com/ledgerwatch/interfaces v0.0.0-20230412092010-e1c4a1a4279e github.com/ledgerwatch/log/v3 v3.7.0 github.com/ledgerwatch/secp256k1 v1.0.0 - github.com/ledgerwatch/trackerslist v1.0.0 - github.com/torquem-ch/mdbx-go v0.27.5 + github.com/ledgerwatch/trackerslist v1.1.0 + github.com/torquem-ch/mdbx-go v0.27.10 ) require ( - github.com/RoaringBitmap/roaring v1.2.2 + github.com/RoaringBitmap/roaring v1.2.3 github.com/VictoriaMetrics/metrics v1.23.1 + github.com/anacrolix/dht/v2 v2.19.2-0.20221121215055-066ad8494444 github.com/anacrolix/go-libutp v1.2.0 github.com/anacrolix/log v0.13.2-0.20221123232138-02e2764801c3 - github.com/anacrolix/torrent v1.48.0 + github.com/anacrolix/torrent v1.48.1-0.20230219022425-e8971ea0f1bf github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b - github.com/deckarep/golang-set/v2 v2.1.0 + github.com/deckarep/golang-set/v2 v2.3.0 github.com/edsrzf/mmap-go v1.1.0 github.com/go-stack/stack v1.8.1 github.com/google/btree v1.1.2 - 
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 - github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d - github.com/holiman/uint256 v1.2.1 - github.com/matryer/moq v0.3.0 + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 + github.com/hashicorp/golang-lru/v2 v2.0.2 + github.com/holiman/uint256 v1.2.2 + github.com/matryer/moq v0.3.1 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/protolambda/go-kzg v0.0.0-20221224134646-c91cee5e954e github.com/quasilyte/go-ruleguard/dsl v0.3.22 github.com/spaolacci/murmur3 v1.1.0 - github.com/stretchr/testify v1.8.1 - github.com/tidwall/btree v1.5.0 - go.uber.org/atomic v1.10.0 - golang.org/x/crypto v0.6.0 - golang.org/x/exp v0.0.0-20230206171751-46f607a40771 + github.com/stretchr/testify v1.8.2 + github.com/tidwall/btree v1.6.0 + golang.org/x/crypto v0.7.0 + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 golang.org/x/sync v0.1.0 - golang.org/x/sys v0.5.0 + golang.org/x/sys v0.6.0 golang.org/x/time v0.3.0 - google.golang.org/grpc v1.53.0 - google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 - google.golang.org/protobuf v1.28.1 + google.golang.org/grpc v1.54.0 + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 + google.golang.org/protobuf v1.30.0 ) require ( @@ -47,12 +47,11 @@ require ( github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 // indirect github.com/alecthomas/atomic v0.1.0-alpha2 // indirect github.com/anacrolix/chansync v0.3.0 // indirect - github.com/anacrolix/dht/v2 v2.19.2-0.20221121215055-066ad8494444 // indirect github.com/anacrolix/envpprof v1.2.1 // indirect github.com/anacrolix/generics v0.0.0-20220618083756-f99e35403a60 // indirect github.com/anacrolix/missinggo v1.3.0 // indirect github.com/anacrolix/missinggo/perf v1.0.0 // indirect - github.com/anacrolix/missinggo/v2 v2.7.0 // indirect + github.com/anacrolix/missinggo/v2 v2.7.1 // indirect github.com/anacrolix/mmsg v1.0.0 // indirect github.com/anacrolix/multiless v0.3.0 // indirect github.com/anacrolix/stm v0.4.0 // indirect @@ -67,18 +66,18 @@ require ( github.com/dustin/go-humanize v1.0.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/uuid v1.3.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/herumi/bls-eth-go-binary v1.28.1 // indirect - github.com/huandu/xstrings v1.3.2 // indirect + github.com/huandu/xstrings v1.4.0 // indirect github.com/kilic/bls12-381 v0.1.1-0.20220929213557-ca162e8a70f4 // indirect github.com/lispad/go-generics-tools v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect github.com/mschoch/smat v0.2.0 // indirect github.com/pion/datachannel v1.5.2 // indirect - github.com/pion/dtls/v2 v2.1.5 // indirect + github.com/pion/dtls/v2 v2.2.4 // indirect github.com/pion/ice/v2 v2.2.6 // indirect github.com/pion/interceptor v0.1.11 // indirect github.com/pion/logging v0.2.2 // indirect @@ -91,8 +90,9 @@ require ( github.com/pion/srtp/v2 v2.0.9 // indirect github.com/pion/stun v0.3.5 // indirect github.com/pion/transport v0.13.1 // indirect + github.com/pion/transport/v2 v2.0.0 // indirect github.com/pion/turn/v2 v2.0.8 // indirect - github.com/pion/udp v0.1.1 // indirect + github.com/pion/udp v0.1.4 // indirect github.com/pion/webrtc/v3 v3.1.42 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -102,12 
+102,12 @@ require ( go.etcd.io/bbolt v1.3.6 // indirect go.opentelemetry.io/otel v1.8.0 // indirect go.opentelemetry.io/otel/trace v1.8.0 // indirect - golang.org/x/mod v0.7.0 // indirect - golang.org/x/net v0.6.0 // indirect - golang.org/x/text v0.7.0 // indirect - golang.org/x/tools v0.3.0 // indirect - google.golang.org/genproto v0.0.0-20230202175211-008b39050e57 // indirect + golang.org/x/mod v0.9.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/text v0.8.0 // indirect + golang.org/x/tools v0.7.0 // indirect + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) -replace github.com/ledgerwatch/interfaces => github.com/roberto-bayardo/interfaces v0.0.0-20230125140013-a42dd5f11cb5 +replace github.com/ledgerwatch/interfaces => github.com/roberto-bayardo/interfaces v0.0.0-20230415160431-75e636779b49 diff --git a/go.sum b/go.sum index 924d1ac54..904ff0678 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/RoaringBitmap/roaring v1.2.2 h1:RT+1qfb7a8rkOIxPnyJdvU4G8Ynmhc2YYP6MvzqEtwk= -github.com/RoaringBitmap/roaring v1.2.2/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE= +github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY= +github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VictoriaMetrics/metrics v1.23.1 h1:/j8DzeJBxSpL2qSIdqnRFLvQQhbJyJbbEi22yMm7oL0= @@ -58,8 +58,8 @@ github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5ur github.com/anacrolix/missinggo/v2 v2.2.0/go.mod h1:o0jgJoYOyaoYQ4E2ZMISVa9c88BbUBVQQW4QeRkNCGY= github.com/anacrolix/missinggo/v2 v2.5.1/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA= github.com/anacrolix/missinggo/v2 v2.5.2/go.mod h1:yNvsLrtZYRYCOI+KRH/JM8TodHjtIE/bjOGhQaLOWIE= -github.com/anacrolix/missinggo/v2 v2.7.0 h1:4fzOAAn/VCvfWGviLmh64MPMttrlYew81JdPO7nSHvI= -github.com/anacrolix/missinggo/v2 v2.7.0/go.mod h1:2IZIvmRTizALNYFYXsPR7ofXPzJgyBpKZ4kMqMEICkI= +github.com/anacrolix/missinggo/v2 v2.7.1 h1:Y+wL0JC6D2icpwhDpcrRM4THQB/uFcPNYUtZMbYvQgI= +github.com/anacrolix/missinggo/v2 v2.7.1/go.mod h1:2IZIvmRTizALNYFYXsPR7ofXPzJgyBpKZ4kMqMEICkI= github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw= github.com/anacrolix/mmsg v1.0.0 h1:btC7YLjOn29aTUAExJiVUhQOuf/8rhm+/nWCMAnL3Hg= github.com/anacrolix/mmsg v1.0.0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc= @@ -75,8 +75,8 @@ github.com/anacrolix/sync v0.4.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DC github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= -github.com/anacrolix/torrent v1.48.0 h1:OQe1aQb8WnhDzpcI7r3yWoHzHWKyPbfhXGfO9Q/pvbY= 
-github.com/anacrolix/torrent v1.48.0/go.mod h1:3UtkJ8BnxXDRwvk+eT+uwiZalfFJ8YzAhvxe4QRPSJI= +github.com/anacrolix/torrent v1.48.1-0.20230219022425-e8971ea0f1bf h1:gQCApNMI+lbXYLRiiiC5S2mU9k2BZT9FNnRr//eUzXc= +github.com/anacrolix/torrent v1.48.1-0.20230219022425-e8971ea0f1bf/go.mod h1:5OY82KVPu5Fq+P0HefdTQKRt0gfBXeHeRUE04VaSoQo= github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 h1:QAVZ3pN/J4/UziniAhJR2OZ9Ox5kOY2053tBbbqUPYA= github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96/go.mod h1:Wa6n8cYIdaG35x15aH3Zy6d03f7P728QfdcDeD/IEOs= github.com/anacrolix/utp v0.1.0 h1:FOpQOmIwYsnENnz7tAGohA+r6iXpRjrq8ssKSre2Cp4= @@ -84,6 +84,7 @@ github.com/anacrolix/utp v0.1.0/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNa github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/immutable v0.3.0 h1:TVRhuZx2wG9SZ0LRdqlbs9S5BZ6Y24hJEHTCgWHZEIw= github.com/benbjohnson/immutable v0.3.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= @@ -107,8 +108,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= -github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/deckarep/golang-set/v2 v2.3.0 h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g= +github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= @@ -135,8 +136,10 @@ github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1T github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -164,8 +167,9 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W 
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -190,23 +194,24 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= -github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU= +github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/herumi/bls-eth-go-binary v1.28.1 h1:fcIZ48y5EE9973k05XjE8+P3YiQgjZz4JI/YabAm8KA= github.com/herumi/bls-eth-go-binary v1.28.1/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= -github.com/holiman/uint256 v1.2.1 h1:XRtyuda/zw2l+Bq/38n5XUoEF72aSOu/77Thd9pPp2o= -github.com/holiman/uint256 v1.2.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk= +github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= 
+github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -231,12 +236,12 @@ github.com/ledgerwatch/log/v3 v3.7.0 h1:aFPEZdwZx4jzA3+/Pf8wNDN5tCI0cIolq/kfvgcM github.com/ledgerwatch/log/v3 v3.7.0/go.mod h1:J2Jl6zV/58LeA6LTaVVnCGyf1/cYYSEOOLHY4ZN8S2A= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= github.com/ledgerwatch/secp256k1 v1.0.0/go.mod h1:SPmqJFciiF/Q0mPt2jVs2dTr/1TZBTIA+kPMmKgBAak= -github.com/ledgerwatch/trackerslist v1.0.0 h1:6gnQu93WCTL4jPcdmc8UEmw56Cb8IFQHLGnevfIeLwo= -github.com/ledgerwatch/trackerslist v1.0.0/go.mod h1:pCC+eEw8izNcnBBiSwvIq8kKsxDLInAafSW275jqFrg= +github.com/ledgerwatch/trackerslist v1.1.0 h1:eKhgeURD9x/J3qzMnL6C0e0cLy6Ld7Ck/VR/yF+7cZQ= +github.com/ledgerwatch/trackerslist v1.1.0/go.mod h1:wWU/V810cpsEl//o49ebwAWf0BL0WOJiu/577L4IVok= github.com/lispad/go-generics-tools v1.1.0 h1:mbSgcxdFVmpoyso1X/MJHXbSbSL3dD+qhRryyxk+/XY= github.com/lispad/go-generics-tools v1.1.0/go.mod h1:2csd1EJljo/gy5qG4khXol7ivCPptNjG5Uv2X8MgK84= -github.com/matryer/moq v0.3.0 h1:4j0goF/XK3pMTc7fJB3fveuTJoQNdavRX/78vlK3Xb4= -github.com/matryer/moq v0.3.0/go.mod h1:RJ75ZZZD71hejp39j4crZLsEDszGk6iH4v4YsWFKH4s= +github.com/matryer/moq v0.3.1 h1:kLDiBJoGcusWS2BixGyTkF224aSCD8nLY24tj/NcTCs= +github.com/matryer/moq v0.3.1/go.mod h1:RJ75ZZZD71hejp39j4crZLsEDszGk6iH4v4YsWFKH4s= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -271,8 +276,9 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pion/datachannel v1.5.2 h1:piB93s8LGmbECrpO84DnkIVWasRMk3IimbcXkTQLE6E= github.com/pion/datachannel v1.5.2/go.mod h1:FTGQWaHrdCwIJ1rw6xBIfZVkslikjShim5yr05XFuCQ= github.com/pion/dtls/v2 v2.1.3/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus= -github.com/pion/dtls/v2 v2.1.5 h1:jlh2vtIyUBShchoTDqpCCqiYCyRFJ/lvf/gQ8TALs+c= github.com/pion/dtls/v2 v2.1.5/go.mod h1:BqCE7xPZbPSubGasRoDFJeTsyJtdD1FanJYL0JGheqY= +github.com/pion/dtls/v2 v2.2.4 h1:YSfYwDQgrxMYXLBc/m7PFY5BVtWlNm/DN4qoU2CbcWg= +github.com/pion/dtls/v2 v2.2.4/go.mod h1:WGKfxqhrddne4Kg3p11FUMJrynkOY4lb25zHNO49wuw= github.com/pion/ice/v2 v2.2.6 h1:R/vaLlI1J2gCx141L5PEwtuGAGcyS6e7E0hDeJFq5Ig= github.com/pion/ice/v2 v2.2.6/go.mod h1:SWuHiOGP17lGromHTFadUe1EuPgFh/oCU6FCMZHooVE= github.com/pion/interceptor v0.1.11 h1:00U6OlqxA3FFB50HSg25J/8cWi7P6FbSzw4eFn24Bvs= @@ -301,10 +307,13 @@ github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZ github.com/pion/transport v0.13.0/go.mod h1:yxm9uXpK9bpBBWkITk13cLo1y5/ur5VQpG22ny6EP7g= github.com/pion/transport v0.13.1 h1:/UH5yLeQtwm2VZIPjxwnNFxjS4DFhyLfS4GlfuKUzfA= github.com/pion/transport v0.13.1/go.mod h1:EBxbqzyv+ZrmDb82XswEE0BjfQFtuw1Nu6sjnjWCsGg= +github.com/pion/transport/v2 v2.0.0 h1:bsMYyqHCbkvHwj+eNCFBuxtlKndKfyGI2vaQmM3fIE4= +github.com/pion/transport/v2 v2.0.0/go.mod h1:HS2MEBJTwD+1ZI2eSXSvHJx/HnzQqRy2/LXxt6eVMHc= github.com/pion/turn/v2 v2.0.8 h1:KEstL92OUN3k5k8qxsXHpr7WWfrdp7iJZHx99ud8muw= github.com/pion/turn/v2 v2.0.8/go.mod 
h1:+y7xl719J8bAEVpSXBXvTxStjJv3hbz9YFflvkpcGPw= -github.com/pion/udp v0.1.1 h1:8UAPvyqmsxK8oOjloDk4wUt63TzFe9WEJkg5lChlj7o= github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M= +github.com/pion/udp v0.1.4 h1:OowsTmu1Od3sD6i3fQUJxJn2fEvJO6L1TidgadtbTI8= +github.com/pion/udp v0.1.4/go.mod h1:G8LDo56HsFwC24LIcnT4YIDU5qcB6NepqqjP0keL2us= github.com/pion/webrtc/v3 v3.1.42 h1:wJEQFIXVanptnQcHOLTuIo4AtGB2+mG2x4OhIhnITOA= github.com/pion/webrtc/v3 v3.1.42/go.mod h1:ffD9DulDrPxyWvDPUIPAOSAWx9GUlOExiJPf7cCcMLA= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -336,8 +345,8 @@ github.com/protolambda/go-kzg v0.0.0-20221224134646-c91cee5e954e/go.mod h1:7EhkB github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/roberto-bayardo/interfaces v0.0.0-20230125140013-a42dd5f11cb5 h1:/ScbB0EJluuR1vlfAsFhqpP5Pjg4TVG1DEluy3B8awc= -github.com/roberto-bayardo/interfaces v0.0.0-20230125140013-a42dd5f11cb5/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= +github.com/roberto-bayardo/interfaces v0.0.0-20230415160431-75e636779b49 h1:RmyA3NHGT+2jWxYIw8T1ZCElWWE3kjwgn84GaMFSgo8= +github.com/roberto-bayardo/interfaces v0.0.0-20230415160431-75e636779b49/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= @@ -368,15 +377,16 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/tidwall/btree v1.5.0 h1:iV0yVY/frd7r6qGBXfEYs7DH0gTDgrKTrDjS7xt/IyQ= -github.com/tidwall/btree v1.5.0/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= +github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/torquem-ch/mdbx-go v0.27.5 h1:bbhXQGFCmoxbRDXKYEJwxSOOTeBKwoD4pFBUpK9+V1g= -github.com/torquem-ch/mdbx-go v0.27.5/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= +github.com/torquem-ch/mdbx-go v0.27.10 h1:iwb8Wn9gse4MEYIltAna+pxMPCY7hA1/5LLN/Qrcsx0= +github.com/torquem-ch/mdbx-go v0.27.10/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= github.com/valyala/fastrand 
v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8= github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ= @@ -385,6 +395,7 @@ github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPyS github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -394,31 +405,34 @@ go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg= go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= go.opentelemetry.io/otel/trace v1.8.0 h1:cSy0DF9eGI5WIfNwZ1q2iUyGj00tGzP24dE1lOlHrfY= go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= -golang.org/x/exp 
v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -436,6 +450,7 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -443,8 +458,11 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220531201128-c960675eff93/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod 
h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -454,6 +472,7 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -479,22 +498,31 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -504,12 +532,14 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -522,8 +552,8 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20230202175211-008b39050e57 h1:vArvWooPH749rNHpBGgVl+U9B9dATjiEhJzcWGlovNs= -google.golang.org/genproto v0.0.0-20230202175211-008b39050e57/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc v1.17.0/go.mod 
h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -531,10 +561,10 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= +google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -543,9 +573,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -558,9 +587,11 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 
v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/gointerfaces/downloader/downloader.pb.go b/gointerfaces/downloader/downloader.pb.go index 8d324b712..745b56cf2 100644 --- a/gointerfaces/downloader/downloader.pb.go +++ b/gointerfaces/downloader/downloader.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.22.2 // source: downloader/downloader.proto package downloader @@ -213,16 +213,16 @@ type StatsReply struct { // - ensure all pieces hashes available // - validate files after crush // - when all metadata ready - can start download/upload - MetadataReady int32 `protobuf:"varint,1,opt,name=metadataReady,proto3" json:"metadataReady,omitempty"` - FilesTotal int32 `protobuf:"varint,2,opt,name=filesTotal,proto3" json:"filesTotal,omitempty"` - PeersUnique int32 `protobuf:"varint,4,opt,name=peersUnique,proto3" json:"peersUnique,omitempty"` - ConnectionsTotal uint64 `protobuf:"varint,5,opt,name=connectionsTotal,proto3" json:"connectionsTotal,omitempty"` + MetadataReady int32 `protobuf:"varint,1,opt,name=metadata_ready,json=metadataReady,proto3" json:"metadata_ready,omitempty"` + FilesTotal int32 `protobuf:"varint,2,opt,name=files_total,json=filesTotal,proto3" json:"files_total,omitempty"` + PeersUnique int32 `protobuf:"varint,4,opt,name=peers_unique,json=peersUnique,proto3" json:"peers_unique,omitempty"` + ConnectionsTotal uint64 `protobuf:"varint,5,opt,name=connections_total,json=connectionsTotal,proto3" json:"connections_total,omitempty"` Completed bool `protobuf:"varint,6,opt,name=completed,proto3" json:"completed,omitempty"` Progress float32 `protobuf:"fixed32,7,opt,name=progress,proto3" json:"progress,omitempty"` - BytesCompleted uint64 `protobuf:"varint,8,opt,name=bytesCompleted,proto3" json:"bytesCompleted,omitempty"` - BytesTotal uint64 `protobuf:"varint,9,opt,name=bytesTotal,proto3" json:"bytesTotal,omitempty"` - UploadRate uint64 `protobuf:"varint,10,opt,name=uploadRate,proto3" json:"uploadRate,omitempty"` // bytes/sec - DownloadRate uint64 `protobuf:"varint,11,opt,name=downloadRate,proto3" json:"downloadRate,omitempty"` // bytes/sec + BytesCompleted uint64 `protobuf:"varint,8,opt,name=bytes_completed,json=bytesCompleted,proto3" json:"bytes_completed,omitempty"` + BytesTotal uint64 `protobuf:"varint,9,opt,name=bytes_total,json=bytesTotal,proto3" json:"bytes_total,omitempty"` + UploadRate uint64 `protobuf:"varint,10,opt,name=upload_rate,json=uploadRate,proto3" json:"upload_rate,omitempty"` // bytes/sec + DownloadRate uint64 `protobuf:"varint,11,opt,name=download_rate,json=downloadRate,proto3" json:"download_rate,omitempty"` // bytes/sec } func (x *StatsReply) Reset() { @@ -347,44 +347,45 @@ var file_downloader_downloader_proto_rawDesc = []byte{ 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x0f, 0x0a, 0x0d, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0xe6, 0x02, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x24, 0x0a, 0x0d, 0x6d, 0x65, 
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x61, 0x64, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x54, - 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, - 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x55, - 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x70, 0x65, 0x65, - 0x72, 0x73, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x54, - 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x26, - 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, - 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x1e, 0x0a, 0x0a, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x52, 0x61, 0x74, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x75, 0x70, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x61, 0x74, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x61, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x64, 0x6f, - 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x61, 0x74, 0x65, 0x32, 0xcb, 0x01, 0x0a, 0x0a, 0x44, - 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x08, 0x44, 0x6f, 0x77, - 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1b, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, - 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x06, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x19, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, - 0x64, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x05, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, - 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x19, 0x5a, 0x17, 0x2e, 0x2f, 0x64, 0x6f, - 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x3b, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, - 0x64, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x33, + 0x74, 0x22, 0xee, 0x02, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x72, 0x65, 0x61, + 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x73, + 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x69, + 0x6c, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x65, 0x72, + 0x73, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, + 0x70, 0x65, 0x65, 0x72, 0x73, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0a, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x61, + 0x74, 0x65, 0x32, 0xcb, 0x01, 0x0a, 0x0a, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x41, 0x0a, 0x08, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1b, 0x2e, + 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, + 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x06, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x19, + 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x64, + 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, + 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, + 0x42, 0x19, 0x5a, 
0x17, 0x2e, 0x2f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, + 0x3b, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( diff --git a/gointerfaces/downloader/downloader_grpc.pb.go b/gointerfaces/downloader/downloader_grpc.pb.go index 5162a82cf..6cb063b55 100644 --- a/gointerfaces/downloader/downloader_grpc.pb.go +++ b/gointerfaces/downloader/downloader_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.12 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.2 // source: downloader/downloader.proto package downloader @@ -19,6 +19,12 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Downloader_Download_FullMethodName = "/downloader.Downloader/Download" + Downloader_Verify_FullMethodName = "/downloader.Downloader/Verify" + Downloader_Stats_FullMethodName = "/downloader.Downloader/Stats" +) + // DownloaderClient is the client API for Downloader service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -38,7 +44,7 @@ func NewDownloaderClient(cc grpc.ClientConnInterface) DownloaderClient { func (c *downloaderClient) Download(ctx context.Context, in *DownloadRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/downloader.Downloader/Download", in, out, opts...) + err := c.cc.Invoke(ctx, Downloader_Download_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -47,7 +53,7 @@ func (c *downloaderClient) Download(ctx context.Context, in *DownloadRequest, op func (c *downloaderClient) Verify(ctx context.Context, in *VerifyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/downloader.Downloader/Verify", in, out, opts...) + err := c.cc.Invoke(ctx, Downloader_Verify_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -56,7 +62,7 @@ func (c *downloaderClient) Verify(ctx context.Context, in *VerifyRequest, opts . func (c *downloaderClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsReply, error) { out := new(StatsReply) - err := c.cc.Invoke(ctx, "/downloader.Downloader/Stats", in, out, opts...) + err := c.cc.Invoke(ctx, Downloader_Stats_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -109,7 +115,7 @@ func _Downloader_Download_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/downloader.Downloader/Download", + FullMethod: Downloader_Download_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DownloaderServer).Download(ctx, req.(*DownloadRequest)) @@ -127,7 +133,7 @@ func _Downloader_Verify_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/downloader.Downloader/Verify", + FullMethod: Downloader_Verify_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DownloaderServer).Verify(ctx, req.(*VerifyRequest)) @@ -145,7 +151,7 @@ func _Downloader_Stats_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/downloader.Downloader/Stats", + FullMethod: Downloader_Stats_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DownloaderServer).Stats(ctx, req.(*StatsRequest)) diff --git a/gointerfaces/execution/execution.pb.go b/gointerfaces/execution/execution.pb.go index 40ed7a488..7232d6b34 100644 --- a/gointerfaces/execution/execution.pb.go +++ b/gointerfaces/execution/execution.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.22.2 // source: execution/execution.proto package execution @@ -78,8 +78,8 @@ type ForkChoiceReceipt struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` // Forkchoice is either successful or unsuccessful. - LatestValidHash *types.H256 `protobuf:"bytes,2,opt,name=latestValidHash,proto3" json:"latestValidHash,omitempty"` // Return latest valid hash in case of halt of execution. + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` // Forkchoice is either successful or unsuccessful. + LatestValidHash *types.H256 `protobuf:"bytes,2,opt,name=latest_valid_hash,json=latestValidHash,proto3" json:"latest_valid_hash,omitempty"` // Return latest valid hash in case of halt of execution. } func (x *ForkChoiceReceipt) Reset() { @@ -134,9 +134,9 @@ type ValidationReceipt struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ValidationStatus ValidationStatus `protobuf:"varint,1,opt,name=validationStatus,proto3,enum=execution.ValidationStatus" json:"validationStatus,omitempty"` - LatestValidHash *types.H256 `protobuf:"bytes,2,opt,name=latestValidHash,proto3" json:"latestValidHash,omitempty"` - MissingHash *types.H256 `protobuf:"bytes,3,opt,name=missingHash,proto3,oneof" json:"missingHash,omitempty"` // The missing hash, in case we receive MissingSegment so that we can reverse download it. 
+ ValidationStatus ValidationStatus `protobuf:"varint,1,opt,name=validation_status,json=validationStatus,proto3,enum=execution.ValidationStatus" json:"validation_status,omitempty"` + LatestValidHash *types.H256 `protobuf:"bytes,2,opt,name=latest_valid_hash,json=latestValidHash,proto3" json:"latest_valid_hash,omitempty"` + MissingHash *types.H256 `protobuf:"bytes,3,opt,name=missing_hash,json=missingHash,proto3,oneof" json:"missing_hash,omitempty"` // The missing hash, in case we receive MissingSegment so that we can reverse download it. } func (x *ValidationReceipt) Reset() { @@ -239,30 +239,31 @@ func (x *IsCanonicalResponse) GetCanonical() bool { return false } -// Header is an header for execution +// Header is a header for execution type Header struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ParentHash *types.H256 `protobuf:"bytes,1,opt,name=parentHash,proto3" json:"parentHash,omitempty"` + ParentHash *types.H256 `protobuf:"bytes,1,opt,name=parent_hash,json=parentHash,proto3" json:"parent_hash,omitempty"` Coinbase *types.H160 `protobuf:"bytes,2,opt,name=coinbase,proto3" json:"coinbase,omitempty"` - StateRoot *types.H256 `protobuf:"bytes,3,opt,name=stateRoot,proto3" json:"stateRoot,omitempty"` - ReceiptRoot *types.H256 `protobuf:"bytes,4,opt,name=receiptRoot,proto3" json:"receiptRoot,omitempty"` - LogsBloom *types.H2048 `protobuf:"bytes,5,opt,name=logsBloom,proto3" json:"logsBloom,omitempty"` - MixDigest *types.H256 `protobuf:"bytes,6,opt,name=mixDigest,proto3" json:"mixDigest,omitempty"` - BlockNumber uint64 `protobuf:"varint,7,opt,name=blockNumber,proto3" json:"blockNumber,omitempty"` - GasLimit uint64 `protobuf:"varint,8,opt,name=gasLimit,proto3" json:"gasLimit,omitempty"` - GasUsed uint64 `protobuf:"varint,9,opt,name=gasUsed,proto3" json:"gasUsed,omitempty"` + StateRoot *types.H256 `protobuf:"bytes,3,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty"` + ReceiptRoot *types.H256 `protobuf:"bytes,4,opt,name=receipt_root,json=receiptRoot,proto3" json:"receipt_root,omitempty"` + LogsBloom *types.H2048 `protobuf:"bytes,5,opt,name=logs_bloom,json=logsBloom,proto3" json:"logs_bloom,omitempty"` + MixDigest *types.H256 `protobuf:"bytes,6,opt,name=mix_digest,json=mixDigest,proto3" json:"mix_digest,omitempty"` + BlockNumber uint64 `protobuf:"varint,7,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"` + GasLimit uint64 `protobuf:"varint,8,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` + GasUsed uint64 `protobuf:"varint,9,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` Timestamp uint64 `protobuf:"varint,10,opt,name=timestamp,proto3" json:"timestamp,omitempty"` Nonce uint64 `protobuf:"varint,11,opt,name=nonce,proto3" json:"nonce,omitempty"` - ExtraData []byte `protobuf:"bytes,12,opt,name=extraData,proto3" json:"extraData,omitempty"` + ExtraData []byte `protobuf:"bytes,12,opt,name=extra_data,json=extraData,proto3" json:"extra_data,omitempty"` Difficulty *types.H256 `protobuf:"bytes,13,opt,name=difficulty,proto3" json:"difficulty,omitempty"` - BlockHash *types.H256 `protobuf:"bytes,14,opt,name=blockHash,proto3" json:"blockHash,omitempty"` // We keep this so that we can validate it - OmmerHash *types.H256 `protobuf:"bytes,15,opt,name=ommerHash,proto3" json:"ommerHash,omitempty"` - TransactionHash *types.H256 `protobuf:"bytes,16,opt,name=transactionHash,proto3" json:"transactionHash,omitempty"` - BaseFeePerGas *types.H256 
`protobuf:"bytes,17,opt,name=baseFeePerGas,proto3,oneof" json:"baseFeePerGas,omitempty"` - WithdrawalHash *types.H256 `protobuf:"bytes,18,opt,name=withdrawalHash,proto3,oneof" json:"withdrawalHash,omitempty"` + BlockHash *types.H256 `protobuf:"bytes,14,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` // We keep this so that we can validate it + OmmerHash *types.H256 `protobuf:"bytes,15,opt,name=ommer_hash,json=ommerHash,proto3" json:"ommer_hash,omitempty"` + TransactionHash *types.H256 `protobuf:"bytes,16,opt,name=transaction_hash,json=transactionHash,proto3" json:"transaction_hash,omitempty"` + BaseFeePerGas *types.H256 `protobuf:"bytes,17,opt,name=base_fee_per_gas,json=baseFeePerGas,proto3,oneof" json:"base_fee_per_gas,omitempty"` + WithdrawalHash *types.H256 `protobuf:"bytes,18,opt,name=withdrawal_hash,json=withdrawalHash,proto3,oneof" json:"withdrawal_hash,omitempty"` + ExcessDataGas *types.H256 `protobuf:"bytes,19,opt,name=excess_data_gas,json=excessDataGas,proto3,oneof" json:"excess_data_gas,omitempty"` } func (x *Header) Reset() { @@ -423,14 +424,21 @@ func (x *Header) GetWithdrawalHash() *types.H256 { return nil } +func (x *Header) GetExcessDataGas() *types.H256 { + if x != nil { + return x.ExcessDataGas + } + return nil +} + // Body is a block body for execution type BlockBody struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BlockHash *types.H256 `protobuf:"bytes,1,opt,name=blockHash,proto3" json:"blockHash,omitempty"` - BlockNumber uint64 `protobuf:"varint,2,opt,name=blockNumber,proto3" json:"blockNumber,omitempty"` + BlockHash *types.H256 `protobuf:"bytes,1,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` + BlockNumber uint64 `protobuf:"varint,2,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"` // Raw transactions in byte format. Transactions [][]byte `protobuf:"bytes,3,rep,name=transactions,proto3" json:"transactions,omitempty"` Uncles []*Header `protobuf:"bytes,4,rep,name=uncles,proto3" json:"uncles,omitempty"` @@ -603,7 +611,7 @@ type GetHeaderHashNumberResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BlockNumber *uint64 `protobuf:"varint,1,opt,name=blockNumber,proto3,oneof" json:"blockNumber,omitempty"` // null if not found. + BlockNumber *uint64 `protobuf:"varint,1,opt,name=block_number,json=blockNumber,proto3,oneof" json:"block_number,omitempty"` // null if not found. } func (x *GetHeaderHashNumberResponse) Reset() { @@ -651,8 +659,8 @@ type GetSegmentRequest struct { unknownFields protoimpl.UnknownFields // Get headers/body by number or hash, invalid if none set. 
- BlockNumber *uint64 `protobuf:"varint,1,opt,name=blockNumber,proto3,oneof" json:"blockNumber,omitempty"` - BlockHash *types.H256 `protobuf:"bytes,2,opt,name=blockHash,proto3,oneof" json:"blockHash,omitempty"` + BlockNumber *uint64 `protobuf:"varint,1,opt,name=block_number,json=blockNumber,proto3,oneof" json:"block_number,omitempty"` + BlockHash *types.H256 `protobuf:"bytes,2,opt,name=block_hash,json=blockHash,proto3,oneof" json:"block_hash,omitempty"` } func (x *GetSegmentRequest) Reset() { @@ -839,174 +847,181 @@ var file_execution_execution_proto_rawDesc = []byte{ 0x0a, 0x19, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x64, 0x0a, 0x11, 0x46, 0x6f, 0x72, + 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x66, 0x0a, 0x11, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x35, 0x0a, 0x0f, 0x6c, 0x61, 0x74, 0x65, - 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, - 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x61, 0x73, 0x68, 0x22, - 0xd7, 0x01, 0x0a, 0x11, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x63, 0x65, 0x69, 0x70, 0x74, 0x12, 0x47, 0x0a, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x1b, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x10, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x35, - 0x0a, 0x0f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x61, 0x73, - 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, - 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x32, 0x0a, 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, - 0x48, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x69, - 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6d, 0x69, - 0x73, 0x73, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x22, 0x33, 0x0a, 0x13, 0x49, 0x73, 0x43, - 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x22, 0x8a, - 0x06, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x0a, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 
0x35, 0x36, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x27, 0x0a, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, - 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x12, - 0x29, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, - 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2d, 0x0a, 0x0b, 0x72, 0x65, - 0x63, 0x65, 0x69, 0x70, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0b, 0x72, 0x65, - 0x63, 0x65, 0x69, 0x70, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x09, 0x6c, 0x6f, 0x67, - 0x73, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x30, 0x34, 0x38, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x73, - 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x12, 0x29, 0x0a, 0x09, 0x6d, 0x69, 0x78, 0x44, 0x69, 0x67, 0x65, - 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x6d, 0x69, 0x78, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, - 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x18, - 0x0a, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, - 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x37, 0x0a, 0x11, 0x6c, 0x61, 0x74, 0x65, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, + 0x52, 0x0f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x61, 0x73, + 0x68, 0x22, 0xdc, 0x01, 0x0a, 0x11, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x12, 0x48, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x37, 0x0a, 0x11, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x0b, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, 0x6c, 0x61, 0x74, 0x65, 0x73, + 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x33, 0x0a, 0x0c, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x00, 0x52, + 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x42, + 0x0f, 0x0a, 0x0d, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x22, 0x33, 0x0a, 0x13, 0x49, 0x73, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x61, 0x6e, 0x6f, 0x6e, + 0x69, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x61, 0x6e, 0x6f, + 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x22, 0xec, 0x06, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x2c, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, + 0x35, 0x36, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x27, + 0x0a, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x08, 0x63, + 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x6f, 0x6f, 0x74, 0x12, 0x2e, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x5f, 0x72, + 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x52, + 0x6f, 0x6f, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x6f, + 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x48, 0x32, 0x30, 0x34, 0x38, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x73, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, + 0x12, 0x2a, 0x0a, 0x0a, 0x6d, 0x69, 0x78, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, + 0x36, 0x52, 0x09, 0x6d, 0x69, 0x78, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, + 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, + 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, + 
0x78, 0x74, 0x72, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2b, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a, 0x64, 0x69, 0x66, - 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x29, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x48, 0x61, 0x73, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, - 0x73, 0x68, 0x12, 0x29, 0x0a, 0x09, 0x6f, 0x6d, 0x6d, 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, - 0x35, 0x36, 0x52, 0x09, 0x6f, 0x6d, 0x6d, 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, 0x12, 0x35, 0x0a, - 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, - 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, - 0x32, 0x35, 0x36, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x48, 0x61, 0x73, 0x68, 0x12, 0x36, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x50, - 0x65, 0x72, 0x47, 0x61, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x00, 0x52, 0x0d, 0x62, 0x61, 0x73, 0x65, - 0x46, 0x65, 0x65, 0x50, 0x65, 0x72, 0x47, 0x61, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, - 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x48, 0x61, 0x73, 0x68, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, - 0x36, 0x48, 0x01, 0x52, 0x0e, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x48, - 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x46, - 0x65, 0x65, 0x50, 0x65, 0x72, 0x47, 0x61, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x77, 0x69, 0x74, - 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x48, 0x61, 0x73, 0x68, 0x22, 0xdc, 0x01, 0x0a, 0x09, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x29, 0x0a, 0x09, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x48, 0x61, 0x73, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x75, 0x6e, - 0x63, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x75, - 0x6e, 0x63, 0x6c, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, - 0x77, 0x61, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, - 0x69, 0x74, 0x68, 0x64, 0x72, 
0x61, 0x77, 0x61, 0x6c, 0x73, 0x22, 0x4e, 0x0a, 0x11, 0x47, 0x65, - 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x2e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, - 0x09, 0x0a, 0x07, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x49, 0x0a, 0x0f, 0x47, 0x65, - 0x74, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, - 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6f, 0x64, - 0x79, 0x48, 0x00, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, - 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x22, 0x54, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x88, 0x01, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x25, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x2e, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x01, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x22, 0x43, 0x0a, 0x14, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, - 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0x43, 0x0a, 0x13, 0x49, - 0x6e, 0x73, 0x65, 0x72, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x06, 0x62, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x06, 0x62, 0x6f, 0x64, 0x69, 0x65, 0x73, - 0x22, 0x0e, 0x0a, 0x0c, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x2a, 0x55, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x10, - 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x6e, 0x76, 0x61, 
0x6c, 0x69, 0x64, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x6f, 0x6f, 0x46, 0x61, 0x72, 0x41, 0x77, 0x61, - 0x79, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x53, 0x65, - 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x10, 0x03, 0x32, 0xf7, 0x04, 0x0a, 0x09, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x0d, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x1f, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x47, 0x0a, 0x0c, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, - 0x12, 0x1e, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6e, 0x73, - 0x65, 0x72, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x17, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x0b, 0x2e, 0x74, 0x79, 0x70, + 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, + 0x61, 0x73, 0x68, 0x12, 0x2a, 0x0a, 0x0a, 0x6f, 0x6d, 0x6d, 0x65, 0x72, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x6f, 0x6d, 0x6d, 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, 0x12, + 0x36, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, + 0x61, 0x73, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x12, 0x39, 0x0a, 0x10, 0x62, 0x61, 0x73, 0x65, 0x5f, + 0x66, 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x00, + 0x52, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x50, 0x65, 0x72, 0x47, 0x61, 0x73, 0x88, + 0x01, 0x01, 0x12, 0x39, 0x0a, 0x0f, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x01, 0x52, 0x0e, 0x77, 0x69, 0x74, 0x68, + 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, + 0x0f, 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x67, 0x61, 0x73, + 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, + 0x32, 0x35, 0x36, 0x48, 0x02, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x44, 0x61, 0x74, + 0x61, 0x47, 0x61, 0x73, 0x88, 0x01, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x62, 0x61, 0x73, 0x65, + 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73, 0x42, 
0x12, 0x0a, 0x10, + 0x5f, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x5f, 0x67, 0x61, 0x73, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6f, + 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, + 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x21, + 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x73, + 0x12, 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, 0x69, + 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, + 0x61, 0x77, 0x61, 0x6c, 0x73, 0x22, 0x4e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x49, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x42, 0x6f, 0x64, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6f, 0x64, 0x79, 0x48, 0x00, 0x52, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x62, 0x6f, 0x64, 0x79, + 0x22, 0x56, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x61, 0x73, + 0x68, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x26, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, + 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, + 0x68, 
0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x01, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x22, 0x43, 0x0a, 0x14, 0x49, 0x6e, 0x73, 0x65, 0x72, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2b, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0x43, 0x0a, 0x13, + 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x06, 0x62, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x06, 0x62, 0x6f, 0x64, 0x69, 0x65, + 0x73, 0x22, 0x0e, 0x0a, 0x0c, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x2a, 0x55, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x6f, 0x6f, 0x46, 0x61, 0x72, 0x41, 0x77, + 0x61, 0x79, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x53, + 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x10, 0x03, 0x32, 0xf7, 0x04, 0x0a, 0x09, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x0d, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x1f, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x47, 0x0a, 0x0c, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, + 0x73, 0x12, 0x1e, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6e, + 0x73, 0x65, 0x72, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x17, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x0b, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x1a, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x12, 0x3d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x12, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x1a, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x56, 
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x63, 0x65, 0x69, 0x70, 0x74, 0x12, 0x3d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, - 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x12, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x1a, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x52, 0x65, 0x63, - 0x65, 0x69, 0x70, 0x74, 0x12, 0x41, 0x0a, 0x0d, 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x17, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x17, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x47, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x12, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, - 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x43, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x1c, 0x2e, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x67, 0x6d, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0f, 0x49, 0x73, 0x43, 0x61, 0x6e, 0x6f, 0x6e, - 0x69, 0x63, 0x61, 0x6c, 0x48, 0x61, 0x73, 0x68, 0x12, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2e, 0x48, 0x32, 0x35, 0x36, 0x1a, 0x1e, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x49, 0x73, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x0b, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x1a, 0x26, 0x2e, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, - 0x61, 0x73, 0x68, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x42, 0x17, 0x5a, 0x15, 0x2e, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x3b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x69, 0x6f, 0x6e, 0x2e, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x63, 0x65, 0x69, 0x70, 0x74, 0x12, 0x41, 0x0a, 0x0d, 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, + 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x17, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, + 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x47, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 
0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x43, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x1c, 0x2e, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0f, 0x49, 0x73, 0x43, 0x61, 0x6e, 0x6f, + 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x48, 0x61, 0x73, 0x68, 0x12, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x1a, 0x1e, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x49, 0x73, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x0b, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x1a, 0x26, 0x2e, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x48, 0x61, 0x73, 0x68, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x42, 0x17, 0x5a, 0x15, 0x2e, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x3b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1044,53 +1059,54 @@ var file_execution_execution_proto_goTypes = []interface{}{ (*types.ExecutionPayload)(nil), // 17: types.ExecutionPayload } var file_execution_execution_proto_depIdxs = []int32{ - 13, // 0: execution.ForkChoiceReceipt.latestValidHash:type_name -> types.H256 - 0, // 1: execution.ValidationReceipt.validationStatus:type_name -> execution.ValidationStatus - 13, // 2: execution.ValidationReceipt.latestValidHash:type_name -> types.H256 - 13, // 3: execution.ValidationReceipt.missingHash:type_name -> types.H256 - 13, // 4: execution.Header.parentHash:type_name -> types.H256 + 13, // 0: execution.ForkChoiceReceipt.latest_valid_hash:type_name -> types.H256 + 0, // 1: execution.ValidationReceipt.validation_status:type_name -> execution.ValidationStatus + 13, // 2: execution.ValidationReceipt.latest_valid_hash:type_name -> types.H256 + 13, // 3: execution.ValidationReceipt.missing_hash:type_name -> types.H256 + 13, // 4: execution.Header.parent_hash:type_name -> types.H256 14, // 5: execution.Header.coinbase:type_name -> types.H160 - 13, // 6: execution.Header.stateRoot:type_name -> types.H256 - 13, // 7: execution.Header.receiptRoot:type_name -> types.H256 - 15, // 8: execution.Header.logsBloom:type_name -> types.H2048 - 13, // 9: execution.Header.mixDigest:type_name -> types.H256 + 13, // 6: execution.Header.state_root:type_name -> types.H256 + 13, // 7: execution.Header.receipt_root:type_name -> types.H256 + 15, // 8: execution.Header.logs_bloom:type_name -> types.H2048 + 13, // 9: execution.Header.mix_digest:type_name -> types.H256 13, // 10: execution.Header.difficulty:type_name -> types.H256 - 13, // 11: execution.Header.blockHash:type_name -> types.H256 - 13, // 12: execution.Header.ommerHash:type_name -> types.H256 - 13, // 13: 
execution.Header.transactionHash:type_name -> types.H256 - 13, // 14: execution.Header.baseFeePerGas:type_name -> types.H256 - 13, // 15: execution.Header.withdrawalHash:type_name -> types.H256 - 13, // 16: execution.BlockBody.blockHash:type_name -> types.H256 - 4, // 17: execution.BlockBody.uncles:type_name -> execution.Header - 16, // 18: execution.BlockBody.withdrawals:type_name -> types.Withdrawal - 4, // 19: execution.GetHeaderResponse.header:type_name -> execution.Header - 5, // 20: execution.GetBodyResponse.body:type_name -> execution.BlockBody - 13, // 21: execution.GetSegmentRequest.blockHash:type_name -> types.H256 - 4, // 22: execution.InsertHeadersRequest.headers:type_name -> execution.Header - 5, // 23: execution.InsertBodiesRequest.bodies:type_name -> execution.BlockBody - 10, // 24: execution.Execution.InsertHeaders:input_type -> execution.InsertHeadersRequest - 11, // 25: execution.Execution.InsertBodies:input_type -> execution.InsertBodiesRequest - 13, // 26: execution.Execution.ValidateChain:input_type -> types.H256 - 13, // 27: execution.Execution.UpdateForkChoice:input_type -> types.H256 - 12, // 28: execution.Execution.AssembleBlock:input_type -> execution.EmptyMessage - 9, // 29: execution.Execution.GetHeader:input_type -> execution.GetSegmentRequest - 9, // 30: execution.Execution.GetBody:input_type -> execution.GetSegmentRequest - 13, // 31: execution.Execution.IsCanonicalHash:input_type -> types.H256 - 13, // 32: execution.Execution.GetHeaderHashNumber:input_type -> types.H256 - 12, // 33: execution.Execution.InsertHeaders:output_type -> execution.EmptyMessage - 12, // 34: execution.Execution.InsertBodies:output_type -> execution.EmptyMessage - 2, // 35: execution.Execution.ValidateChain:output_type -> execution.ValidationReceipt - 1, // 36: execution.Execution.UpdateForkChoice:output_type -> execution.ForkChoiceReceipt - 17, // 37: execution.Execution.AssembleBlock:output_type -> types.ExecutionPayload - 6, // 38: execution.Execution.GetHeader:output_type -> execution.GetHeaderResponse - 7, // 39: execution.Execution.GetBody:output_type -> execution.GetBodyResponse - 3, // 40: execution.Execution.IsCanonicalHash:output_type -> execution.IsCanonicalResponse - 8, // 41: execution.Execution.GetHeaderHashNumber:output_type -> execution.GetHeaderHashNumberResponse - 33, // [33:42] is the sub-list for method output_type - 24, // [24:33] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name + 13, // 11: execution.Header.block_hash:type_name -> types.H256 + 13, // 12: execution.Header.ommer_hash:type_name -> types.H256 + 13, // 13: execution.Header.transaction_hash:type_name -> types.H256 + 13, // 14: execution.Header.base_fee_per_gas:type_name -> types.H256 + 13, // 15: execution.Header.withdrawal_hash:type_name -> types.H256 + 13, // 16: execution.Header.excess_data_gas:type_name -> types.H256 + 13, // 17: execution.BlockBody.block_hash:type_name -> types.H256 + 4, // 18: execution.BlockBody.uncles:type_name -> execution.Header + 16, // 19: execution.BlockBody.withdrawals:type_name -> types.Withdrawal + 4, // 20: execution.GetHeaderResponse.header:type_name -> execution.Header + 5, // 21: execution.GetBodyResponse.body:type_name -> execution.BlockBody + 13, // 22: execution.GetSegmentRequest.block_hash:type_name -> types.H256 + 4, // 23: execution.InsertHeadersRequest.headers:type_name -> execution.Header + 5, // 24: 
execution.InsertBodiesRequest.bodies:type_name -> execution.BlockBody + 10, // 25: execution.Execution.InsertHeaders:input_type -> execution.InsertHeadersRequest + 11, // 26: execution.Execution.InsertBodies:input_type -> execution.InsertBodiesRequest + 13, // 27: execution.Execution.ValidateChain:input_type -> types.H256 + 13, // 28: execution.Execution.UpdateForkChoice:input_type -> types.H256 + 12, // 29: execution.Execution.AssembleBlock:input_type -> execution.EmptyMessage + 9, // 30: execution.Execution.GetHeader:input_type -> execution.GetSegmentRequest + 9, // 31: execution.Execution.GetBody:input_type -> execution.GetSegmentRequest + 13, // 32: execution.Execution.IsCanonicalHash:input_type -> types.H256 + 13, // 33: execution.Execution.GetHeaderHashNumber:input_type -> types.H256 + 12, // 34: execution.Execution.InsertHeaders:output_type -> execution.EmptyMessage + 12, // 35: execution.Execution.InsertBodies:output_type -> execution.EmptyMessage + 2, // 36: execution.Execution.ValidateChain:output_type -> execution.ValidationReceipt + 1, // 37: execution.Execution.UpdateForkChoice:output_type -> execution.ForkChoiceReceipt + 17, // 38: execution.Execution.AssembleBlock:output_type -> types.ExecutionPayload + 6, // 39: execution.Execution.GetHeader:output_type -> execution.GetHeaderResponse + 7, // 40: execution.Execution.GetBody:output_type -> execution.GetBodyResponse + 3, // 41: execution.Execution.IsCanonicalHash:output_type -> execution.IsCanonicalResponse + 8, // 42: execution.Execution.GetHeaderHashNumber:output_type -> execution.GetHeaderHashNumberResponse + 34, // [34:43] is the sub-list for method output_type + 25, // [25:34] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name } func init() { file_execution_execution_proto_init() } diff --git a/gointerfaces/execution/execution_grpc.pb.go b/gointerfaces/execution/execution_grpc.pb.go index b5ecadbd6..6dafb6ae8 100644 --- a/gointerfaces/execution/execution_grpc.pb.go +++ b/gointerfaces/execution/execution_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.12 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.2 // source: execution/execution.proto package execution @@ -19,6 +19,18 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Execution_InsertHeaders_FullMethodName = "/execution.Execution/InsertHeaders" + Execution_InsertBodies_FullMethodName = "/execution.Execution/InsertBodies" + Execution_ValidateChain_FullMethodName = "/execution.Execution/ValidateChain" + Execution_UpdateForkChoice_FullMethodName = "/execution.Execution/UpdateForkChoice" + Execution_AssembleBlock_FullMethodName = "/execution.Execution/AssembleBlock" + Execution_GetHeader_FullMethodName = "/execution.Execution/GetHeader" + Execution_GetBody_FullMethodName = "/execution.Execution/GetBody" + Execution_IsCanonicalHash_FullMethodName = "/execution.Execution/IsCanonicalHash" + Execution_GetHeaderHashNumber_FullMethodName = "/execution.Execution/GetHeaderHashNumber" +) + // ExecutionClient is the client API for Execution service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -47,7 +59,7 @@ func NewExecutionClient(cc grpc.ClientConnInterface) ExecutionClient { func (c *executionClient) InsertHeaders(ctx context.Context, in *InsertHeadersRequest, opts ...grpc.CallOption) (*EmptyMessage, error) { out := new(EmptyMessage) - err := c.cc.Invoke(ctx, "/execution.Execution/InsertHeaders", in, out, opts...) + err := c.cc.Invoke(ctx, Execution_InsertHeaders_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -56,7 +68,7 @@ func (c *executionClient) InsertHeaders(ctx context.Context, in *InsertHeadersRe func (c *executionClient) InsertBodies(ctx context.Context, in *InsertBodiesRequest, opts ...grpc.CallOption) (*EmptyMessage, error) { out := new(EmptyMessage) - err := c.cc.Invoke(ctx, "/execution.Execution/InsertBodies", in, out, opts...) + err := c.cc.Invoke(ctx, Execution_InsertBodies_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -65,7 +77,7 @@ func (c *executionClient) InsertBodies(ctx context.Context, in *InsertBodiesRequ func (c *executionClient) ValidateChain(ctx context.Context, in *types.H256, opts ...grpc.CallOption) (*ValidationReceipt, error) { out := new(ValidationReceipt) - err := c.cc.Invoke(ctx, "/execution.Execution/ValidateChain", in, out, opts...) + err := c.cc.Invoke(ctx, Execution_ValidateChain_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -74,7 +86,7 @@ func (c *executionClient) ValidateChain(ctx context.Context, in *types.H256, opt func (c *executionClient) UpdateForkChoice(ctx context.Context, in *types.H256, opts ...grpc.CallOption) (*ForkChoiceReceipt, error) { out := new(ForkChoiceReceipt) - err := c.cc.Invoke(ctx, "/execution.Execution/UpdateForkChoice", in, out, opts...) + err := c.cc.Invoke(ctx, Execution_UpdateForkChoice_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -83,7 +95,7 @@ func (c *executionClient) UpdateForkChoice(ctx context.Context, in *types.H256, func (c *executionClient) AssembleBlock(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*types.ExecutionPayload, error) { out := new(types.ExecutionPayload) - err := c.cc.Invoke(ctx, "/execution.Execution/AssembleBlock", in, out, opts...) + err := c.cc.Invoke(ctx, Execution_AssembleBlock_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -92,7 +104,7 @@ func (c *executionClient) AssembleBlock(ctx context.Context, in *EmptyMessage, o func (c *executionClient) GetHeader(ctx context.Context, in *GetSegmentRequest, opts ...grpc.CallOption) (*GetHeaderResponse, error) { out := new(GetHeaderResponse) - err := c.cc.Invoke(ctx, "/execution.Execution/GetHeader", in, out, opts...) + err := c.cc.Invoke(ctx, Execution_GetHeader_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -101,7 +113,7 @@ func (c *executionClient) GetHeader(ctx context.Context, in *GetSegmentRequest, func (c *executionClient) GetBody(ctx context.Context, in *GetSegmentRequest, opts ...grpc.CallOption) (*GetBodyResponse, error) { out := new(GetBodyResponse) - err := c.cc.Invoke(ctx, "/execution.Execution/GetBody", in, out, opts...) + err := c.cc.Invoke(ctx, Execution_GetBody_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -110,7 +122,7 @@ func (c *executionClient) GetBody(ctx context.Context, in *GetSegmentRequest, op func (c *executionClient) IsCanonicalHash(ctx context.Context, in *types.H256, opts ...grpc.CallOption) (*IsCanonicalResponse, error) { out := new(IsCanonicalResponse) - err := c.cc.Invoke(ctx, "/execution.Execution/IsCanonicalHash", in, out, opts...) + err := c.cc.Invoke(ctx, Execution_IsCanonicalHash_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -119,7 +131,7 @@ func (c *executionClient) IsCanonicalHash(ctx context.Context, in *types.H256, o func (c *executionClient) GetHeaderHashNumber(ctx context.Context, in *types.H256, opts ...grpc.CallOption) (*GetHeaderHashNumberResponse, error) { out := new(GetHeaderHashNumberResponse) - err := c.cc.Invoke(ctx, "/execution.Execution/GetHeaderHashNumber", in, out, opts...) + err := c.cc.Invoke(ctx, Execution_GetHeaderHashNumber_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -199,7 +211,7 @@ func _Execution_InsertHeaders_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/execution.Execution/InsertHeaders", + FullMethod: Execution_InsertHeaders_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ExecutionServer).InsertHeaders(ctx, req.(*InsertHeadersRequest)) @@ -217,7 +229,7 @@ func _Execution_InsertBodies_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/execution.Execution/InsertBodies", + FullMethod: Execution_InsertBodies_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ExecutionServer).InsertBodies(ctx, req.(*InsertBodiesRequest)) @@ -235,7 +247,7 @@ func _Execution_ValidateChain_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/execution.Execution/ValidateChain", + FullMethod: Execution_ValidateChain_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ExecutionServer).ValidateChain(ctx, req.(*types.H256)) @@ -253,7 +265,7 @@ func _Execution_UpdateForkChoice_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/execution.Execution/UpdateForkChoice", + FullMethod: Execution_UpdateForkChoice_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ExecutionServer).UpdateForkChoice(ctx, req.(*types.H256)) @@ -271,7 +283,7 @@ func _Execution_AssembleBlock_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/execution.Execution/AssembleBlock", + FullMethod: Execution_AssembleBlock_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ExecutionServer).AssembleBlock(ctx, req.(*EmptyMessage)) @@ -289,7 +301,7 @@ func _Execution_GetHeader_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/execution.Execution/GetHeader", + FullMethod: Execution_GetHeader_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ExecutionServer).GetHeader(ctx, req.(*GetSegmentRequest)) @@ -307,7 +319,7 @@ func _Execution_GetBody_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ 
Server: srv, - FullMethod: "/execution.Execution/GetBody", + FullMethod: Execution_GetBody_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ExecutionServer).GetBody(ctx, req.(*GetSegmentRequest)) @@ -325,7 +337,7 @@ func _Execution_IsCanonicalHash_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/execution.Execution/IsCanonicalHash", + FullMethod: Execution_IsCanonicalHash_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ExecutionServer).IsCanonicalHash(ctx, req.(*types.H256)) @@ -343,7 +355,7 @@ func _Execution_GetHeaderHashNumber_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/execution.Execution/GetHeaderHashNumber", + FullMethod: Execution_GetHeaderHashNumber_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ExecutionServer).GetHeaderHashNumber(ctx, req.(*types.H256)) diff --git a/gointerfaces/remote/ethbackend.pb.go b/gointerfaces/remote/ethbackend.pb.go index fd424bbd8..cff259a71 100644 --- a/gointerfaces/remote/ethbackend.pb.go +++ b/gointerfaces/remote/ethbackend.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.22.2 // source: remote/ethbackend.proto package remote @@ -392,7 +392,7 @@ type EngineGetPayloadRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - PayloadId uint64 `protobuf:"varint,1,opt,name=payloadId,proto3" json:"payloadId,omitempty"` + PayloadId uint64 `protobuf:"varint,1,opt,name=payload_id,json=payloadId,proto3" json:"payload_id,omitempty"` } func (x *EngineGetPayloadRequest) Reset() { @@ -439,7 +439,7 @@ type EngineGetBlobsBundleRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - PayloadId uint64 `protobuf:"varint,1,opt,name=payloadId,proto3" json:"payloadId,omitempty"` + PayloadId uint64 `protobuf:"varint,1,opt,name=payload_id,json=payloadId,proto3" json:"payload_id,omitempty"` } func (x *EngineGetBlobsBundleRequest) Reset() { @@ -487,8 +487,8 @@ type EnginePayloadStatus struct { unknownFields protoimpl.UnknownFields Status EngineStatus `protobuf:"varint,1,opt,name=status,proto3,enum=remote.EngineStatus" json:"status,omitempty"` - LatestValidHash *types.H256 `protobuf:"bytes,2,opt,name=latestValidHash,proto3" json:"latestValidHash,omitempty"` - ValidationError string `protobuf:"bytes,3,opt,name=validationError,proto3" json:"validationError,omitempty"` + LatestValidHash *types.H256 `protobuf:"bytes,2,opt,name=latest_valid_hash,json=latestValidHash,proto3" json:"latest_valid_hash,omitempty"` + ValidationError string `protobuf:"bytes,3,opt,name=validation_error,json=validationError,proto3" json:"validation_error,omitempty"` } func (x *EnginePayloadStatus) Reset() { @@ -551,8 +551,8 @@ type EnginePayloadAttributes struct { Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // v1 - no withdrawals, v2 - with withdrawals Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - PrevRandao *types.H256 `protobuf:"bytes,3,opt,name=prevRandao,proto3" json:"prevRandao,omitempty"` - SuggestedFeeRecipient *types.H160 `protobuf:"bytes,4,opt,name=suggestedFeeRecipient,proto3" json:"suggestedFeeRecipient,omitempty"` + PrevRandao 
*types.H256 `protobuf:"bytes,3,opt,name=prev_randao,json=prevRandao,proto3" json:"prev_randao,omitempty"` + SuggestedFeeRecipient *types.H160 `protobuf:"bytes,4,opt,name=suggested_fee_recipient,json=suggestedFeeRecipient,proto3" json:"suggested_fee_recipient,omitempty"` Withdrawals []*types.Withdrawal `protobuf:"bytes,5,rep,name=withdrawals,proto3" json:"withdrawals,omitempty"` } @@ -628,9 +628,9 @@ type EngineForkChoiceState struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - HeadBlockHash *types.H256 `protobuf:"bytes,1,opt,name=headBlockHash,proto3" json:"headBlockHash,omitempty"` - SafeBlockHash *types.H256 `protobuf:"bytes,2,opt,name=safeBlockHash,proto3" json:"safeBlockHash,omitempty"` - FinalizedBlockHash *types.H256 `protobuf:"bytes,3,opt,name=finalizedBlockHash,proto3" json:"finalizedBlockHash,omitempty"` + HeadBlockHash *types.H256 `protobuf:"bytes,1,opt,name=head_block_hash,json=headBlockHash,proto3" json:"head_block_hash,omitempty"` + SafeBlockHash *types.H256 `protobuf:"bytes,2,opt,name=safe_block_hash,json=safeBlockHash,proto3" json:"safe_block_hash,omitempty"` + FinalizedBlockHash *types.H256 `protobuf:"bytes,3,opt,name=finalized_block_hash,json=finalizedBlockHash,proto3" json:"finalized_block_hash,omitempty"` } func (x *EngineForkChoiceState) Reset() { @@ -691,8 +691,8 @@ type EngineForkChoiceUpdatedRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ForkchoiceState *EngineForkChoiceState `protobuf:"bytes,1,opt,name=forkchoiceState,proto3" json:"forkchoiceState,omitempty"` - PayloadAttributes *EnginePayloadAttributes `protobuf:"bytes,2,opt,name=payloadAttributes,proto3" json:"payloadAttributes,omitempty"` + ForkchoiceState *EngineForkChoiceState `protobuf:"bytes,1,opt,name=forkchoice_state,json=forkchoiceState,proto3" json:"forkchoice_state,omitempty"` + PayloadAttributes *EnginePayloadAttributes `protobuf:"bytes,2,opt,name=payload_attributes,json=payloadAttributes,proto3" json:"payload_attributes,omitempty"` } func (x *EngineForkChoiceUpdatedRequest) Reset() { @@ -746,8 +746,8 @@ type EngineForkChoiceUpdatedResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - PayloadStatus *EnginePayloadStatus `protobuf:"bytes,1,opt,name=payloadStatus,proto3" json:"payloadStatus,omitempty"` - PayloadId uint64 `protobuf:"varint,2,opt,name=payloadId,proto3" json:"payloadId,omitempty"` + PayloadStatus *EnginePayloadStatus `protobuf:"bytes,1,opt,name=payload_status,json=payloadStatus,proto3" json:"payload_status,omitempty"` + PayloadId uint64 `protobuf:"varint,2,opt,name=payload_id,json=payloadId,proto3" json:"payload_id,omitempty"` } func (x *EngineForkChoiceUpdatedResponse) Reset() { @@ -801,8 +801,8 @@ type EngineGetPayloadResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ExecutionPayload *types.ExecutionPayload `protobuf:"bytes,1,opt,name=executionPayload,proto3" json:"executionPayload,omitempty"` - BlockValue *types.H256 `protobuf:"bytes,2,opt,name=blockValue,proto3" json:"blockValue,omitempty"` + ExecutionPayload *types.ExecutionPayload `protobuf:"bytes,1,opt,name=execution_payload,json=executionPayload,proto3" json:"execution_payload,omitempty"` + BlockValue *types.H256 `protobuf:"bytes,2,opt,name=block_value,json=blockValue,proto3" json:"block_value,omitempty"` } func (x *EngineGetPayloadResponse) Reset() { @@ -979,7 +979,7 @@ type ClientVersionReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - NodeName string 
`protobuf:"bytes,1,opt,name=nodeName,proto3" json:"nodeName,omitempty"` + NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` } func (x *ClientVersionReply) Reset() { @@ -1128,9 +1128,9 @@ type LogsFilterRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - AllAddresses bool `protobuf:"varint,1,opt,name=allAddresses,proto3" json:"allAddresses,omitempty"` + AllAddresses bool `protobuf:"varint,1,opt,name=all_addresses,json=allAddresses,proto3" json:"all_addresses,omitempty"` Addresses []*types.H160 `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"` - AllTopics bool `protobuf:"varint,3,opt,name=allTopics,proto3" json:"allTopics,omitempty"` + AllTopics bool `protobuf:"varint,3,opt,name=all_topics,json=allTopics,proto3" json:"all_topics,omitempty"` Topics []*types.H256 `protobuf:"bytes,4,rep,name=topics,proto3" json:"topics,omitempty"` } @@ -1200,13 +1200,13 @@ type SubscribeLogsReply struct { unknownFields protoimpl.UnknownFields Address *types.H160 `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - BlockHash *types.H256 `protobuf:"bytes,2,opt,name=blockHash,proto3" json:"blockHash,omitempty"` - BlockNumber uint64 `protobuf:"varint,3,opt,name=blockNumber,proto3" json:"blockNumber,omitempty"` + BlockHash *types.H256 `protobuf:"bytes,2,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` + BlockNumber uint64 `protobuf:"varint,3,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"` Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` - LogIndex uint64 `protobuf:"varint,5,opt,name=logIndex,proto3" json:"logIndex,omitempty"` + LogIndex uint64 `protobuf:"varint,5,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` Topics []*types.H256 `protobuf:"bytes,6,rep,name=topics,proto3" json:"topics,omitempty"` - TransactionHash *types.H256 `protobuf:"bytes,7,opt,name=transactionHash,proto3" json:"transactionHash,omitempty"` - TransactionIndex uint64 `protobuf:"varint,8,opt,name=transactionIndex,proto3" json:"transactionIndex,omitempty"` + TransactionHash *types.H256 `protobuf:"bytes,7,opt,name=transaction_hash,json=transactionHash,proto3" json:"transaction_hash,omitempty"` + TransactionIndex uint64 `protobuf:"varint,8,opt,name=transaction_index,json=transactionIndex,proto3" json:"transaction_index,omitempty"` Removed bool `protobuf:"varint,9,opt,name=removed,proto3" json:"removed,omitempty"` } @@ -1310,8 +1310,8 @@ type BlockRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BlockHeight uint64 `protobuf:"varint,2,opt,name=blockHeight,proto3" json:"blockHeight,omitempty"` - BlockHash *types.H256 `protobuf:"bytes,3,opt,name=blockHash,proto3" json:"blockHash,omitempty"` + BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` + BlockHash *types.H256 `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` } func (x *BlockRequest) Reset() { @@ -1365,7 +1365,7 @@ type BlockReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BlockRlp []byte `protobuf:"bytes,1,opt,name=blockRlp,proto3" json:"blockRlp,omitempty"` + BlockRlp []byte `protobuf:"bytes,1,opt,name=block_rlp,json=blockRlp,proto3" json:"block_rlp,omitempty"` Senders []byte `protobuf:"bytes,2,opt,name=senders,proto3" json:"senders,omitempty"` } @@ -1420,7 +1420,7 @@ type TxnLookupRequest 
struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TxnHash *types.H256 `protobuf:"bytes,1,opt,name=txnHash,proto3" json:"txnHash,omitempty"` + TxnHash *types.H256 `protobuf:"bytes,1,opt,name=txn_hash,json=txnHash,proto3" json:"txn_hash,omitempty"` } func (x *TxnLookupRequest) Reset() { @@ -1467,7 +1467,7 @@ type TxnLookupReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BlockNumber uint64 `protobuf:"varint,1,opt,name=blockNumber,proto3" json:"blockNumber,omitempty"` + BlockNumber uint64 `protobuf:"varint,1,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"` } func (x *TxnLookupReply) Reset() { @@ -1561,7 +1561,7 @@ type NodesInfoReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - NodesInfo []*types.NodeInfoReply `protobuf:"bytes,1,rep,name=nodesInfo,proto3" json:"nodesInfo,omitempty"` + NodesInfo []*types.NodeInfoReply `protobuf:"bytes,1,rep,name=nodes_info,json=nodesInfo,proto3" json:"nodes_info,omitempty"` } func (x *NodesInfoReply) Reset() { @@ -1655,7 +1655,7 @@ type PendingBlockReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BlockRlp []byte `protobuf:"bytes,1,opt,name=blockRlp,proto3" json:"blockRlp,omitempty"` + BlockRlp []byte `protobuf:"bytes,1,opt,name=block_rlp,json=blockRlp,proto3" json:"block_rlp,omitempty"` } func (x *PendingBlockReply) Reset() { @@ -1866,280 +1866,282 @@ var file_remote_ethbackend_proto_rawDesc = []byte{ 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x29, 0x0a, 0x11, 0x4e, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x37, 0x0a, 0x17, 0x45, 0x6e, 0x67, 0x69, + 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x38, 0x0a, 0x17, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x49, - 0x64, 0x22, 0x3b, 0x0a, 0x1b, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x42, 0x6c, - 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x09, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0xa4, - 0x01, 0x0a, 0x13, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x35, 0x0a, 0x0f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, 0x6c, 0x61, 0x74, 0x65, - 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x28, 0x0a, 0x0f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 
0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xf6, 0x01, 0x0a, 0x17, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2b, 0x0a, 0x0a, 0x70, 0x72, 0x65, - 0x76, 0x52, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x76, - 0x52, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x12, 0x41, 0x0a, 0x15, 0x73, 0x75, 0x67, 0x67, 0x65, 0x73, - 0x74, 0x65, 0x64, 0x46, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, - 0x36, 0x30, 0x52, 0x15, 0x73, 0x75, 0x67, 0x67, 0x65, 0x73, 0x74, 0x65, 0x64, 0x46, 0x65, 0x65, - 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74, - 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, - 0x6c, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x22, 0xba, - 0x01, 0x0a, 0x15, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, - 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x31, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d, 0x68, 0x65, - 0x61, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x31, 0x0a, 0x0d, 0x73, - 0x61, 0x66, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, - 0x0d, 0x73, 0x61, 0x66, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3b, - 0x0a, 0x12, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x48, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x12, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, - 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x22, 0xb8, 0x01, 0x0a, 0x1e, + 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x49, 0x64, 0x22, 0x3c, 0x0a, 0x1b, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x42, + 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, + 0x22, 0xa7, 0x01, 0x0a, 0x13, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 
0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x11, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, + 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, + 0x29, 0x0a, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xf9, 0x01, 0x0a, 0x17, 0x45, + 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2c, + 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x72, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, + 0x52, 0x0a, 0x70, 0x72, 0x65, 0x76, 0x52, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x12, 0x43, 0x0a, 0x17, + 0x73, 0x75, 0x67, 0x67, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x72, 0x65, + 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x15, 0x73, 0x75, 0x67, 0x67, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x46, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, + 0x74, 0x12, 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, + 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, + 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x22, 0xc0, 0x01, 0x0a, 0x15, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x33, 0x0a, 0x0f, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, + 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x33, 0x0a, 0x0f, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d, 0x73, 0x61, 0x66, + 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3d, 0x0a, 0x14, 0x66, 0x69, + 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 
0x12, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x22, 0xba, 0x01, 0x0a, 0x1e, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, 0x10, + 0x66, 0x6f, 0x72, 0x6b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, - 0x0a, 0x0f, 0x66, 0x6f, 0x72, 0x6b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x6b, 0x63, 0x68, 0x6f, 0x69, - 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x4d, 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x6b, 0x63, 0x68, 0x6f, 0x69, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x12, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x1f, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x1f, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, 0x0a, - 0x09, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x09, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x8c, 0x01, 0x0a, 0x18, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x10, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x2b, 0x0a, - 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 
0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x26, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x22, 0x16, 0x0a, 0x14, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6e, 0x6f, - 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, - 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x35, 0x0a, 0x10, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x47, 0x0a, - 0x0e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, + 0x0a, 0x0a, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x09, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x8e, 0x01, + 0x0a, 0x18, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x11, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x10, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x2c, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, + 0x35, 0x36, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x18, + 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x26, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, + 0x22, 0x16, 0x0a, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x31, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, + 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x35, 0x0a, 0x10, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xa5, 0x01, 0x0a, 0x11, 0x4c, 0x6f, 0x67, 0x73, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0c, - 0x61, 0x6c, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, - 0x12, 0x29, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, - 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x61, - 0x6c, 0x6c, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x61, 0x6c, 0x6c, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x22, 0xda, - 0x02, 0x0a, 0x12, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x67, 0x73, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, - 0x31, 0x36, 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x09, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, - 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, - 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, - 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x23, 0x0a, 0x06, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x35, - 0x0a, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, - 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, - 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x61, 0x73, 0x68, 
0x12, 0x2a, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x5b, 0x0a, 0x0c, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x29, 0x0a, - 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x22, 0x42, 0x0a, 0x0a, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x6c, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x6c, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x22, 0x39, 0x0a, 0x10, - 0x54, 0x78, 0x6e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x25, 0x0a, 0x07, 0x74, 0x78, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x07, - 0x74, 0x78, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x22, 0x32, 0x0a, 0x0e, 0x54, 0x78, 0x6e, 0x4c, 0x6f, - 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x28, 0x0a, 0x10, 0x4e, - 0x6f, 0x64, 0x65, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x44, 0x0a, 0x0e, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x73, - 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x33, 0x0a, 0x0a, 0x50, - 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x65, 0x65, - 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, - 0x22, 0x2f, 0x0a, 0x11, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6c, + 0x70, 0x65, 0x22, 0x47, 0x0a, 0x0e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x21, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 
0x74, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xa7, 0x01, 0x0a, 0x11, + 0x4c, 0x6f, 0x67, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, + 0x12, 0x23, 0x0a, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x73, 0x22, 0xdf, 0x02, 0x0a, 0x12, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x2a, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, + 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x12, 0x23, 0x0a, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, + 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, + 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x18, 0x0a, + 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x5d, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 
0x6f, 0x63, 0x6b, + 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2a, 0x0a, 0x0a, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x22, 0x43, 0x0a, 0x0a, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6c, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6c, - 0x70, 0x22, 0x4c, 0x0a, 0x25, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, - 0x68, 0x56, 0x31, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x06, 0x68, 0x61, - 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, - 0x54, 0x0a, 0x26, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x56, 0x31, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x59, 0x0a, 0x20, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, - 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x56, - 0x31, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x06, 0x62, 0x6f, 0x64, - 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x42, 0x6f, 0x64, 0x79, 0x56, 0x31, 0x52, 0x06, 0x62, 0x6f, 0x64, 0x69, 0x65, 0x73, - 0x2a, 0x4a, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x45, 0x41, - 0x44, 0x45, 0x52, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, - 0x5f, 0x4c, 0x4f, 0x47, 0x53, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x45, 0x4e, 0x44, 0x49, - 0x4e, 0x47, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x45, - 0x57, 0x5f, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x03, 0x2a, 0x59, 0x0a, 0x0c, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x09, 0x0a, 0x05, - 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, - 0x49, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x59, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x10, - 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, - 0x16, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, - 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x04, 0x32, 0xb8, 0x0b, 0x0a, 0x0a, 0x45, 0x54, 0x48, 0x42, - 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x12, 0x3d, 0x0a, 0x09, 0x45, 0x74, 0x68, 0x65, 0x72, 0x62, - 0x61, 0x73, 0x65, 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x74, 0x68, - 0x65, 
0x72, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x62, 0x61, 0x73, 0x65, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x40, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4e, 0x65, 0x74, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x46, 0x0a, 0x0c, 0x4e, 0x65, 0x74, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x4e, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4e, 0x65, - 0x74, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x48, 0x0a, 0x10, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x65, 0x77, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x12, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x1b, 0x2e, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x6a, 0x0a, 0x17, 0x45, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x26, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x46, 0x6f, 0x72, 0x6b, - 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x10, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, - 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x79, 0x0a, 0x1e, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x56, 0x31, 0x12, 0x2d, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, + 0x70, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x22, 0x3a, 0x0a, 0x10, 0x54, + 0x78, 0x6e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x26, 0x0a, 0x08, 0x74, 0x78, 0x6e, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x07, + 0x74, 0x78, 0x6e, 0x48, 0x61, 
0x73, 0x68, 0x22, 0x33, 0x0a, 0x0e, 0x54, 0x78, 0x6e, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x28, 0x0a, 0x10, + 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x45, 0x0a, 0x0e, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x33, 0x0a, 0x0a, 0x6e, 0x6f, 0x64, 0x65, + 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x33, 0x0a, + 0x0a, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x70, + 0x65, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x70, 0x65, 0x65, + 0x72, 0x73, 0x22, 0x30, 0x0a, 0x11, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x72, 0x6c, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x52, 0x6c, 0x70, 0x22, 0x4c, 0x0a, 0x25, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, - 0x48, 0x61, 0x73, 0x68, 0x56, 0x31, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, + 0x48, 0x61, 0x73, 0x68, 0x56, 0x31, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, + 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x22, 0x54, 0x0a, 0x26, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x56, 0x31, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x59, 0x0a, 0x20, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, + 0x65, 0x73, 0x56, 0x31, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x06, + 0x62, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x79, 0x56, 0x31, 0x52, 0x06, 0x62, 0x6f, 0x64, + 0x69, 0x65, 0x73, 0x2a, 0x4a, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0a, 0x0a, 0x06, + 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x45, 0x4e, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x4f, 0x47, 0x53, 0x10, 
0x01, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x45, + 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x02, 0x12, 0x10, 0x0a, + 0x0c, 0x4e, 0x45, 0x57, 0x5f, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x03, 0x2a, + 0x59, 0x0a, 0x0c, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x09, 0x0a, 0x05, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x59, 0x4e, 0x43, 0x49, + 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x45, 0x44, + 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x42, 0x4c, + 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x04, 0x32, 0xb8, 0x0b, 0x0a, 0x0a, 0x45, + 0x54, 0x48, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x12, 0x3d, 0x0a, 0x09, 0x45, 0x74, 0x68, + 0x65, 0x72, 0x62, 0x61, 0x73, 0x65, 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x45, 0x74, 0x68, 0x65, 0x72, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x62, + 0x61, 0x73, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x40, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x4e, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x46, 0x0a, 0x0c, 0x4e, 0x65, + 0x74, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x4e, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x48, 0x0a, 0x10, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x65, 0x77, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, + 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x6a, 0x0a, 0x17, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x26, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, + 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x46, + 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x10, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x20, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, - 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x56, 0x31, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1f, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x79, 0x0a, 0x1e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x56, + 0x31, 0x12, 0x2d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, - 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x56, 0x31, 0x12, 0x2e, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x56, 0x31, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x56, 0x31, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x16, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, - 0x74, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, 0x31, 0x12, 0x23, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, - 0x74, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x42, 0x6c, 0x6f, 0x62, - 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, 0x31, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x4f, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x0d, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1a, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, - 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x75, - 0x62, 
0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x30, 0x01, 0x12, 0x4a, - 0x0a, 0x0d, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x12, - 0x19, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x67, - 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, 0x31, 0x0a, 0x05, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3d, 0x0a, - 0x09, 0x54, 0x78, 0x6e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x54, 0x78, 0x6e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x54, 0x78, - 0x6e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x08, - 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, - 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x50, 0x65, - 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x41, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3b, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x56, 0x31, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x28, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, + 0x56, 0x31, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1f, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, + 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x56, 0x31, 0x12, 0x2e, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, + 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x56, 0x31, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, + 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 
0x6f, 0x64, 0x69, 0x65, 0x73, 0x56, 0x31, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x16, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, + 0x31, 0x12, 0x23, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x42, + 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, 0x31, 0x12, 0x36, 0x0a, 0x07, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x13, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4f, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x0d, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x3f, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x18, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x30, + 0x01, 0x12, 0x4a, 0x0a, 0x0d, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, + 0x67, 0x73, 0x12, 0x19, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x73, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, 0x31, 0x0a, + 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x3d, 0x0a, 0x09, 0x54, 0x78, 0x6e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x18, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x54, 0x78, 0x6e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x54, 0x78, 0x6e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x3c, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 
0x18, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x33, 0x0a, + 0x05, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x41, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x3b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2206,29 +2208,29 @@ var file_remote_ethbackend_proto_goTypes = []interface{}{ var file_remote_ethbackend_proto_depIdxs = []int32{ 35, // 0: remote.EtherbaseReply.address:type_name -> types.H160 1, // 1: remote.EnginePayloadStatus.status:type_name -> remote.EngineStatus - 36, // 2: remote.EnginePayloadStatus.latestValidHash:type_name -> types.H256 - 36, // 3: remote.EnginePayloadAttributes.prevRandao:type_name -> types.H256 - 35, // 4: remote.EnginePayloadAttributes.suggestedFeeRecipient:type_name -> types.H160 + 36, // 2: remote.EnginePayloadStatus.latest_valid_hash:type_name -> types.H256 + 36, // 3: remote.EnginePayloadAttributes.prev_randao:type_name -> types.H256 + 35, // 4: remote.EnginePayloadAttributes.suggested_fee_recipient:type_name -> types.H160 37, // 5: remote.EnginePayloadAttributes.withdrawals:type_name -> types.Withdrawal - 36, // 6: remote.EngineForkChoiceState.headBlockHash:type_name -> types.H256 - 36, // 7: remote.EngineForkChoiceState.safeBlockHash:type_name -> types.H256 - 36, // 8: remote.EngineForkChoiceState.finalizedBlockHash:type_name -> types.H256 - 12, // 9: remote.EngineForkChoiceUpdatedRequest.forkchoiceState:type_name -> remote.EngineForkChoiceState - 11, // 10: remote.EngineForkChoiceUpdatedRequest.payloadAttributes:type_name -> remote.EnginePayloadAttributes - 10, // 11: remote.EngineForkChoiceUpdatedResponse.payloadStatus:type_name -> remote.EnginePayloadStatus - 38, // 12: remote.EngineGetPayloadResponse.executionPayload:type_name -> types.ExecutionPayload - 36, // 13: remote.EngineGetPayloadResponse.blockValue:type_name -> types.H256 + 36, // 6: remote.EngineForkChoiceState.head_block_hash:type_name -> types.H256 + 36, // 7: remote.EngineForkChoiceState.safe_block_hash:type_name -> types.H256 + 36, // 8: remote.EngineForkChoiceState.finalized_block_hash:type_name -> types.H256 + 12, // 9: remote.EngineForkChoiceUpdatedRequest.forkchoice_state:type_name -> remote.EngineForkChoiceState + 11, // 10: remote.EngineForkChoiceUpdatedRequest.payload_attributes:type_name -> remote.EnginePayloadAttributes + 10, // 11: remote.EngineForkChoiceUpdatedResponse.payload_status:type_name -> remote.EnginePayloadStatus + 38, // 12: remote.EngineGetPayloadResponse.execution_payload:type_name -> types.ExecutionPayload + 36, // 13: remote.EngineGetPayloadResponse.block_value:type_name 
-> types.H256 0, // 14: remote.SubscribeRequest.type:type_name -> remote.Event 0, // 15: remote.SubscribeReply.type:type_name -> remote.Event 35, // 16: remote.LogsFilterRequest.addresses:type_name -> types.H160 36, // 17: remote.LogsFilterRequest.topics:type_name -> types.H256 35, // 18: remote.SubscribeLogsReply.address:type_name -> types.H160 - 36, // 19: remote.SubscribeLogsReply.blockHash:type_name -> types.H256 + 36, // 19: remote.SubscribeLogsReply.block_hash:type_name -> types.H256 36, // 20: remote.SubscribeLogsReply.topics:type_name -> types.H256 - 36, // 21: remote.SubscribeLogsReply.transactionHash:type_name -> types.H256 - 36, // 22: remote.BlockRequest.blockHash:type_name -> types.H256 - 36, // 23: remote.TxnLookupRequest.txnHash:type_name -> types.H256 - 39, // 24: remote.NodesInfoReply.nodesInfo:type_name -> types.NodeInfoReply + 36, // 21: remote.SubscribeLogsReply.transaction_hash:type_name -> types.H256 + 36, // 22: remote.BlockRequest.block_hash:type_name -> types.H256 + 36, // 23: remote.TxnLookupRequest.txn_hash:type_name -> types.H256 + 39, // 24: remote.NodesInfoReply.nodes_info:type_name -> types.NodeInfoReply 40, // 25: remote.PeersReply.peers:type_name -> types.PeerInfo 36, // 26: remote.EngineGetPayloadBodiesByHashV1Request.hashes:type_name -> types.H256 41, // 27: remote.EngineGetPayloadBodiesV1Response.bodies:type_name -> types.ExecutionPayloadBodyV1 diff --git a/gointerfaces/remote/ethbackend_grpc.pb.go b/gointerfaces/remote/ethbackend_grpc.pb.go index 8687dc933..ea364e79f 100644 --- a/gointerfaces/remote/ethbackend_grpc.pb.go +++ b/gointerfaces/remote/ethbackend_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.12 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.2 // source: remote/ethbackend.proto package remote @@ -20,6 +20,28 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
const _ = grpc.SupportPackageIsVersion7 +const ( + ETHBACKEND_Etherbase_FullMethodName = "/remote.ETHBACKEND/Etherbase" + ETHBACKEND_NetVersion_FullMethodName = "/remote.ETHBACKEND/NetVersion" + ETHBACKEND_NetPeerCount_FullMethodName = "/remote.ETHBACKEND/NetPeerCount" + ETHBACKEND_EngineNewPayload_FullMethodName = "/remote.ETHBACKEND/EngineNewPayload" + ETHBACKEND_EngineForkChoiceUpdated_FullMethodName = "/remote.ETHBACKEND/EngineForkChoiceUpdated" + ETHBACKEND_EngineGetPayload_FullMethodName = "/remote.ETHBACKEND/EngineGetPayload" + ETHBACKEND_EngineGetPayloadBodiesByHashV1_FullMethodName = "/remote.ETHBACKEND/EngineGetPayloadBodiesByHashV1" + ETHBACKEND_EngineGetPayloadBodiesByRangeV1_FullMethodName = "/remote.ETHBACKEND/EngineGetPayloadBodiesByRangeV1" + ETHBACKEND_EngineGetBlobsBundleV1_FullMethodName = "/remote.ETHBACKEND/EngineGetBlobsBundleV1" + ETHBACKEND_Version_FullMethodName = "/remote.ETHBACKEND/Version" + ETHBACKEND_ProtocolVersion_FullMethodName = "/remote.ETHBACKEND/ProtocolVersion" + ETHBACKEND_ClientVersion_FullMethodName = "/remote.ETHBACKEND/ClientVersion" + ETHBACKEND_Subscribe_FullMethodName = "/remote.ETHBACKEND/Subscribe" + ETHBACKEND_SubscribeLogs_FullMethodName = "/remote.ETHBACKEND/SubscribeLogs" + ETHBACKEND_Block_FullMethodName = "/remote.ETHBACKEND/Block" + ETHBACKEND_TxnLookup_FullMethodName = "/remote.ETHBACKEND/TxnLookup" + ETHBACKEND_NodeInfo_FullMethodName = "/remote.ETHBACKEND/NodeInfo" + ETHBACKEND_Peers_FullMethodName = "/remote.ETHBACKEND/Peers" + ETHBACKEND_PendingBlock_FullMethodName = "/remote.ETHBACKEND/PendingBlock" +) + // ETHBACKENDClient is the client API for ETHBACKEND service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -70,7 +92,7 @@ func NewETHBACKENDClient(cc grpc.ClientConnInterface) ETHBACKENDClient { func (c *eTHBACKENDClient) Etherbase(ctx context.Context, in *EtherbaseRequest, opts ...grpc.CallOption) (*EtherbaseReply, error) { out := new(EtherbaseReply) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/Etherbase", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_Etherbase_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -79,7 +101,7 @@ func (c *eTHBACKENDClient) Etherbase(ctx context.Context, in *EtherbaseRequest, func (c *eTHBACKENDClient) NetVersion(ctx context.Context, in *NetVersionRequest, opts ...grpc.CallOption) (*NetVersionReply, error) { out := new(NetVersionReply) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/NetVersion", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_NetVersion_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -88,7 +110,7 @@ func (c *eTHBACKENDClient) NetVersion(ctx context.Context, in *NetVersionRequest func (c *eTHBACKENDClient) NetPeerCount(ctx context.Context, in *NetPeerCountRequest, opts ...grpc.CallOption) (*NetPeerCountReply, error) { out := new(NetPeerCountReply) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/NetPeerCount", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_NetPeerCount_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -97,7 +119,7 @@ func (c *eTHBACKENDClient) NetPeerCount(ctx context.Context, in *NetPeerCountReq func (c *eTHBACKENDClient) EngineNewPayload(ctx context.Context, in *types.ExecutionPayload, opts ...grpc.CallOption) (*EnginePayloadStatus, error) { out := new(EnginePayloadStatus) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/EngineNewPayload", in, out, opts...) 
+ err := c.cc.Invoke(ctx, ETHBACKEND_EngineNewPayload_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -106,7 +128,7 @@ func (c *eTHBACKENDClient) EngineNewPayload(ctx context.Context, in *types.Execu func (c *eTHBACKENDClient) EngineForkChoiceUpdated(ctx context.Context, in *EngineForkChoiceUpdatedRequest, opts ...grpc.CallOption) (*EngineForkChoiceUpdatedResponse, error) { out := new(EngineForkChoiceUpdatedResponse) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/EngineForkChoiceUpdated", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_EngineForkChoiceUpdated_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -115,7 +137,7 @@ func (c *eTHBACKENDClient) EngineForkChoiceUpdated(ctx context.Context, in *Engi func (c *eTHBACKENDClient) EngineGetPayload(ctx context.Context, in *EngineGetPayloadRequest, opts ...grpc.CallOption) (*EngineGetPayloadResponse, error) { out := new(EngineGetPayloadResponse) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/EngineGetPayload", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_EngineGetPayload_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -124,7 +146,7 @@ func (c *eTHBACKENDClient) EngineGetPayload(ctx context.Context, in *EngineGetPa func (c *eTHBACKENDClient) EngineGetPayloadBodiesByHashV1(ctx context.Context, in *EngineGetPayloadBodiesByHashV1Request, opts ...grpc.CallOption) (*EngineGetPayloadBodiesV1Response, error) { out := new(EngineGetPayloadBodiesV1Response) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/EngineGetPayloadBodiesByHashV1", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_EngineGetPayloadBodiesByHashV1_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -133,7 +155,7 @@ func (c *eTHBACKENDClient) EngineGetPayloadBodiesByHashV1(ctx context.Context, i func (c *eTHBACKENDClient) EngineGetPayloadBodiesByRangeV1(ctx context.Context, in *EngineGetPayloadBodiesByRangeV1Request, opts ...grpc.CallOption) (*EngineGetPayloadBodiesV1Response, error) { out := new(EngineGetPayloadBodiesV1Response) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/EngineGetPayloadBodiesByRangeV1", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_EngineGetPayloadBodiesByRangeV1_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -142,7 +164,7 @@ func (c *eTHBACKENDClient) EngineGetPayloadBodiesByRangeV1(ctx context.Context, func (c *eTHBACKENDClient) EngineGetBlobsBundleV1(ctx context.Context, in *EngineGetBlobsBundleRequest, opts ...grpc.CallOption) (*types.BlobsBundleV1, error) { out := new(types.BlobsBundleV1) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/EngineGetBlobsBundleV1", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_EngineGetBlobsBundleV1_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -151,7 +173,7 @@ func (c *eTHBACKENDClient) EngineGetBlobsBundleV1(ctx context.Context, in *Engin func (c *eTHBACKENDClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) { out := new(types.VersionReply) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/Version", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_Version_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -160,7 +182,7 @@ func (c *eTHBACKENDClient) Version(ctx context.Context, in *emptypb.Empty, opts func (c *eTHBACKENDClient) ProtocolVersion(ctx context.Context, in *ProtocolVersionRequest, opts ...grpc.CallOption) (*ProtocolVersionReply, error) { out := new(ProtocolVersionReply) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/ProtocolVersion", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_ProtocolVersion_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -169,7 +191,7 @@ func (c *eTHBACKENDClient) ProtocolVersion(ctx context.Context, in *ProtocolVers func (c *eTHBACKENDClient) ClientVersion(ctx context.Context, in *ClientVersionRequest, opts ...grpc.CallOption) (*ClientVersionReply, error) { out := new(ClientVersionReply) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/ClientVersion", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_ClientVersion_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -177,7 +199,7 @@ func (c *eTHBACKENDClient) ClientVersion(ctx context.Context, in *ClientVersionR } func (c *eTHBACKENDClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (ETHBACKEND_SubscribeClient, error) { - stream, err := c.cc.NewStream(ctx, &ETHBACKEND_ServiceDesc.Streams[0], "/remote.ETHBACKEND/Subscribe", opts...) + stream, err := c.cc.NewStream(ctx, &ETHBACKEND_ServiceDesc.Streams[0], ETHBACKEND_Subscribe_FullMethodName, opts...) if err != nil { return nil, err } @@ -209,7 +231,7 @@ func (x *eTHBACKENDSubscribeClient) Recv() (*SubscribeReply, error) { } func (c *eTHBACKENDClient) SubscribeLogs(ctx context.Context, opts ...grpc.CallOption) (ETHBACKEND_SubscribeLogsClient, error) { - stream, err := c.cc.NewStream(ctx, &ETHBACKEND_ServiceDesc.Streams[1], "/remote.ETHBACKEND/SubscribeLogs", opts...) + stream, err := c.cc.NewStream(ctx, &ETHBACKEND_ServiceDesc.Streams[1], ETHBACKEND_SubscribeLogs_FullMethodName, opts...) if err != nil { return nil, err } @@ -241,7 +263,7 @@ func (x *eTHBACKENDSubscribeLogsClient) Recv() (*SubscribeLogsReply, error) { func (c *eTHBACKENDClient) Block(ctx context.Context, in *BlockRequest, opts ...grpc.CallOption) (*BlockReply, error) { out := new(BlockReply) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/Block", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_Block_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -250,7 +272,7 @@ func (c *eTHBACKENDClient) Block(ctx context.Context, in *BlockRequest, opts ... func (c *eTHBACKENDClient) TxnLookup(ctx context.Context, in *TxnLookupRequest, opts ...grpc.CallOption) (*TxnLookupReply, error) { out := new(TxnLookupReply) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/TxnLookup", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_TxnLookup_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -259,7 +281,7 @@ func (c *eTHBACKENDClient) TxnLookup(ctx context.Context, in *TxnLookupRequest, func (c *eTHBACKENDClient) NodeInfo(ctx context.Context, in *NodesInfoRequest, opts ...grpc.CallOption) (*NodesInfoReply, error) { out := new(NodesInfoReply) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/NodeInfo", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_NodeInfo_FullMethodName, in, out, opts...)
if err != nil { return nil, err } @@ -268,7 +290,7 @@ func (c *eTHBACKENDClient) NodeInfo(ctx context.Context, in *NodesInfoRequest, o func (c *eTHBACKENDClient) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error) { out := new(PeersReply) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/Peers", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_Peers_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -277,7 +299,7 @@ func (c *eTHBACKENDClient) Peers(ctx context.Context, in *emptypb.Empty, opts .. func (c *eTHBACKENDClient) PendingBlock(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PendingBlockReply, error) { out := new(PendingBlockReply) - err := c.cc.Invoke(ctx, "/remote.ETHBACKEND/PendingBlock", in, out, opts...) + err := c.cc.Invoke(ctx, ETHBACKEND_PendingBlock_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -409,7 +431,7 @@ func _ETHBACKEND_Etherbase_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/Etherbase", + FullMethod: ETHBACKEND_Etherbase_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).Etherbase(ctx, req.(*EtherbaseRequest)) @@ -427,7 +449,7 @@ func _ETHBACKEND_NetVersion_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/NetVersion", + FullMethod: ETHBACKEND_NetVersion_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).NetVersion(ctx, req.(*NetVersionRequest)) @@ -445,7 +467,7 @@ func _ETHBACKEND_NetPeerCount_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/NetPeerCount", + FullMethod: ETHBACKEND_NetPeerCount_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).NetPeerCount(ctx, req.(*NetPeerCountRequest)) @@ -463,7 +485,7 @@ func _ETHBACKEND_EngineNewPayload_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/EngineNewPayload", + FullMethod: ETHBACKEND_EngineNewPayload_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).EngineNewPayload(ctx, req.(*types.ExecutionPayload)) @@ -481,7 +503,7 @@ func _ETHBACKEND_EngineForkChoiceUpdated_Handler(srv interface{}, ctx context.Co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/EngineForkChoiceUpdated", + FullMethod: ETHBACKEND_EngineForkChoiceUpdated_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).EngineForkChoiceUpdated(ctx, req.(*EngineForkChoiceUpdatedRequest)) @@ -499,7 +521,7 @@ func _ETHBACKEND_EngineGetPayload_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/EngineGetPayload", + FullMethod: ETHBACKEND_EngineGetPayload_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).EngineGetPayload(ctx, req.(*EngineGetPayloadRequest)) @@ -517,7 +539,7 @@ func _ETHBACKEND_EngineGetPayloadBodiesByHashV1_Handler(srv interface{}, ctx con } info := &grpc.UnaryServerInfo{ Server: srv, 
- FullMethod: "/remote.ETHBACKEND/EngineGetPayloadBodiesByHashV1", + FullMethod: ETHBACKEND_EngineGetPayloadBodiesByHashV1_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).EngineGetPayloadBodiesByHashV1(ctx, req.(*EngineGetPayloadBodiesByHashV1Request)) @@ -535,7 +557,7 @@ func _ETHBACKEND_EngineGetPayloadBodiesByRangeV1_Handler(srv interface{}, ctx co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/EngineGetPayloadBodiesByRangeV1", + FullMethod: ETHBACKEND_EngineGetPayloadBodiesByRangeV1_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).EngineGetPayloadBodiesByRangeV1(ctx, req.(*EngineGetPayloadBodiesByRangeV1Request)) @@ -553,7 +575,7 @@ func _ETHBACKEND_EngineGetBlobsBundleV1_Handler(srv interface{}, ctx context.Con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/EngineGetBlobsBundleV1", + FullMethod: ETHBACKEND_EngineGetBlobsBundleV1_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).EngineGetBlobsBundleV1(ctx, req.(*EngineGetBlobsBundleRequest)) @@ -571,7 +593,7 @@ func _ETHBACKEND_Version_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/Version", + FullMethod: ETHBACKEND_Version_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).Version(ctx, req.(*emptypb.Empty)) @@ -589,7 +611,7 @@ func _ETHBACKEND_ProtocolVersion_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/ProtocolVersion", + FullMethod: ETHBACKEND_ProtocolVersion_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).ProtocolVersion(ctx, req.(*ProtocolVersionRequest)) @@ -607,7 +629,7 @@ func _ETHBACKEND_ClientVersion_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/ClientVersion", + FullMethod: ETHBACKEND_ClientVersion_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).ClientVersion(ctx, req.(*ClientVersionRequest)) @@ -672,7 +694,7 @@ func _ETHBACKEND_Block_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/Block", + FullMethod: ETHBACKEND_Block_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).Block(ctx, req.(*BlockRequest)) @@ -690,7 +712,7 @@ func _ETHBACKEND_TxnLookup_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/TxnLookup", + FullMethod: ETHBACKEND_TxnLookup_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).TxnLookup(ctx, req.(*TxnLookupRequest)) @@ -708,7 +730,7 @@ func _ETHBACKEND_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/NodeInfo", + FullMethod: ETHBACKEND_NodeInfo_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { 
return srv.(ETHBACKENDServer).NodeInfo(ctx, req.(*NodesInfoRequest)) @@ -726,7 +748,7 @@ func _ETHBACKEND_Peers_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/Peers", + FullMethod: ETHBACKEND_Peers_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).Peers(ctx, req.(*emptypb.Empty)) @@ -744,7 +766,7 @@ func _ETHBACKEND_PendingBlock_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.ETHBACKEND/PendingBlock", + FullMethod: ETHBACKEND_PendingBlock_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ETHBACKENDServer).PendingBlock(ctx, req.(*emptypb.Empty)) diff --git a/gointerfaces/remote/kv.pb.go b/gointerfaces/remote/kv.pb.go index c101fb67e..ff6448729 100644 --- a/gointerfaces/remote/kv.pb.go +++ b/gointerfaces/remote/kv.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.22.2 // source: remote/kv.proto package remote @@ -226,7 +226,7 @@ type Cursor struct { unknownFields protoimpl.UnknownFields Op Op `protobuf:"varint,1,opt,name=op,proto3,enum=remote.Op" json:"op,omitempty"` - BucketName string `protobuf:"bytes,2,opt,name=bucketName,proto3" json:"bucketName,omitempty"` + BucketName string `protobuf:"bytes,2,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"` Cursor uint32 `protobuf:"varint,3,opt,name=cursor,proto3" json:"cursor,omitempty"` K []byte `protobuf:"bytes,4,opt,name=k,proto3" json:"k,omitempty"` V []byte `protobuf:"bytes,5,opt,name=v,proto3" json:"v,omitempty"` @@ -306,9 +306,9 @@ type Pair struct { K []byte `protobuf:"bytes,1,opt,name=k,proto3" json:"k,omitempty"` V []byte `protobuf:"bytes,2,opt,name=v,proto3" json:"v,omitempty"` - CursorID uint32 `protobuf:"varint,3,opt,name=cursorID,proto3" json:"cursorID,omitempty"` // send once after new cursor open - ViewID uint64 `protobuf:"varint,4,opt,name=viewID,proto3" json:"viewID,omitempty"` // return once after tx open. mdbx's tx.ViewID() - id of write transaction in db - TxID uint64 `protobuf:"varint,5,opt,name=txID,proto3" json:"txID,omitempty"` // return once after tx open. internal identifier - use it in other methods - to achieve consistant DB view (to read data from same DB tx on server). + CursorId uint32 `protobuf:"varint,3,opt,name=cursor_id,json=cursorId,proto3" json:"cursor_id,omitempty"` // send once after new cursor open + ViewId uint64 `protobuf:"varint,4,opt,name=view_id,json=viewId,proto3" json:"view_id,omitempty"` // return once after tx open. mdbx's tx.ViewID() - id of write transaction in db + TxId uint64 `protobuf:"varint,5,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // return once after tx open. internal identifier - use it in other methods - to achieve consistant DB view (to read data from same DB tx on server). 
} func (x *Pair) Reset() { @@ -357,23 +357,23 @@ func (x *Pair) GetV() []byte { return nil } -func (x *Pair) GetCursorID() uint32 { +func (x *Pair) GetCursorId() uint32 { if x != nil { - return x.CursorID + return x.CursorId } return 0 } -func (x *Pair) GetViewID() uint64 { +func (x *Pair) GetViewId() uint64 { if x != nil { - return x.ViewID + return x.ViewId } return 0 } -func (x *Pair) GetTxID() uint64 { +func (x *Pair) GetTxId() uint64 { if x != nil { - return x.TxID + return x.TxId } return 0 } @@ -443,7 +443,7 @@ type AccountChange struct { Action Action `protobuf:"varint,3,opt,name=action,proto3,enum=remote.Action" json:"action,omitempty"` Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` // nil if there is no UPSERT in action Code []byte `protobuf:"bytes,5,opt,name=code,proto3" json:"code,omitempty"` // nil if there is no CODE in action - StorageChanges []*StorageChange `protobuf:"bytes,6,rep,name=storageChanges,proto3" json:"storageChanges,omitempty"` + StorageChanges []*StorageChange `protobuf:"bytes,6,rep,name=storage_changes,json=storageChanges,proto3" json:"storage_changes,omitempty"` } func (x *AccountChange) Reset() { @@ -526,10 +526,10 @@ type StateChangeBatch struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - StateVersionID uint64 `protobuf:"varint,1,opt,name=stateVersionID,proto3" json:"stateVersionID,omitempty"` // mdbx's tx.ID() - id of write transaction in db - where this changes happened - ChangeBatch []*StateChange `protobuf:"bytes,2,rep,name=changeBatch,proto3" json:"changeBatch,omitempty"` - PendingBlockBaseFee uint64 `protobuf:"varint,3,opt,name=pendingBlockBaseFee,proto3" json:"pendingBlockBaseFee,omitempty"` // BaseFee of the next block to be produced - BlockGasLimit uint64 `protobuf:"varint,4,opt,name=blockGasLimit,proto3" json:"blockGasLimit,omitempty"` // GasLimit of the latest block - proxy for the gas limit of the next block to be produced + StateVersionId uint64 `protobuf:"varint,1,opt,name=state_version_id,json=stateVersionId,proto3" json:"state_version_id,omitempty"` // mdbx's tx.ID() - id of write transaction in db - where this changes happened + ChangeBatch []*StateChange `protobuf:"bytes,2,rep,name=change_batch,json=changeBatch,proto3" json:"change_batch,omitempty"` + PendingBlockBaseFee uint64 `protobuf:"varint,3,opt,name=pending_block_base_fee,json=pendingBlockBaseFee,proto3" json:"pending_block_base_fee,omitempty"` // BaseFee of the next block to be produced + BlockGasLimit uint64 `protobuf:"varint,4,opt,name=block_gas_limit,json=blockGasLimit,proto3" json:"block_gas_limit,omitempty"` // GasLimit of the latest block - proxy for the gas limit of the next block to be produced } func (x *StateChangeBatch) Reset() { @@ -564,9 +564,9 @@ func (*StateChangeBatch) Descriptor() ([]byte, []int) { return file_remote_kv_proto_rawDescGZIP(), []int{4} } -func (x *StateChangeBatch) GetStateVersionID() uint64 { +func (x *StateChangeBatch) GetStateVersionId() uint64 { if x != nil { - return x.StateVersionID + return x.StateVersionId } return 0 } @@ -599,8 +599,8 @@ type StateChange struct { unknownFields protoimpl.UnknownFields Direction Direction `protobuf:"varint,1,opt,name=direction,proto3,enum=remote.Direction" json:"direction,omitempty"` - BlockHeight uint64 `protobuf:"varint,2,opt,name=blockHeight,proto3" json:"blockHeight,omitempty"` - BlockHash *types.H256 `protobuf:"bytes,3,opt,name=blockHash,proto3" json:"blockHash,omitempty"` + BlockHeight uint64 
`protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` + BlockHash *types.H256 `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` Changes []*AccountChange `protobuf:"bytes,4,rep,name=changes,proto3" json:"changes,omitempty"` Txs [][]byte `protobuf:"bytes,5,rep,name=txs,proto3" json:"txs,omitempty"` // enable by withTransactions=true } @@ -677,8 +677,8 @@ type StateChangeRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - WithStorage bool `protobuf:"varint,1,opt,name=withStorage,proto3" json:"withStorage,omitempty"` - WithTransactions bool `protobuf:"varint,2,opt,name=withTransactions,proto3" json:"withTransactions,omitempty"` + WithStorage bool `protobuf:"varint,1,opt,name=with_storage,json=withStorage,proto3" json:"with_storage,omitempty"` + WithTransactions bool `protobuf:"varint,2,opt,name=with_transactions,json=withTransactions,proto3" json:"with_transactions,omitempty"` } func (x *StateChangeRequest) Reset() { @@ -933,10 +933,11 @@ type DomainGetReq struct { TxId uint64 `protobuf:"varint,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // returned by .Tx() // query params - Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"` - K []byte `protobuf:"bytes,3,opt,name=k,proto3" json:"k,omitempty"` - Ts uint64 `protobuf:"varint,4,opt,name=ts,proto3" json:"ts,omitempty"` - K2 []byte `protobuf:"bytes,5,opt,name=k2,proto3" json:"k2,omitempty"` + Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"` + K []byte `protobuf:"bytes,3,opt,name=k,proto3" json:"k,omitempty"` + Ts uint64 `protobuf:"varint,4,opt,name=ts,proto3" json:"ts,omitempty"` + K2 []byte `protobuf:"bytes,5,opt,name=k2,proto3" json:"k2,omitempty"` + Latest bool `protobuf:"varint,6,opt,name=latest,proto3" json:"latest,omitempty"` // if true, then `ts` ignored and return latest state (without history lookup) } func (x *DomainGetReq) Reset() { @@ -1006,6 +1007,13 @@ func (x *DomainGetReq) GetK2() []byte { return nil } +func (x *DomainGetReq) GetLatest() bool { + if x != nil { + return x.Latest + } + return false +} + type DomainGetReply struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1355,6 +1363,232 @@ func (x *IndexRangeReply) GetNextPageToken() string { return "" } +type HistoryRangeReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TxId uint64 `protobuf:"varint,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // returned by .Tx() + // query params + Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"` + FromTs int64 `protobuf:"zigzag64,4,opt,name=from_ts,json=fromTs,proto3" json:"from_ts,omitempty"` // -1 means Inf + ToTs int64 `protobuf:"zigzag64,5,opt,name=to_ts,json=toTs,proto3" json:"to_ts,omitempty"` // -1 means Inf + OrderAscend bool `protobuf:"varint,6,opt,name=order_ascend,json=orderAscend,proto3" json:"order_ascend,omitempty"` + Limit int64 `protobuf:"zigzag64,7,opt,name=limit,proto3" json:"limit,omitempty"` // <= 0 means no limit + // pagination params + PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // <= 0 means server will choose + PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *HistoryRangeReq) Reset() { + *x = HistoryRangeReq{} + if protoimpl.UnsafeEnabled { + mi := 
&file_remote_kv_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HistoryRangeReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HistoryRangeReq) ProtoMessage() {} + +func (x *HistoryRangeReq) ProtoReflect() protoreflect.Message { + mi := &file_remote_kv_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HistoryRangeReq.ProtoReflect.Descriptor instead. +func (*HistoryRangeReq) Descriptor() ([]byte, []int) { + return file_remote_kv_proto_rawDescGZIP(), []int{16} +} + +func (x *HistoryRangeReq) GetTxId() uint64 { + if x != nil { + return x.TxId + } + return 0 +} + +func (x *HistoryRangeReq) GetTable() string { + if x != nil { + return x.Table + } + return "" +} + +func (x *HistoryRangeReq) GetFromTs() int64 { + if x != nil { + return x.FromTs + } + return 0 +} + +func (x *HistoryRangeReq) GetToTs() int64 { + if x != nil { + return x.ToTs + } + return 0 +} + +func (x *HistoryRangeReq) GetOrderAscend() bool { + if x != nil { + return x.OrderAscend + } + return false +} + +func (x *HistoryRangeReq) GetLimit() int64 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *HistoryRangeReq) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *HistoryRangeReq) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +type DomainRangeReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TxId uint64 `protobuf:"varint,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // returned by .Tx() + // query params + Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"` + FromKey []byte `protobuf:"bytes,3,opt,name=from_key,json=fromKey,proto3" json:"from_key,omitempty"` // nil means Inf + ToKey []byte `protobuf:"bytes,4,opt,name=to_key,json=toKey,proto3" json:"to_key,omitempty"` // nil means Inf + Ts uint64 `protobuf:"varint,5,opt,name=ts,proto3" json:"ts,omitempty"` + Latest bool `protobuf:"varint,6,opt,name=latest,proto3" json:"latest,omitempty"` // if true, then `ts` ignored and return latest state (without history lookup) + OrderAscend bool `protobuf:"varint,7,opt,name=order_ascend,json=orderAscend,proto3" json:"order_ascend,omitempty"` + Limit int64 `protobuf:"zigzag64,8,opt,name=limit,proto3" json:"limit,omitempty"` // <= 0 means no limit + // pagination params + PageSize int32 `protobuf:"varint,9,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // <= 0 means server will choose + PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *DomainRangeReq) Reset() { + *x = DomainRangeReq{} + if protoimpl.UnsafeEnabled { + mi := &file_remote_kv_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DomainRangeReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DomainRangeReq) ProtoMessage() {} + +func (x *DomainRangeReq) ProtoReflect() protoreflect.Message { + mi := &file_remote_kv_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + 
return mi.MessageOf(x) +} + +// Deprecated: Use DomainRangeReq.ProtoReflect.Descriptor instead. +func (*DomainRangeReq) Descriptor() ([]byte, []int) { + return file_remote_kv_proto_rawDescGZIP(), []int{17} +} + +func (x *DomainRangeReq) GetTxId() uint64 { + if x != nil { + return x.TxId + } + return 0 +} + +func (x *DomainRangeReq) GetTable() string { + if x != nil { + return x.Table + } + return "" +} + +func (x *DomainRangeReq) GetFromKey() []byte { + if x != nil { + return x.FromKey + } + return nil +} + +func (x *DomainRangeReq) GetToKey() []byte { + if x != nil { + return x.ToKey + } + return nil +} + +func (x *DomainRangeReq) GetTs() uint64 { + if x != nil { + return x.Ts + } + return 0 +} + +func (x *DomainRangeReq) GetLatest() bool { + if x != nil { + return x.Latest + } + return false +} + +func (x *DomainRangeReq) GetOrderAscend() bool { + if x != nil { + return x.OrderAscend + } + return false +} + +func (x *DomainRangeReq) GetLimit() int64 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *DomainRangeReq) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *DomainRangeReq) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + type Pairs struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1368,7 +1602,7 @@ type Pairs struct { func (x *Pairs) Reset() { *x = Pairs{} if protoimpl.UnsafeEnabled { - mi := &file_remote_kv_proto_msgTypes[16] + mi := &file_remote_kv_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1381,7 +1615,7 @@ func (x *Pairs) String() string { func (*Pairs) ProtoMessage() {} func (x *Pairs) ProtoReflect() protoreflect.Message { - mi := &file_remote_kv_proto_msgTypes[16] + mi := &file_remote_kv_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1394,7 +1628,7 @@ func (x *Pairs) ProtoReflect() protoreflect.Message { // Deprecated: Use Pairs.ProtoReflect.Descriptor instead. func (*Pairs) Descriptor() ([]byte, []int) { - return file_remote_kv_proto_rawDescGZIP(), []int{16} + return file_remote_kv_proto_rawDescGZIP(), []int{18} } func (x *Pairs) GetKeys() [][]byte { @@ -1430,7 +1664,7 @@ type ParisPagination struct { func (x *ParisPagination) Reset() { *x = ParisPagination{} if protoimpl.UnsafeEnabled { - mi := &file_remote_kv_proto_msgTypes[17] + mi := &file_remote_kv_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1443,7 +1677,7 @@ func (x *ParisPagination) String() string { func (*ParisPagination) ProtoMessage() {} func (x *ParisPagination) ProtoReflect() protoreflect.Message { - mi := &file_remote_kv_proto_msgTypes[17] + mi := &file_remote_kv_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1456,7 +1690,7 @@ func (x *ParisPagination) ProtoReflect() protoreflect.Message { // Deprecated: Use ParisPagination.ProtoReflect.Descriptor instead. 
func (*ParisPagination) Descriptor() ([]byte, []int) { - return file_remote_kv_proto_rawDescGZIP(), []int{17} + return file_remote_kv_proto_rawDescGZIP(), []int{19} } func (x *ParisPagination) GetNextKey() []byte { @@ -1485,7 +1719,7 @@ type IndexPagination struct { func (x *IndexPagination) Reset() { *x = IndexPagination{} if protoimpl.UnsafeEnabled { - mi := &file_remote_kv_proto_msgTypes[18] + mi := &file_remote_kv_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1498,7 +1732,7 @@ func (x *IndexPagination) String() string { func (*IndexPagination) ProtoMessage() {} func (x *IndexPagination) ProtoReflect() protoreflect.Message { - mi := &file_remote_kv_proto_msgTypes[18] + mi := &file_remote_kv_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1511,7 +1745,7 @@ func (x *IndexPagination) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexPagination.ProtoReflect.Descriptor instead. func (*IndexPagination) Descriptor() ([]byte, []int) { - return file_remote_kv_proto_rawDescGZIP(), []int{18} + return file_remote_kv_proto_rawDescGZIP(), []int{20} } func (x *IndexPagination) GetNextTimeStamp() int64 { @@ -1535,203 +1769,243 @@ var file_remote_kv_proto_rawDesc = []byte{ 0x6f, 0x12, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x78, 0x0a, 0x06, 0x43, 0x75, 0x72, + 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x06, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x1a, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4f, 0x70, 0x52, 0x02, 0x6f, 0x70, 0x12, - 0x1e, 0x0a, 0x0a, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x01, 0x76, 0x22, 0x6a, 0x0a, 0x04, 0x50, 0x61, 0x69, 0x72, 0x12, 0x0c, 0x0a, 0x01, 0x6b, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x75, 0x72, 0x73, 0x6f, - 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x75, 0x72, 0x73, 0x6f, - 0x72, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x69, 0x65, 0x77, 0x49, 0x44, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x06, 0x76, 0x69, 0x65, 0x77, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x78, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x44, 0x22, - 0x4c, 0x0a, 0x0d, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x27, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xe7, 0x01, - 0x0a, 0x0d, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x25, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x07, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x69, 0x6e, 0x63, 0x61, 0x72, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x6e, 0x63, - 0x61, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x3d, 0x0a, 0x0e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xc9, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x26, 0x0a, 0x0e, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0b, - 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x30, 0x0a, 0x13, 0x70, - 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x61, 0x73, 0x65, 0x46, - 0x65, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x12, 0x24, 0x0a, - 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x61, 0x73, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x22, 0xce, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, - 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x29, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, - 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 
0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, - 0x68, 0x12, 0x2f, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x78, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0c, 0x52, - 0x03, 0x74, 0x78, 0x73, 0x22, 0x62, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x77, 0x69, - 0x74, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0b, 0x77, 0x69, 0x74, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x10, - 0x77, 0x69, 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x77, 0x69, 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x12, 0x0a, 0x10, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x58, 0x0a, 0x0e, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x21, - 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x46, 0x69, 0x6c, 0x65, - 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x66, 0x69, 0x6c, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x08, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1f, - 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x08, 0x74, 0x6f, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x0c, - 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, - 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0x67, 0x0a, 0x0c, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, - 0x02, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c, 0x0a, 0x01, - 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x6b, 0x32, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x6b, 0x32, 0x22, 0x2e, 0x0a, 0x0e, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, - 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x58, 0x0a, 0x0d, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, + 0x1f, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x01, 0x76, 0x22, 0x6d, 0x0a, 0x04, 0x50, 0x61, 0x69, 0x72, 0x12, 0x0c, 0x0a, 0x01, + 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x75, 0x72, 0x73, + 0x6f, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x75, 0x72, + 0x73, 0x6f, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x76, 0x69, 0x65, 0x77, 0x49, 0x64, 0x12, 0x13, + 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, + 0x78, 0x49, 0x64, 0x22, 0x4c, 0x0a, 0x0d, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, + 0x32, 0x35, 0x36, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x22, 0xe8, 0x01, 0x0a, 0x0d, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, + 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x69, 0x6e, + 0x63, 0x61, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0b, 0x69, 0x6e, 0x63, 0x61, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x3e, 0x0a, 0x0f, + 0x73, 0x74, 0x6f, 0x72, 0x61, 
0x67, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xd1, 0x01, 0x0a, + 0x10, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0c, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x13, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x33, 0x0a, 0x16, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x66, 0x65, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x13, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x42, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x22, 0xd0, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x12, 0x2a, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x2f, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x78, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x03, + 0x74, 0x78, 0x73, 0x22, 0x64, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x69, 0x74, + 0x68, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x77, 0x69, 0x74, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2b, 0x0a, 0x11, + 0x77, 0x69, 0x74, 0x68, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x77, 0x69, 0x74, 0x68, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 
0x22, 0x12, 0x0a, 0x10, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x58, 0x0a, + 0x0e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x46, 0x69, 0x6c, + 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x66, 0x69, + 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x68, 0x69, 0x73, 0x74, 0x6f, + 0x72, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x08, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x08, 0x74, 0x6f, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, + 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x12, 0x52, + 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x7f, 0x0a, 0x0c, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, + 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c, 0x0a, + 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x74, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x6b, + 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x6b, 0x32, 0x12, 0x16, 0x0a, 0x06, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61, 0x74, + 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x0e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x01, 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x02, 0x6f, 0x6b, 0x22, 0x58, 0x0a, 0x0d, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 
0x6c, 0x65, 0x12, + 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0e, 0x0a, + 0x02, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x22, 0x2f, 0x0a, + 0x0f, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12, 0x0e, + 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22, 0xeb, + 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c, 0x0a, 0x01, 0x6b, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x6f, + 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x6d, + 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, + 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, + 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x59, 0x0a, 0x0f, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, + 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, + 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xdf, 0x01, 0x0a, 0x0f, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x02, 0x74, 0x73, 0x22, 0x2f, 0x0a, 0x0f, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, - 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22, 0xeb, 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, - 0x18, 
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, - 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x6d, 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, - 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, - 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, - 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x22, 0x59, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, - 0x0a, 0x05, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x42, 0x0a, 0x0f, 0x50, - 0x61, 0x72, 0x69, 0x73, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, - 0x0a, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x6e, 0x65, 0x78, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, - 0x4f, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x78, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x2a, 0x86, 0x02, 0x0a, 0x02, 0x4f, 0x70, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x52, 0x53, 0x54, - 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x49, 0x52, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, - 0x01, 0x12, 0x08, 0x0a, 0x04, 
0x53, 0x45, 0x45, 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, - 0x45, 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x55, - 0x52, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x41, 0x53, 0x54, 0x10, - 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x4c, 0x41, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x07, 0x12, - 0x08, 0x0a, 0x04, 0x4e, 0x45, 0x58, 0x54, 0x10, 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x45, 0x58, - 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x09, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x45, 0x58, 0x54, 0x5f, - 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0b, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x52, 0x45, 0x56, - 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x52, 0x45, 0x56, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0d, - 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x52, 0x45, 0x56, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, - 0x0e, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, - 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x45, - 0x58, 0x41, 0x43, 0x54, 0x10, 0x10, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x1e, - 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x1f, 0x12, 0x11, 0x0a, 0x0d, 0x4f, - 0x50, 0x45, 0x4e, 0x5f, 0x44, 0x55, 0x50, 0x5f, 0x53, 0x4f, 0x52, 0x54, 0x10, 0x20, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x21, 0x2a, 0x48, 0x0a, 0x06, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x10, 0x00, - 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x43, 0x4f, 0x44, 0x45, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, - 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, - 0x45, 0x10, 0x04, 0x2a, 0x24, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x55, 0x4e, 0x57, 0x49, 0x4e, 0x44, 0x10, 0x01, 0x32, 0xcc, 0x03, 0x0a, 0x02, 0x4b, 0x56, - 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x0e, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x1a, 0x0c, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x28, 0x01, 0x30, 0x01, - 0x12, 0x46, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, - 0x12, 0x1a, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x30, 0x01, 0x12, 0x3d, 0x0a, 0x09, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x39, 
0x0a, 0x09, 0x44, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x47, 0x65, 0x74, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, - 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x3c, 0x0a, 0x0a, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x15, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x28, - 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x3b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x74, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x6d, 0x54, 0x73, 0x12, + 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x12, 0x52, 0x04, + 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, + 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, + 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x02, 0x0a, 0x0e, 0x44, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, + 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x72, 0x6f, 0x6d, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x66, 0x72, 0x6f, 0x6d, 0x4b, + 0x65, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x74, + 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65, 0x73, + 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 
0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, + 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, + 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, + 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x05, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, + 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x04, 0x6b, + 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, + 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x22, 0x42, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x69, 0x73, 0x50, 0x61, 0x67, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x78, 0x74, 0x4b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x12, + 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x4f, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, + 0x78, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, + 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2a, 0x86, 0x02, 0x0a, 0x02, 0x4f, 0x70, 0x12, + 0x09, 0x0a, 0x05, 0x46, 0x49, 0x52, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x49, + 0x52, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x45, 0x45, + 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48, + 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x55, 0x52, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x04, 0x12, + 0x08, 0x0a, 0x04, 0x4c, 0x41, 0x53, 0x54, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x4c, 0x41, 0x53, + 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x07, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x45, 0x58, 0x54, 0x10, + 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x09, 0x12, + 0x0f, 0x0a, 0x0b, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0b, + 0x12, 0x08, 0x0a, 0x04, 0x50, 0x52, 0x45, 0x56, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x52, + 0x45, 0x56, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x52, 0x45, 0x56, + 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0e, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x45, 0x45, + 0x4b, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x45, + 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x10, 0x12, 0x08, + 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x1e, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 
0x4f, 0x53, + 0x45, 0x10, 0x1f, 0x12, 0x11, 0x0a, 0x0d, 0x4f, 0x50, 0x45, 0x4e, 0x5f, 0x44, 0x55, 0x50, 0x5f, + 0x53, 0x4f, 0x52, 0x54, 0x10, 0x20, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, + 0x21, 0x2a, 0x48, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x53, + 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x53, 0x45, + 0x52, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x02, 0x12, 0x0f, + 0x0a, 0x0b, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x03, 0x12, + 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x04, 0x2a, 0x24, 0x0a, 0x09, 0x44, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x4f, 0x52, 0x57, + 0x41, 0x52, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x57, 0x49, 0x4e, 0x44, 0x10, + 0x01, 0x32, 0xba, 0x04, 0x0a, 0x02, 0x4b, 0x56, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x26, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x0e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x1a, 0x0c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x50, 0x61, 0x69, 0x72, 0x28, 0x01, 0x30, 0x01, 0x12, 0x46, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x30, 0x01, + 0x12, 0x3d, 0x0a, 0x09, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x18, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x28, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x39, 0x0a, 0x09, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, + 0x65, 0x74, 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, + 0x61, 0x6e, 
0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x36, 0x0a, 0x0c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, + 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x34, 0x0a, 0x0b, 0x44, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, + 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x42, 0x11, + 0x5a, 0x0f, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3b, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1747,7 +2021,7 @@ func file_remote_kv_proto_rawDescGZIP() []byte { } var file_remote_kv_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_remote_kv_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_remote_kv_proto_msgTypes = make([]protoimpl.MessageInfo, 21) var file_remote_kv_proto_goTypes = []interface{}{ (Op)(0), // 0: remote.Op (Action)(0), // 1: remote.Action @@ -1768,42 +2042,48 @@ var file_remote_kv_proto_goTypes = []interface{}{ (*HistoryGetReply)(nil), // 16: remote.HistoryGetReply (*IndexRangeReq)(nil), // 17: remote.IndexRangeReq (*IndexRangeReply)(nil), // 18: remote.IndexRangeReply - (*Pairs)(nil), // 19: remote.Pairs - (*ParisPagination)(nil), // 20: remote.ParisPagination - (*IndexPagination)(nil), // 21: remote.IndexPagination - (*types.H256)(nil), // 22: types.H256 - (*types.H160)(nil), // 23: types.H160 - (*emptypb.Empty)(nil), // 24: google.protobuf.Empty - (*types.VersionReply)(nil), // 25: types.VersionReply + (*HistoryRangeReq)(nil), // 19: remote.HistoryRangeReq + (*DomainRangeReq)(nil), // 20: remote.DomainRangeReq + (*Pairs)(nil), // 21: remote.Pairs + (*ParisPagination)(nil), // 22: remote.ParisPagination + (*IndexPagination)(nil), // 23: remote.IndexPagination + (*types.H256)(nil), // 24: types.H256 + (*types.H160)(nil), // 25: types.H160 + (*emptypb.Empty)(nil), // 26: google.protobuf.Empty + (*types.VersionReply)(nil), // 27: types.VersionReply } var file_remote_kv_proto_depIdxs = []int32{ 0, // 0: remote.Cursor.op:type_name -> remote.Op - 22, // 1: remote.StorageChange.location:type_name -> types.H256 - 23, // 2: remote.AccountChange.address:type_name -> types.H160 + 24, // 1: remote.StorageChange.location:type_name -> types.H256 + 25, // 2: remote.AccountChange.address:type_name -> types.H160 1, // 3: remote.AccountChange.action:type_name -> remote.Action - 5, // 4: remote.AccountChange.storageChanges:type_name -> remote.StorageChange - 8, // 5: remote.StateChangeBatch.changeBatch:type_name -> remote.StateChange + 5, // 4: remote.AccountChange.storage_changes:type_name -> remote.StorageChange + 8, // 5: remote.StateChangeBatch.change_batch:type_name -> remote.StateChange 2, // 6: remote.StateChange.direction:type_name -> remote.Direction - 22, // 7: remote.StateChange.blockHash:type_name -> types.H256 + 24, // 7: remote.StateChange.block_hash:type_name -> types.H256 6, // 8: remote.StateChange.changes:type_name -> remote.AccountChange - 24, // 9: remote.KV.Version:input_type -> google.protobuf.Empty + 26, // 9: 
remote.KV.Version:input_type -> google.protobuf.Empty 3, // 10: remote.KV.Tx:input_type -> remote.Cursor 9, // 11: remote.KV.StateChanges:input_type -> remote.StateChangeRequest 10, // 12: remote.KV.Snapshots:input_type -> remote.SnapshotsRequest - 13, // 13: remote.KV.DomainGet:input_type -> remote.DomainGetReq - 15, // 14: remote.KV.HistoryGet:input_type -> remote.HistoryGetReq - 17, // 15: remote.KV.IndexRange:input_type -> remote.IndexRangeReq - 12, // 16: remote.KV.Range:input_type -> remote.RangeReq - 25, // 17: remote.KV.Version:output_type -> types.VersionReply - 4, // 18: remote.KV.Tx:output_type -> remote.Pair - 7, // 19: remote.KV.StateChanges:output_type -> remote.StateChangeBatch - 11, // 20: remote.KV.Snapshots:output_type -> remote.SnapshotsReply - 14, // 21: remote.KV.DomainGet:output_type -> remote.DomainGetReply - 16, // 22: remote.KV.HistoryGet:output_type -> remote.HistoryGetReply - 18, // 23: remote.KV.IndexRange:output_type -> remote.IndexRangeReply - 19, // 24: remote.KV.Range:output_type -> remote.Pairs - 17, // [17:25] is the sub-list for method output_type - 9, // [9:17] is the sub-list for method input_type + 12, // 13: remote.KV.Range:input_type -> remote.RangeReq + 13, // 14: remote.KV.DomainGet:input_type -> remote.DomainGetReq + 15, // 15: remote.KV.HistoryGet:input_type -> remote.HistoryGetReq + 17, // 16: remote.KV.IndexRange:input_type -> remote.IndexRangeReq + 19, // 17: remote.KV.HistoryRange:input_type -> remote.HistoryRangeReq + 20, // 18: remote.KV.DomainRange:input_type -> remote.DomainRangeReq + 27, // 19: remote.KV.Version:output_type -> types.VersionReply + 4, // 20: remote.KV.Tx:output_type -> remote.Pair + 7, // 21: remote.KV.StateChanges:output_type -> remote.StateChangeBatch + 11, // 22: remote.KV.Snapshots:output_type -> remote.SnapshotsReply + 21, // 23: remote.KV.Range:output_type -> remote.Pairs + 14, // 24: remote.KV.DomainGet:output_type -> remote.DomainGetReply + 16, // 25: remote.KV.HistoryGet:output_type -> remote.HistoryGetReply + 18, // 26: remote.KV.IndexRange:output_type -> remote.IndexRangeReply + 21, // 27: remote.KV.HistoryRange:output_type -> remote.Pairs + 21, // 28: remote.KV.DomainRange:output_type -> remote.Pairs + 19, // [19:29] is the sub-list for method output_type + 9, // [9:19] is the sub-list for method input_type 9, // [9:9] is the sub-list for extension type_name 9, // [9:9] is the sub-list for extension extendee 0, // [0:9] is the sub-list for field type_name @@ -2008,7 +2288,7 @@ func file_remote_kv_proto_init() { } } file_remote_kv_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Pairs); i { + switch v := v.(*HistoryRangeReq); i { case 0: return &v.state case 1: @@ -2020,7 +2300,7 @@ func file_remote_kv_proto_init() { } } file_remote_kv_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParisPagination); i { + switch v := v.(*DomainRangeReq); i { case 0: return &v.state case 1: @@ -2032,6 +2312,30 @@ func file_remote_kv_proto_init() { } } file_remote_kv_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Pairs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_remote_kv_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParisPagination); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_remote_kv_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*IndexPagination); i { case 0: return &v.state @@ -2050,7 +2354,7 @@ func file_remote_kv_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_remote_kv_proto_rawDesc, NumEnums: 3, - NumMessages: 19, + NumMessages: 21, NumExtensions: 0, NumServices: 1, }, diff --git a/gointerfaces/remote/kv_grpc.pb.go b/gointerfaces/remote/kv_grpc.pb.go index 5cd9892c5..e34bb5229 100644 --- a/gointerfaces/remote/kv_grpc.pb.go +++ b/gointerfaces/remote/kv_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.12 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.2 // source: remote/kv.proto package remote @@ -20,6 +20,19 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + KV_Version_FullMethodName = "/remote.KV/Version" + KV_Tx_FullMethodName = "/remote.KV/Tx" + KV_StateChanges_FullMethodName = "/remote.KV/StateChanges" + KV_Snapshots_FullMethodName = "/remote.KV/Snapshots" + KV_Range_FullMethodName = "/remote.KV/Range" + KV_DomainGet_FullMethodName = "/remote.KV/DomainGet" + KV_HistoryGet_FullMethodName = "/remote.KV/HistoryGet" + KV_IndexRange_FullMethodName = "/remote.KV/IndexRange" + KV_HistoryRange_FullMethodName = "/remote.KV/HistoryRange" + KV_DomainRange_FullMethodName = "/remote.KV/DomainRange" +) + // KVClient is the client API for KV service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -35,15 +48,17 @@ type KVClient interface { StateChanges(ctx context.Context, in *StateChangeRequest, opts ...grpc.CallOption) (KV_StateChangesClient, error) // Snapshots returns list of current snapshot files. Then client can just open all of them. Snapshots(ctx context.Context, in *SnapshotsRequest, opts ...grpc.CallOption) (*SnapshotsReply, error) - // Temporal methods - DomainGet(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error) - HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) - IndexRange(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error) // Range [from, to) // Range(from, nil) means [from, EndOfTable) // Range(nil, to) means [StartOfTable, to) // If orderAscend=false server expecting `from`<`to`. Example: Range("B", "A") Range(ctx context.Context, in *RangeReq, opts ...grpc.CallOption) (*Pairs, error) + // Temporal methods + DomainGet(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error) + HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) + IndexRange(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error) + HistoryRange(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error) + DomainRange(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error) } type kVClient struct { @@ -56,7 +71,7 @@ func NewKVClient(cc grpc.ClientConnInterface) KVClient { func (c *kVClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) { out := new(types.VersionReply) - err := c.cc.Invoke(ctx, "/remote.KV/Version", in, out, opts...) + err := c.cc.Invoke(ctx, KV_Version_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -64,7 +79,7 @@ func (c *kVClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc. } func (c *kVClient) Tx(ctx context.Context, opts ...grpc.CallOption) (KV_TxClient, error) { - stream, err := c.cc.NewStream(ctx, &KV_ServiceDesc.Streams[0], "/remote.KV/Tx", opts...) + stream, err := c.cc.NewStream(ctx, &KV_ServiceDesc.Streams[0], KV_Tx_FullMethodName, opts...) if err != nil { return nil, err } @@ -95,7 +110,7 @@ func (x *kVTxClient) Recv() (*Pair, error) { } func (c *kVClient) StateChanges(ctx context.Context, in *StateChangeRequest, opts ...grpc.CallOption) (KV_StateChangesClient, error) { - stream, err := c.cc.NewStream(ctx, &KV_ServiceDesc.Streams[1], "/remote.KV/StateChanges", opts...) + stream, err := c.cc.NewStream(ctx, &KV_ServiceDesc.Streams[1], KV_StateChanges_FullMethodName, opts...) if err != nil { return nil, err } @@ -128,7 +143,16 @@ func (x *kVStateChangesClient) Recv() (*StateChangeBatch, error) { func (c *kVClient) Snapshots(ctx context.Context, in *SnapshotsRequest, opts ...grpc.CallOption) (*SnapshotsReply, error) { out := new(SnapshotsReply) - err := c.cc.Invoke(ctx, "/remote.KV/Snapshots", in, out, opts...) + err := c.cc.Invoke(ctx, KV_Snapshots_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Range(ctx context.Context, in *RangeReq, opts ...grpc.CallOption) (*Pairs, error) { + out := new(Pairs) + err := c.cc.Invoke(ctx, KV_Range_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -137,7 +161,7 @@ func (c *kVClient) Snapshots(ctx context.Context, in *SnapshotsRequest, opts ... func (c *kVClient) DomainGet(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error) { out := new(DomainGetReply) - err := c.cc.Invoke(ctx, "/remote.KV/DomainGet", in, out, opts...) + err := c.cc.Invoke(ctx, KV_DomainGet_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -146,7 +170,7 @@ func (c *kVClient) DomainGet(ctx context.Context, in *DomainGetReq, opts ...grpc func (c *kVClient) HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) { out := new(HistoryGetReply) - err := c.cc.Invoke(ctx, "/remote.KV/HistoryGet", in, out, opts...) + err := c.cc.Invoke(ctx, KV_HistoryGet_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -155,16 +179,25 @@ func (c *kVClient) HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...gr func (c *kVClient) IndexRange(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error) { out := new(IndexRangeReply) - err := c.cc.Invoke(ctx, "/remote.KV/IndexRange", in, out, opts...) + err := c.cc.Invoke(ctx, KV_IndexRange_FullMethodName, in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *kVClient) Range(ctx context.Context, in *RangeReq, opts ...grpc.CallOption) (*Pairs, error) { +func (c *kVClient) HistoryRange(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error) { out := new(Pairs) - err := c.cc.Invoke(ctx, "/remote.KV/Range", in, out, opts...) + err := c.cc.Invoke(ctx, KV_HistoryRange_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) DomainRange(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error) { + out := new(Pairs) + err := c.cc.Invoke(ctx, KV_DomainRange_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -186,15 +219,17 @@ type KVServer interface { StateChanges(*StateChangeRequest, KV_StateChangesServer) error // Snapshots returns list of current snapshot files. Then client can just open all of them. Snapshots(context.Context, *SnapshotsRequest) (*SnapshotsReply, error) - // Temporal methods - DomainGet(context.Context, *DomainGetReq) (*DomainGetReply, error) - HistoryGet(context.Context, *HistoryGetReq) (*HistoryGetReply, error) - IndexRange(context.Context, *IndexRangeReq) (*IndexRangeReply, error) // Range [from, to) // Range(from, nil) means [from, EndOfTable) // Range(nil, to) means [StartOfTable, to) // If orderAscend=false server expecting `from`<`to`. Example: Range("B", "A") Range(context.Context, *RangeReq) (*Pairs, error) + // Temporal methods + DomainGet(context.Context, *DomainGetReq) (*DomainGetReply, error) + HistoryGet(context.Context, *HistoryGetReq) (*HistoryGetReply, error) + IndexRange(context.Context, *IndexRangeReq) (*IndexRangeReply, error) + HistoryRange(context.Context, *HistoryRangeReq) (*Pairs, error) + DomainRange(context.Context, *DomainRangeReq) (*Pairs, error) mustEmbedUnimplementedKVServer() } @@ -214,6 +249,9 @@ func (UnimplementedKVServer) StateChanges(*StateChangeRequest, KV_StateChangesSe func (UnimplementedKVServer) Snapshots(context.Context, *SnapshotsRequest) (*SnapshotsReply, error) { return nil, status.Errorf(codes.Unimplemented, "method Snapshots not implemented") } +func (UnimplementedKVServer) Range(context.Context, *RangeReq) (*Pairs, error) { + return nil, status.Errorf(codes.Unimplemented, "method Range not implemented") +} func (UnimplementedKVServer) DomainGet(context.Context, *DomainGetReq) (*DomainGetReply, error) { return nil, status.Errorf(codes.Unimplemented, "method DomainGet not implemented") } @@ -223,8 +261,11 @@ func (UnimplementedKVServer) HistoryGet(context.Context, *HistoryGetReq) (*Histo func (UnimplementedKVServer) IndexRange(context.Context, *IndexRangeReq) (*IndexRangeReply, error) { return nil, status.Errorf(codes.Unimplemented, "method IndexRange not implemented") } -func (UnimplementedKVServer) Range(context.Context, *RangeReq) (*Pairs, error) { - return nil, status.Errorf(codes.Unimplemented, "method Range not implemented") +func (UnimplementedKVServer) HistoryRange(context.Context, *HistoryRangeReq) (*Pairs, error) { + return nil, status.Errorf(codes.Unimplemented, "method HistoryRange not implemented") +} +func (UnimplementedKVServer) DomainRange(context.Context, *DomainRangeReq) (*Pairs, error) { + return nil, status.Errorf(codes.Unimplemented, "method DomainRange not implemented") } func (UnimplementedKVServer) mustEmbedUnimplementedKVServer() {} @@ -249,7 +290,7 @@ func _KV_Version_Handler(srv interface{}, ctx context.Context, dec func(interfac } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.KV/Version", + FullMethod: KV_Version_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(KVServer).Version(ctx, req.(*emptypb.Empty)) @@ -314,7 +355,7 @@ func _KV_Snapshots_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.KV/Snapshots", + FullMethod: KV_Snapshots_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(KVServer).Snapshots(ctx, req.(*SnapshotsRequest)) @@ -322,6 +363,24 @@ func _KV_Snapshots_Handler(srv interface{}, ctx context.Context, dec func(interf return 
interceptor(ctx, in, info, handler) } +func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RangeReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Range(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: KV_Range_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Range(ctx, req.(*RangeReq)) + } + return interceptor(ctx, in, info, handler) +} + func _KV_DomainGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DomainGetReq) if err := dec(in); err != nil { @@ -332,7 +391,7 @@ func _KV_DomainGet_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.KV/DomainGet", + FullMethod: KV_DomainGet_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(KVServer).DomainGet(ctx, req.(*DomainGetReq)) @@ -350,7 +409,7 @@ func _KV_HistoryGet_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.KV/HistoryGet", + FullMethod: KV_HistoryGet_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(KVServer).HistoryGet(ctx, req.(*HistoryGetReq)) @@ -368,7 +427,7 @@ func _KV_IndexRange_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.KV/IndexRange", + FullMethod: KV_IndexRange_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(KVServer).IndexRange(ctx, req.(*IndexRangeReq)) @@ -376,20 +435,38 @@ func _KV_IndexRange_Handler(srv interface{}, ctx context.Context, dec func(inter return interceptor(ctx, in, info, handler) } -func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RangeReq) +func _KV_HistoryRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HistoryRangeReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(KVServer).Range(ctx, in) + return srv.(KVServer).HistoryRange(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/remote.KV/Range", + FullMethod: KV_HistoryRange_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Range(ctx, req.(*RangeReq)) + return srv.(KVServer).HistoryRange(ctx, req.(*HistoryRangeReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_DomainRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DomainRangeReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).DomainRange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: KV_DomainRange_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).DomainRange(ctx, req.(*DomainRangeReq)) } return 
interceptor(ctx, in, info, handler) } @@ -409,6 +486,10 @@ var KV_ServiceDesc = grpc.ServiceDesc{ MethodName: "Snapshots", Handler: _KV_Snapshots_Handler, }, + { + MethodName: "Range", + Handler: _KV_Range_Handler, + }, { MethodName: "DomainGet", Handler: _KV_DomainGet_Handler, @@ -422,8 +503,12 @@ var KV_ServiceDesc = grpc.ServiceDesc{ Handler: _KV_IndexRange_Handler, }, { - MethodName: "Range", - Handler: _KV_Range_Handler, + MethodName: "HistoryRange", + Handler: _KV_HistoryRange_Handler, + }, + { + MethodName: "DomainRange", + Handler: _KV_DomainRange_Handler, }, }, Streams: []grpc.StreamDesc{ diff --git a/gointerfaces/remote/mocks.go b/gointerfaces/remote/mocks.go index 86a2666a9..24e98be04 100644 --- a/gointerfaces/remote/mocks.go +++ b/gointerfaces/remote/mocks.go @@ -25,9 +25,15 @@ var _ KVClient = &KVClientMock{} // DomainGetFunc: func(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error) { // panic("mock out the DomainGet method") // }, +// DomainRangeFunc: func(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error) { +// panic("mock out the DomainRange method") +// }, // HistoryGetFunc: func(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) { // panic("mock out the HistoryGet method") // }, +// HistoryRangeFunc: func(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error) { +// panic("mock out the HistoryRange method") +// }, // IndexRangeFunc: func(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error) { // panic("mock out the IndexRange method") // }, @@ -56,9 +62,15 @@ type KVClientMock struct { // DomainGetFunc mocks the DomainGet method. DomainGetFunc func(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error) + // DomainRangeFunc mocks the DomainRange method. + DomainRangeFunc func(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error) + // HistoryGetFunc mocks the HistoryGet method. HistoryGetFunc func(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) + // HistoryRangeFunc mocks the HistoryRange method. + HistoryRangeFunc func(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error) + // IndexRangeFunc mocks the IndexRange method. IndexRangeFunc func(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error) @@ -88,6 +100,15 @@ type KVClientMock struct { // Opts is the opts argument value. Opts []grpc.CallOption } + // DomainRange holds details about calls to the DomainRange method. + DomainRange []struct { + // Ctx is the ctx argument value. + Ctx context.Context + // In is the in argument value. + In *DomainRangeReq + // Opts is the opts argument value. + Opts []grpc.CallOption + } // HistoryGet holds details about calls to the HistoryGet method. HistoryGet []struct { // Ctx is the ctx argument value. @@ -97,6 +118,15 @@ type KVClientMock struct { // Opts is the opts argument value. Opts []grpc.CallOption } + // HistoryRange holds details about calls to the HistoryRange method. + HistoryRange []struct { + // Ctx is the ctx argument value. + Ctx context.Context + // In is the in argument value. + In *HistoryRangeReq + // Opts is the opts argument value. + Opts []grpc.CallOption + } // IndexRange holds details about calls to the IndexRange method. IndexRange []struct { // Ctx is the ctx argument value. 
@@ -151,7 +181,9 @@ type KVClientMock struct { } } lockDomainGet sync.RWMutex + lockDomainRange sync.RWMutex lockHistoryGet sync.RWMutex + lockHistoryRange sync.RWMutex lockIndexRange sync.RWMutex lockRange sync.RWMutex lockSnapshots sync.RWMutex @@ -204,6 +236,50 @@ func (mock *KVClientMock) DomainGetCalls() []struct { return calls } +// DomainRange calls DomainRangeFunc. +func (mock *KVClientMock) DomainRange(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error) { + callInfo := struct { + Ctx context.Context + In *DomainRangeReq + Opts []grpc.CallOption + }{ + Ctx: ctx, + In: in, + Opts: opts, + } + mock.lockDomainRange.Lock() + mock.calls.DomainRange = append(mock.calls.DomainRange, callInfo) + mock.lockDomainRange.Unlock() + if mock.DomainRangeFunc == nil { + var ( + pairsOut *Pairs + errOut error + ) + return pairsOut, errOut + } + return mock.DomainRangeFunc(ctx, in, opts...) +} + +// DomainRangeCalls gets all the calls that were made to DomainRange. +// Check the length with: +// +// len(mockedKVClient.DomainRangeCalls()) +func (mock *KVClientMock) DomainRangeCalls() []struct { + Ctx context.Context + In *DomainRangeReq + Opts []grpc.CallOption +} { + var calls []struct { + Ctx context.Context + In *DomainRangeReq + Opts []grpc.CallOption + } + mock.lockDomainRange.RLock() + calls = mock.calls.DomainRange + mock.lockDomainRange.RUnlock() + return calls +} + // HistoryGet calls HistoryGetFunc. func (mock *KVClientMock) HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) { callInfo := struct { @@ -248,6 +324,50 @@ func (mock *KVClientMock) HistoryGetCalls() []struct { return calls } +// HistoryRange calls HistoryRangeFunc. +func (mock *KVClientMock) HistoryRange(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error) { + callInfo := struct { + Ctx context.Context + In *HistoryRangeReq + Opts []grpc.CallOption + }{ + Ctx: ctx, + In: in, + Opts: opts, + } + mock.lockHistoryRange.Lock() + mock.calls.HistoryRange = append(mock.calls.HistoryRange, callInfo) + mock.lockHistoryRange.Unlock() + if mock.HistoryRangeFunc == nil { + var ( + pairsOut *Pairs + errOut error + ) + return pairsOut, errOut + } + return mock.HistoryRangeFunc(ctx, in, opts...) +} + +// HistoryRangeCalls gets all the calls that were made to HistoryRange. +// Check the length with: +// +// len(mockedKVClient.HistoryRangeCalls()) +func (mock *KVClientMock) HistoryRangeCalls() []struct { + Ctx context.Context + In *HistoryRangeReq + Opts []grpc.CallOption +} { + var calls []struct { + Ctx context.Context + In *HistoryRangeReq + Opts []grpc.CallOption + } + mock.lockHistoryRange.RLock() + calls = mock.calls.HistoryRange + mock.lockHistoryRange.RUnlock() + return calls +} + // IndexRange calls IndexRangeFunc. func (mock *KVClientMock) IndexRange(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error) { callInfo := struct { diff --git a/gointerfaces/sentinel/sentinel.pb.go b/gointerfaces/sentinel/sentinel.pb.go index 567e991da..747221aa3 100644 --- a/gointerfaces/sentinel/sentinel.pb.go +++ b/gointerfaces/sentinel/sentinel.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.22.2 // source: p2psentinel/sentinel.proto package sentinel @@ -123,6 +123,53 @@ func (*EmptyMessage) Descriptor() ([]byte, []int) { return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{0} } +type Peer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid string `protobuf:"bytes,1,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (x *Peer) Reset() { + *x = Peer{} + if protoimpl.UnsafeEnabled { + mi := &file_p2psentinel_sentinel_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Peer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Peer) ProtoMessage() {} + +func (x *Peer) ProtoReflect() protoreflect.Message { + mi := &file_p2psentinel_sentinel_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Peer.ProtoReflect.Descriptor instead. +func (*Peer) Descriptor() ([]byte, []int) { + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{1} +} + +func (x *Peer) GetPid() string { + if x != nil { + return x.Pid + } + return "" +} + type GossipData struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -130,12 +177,13 @@ type GossipData struct { Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // SSZ encoded data Type GossipType `protobuf:"varint,2,opt,name=type,proto3,enum=sentinel.GossipType" json:"type,omitempty"` + Peer *Peer `protobuf:"bytes,3,opt,name=peer,proto3,oneof" json:"peer,omitempty"` } func (x *GossipData) Reset() { *x = GossipData{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[1] + mi := &file_p2psentinel_sentinel_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -148,7 +196,7 @@ func (x *GossipData) String() string { func (*GossipData) ProtoMessage() {} func (x *GossipData) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[1] + mi := &file_p2psentinel_sentinel_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -161,7 +209,7 @@ func (x *GossipData) ProtoReflect() protoreflect.Message { // Deprecated: Use GossipData.ProtoReflect.Descriptor instead. 
func (*GossipData) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{1} + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{2} } func (x *GossipData) GetData() []byte { @@ -178,6 +226,13 @@ func (x *GossipData) GetType() GossipType { return GossipType_LightClientFinalityUpdateGossipType } +func (x *GossipData) GetPeer() *Peer { + if x != nil { + return x.Peer + } + return nil +} + type Status struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -193,7 +248,7 @@ type Status struct { func (x *Status) Reset() { *x = Status{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[2] + mi := &file_p2psentinel_sentinel_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -206,7 +261,7 @@ func (x *Status) String() string { func (*Status) ProtoMessage() {} func (x *Status) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[2] + mi := &file_p2psentinel_sentinel_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -219,7 +274,7 @@ func (x *Status) ProtoReflect() protoreflect.Message { // Deprecated: Use Status.ProtoReflect.Descriptor instead. func (*Status) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{2} + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{3} } func (x *Status) GetForkDigest() uint32 { @@ -268,7 +323,7 @@ type PeerCount struct { func (x *PeerCount) Reset() { *x = PeerCount{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[3] + mi := &file_p2psentinel_sentinel_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -281,7 +336,7 @@ func (x *PeerCount) String() string { func (*PeerCount) ProtoMessage() {} func (x *PeerCount) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[3] + mi := &file_p2psentinel_sentinel_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -294,7 +349,7 @@ func (x *PeerCount) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerCount.ProtoReflect.Descriptor instead. func (*PeerCount) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{3} + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{4} } func (x *PeerCount) GetAmount() uint64 { @@ -316,7 +371,7 @@ type RequestData struct { func (x *RequestData) Reset() { *x = RequestData{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[4] + mi := &file_p2psentinel_sentinel_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -329,7 +384,7 @@ func (x *RequestData) String() string { func (*RequestData) ProtoMessage() {} func (x *RequestData) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[4] + mi := &file_p2psentinel_sentinel_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -342,7 +397,7 @@ func (x *RequestData) ProtoReflect() protoreflect.Message { // Deprecated: Use RequestData.ProtoReflect.Descriptor instead. 
func (*RequestData) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{4} + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{5} } func (x *RequestData) GetData() []byte { @@ -371,7 +426,7 @@ type ResponseData struct { func (x *ResponseData) Reset() { *x = ResponseData{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentinel_sentinel_proto_msgTypes[5] + mi := &file_p2psentinel_sentinel_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -384,7 +439,7 @@ func (x *ResponseData) String() string { func (*ResponseData) ProtoMessage() {} func (x *ResponseData) ProtoReflect() protoreflect.Message { - mi := &file_p2psentinel_sentinel_proto_msgTypes[5] + mi := &file_p2psentinel_sentinel_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -397,7 +452,7 @@ func (x *ResponseData) ProtoReflect() protoreflect.Message { // Deprecated: Use ResponseData.ProtoReflect.Descriptor instead. func (*ResponseData) Descriptor() ([]byte, []int) { - return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{5} + return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{6} } func (x *ResponseData) GetData() []byte { @@ -421,68 +476,80 @@ var file_p2psentinel_sentinel_proto_rawDesc = []byte{ 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x0e, 0x0a, 0x0c, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4a, 0x0a, 0x0a, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, - 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xcd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x67, 0x65, 0x73, - 0x74, 0x12, 0x32, 0x0a, 0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x72, - 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, - 0x65, 0x64, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, - 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x28, - 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x08, - 0x68, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, - 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x65, 0x61, - 0x64, 0x53, 0x6c, 0x6f, 0x74, 0x22, 0x23, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 
0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x37, 0x0a, 0x0b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, - 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, - 0x70, 0x69, 0x63, 0x22, 0x38, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, - 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2a, 0xf9, 0x01, - 0x0a, 0x0a, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x27, 0x0a, 0x23, - 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6e, 0x61, 0x6c, - 0x69, 0x74, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, - 0x79, 0x70, 0x65, 0x10, 0x00, 0x12, 0x29, 0x0a, 0x25, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x73, 0x74, 0x69, 0x63, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x01, - 0x12, 0x19, 0x0a, 0x15, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x47, - 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x41, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, - 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x03, 0x12, 0x1b, 0x0a, 0x17, - 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x04, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x72, 0x6f, - 0x70, 0x6f, 0x73, 0x65, 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x74, 0x74, - 0x65, 0x73, 0x74, 0x65, 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x06, 0x32, 0xfb, 0x01, 0x0a, 0x08, 0x53, 0x65, - 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x12, 0x41, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x62, 0x65, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, - 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x1a, 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, 0x61, 0x30, 0x01, 0x12, 0x3c, 0x0a, 0x0b, 0x53, 0x65, 0x6e, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, - 0x6e, 0x65, 0x6c, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x1a, - 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x35, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, - 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 
0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, - 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, - 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x1a, 0x13, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x15, 0x5a, 0x13, 0x2e, 0x2f, 0x73, 0x65, 0x6e, - 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x3b, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x18, 0x0a, 0x04, 0x50, 0x65, 0x65, + 0x72, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x70, 0x69, 0x64, 0x22, 0x7c, 0x0a, 0x0a, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x47, + 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x27, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x70, 0x65, 0x65, + 0x72, 0x22, 0xcd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, + 0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, + 0x35, 0x36, 0x52, 0x0d, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x52, 0x6f, 0x6f, + 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x65, + 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x66, 0x69, 0x6e, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x28, 0x0a, 0x09, 0x68, 0x65, + 0x61, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x08, 0x68, 0x65, 0x61, 0x64, + 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x6c, 0x6f, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x65, 0x61, 0x64, 0x53, 0x6c, 0x6f, + 0x74, 0x22, 0x23, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, + 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x37, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, + 0x38, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x12, 0x0a, 0x04, 0x64, 0x61, 
0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2a, 0xf9, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x27, 0x0a, 0x23, 0x4c, 0x69, 0x67, 0x68, + 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, + 0x00, 0x12, 0x29, 0x0a, 0x25, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x73, 0x74, 0x69, 0x63, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, + 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x41, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x47, 0x6f, 0x73, 0x73, + 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x03, 0x12, 0x1b, 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, + 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, + 0x79, 0x70, 0x65, 0x10, 0x04, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, + 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, + 0x79, 0x70, 0x65, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, + 0x79, 0x70, 0x65, 0x10, 0x06, 0x32, 0xed, 0x02, 0x0a, 0x08, 0x53, 0x65, 0x6e, 0x74, 0x69, 0x6e, + 0x65, 0x6c, 0x12, 0x41, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x47, + 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x14, 0x2e, + 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, + 0x61, 0x74, 0x61, 0x30, 0x01, 0x12, 0x3c, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, 0x73, 0x65, + 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x35, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x10, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x47, 0x65, + 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, + 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x13, + 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x42, 0x61, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, + 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, + 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 
0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, + 0x68, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, + 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, + 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x15, 0x5a, 0x13, 0x2e, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x69, + 0x6e, 0x65, 0x6c, 0x3b, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -498,34 +565,40 @@ func file_p2psentinel_sentinel_proto_rawDescGZIP() []byte { } var file_p2psentinel_sentinel_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_p2psentinel_sentinel_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_p2psentinel_sentinel_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_p2psentinel_sentinel_proto_goTypes = []interface{}{ (GossipType)(0), // 0: sentinel.GossipType (*EmptyMessage)(nil), // 1: sentinel.EmptyMessage - (*GossipData)(nil), // 2: sentinel.GossipData - (*Status)(nil), // 3: sentinel.Status - (*PeerCount)(nil), // 4: sentinel.PeerCount - (*RequestData)(nil), // 5: sentinel.RequestData - (*ResponseData)(nil), // 6: sentinel.ResponseData - (*types.H256)(nil), // 7: types.H256 + (*Peer)(nil), // 2: sentinel.Peer + (*GossipData)(nil), // 3: sentinel.GossipData + (*Status)(nil), // 4: sentinel.Status + (*PeerCount)(nil), // 5: sentinel.PeerCount + (*RequestData)(nil), // 6: sentinel.RequestData + (*ResponseData)(nil), // 7: sentinel.ResponseData + (*types.H256)(nil), // 8: types.H256 } var file_p2psentinel_sentinel_proto_depIdxs = []int32{ - 0, // 0: sentinel.GossipData.type:type_name -> sentinel.GossipType - 7, // 1: sentinel.Status.finalized_root:type_name -> types.H256 - 7, // 2: sentinel.Status.head_root:type_name -> types.H256 - 1, // 3: sentinel.Sentinel.SubscribeGossip:input_type -> sentinel.EmptyMessage - 5, // 4: sentinel.Sentinel.SendRequest:input_type -> sentinel.RequestData - 3, // 5: sentinel.Sentinel.SetStatus:input_type -> sentinel.Status - 1, // 6: sentinel.Sentinel.GetPeers:input_type -> sentinel.EmptyMessage - 2, // 7: sentinel.Sentinel.SubscribeGossip:output_type -> sentinel.GossipData - 6, // 8: sentinel.Sentinel.SendRequest:output_type -> sentinel.ResponseData - 1, // 9: sentinel.Sentinel.SetStatus:output_type -> sentinel.EmptyMessage - 4, // 10: sentinel.Sentinel.GetPeers:output_type -> sentinel.PeerCount - 7, // [7:11] is the sub-list for method output_type - 3, // [3:7] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 0, // 0: sentinel.GossipData.type:type_name -> sentinel.GossipType + 2, // 1: sentinel.GossipData.peer:type_name -> sentinel.Peer + 8, // 2: sentinel.Status.finalized_root:type_name -> types.H256 + 8, // 3: sentinel.Status.head_root:type_name -> types.H256 + 1, // 4: sentinel.Sentinel.SubscribeGossip:input_type -> sentinel.EmptyMessage + 6, // 5: sentinel.Sentinel.SendRequest:input_type -> sentinel.RequestData + 4, // 6: sentinel.Sentinel.SetStatus:input_type -> sentinel.Status + 1, // 7: sentinel.Sentinel.GetPeers:input_type -> sentinel.EmptyMessage + 2, // 8: sentinel.Sentinel.BanPeer:input_type -> sentinel.Peer + 3, // 9: sentinel.Sentinel.PublishGossip:input_type -> sentinel.GossipData + 
3, // 10: sentinel.Sentinel.SubscribeGossip:output_type -> sentinel.GossipData + 7, // 11: sentinel.Sentinel.SendRequest:output_type -> sentinel.ResponseData + 1, // 12: sentinel.Sentinel.SetStatus:output_type -> sentinel.EmptyMessage + 5, // 13: sentinel.Sentinel.GetPeers:output_type -> sentinel.PeerCount + 1, // 14: sentinel.Sentinel.BanPeer:output_type -> sentinel.EmptyMessage + 1, // 15: sentinel.Sentinel.PublishGossip:output_type -> sentinel.EmptyMessage + 10, // [10:16] is the sub-list for method output_type + 4, // [4:10] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_p2psentinel_sentinel_proto_init() } @@ -547,7 +620,7 @@ func file_p2psentinel_sentinel_proto_init() { } } file_p2psentinel_sentinel_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GossipData); i { + switch v := v.(*Peer); i { case 0: return &v.state case 1: @@ -559,7 +632,7 @@ func file_p2psentinel_sentinel_proto_init() { } } file_p2psentinel_sentinel_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Status); i { + switch v := v.(*GossipData); i { case 0: return &v.state case 1: @@ -571,7 +644,7 @@ func file_p2psentinel_sentinel_proto_init() { } } file_p2psentinel_sentinel_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerCount); i { + switch v := v.(*Status); i { case 0: return &v.state case 1: @@ -583,7 +656,7 @@ func file_p2psentinel_sentinel_proto_init() { } } file_p2psentinel_sentinel_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestData); i { + switch v := v.(*PeerCount); i { case 0: return &v.state case 1: @@ -595,6 +668,18 @@ func file_p2psentinel_sentinel_proto_init() { } } file_p2psentinel_sentinel_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2psentinel_sentinel_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ResponseData); i { case 0: return &v.state @@ -607,13 +692,14 @@ func file_p2psentinel_sentinel_proto_init() { } } } + file_p2psentinel_sentinel_proto_msgTypes[2].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_p2psentinel_sentinel_proto_rawDesc, NumEnums: 1, - NumMessages: 6, + NumMessages: 7, NumExtensions: 0, NumServices: 1, }, diff --git a/gointerfaces/sentinel/sentinel_grpc.pb.go b/gointerfaces/sentinel/sentinel_grpc.pb.go index a600da9ae..e31653104 100644 --- a/gointerfaces/sentinel/sentinel_grpc.pb.go +++ b/gointerfaces/sentinel/sentinel_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.12 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.2 // source: p2psentinel/sentinel.proto package sentinel @@ -18,6 +18,15 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
const _ = grpc.SupportPackageIsVersion7 +const ( + Sentinel_SubscribeGossip_FullMethodName = "/sentinel.Sentinel/SubscribeGossip" + Sentinel_SendRequest_FullMethodName = "/sentinel.Sentinel/SendRequest" + Sentinel_SetStatus_FullMethodName = "/sentinel.Sentinel/SetStatus" + Sentinel_GetPeers_FullMethodName = "/sentinel.Sentinel/GetPeers" + Sentinel_BanPeer_FullMethodName = "/sentinel.Sentinel/BanPeer" + Sentinel_PublishGossip_FullMethodName = "/sentinel.Sentinel/PublishGossip" +) + // SentinelClient is the client API for Sentinel service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -26,6 +35,8 @@ type SentinelClient interface { SendRequest(ctx context.Context, in *RequestData, opts ...grpc.CallOption) (*ResponseData, error) SetStatus(ctx context.Context, in *Status, opts ...grpc.CallOption) (*EmptyMessage, error) GetPeers(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*PeerCount, error) + BanPeer(ctx context.Context, in *Peer, opts ...grpc.CallOption) (*EmptyMessage, error) + PublishGossip(ctx context.Context, in *GossipData, opts ...grpc.CallOption) (*EmptyMessage, error) } type sentinelClient struct { @@ -37,7 +48,7 @@ func NewSentinelClient(cc grpc.ClientConnInterface) SentinelClient { } func (c *sentinelClient) SubscribeGossip(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (Sentinel_SubscribeGossipClient, error) { - stream, err := c.cc.NewStream(ctx, &Sentinel_ServiceDesc.Streams[0], "/sentinel.Sentinel/SubscribeGossip", opts...) + stream, err := c.cc.NewStream(ctx, &Sentinel_ServiceDesc.Streams[0], Sentinel_SubscribeGossip_FullMethodName, opts...) if err != nil { return nil, err } @@ -70,7 +81,7 @@ func (x *sentinelSubscribeGossipClient) Recv() (*GossipData, error) { func (c *sentinelClient) SendRequest(ctx context.Context, in *RequestData, opts ...grpc.CallOption) (*ResponseData, error) { out := new(ResponseData) - err := c.cc.Invoke(ctx, "/sentinel.Sentinel/SendRequest", in, out, opts...) + err := c.cc.Invoke(ctx, Sentinel_SendRequest_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -79,7 +90,7 @@ func (c *sentinelClient) SendRequest(ctx context.Context, in *RequestData, opts func (c *sentinelClient) SetStatus(ctx context.Context, in *Status, opts ...grpc.CallOption) (*EmptyMessage, error) { out := new(EmptyMessage) - err := c.cc.Invoke(ctx, "/sentinel.Sentinel/SetStatus", in, out, opts...) + err := c.cc.Invoke(ctx, Sentinel_SetStatus_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -88,7 +99,25 @@ func (c *sentinelClient) SetStatus(ctx context.Context, in *Status, opts ...grpc func (c *sentinelClient) GetPeers(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*PeerCount, error) { out := new(PeerCount) - err := c.cc.Invoke(ctx, "/sentinel.Sentinel/GetPeers", in, out, opts...) + err := c.cc.Invoke(ctx, Sentinel_GetPeers_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sentinelClient) BanPeer(ctx context.Context, in *Peer, opts ...grpc.CallOption) (*EmptyMessage, error) { + out := new(EmptyMessage) + err := c.cc.Invoke(ctx, Sentinel_BanPeer_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *sentinelClient) PublishGossip(ctx context.Context, in *GossipData, opts ...grpc.CallOption) (*EmptyMessage, error) { + out := new(EmptyMessage) + err := c.cc.Invoke(ctx, Sentinel_PublishGossip_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -103,6 +132,8 @@ type SentinelServer interface { SendRequest(context.Context, *RequestData) (*ResponseData, error) SetStatus(context.Context, *Status) (*EmptyMessage, error) GetPeers(context.Context, *EmptyMessage) (*PeerCount, error) + BanPeer(context.Context, *Peer) (*EmptyMessage, error) + PublishGossip(context.Context, *GossipData) (*EmptyMessage, error) mustEmbedUnimplementedSentinelServer() } @@ -122,6 +153,12 @@ func (UnimplementedSentinelServer) SetStatus(context.Context, *Status) (*EmptyMe func (UnimplementedSentinelServer) GetPeers(context.Context, *EmptyMessage) (*PeerCount, error) { return nil, status.Errorf(codes.Unimplemented, "method GetPeers not implemented") } +func (UnimplementedSentinelServer) BanPeer(context.Context, *Peer) (*EmptyMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method BanPeer not implemented") +} +func (UnimplementedSentinelServer) PublishGossip(context.Context, *GossipData) (*EmptyMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method PublishGossip not implemented") +} func (UnimplementedSentinelServer) mustEmbedUnimplementedSentinelServer() {} // UnsafeSentinelServer may be embedded to opt out of forward compatibility for this service. @@ -166,7 +203,7 @@ func _Sentinel_SendRequest_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentinel.Sentinel/SendRequest", + FullMethod: Sentinel_SendRequest_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentinelServer).SendRequest(ctx, req.(*RequestData)) @@ -184,7 +221,7 @@ func _Sentinel_SetStatus_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentinel.Sentinel/SetStatus", + FullMethod: Sentinel_SetStatus_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentinelServer).SetStatus(ctx, req.(*Status)) @@ -202,7 +239,7 @@ func _Sentinel_GetPeers_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentinel.Sentinel/GetPeers", + FullMethod: Sentinel_GetPeers_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentinelServer).GetPeers(ctx, req.(*EmptyMessage)) @@ -210,6 +247,42 @@ func _Sentinel_GetPeers_Handler(srv interface{}, ctx context.Context, dec func(i return interceptor(ctx, in, info, handler) } +func _Sentinel_BanPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Peer) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SentinelServer).BanPeer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Sentinel_BanPeer_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SentinelServer).BanPeer(ctx, req.(*Peer)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sentinel_PublishGossip_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GossipData) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SentinelServer).PublishGossip(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Sentinel_PublishGossip_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SentinelServer).PublishGossip(ctx, req.(*GossipData)) + } + return interceptor(ctx, in, info, handler) +} + // Sentinel_ServiceDesc is the grpc.ServiceDesc for Sentinel service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -229,6 +302,14 @@ var Sentinel_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetPeers", Handler: _Sentinel_GetPeers_Handler, }, + { + MethodName: "BanPeer", + Handler: _Sentinel_BanPeer_Handler, + }, + { + MethodName: "PublishGossip", + Handler: _Sentinel_PublishGossip_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/gointerfaces/sentry/mocks.go b/gointerfaces/sentry/mocks.go index d4054379b..edfea4526 100644 --- a/gointerfaces/sentry/mocks.go +++ b/gointerfaces/sentry/mocks.go @@ -42,9 +42,6 @@ var _ SentryServer = &SentryServerMock{} // PeerMinBlockFunc: func(contextMoqParam context.Context, peerMinBlockRequest *PeerMinBlockRequest) (*emptypb.Empty, error) { // panic("mock out the PeerMinBlock method") // }, -// PeerUselessFunc: func(contextMoqParam context.Context, peerUselessRequest *PeerUselessRequest) (*emptypb.Empty, error) { -// panic("mock out the PeerUseless method") -// }, // PeersFunc: func(contextMoqParam context.Context, empty *emptypb.Empty) (*PeersReply, error) { // panic("mock out the Peers method") // }, @@ -97,9 +94,6 @@ type SentryServerMock struct { // PeerMinBlockFunc mocks the PeerMinBlock method. PeerMinBlockFunc func(contextMoqParam context.Context, peerMinBlockRequest *PeerMinBlockRequest) (*emptypb.Empty, error) - // PeerUselessFunc mocks the PeerUseless method. - PeerUselessFunc func(contextMoqParam context.Context, peerUselessRequest *PeerUselessRequest) (*emptypb.Empty, error) - // PeersFunc mocks the Peers method. PeersFunc func(contextMoqParam context.Context, empty *emptypb.Empty) (*PeersReply, error) @@ -175,13 +169,6 @@ type SentryServerMock struct { // PeerMinBlockRequest is the peerMinBlockRequest argument value. PeerMinBlockRequest *PeerMinBlockRequest } - // PeerUseless holds details about calls to the PeerUseless method. - PeerUseless []struct { - // ContextMoqParam is the contextMoqParam argument value. - ContextMoqParam context.Context - // PeerUselessRequest is the peerUselessRequest argument value. - PeerUselessRequest *PeerUselessRequest - } // Peers holds details about calls to the Peers method. Peers []struct { // ContextMoqParam is the contextMoqParam argument value. @@ -242,7 +229,6 @@ type SentryServerMock struct { lockPeerCount sync.RWMutex lockPeerEvents sync.RWMutex lockPeerMinBlock sync.RWMutex - lockPeerUseless sync.RWMutex lockPeers sync.RWMutex lockPenalizePeer sync.RWMutex lockSendMessageById sync.RWMutex @@ -531,46 +517,6 @@ func (mock *SentryServerMock) PeerMinBlockCalls() []struct { return calls } -// PeerUseless calls PeerUselessFunc. 
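Editor's aside, before the PeerUseless removal continues below: the Sentinel hunks above added BanPeer and PublishGossip to Sentinel_ServiceDesc, and per the generated comment that descriptor is meant only for grpc.RegisterService. In practice a server picks the new methods up by embedding UnimplementedSentinelServer and overriding what it needs. A minimal sketch, assuming the generated RegisterSentinelServer helper (standard protoc-gen-go-grpc output, not shown in this excerpt), this repo's gointerfaces/sentinel import path, and a hypothetical listen address:

package main

import (
	"context"
	"net"

	"google.golang.org/grpc"

	"github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel" // assumed import path
)

// banOnlySentinel overrides only BanPeer; the embedded
// UnimplementedSentinelServer answers codes.Unimplemented for everything
// else, including the new PublishGossip.
type banOnlySentinel struct {
	sentinel.UnimplementedSentinelServer
}

func (s *banOnlySentinel) BanPeer(ctx context.Context, p *sentinel.Peer) (*sentinel.EmptyMessage, error) {
	return &sentinel.EmptyMessage{}, nil // a real server would record the ban here
}

func main() {
	lis, err := net.Listen("tcp", "localhost:7777") // hypothetical address
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	sentinel.RegisterSentinelServer(srv, &banOnlySentinel{}) // wires Sentinel_ServiceDesc internally
	_ = srv.Serve(lis)
}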
-func (mock *SentryServerMock) PeerUseless(contextMoqParam context.Context, peerUselessRequest *PeerUselessRequest) (*emptypb.Empty, error) { - callInfo := struct { - ContextMoqParam context.Context - PeerUselessRequest *PeerUselessRequest - }{ - ContextMoqParam: contextMoqParam, - PeerUselessRequest: peerUselessRequest, - } - mock.lockPeerUseless.Lock() - mock.calls.PeerUseless = append(mock.calls.PeerUseless, callInfo) - mock.lockPeerUseless.Unlock() - if mock.PeerUselessFunc == nil { - var ( - emptyOut *emptypb.Empty - errOut error - ) - return emptyOut, errOut - } - return mock.PeerUselessFunc(contextMoqParam, peerUselessRequest) -} - -// PeerUselessCalls gets all the calls that were made to PeerUseless. -// Check the length with: -// -// len(mockedSentryServer.PeerUselessCalls()) -func (mock *SentryServerMock) PeerUselessCalls() []struct { - ContextMoqParam context.Context - PeerUselessRequest *PeerUselessRequest -} { - var calls []struct { - ContextMoqParam context.Context - PeerUselessRequest *PeerUselessRequest - } - mock.lockPeerUseless.RLock() - calls = mock.calls.PeerUseless - mock.lockPeerUseless.RUnlock() - return calls -} - // Peers calls PeersFunc. func (mock *SentryServerMock) Peers(contextMoqParam context.Context, empty *emptypb.Empty) (*PeersReply, error) { callInfo := struct { @@ -909,9 +855,6 @@ var _ SentryClient = &SentryClientMock{} // PeerMinBlockFunc: func(ctx context.Context, in *PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { // panic("mock out the PeerMinBlock method") // }, -// PeerUselessFunc: func(ctx context.Context, in *PeerUselessRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { -// panic("mock out the PeerUseless method") -// }, // PeersFunc: func(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error) { // panic("mock out the Peers method") // }, @@ -961,9 +904,6 @@ type SentryClientMock struct { // PeerMinBlockFunc mocks the PeerMinBlock method. PeerMinBlockFunc func(ctx context.Context, in *PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // PeerUselessFunc mocks the PeerUseless method. - PeerUselessFunc func(ctx context.Context, in *PeerUselessRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // PeersFunc mocks the Peers method. PeersFunc func(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error) @@ -1050,15 +990,6 @@ type SentryClientMock struct { // Opts is the opts argument value. Opts []grpc.CallOption } - // PeerUseless holds details about calls to the PeerUseless method. - PeerUseless []struct { - // Ctx is the ctx argument value. - Ctx context.Context - // In is the in argument value. - In *PeerUselessRequest - // Opts is the opts argument value. - Opts []grpc.CallOption - } // Peers holds details about calls to the Peers method. Peers []struct { // Ctx is the ctx argument value. @@ -1130,7 +1061,6 @@ type SentryClientMock struct { lockPeerCount sync.RWMutex lockPeerEvents sync.RWMutex lockPeerMinBlock sync.RWMutex - lockPeerUseless sync.RWMutex lockPeers sync.RWMutex lockPenalizePeer sync.RWMutex lockSendMessageById sync.RWMutex @@ -1448,50 +1378,6 @@ func (mock *SentryClientMock) PeerMinBlockCalls() []struct { return calls } -// PeerUseless calls PeerUselessFunc. 
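The client-side PeerUseless scaffolding is removed next, mirroring the server side above. For readers unfamiliar with moq output, this is how the remaining mocks in this file are exercised; the test name is illustrative, but SentryServerMock, PeersFunc, Peers, and PeersCalls are all defined in this file:

package sentry

import (
	"context"
	"testing"

	"google.golang.org/protobuf/types/known/emptypb"
)

func TestPeersMock(t *testing.T) {
	mock := &SentryServerMock{
		PeersFunc: func(contextMoqParam context.Context, empty *emptypb.Empty) (*PeersReply, error) {
			return &PeersReply{}, nil
		},
	}
	if _, err := mock.Peers(context.Background(), &emptypb.Empty{}); err != nil {
		t.Fatal(err)
	}
	// Every call is recorded; assertions against removed methods such as
	// PeerUselessCalls() must be deleted along with this regeneration.
	if got := len(mock.PeersCalls()); got != 1 {
		t.Fatalf("expected 1 recorded call, got %d", got)
	}
}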
-func (mock *SentryClientMock) PeerUseless(ctx context.Context, in *PeerUselessRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - callInfo := struct { - Ctx context.Context - In *PeerUselessRequest - Opts []grpc.CallOption - }{ - Ctx: ctx, - In: in, - Opts: opts, - } - mock.lockPeerUseless.Lock() - mock.calls.PeerUseless = append(mock.calls.PeerUseless, callInfo) - mock.lockPeerUseless.Unlock() - if mock.PeerUselessFunc == nil { - var ( - emptyOut *emptypb.Empty - errOut error - ) - return emptyOut, errOut - } - return mock.PeerUselessFunc(ctx, in, opts...) -} - -// PeerUselessCalls gets all the calls that were made to PeerUseless. -// Check the length with: -// -// len(mockedSentryClient.PeerUselessCalls()) -func (mock *SentryClientMock) PeerUselessCalls() []struct { - Ctx context.Context - In *PeerUselessRequest - Opts []grpc.CallOption -} { - var calls []struct { - Ctx context.Context - In *PeerUselessRequest - Opts []grpc.CallOption - } - mock.lockPeerUseless.RLock() - calls = mock.calls.PeerUseless - mock.lockPeerUseless.RUnlock() - return calls -} - // Peers calls PeersFunc. func (mock *SentryClientMock) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error) { callInfo := struct { diff --git a/gointerfaces/sentry/sentry.pb.go b/gointerfaces/sentry/sentry.pb.go index 7a354dd40..7986d1187 100644 --- a/gointerfaces/sentry/sentry.pb.go +++ b/gointerfaces/sentry/sentry.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.22.2 // source: p2psentry/sentry.proto package sentry @@ -301,7 +301,7 @@ func (x PeerEvent_PeerEventId) Number() protoreflect.EnumNumber { // Deprecated: Use PeerEvent_PeerEventId.Descriptor instead. func (PeerEvent_PeerEventId) EnumDescriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{21, 0} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{20, 0} } type OutboundMessageData struct { @@ -689,53 +689,6 @@ func (x *PeerMinBlockRequest) GetMinBlock() uint64 { return 0 } -type PeerUselessRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerId *types.H512 `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` -} - -func (x *PeerUselessRequest) Reset() { - *x = PeerUselessRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerUselessRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerUselessRequest) ProtoMessage() {} - -func (x *PeerUselessRequest) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerUselessRequest.ProtoReflect.Descriptor instead. 
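Everything from the PeerUselessRequest deletion below down to the raw descriptor bytes is mechanical fallout of dropping one message: every later entry of file_p2psentry_sentry_proto_msgTypes, and the matching rawDescGZIP indices, shifts down by one. Nothing outside this file should depend on those positions; code that needs a descriptor can resolve it by full name instead, which stays stable across regenerations. A sketch of that lookup (the blank import path is assumed):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"

	_ "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" // registers the file descriptor at init
)

func main() {
	// Positional indices like []int{7} shift on every regeneration;
	// a full-name lookup does not.
	d, err := protoregistry.GlobalFiles.FindDescriptorByName("sentry.InboundMessage")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.FullName()) // sentry.InboundMessage
}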
-func (*PeerUselessRequest) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{7} -} - -func (x *PeerUselessRequest) GetPeerId() *types.H512 { - if x != nil { - return x.PeerId - } - return nil -} - type InboundMessage struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -749,7 +702,7 @@ type InboundMessage struct { func (x *InboundMessage) Reset() { *x = InboundMessage{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[8] + mi := &file_p2psentry_sentry_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -762,7 +715,7 @@ func (x *InboundMessage) String() string { func (*InboundMessage) ProtoMessage() {} func (x *InboundMessage) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[8] + mi := &file_p2psentry_sentry_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -775,7 +728,7 @@ func (x *InboundMessage) ProtoReflect() protoreflect.Message { // Deprecated: Use InboundMessage.ProtoReflect.Descriptor instead. func (*InboundMessage) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{8} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{7} } func (x *InboundMessage) GetId() MessageId { @@ -812,7 +765,7 @@ type Forks struct { func (x *Forks) Reset() { *x = Forks{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[9] + mi := &file_p2psentry_sentry_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -825,7 +778,7 @@ func (x *Forks) String() string { func (*Forks) ProtoMessage() {} func (x *Forks) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[9] + mi := &file_p2psentry_sentry_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -838,7 +791,7 @@ func (x *Forks) ProtoReflect() protoreflect.Message { // Deprecated: Use Forks.ProtoReflect.Descriptor instead. func (*Forks) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{9} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{8} } func (x *Forks) GetGenesis() *types.H256 { @@ -878,7 +831,7 @@ type StatusData struct { func (x *StatusData) Reset() { *x = StatusData{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[10] + mi := &file_p2psentry_sentry_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -891,7 +844,7 @@ func (x *StatusData) String() string { func (*StatusData) ProtoMessage() {} func (x *StatusData) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[10] + mi := &file_p2psentry_sentry_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -904,7 +857,7 @@ func (x *StatusData) ProtoReflect() protoreflect.Message { // Deprecated: Use StatusData.ProtoReflect.Descriptor instead. 
func (*StatusData) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{10} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{9} } func (x *StatusData) GetNetworkId() uint64 { @@ -958,7 +911,7 @@ type SetStatusReply struct { func (x *SetStatusReply) Reset() { *x = SetStatusReply{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[11] + mi := &file_p2psentry_sentry_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -971,7 +924,7 @@ func (x *SetStatusReply) String() string { func (*SetStatusReply) ProtoMessage() {} func (x *SetStatusReply) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[11] + mi := &file_p2psentry_sentry_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -984,7 +937,7 @@ func (x *SetStatusReply) ProtoReflect() protoreflect.Message { // Deprecated: Use SetStatusReply.ProtoReflect.Descriptor instead. func (*SetStatusReply) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{11} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{10} } type HandShakeReply struct { @@ -998,7 +951,7 @@ type HandShakeReply struct { func (x *HandShakeReply) Reset() { *x = HandShakeReply{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[12] + mi := &file_p2psentry_sentry_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1011,7 +964,7 @@ func (x *HandShakeReply) String() string { func (*HandShakeReply) ProtoMessage() {} func (x *HandShakeReply) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[12] + mi := &file_p2psentry_sentry_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1024,7 +977,7 @@ func (x *HandShakeReply) ProtoReflect() protoreflect.Message { // Deprecated: Use HandShakeReply.ProtoReflect.Descriptor instead. func (*HandShakeReply) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{12} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{11} } func (x *HandShakeReply) GetProtocol() Protocol { @@ -1045,7 +998,7 @@ type MessagesRequest struct { func (x *MessagesRequest) Reset() { *x = MessagesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[13] + mi := &file_p2psentry_sentry_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1058,7 +1011,7 @@ func (x *MessagesRequest) String() string { func (*MessagesRequest) ProtoMessage() {} func (x *MessagesRequest) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[13] + mi := &file_p2psentry_sentry_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1071,7 +1024,7 @@ func (x *MessagesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MessagesRequest.ProtoReflect.Descriptor instead. 
func (*MessagesRequest) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{13} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{12} } func (x *MessagesRequest) GetIds() []MessageId { @@ -1092,7 +1045,7 @@ type PeersReply struct { func (x *PeersReply) Reset() { *x = PeersReply{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[14] + mi := &file_p2psentry_sentry_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1105,7 +1058,7 @@ func (x *PeersReply) String() string { func (*PeersReply) ProtoMessage() {} func (x *PeersReply) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[14] + mi := &file_p2psentry_sentry_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1118,7 +1071,7 @@ func (x *PeersReply) ProtoReflect() protoreflect.Message { // Deprecated: Use PeersReply.ProtoReflect.Descriptor instead. func (*PeersReply) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{14} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{13} } func (x *PeersReply) GetPeers() []*types.PeerInfo { @@ -1137,7 +1090,7 @@ type PeerCountRequest struct { func (x *PeerCountRequest) Reset() { *x = PeerCountRequest{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[15] + mi := &file_p2psentry_sentry_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1150,7 +1103,7 @@ func (x *PeerCountRequest) String() string { func (*PeerCountRequest) ProtoMessage() {} func (x *PeerCountRequest) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[15] + mi := &file_p2psentry_sentry_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1163,7 +1116,7 @@ func (x *PeerCountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerCountRequest.ProtoReflect.Descriptor instead. func (*PeerCountRequest) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{15} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{14} } type PeerCountPerProtocol struct { @@ -1178,7 +1131,7 @@ type PeerCountPerProtocol struct { func (x *PeerCountPerProtocol) Reset() { *x = PeerCountPerProtocol{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[16] + mi := &file_p2psentry_sentry_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1191,7 +1144,7 @@ func (x *PeerCountPerProtocol) String() string { func (*PeerCountPerProtocol) ProtoMessage() {} func (x *PeerCountPerProtocol) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[16] + mi := &file_p2psentry_sentry_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1204,7 +1157,7 @@ func (x *PeerCountPerProtocol) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerCountPerProtocol.ProtoReflect.Descriptor instead. 
func (*PeerCountPerProtocol) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{16} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{15} } func (x *PeerCountPerProtocol) GetProtocol() Protocol { @@ -1227,13 +1180,13 @@ type PeerCountReply struct { unknownFields protoimpl.UnknownFields Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - CountsPerProtocol []*PeerCountPerProtocol `protobuf:"bytes,2,rep,name=countsPerProtocol,proto3" json:"countsPerProtocol,omitempty"` + CountsPerProtocol []*PeerCountPerProtocol `protobuf:"bytes,2,rep,name=counts_per_protocol,json=countsPerProtocol,proto3" json:"counts_per_protocol,omitempty"` } func (x *PeerCountReply) Reset() { *x = PeerCountReply{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[17] + mi := &file_p2psentry_sentry_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1246,7 +1199,7 @@ func (x *PeerCountReply) String() string { func (*PeerCountReply) ProtoMessage() {} func (x *PeerCountReply) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[17] + mi := &file_p2psentry_sentry_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1259,7 +1212,7 @@ func (x *PeerCountReply) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerCountReply.ProtoReflect.Descriptor instead. func (*PeerCountReply) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{17} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{16} } func (x *PeerCountReply) GetCount() uint64 { @@ -1287,7 +1240,7 @@ type PeerByIdRequest struct { func (x *PeerByIdRequest) Reset() { *x = PeerByIdRequest{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[18] + mi := &file_p2psentry_sentry_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1300,7 +1253,7 @@ func (x *PeerByIdRequest) String() string { func (*PeerByIdRequest) ProtoMessage() {} func (x *PeerByIdRequest) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[18] + mi := &file_p2psentry_sentry_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1313,7 +1266,7 @@ func (x *PeerByIdRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerByIdRequest.ProtoReflect.Descriptor instead. 
func (*PeerByIdRequest) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{18} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{17} } func (x *PeerByIdRequest) GetPeerId() *types.H512 { @@ -1334,7 +1287,7 @@ type PeerByIdReply struct { func (x *PeerByIdReply) Reset() { *x = PeerByIdReply{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[19] + mi := &file_p2psentry_sentry_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1347,7 +1300,7 @@ func (x *PeerByIdReply) String() string { func (*PeerByIdReply) ProtoMessage() {} func (x *PeerByIdReply) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[19] + mi := &file_p2psentry_sentry_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1360,7 +1313,7 @@ func (x *PeerByIdReply) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerByIdReply.ProtoReflect.Descriptor instead. func (*PeerByIdReply) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{19} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{18} } func (x *PeerByIdReply) GetPeer() *types.PeerInfo { @@ -1379,7 +1332,7 @@ type PeerEventsRequest struct { func (x *PeerEventsRequest) Reset() { *x = PeerEventsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[20] + mi := &file_p2psentry_sentry_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1392,7 +1345,7 @@ func (x *PeerEventsRequest) String() string { func (*PeerEventsRequest) ProtoMessage() {} func (x *PeerEventsRequest) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[20] + mi := &file_p2psentry_sentry_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1405,7 +1358,7 @@ func (x *PeerEventsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerEventsRequest.ProtoReflect.Descriptor instead. func (*PeerEventsRequest) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{20} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{19} } type PeerEvent struct { @@ -1420,7 +1373,7 @@ type PeerEvent struct { func (x *PeerEvent) Reset() { *x = PeerEvent{} if protoimpl.UnsafeEnabled { - mi := &file_p2psentry_sentry_proto_msgTypes[21] + mi := &file_p2psentry_sentry_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1433,7 +1386,7 @@ func (x *PeerEvent) String() string { func (*PeerEvent) ProtoMessage() {} func (x *PeerEvent) ProtoReflect() protoreflect.Message { - mi := &file_p2psentry_sentry_proto_msgTypes[21] + mi := &file_p2psentry_sentry_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1446,7 +1399,7 @@ func (x *PeerEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerEvent.ProtoReflect.Descriptor instead. 
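The PeerEvent hunk below is the last of the index shifts; after it comes the long file_p2psentry_sentry_proto_rawDesc rewrite, which is simply the serialized FileDescriptorProto regenerated without PeerUselessRequest and without the PeerUseless RPC. Note also the PeerCountReply struct tag change above: the field was renamed to counts_per_protocol in the proto source, but it keeps field number 2 and json=countsPerProtocol, so both the wire encoding and JSON output are unchanged. A quick check, with the sentry import path and enum value name assumed from this file:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"

	"github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
)

func main() {
	reply := &sentry.PeerCountReply{
		Count: 3,
		CountsPerProtocol: []*sentry.PeerCountPerProtocol{
			{Protocol: sentry.Protocol_ETH68, Count: 3},
		},
	}
	b, err := protojson.Marshal(reply)
	if err != nil {
		panic(err)
	}
	// Field 2 still serializes under the JSON name countsPerProtocol; modulo
	// protojson's non-guaranteed whitespace, this prints roughly:
	// {"count":"3","countsPerProtocol":[{"protocol":"ETH68","count":"3"}]}
	fmt.Println(string(b))
}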
func (*PeerEvent) Descriptor() ([]byte, []int) { - return file_p2psentry_sentry_proto_rawDescGZIP(), []int{21} + return file_p2psentry_sentry_proto_rawDescGZIP(), []int{20} } func (x *PeerEvent) GetPeerId() *types.H512 { @@ -1514,204 +1467,196 @@ var file_p2psentry_sentry_proto_rawDesc = []byte{ 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x22, 0x3a, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x55, 0x73, 0x65, 0x6c, 0x65, 0x73, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x22, 0x6d, - 0x0a, 0x0e, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x21, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x73, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x24, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x22, 0x70, 0x0a, - 0x05, 0x46, 0x6f, 0x72, 0x6b, 0x73, 0x12, 0x25, 0x0a, 0x07, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, - 0x48, 0x32, 0x35, 0x36, 0x52, 0x07, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x12, 0x21, 0x0a, - 0x0c, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6b, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x04, 0x52, 0x0b, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x46, 0x6f, 0x72, 0x6b, 0x73, - 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6b, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x73, 0x22, - 0x89, 0x02, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d, - 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x36, 0x0a, - 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, - 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x69, - 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x28, 0x0a, 0x09, 0x62, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x61, - 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x08, 0x62, 0x65, 0x73, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, - 0x2a, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x46, 0x6f, 0x72, 0x6b, - 0x73, 0x52, 0x08, 0x66, 0x6f, 0x72, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x6d, - 0x61, 0x78, 0x5f, 0x62, 
0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, - 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x10, 0x0a, 0x0e, 0x53, - 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x3e, 0x0a, - 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x53, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x2c, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x10, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x36, 0x0a, - 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x23, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x11, 0x2e, - 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, - 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x33, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x22, 0x12, 0x0a, 0x10, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5a, - 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2c, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x72, 0x0a, 0x0e, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x4a, 0x0a, 0x11, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x50, 0x65, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x50, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x11, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x73, 0x50, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x37, - 0x0a, 0x0f, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x24, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, - 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x22, 0x42, 0x0a, 0x0d, 0x50, 0x65, 0x65, 0x72, 0x42, - 0x79, 0x49, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 
0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, - 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, - 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x22, 0x13, 0x0a, 0x11, 0x50, - 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x97, 0x01, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, - 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x06, 0x70, 0x65, - 0x65, 0x72, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, - 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x49, 0x64, 0x52, 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x2a, - 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x0b, 0x0a, - 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x69, - 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x10, 0x01, 0x2a, 0x80, 0x06, 0x0a, 0x09, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x47, 0x45, 0x54, 0x5f, 0x42, - 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x5f, 0x36, 0x35, 0x10, - 0x01, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, - 0x52, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, - 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, - 0x47, 0x45, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x42, 0x4f, 0x44, 0x49, 0x45, 0x53, - 0x5f, 0x36, 0x35, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x42, - 0x4f, 0x44, 0x49, 0x45, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x47, 0x45, - 0x54, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x36, 0x35, 0x10, 0x06, - 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x36, 0x35, - 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x45, 0x54, 0x5f, 0x52, 0x45, 0x43, 0x45, 0x49, 0x50, - 0x54, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x43, 0x45, 0x49, - 0x50, 0x54, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x09, 0x12, 0x17, 0x0a, 0x13, 0x4e, 0x45, 0x57, 0x5f, - 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36, 0x35, 0x10, - 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x45, 0x57, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x36, - 0x35, 0x10, 0x0b, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x0c, 0x12, 0x24, 0x0a, 0x20, 0x4e, 0x45, 0x57, 0x5f, - 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x0d, 0x12, 0x1e, - 0x0a, 0x1a, 0x47, 0x45, 0x54, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, - 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x0e, 0x12, 0x1a, - 0x0a, 0x16, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 
0x4e, 0x53, 0x41, 0x43, - 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x0f, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x54, - 0x41, 0x54, 0x55, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x11, 0x12, 0x17, 0x0a, 0x13, 0x4e, 0x45, 0x57, - 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36, 0x36, - 0x10, 0x12, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x45, 0x57, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, - 0x36, 0x36, 0x10, 0x13, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, - 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x14, 0x12, 0x24, 0x0a, 0x20, 0x4e, 0x45, 0x57, - 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x15, 0x12, - 0x18, 0x0a, 0x14, 0x47, 0x45, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x45, 0x41, - 0x44, 0x45, 0x52, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x16, 0x12, 0x17, 0x0a, 0x13, 0x47, 0x45, 0x54, - 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x42, 0x4f, 0x44, 0x49, 0x45, 0x53, 0x5f, 0x36, 0x36, - 0x10, 0x17, 0x12, 0x14, 0x0a, 0x10, 0x47, 0x45, 0x54, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x44, - 0x41, 0x54, 0x41, 0x5f, 0x36, 0x36, 0x10, 0x18, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x45, 0x54, 0x5f, - 0x52, 0x45, 0x43, 0x45, 0x49, 0x50, 0x54, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x19, 0x12, 0x1e, 0x0a, - 0x1a, 0x47, 0x45, 0x54, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, - 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x1a, 0x12, 0x14, 0x0a, - 0x10, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x5f, 0x36, - 0x36, 0x10, 0x1b, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x42, 0x4f, 0x44, - 0x49, 0x45, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x1c, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x4f, 0x44, 0x45, - 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x36, 0x36, 0x10, 0x1d, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, - 0x43, 0x45, 0x49, 0x50, 0x54, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x1e, 0x12, 0x1a, 0x0a, 0x16, 0x50, - 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, - 0x4e, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x1f, 0x12, 0x24, 0x0a, 0x20, 0x4e, 0x45, 0x57, 0x5f, 0x50, - 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36, 0x38, 0x10, 0x20, 0x2a, 0x17, 0x0a, - 0x0b, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x08, 0x0a, 0x04, - 0x4b, 0x69, 0x63, 0x6b, 0x10, 0x00, 0x2a, 0x36, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x54, 0x48, 0x36, 0x35, 0x10, 0x00, 0x12, 0x09, 0x0a, - 0x05, 0x45, 0x54, 0x48, 0x36, 0x36, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x54, 0x48, 0x36, - 0x37, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x54, 0x48, 0x36, 0x38, 0x10, 0x03, 0x32, 0xe6, - 0x07, 0x0a, 0x06, 0x53, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x09, 0x53, 0x65, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, - 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x43, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x6e, 0x61, - 
0x6c, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x63, 0x6b, 0x22, 0x6d, 0x0a, 0x0e, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x49, 0x64, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x24, 0x0a, 0x07, 0x70, + 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, + 0x64, 0x22, 0x70, 0x0a, 0x05, 0x46, 0x6f, 0x72, 0x6b, 0x73, 0x12, 0x25, 0x0a, 0x07, 0x67, 0x65, + 0x6e, 0x65, 0x73, 0x69, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x07, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6b, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0b, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x46, + 0x6f, 0x72, 0x6b, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, + 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x6f, + 0x72, 0x6b, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, + 0x64, 0x12, 0x36, 0x0a, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x69, + 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, + 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x28, 0x0a, 0x09, 0x62, 0x65, 0x73, + 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x08, 0x62, 0x65, 0x73, 0x74, 0x48, + 0x61, 0x73, 0x68, 0x12, 0x2a, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x46, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x08, 0x66, 0x6f, 0x72, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, + 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x22, + 0x10, 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x22, 0x3e, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x53, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x2c, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 
0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x22, 0x36, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0e, 0x32, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x49, 0x64, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x33, 0x0a, 0x0a, 0x50, 0x65, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, + 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x22, 0x12, + 0x0a, 0x10, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x5a, 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, + 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2c, 0x0a, 0x08, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x73, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x74, + 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4c, 0x0a, 0x13, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, + 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, + 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x52, 0x11, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x50, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x37, 0x0a, 0x0f, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79, 0x49, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x22, 0x42, 0x0a, + 0x0d, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x28, + 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, + 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x70, 0x65, 0x65, + 0x72, 0x22, 0x13, 0x0a, 0x11, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x97, 0x01, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x35, + 0x31, 0x32, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x08, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x73, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 
0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x52, 0x07, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x49, 0x64, 0x22, 0x2a, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x49, 0x64, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x10, 0x00, + 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x10, 0x01, + 0x2a, 0x80, 0x06, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x0d, + 0x0a, 0x09, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x00, 0x12, 0x18, 0x0a, + 0x14, 0x47, 0x45, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, + 0x52, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x4c, 0x4f, 0x43, 0x4b, + 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x02, 0x12, 0x13, 0x0a, + 0x0f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36, 0x35, + 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x47, 0x45, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, + 0x42, 0x4f, 0x44, 0x49, 0x45, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x42, + 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x42, 0x4f, 0x44, 0x49, 0x45, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x47, 0x45, 0x54, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x41, 0x54, + 0x41, 0x5f, 0x36, 0x35, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x44, + 0x41, 0x54, 0x41, 0x5f, 0x36, 0x35, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x45, 0x54, 0x5f, + 0x52, 0x45, 0x43, 0x45, 0x49, 0x50, 0x54, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x08, 0x12, 0x0f, 0x0a, + 0x0b, 0x52, 0x45, 0x43, 0x45, 0x49, 0x50, 0x54, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x09, 0x12, 0x17, + 0x0a, 0x13, 0x4e, 0x45, 0x57, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, 0x53, 0x48, + 0x45, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x45, 0x57, 0x5f, 0x42, + 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x36, 0x35, 0x10, 0x0b, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x52, 0x41, + 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x0c, 0x12, 0x24, + 0x0a, 0x20, 0x4e, 0x45, 0x57, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, + 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, + 0x36, 0x35, 0x10, 0x0d, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x45, 0x54, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, + 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, + 0x36, 0x35, 0x10, 0x0e, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, + 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x0f, + 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x11, 0x12, + 0x17, 0x0a, 0x13, 0x4e, 0x45, 0x57, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, 0x53, + 0x48, 0x45, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x12, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x45, 0x57, 0x5f, + 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x36, 0x36, 0x10, 0x13, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x52, + 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x14, 0x12, + 0x24, 0x0a, 0x20, 0x4e, 0x45, 0x57, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, + 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, + 0x5f, 0x36, 0x36, 0x10, 0x15, 0x12, 0x18, 0x0a, 0x14, 0x47, 0x45, 0x54, 
0x5f, 0x42, 0x4c, 0x4f, + 0x43, 0x4b, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x16, 0x12, + 0x17, 0x0a, 0x13, 0x47, 0x45, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x42, 0x4f, 0x44, + 0x49, 0x45, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x17, 0x12, 0x14, 0x0a, 0x10, 0x47, 0x45, 0x54, 0x5f, + 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x36, 0x36, 0x10, 0x18, 0x12, 0x13, + 0x0a, 0x0f, 0x47, 0x45, 0x54, 0x5f, 0x52, 0x45, 0x43, 0x45, 0x49, 0x50, 0x54, 0x53, 0x5f, 0x36, + 0x36, 0x10, 0x19, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x45, 0x54, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, + 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, + 0x36, 0x10, 0x1a, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x45, 0x41, + 0x44, 0x45, 0x52, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x1b, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x4c, 0x4f, + 0x43, 0x4b, 0x5f, 0x42, 0x4f, 0x44, 0x49, 0x45, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x1c, 0x12, 0x10, + 0x0a, 0x0c, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x36, 0x36, 0x10, 0x1d, + 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x43, 0x45, 0x49, 0x50, 0x54, 0x53, 0x5f, 0x36, 0x36, 0x10, + 0x1e, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, + 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x1f, 0x12, 0x24, 0x0a, + 0x20, 0x4e, 0x45, 0x57, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, + 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36, + 0x38, 0x10, 0x20, 0x2a, 0x17, 0x0a, 0x0b, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x4b, 0x69, + 0x6e, 0x64, 0x12, 0x08, 0x0a, 0x04, 0x4b, 0x69, 0x63, 0x6b, 0x10, 0x00, 0x2a, 0x36, 0x0a, 0x08, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x54, 0x48, 0x36, + 0x35, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x54, 0x48, 0x36, 0x36, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x45, 0x54, 0x48, 0x36, 0x37, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x54, 0x48, + 0x36, 0x38, 0x10, 0x03, 0x32, 0xa3, 0x07, 0x0a, 0x06, 0x53, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x37, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x2e, 0x73, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x61, 0x74, 0x61, + 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x43, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, + 0x0c, 0x50, 0x65, 0x65, 0x72, 0x4d, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1b, 0x2e, + 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4d, 0x69, 0x6e, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x53, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 
0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x4d, - 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4d, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0b, - 0x50, 0x65, 0x65, 0x72, 0x55, 0x73, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x12, 0x1a, 0x2e, 0x73, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x55, 0x73, 0x65, 0x6c, 0x65, 0x73, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, - 0x3b, 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x53, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x16, 0x2e, 0x67, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x53, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x50, 0x0a, 0x15, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, + 0x4d, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x24, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x4d, + 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, + 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, + 0x73, 0x12, 0x44, 0x0a, 0x0f, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x42, 0x79, 0x49, 0x64, 0x12, 0x1e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, + 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, + 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x56, 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x50, 0x65, + 0x65, 0x72, 0x73, 0x12, 0x27, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, + 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x73, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, + 0x42, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x6f, + 0x41, 0x6c, 0x6c, 0x12, 0x1b, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4f, 0x75, 0x74, + 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, + 0x1a, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x74, 0x50, 0x65, + 0x65, 0x72, 0x73, 0x12, 0x3d, 0x0a, 0x08, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, + 0x17, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x30, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x18, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79, + 0x49, 0x64, 0x12, 0x17, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, + 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x73, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x19, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x73, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x30, 0x01, + 0x12, 0x38, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x53, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x50, 0x0a, 0x15, - 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x4d, 0x69, 0x6e, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x24, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, - 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x4d, 0x69, 0x6e, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x73, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x44, - 0x0a, 0x0f, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x49, - 0x64, 0x12, 0x1e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x74, 0x50, - 0x65, 0x65, 0x72, 0x73, 0x12, 0x56, 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x50, 0x65, 0x65, 0x72, 0x73, - 0x12, 0x27, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x50, 0x65, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74, - 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x42, 0x0a, 0x10, - 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x41, 0x6c, 0x6c, - 0x12, 0x1b, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, - 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x11, 0x2e, - 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 
0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, - 0x12, 0x3d, 0x0a, 0x08, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x17, 0x2e, 0x73, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, - 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x30, 0x01, 0x12, - 0x33, 0x0a, 0x05, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x12, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x18, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79, 0x49, 0x64, 0x12, - 0x17, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79, 0x49, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x3c, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x19, 0x2e, - 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x30, 0x01, 0x12, 0x38, 0x0a, - 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x73, 0x65, 0x6e, - 0x74, 0x72, 0x79, 0x3b, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x2f, + 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x3b, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1727,7 +1672,7 @@ func file_p2psentry_sentry_proto_rawDescGZIP() []byte { } var file_p2psentry_sentry_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_p2psentry_sentry_proto_msgTypes = make([]protoimpl.MessageInfo, 22) +var file_p2psentry_sentry_proto_msgTypes = make([]protoimpl.MessageInfo, 21) var file_p2psentry_sentry_proto_goTypes = []interface{}{ (MessageId)(0), // 0: sentry.MessageId (PenaltyKind)(0), // 1: sentry.PenaltyKind @@ -1740,88 +1685,84 @@ var file_p2psentry_sentry_proto_goTypes = []interface{}{ (*SentPeers)(nil), // 8: sentry.SentPeers (*PenalizePeerRequest)(nil), // 9: sentry.PenalizePeerRequest (*PeerMinBlockRequest)(nil), // 10: sentry.PeerMinBlockRequest - (*PeerUselessRequest)(nil), // 11: sentry.PeerUselessRequest - (*InboundMessage)(nil), // 12: 
sentry.InboundMessage - (*Forks)(nil), // 13: sentry.Forks - (*StatusData)(nil), // 14: sentry.StatusData - (*SetStatusReply)(nil), // 15: sentry.SetStatusReply - (*HandShakeReply)(nil), // 16: sentry.HandShakeReply - (*MessagesRequest)(nil), // 17: sentry.MessagesRequest - (*PeersReply)(nil), // 18: sentry.PeersReply - (*PeerCountRequest)(nil), // 19: sentry.PeerCountRequest - (*PeerCountPerProtocol)(nil), // 20: sentry.PeerCountPerProtocol - (*PeerCountReply)(nil), // 21: sentry.PeerCountReply - (*PeerByIdRequest)(nil), // 22: sentry.PeerByIdRequest - (*PeerByIdReply)(nil), // 23: sentry.PeerByIdReply - (*PeerEventsRequest)(nil), // 24: sentry.PeerEventsRequest - (*PeerEvent)(nil), // 25: sentry.PeerEvent - (*types.H512)(nil), // 26: types.H512 - (*types.H256)(nil), // 27: types.H256 - (*types.PeerInfo)(nil), // 28: types.PeerInfo - (*emptypb.Empty)(nil), // 29: google.protobuf.Empty - (*types.NodeInfoReply)(nil), // 30: types.NodeInfoReply + (*InboundMessage)(nil), // 11: sentry.InboundMessage + (*Forks)(nil), // 12: sentry.Forks + (*StatusData)(nil), // 13: sentry.StatusData + (*SetStatusReply)(nil), // 14: sentry.SetStatusReply + (*HandShakeReply)(nil), // 15: sentry.HandShakeReply + (*MessagesRequest)(nil), // 16: sentry.MessagesRequest + (*PeersReply)(nil), // 17: sentry.PeersReply + (*PeerCountRequest)(nil), // 18: sentry.PeerCountRequest + (*PeerCountPerProtocol)(nil), // 19: sentry.PeerCountPerProtocol + (*PeerCountReply)(nil), // 20: sentry.PeerCountReply + (*PeerByIdRequest)(nil), // 21: sentry.PeerByIdRequest + (*PeerByIdReply)(nil), // 22: sentry.PeerByIdReply + (*PeerEventsRequest)(nil), // 23: sentry.PeerEventsRequest + (*PeerEvent)(nil), // 24: sentry.PeerEvent + (*types.H512)(nil), // 25: types.H512 + (*types.H256)(nil), // 26: types.H256 + (*types.PeerInfo)(nil), // 27: types.PeerInfo + (*emptypb.Empty)(nil), // 28: google.protobuf.Empty + (*types.NodeInfoReply)(nil), // 29: types.NodeInfoReply } var file_p2psentry_sentry_proto_depIdxs = []int32{ 0, // 0: sentry.OutboundMessageData.id:type_name -> sentry.MessageId 4, // 1: sentry.SendMessageByMinBlockRequest.data:type_name -> sentry.OutboundMessageData 4, // 2: sentry.SendMessageByIdRequest.data:type_name -> sentry.OutboundMessageData - 26, // 3: sentry.SendMessageByIdRequest.peer_id:type_name -> types.H512 + 25, // 3: sentry.SendMessageByIdRequest.peer_id:type_name -> types.H512 4, // 4: sentry.SendMessageToRandomPeersRequest.data:type_name -> sentry.OutboundMessageData - 26, // 5: sentry.SentPeers.peers:type_name -> types.H512 - 26, // 6: sentry.PenalizePeerRequest.peer_id:type_name -> types.H512 + 25, // 5: sentry.SentPeers.peers:type_name -> types.H512 + 25, // 6: sentry.PenalizePeerRequest.peer_id:type_name -> types.H512 1, // 7: sentry.PenalizePeerRequest.penalty:type_name -> sentry.PenaltyKind - 26, // 8: sentry.PeerMinBlockRequest.peer_id:type_name -> types.H512 - 26, // 9: sentry.PeerUselessRequest.peer_id:type_name -> types.H512 - 0, // 10: sentry.InboundMessage.id:type_name -> sentry.MessageId - 26, // 11: sentry.InboundMessage.peer_id:type_name -> types.H512 - 27, // 12: sentry.Forks.genesis:type_name -> types.H256 - 27, // 13: sentry.StatusData.total_difficulty:type_name -> types.H256 - 27, // 14: sentry.StatusData.best_hash:type_name -> types.H256 - 13, // 15: sentry.StatusData.fork_data:type_name -> sentry.Forks - 2, // 16: sentry.HandShakeReply.protocol:type_name -> sentry.Protocol - 0, // 17: sentry.MessagesRequest.ids:type_name -> sentry.MessageId - 28, // 18: sentry.PeersReply.peers:type_name -> 
types.PeerInfo - 2, // 19: sentry.PeerCountPerProtocol.protocol:type_name -> sentry.Protocol - 20, // 20: sentry.PeerCountReply.countsPerProtocol:type_name -> sentry.PeerCountPerProtocol - 26, // 21: sentry.PeerByIdRequest.peer_id:type_name -> types.H512 - 28, // 22: sentry.PeerByIdReply.peer:type_name -> types.PeerInfo - 26, // 23: sentry.PeerEvent.peer_id:type_name -> types.H512 - 3, // 24: sentry.PeerEvent.event_id:type_name -> sentry.PeerEvent.PeerEventId - 14, // 25: sentry.Sentry.SetStatus:input_type -> sentry.StatusData - 9, // 26: sentry.Sentry.PenalizePeer:input_type -> sentry.PenalizePeerRequest - 10, // 27: sentry.Sentry.PeerMinBlock:input_type -> sentry.PeerMinBlockRequest - 11, // 28: sentry.Sentry.PeerUseless:input_type -> sentry.PeerUselessRequest - 29, // 29: sentry.Sentry.HandShake:input_type -> google.protobuf.Empty - 5, // 30: sentry.Sentry.SendMessageByMinBlock:input_type -> sentry.SendMessageByMinBlockRequest - 6, // 31: sentry.Sentry.SendMessageById:input_type -> sentry.SendMessageByIdRequest - 7, // 32: sentry.Sentry.SendMessageToRandomPeers:input_type -> sentry.SendMessageToRandomPeersRequest - 4, // 33: sentry.Sentry.SendMessageToAll:input_type -> sentry.OutboundMessageData - 17, // 34: sentry.Sentry.Messages:input_type -> sentry.MessagesRequest - 29, // 35: sentry.Sentry.Peers:input_type -> google.protobuf.Empty - 19, // 36: sentry.Sentry.PeerCount:input_type -> sentry.PeerCountRequest - 22, // 37: sentry.Sentry.PeerById:input_type -> sentry.PeerByIdRequest - 24, // 38: sentry.Sentry.PeerEvents:input_type -> sentry.PeerEventsRequest - 29, // 39: sentry.Sentry.NodeInfo:input_type -> google.protobuf.Empty - 15, // 40: sentry.Sentry.SetStatus:output_type -> sentry.SetStatusReply - 29, // 41: sentry.Sentry.PenalizePeer:output_type -> google.protobuf.Empty - 29, // 42: sentry.Sentry.PeerMinBlock:output_type -> google.protobuf.Empty - 29, // 43: sentry.Sentry.PeerUseless:output_type -> google.protobuf.Empty - 16, // 44: sentry.Sentry.HandShake:output_type -> sentry.HandShakeReply - 8, // 45: sentry.Sentry.SendMessageByMinBlock:output_type -> sentry.SentPeers - 8, // 46: sentry.Sentry.SendMessageById:output_type -> sentry.SentPeers - 8, // 47: sentry.Sentry.SendMessageToRandomPeers:output_type -> sentry.SentPeers - 8, // 48: sentry.Sentry.SendMessageToAll:output_type -> sentry.SentPeers - 12, // 49: sentry.Sentry.Messages:output_type -> sentry.InboundMessage - 18, // 50: sentry.Sentry.Peers:output_type -> sentry.PeersReply - 21, // 51: sentry.Sentry.PeerCount:output_type -> sentry.PeerCountReply - 23, // 52: sentry.Sentry.PeerById:output_type -> sentry.PeerByIdReply - 25, // 53: sentry.Sentry.PeerEvents:output_type -> sentry.PeerEvent - 30, // 54: sentry.Sentry.NodeInfo:output_type -> types.NodeInfoReply - 40, // [40:55] is the sub-list for method output_type - 25, // [25:40] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name + 25, // 8: sentry.PeerMinBlockRequest.peer_id:type_name -> types.H512 + 0, // 9: sentry.InboundMessage.id:type_name -> sentry.MessageId + 25, // 10: sentry.InboundMessage.peer_id:type_name -> types.H512 + 26, // 11: sentry.Forks.genesis:type_name -> types.H256 + 26, // 12: sentry.StatusData.total_difficulty:type_name -> types.H256 + 26, // 13: sentry.StatusData.best_hash:type_name -> types.H256 + 12, // 14: sentry.StatusData.fork_data:type_name -> sentry.Forks + 2, // 15: 
sentry.HandShakeReply.protocol:type_name -> sentry.Protocol + 0, // 16: sentry.MessagesRequest.ids:type_name -> sentry.MessageId + 27, // 17: sentry.PeersReply.peers:type_name -> types.PeerInfo + 2, // 18: sentry.PeerCountPerProtocol.protocol:type_name -> sentry.Protocol + 19, // 19: sentry.PeerCountReply.counts_per_protocol:type_name -> sentry.PeerCountPerProtocol + 25, // 20: sentry.PeerByIdRequest.peer_id:type_name -> types.H512 + 27, // 21: sentry.PeerByIdReply.peer:type_name -> types.PeerInfo + 25, // 22: sentry.PeerEvent.peer_id:type_name -> types.H512 + 3, // 23: sentry.PeerEvent.event_id:type_name -> sentry.PeerEvent.PeerEventId + 13, // 24: sentry.Sentry.SetStatus:input_type -> sentry.StatusData + 9, // 25: sentry.Sentry.PenalizePeer:input_type -> sentry.PenalizePeerRequest + 10, // 26: sentry.Sentry.PeerMinBlock:input_type -> sentry.PeerMinBlockRequest + 28, // 27: sentry.Sentry.HandShake:input_type -> google.protobuf.Empty + 5, // 28: sentry.Sentry.SendMessageByMinBlock:input_type -> sentry.SendMessageByMinBlockRequest + 6, // 29: sentry.Sentry.SendMessageById:input_type -> sentry.SendMessageByIdRequest + 7, // 30: sentry.Sentry.SendMessageToRandomPeers:input_type -> sentry.SendMessageToRandomPeersRequest + 4, // 31: sentry.Sentry.SendMessageToAll:input_type -> sentry.OutboundMessageData + 16, // 32: sentry.Sentry.Messages:input_type -> sentry.MessagesRequest + 28, // 33: sentry.Sentry.Peers:input_type -> google.protobuf.Empty + 18, // 34: sentry.Sentry.PeerCount:input_type -> sentry.PeerCountRequest + 21, // 35: sentry.Sentry.PeerById:input_type -> sentry.PeerByIdRequest + 23, // 36: sentry.Sentry.PeerEvents:input_type -> sentry.PeerEventsRequest + 28, // 37: sentry.Sentry.NodeInfo:input_type -> google.protobuf.Empty + 14, // 38: sentry.Sentry.SetStatus:output_type -> sentry.SetStatusReply + 28, // 39: sentry.Sentry.PenalizePeer:output_type -> google.protobuf.Empty + 28, // 40: sentry.Sentry.PeerMinBlock:output_type -> google.protobuf.Empty + 15, // 41: sentry.Sentry.HandShake:output_type -> sentry.HandShakeReply + 8, // 42: sentry.Sentry.SendMessageByMinBlock:output_type -> sentry.SentPeers + 8, // 43: sentry.Sentry.SendMessageById:output_type -> sentry.SentPeers + 8, // 44: sentry.Sentry.SendMessageToRandomPeers:output_type -> sentry.SentPeers + 8, // 45: sentry.Sentry.SendMessageToAll:output_type -> sentry.SentPeers + 11, // 46: sentry.Sentry.Messages:output_type -> sentry.InboundMessage + 17, // 47: sentry.Sentry.Peers:output_type -> sentry.PeersReply + 20, // 48: sentry.Sentry.PeerCount:output_type -> sentry.PeerCountReply + 22, // 49: sentry.Sentry.PeerById:output_type -> sentry.PeerByIdReply + 24, // 50: sentry.Sentry.PeerEvents:output_type -> sentry.PeerEvent + 29, // 51: sentry.Sentry.NodeInfo:output_type -> types.NodeInfoReply + 38, // [38:52] is the sub-list for method output_type + 24, // [24:38] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name } func init() { file_p2psentry_sentry_proto_init() } @@ -1915,18 +1856,6 @@ func file_p2psentry_sentry_proto_init() { } } file_p2psentry_sentry_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerUselessRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_p2psentry_sentry_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { 
switch v := v.(*InboundMessage); i { case 0: return &v.state @@ -1938,7 +1867,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Forks); i { case 0: return &v.state @@ -1950,7 +1879,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StatusData); i { case 0: return &v.state @@ -1962,7 +1891,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetStatusReply); i { case 0: return &v.state @@ -1974,7 +1903,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HandShakeReply); i { case 0: return &v.state @@ -1986,7 +1915,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MessagesRequest); i { case 0: return &v.state @@ -1998,7 +1927,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PeersReply); i { case 0: return &v.state @@ -2010,7 +1939,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PeerCountRequest); i { case 0: return &v.state @@ -2022,7 +1951,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PeerCountPerProtocol); i { case 0: return &v.state @@ -2034,7 +1963,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PeerCountReply); i { case 0: return &v.state @@ -2046,7 +1975,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PeerByIdRequest); i { case 0: return &v.state @@ -2058,7 +1987,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + 
file_p2psentry_sentry_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PeerByIdReply); i { case 0: return &v.state @@ -2070,7 +1999,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PeerEventsRequest); i { case 0: return &v.state @@ -2082,7 +2011,7 @@ func file_p2psentry_sentry_proto_init() { return nil } } - file_p2psentry_sentry_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_p2psentry_sentry_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PeerEvent); i { case 0: return &v.state @@ -2095,14 +2024,14 @@ func file_p2psentry_sentry_proto_init() { } } } - file_p2psentry_sentry_proto_msgTypes[19].OneofWrappers = []interface{}{} + file_p2psentry_sentry_proto_msgTypes[18].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_p2psentry_sentry_proto_rawDesc, NumEnums: 4, - NumMessages: 22, + NumMessages: 21, NumExtensions: 0, NumServices: 1, }, diff --git a/gointerfaces/sentry/sentry_grpc.pb.go b/gointerfaces/sentry/sentry_grpc.pb.go index d9cb9a44f..b633d72ae 100644 --- a/gointerfaces/sentry/sentry_grpc.pb.go +++ b/gointerfaces/sentry/sentry_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.12 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.2 // source: p2psentry/sentry.proto package sentry @@ -20,6 +20,23 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Sentry_SetStatus_FullMethodName = "/sentry.Sentry/SetStatus" + Sentry_PenalizePeer_FullMethodName = "/sentry.Sentry/PenalizePeer" + Sentry_PeerMinBlock_FullMethodName = "/sentry.Sentry/PeerMinBlock" + Sentry_HandShake_FullMethodName = "/sentry.Sentry/HandShake" + Sentry_SendMessageByMinBlock_FullMethodName = "/sentry.Sentry/SendMessageByMinBlock" + Sentry_SendMessageById_FullMethodName = "/sentry.Sentry/SendMessageById" + Sentry_SendMessageToRandomPeers_FullMethodName = "/sentry.Sentry/SendMessageToRandomPeers" + Sentry_SendMessageToAll_FullMethodName = "/sentry.Sentry/SendMessageToAll" + Sentry_Messages_FullMethodName = "/sentry.Sentry/Messages" + Sentry_Peers_FullMethodName = "/sentry.Sentry/Peers" + Sentry_PeerCount_FullMethodName = "/sentry.Sentry/PeerCount" + Sentry_PeerById_FullMethodName = "/sentry.Sentry/PeerById" + Sentry_PeerEvents_FullMethodName = "/sentry.Sentry/PeerEvents" + Sentry_NodeInfo_FullMethodName = "/sentry.Sentry/NodeInfo" +) + // SentryClient is the client API for Sentry service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
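
For context: protoc-gen-go-grpc v1.3.0 starts emitting the exported Sentry_*_FullMethodName constants added in the hunk above, and the hunks below replace every hard-coded "/sentry.Sentry/..." string literal with them. A minimal sketch of how hand-written code might reuse these constants — the interceptor, the dial address, and the logging are illustrative assumptions, not part of this change:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
)

// logSetStatus is a unary client interceptor that matches one RPC via the
// generated constant instead of repeating the "/sentry.Sentry/SetStatus" literal.
func logSetStatus(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	if method == sentry.Sentry_SetStatus_FullMethodName {
		log.Printf("outgoing %s", method)
	}
	return invoker(ctx, method, req, reply, cc, opts...)
}

func main() {
	// "localhost:9091" is a made-up address for this sketch.
	conn, err := grpc.Dial("localhost:9091",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(logSetStatus))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	_ = sentry.NewSentryClient(conn)
}

Using the constants keeps hand-written method matching in sync with the .proto service definition, which is presumably why the generator now exports them.
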
@@ -28,7 +45,6 @@ type SentryClient interface { SetStatus(ctx context.Context, in *StatusData, opts ...grpc.CallOption) (*SetStatusReply, error) PenalizePeer(ctx context.Context, in *PenalizePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) PeerMinBlock(ctx context.Context, in *PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - PeerUseless(ctx context.Context, in *PeerUselessRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // HandShake - pre-requirement for all Send* methods - returns list of ETH protocol versions, // without knowledge of protocol - impossible encode correct P2P message HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HandShakeReply, error) @@ -59,7 +75,7 @@ func NewSentryClient(cc grpc.ClientConnInterface) SentryClient { func (c *sentryClient) SetStatus(ctx context.Context, in *StatusData, opts ...grpc.CallOption) (*SetStatusReply, error) { out := new(SetStatusReply) - err := c.cc.Invoke(ctx, "/sentry.Sentry/SetStatus", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_SetStatus_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -68,7 +84,7 @@ func (c *sentryClient) SetStatus(ctx context.Context, in *StatusData, opts ...gr func (c *sentryClient) PenalizePeer(ctx context.Context, in *PenalizePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sentry.Sentry/PenalizePeer", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_PenalizePeer_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -77,16 +93,7 @@ func (c *sentryClient) PenalizePeer(ctx context.Context, in *PenalizePeerRequest func (c *sentryClient) PeerMinBlock(ctx context.Context, in *PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sentry.Sentry/PeerMinBlock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *sentryClient) PeerUseless(ctx context.Context, in *PeerUselessRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sentry.Sentry/PeerUseless", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_PeerMinBlock_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -95,7 +102,7 @@ func (c *sentryClient) PeerUseless(ctx context.Context, in *PeerUselessRequest, func (c *sentryClient) HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HandShakeReply, error) { out := new(HandShakeReply) - err := c.cc.Invoke(ctx, "/sentry.Sentry/HandShake", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_HandShake_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -104,7 +111,7 @@ func (c *sentryClient) HandShake(ctx context.Context, in *emptypb.Empty, opts .. func (c *sentryClient) SendMessageByMinBlock(ctx context.Context, in *SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*SentPeers, error) { out := new(SentPeers) - err := c.cc.Invoke(ctx, "/sentry.Sentry/SendMessageByMinBlock", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_SendMessageByMinBlock_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -113,7 +120,7 @@ func (c *sentryClient) SendMessageByMinBlock(ctx context.Context, in *SendMessag func (c *sentryClient) SendMessageById(ctx context.Context, in *SendMessageByIdRequest, opts ...grpc.CallOption) (*SentPeers, error) { out := new(SentPeers) - err := c.cc.Invoke(ctx, "/sentry.Sentry/SendMessageById", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_SendMessageById_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -122,7 +129,7 @@ func (c *sentryClient) SendMessageById(ctx context.Context, in *SendMessageByIdR func (c *sentryClient) SendMessageToRandomPeers(ctx context.Context, in *SendMessageToRandomPeersRequest, opts ...grpc.CallOption) (*SentPeers, error) { out := new(SentPeers) - err := c.cc.Invoke(ctx, "/sentry.Sentry/SendMessageToRandomPeers", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_SendMessageToRandomPeers_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -131,7 +138,7 @@ func (c *sentryClient) SendMessageToRandomPeers(ctx context.Context, in *SendMes func (c *sentryClient) SendMessageToAll(ctx context.Context, in *OutboundMessageData, opts ...grpc.CallOption) (*SentPeers, error) { out := new(SentPeers) - err := c.cc.Invoke(ctx, "/sentry.Sentry/SendMessageToAll", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_SendMessageToAll_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -139,7 +146,7 @@ func (c *sentryClient) SendMessageToAll(ctx context.Context, in *OutboundMessage } func (c *sentryClient) Messages(ctx context.Context, in *MessagesRequest, opts ...grpc.CallOption) (Sentry_MessagesClient, error) { - stream, err := c.cc.NewStream(ctx, &Sentry_ServiceDesc.Streams[0], "/sentry.Sentry/Messages", opts...) + stream, err := c.cc.NewStream(ctx, &Sentry_ServiceDesc.Streams[0], Sentry_Messages_FullMethodName, opts...) if err != nil { return nil, err } @@ -172,7 +179,7 @@ func (x *sentryMessagesClient) Recv() (*InboundMessage, error) { func (c *sentryClient) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error) { out := new(PeersReply) - err := c.cc.Invoke(ctx, "/sentry.Sentry/Peers", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_Peers_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -181,7 +188,7 @@ func (c *sentryClient) Peers(ctx context.Context, in *emptypb.Empty, opts ...grp func (c *sentryClient) PeerCount(ctx context.Context, in *PeerCountRequest, opts ...grpc.CallOption) (*PeerCountReply, error) { out := new(PeerCountReply) - err := c.cc.Invoke(ctx, "/sentry.Sentry/PeerCount", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_PeerCount_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -190,7 +197,7 @@ func (c *sentryClient) PeerCount(ctx context.Context, in *PeerCountRequest, opts func (c *sentryClient) PeerById(ctx context.Context, in *PeerByIdRequest, opts ...grpc.CallOption) (*PeerByIdReply, error) { out := new(PeerByIdReply) - err := c.cc.Invoke(ctx, "/sentry.Sentry/PeerById", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_PeerById_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -198,7 +205,7 @@ func (c *sentryClient) PeerById(ctx context.Context, in *PeerByIdRequest, opts . } func (c *sentryClient) PeerEvents(ctx context.Context, in *PeerEventsRequest, opts ...grpc.CallOption) (Sentry_PeerEventsClient, error) { - stream, err := c.cc.NewStream(ctx, &Sentry_ServiceDesc.Streams[1], "/sentry.Sentry/PeerEvents", opts...) 
+ stream, err := c.cc.NewStream(ctx, &Sentry_ServiceDesc.Streams[1], Sentry_PeerEvents_FullMethodName, opts...) if err != nil { return nil, err } @@ -231,7 +238,7 @@ func (x *sentryPeerEventsClient) Recv() (*PeerEvent, error) { func (c *sentryClient) NodeInfo(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.NodeInfoReply, error) { out := new(types.NodeInfoReply) - err := c.cc.Invoke(ctx, "/sentry.Sentry/NodeInfo", in, out, opts...) + err := c.cc.Invoke(ctx, Sentry_NodeInfo_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -246,7 +253,6 @@ type SentryServer interface { SetStatus(context.Context, *StatusData) (*SetStatusReply, error) PenalizePeer(context.Context, *PenalizePeerRequest) (*emptypb.Empty, error) PeerMinBlock(context.Context, *PeerMinBlockRequest) (*emptypb.Empty, error) - PeerUseless(context.Context, *PeerUselessRequest) (*emptypb.Empty, error) // HandShake - pre-requirement for all Send* methods - returns list of ETH protocol versions, // without knowledge of protocol - impossible encode correct P2P message HandShake(context.Context, *emptypb.Empty) (*HandShakeReply, error) @@ -281,9 +287,6 @@ func (UnimplementedSentryServer) PenalizePeer(context.Context, *PenalizePeerRequ func (UnimplementedSentryServer) PeerMinBlock(context.Context, *PeerMinBlockRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method PeerMinBlock not implemented") } -func (UnimplementedSentryServer) PeerUseless(context.Context, *PeerUselessRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method PeerUseless not implemented") -} func (UnimplementedSentryServer) HandShake(context.Context, *emptypb.Empty) (*HandShakeReply, error) { return nil, status.Errorf(codes.Unimplemented, "method HandShake not implemented") } @@ -340,7 +343,7 @@ func _Sentry_SetStatus_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/SetStatus", + FullMethod: Sentry_SetStatus_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentryServer).SetStatus(ctx, req.(*StatusData)) @@ -358,7 +361,7 @@ func _Sentry_PenalizePeer_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/PenalizePeer", + FullMethod: Sentry_PenalizePeer_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentryServer).PenalizePeer(ctx, req.(*PenalizePeerRequest)) @@ -376,7 +379,7 @@ func _Sentry_PeerMinBlock_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/PeerMinBlock", + FullMethod: Sentry_PeerMinBlock_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentryServer).PeerMinBlock(ctx, req.(*PeerMinBlockRequest)) @@ -384,24 +387,6 @@ func _Sentry_PeerMinBlock_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } -func _Sentry_PeerUseless_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PeerUselessRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SentryServer).PeerUseless(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/sentry.Sentry/PeerUseless", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SentryServer).PeerUseless(ctx, req.(*PeerUselessRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Sentry_HandShake_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(emptypb.Empty) if err := dec(in); err != nil { @@ -412,7 +397,7 @@ func _Sentry_HandShake_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/HandShake", + FullMethod: Sentry_HandShake_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentryServer).HandShake(ctx, req.(*emptypb.Empty)) @@ -430,7 +415,7 @@ func _Sentry_SendMessageByMinBlock_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/SendMessageByMinBlock", + FullMethod: Sentry_SendMessageByMinBlock_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentryServer).SendMessageByMinBlock(ctx, req.(*SendMessageByMinBlockRequest)) @@ -448,7 +433,7 @@ func _Sentry_SendMessageById_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/SendMessageById", + FullMethod: Sentry_SendMessageById_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentryServer).SendMessageById(ctx, req.(*SendMessageByIdRequest)) @@ -466,7 +451,7 @@ func _Sentry_SendMessageToRandomPeers_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/SendMessageToRandomPeers", + FullMethod: Sentry_SendMessageToRandomPeers_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentryServer).SendMessageToRandomPeers(ctx, req.(*SendMessageToRandomPeersRequest)) @@ -484,7 +469,7 @@ func _Sentry_SendMessageToAll_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/SendMessageToAll", + FullMethod: Sentry_SendMessageToAll_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentryServer).SendMessageToAll(ctx, req.(*OutboundMessageData)) @@ -523,7 +508,7 @@ func _Sentry_Peers_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/Peers", + FullMethod: Sentry_Peers_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentryServer).Peers(ctx, req.(*emptypb.Empty)) @@ -541,7 +526,7 @@ func _Sentry_PeerCount_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/PeerCount", + FullMethod: Sentry_PeerCount_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentryServer).PeerCount(ctx, req.(*PeerCountRequest)) @@ -559,7 +544,7 @@ func _Sentry_PeerById_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/PeerById", + FullMethod: Sentry_PeerById_FullMethodName, } handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { return srv.(SentryServer).PeerById(ctx, req.(*PeerByIdRequest)) @@ -598,7 +583,7 @@ func _Sentry_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sentry.Sentry/NodeInfo", + FullMethod: Sentry_NodeInfo_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SentryServer).NodeInfo(ctx, req.(*emptypb.Empty)) @@ -625,10 +610,6 @@ var Sentry_ServiceDesc = grpc.ServiceDesc{ MethodName: "PeerMinBlock", Handler: _Sentry_PeerMinBlock_Handler, }, - { - MethodName: "PeerUseless", - Handler: _Sentry_PeerUseless_Handler, - }, { MethodName: "HandShake", Handler: _Sentry_HandShake_Handler, diff --git a/gointerfaces/txpool/mining.pb.go b/gointerfaces/txpool/mining.pb.go index 61c1f9fc1..0ac57a961 100644 --- a/gointerfaces/txpool/mining.pb.go +++ b/gointerfaces/txpool/mining.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.22.2 // source: txpool/mining.proto package txpool @@ -65,7 +65,7 @@ type OnPendingBlockReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - RplBlock []byte `protobuf:"bytes,1,opt,name=rplBlock,proto3" json:"rplBlock,omitempty"` + RplBlock []byte `protobuf:"bytes,1,opt,name=rpl_block,json=rplBlock,proto3" json:"rpl_block,omitempty"` } func (x *OnPendingBlockReply) Reset() { @@ -150,7 +150,7 @@ type OnMinedBlockReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - RplBlock []byte `protobuf:"bytes,1,opt,name=rplBlock,proto3" json:"rplBlock,omitempty"` + RplBlock []byte `protobuf:"bytes,1,opt,name=rpl_block,json=rplBlock,proto3" json:"rpl_block,omitempty"` } func (x *OnMinedBlockReply) Reset() { @@ -235,7 +235,7 @@ type OnPendingLogsReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - RplLogs []byte `protobuf:"bytes,1,opt,name=rplLogs,proto3" json:"rplLogs,omitempty"` + RplLogs []byte `protobuf:"bytes,1,opt,name=rpl_logs,json=rplLogs,proto3" json:"rpl_logs,omitempty"` } func (x *OnPendingLogsReply) Reset() { @@ -320,10 +320,10 @@ type GetWorkReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - HeaderHash string `protobuf:"bytes,1,opt,name=headerHash,proto3" json:"headerHash,omitempty"` // 32 bytes hex encoded current block header pow-hash - SeedHash string `protobuf:"bytes,2,opt,name=seedHash,proto3" json:"seedHash,omitempty"` // 32 bytes hex encoded seed hash used for DAG - Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` // 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty - BlockNumber string `protobuf:"bytes,4,opt,name=blockNumber,proto3" json:"blockNumber,omitempty"` // hex encoded block number + HeaderHash string `protobuf:"bytes,1,opt,name=header_hash,json=headerHash,proto3" json:"header_hash,omitempty"` // 32 bytes hex encoded current block header pow-hash + SeedHash string `protobuf:"bytes,2,opt,name=seed_hash,json=seedHash,proto3" json:"seed_hash,omitempty"` // 32 bytes hex encoded seed hash used for DAG + Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` // 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty + BlockNumber string `protobuf:"bytes,4,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"` // hex encoded block number } func (x *GetWorkReply) Reset() { 
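
The mining.pb.go hunks above rename the proto field identifiers from camelCase to snake_case (rplBlock → rpl_block, headerHash → header_hash, and so on), keeping the old spelling as a json= alias in the struct tag; the field numbers are untouched, so the binary wire format is unaffected. What does change is the Go struct's plain encoding/json tag. A minimal sketch of the observable difference — the sample value and the printing are illustrative only:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"

	"github.com/ledgerwatch/erigon-lib/gointerfaces/txpool"
)

func main() {
	msg := &txpool.OnPendingBlockReply{RplBlock: []byte{0x01}}

	// protojson resolves the field through its JSON name, i.e. the
	// json=rplBlock alias, so its output is unchanged by the rename.
	pj, _ := protojson.Marshal(msg) // {"rplBlock":"AQ=="}

	// encoding/json reads the struct tag, which is now "rpl_block".
	ej, _ := json.Marshal(msg) // contains "rpl_block":"AQ=="

	fmt.Println(string(pj))
	fmt.Println(string(ej))
}

So protojson keeps emitting "rplBlock" via the alias, while callers that json.Marshal the generated structs directly would see the renamed snake_case keys.
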
@@ -391,8 +391,8 @@ type SubmitWorkRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BlockNonce []byte `protobuf:"bytes,1,opt,name=blockNonce,proto3" json:"blockNonce,omitempty"` - PowHash []byte `protobuf:"bytes,2,opt,name=powHash,proto3" json:"powHash,omitempty"` + BlockNonce []byte `protobuf:"bytes,1,opt,name=block_nonce,json=blockNonce,proto3" json:"block_nonce,omitempty"` + PowHash []byte `protobuf:"bytes,2,opt,name=pow_hash,json=powHash,proto3" json:"pow_hash,omitempty"` Digest []byte `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"` } @@ -641,7 +641,7 @@ type HashRateReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - HashRate uint64 `protobuf:"varint,1,opt,name=hashRate,proto3" json:"hashRate,omitempty"` + HashRate uint64 `protobuf:"varint,1,opt,name=hash_rate,json=hashRate,proto3" json:"hash_rate,omitempty"` } func (x *HashRateReply) Reset() { @@ -785,93 +785,93 @@ var file_txpool_mining_proto_rawDesc = []byte{ 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x17, 0x0a, 0x15, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x31, 0x0a, 0x13, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, - 0x08, 0x72, 0x70, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x08, 0x72, 0x70, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x15, 0x0a, 0x13, 0x4f, 0x6e, 0x4d, - 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x2f, 0x0a, 0x11, 0x4f, 0x6e, 0x4d, 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x70, 0x6c, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x70, 0x6c, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x22, 0x16, 0x0a, 0x14, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, - 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x12, 0x4f, 0x6e, 0x50, - 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x18, 0x0a, 0x07, 0x72, 0x70, 0x6c, 0x4c, 0x6f, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x72, 0x70, 0x6c, 0x4c, 0x6f, 0x67, 0x73, 0x22, 0x10, 0x0a, 0x0e, 0x47, 0x65, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x0c, - 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1e, 0x0a, 0x0a, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1a, 0x0a, 0x08, - 0x73, 0x65, 0x65, 0x64, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x73, 0x65, 0x65, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x22, 0x65, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x57, 0x6f, 
0x72, 0x6b, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x6f, 0x77, 0x48, 0x61, - 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x6f, 0x77, 0x48, 0x61, 0x73, - 0x68, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x21, 0x0a, 0x0f, 0x53, 0x75, 0x62, - 0x6d, 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, - 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x3b, 0x0a, 0x15, - 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x25, 0x0a, 0x13, 0x53, 0x75, 0x62, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x32, 0x0a, 0x13, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, + 0x09, 0x72, 0x70, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x08, 0x72, 0x70, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x15, 0x0a, 0x13, 0x4f, 0x6e, + 0x4d, 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x30, 0x0a, 0x11, 0x4f, 0x6e, 0x4d, 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x70, 0x6c, 0x5f, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x70, 0x6c, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x22, 0x16, 0x0a, 0x14, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2f, 0x0a, 0x12, 0x4f, + 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x70, 0x6c, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x70, 0x6c, 0x4c, 0x6f, 0x67, 0x73, 0x22, 0x10, 0x0a, 0x0e, + 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x87, + 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x65, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x67, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x6d, + 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x62, 
0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x19, + 0x0a, 0x08, 0x70, 0x6f, 0x77, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x70, 0x6f, 0x77, 0x48, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x22, 0x21, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x3b, 0x0a, 0x15, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x61, 0x74, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, + 0x64, 0x22, 0x25, 0x0a, 0x13, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x52, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x11, 0x0a, 0x0f, 0x48, 0x61, 0x73, 0x68, + 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2c, 0x0a, 0x0d, 0x48, + 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, + 0x68, 0x61, 0x73, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x68, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x22, 0x0f, 0x0a, 0x0d, 0x4d, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x41, 0x0a, 0x0b, 0x4d, 0x69, + 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x32, 0xe2, 0x04, + 0x0a, 0x06, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x4e, 0x0a, 0x0e, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x12, 0x1d, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65, + 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x30, 0x01, + 0x12, 0x48, 0x0a, 0x0c, 0x4f, 0x6e, 0x4d, 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x12, 0x1b, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x4d, 0x69, 0x6e, 0x65, + 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x4d, 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 
0x6c, 0x79, 0x30, 0x01, 0x12, 0x4b, 0x0a, 0x0d, 0x4f, 0x6e, + 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x1c, 0x2e, 0x74, 0x78, + 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, + 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x74, 0x78, 0x70, 0x6f, + 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x67, 0x73, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x30, 0x01, 0x12, 0x37, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x57, 0x6f, + 0x72, 0x6b, 0x12, 0x16, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x57, + 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x78, 0x70, + 0x6f, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x40, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x12, 0x19, + 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x57, 0x6f, + 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x74, 0x78, 0x70, 0x6f, + 0x6f, 0x6c, 0x2e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x4c, 0x0a, 0x0e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x52, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x75, + 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, - 0x22, 0x11, 0x0a, 0x0f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0x2b, 0x0a, 0x0d, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, - 0x22, 0x0f, 0x0a, 0x0d, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0x41, 0x0a, 0x0b, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x75, - 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x75, 0x6e, - 0x6e, 0x69, 0x6e, 0x67, 0x32, 0xe2, 0x04, 0x0a, 0x06, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, - 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x0e, 0x4f, 0x6e, 0x50, 0x65, 0x6e, - 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1d, 0x2e, 0x74, 0x78, 0x70, 0x6f, - 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, - 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 
0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x30, 0x01, 0x12, 0x48, 0x0a, 0x0c, 0x4f, 0x6e, 0x4d, 0x69, 0x6e, - 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, - 0x2e, 0x4f, 0x6e, 0x4d, 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, - 0x4d, 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x30, - 0x01, 0x12, 0x4b, 0x0a, 0x0d, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, - 0x67, 0x73, 0x12, 0x1c, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1a, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x30, 0x01, 0x12, 0x37, - 0x0a, 0x07, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x12, 0x16, 0x2e, 0x74, 0x78, 0x70, 0x6f, - 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x40, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x6d, 0x69, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x12, 0x19, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, - 0x75, 0x62, 0x6d, 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x17, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4c, 0x0a, 0x0e, 0x53, 0x75, 0x62, - 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x74, 0x78, - 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x52, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x78, 0x70, - 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x48, 0x61, 0x73, 0x68, 0x52, - 0x61, 0x74, 0x65, 0x12, 0x17, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x48, 0x61, 0x73, - 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x74, + 0x12, 0x3a, 0x0a, 0x08, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x12, 0x17, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x15, 0x2e, - 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4d, 0x69, - 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x74, - 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x3b, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x34, 0x0a, 0x06, + 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x15, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, + 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, + 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x3b, 0x74, + 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/gointerfaces/txpool/mining_grpc.pb.go b/gointerfaces/txpool/mining_grpc.pb.go index d6491ff09..e20c2b940 100644 --- a/gointerfaces/txpool/mining_grpc.pb.go +++ b/gointerfaces/txpool/mining_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.12 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.2 // source: txpool/mining.proto package txpool @@ -20,6 +20,18 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Mining_Version_FullMethodName = "/txpool.Mining/Version" + Mining_OnPendingBlock_FullMethodName = "/txpool.Mining/OnPendingBlock" + Mining_OnMinedBlock_FullMethodName = "/txpool.Mining/OnMinedBlock" + Mining_OnPendingLogs_FullMethodName = "/txpool.Mining/OnPendingLogs" + Mining_GetWork_FullMethodName = "/txpool.Mining/GetWork" + Mining_SubmitWork_FullMethodName = "/txpool.Mining/SubmitWork" + Mining_SubmitHashRate_FullMethodName = "/txpool.Mining/SubmitHashRate" + Mining_HashRate_FullMethodName = "/txpool.Mining/HashRate" + Mining_Mining_FullMethodName = "/txpool.Mining/Mining" +) + // MiningClient is the client API for Mining service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -35,10 +47,11 @@ type MiningClient interface { // GetWork returns a work package for external miner. // // The work package consists of 3 strings: - // result[0] - 32 bytes hex encoded current block header pow-hash - // result[1] - 32 bytes hex encoded seed hash used for DAG - // result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty - // result[3] - hex encoded block number + // + // result[0] - 32 bytes hex encoded current block header pow-hash + // result[1] - 32 bytes hex encoded seed hash used for DAG + // result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty + // result[3] - hex encoded block number GetWork(ctx context.Context, in *GetWorkRequest, opts ...grpc.CallOption) (*GetWorkReply, error) // SubmitWork can be used by external miner to submit their POW solution. // It returns an indication if the work was accepted. @@ -67,7 +80,7 @@ func NewMiningClient(cc grpc.ClientConnInterface) MiningClient { func (c *miningClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) { out := new(types.VersionReply) - err := c.cc.Invoke(ctx, "/txpool.Mining/Version", in, out, opts...) + err := c.cc.Invoke(ctx, Mining_Version_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -75,7 +88,7 @@ func (c *miningClient) Version(ctx context.Context, in *emptypb.Empty, opts ...g } func (c *miningClient) OnPendingBlock(ctx context.Context, in *OnPendingBlockRequest, opts ...grpc.CallOption) (Mining_OnPendingBlockClient, error) { - stream, err := c.cc.NewStream(ctx, &Mining_ServiceDesc.Streams[0], "/txpool.Mining/OnPendingBlock", opts...) + stream, err := c.cc.NewStream(ctx, &Mining_ServiceDesc.Streams[0], Mining_OnPendingBlock_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -107,7 +120,7 @@ func (x *miningOnPendingBlockClient) Recv() (*OnPendingBlockReply, error) { } func (c *miningClient) OnMinedBlock(ctx context.Context, in *OnMinedBlockRequest, opts ...grpc.CallOption) (Mining_OnMinedBlockClient, error) { - stream, err := c.cc.NewStream(ctx, &Mining_ServiceDesc.Streams[1], "/txpool.Mining/OnMinedBlock", opts...) + stream, err := c.cc.NewStream(ctx, &Mining_ServiceDesc.Streams[1], Mining_OnMinedBlock_FullMethodName, opts...) if err != nil { return nil, err } @@ -139,7 +152,7 @@ func (x *miningOnMinedBlockClient) Recv() (*OnMinedBlockReply, error) { } func (c *miningClient) OnPendingLogs(ctx context.Context, in *OnPendingLogsRequest, opts ...grpc.CallOption) (Mining_OnPendingLogsClient, error) { - stream, err := c.cc.NewStream(ctx, &Mining_ServiceDesc.Streams[2], "/txpool.Mining/OnPendingLogs", opts...) + stream, err := c.cc.NewStream(ctx, &Mining_ServiceDesc.Streams[2], Mining_OnPendingLogs_FullMethodName, opts...) if err != nil { return nil, err } @@ -172,7 +185,7 @@ func (x *miningOnPendingLogsClient) Recv() (*OnPendingLogsReply, error) { func (c *miningClient) GetWork(ctx context.Context, in *GetWorkRequest, opts ...grpc.CallOption) (*GetWorkReply, error) { out := new(GetWorkReply) - err := c.cc.Invoke(ctx, "/txpool.Mining/GetWork", in, out, opts...) + err := c.cc.Invoke(ctx, Mining_GetWork_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -181,7 +194,7 @@ func (c *miningClient) GetWork(ctx context.Context, in *GetWorkRequest, opts ... func (c *miningClient) SubmitWork(ctx context.Context, in *SubmitWorkRequest, opts ...grpc.CallOption) (*SubmitWorkReply, error) { out := new(SubmitWorkReply) - err := c.cc.Invoke(ctx, "/txpool.Mining/SubmitWork", in, out, opts...) + err := c.cc.Invoke(ctx, Mining_SubmitWork_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -190,7 +203,7 @@ func (c *miningClient) SubmitWork(ctx context.Context, in *SubmitWorkRequest, op func (c *miningClient) SubmitHashRate(ctx context.Context, in *SubmitHashRateRequest, opts ...grpc.CallOption) (*SubmitHashRateReply, error) { out := new(SubmitHashRateReply) - err := c.cc.Invoke(ctx, "/txpool.Mining/SubmitHashRate", in, out, opts...) + err := c.cc.Invoke(ctx, Mining_SubmitHashRate_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -199,7 +212,7 @@ func (c *miningClient) SubmitHashRate(ctx context.Context, in *SubmitHashRateReq func (c *miningClient) HashRate(ctx context.Context, in *HashRateRequest, opts ...grpc.CallOption) (*HashRateReply, error) { out := new(HashRateReply) - err := c.cc.Invoke(ctx, "/txpool.Mining/HashRate", in, out, opts...) + err := c.cc.Invoke(ctx, Mining_HashRate_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -208,7 +221,7 @@ func (c *miningClient) HashRate(ctx context.Context, in *HashRateRequest, opts . func (c *miningClient) Mining(ctx context.Context, in *MiningRequest, opts ...grpc.CallOption) (*MiningReply, error) { out := new(MiningReply) - err := c.cc.Invoke(ctx, "/txpool.Mining/Mining", in, out, opts...) + err := c.cc.Invoke(ctx, Mining_Mining_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -230,10 +243,11 @@ type MiningServer interface { // GetWork returns a work package for external miner. 
// // The work package consists of 3 strings: - // result[0] - 32 bytes hex encoded current block header pow-hash - // result[1] - 32 bytes hex encoded seed hash used for DAG - // result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty - // result[3] - hex encoded block number + // + // result[0] - 32 bytes hex encoded current block header pow-hash + // result[1] - 32 bytes hex encoded seed hash used for DAG + // result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty + // result[3] - hex encoded block number GetWork(context.Context, *GetWorkRequest) (*GetWorkReply, error) // SubmitWork can be used by external miner to submit their POW solution. // It returns an indication if the work was accepted. @@ -307,7 +321,7 @@ func _Mining_Version_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Mining/Version", + FullMethod: Mining_Version_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MiningServer).Version(ctx, req.(*emptypb.Empty)) @@ -388,7 +402,7 @@ func _Mining_GetWork_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Mining/GetWork", + FullMethod: Mining_GetWork_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MiningServer).GetWork(ctx, req.(*GetWorkRequest)) @@ -406,7 +420,7 @@ func _Mining_SubmitWork_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Mining/SubmitWork", + FullMethod: Mining_SubmitWork_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MiningServer).SubmitWork(ctx, req.(*SubmitWorkRequest)) @@ -424,7 +438,7 @@ func _Mining_SubmitHashRate_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Mining/SubmitHashRate", + FullMethod: Mining_SubmitHashRate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MiningServer).SubmitHashRate(ctx, req.(*SubmitHashRateRequest)) @@ -442,7 +456,7 @@ func _Mining_HashRate_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Mining/HashRate", + FullMethod: Mining_HashRate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MiningServer).HashRate(ctx, req.(*HashRateRequest)) @@ -460,7 +474,7 @@ func _Mining_Mining_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Mining/Mining", + FullMethod: Mining_Mining_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MiningServer).Mining(ctx, req.(*MiningRequest)) diff --git a/gointerfaces/txpool/txpool.pb.go b/gointerfaces/txpool/txpool.pb.go index 8b27b4c5f..711e8f064 100644 --- a/gointerfaces/txpool/txpool.pb.go +++ b/gointerfaces/txpool/txpool.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.22.2 // source: txpool/txpool.proto package txpool @@ -181,7 +181,7 @@ type AddRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - RlpTxs [][]byte `protobuf:"bytes,1,rep,name=rlpTxs,proto3" json:"rlpTxs,omitempty"` + RlpTxs [][]byte `protobuf:"bytes,1,rep,name=rlp_txs,json=rlpTxs,proto3" json:"rlp_txs,omitempty"` } func (x *AddRequest) Reset() { @@ -330,7 +330,7 @@ type TransactionsReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - RlpTxs [][]byte `protobuf:"bytes,1,rep,name=rlpTxs,proto3" json:"rlpTxs,omitempty"` + RlpTxs [][]byte `protobuf:"bytes,1,rep,name=rlp_txs,json=rlpTxs,proto3" json:"rlp_txs,omitempty"` } func (x *TransactionsReply) Reset() { @@ -415,7 +415,7 @@ type OnAddReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - RplTxs [][]byte `protobuf:"bytes,1,rep,name=rplTxs,proto3" json:"rplTxs,omitempty"` + RplTxs [][]byte `protobuf:"bytes,1,rep,name=rpl_txs,json=rplTxs,proto3" json:"rpl_txs,omitempty"` } func (x *OnAddReply) Reset() { @@ -632,9 +632,9 @@ type StatusReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - PendingCount uint32 `protobuf:"varint,1,opt,name=pendingCount,proto3" json:"pendingCount,omitempty"` - QueuedCount uint32 `protobuf:"varint,2,opt,name=queuedCount,proto3" json:"queuedCount,omitempty"` - BaseFeeCount uint32 `protobuf:"varint,3,opt,name=baseFeeCount,proto3" json:"baseFeeCount,omitempty"` + PendingCount uint32 `protobuf:"varint,1,opt,name=pending_count,json=pendingCount,proto3" json:"pending_count,omitempty"` + QueuedCount uint32 `protobuf:"varint,2,opt,name=queued_count,json=queuedCount,proto3" json:"queued_count,omitempty"` + BaseFeeCount uint32 `protobuf:"varint,3,opt,name=base_fee_count,json=baseFeeCount,proto3" json:"base_fee_count,omitempty"` } func (x *StatusReply) Reset() { @@ -797,9 +797,9 @@ type AllReply_Tx struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TxnType AllReply_TxnType `protobuf:"varint,1,opt,name=txnType,proto3,enum=txpool.AllReply_TxnType" json:"txnType,omitempty"` + TxnType AllReply_TxnType `protobuf:"varint,1,opt,name=txn_type,json=txnType,proto3,enum=txpool.AllReply_TxnType" json:"txn_type,omitempty"` Sender *types.H160 `protobuf:"bytes,2,opt,name=sender,proto3" json:"sender,omitempty"` - RlpTx []byte `protobuf:"bytes,3,opt,name=rlpTx,proto3" json:"rlpTx,omitempty"` + RlpTx []byte `protobuf:"bytes,3,opt,name=rlp_tx,json=rlpTx,proto3" json:"rlp_tx,omitempty"` } func (x *AllReply_Tx) Reset() { @@ -861,8 +861,8 @@ type PendingReply_Tx struct { unknownFields protoimpl.UnknownFields Sender *types.H160 `protobuf:"bytes,1,opt,name=sender,proto3" json:"sender,omitempty"` - RlpTx []byte `protobuf:"bytes,2,opt,name=rlpTx,proto3" json:"rlpTx,omitempty"` - IsLocal bool `protobuf:"varint,3,opt,name=isLocal,proto3" json:"isLocal,omitempty"` + RlpTx []byte `protobuf:"bytes,2,opt,name=rlp_tx,json=rlpTx,proto3" json:"rlp_tx,omitempty"` + IsLocal bool `protobuf:"varint,3,opt,name=is_local,json=isLocal,proto3" json:"is_local,omitempty"` } func (x *PendingReply_Tx) Reset() { @@ -928,104 +928,105 @@ var file_txpool_txpool_proto_rawDesc = []byte{ 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2f, 0x0a, 0x08, 0x54, 0x78, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x24, - 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x72, 0x6c, 0x70, 0x54, 0x78, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x6c, - 0x70, 0x54, 0x78, 0x73, 0x22, 0x54, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x30, 0x0a, 0x08, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x3a, 0x0a, 0x13, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x23, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, - 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x11, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, - 0x6c, 0x70, 0x54, 0x78, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x6c, 0x70, - 0x54, 0x78, 0x73, 0x22, 0x0e, 0x0a, 0x0c, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x0a, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x70, 0x6c, 0x54, 0x78, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x06, 0x72, 0x70, 0x6c, 0x54, 0x78, 0x73, 0x22, 0x0c, 0x0a, 0x0a, 0x41, 0x6c, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xd8, 0x01, 0x0a, 0x08, 0x41, 0x6c, 0x6c, 0x52, - 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x03, 0x74, 0x78, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x6c, 0x6c, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x2e, 0x54, 0x78, 0x52, 0x03, 0x74, 0x78, 0x73, 0x1a, 0x73, 0x0a, 0x02, 0x54, - 0x78, 0x12, 0x32, 0x0a, 0x07, 0x74, 0x78, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x6c, 0x6c, 0x52, - 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x54, 0x78, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x78, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, - 0x36, 0x30, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6c, - 0x70, 0x54, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x72, 0x6c, 0x70, 0x54, 0x78, - 0x22, 0x30, 0x0a, 0x07, 0x54, 0x78, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x50, - 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, - 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x42, 0x41, 0x53, 0x45, 0x5f, 0x46, 0x45, 0x45, - 0x10, 0x02, 0x22, 0x94, 0x01, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x29, 0x0a, 0x03, 0x74, 0x78, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x50, 0x65, 0x6e, 
0x64, 0x69, 0x6e, - 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x54, 0x78, 0x52, 0x03, 0x74, 0x78, 0x73, 0x1a, 0x59, - 0x0a, 0x02, 0x54, 0x78, 0x12, 0x23, 0x0a, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, - 0x30, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6c, 0x70, - 0x54, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x72, 0x6c, 0x70, 0x54, 0x78, 0x12, - 0x18, 0x0a, 0x07, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x07, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x22, 0x0f, 0x0a, 0x0d, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x77, 0x0a, 0x0b, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x22, 0x0a, 0x0c, 0x70, 0x65, 0x6e, - 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0c, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x20, 0x0a, - 0x0b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x0b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x22, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x22, 0x35, 0x0a, 0x0c, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, - 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x38, 0x0a, 0x0a, 0x4e, 0x6f, - 0x6e, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x14, - 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, - 0x6f, 0x6e, 0x63, 0x65, 0x2a, 0x6c, 0x0a, 0x0c, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, - 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, - 0x53, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x46, 0x45, 0x45, 0x5f, 0x54, 0x4f, 0x4f, - 0x5f, 0x4c, 0x4f, 0x57, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, - 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x04, 0x12, 0x12, - 0x0a, 0x0e, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x10, 0x05, 0x32, 0xec, 0x03, 0x0a, 0x06, 0x54, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x12, 0x36, 0x0a, - 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x13, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x0b, 0x46, 0x69, 0x6e, 0x64, 0x55, 0x6e, 0x6b, - 0x6e, 0x6f, 0x77, 0x6e, 0x12, 0x10, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x54, 0x78, - 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x1a, 0x10, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, - 0x54, 
0x78, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x12, - 0x12, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x46, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x54, + 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x25, + 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, + 0x72, 0x6c, 0x70, 0x5f, 0x74, 0x78, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x72, + 0x6c, 0x70, 0x54, 0x78, 0x73, 0x22, 0x54, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x30, 0x0a, 0x08, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x69, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x3a, 0x0a, 0x13, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2b, 0x0a, - 0x03, 0x41, 0x6c, 0x6c, 0x12, 0x12, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x6c, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, - 0x6c, 0x2e, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x50, 0x65, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x14, 0x2e, - 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x12, 0x14, 0x2e, 0x74, - 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x41, 0x64, - 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x15, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x74, 0x78, 0x70, 0x6f, - 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, - 0x0a, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, - 0x2e, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, - 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x3b, 0x74, 0x78, - 0x70, 0x6f, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x74, 0x12, 0x23, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 
0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, + 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x2c, 0x0a, 0x11, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x17, 0x0a, 0x07, + 0x72, 0x6c, 0x70, 0x5f, 0x74, 0x78, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x72, + 0x6c, 0x70, 0x54, 0x78, 0x73, 0x22, 0x0e, 0x0a, 0x0c, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x25, 0x0a, 0x0a, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x70, 0x6c, 0x5f, 0x74, 0x78, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x70, 0x6c, 0x54, 0x78, 0x73, 0x22, 0x0c, 0x0a, 0x0a, + 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xda, 0x01, 0x0a, 0x08, 0x41, + 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x03, 0x74, 0x78, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x6c, + 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x54, 0x78, 0x52, 0x03, 0x74, 0x78, 0x73, 0x1a, 0x75, + 0x0a, 0x02, 0x54, 0x78, 0x12, 0x33, 0x0a, 0x08, 0x74, 0x78, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, + 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x54, 0x78, 0x6e, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x07, 0x74, 0x78, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x06, 0x73, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x15, + 0x0a, 0x06, 0x72, 0x6c, 0x70, 0x5f, 0x74, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x72, 0x6c, 0x70, 0x54, 0x78, 0x22, 0x30, 0x0a, 0x07, 0x54, 0x78, 0x6e, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x42, 0x41, 0x53, + 0x45, 0x5f, 0x46, 0x45, 0x45, 0x10, 0x02, 0x22, 0x96, 0x01, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x29, 0x0a, 0x03, 0x74, 0x78, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x50, + 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x54, 0x78, 0x52, 0x03, + 0x74, 0x78, 0x73, 0x1a, 0x5b, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x23, 0x0a, 0x06, 0x73, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x15, + 0x0a, 0x06, 0x72, 0x6c, 0x70, 0x5f, 0x74, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x72, 0x6c, 0x70, 0x54, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x22, 0x0f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x7b, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x71, 0x75, 0x65, 0x75, 
0x65, 0x64, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x71, 0x75, 0x65, + 0x75, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, + 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x35, + 0x0a, 0x0c, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x38, 0x0a, 0x0a, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2a, + 0x6c, 0x0a, 0x0c, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, + 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x01, + 0x12, 0x0f, 0x0a, 0x0b, 0x46, 0x45, 0x45, 0x5f, 0x54, 0x4f, 0x4f, 0x5f, 0x4c, 0x4f, 0x57, 0x10, + 0x02, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, + 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x54, + 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x32, 0xec, 0x03, + 0x0a, 0x06, 0x54, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x31, 0x0a, 0x0b, 0x46, 0x69, 0x6e, 0x64, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, + 0x10, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x54, 0x78, 0x48, 0x61, 0x73, 0x68, 0x65, + 0x73, 0x1a, 0x10, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x54, 0x78, 0x48, 0x61, 0x73, + 0x68, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x12, 0x12, 0x2e, 0x74, 0x78, 0x70, + 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, + 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x46, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x1b, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x03, 0x41, 0x6c, 0x6c, 0x12, + 0x12, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x6c, 0x6c, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x12, 
0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, + 0x6c, 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x33, + 0x0a, 0x05, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x12, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, + 0x2e, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, + 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x2e, + 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, + 0x63, 0x65, 0x12, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4e, 0x6f, 0x6e, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, + 0x6c, 0x2e, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x11, 0x5a, 0x0f, + 0x2e, 0x2f, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x3b, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1073,7 +1074,7 @@ var file_txpool_txpool_proto_depIdxs = []int32{ 16, // 3: txpool.AllReply.txs:type_name -> txpool.AllReply.Tx 17, // 4: txpool.PendingReply.txs:type_name -> txpool.PendingReply.Tx 19, // 5: txpool.NonceRequest.address:type_name -> types.H160 - 1, // 6: txpool.AllReply.Tx.txnType:type_name -> txpool.AllReply.TxnType + 1, // 6: txpool.AllReply.Tx.txn_type:type_name -> txpool.AllReply.TxnType 19, // 7: txpool.AllReply.Tx.sender:type_name -> types.H160 19, // 8: txpool.PendingReply.Tx.sender:type_name -> types.H160 20, // 9: txpool.Txpool.Version:input_type -> google.protobuf.Empty diff --git a/gointerfaces/txpool/txpool_grpc.pb.go b/gointerfaces/txpool/txpool_grpc.pb.go index cb0a430e0..1cad16590 100644 --- a/gointerfaces/txpool/txpool_grpc.pb.go +++ b/gointerfaces/txpool/txpool_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.12 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.2 // source: txpool/txpool.proto package txpool @@ -20,6 +20,18 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Txpool_Version_FullMethodName = "/txpool.Txpool/Version" + Txpool_FindUnknown_FullMethodName = "/txpool.Txpool/FindUnknown" + Txpool_Add_FullMethodName = "/txpool.Txpool/Add" + Txpool_Transactions_FullMethodName = "/txpool.Txpool/Transactions" + Txpool_All_FullMethodName = "/txpool.Txpool/All" + Txpool_Pending_FullMethodName = "/txpool.Txpool/Pending" + Txpool_OnAdd_FullMethodName = "/txpool.Txpool/OnAdd" + Txpool_Status_FullMethodName = "/txpool.Txpool/Status" + Txpool_Nonce_FullMethodName = "/txpool.Txpool/Nonce" +) + // TxpoolClient is the client API for Txpool service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
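// Editorial sketch (not part of this diff): protoc-gen-go-grpc v1.3.0 emits the
// "<Service>_<Method>_FullMethodName" constants seen in the hunk above, replacing
// the string literals previously repeated in every client stub and server handler.
// A minimal, hypothetical example of consuming one such constant from a unary
// client interceptor — the interceptor, endpoint, and logging below are
// illustrative assumptions, not part of this change:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/ledgerwatch/erigon-lib/gointerfaces/txpool"
)

// timeAdd logs the latency of Txpool.Add calls, matching the RPC by the
// generated constant rather than the hand-written "/txpool.Txpool/Add".
func timeAdd(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	if method == txpool.Txpool_Add_FullMethodName {
		log.Printf("%s took %s (err=%v)", method, time.Since(start), err)
	}
	return err
}

func main() {
	// Hypothetical local endpoint, used here only to wire up the interceptor.
	conn, err := grpc.Dial("localhost:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(timeAdd))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	_ = txpool.NewTxpoolClient(conn)
}

// End of editorial sketch; the diff resumes below.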
@@ -55,7 +67,7 @@ func NewTxpoolClient(cc grpc.ClientConnInterface) TxpoolClient { func (c *txpoolClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) { out := new(types.VersionReply) - err := c.cc.Invoke(ctx, "/txpool.Txpool/Version", in, out, opts...) + err := c.cc.Invoke(ctx, Txpool_Version_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -64,7 +76,7 @@ func (c *txpoolClient) Version(ctx context.Context, in *emptypb.Empty, opts ...g func (c *txpoolClient) FindUnknown(ctx context.Context, in *TxHashes, opts ...grpc.CallOption) (*TxHashes, error) { out := new(TxHashes) - err := c.cc.Invoke(ctx, "/txpool.Txpool/FindUnknown", in, out, opts...) + err := c.cc.Invoke(ctx, Txpool_FindUnknown_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -73,7 +85,7 @@ func (c *txpoolClient) FindUnknown(ctx context.Context, in *TxHashes, opts ...gr func (c *txpoolClient) Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*AddReply, error) { out := new(AddReply) - err := c.cc.Invoke(ctx, "/txpool.Txpool/Add", in, out, opts...) + err := c.cc.Invoke(ctx, Txpool_Add_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -82,7 +94,7 @@ func (c *txpoolClient) Add(ctx context.Context, in *AddRequest, opts ...grpc.Cal func (c *txpoolClient) Transactions(ctx context.Context, in *TransactionsRequest, opts ...grpc.CallOption) (*TransactionsReply, error) { out := new(TransactionsReply) - err := c.cc.Invoke(ctx, "/txpool.Txpool/Transactions", in, out, opts...) + err := c.cc.Invoke(ctx, Txpool_Transactions_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -91,7 +103,7 @@ func (c *txpoolClient) Transactions(ctx context.Context, in *TransactionsRequest func (c *txpoolClient) All(ctx context.Context, in *AllRequest, opts ...grpc.CallOption) (*AllReply, error) { out := new(AllReply) - err := c.cc.Invoke(ctx, "/txpool.Txpool/All", in, out, opts...) + err := c.cc.Invoke(ctx, Txpool_All_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -100,7 +112,7 @@ func (c *txpoolClient) All(ctx context.Context, in *AllRequest, opts ...grpc.Cal func (c *txpoolClient) Pending(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PendingReply, error) { out := new(PendingReply) - err := c.cc.Invoke(ctx, "/txpool.Txpool/Pending", in, out, opts...) + err := c.cc.Invoke(ctx, Txpool_Pending_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -108,7 +120,7 @@ func (c *txpoolClient) Pending(ctx context.Context, in *emptypb.Empty, opts ...g } func (c *txpoolClient) OnAdd(ctx context.Context, in *OnAddRequest, opts ...grpc.CallOption) (Txpool_OnAddClient, error) { - stream, err := c.cc.NewStream(ctx, &Txpool_ServiceDesc.Streams[0], "/txpool.Txpool/OnAdd", opts...) + stream, err := c.cc.NewStream(ctx, &Txpool_ServiceDesc.Streams[0], Txpool_OnAdd_FullMethodName, opts...) if err != nil { return nil, err } @@ -141,7 +153,7 @@ func (x *txpoolOnAddClient) Recv() (*OnAddReply, error) { func (c *txpoolClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusReply, error) { out := new(StatusReply) - err := c.cc.Invoke(ctx, "/txpool.Txpool/Status", in, out, opts...) + err := c.cc.Invoke(ctx, Txpool_Status_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -150,7 +162,7 @@ func (c *txpoolClient) Status(ctx context.Context, in *StatusRequest, opts ...gr func (c *txpoolClient) Nonce(ctx context.Context, in *NonceRequest, opts ...grpc.CallOption) (*NonceReply, error) { out := new(NonceReply) - err := c.cc.Invoke(ctx, "/txpool.Txpool/Nonce", in, out, opts...) + err := c.cc.Invoke(ctx, Txpool_Nonce_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -237,7 +249,7 @@ func _Txpool_Version_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Txpool/Version", + FullMethod: Txpool_Version_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TxpoolServer).Version(ctx, req.(*emptypb.Empty)) @@ -255,7 +267,7 @@ func _Txpool_FindUnknown_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Txpool/FindUnknown", + FullMethod: Txpool_FindUnknown_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TxpoolServer).FindUnknown(ctx, req.(*TxHashes)) @@ -273,7 +285,7 @@ func _Txpool_Add_Handler(srv interface{}, ctx context.Context, dec func(interfac } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Txpool/Add", + FullMethod: Txpool_Add_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TxpoolServer).Add(ctx, req.(*AddRequest)) @@ -291,7 +303,7 @@ func _Txpool_Transactions_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Txpool/Transactions", + FullMethod: Txpool_Transactions_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TxpoolServer).Transactions(ctx, req.(*TransactionsRequest)) @@ -309,7 +321,7 @@ func _Txpool_All_Handler(srv interface{}, ctx context.Context, dec func(interfac } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Txpool/All", + FullMethod: Txpool_All_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TxpoolServer).All(ctx, req.(*AllRequest)) @@ -327,7 +339,7 @@ func _Txpool_Pending_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Txpool/Pending", + FullMethod: Txpool_Pending_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TxpoolServer).Pending(ctx, req.(*emptypb.Empty)) @@ -366,7 +378,7 @@ func _Txpool_Status_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Txpool/Status", + FullMethod: Txpool_Status_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TxpoolServer).Status(ctx, req.(*StatusRequest)) @@ -384,7 +396,7 @@ func _Txpool_Nonce_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/txpool.Txpool/Nonce", + FullMethod: Txpool_Nonce_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TxpoolServer).Nonce(ctx, req.(*NonceRequest)) diff --git a/gointerfaces/types/types.pb.go b/gointerfaces/types/types.pb.go index e904c77e4..2ea63fe87 100644 --- 
a/gointerfaces/types/types.pb.go +++ b/gointerfaces/types/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.22.2 // source: types/types.proto package types @@ -417,29 +417,29 @@ func (x *VersionReply) GetPatch() uint32 { // ------------------------------------------------------------------------ // Engine API types -// See https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md +// See https://github.com/ethereum/execution-apis/blob/main/src/engine type ExecutionPayload struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // v1 - no withdrawals, v2 - with withdrawals, v3 - with excess data gas - ParentHash *H256 `protobuf:"bytes,2,opt,name=parentHash,proto3" json:"parentHash,omitempty"` + ParentHash *H256 `protobuf:"bytes,2,opt,name=parent_hash,json=parentHash,proto3" json:"parent_hash,omitempty"` Coinbase *H160 `protobuf:"bytes,3,opt,name=coinbase,proto3" json:"coinbase,omitempty"` - StateRoot *H256 `protobuf:"bytes,4,opt,name=stateRoot,proto3" json:"stateRoot,omitempty"` - ReceiptRoot *H256 `protobuf:"bytes,5,opt,name=receiptRoot,proto3" json:"receiptRoot,omitempty"` - LogsBloom *H2048 `protobuf:"bytes,6,opt,name=logsBloom,proto3" json:"logsBloom,omitempty"` - PrevRandao *H256 `protobuf:"bytes,7,opt,name=prevRandao,proto3" json:"prevRandao,omitempty"` - BlockNumber uint64 `protobuf:"varint,8,opt,name=blockNumber,proto3" json:"blockNumber,omitempty"` - GasLimit uint64 `protobuf:"varint,9,opt,name=gasLimit,proto3" json:"gasLimit,omitempty"` - GasUsed uint64 `protobuf:"varint,10,opt,name=gasUsed,proto3" json:"gasUsed,omitempty"` + StateRoot *H256 `protobuf:"bytes,4,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty"` + ReceiptRoot *H256 `protobuf:"bytes,5,opt,name=receipt_root,json=receiptRoot,proto3" json:"receipt_root,omitempty"` + LogsBloom *H2048 `protobuf:"bytes,6,opt,name=logs_bloom,json=logsBloom,proto3" json:"logs_bloom,omitempty"` + PrevRandao *H256 `protobuf:"bytes,7,opt,name=prev_randao,json=prevRandao,proto3" json:"prev_randao,omitempty"` + BlockNumber uint64 `protobuf:"varint,8,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"` + GasLimit uint64 `protobuf:"varint,9,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` + GasUsed uint64 `protobuf:"varint,10,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` Timestamp uint64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - ExtraData []byte `protobuf:"bytes,12,opt,name=extraData,proto3" json:"extraData,omitempty"` - BaseFeePerGas *H256 `protobuf:"bytes,13,opt,name=baseFeePerGas,proto3" json:"baseFeePerGas,omitempty"` - BlockHash *H256 `protobuf:"bytes,14,opt,name=blockHash,proto3" json:"blockHash,omitempty"` + ExtraData []byte `protobuf:"bytes,12,opt,name=extra_data,json=extraData,proto3" json:"extra_data,omitempty"` + BaseFeePerGas *H256 `protobuf:"bytes,13,opt,name=base_fee_per_gas,json=baseFeePerGas,proto3" json:"base_fee_per_gas,omitempty"` + BlockHash *H256 `protobuf:"bytes,14,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` Transactions [][]byte `protobuf:"bytes,15,rep,name=transactions,proto3" json:"transactions,omitempty"` Withdrawals []*Withdrawal `protobuf:"bytes,16,rep,name=withdrawals,proto3" 
json:"withdrawals,omitempty"` - ExcessDataGas *H256 `protobuf:"bytes,17,opt,name=excessDataGas,proto3" json:"excessDataGas,omitempty"` + ExcessDataGas *H256 `protobuf:"bytes,17,opt,name=excess_data_gas,json=excessDataGas,proto3,oneof" json:"excess_data_gas,omitempty"` } func (x *ExecutionPayload) Reset() { @@ -599,7 +599,7 @@ type Withdrawal struct { unknownFields protoimpl.UnknownFields Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` - ValidatorIndex uint64 `protobuf:"varint,2,opt,name=validatorIndex,proto3" json:"validatorIndex,omitempty"` + ValidatorIndex uint64 `protobuf:"varint,2,opt,name=validator_index,json=validatorIndex,proto3" json:"validator_index,omitempty"` Address *H160 `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` Amount uint64 `protobuf:"varint,4,opt,name=amount,proto3" json:"amount,omitempty"` } @@ -669,7 +669,7 @@ type BlobsBundleV1 struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BlockHash *H256 `protobuf:"bytes,1,opt,name=blockHash,proto3" json:"blockHash,omitempty"` + BlockHash *H256 `protobuf:"bytes,1,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` // TODO(eip-4844): define a protobuf message for type KZGCommitment Kzgs [][]byte `protobuf:"bytes,2,rep,name=kzgs,proto3" json:"kzgs,omitempty"` // TODO(eip-4844): define a protobuf message for type Blob @@ -794,7 +794,7 @@ type NodeInfoReply struct { Enode string `protobuf:"bytes,3,opt,name=enode,proto3" json:"enode,omitempty"` Enr string `protobuf:"bytes,4,opt,name=enr,proto3" json:"enr,omitempty"` Ports *NodeInfoPorts `protobuf:"bytes,5,opt,name=ports,proto3" json:"ports,omitempty"` - ListenerAddr string `protobuf:"bytes,6,opt,name=listenerAddr,proto3" json:"listenerAddr,omitempty"` + ListenerAddr string `protobuf:"bytes,6,opt,name=listener_addr,json=listenerAddr,proto3" json:"listener_addr,omitempty"` Protocols []byte `protobuf:"bytes,7,opt,name=protocols,proto3" json:"protocols,omitempty"` } @@ -889,11 +889,11 @@ type PeerInfo struct { Enode string `protobuf:"bytes,3,opt,name=enode,proto3" json:"enode,omitempty"` Enr string `protobuf:"bytes,4,opt,name=enr,proto3" json:"enr,omitempty"` Caps []string `protobuf:"bytes,5,rep,name=caps,proto3" json:"caps,omitempty"` - ConnLocalAddr string `protobuf:"bytes,6,opt,name=connLocalAddr,proto3" json:"connLocalAddr,omitempty"` - ConnRemoteAddr string `protobuf:"bytes,7,opt,name=connRemoteAddr,proto3" json:"connRemoteAddr,omitempty"` - ConnIsInbound bool `protobuf:"varint,8,opt,name=connIsInbound,proto3" json:"connIsInbound,omitempty"` - ConnIsTrusted bool `protobuf:"varint,9,opt,name=connIsTrusted,proto3" json:"connIsTrusted,omitempty"` - ConnIsStatic bool `protobuf:"varint,10,opt,name=connIsStatic,proto3" json:"connIsStatic,omitempty"` + ConnLocalAddr string `protobuf:"bytes,6,opt,name=conn_local_addr,json=connLocalAddr,proto3" json:"conn_local_addr,omitempty"` + ConnRemoteAddr string `protobuf:"bytes,7,opt,name=conn_remote_addr,json=connRemoteAddr,proto3" json:"conn_remote_addr,omitempty"` + ConnIsInbound bool `protobuf:"varint,8,opt,name=conn_is_inbound,json=connIsInbound,proto3" json:"conn_is_inbound,omitempty"` + ConnIsTrusted bool `protobuf:"varint,9,opt,name=conn_is_trusted,json=connIsTrusted,proto3" json:"conn_is_trusted,omitempty"` + ConnIsStatic bool `protobuf:"varint,10,opt,name=conn_is_static,json=connIsStatic,proto3" json:"conn_is_static,omitempty"` } func (x *PeerInfo) Reset() { @@ -1124,126 +1124,129 @@ var file_types_types_proto_rawDesc = []byte{ 0x20, 
0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x22, 0xb3, 0x05, 0x0a, 0x10, 0x45, 0x78, 0x65, 0x63, + 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x22, 0xdb, 0x05, 0x0a, 0x10, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, - 0x61, 0x73, 0x68, 0x12, 0x27, 0x0a, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, - 0x36, 0x30, 0x52, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x09, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2d, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, - 0x70, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, - 0x70, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x73, 0x42, 0x6c, - 0x6f, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x32, 0x30, 0x34, 0x38, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x73, 0x42, 0x6c, 0x6f, - 0x6f, 0x6d, 0x12, 0x2b, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x76, 0x52, 0x61, 0x6e, 0x64, 0x61, 0x6f, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, - 0x32, 0x35, 0x36, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x76, 0x52, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x12, - 0x20, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x18, 0x0a, - 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, - 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, - 0x74, 0x61, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, - 0x61, 0x74, 0x61, 0x12, 0x31, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x50, 0x65, - 0x72, 0x47, 0x61, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, - 0x50, 0x65, 0x72, 0x47, 0x61, 0x73, 
0x12, 0x29, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, - 0x61, 0x73, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, - 0x68, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, - 0x77, 0x61, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, - 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x12, 0x31, 0x0a, 0x0d, 0x65, 0x78, - 0x63, 0x65, 0x73, 0x73, 0x44, 0x61, 0x74, 0x61, 0x47, 0x61, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d, - 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x44, 0x61, 0x74, 0x61, 0x47, 0x61, 0x73, 0x22, 0x89, 0x01, - 0x0a, 0x0a, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x12, 0x26, 0x0a, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x07, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x64, 0x0a, 0x0d, 0x42, 0x6c, 0x6f, - 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, 0x31, 0x12, 0x29, 0x0a, 0x09, 0x62, 0x6c, - 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x7a, 0x67, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0c, 0x52, 0x04, 0x6b, 0x7a, 0x67, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, - 0x62, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x22, - 0x49, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x73, - 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x09, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x12, 0x1a, - 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x22, 0xc9, 0x01, 0x0a, 0x0d, 0x4e, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 
0x65, 0x6e, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x72, 0x12, 0x2a, 0x0a, 0x05, 0x70, 0x6f, 0x72, 0x74, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, - 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x52, 0x05, 0x70, - 0x6f, 0x72, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, - 0x41, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0xa8, 0x02, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, - 0x03, 0x65, 0x6e, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x72, 0x12, - 0x12, 0x0a, 0x04, 0x63, 0x61, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x63, - 0x61, 0x70, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, - 0x41, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, - 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, - 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x49, 0x6e, 0x62, 0x6f, 0x75, - 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, - 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x49, - 0x73, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, - 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x12, 0x22, 0x0a, - 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x53, 0x74, 0x61, 0x74, 0x69, - 0x63, 0x22, 0x71, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x79, 0x56, 0x31, 0x12, 0x22, 0x0a, 0x0c, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, 0x69, 0x74, - 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, - 0x77, 0x61, 0x6c, 0x73, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xd1, 0x86, 0x03, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x61, 0x6a, 0x6f, - 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0xd2, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x52, 0x0a, 0x15, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0xd3, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x50, 0x61, 0x74, 0x63, 0x68, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x42, 0x0f, 0x5a, 0x0d, 0x2e, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x48, 0x61, 0x73, 0x68, 0x12, 0x27, 0x0a, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, + 0x31, 0x36, 0x30, 0x52, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x12, 0x2a, 0x0a, + 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2e, 0x0a, 0x0c, 0x72, 0x65, 0x63, + 0x65, 0x69, 0x70, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0b, 0x72, 0x65, + 0x63, 0x65, 0x69, 0x70, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, + 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x30, 0x34, 0x38, 0x52, 0x09, 0x6c, 0x6f, 0x67, + 0x73, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x12, 0x2c, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x72, + 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x76, 0x52, 0x61, + 0x6e, 0x64, 0x61, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x67, 
0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, + 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1d, 0x0a, + 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x10, + 0x62, 0x61, 0x73, 0x65, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, + 0x32, 0x35, 0x36, 0x52, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x50, 0x65, 0x72, 0x47, + 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, + 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, + 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0f, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, + 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, + 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x12, 0x38, 0x0a, 0x0f, 0x65, 0x78, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x67, 0x61, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x00, 0x52, + 0x0d, 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x44, 0x61, 0x74, 0x61, 0x47, 0x61, 0x73, 0x88, 0x01, + 0x01, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x67, 0x61, 0x73, 0x22, 0x8a, 0x01, 0x0a, 0x0a, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, + 0x61, 0x77, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x27, 0x0a, 0x0f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, + 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x22, 0x65, 0x0a, 0x0d, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x56, 0x31, 0x12, 0x2a, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, + 0x12, 0x0a, 0x04, 0x6b, 0x7a, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x04, 0x6b, + 0x7a, 0x67, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 
0x03, 0x20, 0x03, + 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x22, 0x49, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x64, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x22, 0xca, 0x01, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, + 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, + 0x6e, 0x72, 0x12, 0x2a, 0x0a, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x52, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x41, + 0x64, 0x64, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x22, 0xb2, 0x02, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x61, + 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x63, 0x61, 0x70, 0x73, 0x12, 0x26, + 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, + 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x49, + 0x73, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, + 0x5f, 0x69, 0x73, 0x5f, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, + 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x69, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x69, 
0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x22, 0x71, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x79, 0x56, 0x31, + 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, + 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, 0x69, + 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xd1, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4d, 0x61, 0x6a, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x52, 0x0a, + 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xd2, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74, + 0x63, 0x68, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xd3, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x61, 0x74, 0x63, 0x68, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x5a, 0x0d, 0x2e, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1286,18 +1289,18 @@ var file_types_types_proto_depIdxs = []int32{ 3, // 6: types.H1024.lo:type_name -> types.H512 4, // 7: types.H2048.hi:type_name -> types.H1024 4, // 8: types.H2048.lo:type_name -> types.H1024 - 2, // 9: types.ExecutionPayload.parentHash:type_name -> types.H256 + 2, // 9: types.ExecutionPayload.parent_hash:type_name -> types.H256 1, // 10: types.ExecutionPayload.coinbase:type_name -> types.H160 - 2, // 11: types.ExecutionPayload.stateRoot:type_name -> types.H256 - 2, // 12: types.ExecutionPayload.receiptRoot:type_name -> types.H256 - 5, // 13: types.ExecutionPayload.logsBloom:type_name -> types.H2048 - 2, // 14: types.ExecutionPayload.prevRandao:type_name -> types.H256 - 2, // 15: types.ExecutionPayload.baseFeePerGas:type_name -> types.H256 - 2, // 16: types.ExecutionPayload.blockHash:type_name -> types.H256 + 2, // 11: types.ExecutionPayload.state_root:type_name -> types.H256 + 2, // 12: types.ExecutionPayload.receipt_root:type_name -> types.H256 + 5, // 13: 
types.ExecutionPayload.logs_bloom:type_name -> types.H2048 + 2, // 14: types.ExecutionPayload.prev_randao:type_name -> types.H256 + 2, // 15: types.ExecutionPayload.base_fee_per_gas:type_name -> types.H256 + 2, // 16: types.ExecutionPayload.block_hash:type_name -> types.H256 8, // 17: types.ExecutionPayload.withdrawals:type_name -> types.Withdrawal - 2, // 18: types.ExecutionPayload.excessDataGas:type_name -> types.H256 + 2, // 18: types.ExecutionPayload.excess_data_gas:type_name -> types.H256 1, // 19: types.Withdrawal.address:type_name -> types.H160 - 2, // 20: types.BlobsBundleV1.blockHash:type_name -> types.H256 + 2, // 20: types.BlobsBundleV1.block_hash:type_name -> types.H256 10, // 21: types.NodeInfoReply.ports:type_name -> types.NodeInfoPorts 8, // 22: types.ExecutionPayloadBodyV1.withdrawals:type_name -> types.Withdrawal 14, // 23: types.service_major_version:extendee -> google.protobuf.FileOptions @@ -1485,6 +1488,7 @@ func file_types_types_proto_init() { } } } + file_types_types_proto_msgTypes[7].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/kv/Readme.md b/kv/Readme.md index 137772925..cba4f0538 100644 --- a/kv/Readme.md +++ b/kv/Readme.md @@ -6,7 +6,7 @@ Words "KV" and "DB" have special meaning here: - DB - object-oriented-style API to access data: Get/Put/Delete/WalkOverTable/MultiPut, managing transactions internally. -So, DB abstraction fits 95% times and leads to more maintainable code - because it's looks stateless. +So, the DB abstraction fits 95% of cases and leads to more maintainable code - because it looks stateless. About "key-value-style": Modern key-value databases don't provide Get/Put/Delete methods, because it's very hard-drive-unfriendly - it pushes developers do random-disk-access which @@ -57,7 +57,7 @@ kv_temporal.go - MultipleDatabases, Customization: `NewMDBX().Path(path).WithBucketsConfig(config).Open()` -- 1 Transaction object can be used only withing 1 goroutine. +- 1 Transaction object can be used only within 1 goroutine. - Only 1 write transaction can be active at a time (other will wait). - Unlimited read transactions can be active concurrently (not blocked by write transaction). @@ -86,7 +86,8 @@ if err != nil { - Methods .Bucket() and .Cursor(), can’t return nil, can't return error. - Bucket and Cursor - are interfaces - means different classes can satisfy it: for example `MdbxCursor` and `MdbxDupSortCursor` classes satisfy it. - If your are not familiar with "DupSort" concept, please read [dupsort.md](../docs/programmers_guide/dupsort.md) first. + If you are not familiar with the "DupSort" concept, please read [dupsort.md](https://github.com/ledgerwatch/erigon/blob/devel/docs/programmers_guide/dupsort.md) + - If Cursor returns err!=nil then key SHOULD be != nil (can be []byte{} for example).
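For orientation: the kv/Readme.md hunk above describes the DB/Tx split, and the kv/helpers.go and kv/iter hunks below all build on the same Range + HasNext/Next iteration pattern. A minimal sketch of that pattern, assuming an already-opened `kv.RoDB` and a hypothetical table name (neither the helper nor the table is defined in this patch):

```go
package kvexample // hypothetical example package, not part of the patch

import (
	"context"
	"fmt"

	"github.com/ledgerwatch/erigon-lib/kv"
)

// printRange walks [from, to) in the given table, following the
// Range + HasNext/Next pattern used by the tests in this patch.
// A nil `to` means "until end of table" per the kv conventions.
func printRange(db kv.RoDB, table string, from, to []byte) error {
	return db.View(context.Background(), func(tx kv.Tx) error {
		it, err := tx.Range(table, from, to)
		if err != nil {
			return err
		}
		for it.HasNext() {
			k, v, err := it.Next()
			if err != nil {
				return err
			}
			fmt.Printf("%x => %x\n", k, v)
		}
		return nil
	})
}
```

Per the [from, to) convention documented in the kv_interface.go hunk below, a nil `to` scans to the end of the table, and the new `kv.Unlim` constant (-1) plays the same role for the limit parameter of the RangeAscend/RangeDescend and RangeDupSort variants.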
diff --git a/kv/helpers.go b/kv/helpers.go index f3989fd48..4f0a42469 100644 --- a/kv/helpers.go +++ b/kv/helpers.go @@ -20,11 +20,12 @@ import ( "context" "fmt" "os" + "sync" + "sync/atomic" "time" "github.com/ledgerwatch/erigon-lib/common" "github.com/torquem-ch/mdbx-go/mdbx" - "go.uber.org/atomic" ) func DefaultPageSize() uint64 { @@ -133,12 +134,22 @@ func GetBool(tx Getter, bucket string, k []byte) (enabled bool, err error) { return bytes2bool(vBytes), nil } -func ReadAhead(ctx context.Context, db RoDB, progress *atomic.Bool, table string, from []byte, amount uint32) { - if db == nil || progress.Load() { - return +func ReadAhead(ctx context.Context, db RoDB, progress *atomic.Bool, table string, from []byte, amount uint32) (clean func()) { + if db == nil { + return func() {} } - progress.Store(true) + if ok := progress.CompareAndSwap(false, true); !ok { + return func() {} + } + ctx, cancel := context.WithCancel(ctx) + wg := sync.WaitGroup{} + clean = func() { + cancel() + wg.Wait() + } + wg.Add(1) go func() { + defer wg.Done() defer progress.Store(false) _ = db.View(ctx, func(tx Tx) error { c, err := tx.Cursor(table) @@ -161,6 +172,7 @@ func ReadAhead(ctx context.Context, db RoDB, progress *atomic.Bool, table string return nil }) }() + return clean } // FirstKey - candidate on move to kv.Tx interface diff --git a/kv/iter/helpers.go b/kv/iter/helpers.go index 1f3dd7519..05dc18a10 100644 --- a/kv/iter/helpers.go +++ b/kv/iter/helpers.go @@ -90,3 +90,25 @@ func (m *PairsWithErrorIter) Next() ([]byte, []byte, error) { m.i++ return []byte(fmt.Sprintf("%x", m.i)), []byte(fmt.Sprintf("%x", m.i)), nil } + +func Count[T any](s Unary[T]) (cnt int, err error) { + for s.HasNext() { + _, err := s.Next() + if err != nil { + return cnt, err + } + cnt++ + } + return cnt, err +} + +func CountDual[K, V any](s Dual[K, V]) (cnt int, err error) { + for s.HasNext() { + _, _, err := s.Next() + if err != nil { + return cnt, err + } + cnt++ + } + return cnt, err +} diff --git a/kv/iter/iter.go b/kv/iter/iter.go index 6a70e7de5..d722caed1 100644 --- a/kv/iter/iter.go +++ b/kv/iter/iter.go @@ -19,10 +19,15 @@ package iter import ( "bytes" + "github.com/ledgerwatch/erigon-lib/kv/order" "golang.org/x/exp/constraints" "golang.org/x/exp/slices" ) +type Closer interface { + Close() +} + var ( EmptyU64 = &EmptyUnary[uint64]{} EmptyKV = &EmptyDual[[]byte, []byte]{} @@ -90,10 +95,11 @@ type UnionKVIter struct { xHasNext, yHasNext bool xNextK, xNextV []byte yNextK, yNextV []byte + limit int err error } -func UnionKV(x, y KV) KV { +func UnionKV(x, y KV, limit int) KV { if x == nil && y == nil { return EmptyKV } @@ -103,12 +109,14 @@ func UnionKV(x, y KV) KV { if y == nil { return x } - m := &UnionKVIter{x: x, y: y} + m := &UnionKVIter{x: x, y: y, limit: limit} m.advanceX() m.advanceY() return m } -func (m *UnionKVIter) HasNext() bool { return m.xHasNext || m.yHasNext } +func (m *UnionKVIter) HasNext() bool { + return m.err != nil || (m.limit != 0 && m.xHasNext) || (m.limit != 0 && m.yHasNext) +} func (m *UnionKVIter) advanceX() { if m.err != nil { return @@ -131,6 +139,7 @@ func (m *UnionKVIter) Next() ([]byte, []byte, error) { if m.err != nil { return nil, nil, m.err } + m.limit-- if m.xHasNext && m.yHasNext { cmp := bytes.Compare(m.xNextK, m.yNextK) if cmp < 0 { @@ -156,17 +165,28 @@ func (m *UnionKVIter) Next() ([]byte, []byte, error) { m.advanceY() return k, v, err } -func (m *UnionKVIter) ToArray() (keys, values [][]byte, err error) { return ToKVArray(m) } -// UnionIter -type UnionIter[T constraints.Ordered] struct { 
+// func (m *UnionKVIter) ToArray() (keys, values [][]byte, err error) { return ToKVArray(m) } +func (m *UnionKVIter) Close() { + if x, ok := m.x.(Closer); ok { + x.Close() + } + if y, ok := m.y.(Closer); ok { + y.Close() + } +} + +// UnionUnary +type UnionUnary[T constraints.Ordered] struct { x, y Unary[T] + asc bool xHas, yHas bool xNextK, yNextK T err error + limit int } -func Union[T constraints.Ordered](x, y Unary[T]) Unary[T] { +func Union[T constraints.Ordered](x, y Unary[T], asc order.By, limit int) Unary[T] { if x == nil && y == nil { return &EmptyUnary[T]{} } @@ -176,16 +196,22 @@ func Union[T constraints.Ordered](x, y Unary[T]) Unary[T] { if y == nil { return x } - m := &UnionIter[T]{x: x, y: y} + if !x.HasNext() { + return y + } + if !y.HasNext() { + return x + } + m := &UnionUnary[T]{x: x, y: y, asc: bool(asc), limit: limit} m.advanceX() m.advanceY() return m } -func (m *UnionIter[T]) HasNext() bool { - return m.err != nil || m.xHas || m.yHas +func (m *UnionUnary[T]) HasNext() bool { + return m.err != nil || (m.limit != 0 && m.xHas) || (m.limit != 0 && m.yHas) } -func (m *UnionIter[T]) advanceX() { +func (m *UnionUnary[T]) advanceX() { if m.err != nil { return } @@ -194,7 +220,7 @@ func (m *UnionIter[T]) advanceX() { m.xNextK, m.err = m.x.Next() } } -func (m *UnionIter[T]) advanceY() { +func (m *UnionUnary[T]) advanceY() { if m.err != nil { return } @@ -203,12 +229,18 @@ func (m *UnionIter[T]) advanceY() { m.yNextK, m.err = m.y.Next() } } -func (m *UnionIter[T]) Next() (res T, err error) { + +func (m *UnionUnary[T]) less() bool { + return (m.asc && m.xNextK < m.yNextK) || (!m.asc && m.xNextK > m.yNextK) +} + +func (m *UnionUnary[T]) Next() (res T, err error) { if m.err != nil { return res, m.err } + m.limit-- if m.xHas && m.yHas { - if m.xNextK < m.yNextK { + if m.less() { k, err := m.xNextK, m.err m.advanceX() return k, err @@ -231,24 +263,35 @@ func (m *UnionIter[T]) Next() (res T, err error) { m.advanceY() return k, err } +func (m *UnionUnary[T]) Close() { + if x, ok := m.x.(Closer); ok { + x.Close() + } + if y, ok := m.y.(Closer); ok { + y.Close() + } +} // IntersectIter type IntersectIter[T constraints.Ordered] struct { x, y Unary[T] xHasNext, yHasNext bool xNextK, yNextK T + limit int err error } -func Intersect[T constraints.Ordered](x, y Unary[T]) Unary[T] { - if x == nil || y == nil { +func Intersect[T constraints.Ordered](x, y Unary[T], limit int) Unary[T] { + if x == nil || y == nil || !x.HasNext() || !y.HasNext() { return &EmptyUnary[T]{} } - m := &IntersectIter[T]{x: x, y: y} + m := &IntersectIter[T]{x: x, y: y, limit: limit} m.advance() return m } -func (m *IntersectIter[T]) HasNext() bool { return m.xHasNext && m.yHasNext } +func (m *IntersectIter[T]) HasNext() bool { + return m.err != nil || (m.limit != 0 && m.xHasNext && m.yHasNext) +} func (m *IntersectIter[T]) advance() { m.advanceX() m.advanceY() @@ -288,10 +331,22 @@ func (m *IntersectIter[T]) advanceY() { } } func (m *IntersectIter[T]) Next() (T, error) { + if m.err != nil { + return m.xNextK, m.err + } + m.limit-- k, err := m.xNextK, m.err m.advance() return k, err } +func (m *IntersectIter[T]) Close() { + if x, ok := m.x.(Closer); ok { + x.Close() + } + if y, ok := m.y.(Closer); ok { + y.Close() + } +} // TransformDualIter - analog `map` (in terms of map-filter-reduce pattern) type TransformDualIter[K, V any] struct { @@ -310,6 +365,33 @@ func (m *TransformDualIter[K, V]) Next() (K, V, error) { } return m.transform(k, v) } +func (m *TransformDualIter[K, v]) Close() { + if x, ok := m.it.(Closer); ok { + 
x.Close() + } +} + +type TransformKV2U64Iter[K, V []byte] struct { + it KV + transform func(K, V) (uint64, error) +} + +func TransformKV2U64[K, V []byte](it KV, transform func(K, V) (uint64, error)) *TransformKV2U64Iter[K, V] { + return &TransformKV2U64Iter[K, V]{it: it, transform: transform} +} +func (m *TransformKV2U64Iter[K, V]) HasNext() bool { return m.it.HasNext() } +func (m *TransformKV2U64Iter[K, V]) Next() (uint64, error) { + k, v, err := m.it.Next() + if err != nil { + return 0, err + } + return m.transform(k, v) +} +func (m *TransformKV2U64Iter[K, v]) Close() { + if x, ok := m.it.(Closer); ok { + x.Close() + } +} // FilterDualIter - analog `map` (in terms of map-filter-reduce pattern) // please avoid reading from Disk/DB more elements and then filter them. Better @@ -356,6 +438,11 @@ func (m *FilterDualIter[K, V]) Next() (k K, v V, err error) { m.advance() return k, v, err } +func (m *FilterDualIter[K, v]) Close() { + if x, ok := m.it.(Closer); ok { + x.Close() + } +} // FilterUnaryIter - analog `map` (in terms of map-filter-reduce pattern) // please avoid reading from Disk/DB more elements and then filter them. Better @@ -400,6 +487,11 @@ func (m *FilterUnaryIter[T]) Next() (k T, err error) { m.advance() return k, err } +func (m *FilterUnaryIter[T]) Close() { + if x, ok := m.it.(Closer); ok { + x.Close() + } +} // PaginatedIter - for remote-list pagination // diff --git a/kv/iter/iter_interface.go b/kv/iter/iter_interface.go index 5ea4f9e33..dbe0e6ba4 100644 --- a/kv/iter/iter_interface.go +++ b/kv/iter/iter_interface.go @@ -79,6 +79,24 @@ type ( func ToU64Arr(s U64) ([]uint64, error) { return ToArr[uint64](s) } func ToKVArray(s KV) ([][]byte, [][]byte, error) { return ToDualArray[[]byte, []byte](s) } +func ToArrU64Must(s U64) []uint64 { + arr, err := ToArr[uint64](s) + if err != nil { + panic(err) + } + return arr +} +func ToArrKVMust(s KV) ([][]byte, [][]byte) { + keys, values, err := ToDualArray[[]byte, []byte](s) + if err != nil { + panic(err) + } + return keys, values +} + +func CountU64(s U64) (int, error) { return Count[uint64](s) } +func CountKV(s KV) (int, error) { return CountDual[[]byte, []byte](s) } + func TransformKV(it KV, transform func(k, v []byte) ([]byte, []byte, error)) *TransformDualIter[[]byte, []byte] { return TransformDual[[]byte, []byte](it, transform) } diff --git a/kv/iter/iter_test.go b/kv/iter/iter_test.go index 0ef22946b..3cad8a467 100644 --- a/kv/iter/iter_test.go +++ b/kv/iter/iter_test.go @@ -25,22 +25,38 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/stretchr/testify/require" ) func TestUnion(t *testing.T) { t.Run("arrays", func(t *testing.T) { - s1 := iter.Array[uint64]([]uint64{1, 3, 4, 5, 6, 7}) + s1 := iter.Array[uint64]([]uint64{1, 3, 6, 7}) s2 := iter.Array[uint64]([]uint64{2, 3, 7, 8}) - s3 := iter.Union[uint64](s1, s2) + s3 := iter.Union[uint64](s1, s2, order.Asc, -1) res, err := iter.ToArr[uint64](s3) require.NoError(t, err) - require.Equal(t, []uint64{1, 2, 3, 4, 5, 6, 7, 8}, res) + require.Equal(t, []uint64{1, 2, 3, 6, 7, 8}, res) + + s1 = iter.ReverseArray[uint64]([]uint64{1, 3, 6, 7}) + s2 = iter.ReverseArray[uint64]([]uint64{2, 3, 7, 8}) + s3 = iter.Union[uint64](s1, s2, order.Desc, -1) + res, err = iter.ToArr[uint64](s3) + require.NoError(t, err) + require.Equal(t, []uint64{8, 7, 6, 3, 2, 1}, res) + + s1 = iter.ReverseArray[uint64]([]uint64{1, 3, 6, 7}) + s2 = 
iter.ReverseArray[uint64]([]uint64{2, 3, 7, 8}) + s3 = iter.Union[uint64](s1, s2, order.Desc, 2) + res, err = iter.ToArr[uint64](s3) + require.NoError(t, err) + require.Equal(t, []uint64{8, 7}, res) + }) t.Run("empty left", func(t *testing.T) { s1 := iter.EmptyU64 s2 := iter.Array[uint64]([]uint64{2, 3, 7, 8}) - s3 := iter.Union[uint64](s1, s2) + s3 := iter.Union[uint64](s1, s2, order.Asc, -1) res, err := iter.ToArr[uint64](s3) require.NoError(t, err) require.Equal(t, []uint64{2, 3, 7, 8}, res) @@ -48,7 +64,7 @@ func TestUnion(t *testing.T) { t.Run("empty right", func(t *testing.T) { s1 := iter.Array[uint64]([]uint64{1, 3, 4, 5, 6, 7}) s2 := iter.EmptyU64 - s3 := iter.Union[uint64](s1, s2) + s3 := iter.Union[uint64](s1, s2, order.Asc, -1) res, err := iter.ToArr[uint64](s3) require.NoError(t, err) require.Equal(t, []uint64{1, 3, 4, 5, 6, 7}, res) @@ -56,7 +72,7 @@ func TestUnion(t *testing.T) { t.Run("empty", func(t *testing.T) { s1 := iter.EmptyU64 s2 := iter.EmptyU64 - s3 := iter.Union[uint64](s1, s2) + s3 := iter.Union[uint64](s1, s2, order.Asc, -1) res, err := iter.ToArr[uint64](s3) require.NoError(t, err) require.Nil(t, res) @@ -76,7 +92,7 @@ func TestUnionPairs(t *testing.T) { _ = tx.Put(kv.PlainState, []byte{3}, []byte{9}) it, _ := tx.Range(kv.AccountsHistory, nil, nil) it2, _ := tx.Range(kv.PlainState, nil, nil) - keys, values, err := iter.ToKVArray(iter.UnionKV(it, it2)) + keys, values, err := iter.ToKVArray(iter.UnionKV(it, it2, -1)) require.NoError(err) require.Equal([][]byte{{1}, {2}, {3}, {4}}, keys) require.Equal([][]byte{{1}, {9}, {1}, {1}}, values) @@ -89,7 +105,7 @@ func TestUnionPairs(t *testing.T) { _ = tx.Put(kv.PlainState, []byte{3}, []byte{9}) it, _ := tx.Range(kv.AccountsHistory, nil, nil) it2, _ := tx.Range(kv.PlainState, nil, nil) - keys, _, err := iter.ToKVArray(iter.UnionKV(it, it2)) + keys, _, err := iter.ToKVArray(iter.UnionKV(it, it2, -1)) require.NoError(err) require.Equal([][]byte{{2}, {3}}, keys) }) @@ -102,7 +118,7 @@ func TestUnionPairs(t *testing.T) { _ = tx.Put(kv.AccountsHistory, []byte{4}, []byte{1}) it, _ := tx.Range(kv.AccountsHistory, nil, nil) it2, _ := tx.Range(kv.PlainState, nil, nil) - keys, _, err := iter.ToKVArray(iter.UnionKV(it, it2)) + keys, _, err := iter.ToKVArray(iter.UnionKV(it, it2, -1)) require.NoError(err) require.Equal([][]byte{{1}, {3}, {4}}, keys) }) @@ -112,7 +128,7 @@ func TestUnionPairs(t *testing.T) { defer tx.Rollback() it, _ := tx.Range(kv.AccountsHistory, nil, nil) it2, _ := tx.Range(kv.PlainState, nil, nil) - m := iter.UnionKV(it, it2) + m := iter.UnionKV(it, it2, -1) require.False(m.HasNext()) }) t.Run("error handling", func(t *testing.T) { @@ -121,7 +137,7 @@ func TestUnionPairs(t *testing.T) { defer tx.Rollback() it := iter.PairsWithError(10) it2 := iter.PairsWithError(12) - keys, _, err := iter.ToKVArray(iter.UnionKV(it, it2)) + keys, _, err := iter.ToKVArray(iter.UnionKV(it, it2, -1)) require.Equal("expected error at iteration: 10", err.Error()) require.Equal(10, len(keys)) }) @@ -131,21 +147,28 @@ func TestIntersect(t *testing.T) { t.Run("intersect", func(t *testing.T) { s1 := iter.Array[uint64]([]uint64{1, 3, 4, 5, 6, 7}) s2 := iter.Array[uint64]([]uint64{2, 3, 7}) - s3 := iter.Intersect[uint64](s1, s2) + s3 := iter.Intersect[uint64](s1, s2, -1) res, err := iter.ToArr[uint64](s3) require.NoError(t, err) require.Equal(t, []uint64{3, 7}, res) + + s1 = iter.Array[uint64]([]uint64{1, 3, 4, 5, 6, 7}) + s2 = iter.Array[uint64]([]uint64{2, 3, 7}) + s3 = iter.Intersect[uint64](s1, s2, 1) + res, err = iter.ToArr[uint64](s3) 
+ require.NoError(t, err) + require.Equal(t, []uint64{3}, res) }) t.Run("empty left", func(t *testing.T) { s1 := iter.EmptyU64 s2 := iter.Array[uint64]([]uint64{2, 3, 7, 8}) - s3 := iter.Intersect[uint64](s1, s2) + s3 := iter.Intersect[uint64](s1, s2, -1) res, err := iter.ToArr[uint64](s3) require.NoError(t, err) require.Nil(t, res) s2 = iter.Array[uint64]([]uint64{2, 3, 7, 8}) - s3 = iter.Intersect[uint64](nil, s2) + s3 = iter.Intersect[uint64](nil, s2, -1) res, err = iter.ToArr[uint64](s3) require.NoError(t, err) require.Nil(t, res) @@ -153,13 +176,13 @@ func TestIntersect(t *testing.T) { t.Run("empty right", func(t *testing.T) { s1 := iter.Array[uint64]([]uint64{1, 3, 4, 5, 6, 7}) s2 := iter.EmptyU64 - s3 := iter.Intersect[uint64](s1, s2) + s3 := iter.Intersect[uint64](s1, s2, -1) res, err := iter.ToArr[uint64](s3) require.NoError(t, err) require.Nil(t, nil, res) s1 = iter.Array[uint64]([]uint64{1, 3, 4, 5, 6, 7}) - s3 = iter.Intersect[uint64](s1, nil) + s3 = iter.Intersect[uint64](s1, nil, -1) res, err = iter.ToArr[uint64](s3) require.NoError(t, err) require.Nil(t, res) @@ -167,12 +190,12 @@ func TestIntersect(t *testing.T) { t.Run("empty", func(t *testing.T) { s1 := iter.EmptyU64 s2 := iter.EmptyU64 - s3 := iter.Intersect[uint64](s1, s2) + s3 := iter.Intersect[uint64](s1, s2, -1) res, err := iter.ToArr[uint64](s3) require.NoError(t, err) require.Nil(t, res) - s3 = iter.Intersect[uint64](nil, nil) + s3 = iter.Intersect[uint64](nil, nil, -1) res, err = iter.ToArr[uint64](s3) require.NoError(t, err) require.Nil(t, res) diff --git a/kv/kv_interface.go b/kv/kv_interface.go index 955a8db54..24892c285 100644 --- a/kv/kv_interface.go +++ b/kv/kv_interface.go @@ -26,34 +26,45 @@ import ( ) //Variables Naming: -// ts - TimeStamp (usually it's TxnNumber) // tx - Database Transaction // txn - Ethereum Transaction (and TxNum - is also number of Etherum Transaction) -// RoTx - Read-Only Database Transaction -// RwTx - Read-Write Database Transaction -// k - key -// v - value +// RoTx - Read-Only Database Transaction. RwTx - read-write +// k, v - key, value +// ts - TimeStamp. Usually it's Ethereum's TransactionNumber (auto-increment ID). Or BlockNumber. // Cursor - low-level mdbx-tide api to navigate over Table -// Iter - high-level iterator-like api over Table, InvertedIndex, History, Domain. Has less features than Cursor. +// Iter - high-level iterator-like api over Table/InvertedIndex/History/Domain. Has fewer features than Cursor. See package `iter` //Methods Naming: // Get: exact match of criterias -// Range: [from, to). Stream(from, nil) means [from, EndOfTable). Stream(nil, to) means [StartOfTable, to). -// Each: Range(from, nil) +// Range: [from, to). from=nil means StartOfTable, to=nil means EndOfTable, rangeLimit=-1 means Unlimited // Prefix: `Range(Table, prefix, kv.NextSubtree(prefix))` -// Limit: [from, INF) AND maximum N records -//Entity Naming: -// State: simple table in db -// InvertedIndex: supports range-scans -// History: can return value of key K as of given TimeStamp. Doesn't know about latest/current value of key K. Returns NIL if K not changed after TimeStamp. -// Domain: as History but also aware about latest/current value of key K. +//Abstraction Layers: +// LowLevel: +// 1. DB/Tx - low-level key-value database +// 2. Snapshots/Freeze - immutable files with historical data. May be downloaded at first App +// start or auto-generated by moving old data from DB to Snapshots. +// MediumLevel: +// 1. TemporalDB - abstracting DB+Snapshots. 
Target is: +// - provide 'time-travel' API for data: consistent snapshot of data as of given Timestamp. +// - to keep DB small - only for Hot/Recent data (can be updated/deleted by re-org). +// - using the following entities: +// - InvertedIndex: supports range-scans +// - History: can return value of key K as of given TimeStamp. Doesn't know about latest/current +// value of key K. Returns NIL if K not changed after TimeStamp. +// - Domain: as History but also aware of the latest/current value of key K. Can move +// cold (updated long time ago) parts of state from db to snapshots. + +// HighLevel: +// 1. Application - relies on TemporalDB (Ex: ExecutionLayer) or just DB (Ex: TxPool, Sentry, Downloader). const ReadersLimit = 32000 // MDBX_READERS_LIMIT=32767 +// const Unbounded []byte = nil +const Unlim int = -1 + var ( ErrAttemptToDeleteNonDeprecatedBucket = errors.New("only buckets from dbutils.ChaindataDeprecatedTables can be deleted") - ErrUnknownBucket = errors.New("unknown bucket. add it to dbutils.ChaindataTables") DbSize = metrics.NewCounter(`db_size`) //nolint TxLimit = metrics.NewCounter(`tx_limit`) //nolint @@ -70,37 +81,39 @@ var ( DbCommitEnding = metrics.GetOrCreateSummary(`db_commit_seconds{phase="ending"}`) //nolint DbCommitTotal = metrics.GetOrCreateSummary(`db_commit_seconds{phase="total"}`) //nolint - DbPgopsNewly = metrics.NewCounter(`db_pgops{phase="newly"}`) //nolint - DbPgopsCow = metrics.NewCounter(`db_pgops{phase="cow"}`) //nolint - DbPgopsClone = metrics.NewCounter(`db_pgops{phase="clone"}`) //nolint - DbPgopsSplit = metrics.NewCounter(`db_pgops{phase="split"}`) //nolint - DbPgopsMerge = metrics.NewCounter(`db_pgops{phase="merge"}`) //nolint - DbPgopsSpill = metrics.NewCounter(`db_pgops{phase="spill"}`) //nolint - DbPgopsUnspill = metrics.NewCounter(`db_pgops{phase="unspill"}`) //nolint - DbPgopsWops = metrics.NewCounter(`db_pgops{phase="wops"}`) //nolint - DbPgopsPrefault = metrics.NewCounter(`db_pgops{phase="prefault"}`) //nolint - DbPgopsMinicore = metrics.NewCounter(`db_pgops{phase="minicore"}`) //nolint - DbPgopsMsync = metrics.NewCounter(`db_pgops{phase="msync"}`) //nolint - DbPgopsFsync = metrics.NewCounter(`db_pgops{phase="fsync"}`) //nolint - DbMiLastPgNo = metrics.NewCounter(`db_mi_last_pgno`) //nolint - - DbGcWorkRtime = metrics.GetOrCreateSummary(`db_gc_seconds{phase="work_rtime"}`) //nolint - DbGcWorkRsteps = metrics.NewCounter(`db_gc{phase="work_rsteps"}`) //nolint - DbGcWorkRxpages = metrics.NewCounter(`db_gc{phase="work_rxpages"}`) //nolint - DbGcSelfRtime = metrics.GetOrCreateSummary(`db_gc_seconds{phase="self_rtime"}`) //nolint - DbGcSelfXtime = metrics.GetOrCreateSummary(`db_gc_seconds{phase="self_xtime"}`) //nolint - DbGcWorkXtime = metrics.GetOrCreateSummary(`db_gc_seconds{phase="work_xtime"}`) //nolint - DbGcSelfRsteps = metrics.NewCounter(`db_gc{phase="self_rsteps"}`) //nolint - DbGcWloops = metrics.NewCounter(`db_gc{phase="wloop"}`) //nolint - DbGcCoalescences = metrics.NewCounter(`db_gc{phase="coalescences"}`) //nolint - DbGcWipes = metrics.NewCounter(`db_gc{phase="wipes"}`) //nolint - DbGcFlushes = metrics.NewCounter(`db_gc{phase="flushes"}`) //nolint - DbGcKicks = metrics.NewCounter(`db_gc{phase="kicks"}`) //nolint - DbGcWorkMajflt = metrics.NewCounter(`db_gc{phase="work_majflt"}`) //nolint - DbGcSelfMajflt = metrics.NewCounter(`db_gc{phase="self_majflt"}`) //nolint - DbGcWorkCounter = metrics.NewCounter(`db_gc{phase="work_counter"}`) //nolint - DbGcSelfCounter = metrics.NewCounter(`db_gc{phase="self_counter"}`) //nolint - DbGcSelfXpages = 
metrics.NewCounter(`db_gc{phase="self_xpages"}`) //nolint + DbPgopsNewly = metrics.NewCounter(`db_pgops{phase="newly"}`) //nolint + DbPgopsCow = metrics.NewCounter(`db_pgops{phase="cow"}`) //nolint + DbPgopsClone = metrics.NewCounter(`db_pgops{phase="clone"}`) //nolint + DbPgopsSplit = metrics.NewCounter(`db_pgops{phase="split"}`) //nolint + DbPgopsMerge = metrics.NewCounter(`db_pgops{phase="merge"}`) //nolint + DbPgopsSpill = metrics.NewCounter(`db_pgops{phase="spill"}`) //nolint + DbPgopsUnspill = metrics.NewCounter(`db_pgops{phase="unspill"}`) //nolint + DbPgopsWops = metrics.NewCounter(`db_pgops{phase="wops"}`) //nolint + /* + DbPgopsPrefault = metrics.NewCounter(`db_pgops{phase="prefault"}`) //nolint + DbPgopsMinicore = metrics.NewCounter(`db_pgops{phase="minicore"}`) //nolint + DbPgopsMsync = metrics.NewCounter(`db_pgops{phase="msync"}`) //nolint + DbPgopsFsync = metrics.NewCounter(`db_pgops{phase="fsync"}`) //nolint + DbMiLastPgNo = metrics.NewCounter(`db_mi_last_pgno`) //nolint + + DbGcWorkRtime = metrics.GetOrCreateSummary(`db_gc_seconds{phase="work_rtime"}`) //nolint + DbGcWorkRsteps = metrics.NewCounter(`db_gc{phase="work_rsteps"}`) //nolint + DbGcWorkRxpages = metrics.NewCounter(`db_gc{phase="work_rxpages"}`) //nolint + DbGcSelfRtime = metrics.GetOrCreateSummary(`db_gc_seconds{phase="self_rtime"}`) //nolint + DbGcSelfXtime = metrics.GetOrCreateSummary(`db_gc_seconds{phase="self_xtime"}`) //nolint + DbGcWorkXtime = metrics.GetOrCreateSummary(`db_gc_seconds{phase="work_xtime"}`) //nolint + DbGcSelfRsteps = metrics.NewCounter(`db_gc{phase="self_rsteps"}`) //nolint + DbGcWloops = metrics.NewCounter(`db_gc{phase="wloop"}`) //nolint + DbGcCoalescences = metrics.NewCounter(`db_gc{phase="coalescences"}`) //nolint + DbGcWipes = metrics.NewCounter(`db_gc{phase="wipes"}`) //nolint + DbGcFlushes = metrics.NewCounter(`db_gc{phase="flushes"}`) //nolint + DbGcKicks = metrics.NewCounter(`db_gc{phase="kicks"}`) //nolint + DbGcWorkMajflt = metrics.NewCounter(`db_gc{phase="work_majflt"}`) //nolint + DbGcSelfMajflt = metrics.NewCounter(`db_gc{phase="self_majflt"}`) //nolint + DbGcWorkCounter = metrics.NewCounter(`db_gc{phase="work_counter"}`) //nolint + DbGcSelfCounter = metrics.NewCounter(`db_gc{phase="self_counter"}`) //nolint + DbGcSelfXpages = metrics.NewCounter(`db_gc{phase="self_xpages"}`) //nolint + */ //DbGcWorkPnlMergeTime = metrics.GetOrCreateSummary(`db_gc_pnl_seconds{phase="work_merge_time"}`) //nolint //DbGcWorkPnlMergeVolume = metrics.NewCounter(`db_gc_pnl{phase="work_merge_volume"}`) //nolint @@ -298,6 +311,7 @@ type StatelessRwTx interface { // - ReadOnly transactions do not lock goroutine to thread, RwTx does type Tx interface { StatelessReadTx + BucketMigratorRO // ID returns the identifier associated with this transaction. 
For a // read-only transaction, this corresponds to the snapshot being read; @@ -334,6 +348,9 @@ type Tx interface { // Prefix - is exactly Range(Table, prefix, kv.NextSubtree(prefix)) Prefix(table string, prefix []byte) (iter.KV, error) + // RangeDupSort - like Range but for fixed single key and iterating over range of values + RangeDupSort(table string, key []byte, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) + // --- High-Level methods: 1request -> 1page of values in response -> send next page request --- // Paginate(table string, fromPrefix, toPrefix []byte) (PairsStream, error) @@ -363,13 +380,17 @@ type RwTx interface { CollectMetrics() } +type BucketMigratorRO interface { + ListBuckets() ([]string, error) +} + // BucketMigrator used for buckets migration, don't use it in usual app code type BucketMigrator interface { + BucketMigratorRO DropBucket(string) error CreateBucket(string) error ExistsBucket(string) (bool, error) ClearBucket(string) error - ListBuckets() ([]string, error) } // Cursor - class for navigating through a database @@ -441,22 +462,21 @@ type RwCursorDupSort interface { AppendDup(key, value []byte) error // AppendDup - same as Append, but for sorted dup data } -var ErrNotSupported = errors.New("not supported") - // ---- Temporal part type ( Domain string History string InvertedIdx string ) -type TemporalRoDb interface { +type TemporalRoDB interface { RoDB BeginTemporalRo(ctx context.Context) (TemporalTx, error) ViewTemporal(ctx context.Context, f func(tx TemporalTx) error) error } type TemporalTx interface { Tx - DomainGet(name Domain, k, k2 []byte, ts uint64) (v []byte, ok bool, err error) + DomainGet(name Domain, k, k2 []byte) (v []byte, ok bool, err error) + DomainGetAsOf(name Domain, k, k2 []byte, ts uint64) (v []byte, ok bool, err error) HistoryGet(name History, k []byte, ts uint64) (v []byte, ok bool, err error) // IndexRange - return iterator over range of inverted index for given key `k` @@ -468,10 +488,10 @@ type TemporalTx interface { // Example: IndexRange("IndexName", -1, -1, order.Asc, 10) IndexRange(name InvertedIdx, k []byte, fromTs, toTs int, asc order.By, limit int) (timestamps iter.U64, err error) HistoryRange(name History, fromTs, toTs int, asc order.By, limit int) (it iter.KV, err error) - DomainRange(name Domain, k1, k2 []byte, asOfTs uint64, asc order.By, limit int) (it iter.KV, err error) + DomainRange(name Domain, fromKey, toKey []byte, ts uint64, asc order.By, limit int) (it iter.KV, err error) } type TemporalRwDB interface { RwDB - TemporalRoDb + TemporalRoDB } diff --git a/kv/kvcache/cache.go b/kv/kvcache/cache.go index 1d3d81f06..9ffefba70 100644 --- a/kv/kvcache/cache.go +++ b/kv/kvcache/cache.go @@ -23,12 +23,12 @@ import ( "hash" "sort" "sync" + "sync/atomic" "time" "github.com/VictoriaMetrics/metrics" "github.com/c2h5oh/datasize" btree2 "github.com/tidwall/btree" - "go.uber.org/atomic" "golang.org/x/crypto/sha3" "github.com/ledgerwatch/erigon-lib/common" @@ -271,7 +271,7 @@ func (c *Coherent) OnNewBlock(stateChanges *remote.StateChangeBatch) { c.lock.Lock() defer c.lock.Unlock() c.waitExceededCount.Store(0) // reset the circuit breaker - id := stateChanges.StateVersionID + id := stateChanges.StateVersionId r := c.advanceRoot(id) for _, sc := range stateChanges.ChangeBatch { for i := range sc.Changes { @@ -318,7 +318,7 @@ func (c *Coherent) OnNewBlock(stateChanges *remote.StateChangeBatch) { } } - switched := r.readyChanClosed.CAS(false, true) + switched := r.readyChanClosed.CompareAndSwap(false, true) if 
switched { close(r.ready) //broadcast } @@ -356,7 +356,7 @@ func (c *Coherent) View(ctx context.Context, tx kv.Tx) (CacheView, error) { return nil, fmt.Errorf("kvcache rootNum=%x, %w", tx.ViewID(), ctx.Err()) case <-time.After(c.cfg.NewBlockWait): //TODO: switch to timer to save resources c.timeout.Inc() - c.waitExceededCount.Inc() + c.waitExceededCount.Add(1) //log.Info("timeout", "db_id", id, "has_btree", r.cache != nil) } return &CoherentView{stateVersionID: id, tx: tx, cache: c}, nil diff --git a/kv/kvcache/cache_test.go b/kv/kvcache/cache_test.go index 520d86b31..0f119831a 100644 --- a/kv/kvcache/cache_test.go +++ b/kv/kvcache/cache_test.go @@ -126,7 +126,7 @@ func TestEviction(t *testing.T) { require.Equal(0, c.stateEvict.Len()) //require.Equal(c.roots[c.latestViewID].cache.Len(), c.stateEvict.Len()) c.OnNewBlock(&remote.StateChangeBatch{ - StateVersionID: id + 1, + StateVersionId: id + 1, ChangeBatch: []*remote.StateChange{ { Direction: remote.Direction_FORWARD, @@ -228,7 +228,7 @@ func TestAPI(t *testing.T) { txID3 := put(k1[:], []byte{3}) // even if core already on block 3 c.OnNewBlock(&remote.StateChangeBatch{ - StateVersionID: txID2, + StateVersionId: txID2, PendingBlockBaseFee: 1, ChangeBatch: []*remote.StateChange{ { @@ -259,7 +259,7 @@ func TestAPI(t *testing.T) { res5, res6 := get(k1, txID3), get(k2, txID3) // will see View of transaction 3, even if notification has not enough changes c.OnNewBlock(&remote.StateChangeBatch{ - StateVersionID: txID3, + StateVersionId: txID3, PendingBlockBaseFee: 1, ChangeBatch: []*remote.StateChange{ { @@ -291,7 +291,7 @@ func TestAPI(t *testing.T) { txID4 := put(k1[:], []byte{2}) _ = txID4 c.OnNewBlock(&remote.StateChangeBatch{ - StateVersionID: txID4, + StateVersionId: txID4, PendingBlockBaseFee: 1, ChangeBatch: []*remote.StateChange{ { @@ -309,7 +309,7 @@ func TestAPI(t *testing.T) { fmt.Printf("-----4\n") txID5 := put(k1[:], []byte{4}) // reorg to new chain c.OnNewBlock(&remote.StateChangeBatch{ - StateVersionID: txID4, + StateVersionId: txID4, PendingBlockBaseFee: 1, ChangeBatch: []*remote.StateChange{ { diff --git a/kv/mdbx/kv_abstract_test.go b/kv/mdbx/kv_abstract_test.go index ff721fc63..6dbec07d9 100644 --- a/kv/mdbx/kv_abstract_test.go +++ b/kv/mdbx/kv_abstract_test.go @@ -144,7 +144,7 @@ func TestManagedTx(t *testing.T) { db := db msg := fmt.Sprintf("%T", db) switch db.(type) { - case *remotedb.RemoteKV: + case *remotedb.DB: default: continue } diff --git a/kv/mdbx/kv_mdbx.go b/kv/mdbx/kv_mdbx.go index 82132e7ed..a722d6892 100644 --- a/kv/mdbx/kv_mdbx.go +++ b/kv/mdbx/kv_mdbx.go @@ -26,6 +26,7 @@ import ( "sort" "strings" "sync" + "sync/atomic" "time" "github.com/c2h5oh/datasize" @@ -33,10 +34,11 @@ import ( "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/iter" + "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/log/v3" "github.com/pbnjay/memory" "github.com/torquem-ch/mdbx-go/mdbx" - "go.uber.org/atomic" + "golang.org/x/exp/maps" "golang.org/x/sync/semaphore" ) @@ -186,6 +188,27 @@ func (opts MdbxOpts) WithTableCfg(f TableCfgFunc) MdbxOpts { return opts } +var pathDbMap = map[string]kv.RoDB{} +var pathDbMapLock sync.Mutex + +func addToPathDbMap(path string, db kv.RoDB) { + pathDbMapLock.Lock() + defer pathDbMapLock.Unlock() + pathDbMap[path] = db +} + +func removeFromPathDbMap(path string) { + pathDbMapLock.Lock() + defer pathDbMapLock.Unlock() + delete(pathDbMap, path) +} + +func PathDbMap() map[string]kv.RoDB { + pathDbMapLock.Lock() + 
defer pathDbMapLock.Unlock() + return maps.Clone(pathDbMap) +} + func (opts MdbxOpts) Open() (kv.RwDB, error) { if dbg.WriteMap() { opts = opts.WriteMap() //nolint @@ -368,6 +391,8 @@ func (opts MdbxOpts) Open() (kv.RwDB, error) { } } + db.path = opts.path + addToPathDbMap(opts.path, db) return db, nil } @@ -388,6 +413,7 @@ type MdbxKV struct { opts MdbxOpts txSize uint64 closed atomic.Bool + path string } func (db *MdbxKV) PageSize() uint64 { return db.opts.pageSize } @@ -432,10 +458,9 @@ func (db *MdbxKV) openDBIs(buckets []string) error { // Close closes db // All transactions must be closed before closing the database. func (db *MdbxKV) Close() { - if db.closed.Load() { + if ok := db.closed.CompareAndSwap(false, true); !ok { return } - db.closed.Store(true) db.wg.Wait() db.env.Close() db.env = nil @@ -445,6 +470,7 @@ func (db *MdbxKV) Close() { db.log.Warn("failed to remove in-mem db file", "err", err) } } + removeFromPathDbMap(db.path) } func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { @@ -614,7 +640,7 @@ func (tx *MdbxTx) ListBuckets() ([]string, error) { } func (db *MdbxKV) View(ctx context.Context, f func(tx kv.Tx) error) (err error) { - // can't use db.evn.View method - because it calls commit for read transactions - it conflicts with write transactions. + // can't use db.env.View method - because it calls commit for read transactions - it conflicts with write transactions. tx, err := db.BeginRo(ctx) if err != nil { return err @@ -1492,7 +1518,7 @@ func (c *MdbxDupSortCursor) SeekBothRange(key, value []byte) ([]byte, error) { if mdbx.IsNotFound(err) { return nil, nil } - return nil, fmt.Errorf("in SeekBothRange: %w", err) + return nil, fmt.Errorf("in SeekBothRange, table=%s: %w", c.bucketName, err) } return v, nil } @@ -1662,51 +1688,40 @@ func (tx *MdbxTx) Prefix(table string, prefix []byte) (iter.KV, error) { return tx.Range(table, prefix, nextPrefix) } -// func (tx *MdbxTx) Stream(table string, fromPrefix, toPrefix []byte) (iter.KV, error) { -// return tx.StreamAscend(table, fromPrefix, toPrefix, -1) -// } -// -// func (tx *MdbxTx) StreamAscend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { -// return tx.rangeOrderLimit(table, fromPrefix, toPrefix, true, limit) -// } -// -// func (tx *MdbxTx) StreamDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { -// return tx.rangeOrderLimit(table, fromPrefix, toPrefix, false, limit) -// } func (tx *MdbxTx) Range(table string, fromPrefix, toPrefix []byte) (iter.KV, error) { return tx.RangeAscend(table, fromPrefix, toPrefix, -1) } func (tx *MdbxTx) RangeAscend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { - return tx.rangeOrderLimit(table, fromPrefix, toPrefix, true, limit) + return tx.rangeOrderLimit(table, fromPrefix, toPrefix, order.Asc, limit) } func (tx *MdbxTx) RangeDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { - return tx.rangeOrderLimit(table, fromPrefix, toPrefix, false, limit) + return tx.rangeOrderLimit(table, fromPrefix, toPrefix, order.Desc, limit) } type cursor2iter struct { c kv.Cursor fromPrefix, toPrefix, nextK, nextV []byte err error - orderAscend bool + orderAscend order.By limit int64 ctx context.Context } -func (tx *MdbxTx) rangeOrderLimit(table string, fromPrefix, toPrefix []byte, orderAscend bool, limit int) (*cursor2iter, error) { +func (tx *MdbxTx) rangeOrderLimit(table string, fromPrefix, toPrefix []byte, orderAscend order.By, limit int) (*cursor2iter, error) { s := 
&cursor2iter{ctx: tx.ctx, fromPrefix: fromPrefix, toPrefix: toPrefix, orderAscend: orderAscend, limit: int64(limit)} tx.streams = append(tx.streams, s) return s.init(table, tx) } func (s *cursor2iter) init(table string, tx kv.Tx) (*cursor2iter, error) { if s.orderAscend && s.fromPrefix != nil && s.toPrefix != nil && bytes.Compare(s.fromPrefix, s.toPrefix) >= 0 { - return nil, fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", s.fromPrefix, s.toPrefix) + return s, fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", s.fromPrefix, s.toPrefix) } if !s.orderAscend && s.fromPrefix != nil && s.toPrefix != nil && bytes.Compare(s.fromPrefix, s.toPrefix) <= 0 { - return nil, fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", s.toPrefix, s.fromPrefix) + return s, fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", s.toPrefix, s.fromPrefix) } c, err := tx.Cursor(table) if err != nil { - return nil, err + return s, err } s.c = c @@ -1725,7 +1740,14 @@ func (s *cursor2iter) init(table string, tx kv.Tx) (*cursor2iter, error) { } else { // seek exactly to given key or previous one s.nextK, s.nextV, s.err = s.c.SeekExact(s.fromPrefix) - if s.nextK == nil { // no such key + if s.err != nil { + return s, s.err + } + if s.nextK != nil { // go to last value of this key + if casted, ok := s.c.(kv.CursorDupSort); ok { + s.nextV, s.err = casted.LastDup() + } + } else { // key not found, go to prev one s.nextK, s.nextV, s.err = s.c.Prev() } return s, s.err @@ -1754,7 +1776,7 @@ func (s *cursor2iter) HasNext() bool { //Asc: [from, to) AND from > to //Desc: [from, to) AND from < to cmp := bytes.Compare(s.nextK, s.toPrefix) - return (s.orderAscend && cmp < 0) || (!s.orderAscend && cmp > 0) + return (bool(s.orderAscend) && cmp < 0) || (!bool(s.orderAscend) && cmp > 0) } func (s *cursor2iter) Next() (k, v []byte, err error) { select { @@ -1772,6 +1794,104 @@ func (s *cursor2iter) Next() (k, v []byte, err error) { return k, v, err } +func (tx *MdbxTx) RangeDupSort(table string, key []byte, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) { + s := &cursorDup2iter{ctx: tx.ctx, key: key, fromPrefix: fromPrefix, toPrefix: toPrefix, orderAscend: bool(asc), limit: int64(limit)} + tx.streams = append(tx.streams, s) + return s.init(table, tx) +} + +type cursorDup2iter struct { + c kv.CursorDupSort + key []byte + fromPrefix, toPrefix, nextV []byte + err error + orderAscend bool + limit int64 + ctx context.Context +} + +func (s *cursorDup2iter) init(table string, tx kv.Tx) (*cursorDup2iter, error) { + if s.orderAscend && s.fromPrefix != nil && s.toPrefix != nil && bytes.Compare(s.fromPrefix, s.toPrefix) >= 0 { + return s, fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", s.fromPrefix, s.toPrefix) + } + if !s.orderAscend && s.fromPrefix != nil && s.toPrefix != nil && bytes.Compare(s.fromPrefix, s.toPrefix) <= 0 { + return s, fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", s.toPrefix, s.fromPrefix) + } + c, err := tx.CursorDupSort(table) + if err != nil { + return s, err + } + s.c = c + k, _, err := c.SeekExact(s.key) + if err != nil { + return s, err + } + if k == nil { + return s, nil + } + + if s.fromPrefix == nil { // no initial position + if s.orderAscend { + s.nextV, s.err = s.c.FirstDup() + } else { + s.nextV, s.err = s.c.LastDup() + } + return s, s.err + } + + if s.orderAscend { + s.nextV, s.err = s.c.SeekBothRange(s.key, s.fromPrefix) + return s, s.err + } else { + // seek exactly to given key or previous one + _, s.nextV, s.err = 
s.c.SeekBothExact(s.key, s.fromPrefix) + if s.nextV == nil { // no such key + _, s.nextV, s.err = s.c.PrevDup() + } + return s, s.err + } +} + +func (s *cursorDup2iter) Close() { + if s.c != nil { + s.c.Close() + } +} +func (s *cursorDup2iter) HasNext() bool { + if s.err != nil { // always true, then .Next() call will return this error + return true + } + if s.limit == 0 { // limit reached + return false + } + if s.nextV == nil { // EndOfTable + return false + } + if s.toPrefix == nil { // s.nextK == nil check is above + return true + } + + //Asc: [from, to) AND from > to + //Desc: [from, to) AND from < to + cmp := bytes.Compare(s.nextV, s.toPrefix) + return (s.orderAscend && cmp < 0) || (!s.orderAscend && cmp > 0) +} +func (s *cursorDup2iter) Next() (k, v []byte, err error) { + select { + case <-s.ctx.Done(): + return nil, nil, s.ctx.Err() + default: + } + s.limit-- + v, err = s.nextV, s.err + if s.orderAscend { + _, s.nextV, s.err = s.c.NextDup() + } else { + _, s.nextV, s.err = s.c.PrevDup() + } + return s.key, v, err +} + func (tx *MdbxTx) ForAmount(bucket string, fromPrefix []byte, amount uint32, walker func(k, v []byte) error) error { if amount == 0 { return nil diff --git a/kv/mdbx/kv_mdbx_test.go b/kv/mdbx/kv_mdbx_test.go index 900f7b0ea..6fac3a222 100644 --- a/kv/mdbx/kv_mdbx_test.go +++ b/kv/mdbx/kv_mdbx_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -125,12 +126,84 @@ func TestRange(t *testing.T) { _, tx, _ := BaseCase(t) //[from, to) - it, err := tx.RangeDescend("Table", []byte("key3"), []byte("key1"), -1) + it, err := tx.RangeDescend("Table", []byte("key3"), []byte("key1"), kv.Unlim) require.NoError(t, err) require.True(t, it.HasNext()) k, v, err := it.Next() require.NoError(t, err) require.Equal(t, "key3", string(k)) + require.Equal(t, "value3.3", string(v)) + + require.True(t, it.HasNext()) + k, v, err = it.Next() + require.NoError(t, err) + require.Equal(t, "key3", string(k)) + require.Equal(t, "value3.1", string(v)) + + require.False(t, it.HasNext()) + + it, err = tx.RangeDescend("Table", nil, nil, 2) + require.NoError(t, err) + + cnt := 0 + for it.HasNext() { + _, _, err := it.Next() + require.NoError(t, err) + cnt++ + } + require.Equal(t, 2, cnt) + }) +} + +func TestRangeDupSort(t *testing.T) { + t.Run("Asc", func(t *testing.T) { + _, tx, _ := BaseCase(t) + + //[from, to) + it, err := tx.RangeDupSort("Table", []byte("key1"), nil, nil, order.Asc, -1) + require.NoError(t, err) + require.True(t, it.HasNext()) + k, v, err := it.Next() + require.NoError(t, err) + require.Equal(t, "key1", string(k)) + require.Equal(t, "value1.1", string(v)) + + require.True(t, it.HasNext()) + k, v, err = it.Next() + require.NoError(t, err) + require.Equal(t, "key1", string(k)) + require.Equal(t, "value1.3", string(v)) + + require.False(t, it.HasNext()) + require.False(t, it.HasNext()) + + // [from, nil) means [from, INF) + it, err = tx.Range("Table", []byte("key1"), nil) + require.NoError(t, err) + cnt := 0 + for it.HasNext() { + _, _, err := it.Next() + require.NoError(t, err) + cnt++ + } + require.Equal(t, 4, cnt) + }) + t.Run("Desc", func(t *testing.T) { + _, tx, _ := BaseCase(t) + + //[from, to) + it, err := tx.RangeDupSort("Table", []byte("key3"), nil, nil, order.Desc, -1) + require.NoError(t, err) + require.True(t, it.HasNext()) + k, v, err := it.Next() + require.NoError(t, err) + require.Equal(t, 
"key3", string(k)) + require.Equal(t, "value3.3", string(v)) + + require.True(t, it.HasNext()) + k, v, err = it.Next() + require.NoError(t, err) + require.Equal(t, "key3", string(k)) require.Equal(t, "value3.1", string(v)) require.False(t, it.HasNext()) diff --git a/kv/memdb/memory_database.go b/kv/memdb/memory_database.go index e6476c5ab..57b90680d 100644 --- a/kv/memdb/memory_database.go +++ b/kv/memdb/memory_database.go @@ -25,23 +25,25 @@ import ( "github.com/ledgerwatch/log/v3" ) -func New() kv.RwDB { - return mdbx.NewMDBX(log.New()).InMem("").MustOpen() +func New(tmpDir string) kv.RwDB { + return mdbx.NewMDBX(log.New()).InMem(tmpDir).MustOpen() } -func NewPoolDB() kv.RwDB { - return mdbx.NewMDBX(log.New()).InMem("").Label(kv.TxPoolDB).WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return kv.TxpoolTablesCfg }).MustOpen() +func NewPoolDB(tmpDir string) kv.RwDB { + return mdbx.NewMDBX(log.New()).InMem(tmpDir).Label(kv.TxPoolDB).WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return kv.TxpoolTablesCfg }).MustOpen() } -func NewDownloaderDB() kv.RwDB { - return mdbx.NewMDBX(log.New()).InMem("").Label(kv.DownloaderDB).WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return kv.DownloaderTablesCfg }).MustOpen() +func NewDownloaderDB(tmpDir string) kv.RwDB { + return mdbx.NewMDBX(log.New()).InMem(tmpDir).Label(kv.DownloaderDB).WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return kv.DownloaderTablesCfg }).MustOpen() } -func NewSentryDB() kv.RwDB { - return mdbx.NewMDBX(log.New()).InMem("").Label(kv.SentryDB).WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return kv.SentryTablesCfg }).MustOpen() +func NewSentryDB(tmpDir string) kv.RwDB { + return mdbx.NewMDBX(log.New()).InMem(tmpDir).Label(kv.SentryDB).WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return kv.SentryTablesCfg }).MustOpen() } func NewTestDB(tb testing.TB) kv.RwDB { tb.Helper() - db := New() + tmpDir := tb.TempDir() + tb.Helper() + db := New(tmpDir) tb.Cleanup(db.Close) return db } @@ -68,28 +70,32 @@ func BeginRo(tb testing.TB, db kv.RoDB) kv.Tx { func NewTestPoolDB(tb testing.TB) kv.RwDB { tb.Helper() - db := NewPoolDB() + tmpDir := tb.TempDir() + db := NewPoolDB(tmpDir) tb.Cleanup(db.Close) return db } func NewTestDownloaderDB(tb testing.TB) kv.RwDB { tb.Helper() - db := NewDownloaderDB() + tmpDir := tb.TempDir() + db := NewDownloaderDB(tmpDir) tb.Cleanup(db.Close) return db } func NewTestSentrylDB(tb testing.TB) kv.RwDB { tb.Helper() - db := NewPoolDB() + tmpDir := tb.TempDir() + db := NewPoolDB(tmpDir) tb.Cleanup(db.Close) return db } func NewTestTx(tb testing.TB) (kv.RwDB, kv.RwTx) { tb.Helper() - db := New() + tmpDir := tb.TempDir() + db := New(tmpDir) tb.Cleanup(db.Close) tx, err := db.BeginRw(context.Background()) if err != nil { diff --git a/kv/memdb/memory_mutation.go b/kv/memdb/memory_mutation.go index 7bd80f130..4706a8c0a 100644 --- a/kv/memdb/memory_mutation.go +++ b/kv/memdb/memory_mutation.go @@ -18,6 +18,7 @@ import ( "context" "github.com/ledgerwatch/erigon-lib/kv/iter" + "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/kv" @@ -231,6 +232,9 @@ func (m *MemoryMutation) RangeAscend(table string, fromPrefix, toPrefix []byte, func (m *MemoryMutation) RangeDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { panic("please implement me") } +func (m *MemoryMutation) RangeDupSort(table string, key []byte, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) { + panic("please implement me") +} func (m *MemoryMutation) 
ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error { c, err := m.Cursor(bucket) diff --git a/kv/rawdbv3/txnum.go b/kv/rawdbv3/txnum.go index d6ad58483..f01bae476 100644 --- a/kv/rawdbv3/txnum.go +++ b/kv/rawdbv3/txnum.go @@ -154,6 +154,38 @@ func (txNums) FindBlockNum(tx kv.Tx, endTxNumMinimax uint64) (ok bool, blockNum } return true, blockNum, nil } +func (txNums) Last(tx kv.Tx) (blockNum, txNum uint64, err error) { + c, err := tx.Cursor(kv.MaxTxNum) + if err != nil { + return 0, 0, err + } + defer c.Close() + + lastK, lastV, err := c.Last() + if err != nil { + return 0, 0, err + } + if lastK == nil || lastV == nil { + return 0, 0, nil + } + return binary.BigEndian.Uint64(lastK), binary.BigEndian.Uint64(lastV), nil +} +func (txNums) First(tx kv.Tx) (blockNum, txNum uint64, err error) { + c, err := tx.Cursor(kv.MaxTxNum) + if err != nil { + return 0, 0, err + } + defer c.Close() + + lastK, lastV, err := c.First() + if err != nil { + return 0, 0, err + } + if lastK == nil || lastV == nil { + return 0, 0, nil + } + return binary.BigEndian.Uint64(lastK), binary.BigEndian.Uint64(lastV), nil +} // LastKey func LastKey(tx kv.Tx, table string) ([]byte, error) { diff --git a/kv/remotedb/kv_remote.go b/kv/remotedb/kv_remote.go index 017ff7047..0e805e26e 100644 --- a/kv/remotedb/kv_remote.go +++ b/kv/remotedb/kv_remote.go @@ -46,7 +46,10 @@ type remoteOpts struct { version gointerfaces.Version } -type RemoteKV struct { +var _ kv.TemporalTx = (*tx)(nil) +var _ kv.TemporalRwDB = (*DB)(nil) + +type DB struct { remoteKV remote.KVClient log log.Logger buckets kv.TableCfg @@ -54,11 +57,11 @@ type RemoteKV struct { opts remoteOpts } -type remoteTx struct { +type tx struct { stream remote.KV_TxClient ctx context.Context streamCancelFn context.CancelFunc - db *RemoteKV + db *DB statelessCursors map[string]kv.Cursor cursors []*remoteCursor streams []kv.Closer @@ -69,7 +72,7 @@ type remoteTx struct { type remoteCursor struct { ctx context.Context stream remote.KV_TxClient - tx *remoteTx + tx *tx bucketName string bucketCfg kv.TableCfgItem id uint32 @@ -88,13 +91,13 @@ func (opts remoteOpts) WithBucketsConfig(f mdbx.TableCfgFunc) remoteOpts { return opts } -func (opts remoteOpts) Open() (*RemoteKV, error) { +func (opts remoteOpts) Open() (*DB, error) { targetSemCount := int64(runtime.GOMAXPROCS(-1)) - 1 if targetSemCount <= 1 { targetSemCount = 2 } - db := &RemoteKV{ + db := &DB{ opts: opts, remoteKV: opts.remoteKV, log: log.New("remote_db", opts.DialAddress), @@ -124,11 +127,11 @@ func NewRemote(v gointerfaces.Version, logger log.Logger, remoteKV remote.KVClie return remoteOpts{bucketsCfg: mdbx.WithChaindataTables, version: v, log: logger, remoteKV: remoteKV} } -func (db *RemoteKV) PageSize() uint64 { panic("not implemented") } -func (db *RemoteKV) ReadOnly() bool { return true } -func (db *RemoteKV) AllBuckets() kv.TableCfg { return db.buckets } +func (db *DB) PageSize() uint64 { panic("not implemented") } +func (db *DB) ReadOnly() bool { return true } +func (db *DB) AllBuckets() kv.TableCfg { return db.buckets } -func (db *RemoteKV) EnsureVersionCompatibility() bool { +func (db *DB) EnsureVersionCompatibility() bool { versionReply, err := db.remoteKV.Version(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true)) if err != nil { db.log.Error("getting Version", "error", err) @@ -144,9 +147,9 @@ func (db *RemoteKV) EnsureVersionCompatibility() bool { return true } -func (db *RemoteKV) Close() {} +func (db *DB) Close() {} -func (db *RemoteKV) BeginRo(ctx context.Context) (txn 
kv.Tx, err error) { +func (db *DB) BeginRo(ctx context.Context) (txn kv.Tx, err error) { select { case <-ctx.Done(): return nil, ctx.Err() @@ -175,49 +178,68 @@ func (db *RemoteKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { streamCancelFn() return nil, err } - return &remoteTx{ctx: ctx, db: db, stream: stream, streamCancelFn: streamCancelFn, viewID: msg.ViewID, id: msg.TxID}, nil + return &tx{ctx: ctx, db: db, stream: stream, streamCancelFn: streamCancelFn, viewID: msg.ViewId, id: msg.TxId}, nil } - -func (db *RemoteKV) BeginRw(ctx context.Context) (kv.RwTx, error) { +func (db *DB) BeginTemporalRo(ctx context.Context) (kv.TemporalTx, error) { + t, err := db.BeginRo(ctx) + if err != nil { + return nil, err + } + return t.(kv.TemporalTx), nil +} +func (db *DB) BeginRw(ctx context.Context) (kv.RwTx, error) { return nil, fmt.Errorf("remote db provider doesn't support .BeginRw method") } -func (db *RemoteKV) BeginRwNosync(ctx context.Context) (kv.RwTx, error) { +func (db *DB) BeginRwNosync(ctx context.Context) (kv.RwTx, error) { return nil, fmt.Errorf("remote db provider doesn't support .BeginRw method") } +func (db *DB) BeginTemporalRw(ctx context.Context) (kv.RwTx, error) { + return nil, fmt.Errorf("remote db provider doesn't support .BeginTemporalRw method") +} +func (db *DB) BeginTemporalRwNosync(ctx context.Context) (kv.RwTx, error) { + return nil, fmt.Errorf("remote db provider doesn't support .BeginTemporalRwNosync method") +} -func (db *RemoteKV) View(ctx context.Context, f func(tx kv.Tx) error) (err error) { +func (db *DB) View(ctx context.Context, f func(tx kv.Tx) error) (err error) { tx, err := db.BeginRo(ctx) if err != nil { return err } defer tx.Rollback() - + return f(tx) +} +func (db *DB) ViewTemporal(ctx context.Context, f func(tx kv.TemporalTx) error) (err error) { + tx, err := db.BeginTemporalRo(ctx) + if err != nil { + return err + } + defer tx.Rollback() return f(tx) } -func (db *RemoteKV) Update(ctx context.Context, f func(tx kv.RwTx) error) (err error) { +func (db *DB) Update(ctx context.Context, f func(tx kv.RwTx) error) (err error) { return fmt.Errorf("remote db provider doesn't support .Update method") } -func (db *RemoteKV) UpdateNosync(ctx context.Context, f func(tx kv.RwTx) error) (err error) { - return fmt.Errorf("remote db provider doesn't support .Update method") +func (db *DB) UpdateNosync(ctx context.Context, f func(tx kv.RwTx) error) (err error) { + return fmt.Errorf("remote db provider doesn't support .UpdateNosync method") } -func (tx *remoteTx) ViewID() uint64 { return tx.viewID } -func (tx *remoteTx) CollectMetrics() {} -func (tx *remoteTx) IncrementSequence(bucket string, amount uint64) (uint64, error) { +func (tx *tx) ViewID() uint64 { return tx.viewID } +func (tx *tx) CollectMetrics() {} +func (tx *tx) IncrementSequence(bucket string, amount uint64) (uint64, error) { panic("not implemented yet") } -func (tx *remoteTx) ReadSequence(bucket string) (uint64, error) { +func (tx *tx) ReadSequence(bucket string) (uint64, error) { panic("not implemented yet") } -func (tx *remoteTx) Append(bucket string, k, v []byte) error { panic("no write methods") } -func (tx *remoteTx) AppendDup(bucket string, k, v []byte) error { panic("no write methods") } +func (tx *tx) Append(bucket string, k, v []byte) error { panic("no write methods") } +func (tx *tx) AppendDup(bucket string, k, v []byte) error { panic("no write methods") } -func (tx *remoteTx) Commit() error { +func (tx *tx) Commit() error { panic("remote db is read-only") } -func (tx *remoteTx) Rollback() { 
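
Note on the ViewTemporal helper added above: it mirrors View (begin a read-only transaction, defer tx.Rollback(), run the callback). A minimal caller-side sketch, assuming the package's existing context and kv imports; the function name, table name, and key are placeholders, not part of this patch:

```go
// Hypothetical usage of DB.ViewTemporal (illustrative only).
// HistoryGet returns the value recorded for key as of transaction number ts.
func exampleReadAsOf(ctx context.Context, db *DB, key []byte, ts uint64) (v []byte, ok bool, err error) {
	err = db.ViewTemporal(ctx, func(tx kv.TemporalTx) error {
		v, ok, err = tx.HistoryGet(kv.History("AccountsHistory"), key, ts)
		return err
	})
	return v, ok, err
}
```
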
+func (tx *tx) Rollback() { // don't close opened cursors - just close stream, server will cleanup everything well tx.closeGrpcStream() tx.db.roTxsLimiter.Release(1) @@ -225,9 +247,9 @@ func (tx *remoteTx) Rollback() { c.Close() } } -func (tx *remoteTx) DBSize() (uint64, error) { panic("not implemented") } +func (tx *tx) DBSize() (uint64, error) { panic("not implemented") } -func (tx *remoteTx) statelessCursor(bucket string) (kv.Cursor, error) { +func (tx *tx) statelessCursor(bucket string) (kv.Cursor, error) { if tx.statelessCursors == nil { tx.statelessCursors = make(map[string]kv.Cursor) } @@ -243,9 +265,9 @@ func (tx *remoteTx) statelessCursor(bucket string) (kv.Cursor, error) { return c, nil } -func (tx *remoteTx) BucketSize(name string) (uint64, error) { panic("not implemented") } +func (tx *tx) BucketSize(name string) (uint64, error) { panic("not implemented") } -func (tx *remoteTx) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error { +func (tx *tx) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error { it, err := tx.Range(bucket, fromPrefix, nil) if err != nil { return err @@ -262,7 +284,7 @@ func (tx *remoteTx) ForEach(bucket string, fromPrefix []byte, walker func(k, v [ return nil } -func (tx *remoteTx) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error { +func (tx *tx) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error { it, err := tx.Prefix(bucket, prefix) if err != nil { return err @@ -280,7 +302,7 @@ func (tx *remoteTx) ForPrefix(bucket string, prefix []byte, walker func(k, v []b } // TODO: this must be deprecated -func (tx *remoteTx) ForAmount(bucket string, fromPrefix []byte, amount uint32, walker func(k, v []byte) error) error { +func (tx *tx) ForAmount(bucket string, fromPrefix []byte, amount uint32, walker func(k, v []byte) error) error { if amount == 0 { return nil } @@ -302,7 +324,7 @@ func (tx *remoteTx) ForAmount(bucket string, fromPrefix []byte, amount uint32, w return nil } -func (tx *remoteTx) GetOne(bucket string, k []byte) (val []byte, err error) { +func (tx *tx) GetOne(bucket string, k []byte) (val []byte, err error) { c, err := tx.statelessCursor(bucket) if err != nil { return nil, err @@ -311,7 +333,7 @@ func (tx *remoteTx) GetOne(bucket string, k []byte) (val []byte, err error) { return val, err } -func (tx *remoteTx) Has(bucket string, k []byte) (bool, error) { +func (tx *tx) Has(bucket string, k []byte) (bool, error) { c, err := tx.statelessCursor(bucket) if err != nil { return false, err @@ -331,7 +353,7 @@ func (c *remoteCursor) Prev() ([]byte, []byte, error) { return c.prev() } -func (tx *remoteTx) Cursor(bucket string) (kv.Cursor, error) { +func (tx *tx) Cursor(bucket string) (kv.Cursor, error) { b := tx.db.buckets[bucket] c := &remoteCursor{tx: tx, ctx: tx.ctx, bucketName: bucket, bucketCfg: b, stream: tx.stream} tx.cursors = append(tx.cursors, c) @@ -342,15 +364,19 @@ func (tx *remoteTx) Cursor(bucket string) (kv.Cursor, error) { if err != nil { return nil, err } - c.id = msg.CursorID + c.id = msg.CursorId return c, nil } -func (c *remoteCursor) Put(k []byte, v []byte) error { panic("not supported") } -func (c *remoteCursor) PutNoOverwrite(k []byte, v []byte) error { panic("not supported") } -func (c *remoteCursor) Append(k []byte, v []byte) error { panic("not supported") } -func (c *remoteCursor) Delete(k []byte) error { panic("not supported") } -func (c *remoteCursor) DeleteCurrent() error { panic("not supported") } +func (tx *tx) 
ListBuckets() ([]string, error) { + return nil, fmt.Errorf("function ListBuckets is not implemented for remoteTx") +} + +// func (c *remoteCursor) Put(k []byte, v []byte) error { panic("not supported") } +// func (c *remoteCursor) PutNoOverwrite(k []byte, v []byte) error { panic("not supported") } +// func (c *remoteCursor) Append(k []byte, v []byte) error { panic("not supported") } +// func (c *remoteCursor) Delete(k []byte) error { panic("not supported") } +// func (c *remoteCursor) DeleteCurrent() error { panic("not supported") } func (c *remoteCursor) Count() (uint64, error) { if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_COUNT}); err != nil { return 0, err @@ -538,7 +564,7 @@ func (c *remoteCursor) Last() ([]byte, []byte, error) { return c.last() } -func (tx *remoteTx) closeGrpcStream() { +func (tx *tx) closeGrpcStream() { if tx.stream == nil { return } @@ -582,7 +608,7 @@ func (c *remoteCursor) Close() { } } -func (tx *remoteTx) CursorDupSort(bucket string) (kv.CursorDupSort, error) { +func (tx *tx) CursorDupSort(bucket string) (kv.CursorDupSort, error) { b := tx.db.buckets[bucket] c := &remoteCursor{tx: tx, ctx: tx.ctx, bucketName: bucket, bucketCfg: b, stream: tx.stream} tx.cursors = append(tx.cursors, c) @@ -593,7 +619,7 @@ func (tx *remoteTx) CursorDupSort(bucket string) (kv.CursorDupSort, error) { if err != nil { return nil, err } - c.id = msg.CursorID + c.id = msg.CursorId return &remoteCursorDupSort{remoteCursor: c}, nil } @@ -619,17 +645,51 @@ func (c *remoteCursorDupSort) PrevNoDup() ([]byte, []byte, error) { return c.pre func (c *remoteCursorDupSort) LastDup() ([]byte, error) { return c.lastDup() } // Temporal Methods -func (tx *remoteTx) HistoryGet(name kv.History, k []byte, ts uint64) (v []byte, ok bool, err error) { +func (tx *tx) DomainGetAsOf(name kv.Domain, k, k2 []byte, ts uint64) (v []byte, ok bool, err error) { + reply, err := tx.db.remoteKV.DomainGet(tx.ctx, &remote.DomainGetReq{TxId: tx.id, Table: string(name), K: k, K2: k2, Ts: ts}) + if err != nil { + return nil, false, err + } + return reply.V, reply.Ok, nil +} + +func (tx *tx) DomainGet(name kv.Domain, k, k2 []byte) (v []byte, ok bool, err error) { + reply, err := tx.db.remoteKV.DomainGet(tx.ctx, &remote.DomainGetReq{TxId: tx.id, Table: string(name), K: k, K2: k2, Latest: true}) + if err != nil { + return nil, false, err + } + return reply.V, reply.Ok, nil +} + +func (tx *tx) DomainRange(name kv.Domain, fromKey, toKey []byte, ts uint64, asc order.By, limit int) (it iter.KV, err error) { + return iter.PaginateKV(func(pageToken string) (keys, vals [][]byte, nextPageToken string, err error) { + reply, err := tx.db.remoteKV.DomainRange(tx.ctx, &remote.DomainRangeReq{TxId: tx.id, Table: string(name), FromKey: fromKey, ToKey: toKey, Ts: ts, OrderAscend: bool(asc), Limit: int64(limit)}) + if err != nil { + return nil, nil, "", err + } + return reply.Keys, reply.Values, reply.NextPageToken, nil + }), nil +} +func (tx *tx) HistoryGet(name kv.History, k []byte, ts uint64) (v []byte, ok bool, err error) { reply, err := tx.db.remoteKV.HistoryGet(tx.ctx, &remote.HistoryGetReq{TxId: tx.id, Table: string(name), K: k, Ts: ts}) if err != nil { return nil, false, err } return reply.V, reply.Ok, nil } +func (tx *tx) HistoryRange(name kv.History, fromTs, toTs int, asc order.By, limit int) (it iter.KV, err error) { + return iter.PaginateKV(func(pageToken string) (keys, vals [][]byte, nextPageToken string, err error) { + reply, err := tx.db.remoteKV.HistoryRange(tx.ctx, &remote.HistoryRangeReq{TxId: tx.id, 
Table: string(name), FromTs: int64(fromTs), ToTs: int64(toTs), OrderAscend: bool(asc), Limit: int64(limit)}) + if err != nil { + return nil, nil, "", err + } + return reply.Keys, reply.Values, reply.NextPageToken, nil + }), nil +} -func (tx *remoteTx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs, limit int) (timestamps iter.U64, err error) { +func (tx *tx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc order.By, limit int) (timestamps iter.U64, err error) { return iter.PaginateU64(func(pageToken string) (arr []uint64, nextPageToken string, err error) { - req := &remote.IndexRangeReq{TxId: tx.id, Table: string(name), K: k, FromTs: int64(fromTs), ToTs: int64(toTs), Limit: int64(limit)} + req := &remote.IndexRangeReq{TxId: tx.id, Table: string(name), K: k, FromTs: int64(fromTs), ToTs: int64(toTs), OrderAscend: bool(asc), Limit: int64(limit)} reply, err := tx.db.remoteKV.IndexRange(tx.ctx, req) if err != nil { return nil, "", err @@ -638,7 +698,7 @@ func (tx *remoteTx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs, limi }), nil } -func (tx *remoteTx) Prefix(table string, prefix []byte) (iter.KV, error) { +func (tx *tx) Prefix(table string, prefix []byte) (iter.KV, error) { nextPrefix, ok := kv.NextSubtree(prefix) if !ok { return tx.Range(table, prefix, nil) @@ -646,44 +706,7 @@ func (tx *remoteTx) Prefix(table string, prefix []byte) (iter.KV, error) { return tx.Range(table, prefix, nextPrefix) } -/* -func (tx *remoteTx) IndexStream(name kv.InvertedIdx, k []byte, fromTs, toTs, limit int) (timestamps iter.U64, err error) { - //TODO: maybe add ctx.WithCancel - stream, err := tx.db.remoteKV.IndexStream(tx.ctx, &remote.IndexRangeReq{TxId: tx.id, Table: string(name), K: k, FromTs: int64(fromTs), ToTs: int64(toTs), Limit: int32(limit)}) - if err != nil { - return nil, err - } - it := &grpc2U64Stream[*remote.IndexRangeReply]{ - grpc2UnaryStream[*remote.IndexRangeReply, uint64]{stream: stream, unwrap: func(msg *remote.IndexRangeReply) []uint64 { return msg.Timestamps }}, - } - tx.streams = append(tx.streams, it) - return it, nil -} - -/* -func (tx *remoteTx) streamOrderLimit(table string, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) { - req := &remote.RangeReq{TxId: tx.id, Table: table, FromPrefix: fromPrefix, ToPrefix: toPrefix, OrderAscend: bool(asc), Limit: int32(limit)} - stream, err := tx.db.remoteKV.Stream(tx.ctx, req) - if err != nil { - return nil, err - } - it := &grpc2Pairs[*remote.Pairs]{stream: stream} - tx.streams = append(tx.streams, it) - return it, nil -} - -func (tx *remoteTx) Stream(table string, fromPrefix, toPrefix []byte) (iter.KV, error) { - return tx.StreamAscend(table, fromPrefix, toPrefix, -1) -} -func (tx *remoteTx) StreamAscend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { - return tx.streamOrderLimit(table, fromPrefix, toPrefix, true, limit) -} -func (tx *remoteTx) StreamDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { - return tx.streamOrderLimit(table, fromPrefix, toPrefix, false, limit) -} -*/ - -func (tx *remoteTx) rangeOrderLimit(table string, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) { +func (tx *tx) rangeOrderLimit(table string, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) { return iter.PaginateKV(func(pageToken string) (keys [][]byte, values [][]byte, nextPageToken string, err error) { req := &remote.RangeReq{TxId: tx.id, Table: table, FromPrefix: fromPrefix, ToPrefix: toPrefix, OrderAscend: 
bool(asc), Limit: int64(limit)} reply, err := tx.db.remoteKV.Range(tx.ctx, req) @@ -693,129 +716,15 @@ func (tx *remoteTx) rangeOrderLimit(table string, fromPrefix, toPrefix []byte, a return reply.Keys, reply.Values, reply.NextPageToken, nil }), nil } -func (tx *remoteTx) Range(table string, fromPrefix, toPrefix []byte) (iter.KV, error) { +func (tx *tx) Range(table string, fromPrefix, toPrefix []byte) (iter.KV, error) { return tx.rangeOrderLimit(table, fromPrefix, toPrefix, order.Asc, -1) } -func (tx *remoteTx) RangeAscend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { +func (tx *tx) RangeAscend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { return tx.rangeOrderLimit(table, fromPrefix, toPrefix, order.Asc, limit) } -func (tx *remoteTx) RangeDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { +func (tx *tx) RangeDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) { return tx.rangeOrderLimit(table, fromPrefix, toPrefix, order.Desc, limit) } - -/* -type grpcStream[Msg any] interface { - Recv() (Msg, error) - CloseSend() error -} - -type parisMsg interface { - GetKeys() [][]byte - GetValues() [][]byte -} -type grpc2Pairs[Msg parisMsg] struct { - stream grpcStream[Msg] - lastErr error - lastKeys [][]byte - lastValues [][]byte - i int -} - -func (it *grpc2Pairs[Msg]) NextBatch() ([][]byte, [][]byte, error) { - keys := it.lastKeys[it.i:] - values := it.lastValues[it.i:] - it.i = len(it.lastKeys) - return keys, values, nil -} -func (it *grpc2Pairs[Msg]) HasNext() bool { - if it.lastErr != nil { - return true - } - if it.i < len(it.lastKeys) { - return true - } - - it.i = 0 - msg, err := it.stream.Recv() - if err != nil { - if errors.Is(err, io.EOF) { - return false - } - it.lastErr = err - return true - } - it.lastKeys = msg.GetKeys() - it.lastValues = msg.GetValues() - return len(it.lastKeys) > 0 -} -func (it *grpc2Pairs[Msg]) Close() { - //_ = it.stream.CloseSend() -} -func (it *grpc2Pairs[Msg]) Next() ([]byte, []byte, error) { - if it.lastErr != nil { - return nil, nil, it.lastErr - } - k := it.lastKeys[it.i] - v := it.lastValues[it.i] - it.i++ - return k, v, nil -} - -type grpc2U64Stream[Msg any] struct { - grpc2UnaryStream[Msg, uint64] -} - -func (it *grpc2U64Stream[Msg]) ToBitmap() (*roaring64.Bitmap, error) { - bm := roaring64.New() - for it.HasNext() { - batch, err := it.NextBatch() - if err != nil { - return nil, err - } - bm.AddMany(batch) - } - return bm, nil -} - -type grpc2UnaryStream[Msg any, Res any] struct { - stream grpcStream[Msg] - unwrap func(Msg) []Res - lastErr error - last []Res - i int -} - -func (it *grpc2UnaryStream[Msg, Res]) NextBatch() ([]Res, error) { - v := it.last[it.i:] - it.i = len(it.last) - return v, nil -} -func (it *grpc2UnaryStream[Msg, Res]) HasNext() bool { - if it.lastErr != nil { - return true - } - if it.i < len(it.last) { - return true - } - - it.i = 0 - msg, err := it.stream.Recv() - if err != nil { - if errors.Is(err, io.EOF) { - return false - } - it.lastErr = err - return true - } - it.last = it.unwrap(msg) - return len(it.last) > 0 -} -func (it *grpc2UnaryStream[Msg, Res]) Close() { - //_ = it.stream.CloseSend() -} -func (it *grpc2UnaryStream[Msg, Res]) Next() (Res, error) { - v := it.last[it.i] - it.i++ - return v, nil +func (tx *tx) RangeDupSort(table string, key []byte, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) { + panic("not implemented yet") } -*/ diff --git a/kv/remotedbserver/remotedbserver.go 
b/kv/remotedbserver/remotedbserver.go index 2967a0ec9..2acff7397 100644 --- a/kv/remotedbserver/remotedbserver.go +++ b/kv/remotedbserver/remotedbserver.go @@ -24,12 +24,10 @@ import ( "io" "reflect" "sync" + "sync/atomic" "time" - "github.com/ledgerwatch/erigon-lib/kv/iter" - "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/log/v3" - "go.uber.org/atomic" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/emptypb" @@ -39,6 +37,8 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/gointerfaces/types" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/iter" + "github.com/ledgerwatch/erigon-lib/kv/order" ) // MaxTxTTL - kv interface provide high-consistancy guaranties: Serializable Isolations Level https://en.wikipedia.org/wiki/Isolation_(database_systems) @@ -131,7 +131,7 @@ func (s *KvServer) begin(ctx context.Context) (id uint64, err error) { if errBegin != nil { return 0, errBegin } - id = s.txIdGen.Inc() + id = s.txIdGen.Add(1) s.txs[id] = &threadSafeTx{Tx: tx} return id, nil } @@ -218,7 +218,7 @@ func (s *KvServer) Tx(stream remote.KV_TxServer) error { }); err != nil { return err } - if err := stream.Send(&remote.Pair{ViewID: viewID, TxID: id}); err != nil { + if err := stream.Send(&remote.Pair{ViewId: viewID, TxId: id}); err != nil { return fmt.Errorf("server-side error: %w", err) } @@ -315,7 +315,7 @@ func (s *KvServer) Tx(stream remote.KV_TxServer) error { bucket: in.BucketName, c: c, } - if err := stream.Send(&remote.Pair{CursorID: CursorID}); err != nil { + if err := stream.Send(&remote.Pair{CursorId: CursorID}); err != nil { return fmt.Errorf("server-side error: %w", err) } continue @@ -335,7 +335,7 @@ func (s *KvServer) Tx(stream remote.KV_TxServer) error { bucket: in.BucketName, c: c, } - if err := stream.Send(&remote.Pair{CursorID: CursorID}); err != nil { + if err := stream.Send(&remote.Pair{CursorId: CursorID}); err != nil { return fmt.Errorf("server-side error: %w", err) } continue @@ -513,9 +513,16 @@ func (s *KvServer) DomainGet(ctx context.Context, req *remote.DomainGetReq) (rep if !ok { return fmt.Errorf("server DB doesn't implement kv.Temporal interface") } - reply.V, reply.Ok, err = ttx.DomainGet(kv.Domain(req.Table), req.K, req.K2, req.Ts) - if err != nil { - return err + if req.Latest { + reply.V, reply.Ok, err = ttx.DomainGet(kv.Domain(req.Table), req.K, req.K2) + if err != nil { + return err + } + } else { + reply.V, reply.Ok, err = ttx.DomainGetAsOf(kv.Domain(req.Table), req.K, req.K2, req.Ts) + if err != nil { + return err + } } return nil }); err != nil { @@ -541,38 +548,6 @@ func (s *KvServer) HistoryGet(ctx context.Context, req *remote.HistoryGetReq) (r return reply, nil } -/* -func (s *KvServer) IndexStream(req *remote.IndexRangeReq, stream remote.KV_IndexStreamServer) error { - const step = 4096 // make sure `s.with` has limited time - var last int - for from := int(req.FromTs); from < int(req.ToTs); from = last { - if err := s.with(req.TxId, func(tx kv.Tx) error { - ttx, ok := tx.(kv.TemporalTx) - if !ok { - return fmt.Errorf("server DB doesn't implement kv.Temporal interface") - } - it, err := ttx.IndexRange(kv.InvertedIdx(req.Table), req.K, uint64(from), uint64(req.ToTs), order.By(req.OrderAscend), step) - if err != nil { - return err - } - bm, err := it.(bitmapdb.ToBitamp).ToBitmap() - if err != nil { - return err - } - if err := stream.Send(&remote.IndexRangeReply{Timestamps: bm.ToArray()}); err != nil { - return err - } - last = 
int(bm.Maximum()) - return nil - }); err != nil { - return err - } - } - return nil -} - -*/ - const PageSizeLimit = 4 * 4096 func (s *KvServer) IndexRange(ctx context.Context, req *remote.IndexRangeReq) (*remote.IndexRangeReply, error) { @@ -623,92 +598,6 @@ func (s *KvServer) IndexRange(ctx context.Context, req *remote.IndexRangeReq) (* return reply, nil } -/* -func (s *KvServer) Stream(req *remote.RangeReq, stream remote.KV_StreamServer) error { - orderAscend, fromPrefix, toPrefix := req.OrderAscend, req.FromPrefix, req.ToPrefix - if orderAscend && fromPrefix != nil && toPrefix != nil && bytes.Compare(fromPrefix, toPrefix) >= 0 { - return fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", fromPrefix, toPrefix) - } - if !orderAscend && fromPrefix != nil && toPrefix != nil && bytes.Compare(fromPrefix, toPrefix) <= 0 { - return fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", toPrefix, fromPrefix) - } - - var k, v []byte - - if req.OrderAscend && fromPrefix == nil { - fromPrefix = []byte{} - } - - var it iter.KV - var err error - var skipFirst = false - - limit := int(req.PageSize) - step := cmp.Min(s.rangeStep, limit) // make sure `s.with` has limited time - for from := fromPrefix; ; from = k { - if (req.OrderAscend && from == nil) || limit == 0 { - break - } - if toPrefix != nil { - cmp := bytes.Compare(from, toPrefix) - hasNext := (orderAscend && cmp < 0) || (!orderAscend && cmp > 0) - if !hasNext { - break - } - } - - reply := &remote.Pairs{} - if err = s.with(req.TxId, func(tx kv.Tx) error { - if orderAscend { - it, err = tx.RangeAscend(req.Table, from, toPrefix, step) - if err != nil { - return err - } - } else { - it, err = tx.RangeDescend(req.Table, from, toPrefix, step) - if err != nil { - return err - } - } - k = nil - for it.HasNext() { - k, v, err = it.Next() - if err != nil { - return err - } - reply.Keys = append(reply.Keys, k) - reply.Values = append(reply.Values, v) - limit-- - } - if k != nil { - k = common.Copy(k) - if req.OrderAscend { - k = append(k, []byte{01}...) 
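
Aside on the resume trick in the removed loop above: to continue an ascending scan strictly after the last returned key, the code seeks to a key greater than k. Under lexicographic byte ordering the immediate successor of k is k with a single 0x00 byte appended; appending 0x01, as the removed line did, can skip keys of the form k+0x00+... . A standalone sketch of the successor computation (documentation of the removed approach, not part of this patch):

```go
// nextSeekKey returns the smallest byte string strictly greater than k
// in lexicographic order: k followed by one zero byte.
func nextSeekKey(k []byte) []byte {
	out := make([]byte, len(k)+1) // trailing byte is already 0x00
	copy(out, k)
	return out
}
```
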
- } else { - if skipFirst { - reply.Keys = reply.Keys[1:] - reply.Values = reply.Values[1:] - } - skipFirst = true - } - } - return nil - }); err != nil { - return err - } - - if len(reply.Keys) > 0 { - if err := stream.Send(reply); err != nil { - return err - } - } else { - break - } - } - return nil -} -*/ - func (s *KvServer) Range(ctx context.Context, req *remote.RangeReq) (*remote.Pairs, error) { from, limit := req.FromPrefix, int(req.Limit) if req.PageToken != "" { diff --git a/kv/remotedbserver/server_test.go b/kv/remotedbserver/server_test.go index 9ee65be6c..bdcf77e88 100644 --- a/kv/remotedbserver/server_test.go +++ b/kv/remotedbserver/server_test.go @@ -89,7 +89,7 @@ func TestKvServer_renew(t *testing.T) { s.rollback(id) return nil } - for i := 0; i < 100; i++ { + for i := 0; i < 10; i++ { g.Go(testCase) } require.NoError(g.Wait()) diff --git a/kv/tables.go b/kv/tables.go index 77ee6eb04..f327e8a0c 100644 --- a/kv/tables.go +++ b/kv/tables.go @@ -301,19 +301,6 @@ const ( CliqueSnapshot = "CliqueSnapshot" CliqueLastSnapshot = "CliqueLastSnapshot" - // Snapshot table used for Binance Smart Chain's consensus engine Parlia - // Schema of key/value pairs containing: - // Key (string): SnapshotFullKey = SnapshotBucket + num (uint64 big endian) + hash - // Value (JSON blob): - // { - // "number" // Block number where the snapshot was created - // "hash" // Block hash where the snapshot was created - // "validators" // Set of authorized validators at this moment - // "recents" // Set of recent validators for spam protections - // "recent_fork_hashes" // Set of recent forkHash - // } - ParliaSnapshot = "ParliaSnapshot" - // Proof-of-stake // Beacon chain head that is been executed at the current time CurrentExecutionPayload = "CurrentExecutionPayload" @@ -371,28 +358,24 @@ const ( AccountVals = "AccountVals" AccountHistoryKeys = "AccountHistoryKeys" AccountHistoryVals = "AccountHistoryVals" - AccountSettings = "AccountSettings" AccountIdx = "AccountIdx" StorageKeys = "StorageKeys" StorageVals = "StorageVals" StorageHistoryKeys = "StorageHistoryKeys" StorageHistoryVals = "StorageHistoryVals" - StorageSettings = "StorageSettings" StorageIdx = "StorageIdx" CodeKeys = "CodeKeys" CodeVals = "CodeVals" CodeHistoryKeys = "CodeHistoryKeys" CodeHistoryVals = "CodeHistoryVals" - CodeSettings = "CodeSettings" CodeIdx = "CodeIdx" CommitmentKeys = "CommitmentKeys" CommitmentVals = "CommitmentVals" CommitmentHistoryKeys = "CommitmentHistoryKeys" CommitmentHistoryVals = "CommitmentHistoryVals" - CommitmentSettings = "CommitmentSettings" CommitmentIdx = "CommitmentIdx" LogAddressKeys = "LogAddressKeys" @@ -427,13 +410,10 @@ const ( BeaconState = "BeaconState" // [slot] => [signature + block without execution payload] BeaconBlocks = "BeaconBlock" - // [slot] => [attestation list (ssz)] + // [slot] => [attestation list (custom encoding)] Attestetations = "Attestetations" - - // Erigon-CL indexing - - // [Slot] => [Root (block root/state root/eth1 root)] - SlotRootIndex = "SlotRootIndex" + // [slot] => [Finalized block root] + FinalizedBlockRoots = "FinalizedBlockRoots" // [Root (block root/state root/eth1 root)] => Slot RootSlotIndex = "RootSlotIndex" @@ -496,7 +476,6 @@ var ChaindataTables = []string{ CliqueSeparate, CliqueLastSnapshot, CliqueSnapshot, - ParliaSnapshot, SyncStageProgress, PlainState, PlainContractCode, @@ -540,28 +519,24 @@ var ChaindataTables = []string{ AccountVals, AccountHistoryKeys, AccountHistoryVals, - AccountSettings, AccountIdx, StorageKeys, StorageVals, StorageHistoryKeys, 
StorageHistoryVals, - StorageSettings, StorageIdx, CodeKeys, CodeVals, CodeHistoryKeys, CodeHistoryVals, - CodeSettings, CodeIdx, CommitmentKeys, CommitmentVals, CommitmentHistoryKeys, CommitmentHistoryVals, - CommitmentSettings, CommitmentIdx, LogAddressKeys, @@ -589,7 +564,7 @@ var ChaindataTables = []string{ // Beacon stuff BeaconState, BeaconBlocks, - SlotRootIndex, + FinalizedBlockRoots, RootSlotIndex, Attestetations, LightClient, @@ -680,9 +655,11 @@ var ChaindataTablesCfg = TableCfg{ AccountKeys: {Flags: DupSort}, AccountHistoryKeys: {Flags: DupSort}, + AccountHistoryVals: {Flags: DupSort}, AccountIdx: {Flags: DupSort}, StorageKeys: {Flags: DupSort}, StorageHistoryKeys: {Flags: DupSort}, + StorageHistoryVals: {Flags: DupSort}, StorageIdx: {Flags: DupSort}, CodeKeys: {Flags: DupSort}, CodeHistoryKeys: {Flags: DupSort}, diff --git a/recsplit/eliasfano32/elias_fano.go b/recsplit/eliasfano32/elias_fano.go index bed9e15b5..a966aa9c3 100644 --- a/recsplit/eliasfano32/elias_fano.go +++ b/recsplit/eliasfano32/elias_fano.go @@ -125,23 +125,24 @@ func (ef *EliasFano) Build() { lastSuperQ = i*64 + b ef.jump[(c/superQ)*superQSize] = lastSuperQ } - if (c & qMask) == 0 { - // When c is multiple of 2^8 (256) - var offset = i*64 + b - lastSuperQ // offset can be either 0, 256, 512, 768, ..., up to 4096-256 - // offset needs to be encoded as 16-bit integer, therefore the following check - if offset >= (1 << 32) { - fmt.Printf("ef.l=%x,ef.u=%x\n", ef.l, ef.u) - fmt.Printf("offset=%x,lastSuperQ=%x,i=%x,b=%x,c=%x\n", offset, lastSuperQ, i, b, c) - panic("") - } - // c % superQ is the bit index inside the group of 4096 bits - jumpSuperQ := (c / superQ) * superQSize - jumpInsideSuperQ := (c % superQ) / q - idx64 := jumpSuperQ + 1 + (jumpInsideSuperQ >> 1) - shift := 32 * (jumpInsideSuperQ % 2) - mask := uint64(0xffffffff) << shift - ef.jump[idx64] = (ef.jump[idx64] &^ mask) | (offset << shift) + if (c & qMask) != 0 { + c++ + continue + } + // When c is multiple of 2^8 (256) + var offset = i*64 + b - lastSuperQ // offset can be either 0, 256, 512, 768, ..., up to 4096-256 + // offset needs to be encoded as 16-bit integer, therefore the following check + if offset >= (1 << 32) { + fmt.Printf("ef.l=%x,ef.u=%x\n", ef.l, ef.u) + fmt.Printf("offset=%x,lastSuperQ=%x,i=%x,b=%x,c=%x\n", offset, lastSuperQ, i, b, c) + panic("") } + // c % superQ is the bit index inside the group of 4096 bits + jumpSuperQ := (c / superQ) * superQSize + jumpInsideSuperQ := (c % superQ) / q + idx64, shift := jumpSuperQ+1+(jumpInsideSuperQ>>1), 32*(jumpInsideSuperQ%2) + mask := uint64(0xffffffff) << shift + ef.jump[idx64] = (ef.jump[idx64] &^ mask) | (offset << shift) c++ } } @@ -150,8 +151,7 @@ func (ef *EliasFano) Build() { func (ef *EliasFano) get(i uint64) (val uint64, window uint64, sel int, currWord uint64, lower uint64) { lower = i * ef.l - idx64 := lower / 64 - shift := lower % 64 + idx64, shift := lower/64, lower%64 lower = ef.lowerBits[idx64] >> shift if shift > 0 { lower |= ef.lowerBits[idx64+1] << (64 - shift) @@ -159,8 +159,7 @@ func (ef *EliasFano) get(i uint64) (val uint64, window uint64, sel int, currWord jumpSuperQ := (i / superQ) * superQSize jumpInsideSuperQ := (i % superQ) / q - idx64 = jumpSuperQ + 1 + (jumpInsideSuperQ >> 1) - shift = 32 * (jumpInsideSuperQ % 2) + idx64, shift = jumpSuperQ+1+(jumpInsideSuperQ>>1), 32*(jumpInsideSuperQ%2) mask := uint64(0xffffffff) << shift jump := ef.jump[jumpSuperQ] + (ef.jump[idx64]&mask)>>shift @@ -175,7 +174,7 @@ func (ef *EliasFano) get(i uint64) (val uint64, window uint64, 
sel int, currWord } sel = bitutil.Select64(window, d) - val = ((currWord*64+uint64(sel)-i)<>= ef.l - valNext = ((currWord*64+uint64(bits.TrailingZeros64(window))-i-1)<>1), 32*(jumpInsideSuperQ%2) + mask := uint64(0xffffffff) << shift + jump := ef.jump[jumpSuperQ] + (ef.jump[idx64]&mask)>>shift + currWord := jump / 64 + window := ef.upperBits[currWord] & (uint64(0xffffffffffffffff) << (jump % 64)) + d := int(i & qMask) + + for bitCount := bits.OnesCount64(window); bitCount <= d; bitCount = bits.OnesCount64(window) { + currWord++ + window = ef.upperBits[currWord] + d -= bitCount + } + + sel := bitutil.Select64(window, d) + return currWord*64 + uint64(sel) - i +} + // Search returns the value in the sequence, equal or greater than given value -func (ef *EliasFano) Search(offset uint64) (uint64, bool) { - i := uint64(sort.Search(int(ef.count+1), func(i int) bool { - val, _, _, _, _ := ef.get(uint64(i)) - return val >= offset - })) - if i <= ef.count { - return ef.Get(i), true +func (ef *EliasFano) search(v uint64) (nextV uint64, nextI uint64, ok bool) { + if v == 0 { + return ef.Min(), 0, true + } + if v == ef.Max() { + return ef.Max(), ef.count, true + } + if v > ef.Max() { + return 0, 0, false + } + + hi := v >> ef.l + i := sort.Search(int(ef.count+1), func(i int) bool { + return ef.upper(uint64(i)) >= hi + }) + for j := uint64(i); j <= ef.count; j++ { + val, _, _, _, _ := ef.get(j) + if val >= v { + return val, j, true + } } - return 0, false + return 0, 0, false +} + +func (ef *EliasFano) Search(v uint64) (uint64, bool) { + n, _, ok := ef.search(v) + return n, ok } func (ef *EliasFano) Max() uint64 { @@ -227,8 +264,7 @@ func (ef *EliasFano) Count() uint64 { } func (ef *EliasFano) Iterator() *EliasFanoIter { - it := &EliasFanoIter{ef: ef, upperMask: 1, upperStep: uint64(1) << ef.l} - return it + return &EliasFanoIter{ef: ef, upperMask: 1, upperStep: uint64(1) << ef.l, lowerBits: ef.lowerBits, upperBits: ef.upperBits, count: ef.count, l: ef.l, lowerBitsMask: ef.lowerBitsMask} } func (ef *EliasFano) ReverseIterator() *iter.ArrStream[uint64] { //TODO: this is very un-optimal, need implement proper reverse-iterator @@ -246,30 +282,87 @@ func (ef *EliasFano) ReverseIterator() *iter.ArrStream[uint64] { type EliasFanoIter struct { ef *EliasFano + lowerBits []uint64 + upperBits []uint64 + + //constants + count uint64 + lowerBitsMask uint64 + l uint64 + upperStep uint64 + + //fields of current value + upper uint64 + upperIdx uint64 + + //fields of next value idx uint64 lowerIdx uint64 - upperIdx uint64 upperMask uint64 - upper uint64 - upperStep uint64 } func (efi *EliasFanoIter) HasNext() bool { - return efi.idx <= efi.ef.count + return efi.idx <= efi.count } -func (efi *EliasFanoIter) Next() (uint64, error) { - idx64 := efi.lowerIdx >> 6 - shift := efi.lowerIdx & 63 - lower := efi.ef.lowerBits[idx64] >> shift - if shift > 0 { - lower |= efi.ef.lowerBits[idx64+1] << (64 - shift) +func (efi *EliasFanoIter) Reset() { + efi.upperMask = 1 + efi.upperStep = uint64(1) << efi.l + efi.upperIdx = 0 + + efi.upper = 0 + efi.lowerIdx = 0 + efi.idx = 0 +} + +func (efi *EliasFanoIter) SeekDeprecated(n uint64) { + efi.Reset() + _, i, ok := efi.ef.search(n) + if !ok { + efi.idx = efi.count + 1 + return + } + for j := uint64(0); j < i; j++ { + efi.increment() + } + //fmt.Printf("seek: efi.upperMask(%d)=%d, upperIdx=%d, lowerIdx=%d, idx=%d\n", n, bits.TrailingZeros64(efi.upperMask), efi.upperIdx, efi.lowerIdx, efi.idx) + //fmt.Printf("seek: efi.upper=%d\n", efi.upper) +} + +func (efi *EliasFanoIter) Seek(n uint64) { 
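+	// Seek positions the iterator so that the next Next() call returns the
+	// first element >= n. Unlike SeekDeprecated above, which replays
+	// increment() element by element, this variant asks ef.search for the
+	// index of the target element and then rebuilds the cursor state
+	// (upper-bits word and mask, lower-bits offset, element index) directly
+	// from the element just before the target.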
+ //fmt.Printf("b seek2: efi.upperMask(%d)=%d, upperIdx=%d, lowerIdx=%d, idx=%d\n", n, bits.TrailingZeros64(efi.upperMask), efi.upperIdx, efi.lowerIdx, efi.idx) + //fmt.Printf("b seek2: efi.upper=%d\n", efi.upper) + efi.Reset() + nn, nextI, ok := efi.ef.search(n) + _ = nn + if !ok { + efi.idx = efi.count + 1 + return } + if nextI == 0 { + return + } + + // fields of current value + v, _, sel, currWords, lower := efi.ef.get(nextI - 1) //TODO: search can return same info + efi.upper = v &^ (lower & efi.ef.lowerBitsMask) + efi.upperIdx = currWords + + // fields of next value + efi.lowerIdx = nextI * efi.l + efi.idx = nextI + efi.upperMask = 1 << (sel + 1) + + //fmt.Printf("seek2: efi.upperMask(%d)=%d, upperIdx=%d, lowerIdx=%d, idx=%d\n", n, bits.TrailingZeros64(efi.upperMask), efi.upperIdx, efi.lowerIdx, efi.idx) + //fmt.Printf("seek2: efi.upper=%d\n", efi.upper) +} + +func (efi *EliasFanoIter) increment() { if efi.upperMask == 0 { efi.upperIdx++ efi.upperMask = 1 } - for efi.ef.upperBits[efi.upperIdx]&efi.upperMask == 0 { + for efi.upperBits[efi.upperIdx]&efi.upperMask == 0 { efi.upper += efi.upperStep efi.upperMask <<= 1 if efi.upperMask == 0 { @@ -278,10 +371,18 @@ func (efi *EliasFanoIter) Next() (uint64, error) { } } efi.upperMask <<= 1 - efi.lowerIdx += efi.ef.l + efi.lowerIdx += efi.l efi.idx++ - val := (lower & efi.ef.lowerBitsMask) | efi.upper - return val, nil +} + +func (efi *EliasFanoIter) Next() (uint64, error) { + idx64, shift := efi.lowerIdx/64, efi.lowerIdx%64 + lower := efi.lowerBits[idx64] >> shift + if shift > 0 { + lower |= efi.lowerBits[idx64+1] << (64 - shift) + } + efi.increment() + return efi.upper | (lower & efi.lowerBitsMask), nil } // Write outputs the state of golomb rice encoding into a writer, which can be recovered later by Read @@ -326,6 +427,15 @@ func ReadEliasFano(r []byte) (*EliasFano, int) { return ef, 16 + 8*len(ef.data) } +// Reset - like ReadEliasFano, but for existing object +func (ef *EliasFano) Reset(r []byte) { + ef.count = binary.BigEndian.Uint64(r[:8]) + ef.u = binary.BigEndian.Uint64(r[8:16]) + ef.data = unsafe.Slice((*uint64)(unsafe.Pointer(&r[16])), (len(r)-16)/uint64Size) + ef.maxOffset = ef.u - 1 + ef.deriveFields() +} + func Max(r []byte) uint64 { return binary.BigEndian.Uint64(r[8:16]) - 1 } func Count(r []byte) uint64 { return binary.BigEndian.Uint64(r[:8]) + 1 } @@ -359,7 +469,7 @@ func Min(r []byte) uint64 { } sel := bitutil.Select64(window, 0) lowerBitsMask := (uint64(1) << l) - 1 - val := ((currWord*64+uint64(sel))<> 6 + idx64, shift := start>>6, int(start&63) mask := (uint64(1)<> shift if shift > 0 { lower |= ef.lowerBits[idx64+1] << (64 - shift) @@ -572,13 +680,11 @@ func (ef *DoubleEliasFano) get2(i uint64) (cumKeys uint64, position uint64, jumpSuperQ := (i / superQ) * superQSize * 2 jumpInsideSuperQ := (i % superQ) / q idx16 := 2*(jumpSuperQ+2) + 2*jumpInsideSuperQ - idx64 = idx16 / 2 - shift = 32 * (idx16 % 2) + idx64, shift = idx16/2, 32*(idx16%2) mask := uint64(0xffffffff) << shift jumpCumKeys := ef.jump[jumpSuperQ] + (ef.jump[idx64]&mask)>>shift idx16++ - idx64 = idx16 / 2 - shift = 32 * (idx16 % 2) + idx64, shift = idx16/2, 32*(idx16%2) mask = uint64(0xffffffff) << shift jumpPosition := ef.jump[jumpSuperQ+1] + (ef.jump[idx64]&mask)>>shift //fmt.Printf("i = %d, jumpCumKeys = %d, jumpPosition = %d\n", i, jumpCumKeys, jumpPosition) diff --git a/recsplit/eliasfano32/elias_fano_test.go b/recsplit/eliasfano32/elias_fano_test.go index 07957a3fe..5d9cd74f1 100644 --- a/recsplit/eliasfano32/elias_fano_test.go +++ 
b/recsplit/eliasfano32/elias_fano_test.go @@ -18,12 +18,125 @@ package eliasfano32 import ( "bytes" + "math" + "math/bits" "testing" "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +func TestEliasFanoSeek(t *testing.T) { + count := uint64(1_000_000) + maxOffset := (count - 1) * 123 + ef := NewEliasFano(count, maxOffset) + vals := make([]uint64, 0, count) + for offset := uint64(0); offset < count; offset++ { + val := offset * 123 + vals = append(vals, val) + ef.AddOffset(val) + } + ef.Build() + + t.Run("iter match vals", func(t *testing.T) { + it := ef.Iterator() + for i := 0; it.HasNext(); i++ { + n, err := it.Next() + require.NoError(t, err) + require.Equal(t, int(vals[i]), int(n)) + } + }) + t.Run("iter grow", func(t *testing.T) { + it := ef.Iterator() + prev, _ := it.Next() + for it.HasNext() { + n, _ := it.Next() + require.GreaterOrEqual(t, int(n), int(prev)) + } + }) + + { + v2, ok2 := ef.Search(ef.Max()) + require.True(t, ok2, v2) + require.Equal(t, ef.Max(), v2) + it := ef.Iterator() + //it.SeekDeprecated(ef.Max()) + for i := 0; i < int(ef.Count()-1); i++ { + it.Next() + } + //save all fields values + //v1, v2, v3, v4, v5 := it.upperIdx, it.upperMask, it.lowerIdx, it.upper, it.idx + // seek to same item and check new fields + it.Seek(ef.Max()) + //require.Equal(t, int(v1), int(it.upperIdx)) + //require.Equal(t, int(v3), int(it.lowerIdx)) + //require.Equal(t, int(v5), int(it.idx)) + //require.Equal(t, bits.TrailingZeros64(v2), bits.TrailingZeros64(it.upperMask)) + //require.Equal(t, int(v4), int(it.upper)) + + require.True(t, it.HasNext(), v2) + itV, err := it.Next() + require.NoError(t, err) + require.Equal(t, int(ef.Max()), int(itV)) + } + + { + v2, ok2 := ef.Search(ef.Min()) + require.True(t, ok2, v2) + require.Equal(t, int(ef.Min()), int(v2)) + it := ef.Iterator() + it.Seek(ef.Min()) + require.True(t, it.HasNext(), v2) + itV, err := it.Next() + require.NoError(t, err) + require.Equal(t, int(ef.Min()), int(itV)) + } + + { + v2, ok2 := ef.Search(0) + require.True(t, ok2, v2) + require.Equal(t, int(ef.Min()), int(v2)) + it := ef.Iterator() + it.Seek(0) + require.True(t, it.HasNext(), v2) + itV, err := it.Next() + require.NoError(t, err) + require.Equal(t, int(ef.Min()), int(itV)) + } + + { + v2, ok2 := ef.Search(math.MaxUint32) + require.False(t, ok2, v2) + it := ef.Iterator() + it.Seek(math.MaxUint32) + require.False(t, it.HasNext(), v2) + } + + { + v2, ok2 := ef.Search((count+1)*123 + 1) + require.False(t, ok2, v2) + it := ef.Iterator() + it.Seek((count+1)*123 + 1) + require.False(t, it.HasNext(), v2) + } + + t.Run("search and seek can't return smaller", func(t *testing.T) { + for i := uint64(0); i < count; i++ { + search := i * 123 + v, ok2 := ef.Search(search) + require.True(t, ok2, search) + require.GreaterOrEqual(t, int(v), int(search)) + it := ef.Iterator() + it.Seek(search) + itV, err := it.Next() + require.NoError(t, err) + require.GreaterOrEqual(t, int(itV), int(search), int(v)) + } + }) + +} + func TestEliasFano(t *testing.T) { offsets := []uint64{1, 4, 6, 8, 10, 14, 16, 19, 22, 34, 37, 39, 41, 43, 48, 51, 54, 58, 62} count := uint64(len(offsets)) @@ -80,14 +193,101 @@ func TestIterator(t *testing.T) { ef.AddOffset(offset) } ef.Build() + t.Run("scan", func(t *testing.T) { + efi := ef.Iterator() + i := 0 + var values []uint64 + for efi.HasNext() { + v, _ := efi.Next() + values = append(values, v) + assert.Equal(t, offsets[i], v, "iter") + i++ + } + iter.ExpectEqualU64(t, iter.ReverseArray(values), 
ef.ReverseIterator()) + }) + + t.Run("seek", func(t *testing.T) { + iter2 := ef.Iterator() + iter2.Seek(2) + n, err := iter2.Next() + require.NoError(t, err) + require.Equal(t, 4, int(n)) + + iter2.Seek(5) + n, err = iter2.Next() + require.NoError(t, err) + require.Equal(t, 6, int(n)) + + iter2.Seek(62) + n, err = iter2.Next() + require.NoError(t, err) + require.Equal(t, 62, int(n)) + + iter2.Seek(1024) + require.False(t, iter2.HasNext()) + }) +} + +func TestIteratorAndSeekAreBasedOnSameFields(t *testing.T) { + vals := []uint64{1, 123, 789} + ef := NewEliasFano(uint64(len(vals)), vals[len(vals)-1]) + for _, v := range vals { + ef.AddOffset(v) + } + ef.Build() + + for i := range vals { + checkSeek(t, i, ef, vals) + } +} + +func checkSeek(t *testing.T, j int, ef *EliasFano, vals []uint64) { + t.Helper() efi := ef.Iterator() - i := 0 - var values []uint64 - for efi.HasNext() { - v, _ := efi.Next() - values = append(values, v) - assert.Equal(t, offsets[i], v, "iter") - i++ + // drain iterator to given item + for i := 0; i < j; i++ { + efi.Next() } - iter.ExpectEqualU64(t, iter.ReverseArray(values), ef.ReverseIterator()) + //save all fields values + v1, v2, v3, v4, v5 := efi.upperIdx, efi.upperMask, efi.lowerIdx, efi.upper, efi.idx + // seek to same item and check new fields + efi.Seek(vals[j]) + require.Equal(t, int(v1), int(efi.upperIdx)) + require.Equal(t, int(v3), int(efi.lowerIdx)) + require.Equal(t, int(v4), int(efi.upper)) + require.Equal(t, int(v5), int(efi.idx)) + require.Equal(t, bits.TrailingZeros64(v2), bits.TrailingZeros64(efi.upperMask)) +} + +func BenchmarkName(b *testing.B) { + count := uint64(1_000_000) + maxOffset := (count - 1) * 123 + ef := NewEliasFano(count, maxOffset) + for offset := uint64(0); offset < count; offset++ { + ef.AddOffset(offset * 123) + } + ef.Build() + b.Run("next", func(b *testing.B) { + for i := 0; i < b.N; i++ { + it := ef.Iterator() + for it.HasNext() { + n, _ := it.Next() + if n > 1_000_000 { + break + } + } + } + }) + b.Run("seek", func(b *testing.B) { + for i := 0; i < b.N; i++ { + it := ef.Iterator() + it.SeekDeprecated(1_000_000) + } + }) + b.Run("seek2", func(b *testing.B) { + for i := 0; i < b.N; i++ { + it := ef.Iterator() + it.Seek(1_000_000) + } + }) } diff --git a/recsplit/index.go b/recsplit/index.go index 09c6edfe4..85cc7cecc 100644 --- a/recsplit/index.go +++ b/recsplit/index.go @@ -24,6 +24,7 @@ import ( "math/bits" "os" "path/filepath" + "sync" "time" "unsafe" @@ -59,6 +60,8 @@ type Index struct { secondaryAggrBound uint16 // The lower bound for secondary key aggregation (computed from leadSize) primaryAggrBound uint16 // The lower bound for primary key aggregation (computed from leafSize) enums bool + + readers *sync.Pool } func MustOpen(indexFile string) *Index { @@ -152,6 +155,12 @@ func OpenIndex(indexFilePath string) (*Index, error) { idx.grData = p[:l] offset += 8 * int(l) idx.ef.Read(idx.data[offset:]) + + idx.readers = &sync.Pool{ + New: func() interface{} { + return NewIndexReader(idx) + }, + } return idx, nil } @@ -342,3 +351,7 @@ func (idx *Index) EnableWillNeed() *Index { _ = mmap.MadviseWillNeed(idx.mmapHandle1) return idx } + +func (idx *Index) GetReaderFromPool() *IndexReader { + return idx.readers.Get().(*IndexReader) +} diff --git a/recsplit/index_reader.go b/recsplit/index_reader.go index 7cb85a4d4..0ad10ea09 100644 --- a/recsplit/index_reader.go +++ b/recsplit/index_reader.go @@ -74,3 +74,10 @@ func (r *IndexReader) Lookup2(key1, key2 []byte) uint64 { func (r *IndexReader) Empty() bool { return r.index.Empty() } + 
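+// Close returns the reader to the sync.Pool of its owning Index (the pool
+// that Index.GetReaderFromPool draws from). It is safe to call on a nil
+// reader; the reader must not be used after Close.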
+func (r *IndexReader) Close() { + if r == nil || r.index == nil { + return + } + r.index.readers.Put(r) +} diff --git a/state/aggregator.go b/state/aggregator.go index 0b252f7b1..4f42839e6 100644 --- a/state/aggregator.go +++ b/state/aggregator.go @@ -27,9 +27,13 @@ import ( "sync/atomic" "time" + "github.com/VictoriaMetrics/metrics" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/common/background" + "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/log/v3" + "golang.org/x/sync/errgroup" "github.com/ledgerwatch/erigon-lib/commitment" "github.com/ledgerwatch/erigon-lib/common/length" @@ -40,9 +44,31 @@ import ( // files of smaller size are also immutable, but can be removed after merge to bigger files. const StepsInBiggestFile = 32 -// Reconstruction of the aggregator in another package, `aggregator` +var ( + mxCurrentTx = metrics.GetOrCreateCounter("domain_tx_processed") + mxCurrentBlock = metrics.GetOrCreateCounter("domain_block_current") + mxRunningMerges = metrics.GetOrCreateCounter("domain_running_merges") + mxRunningCollations = metrics.GetOrCreateCounter("domain_running_collations") + mxCollateTook = metrics.GetOrCreateHistogram("domain_collate_took") + mxPruneTook = metrics.GetOrCreateHistogram("domain_prune_took") + mxPruneHistTook = metrics.GetOrCreateHistogram("domain_prune_hist_took") + mxPruningProgress = metrics.GetOrCreateCounter("domain_pruning_progress") + mxCollationSize = metrics.GetOrCreateCounter("domain_collation_size") + mxCollationSizeHist = metrics.GetOrCreateCounter("domain_collation_hist_size") + mxPruneSize = metrics.GetOrCreateCounter("domain_prune_size") + mxBuildTook = metrics.GetOrCreateSummary("domain_build_files_took") + mxStepCurrent = metrics.GetOrCreateCounter("domain_step_current") + mxStepTook = metrics.GetOrCreateHistogram("domain_step_took") + mxCommitmentKeys = metrics.GetOrCreateCounter("domain_commitment_keys") + mxCommitmentRunning = metrics.GetOrCreateCounter("domain_running_commitment") + mxCommitmentTook = metrics.GetOrCreateSummary("domain_commitment_took") + mxCommitmentWriteTook = metrics.GetOrCreateHistogram("domain_commitment_write_took") + mxCommitmentUpdates = metrics.GetOrCreateCounter("domain_commitment_updates") + mxCommitmentUpdatesApplied = metrics.GetOrCreateCounter("domain_commitment_updates_applied") +) type Aggregator struct { + db kv.RwDB aggregationStep uint64 accounts *Domain storage *Domain @@ -55,19 +81,33 @@ type Aggregator struct { txNum uint64 seekTxNum uint64 blockNum uint64 - commitFn func(txNum uint64) error + stepDoneNotice chan [length.Hash]byte rwTx kv.RwTx stats FilesStats tmpdir string defaultCtx *AggregatorContext + + ps *background.ProgressSet } -func NewAggregator( - dir, tmpdir string, - aggregationStep uint64, -) (*Aggregator, error) { +//type exposedMetrics struct { +// CollationSize *metrics.Gauge +// CollationSizeHist *metrics.Gauge +// PruneSize *metrics.Gauge +// +// lastCollSize int +// lastColHistSize int +// lastPruneSize int +//} +// +//func (e exposedMetrics) init() { +// e.CollationSize = metrics.GetOrCreateGauge("domain_collation_size", func() float64 { return 0 }) +// e.CollationSizeHist = metrics.GetOrCreateGauge("domain_collation_hist_size", func() float64 { return 0 }) +// e.PruneSize = metrics.GetOrCreateGauge("domain_prune_size", func() float64 { return e.lastPruneSize }) +//} - a := &Aggregator{aggregationStep: aggregationStep, tmpdir: tmpdir} +func NewAggregator(dir, tmpdir string, aggregationStep uint64, 
commitmentMode CommitmentMode, commitTrieVariant commitment.TrieVariant) (*Aggregator, error) { + a := &Aggregator{aggregationStep: aggregationStep, ps: background.NewProgressSet(), tmpdir: tmpdir, stepDoneNotice: make(chan [length.Hash]byte, 1)} closeAgg := true defer func() { @@ -79,21 +119,21 @@ func NewAggregator( if err != nil { return nil, err } - if a.accounts, err = NewDomain(dir, tmpdir, aggregationStep, "accounts", kv.AccountKeys, kv.AccountVals, kv.AccountHistoryKeys, kv.AccountHistoryVals, kv.AccountSettings, kv.AccountIdx, 0 /* prefixLen */, false /* compressVals */); err != nil { + if a.accounts, err = NewDomain(dir, tmpdir, aggregationStep, "accounts", kv.AccountKeys, kv.AccountVals, kv.AccountHistoryKeys, kv.AccountHistoryVals, kv.AccountIdx, false, false); err != nil { return nil, err } - if a.storage, err = NewDomain(dir, tmpdir, aggregationStep, "storage", kv.StorageKeys, kv.StorageVals, kv.StorageHistoryKeys, kv.StorageHistoryVals, kv.StorageSettings, kv.StorageIdx, 20 /* prefixLen */, false /* compressVals */); err != nil { + if a.storage, err = NewDomain(dir, tmpdir, aggregationStep, "storage", kv.StorageKeys, kv.StorageVals, kv.StorageHistoryKeys, kv.StorageHistoryVals, kv.StorageIdx, false, false); err != nil { return nil, err } - if a.code, err = NewDomain(dir, tmpdir, aggregationStep, "code", kv.CodeKeys, kv.CodeVals, kv.CodeHistoryKeys, kv.CodeHistoryVals, kv.CodeSettings, kv.CodeIdx, 0 /* prefixLen */, true /* compressVals */); err != nil { + if a.code, err = NewDomain(dir, tmpdir, aggregationStep, "code", kv.CodeKeys, kv.CodeVals, kv.CodeHistoryKeys, kv.CodeHistoryVals, kv.CodeIdx, true, true); err != nil { return nil, err } - commitd, err := NewDomain(dir, tmpdir, aggregationStep, "commitment", kv.CommitmentKeys, kv.CommitmentVals, kv.CommitmentHistoryKeys, kv.CommitmentHistoryVals, kv.CommitmentSettings, kv.CommitmentIdx, 0 /* prefixLen */, false /* compressVals */) + commitd, err := NewDomain(dir, tmpdir, aggregationStep, "commitment", kv.CommitmentKeys, kv.CommitmentVals, kv.CommitmentHistoryKeys, kv.CommitmentHistoryVals, kv.CommitmentIdx, false, true) if err != nil { return nil, err } - a.commitment = NewCommittedDomain(commitd, CommitmentModeDirect) + a.commitment = NewCommittedDomain(commitd, commitmentMode, commitTrieVariant) if a.logAddrs, err = NewInvertedIndex(dir, tmpdir, aggregationStep, "logaddrs", kv.LogAddressKeys, kv.LogAddressIdx, false, nil); err != nil { return nil, err @@ -109,42 +149,94 @@ func NewAggregator( } closeAgg = false - a.defaultCtx = a.MakeContext() - a.commitment.patriciaTrie.ResetFns(a.defaultCtx.branchFn, a.defaultCtx.accountFn, a.defaultCtx.storageFn) + a.seekTxNum = a.EndTxNumMinimax() return a, nil } -func (a *Aggregator) ReopenFolder() error { +func (a *Aggregator) SetDB(db kv.RwDB) { a.db = db } + +func (a *Aggregator) buildMissedIdxBlocking(d *Domain) error { + eg, ctx := errgroup.WithContext(context.Background()) + eg.SetLimit(32) + if err := d.BuildMissedIndices(ctx, eg, a.ps); err != nil { + return err + } + return eg.Wait() +} +func (a *Aggregator) ReopenFolder() (err error) { + { + if err = a.buildMissedIdxBlocking(a.accounts); err != nil { + return err + } + if err = a.buildMissedIdxBlocking(a.storage); err != nil { + return err + } + if err = a.buildMissedIdxBlocking(a.code); err != nil { + return err + } + if err = a.buildMissedIdxBlocking(a.commitment.Domain); err != nil { + return err + } + } + + if err = a.accounts.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) + } + if err = 
a.storage.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) + } + if err = a.code.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) + } + if err = a.commitment.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) + } + if err = a.logAddrs.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) + } + if err = a.logTopics.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) + } + if err = a.tracesFrom.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) + } + if err = a.tracesTo.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) + } + return nil +} + +func (a *Aggregator) ReopenList(fNames []string) error { var err error - if err = a.accounts.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.accounts.OpenList(fNames); err != nil { + return err } - if err = a.storage.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.storage.OpenList(fNames); err != nil { + return err } - if err = a.code.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.code.OpenList(fNames); err != nil { + return err } - if err = a.commitment.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.commitment.OpenList(fNames); err != nil { + return err } - if err = a.logAddrs.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.logAddrs.OpenList(fNames); err != nil { + return err } - if err = a.logTopics.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.logTopics.OpenList(fNames); err != nil { + return err } - if err = a.tracesFrom.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.tracesFrom.OpenList(fNames); err != nil { + return err } - if err = a.tracesTo.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.tracesTo.OpenList(fNames); err != nil { + return err } return nil } func (a *Aggregator) GetAndResetStats() DomainStats { - stats := DomainStats{} + stats := DomainStats{HistoryQueries: &atomic.Uint64{}, TotalQueries: &atomic.Uint64{}} stats.Accumulate(a.accounts.GetAndResetStats()) stats.Accumulate(a.storage.GetAndResetStats()) stats.Accumulate(a.code.GetAndResetStats()) @@ -164,6 +256,12 @@ func (a *Aggregator) GetAndResetStats() DomainStats { } func (a *Aggregator) Close() { + if a.defaultCtx != nil { + a.defaultCtx.Close() + } + if a.stepDoneNotice != nil { + close(a.stepDoneNotice) + } if a.accounts != nil { a.accounts.Close() } @@ -204,6 +302,8 @@ func (a *Aggregator) SetTx(tx kv.RwTx) { } func (a *Aggregator) SetTxNum(txNum uint64) { + mxCurrentTx.Set(txNum) + a.txNum = txNum a.accounts.SetTxNum(txNum) a.storage.SetTxNum(txNum) @@ -215,8 +315,10 @@ func (a *Aggregator) SetTxNum(txNum uint64) { a.tracesTo.SetTxNum(txNum) } -// todo useless -func (a *Aggregator) SetBlockNum(bn uint64) { a.blockNum = bn } +func (a *Aggregator) SetBlockNum(blockNum uint64) { + a.blockNum = blockNum + mxCurrentBlock.Set(blockNum) +} func (a *Aggregator) SetWorkers(i int) { a.accounts.compressWorkers = i @@ -259,39 +361,90 @@ func (a *Aggregator) EndTxNumMinimax() uint64 { return min } -func (a *Aggregator) SeekCommitment() (txNum uint64, err error) { +func (a *Aggregator) DomainEndTxNumMinimax() uint64 { + min := a.accounts.endTxNumMinimax() + if txNum := a.storage.endTxNumMinimax(); txNum < min { + min = txNum + } + if txNum := 
a.code.endTxNumMinimax(); txNum < min {
+		min = txNum
+	}
+	if txNum := a.commitment.endTxNumMinimax(); txNum < min {
+		min = txNum
+	}
+	return min
+}
+
+func (a *Aggregator) SeekCommitment() (blockNum, txNum uint64, err error) {
 	filesTxNum := a.EndTxNumMinimax()
-	txNum, err = a.commitment.SeekCommitment(a.aggregationStep, filesTxNum)
+	blockNum, txNum, err = a.commitment.SeekCommitment(a.aggregationStep, filesTxNum)
 	if err != nil {
-		return 0, err
+		return 0, 0, err
 	}
 	if txNum == 0 {
 		return
 	}
 	a.seekTxNum = txNum + 1
-	return txNum + 1, nil
+	return blockNum, txNum + 1, nil
 }
 
-func (a *Aggregator) aggregate(ctx context.Context, step uint64) error {
-	defer func(t time.Time) {
-		log.Info("[snapshots] aggregation step is done", "step", step, "took", time.Since(t))
-	}(time.Now())
+func (a *Aggregator) mergeDomainSteps(ctx context.Context) error {
+	mergeStartedAt := time.Now()
+	maxEndTxNum := a.DomainEndTxNumMinimax()
+
+	var upmerges int
+	for {
+		a.defaultCtx.Close()
+		a.defaultCtx = a.MakeContext()
+
+		somethingMerged, err := a.mergeLoopStep(ctx, maxEndTxNum, 1)
+		if err != nil {
+			return err
+		}
+		if !somethingMerged {
+			break
+		}
+		upmerges++
+	}
+
+	if upmerges > 1 {
+		log.Info("[stat] aggregation merged",
+			"upto_tx", maxEndTxNum,
+			"merge_took", time.Since(mergeStartedAt),
+			"merges_count", upmerges)
+	}
+
+	return nil
+}
+
+func (a *Aggregator) aggregate(ctx context.Context, step uint64) error {
 	var (
 		logEvery = time.NewTicker(time.Second * 30)
 		wg       sync.WaitGroup
 		errCh    = make(chan error, 8)
-		//maxSpan = StepsInBiggestFile * a.aggregationStep
-		txFrom = step * a.aggregationStep
-		txTo   = (step + 1) * a.aggregationStep
-		//workers = 1
+		maxSpan = StepsInBiggestFile * a.aggregationStep
+		txFrom  = step * a.aggregationStep
+		txTo    = (step + 1) * a.aggregationStep
+		workers = 1
+
+		stepStartedAt = time.Now()
 	)
+	defer logEvery.Stop()
 
 	for _, d := range []*Domain{a.accounts, a.storage, a.code, a.commitment.Domain} {
 		wg.Add(1)
-		collation, err := d.collate(ctx, step, txFrom, txTo, d.tx, logEvery)
+		mxRunningCollations.Inc()
+		start := time.Now()
+		collation, err := d.collateStream(ctx, step, txFrom, txTo, d.tx)
+		mxRunningCollations.Dec()
+		mxCollateTook.UpdateDuration(start)
+
+		//mxCollationSize.Set(uint64(collation.valuesComp.Count()))
+		mxCollationSizeHist.Set(uint64(collation.historyComp.Count()))
+
 		if err != nil {
 			collation.Close()
 			return fmt.Errorf("domain collation %q has failed: %w", d.filenameBase, err)
@@ -299,53 +452,101 @@ func (a *Aggregator) aggregate(ctx context.Context, step uint64) error {
 		go func(wg *sync.WaitGroup, d *Domain, collation Collation) {
 			defer wg.Done()
+			mxRunningMerges.Inc()
 
-			defer func(t time.Time) {
-				log.Info("[snapshots] domain collate-build is done", "took", time.Since(t), "domain", d.filenameBase)
-			}(time.Now())
-
-			sf, err := d.buildFiles(ctx, step, collation)
+			start := time.Now()
+			sf, err := d.buildFiles(ctx, step, collation, a.ps)
 			collation.Close()
+
 			if err != nil {
 				errCh <- err
+				sf.Close()
+				mxRunningMerges.Dec()
 				return
 			}
+
+			mxRunningMerges.Dec()
+
 			d.integrateFiles(sf, step*a.aggregationStep, (step+1)*a.aggregationStep)
+			d.stats.LastFileBuildingTook = time.Since(start)
 		}(&wg, d, collation)
 
+		mxPruningProgress.Add(2) // domain and history
 		if err := d.prune(ctx, step, txFrom, txTo, math.MaxUint64, logEvery); err != nil {
 			return err
 		}
+		mxPruningProgress.Dec()
+		mxPruningProgress.Dec()
+
+		mxPruneTook.Update(d.stats.LastPruneTook.Seconds())
+		mxPruneHistTook.Update(d.stats.LastPruneHistTook.Seconds())
 	}
+	// when domain files are built and db is
pruned, we can merge them + wg.Add(1) + go func(wg *sync.WaitGroup) { + defer wg.Done() + + if err := a.mergeDomainSteps(ctx); err != nil { + errCh <- err + } + }(&wg) + + // indices are built concurrently for _, d := range []*InvertedIndex{a.logTopics, a.logAddrs, a.tracesFrom, a.tracesTo} { wg.Add(1) - collation, err := d.collate(ctx, step*a.aggregationStep, (step+1)*a.aggregationStep, d.tx, logEvery) + mxRunningCollations.Inc() + start := time.Now() + collation, err := d.collate(ctx, step*a.aggregationStep, (step+1)*a.aggregationStep, d.tx) + mxRunningCollations.Dec() + mxCollateTook.UpdateDuration(start) + if err != nil { return fmt.Errorf("index collation %q has failed: %w", d.filenameBase, err) } go func(wg *sync.WaitGroup, d *InvertedIndex, tx kv.Tx) { defer wg.Done() - defer func(t time.Time) { - log.Info("[snapshots] index collate-build is done", "took", time.Since(t), "domain", d.filenameBase) - }(time.Now()) - sf, err := d.buildFiles(ctx, step, collation) + mxRunningMerges.Inc() + start := time.Now() + + sf, err := d.buildFiles(ctx, step, collation, a.ps) if err != nil { errCh <- err sf.Close() return } + + mxRunningMerges.Dec() + mxBuildTook.UpdateDuration(start) + d.integrateFiles(sf, step*a.aggregationStep, (step+1)*a.aggregationStep) + + icx := d.MakeContext() + mxRunningMerges.Inc() + + if err := d.mergeRangesUpTo(ctx, d.endTxNumMinimax(), maxSpan, workers, icx, a.ps); err != nil { + errCh <- err + + mxRunningMerges.Dec() + icx.Close() + return + } + + mxRunningMerges.Dec() + icx.Close() }(&wg, d, d.tx) + mxPruningProgress.Inc() + startPrune := time.Now() if err := d.prune(ctx, txFrom, txTo, math.MaxUint64, logEvery); err != nil { return err } + mxPruneTook.UpdateDuration(startPrune) + mxPruningProgress.Dec() } go func() { @@ -358,34 +559,26 @@ func (a *Aggregator) aggregate(ctx context.Context, step uint64) error { return fmt.Errorf("domain collate-build failed: %w", err) } - ac := a.MakeContext() - defer ac.Close() + log.Info("[stat] aggregation is finished", + "range", fmt.Sprintf("%.2fM-%.2fM", float64(txFrom)/10e5, float64(txTo)/10e5), + "took", time.Since(stepStartedAt)) + + mxStepTook.UpdateDuration(stepStartedAt) - maxEndTxNum := a.EndTxNumMinimax() - for { - somethingMerged, err := a.mergeLoopStep(ctx, maxEndTxNum, 1) - if err != nil { - return err - } - if !somethingMerged { - break - } - } return nil } func (a *Aggregator) mergeLoopStep(ctx context.Context, maxEndTxNum uint64, workers int) (somethingDone bool, err error) { closeAll := true + mergeStartedAt := time.Now() + maxSpan := a.aggregationStep * StepsInBiggestFile r := a.findMergeRange(maxEndTxNum, maxSpan) if !r.any() { return false, nil } - ac := a.MakeContext() // this need, to ensure we do all operations on files in "transaction-style", maybe we will ensure it on type-level in future - defer ac.Close() - - outs := a.staticFilesInRange(r, ac) + outs := a.staticFilesInRange(r, a.defaultCtx) defer func() { if closeAll { outs.Close() @@ -404,30 +597,30 @@ func (a *Aggregator) mergeLoopStep(ctx context.Context, maxEndTxNum uint64, work a.integrateMergedFiles(outs, in) a.cleanAfterFreeze(in) closeAll = false + + for _, s := range []DomainStats{a.accounts.stats, a.code.stats, a.storage.stats} { + mxBuildTook.Update(s.LastFileBuildingTook.Seconds()) + } + + log.Info("[stat] finished merge step", + "upto_tx", maxEndTxNum, "merge_step_took", time.Since(mergeStartedAt)) + return true, nil } type Ranges struct { - accounts DomainRanges - storage DomainRanges - code DomainRanges - commitment DomainRanges - 
logTopicsEndTxNum uint64 - logAddrsEndTxNum uint64 - logTopicsStartTxNum uint64 - logAddrsStartTxNum uint64 - tracesFromStartTxNum uint64 - tracesFromEndTxNum uint64 - tracesToStartTxNum uint64 - tracesToEndTxNum uint64 - logAddrs bool - logTopics bool - tracesFrom bool - tracesTo bool + accounts DomainRanges + storage DomainRanges + code DomainRanges + commitment DomainRanges +} + +func (r Ranges) String() string { + return fmt.Sprintf("accounts=%s, storage=%s, code=%s, commitment=%s", r.accounts.String(), r.storage.String(), r.code.String(), r.commitment.String()) } func (r Ranges) any() bool { - return r.accounts.any() || r.storage.any() || r.code.any() || r.commitment.any() //|| r.logAddrs || r.logTopics || r.tracesFrom || r.tracesTo + return r.accounts.any() || r.storage.any() || r.code.any() || r.commitment.any() } func (a *Aggregator) findMergeRange(maxEndTxNum, maxSpan uint64) Ranges { @@ -436,11 +629,9 @@ func (a *Aggregator) findMergeRange(maxEndTxNum, maxSpan uint64) Ranges { r.storage = a.storage.findMergeRange(maxEndTxNum, maxSpan) r.code = a.code.findMergeRange(maxEndTxNum, maxSpan) r.commitment = a.commitment.findMergeRange(maxEndTxNum, maxSpan) - r.logAddrs, r.logAddrsStartTxNum, r.logAddrsEndTxNum = a.logAddrs.findMergeRange(maxEndTxNum, maxSpan) - r.logTopics, r.logTopicsStartTxNum, r.logTopicsEndTxNum = a.logTopics.findMergeRange(maxEndTxNum, maxSpan) - r.tracesFrom, r.tracesFromStartTxNum, r.tracesFromEndTxNum = a.tracesFrom.findMergeRange(maxEndTxNum, maxSpan) - r.tracesTo, r.tracesToStartTxNum, r.tracesToEndTxNum = a.tracesTo.findMergeRange(maxEndTxNum, maxSpan) + //if r.any() { //log.Info(fmt.Sprintf("findMergeRange(%d, %d)=%+v\n", maxEndTxNum, maxSpan, r)) + //} return r } @@ -457,18 +648,10 @@ type SelectedStaticFiles struct { commitment []*filesItem commitmentIdx []*filesItem commitmentHist []*filesItem - tracesTo []*filesItem - tracesFrom []*filesItem - logTopics []*filesItem - logAddrs []*filesItem codeI int storageI int accountsI int commitmentI int - logAddrsI int - tracesFromI int - logTopicsI int - tracesToI int } func (sf SelectedStaticFiles) Close() { @@ -477,7 +660,6 @@ func (sf SelectedStaticFiles) Close() { sf.storage, sf.storageIdx, sf.storageHist, sf.code, sf.codeIdx, sf.codeHist, sf.commitment, sf.commitmentIdx, sf.commitmentHist, - //sf.logAddrs, sf.logTopics, sf.tracesFrom, sf.tracesTo, } { for _, item := range group { if item != nil { @@ -487,6 +669,9 @@ func (sf SelectedStaticFiles) Close() { if item.index != nil { item.index.Close() } + if item.bindex != nil { + item.bindex.Close() + } } } } @@ -506,18 +691,6 @@ func (a *Aggregator) staticFilesInRange(r Ranges, ac *AggregatorContext) Selecte if r.commitment.any() { sf.commitment, sf.commitmentIdx, sf.commitmentHist, sf.commitmentI = a.commitment.staticFilesInRange(r.commitment, ac.commitment) } - if r.logAddrs { - sf.logAddrs, sf.logAddrsI = a.logAddrs.staticFilesInRange(r.logAddrsStartTxNum, r.logAddrsEndTxNum, ac.logAddrs) - } - if r.logTopics { - sf.logTopics, sf.logTopicsI = a.logTopics.staticFilesInRange(r.logTopicsStartTxNum, r.logTopicsEndTxNum, ac.logTopics) - } - if r.tracesFrom { - sf.tracesFrom, sf.tracesFromI = a.tracesFrom.staticFilesInRange(r.tracesFromStartTxNum, r.tracesFromEndTxNum, ac.tracesFrom) - } - if r.tracesTo { - sf.tracesTo, sf.tracesToI = a.tracesTo.staticFilesInRange(r.tracesToStartTxNum, r.tracesToEndTxNum, ac.tracesTo) - } return sf } @@ -530,10 +703,6 @@ type MergedFiles struct { codeIdx, codeHist *filesItem commitment *filesItem commitmentIdx, commitmentHist 
*filesItem
-	logAddrs             *filesItem
-	logTopics            *filesItem
-	tracesFrom           *filesItem
-	tracesTo             *filesItem
 }
 
 func (mf MergedFiles) Close() {
@@ -551,12 +720,21 @@
 		if item.decompressor != nil {
 			item.index.Close()
 		}
+		if item.bindex != nil {
+			item.bindex.Close()
+		}
 	}
 }
 
 func (a *Aggregator) mergeFiles(ctx context.Context, files SelectedStaticFiles, r Ranges, workers int) (MergedFiles, error) {
-	defer func(t time.Time) { log.Info("[snapshots] merge", "took", time.Since(t)) }(time.Now())
+	started := time.Now()
+	defer func(t time.Time) {
+		log.Info("[snapshots] domain files have been merged",
+			"range", fmt.Sprintf("%d-%d", r.accounts.valuesStartTxNum/a.aggregationStep, r.accounts.valuesEndTxNum/a.aggregationStep),
+			"took", time.Since(t))
+	}(started)
+
 	var mf MergedFiles
 	closeFiles := true
 	defer func() {
@@ -566,96 +744,72 @@ func (a *Aggregator) mergeFiles(ctx context.Context, files SelectedStaticFiles,
 	}()
 
 	var (
-		errCh = make(chan error, 8)
+		errCh      = make(chan error, 4)
 		wg         sync.WaitGroup
 		predicates sync.WaitGroup
 	)
+	wg.Add(4)
 	predicates.Add(2)
-	wg.Add(8)
 	go func() {
+		mxRunningMerges.Inc()
+		defer mxRunningMerges.Dec()
 		defer wg.Done()
-		defer predicates.Done()
-		var err error
-		if r.accounts.any() {
-			if mf.accounts, mf.accountsIdx, mf.accountsHist, err = a.accounts.mergeFiles(ctx, files.accounts, files.accountsIdx, files.accountsHist, r.accounts, workers); err != nil {
-				errCh <- err
-			}
-		}
-	}()
-	go func() {
-		defer wg.Done()
-		defer predicates.Done()
-		var err error
-		if r.storage.any() {
-			if mf.storage, mf.storageIdx, mf.storageHist, err = a.storage.mergeFiles(ctx, files.storage, files.storageIdx, files.storageHist, r.storage, workers); err != nil {
-				errCh <- err
-			}
-		}
-	}()
-	go func() {
-		defer wg.Done()
+
 		var err error
 		if r.code.any() {
-			if mf.code, mf.codeIdx, mf.codeHist, err = a.code.mergeFiles(ctx, files.code, files.codeIdx, files.codeHist, r.code, workers); err != nil {
-				errCh <- err
-			}
-		}
-	}()
-	go func() {
-		defer wg.Done()
-		var err error
-		if r.logAddrs {
-			if mf.logAddrs, err = a.logAddrs.mergeFiles(ctx, files.logAddrs, r.logAddrsStartTxNum, r.logAddrsEndTxNum, workers); err != nil {
+			if mf.code, mf.codeIdx, mf.codeHist, err = a.code.mergeFiles(ctx, files.code, files.codeIdx, files.codeHist, r.code, workers, a.ps); err != nil {
 				errCh <- err
 			}
 		}
 	}()
-	go func() {
-		defer wg.Done()
-		var err error
-		if r.logTopics {
-			if mf.logTopics, err = a.logTopics.mergeFiles(ctx, files.logTopics, r.logTopicsStartTxNum, r.logTopicsEndTxNum, workers); err != nil {
-				errCh <- err
-			}
-		}
-	}()
-	go func() {
+
+	go func(predicates *sync.WaitGroup) {
+		mxRunningMerges.Inc()
+		defer mxRunningMerges.Dec()
+
 		defer wg.Done()
+		defer predicates.Done()
 		var err error
-		if r.tracesFrom {
-			if mf.tracesFrom, err = a.tracesFrom.mergeFiles(ctx, files.tracesFrom, r.tracesFromStartTxNum, r.tracesFromEndTxNum, workers); err != nil {
+		if r.accounts.any() {
+			if mf.accounts, mf.accountsIdx, mf.accountsHist, err = a.accounts.mergeFiles(ctx, files.accounts, files.accountsIdx, files.accountsHist, r.accounts, workers, a.ps); err != nil {
 				errCh <- err
 			}
 		}
-	}()
-	go func() {
+	}(&predicates)
+	go func(predicates *sync.WaitGroup) {
+		mxRunningMerges.Inc()
+		defer mxRunningMerges.Dec()
+
 		defer wg.Done()
+		defer predicates.Done()
 		var err error
-		if r.tracesTo {
-			if mf.tracesTo, err = a.tracesTo.mergeFiles(ctx, files.tracesTo, r.tracesToStartTxNum, r.tracesToEndTxNum, workers); err != nil {
+		if r.storage.any() {
+			if mf.storage, mf.storageIdx, mf.storageHist, err =
a.storage.mergeFiles(ctx, files.storage, files.storageIdx, files.storageHist, r.storage, workers, a.ps); err != nil { errCh <- err } } - }() + }(&predicates) - go func() { + go func(predicates *sync.WaitGroup) { defer wg.Done() predicates.Wait() + mxRunningMerges.Inc() + defer mxRunningMerges.Dec() + var err error // requires storage|accounts to be merged at this point if r.commitment.any() { - if mf.commitment, mf.commitmentIdx, mf.commitmentHist, err = a.commitment.mergeFiles(ctx, files, mf, r.commitment, workers); err != nil { + if mf.commitment, mf.commitmentIdx, mf.commitmentHist, err = a.commitment.mergeFiles(ctx, files, mf, r.commitment, workers, a.ps); err != nil { errCh <- err } } - }() + }(&predicates) go func() { wg.Wait() - close(errCh) }() @@ -674,144 +828,23 @@ func (a *Aggregator) integrateMergedFiles(outs SelectedStaticFiles, in MergedFil a.storage.integrateMergedFiles(outs.storage, outs.storageIdx, outs.storageHist, in.storage, in.storageIdx, in.storageHist) a.code.integrateMergedFiles(outs.code, outs.codeIdx, outs.codeHist, in.code, in.codeIdx, in.codeHist) a.commitment.integrateMergedFiles(outs.commitment, outs.commitmentIdx, outs.commitmentHist, in.commitment, in.commitmentIdx, in.commitmentHist) - a.logAddrs.integrateMergedFiles(outs.logAddrs, in.logAddrs) - a.logTopics.integrateMergedFiles(outs.logTopics, in.logTopics) - a.tracesFrom.integrateMergedFiles(outs.tracesFrom, in.tracesFrom) - a.tracesTo.integrateMergedFiles(outs.tracesTo, in.tracesTo) } + func (a *Aggregator) cleanAfterFreeze(in MergedFiles) { a.accounts.cleanAfterFreeze(in.accountsHist) a.storage.cleanAfterFreeze(in.storageHist) a.code.cleanAfterFreeze(in.codeHist) a.commitment.cleanAfterFreeze(in.commitment) - a.logAddrs.cleanAfterFreeze(in.logAddrs) - a.logTopics.cleanAfterFreeze(in.logTopics) - a.tracesFrom.cleanAfterFreeze(in.tracesFrom) - a.tracesTo.cleanAfterFreeze(in.tracesTo) -} -func (ac *AggregatorContext) ReadAccountData(addr []byte, roTx kv.Tx) ([]byte, error) { - return ac.accounts.Get(addr, nil, roTx) -} - -func (ac *AggregatorContext) ReadAccountDataBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) ([]byte, error) { - return ac.accounts.GetBeforeTxNum(addr, txNum, roTx) -} - -func (ac *AggregatorContext) ReadAccountStorage(addr []byte, loc []byte, roTx kv.Tx) ([]byte, error) { - return ac.storage.Get(addr, loc, roTx) -} - -func (ac *AggregatorContext) ReadAccountStorageBeforeTxNum(addr []byte, loc []byte, txNum uint64, roTx kv.Tx) ([]byte, error) { - if cap(ac.keyBuf) < len(addr)+len(loc) { - ac.keyBuf = make([]byte, len(addr)+len(loc)) - } else if len(ac.keyBuf) != len(addr)+len(loc) { - ac.keyBuf = ac.keyBuf[:len(addr)+len(loc)] - } - copy(ac.keyBuf, addr) - copy(ac.keyBuf[len(addr):], loc) - return ac.storage.GetBeforeTxNum(ac.keyBuf, txNum, roTx) -} - -func (ac *AggregatorContext) ReadAccountCode(addr []byte, roTx kv.Tx) ([]byte, error) { - return ac.code.Get(addr, nil, roTx) -} - -func (ac *AggregatorContext) ReadCommitment(addr []byte, roTx kv.Tx) ([]byte, error) { - return ac.commitment.Get(addr, nil, roTx) -} - -func (ac *AggregatorContext) ReadCommitmentBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) ([]byte, error) { - return ac.commitment.GetBeforeTxNum(addr, txNum, roTx) -} - -func (ac *AggregatorContext) ReadAccountCodeBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) ([]byte, error) { - return ac.code.GetBeforeTxNum(addr, txNum, roTx) -} - -func (ac *AggregatorContext) ReadAccountCodeSize(addr []byte, roTx kv.Tx) (int, error) { - code, err := ac.code.Get(addr, nil, roTx) 
- if err != nil { - return 0, err - } - return len(code), nil -} - -func (ac *AggregatorContext) ReadAccountCodeSizeBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) (int, error) { - code, err := ac.code.GetBeforeTxNum(addr, txNum, roTx) - if err != nil { - return 0, err - } - return len(code), nil -} - -func bytesToUint64(buf []byte) (x uint64) { - for i, b := range buf { - x = x<<8 + uint64(b) - if i == 7 { - return - } - } - return -} - -func (a *AggregatorContext) branchFn(prefix []byte) ([]byte, error) { - // Look in the summary table first - stateValue, err := a.ReadCommitment(prefix, a.a.rwTx) - if err != nil { - return nil, fmt.Errorf("failed read branch %x: %w", commitment.CompactedKeyToHex(prefix), err) - } - if stateValue == nil { - return nil, nil - } - // fmt.Printf("Returning branch data prefix [%x], mergeVal=[%x]\n", commitment.CompactedKeyToHex(prefix), stateValue) - return stateValue[2:], nil // Skip touchMap but keep afterMap } -func (a *AggregatorContext) accountFn(plainKey []byte, cell *commitment.Cell) error { - encAccount, err := a.ReadAccountData(plainKey, a.a.rwTx) - if err != nil { - return err - } - cell.Nonce = 0 - cell.Balance.Clear() - copy(cell.CodeHash[:], commitment.EmptyCodeHash) - if len(encAccount) > 0 { - nonce, balance, chash := DecodeAccountBytes(encAccount) - cell.Nonce = nonce - cell.Balance.Set(balance) - if chash != nil { - copy(cell.CodeHash[:], chash) - } - } - - code, err := a.ReadAccountCode(plainKey, a.a.rwTx) - if err != nil { - return err - } - if code != nil { - a.a.commitment.keccak.Reset() - a.a.commitment.keccak.Write(code) - copy(cell.CodeHash[:], a.a.commitment.keccak.Sum(nil)) - } - cell.Delete = len(encAccount) == 0 && len(code) == 0 - return nil -} - -func (a *AggregatorContext) storageFn(plainKey []byte, cell *commitment.Cell) error { - // Look in the summary table first - enc, err := a.ReadAccountStorage(plainKey[:length.Addr], plainKey[length.Addr:], a.a.rwTx) - if err != nil { - return err - } - cell.StorageLen = len(enc) - copy(cell.Storage[:], enc) - cell.Delete = cell.StorageLen == 0 - return nil -} - -// Evaluates commitment for processed state. Commit=true - store trie state after evaluation +// ComputeCommitment evaluates commitment for processed state. +// If `saveStateAfter`=true, then trie state will be saved to DB after commitment evaluation. func (a *Aggregator) ComputeCommitment(saveStateAfter, trace bool) (rootHash []byte, err error) { + // if commitment mode is Disabled, there will be nothing to compute on. 
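+	// the trie walk below yields the new root hash plus a set of branch-node
+	// updates, which are merged into the commitment domain prefix by prefix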
+	mxCommitmentRunning.Inc()
 	rootHash, branchNodeUpdates, err := a.commitment.ComputeCommitment(trace)
+	mxCommitmentRunning.Dec()
+
 	if err != nil {
 		return nil, err
 	}
@@ -819,6 +852,11 @@ func (a *Aggregator) ComputeCommitment(saveStateAfter, trace bool) (rootHash []b
 		saveStateAfter = false
 	}
 
+	mxCommitmentKeys.Add(int(a.commitment.comKeys))
+	mxCommitmentTook.Update(a.commitment.comTook.Seconds())
+
+	defer func(t time.Time) { mxCommitmentWriteTook.UpdateDuration(t) }(time.Now())
+
 	for pref, update := range branchNodeUpdates {
 		prefix := []byte(pref)
 
@@ -826,7 +864,7 @@
 		if err != nil {
 			return nil, err
 		}
-
+		mxCommitmentUpdates.Inc()
 		stated := commitment.BranchData(stateValue)
 		merged, err := a.commitment.branchMerger.Merge(stated, update)
 		if err != nil {
@@ -841,6 +879,7 @@
 		if err = a.UpdateCommitmentData(prefix, merged); err != nil {
 			return nil, err
 		}
+		mxCommitmentUpdatesApplied.Inc()
 	}
 
 	if saveStateAfter {
@@ -852,51 +891,57 @@
 	return rootHash, nil
 }
 
-func (a *Aggregator) ReadyToFinishTx() bool {
-	return (a.txNum+1)%a.aggregationStep == 0 && a.seekTxNum < a.txNum
+// AggregatedRoots provides a channel which receives the commitment root hash each time an aggregation step has occurred
+func (a *Aggregator) AggregatedRoots() chan [length.Hash]byte {
+	return a.stepDoneNotice
 }
 
-func (a *Aggregator) SetCommitFn(fn func(txNum uint64) error) {
-	a.commitFn = fn
+func (a *Aggregator) notifyAggregated(rootHash []byte) {
+	rh := (*[length.Hash]byte)(rootHash)
+	select {
+	case a.stepDoneNotice <- *rh:
+	default:
+	}
 }
 
-func (a *Aggregator) FinishTx() error {
+func (a *Aggregator) ReadyToFinishTx() bool {
+	return (a.txNum+1)%a.aggregationStep == 0 && a.seekTxNum < a.txNum
+}
+
+func (a *Aggregator) FinishTx() (err error) {
 	atomic.AddUint64(&a.stats.TxCount, 1)
 	if !a.ReadyToFinishTx() {
 		return nil
 	}
-	_, err := a.ComputeCommitment(true, false)
+
+	mxRunningMerges.Inc()
+	defer mxRunningMerges.Dec()
+
+	a.commitment.patriciaTrie.ResetFns(a.defaultCtx.branchFn, a.defaultCtx.accountFn, a.defaultCtx.storageFn)
+	rootHash, err := a.ComputeCommitment(true, false)
 	if err != nil {
 		return err
 	}
 	step := a.txNum / a.aggregationStep
+	mxStepCurrent.Set(step)
+
 	if step == 0 {
-		if a.commitFn != nil {
-			if err := a.commitFn(a.txNum); err != nil {
-				return fmt.Errorf("aggregator: db commit on finishTx failed, txNum=%d err=%w", a.txNum, err)
-			}
-		}
+		a.notifyAggregated(rootHash)
 		return nil
 	}
 	step-- // Leave one step worth in the DB
 
-	if err := a.Flush(context.TODO()); err != nil {
-		return err
-	}
 	ctx := context.Background()
-	if err := a.aggregate(ctx, step); err != nil {
+	if err := a.Flush(ctx); err != nil {
 		return err
 	}
-	if a.commitFn != nil {
-		if err := a.commitFn(a.txNum); err != nil {
-			return err
-		}
+	if err := a.aggregate(ctx, step); err != nil {
+		return err
 	}
-	//a.defaultCtx = a.MakeContext()
-
+	a.notifyAggregated(rootHash)
 	return nil
 }
 
@@ -966,34 +1011,35 @@ func (a *Aggregator) AddLogTopic(topic []byte) error {
 	return a.logTopics.Add(topic)
 }
 
-func (ac *AggregatorContext) LogAddrIterator(addr []byte, startTxNum, endTxNum int, roTx kv.Tx) (*InvertedIterator, error) {
-	return ac.logAddrs.IterateRange(addr, startTxNum, endTxNum, order.Asc, -1, roTx)
-}
-
-func (ac *AggregatorContext) LogTopicIterator(topic []byte, startTxNum, endTxNum int, roTx kv.Tx) (*InvertedIterator, error) {
-	return
ac.logTopics.IterateRange(topic, startTxNum, endTxNum, order.Asc, -1, roTx) -} - -func (ac *AggregatorContext) TraceFromIterator(addr []byte, startTxNum, endTxNum int, roTx kv.Tx) (*InvertedIterator, error) { - return ac.tracesFrom.IterateRange(addr, startTxNum, endTxNum, order.Asc, -1, roTx) -} - -func (ac *AggregatorContext) TraceToIterator(addr []byte, startTxNum, endTxNum int, roTx kv.Tx) (*InvertedIterator, error) { - return ac.tracesTo.IterateRange(addr, startTxNum, endTxNum, order.Asc, -1, roTx) -} - // StartWrites - pattern: `defer agg.StartWrites().FinishWrites()` func (a *Aggregator) StartWrites() *Aggregator { - a.accounts.StartWrites(a.tmpdir) - a.storage.StartWrites(a.tmpdir) - a.code.StartWrites(a.tmpdir) - a.commitment.StartWrites(a.tmpdir) - a.logAddrs.StartWrites(a.tmpdir) - a.logTopics.StartWrites(a.tmpdir) - a.tracesFrom.StartWrites(a.tmpdir) - a.tracesTo.StartWrites(a.tmpdir) + a.accounts.StartWrites() + a.storage.StartWrites() + a.code.StartWrites() + a.commitment.StartWrites() + a.logAddrs.StartWrites() + a.logTopics.StartWrites() + a.tracesFrom.StartWrites() + a.tracesTo.StartWrites() + + if a.defaultCtx != nil { + a.defaultCtx.Close() + } + a.defaultCtx = &AggregatorContext{ + a: a, + accounts: a.accounts.defaultDc, + storage: a.storage.defaultDc, + code: a.code.defaultDc, + commitment: a.commitment.defaultDc, + logAddrs: a.logAddrs.MakeContext(), + logTopics: a.logTopics.MakeContext(), + tracesFrom: a.tracesFrom.MakeContext(), + tracesTo: a.tracesTo.MakeContext(), + } + a.commitment.patriciaTrie.ResetFns(a.defaultCtx.branchFn, a.defaultCtx.accountFn, a.defaultCtx.storageFn) return a } + func (a *Aggregator) FinishWrites() { a.accounts.FinishWrites() a.storage.FinishWrites() @@ -1007,7 +1053,6 @@ func (a *Aggregator) FinishWrites() { // Flush - must be called before Collate, if you did some writes func (a *Aggregator) Flush(ctx context.Context) error { - // TODO: Add support of commitment! 
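+	// Rotate() snapshots each domain's pending in-memory writes into a flusher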
flushers := []flusher{ a.accounts.Rotate(), a.storage.Rotate(), @@ -1028,10 +1073,13 @@ func (a *Aggregator) Flush(ctx context.Context) error { } type FilesStats struct { - TxCount uint64 - FilesCount uint64 - IdxSize uint64 - DataSize uint64 + HistoryReads uint64 + TotalReads uint64 + IdxAccess time.Duration + TxCount uint64 + FilesCount uint64 + IdxSize uint64 + DataSize uint64 } func (a *Aggregator) Stats() FilesStats { @@ -1040,6 +1088,9 @@ func (a *Aggregator) Stats() FilesStats { res.IdxSize = stat.IndexSize res.DataSize = stat.DataSize res.FilesCount = stat.FilesCount + res.HistoryReads = stat.HistoryQueries.Load() + res.TotalReads = stat.TotalQueries.Load() + res.IdxAccess = stat.EfSearchTime return res } @@ -1069,6 +1120,137 @@ func (a *Aggregator) MakeContext() *AggregatorContext { tracesTo: a.tracesTo.MakeContext(), } } + +func (ac *AggregatorContext) ReadAccountData(addr []byte, roTx kv.Tx) ([]byte, error) { + return ac.accounts.Get(addr, nil, roTx) +} + +func (ac *AggregatorContext) ReadAccountDataBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) ([]byte, error) { + v, err := ac.accounts.GetBeforeTxNum(addr, txNum, roTx) + return v, err +} + +func (ac *AggregatorContext) ReadAccountStorage(addr []byte, loc []byte, roTx kv.Tx) ([]byte, error) { + return ac.storage.Get(addr, loc, roTx) +} + +func (ac *AggregatorContext) ReadAccountStorageBeforeTxNum(addr []byte, loc []byte, txNum uint64, roTx kv.Tx) ([]byte, error) { + if cap(ac.keyBuf) < len(addr)+len(loc) { + ac.keyBuf = make([]byte, len(addr)+len(loc)) + } else if len(ac.keyBuf) != len(addr)+len(loc) { + ac.keyBuf = ac.keyBuf[:len(addr)+len(loc)] + } + copy(ac.keyBuf, addr) + copy(ac.keyBuf[len(addr):], loc) + v, err := ac.storage.GetBeforeTxNum(ac.keyBuf, txNum, roTx) + return v, err +} + +func (ac *AggregatorContext) ReadAccountCode(addr []byte, roTx kv.Tx) ([]byte, error) { + return ac.code.Get(addr, nil, roTx) +} + +func (ac *AggregatorContext) ReadCommitment(addr []byte, roTx kv.Tx) ([]byte, error) { + return ac.commitment.Get(addr, nil, roTx) +} + +func (ac *AggregatorContext) ReadCommitmentBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) ([]byte, error) { + v, err := ac.commitment.GetBeforeTxNum(addr, txNum, roTx) + return v, err +} + +func (ac *AggregatorContext) ReadAccountCodeBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) ([]byte, error) { + v, err := ac.code.GetBeforeTxNum(addr, txNum, roTx) + return v, err +} + +func (ac *AggregatorContext) ReadAccountCodeSize(addr []byte, roTx kv.Tx) (int, error) { + code, err := ac.code.Get(addr, nil, roTx) + if err != nil { + return 0, err + } + return len(code), nil +} + +func (ac *AggregatorContext) ReadAccountCodeSizeBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) (int, error) { + code, err := ac.code.GetBeforeTxNum(addr, txNum, roTx) + if err != nil { + return 0, err + } + return len(code), nil +} + +func (ac *AggregatorContext) branchFn(prefix []byte) ([]byte, error) { + // Look in the summary table first + stateValue, err := ac.ReadCommitment(prefix, ac.a.rwTx) + if err != nil { + return nil, fmt.Errorf("failed read branch %x: %w", commitment.CompactedKeyToHex(prefix), err) + } + if stateValue == nil { + return nil, nil + } + // fmt.Printf("Returning branch data prefix [%x], mergeVal=[%x]\n", commitment.CompactedKeyToHex(prefix), stateValue) + return stateValue[2:], nil // Skip touchMap but keep afterMap +} + +func (ac *AggregatorContext) accountFn(plainKey []byte, cell *commitment.Cell) error { + encAccount, err := ac.ReadAccountData(plainKey, ac.a.rwTx) + if 
err != nil { + return err + } + cell.Nonce = 0 + cell.Balance.Clear() + copy(cell.CodeHash[:], commitment.EmptyCodeHash) + if len(encAccount) > 0 { + nonce, balance, chash := DecodeAccountBytes(encAccount) + cell.Nonce = nonce + cell.Balance.Set(balance) + if chash != nil { + copy(cell.CodeHash[:], chash) + } + } + + code, err := ac.ReadAccountCode(plainKey, ac.a.rwTx) + if err != nil { + return err + } + if code != nil { + ac.a.commitment.keccak.Reset() + ac.a.commitment.keccak.Write(code) + copy(cell.CodeHash[:], ac.a.commitment.keccak.Sum(nil)) + } + cell.Delete = len(encAccount) == 0 && len(code) == 0 + return nil +} + +func (ac *AggregatorContext) storageFn(plainKey []byte, cell *commitment.Cell) error { + // Look in the summary table first + enc, err := ac.ReadAccountStorage(plainKey[:length.Addr], plainKey[length.Addr:], ac.a.rwTx) + if err != nil { + return err + } + cell.StorageLen = len(enc) + copy(cell.Storage[:], enc) + cell.Delete = cell.StorageLen == 0 + return nil +} + +func (ac *AggregatorContext) LogAddrIterator(addr []byte, startTxNum, endTxNum int, roTx kv.Tx) (iter.U64, error) { + return ac.logAddrs.IdxRange(addr, startTxNum, endTxNum, order.Asc, -1, roTx) +} + +func (ac *AggregatorContext) LogTopicIterator(topic []byte, startTxNum, endTxNum int, roTx kv.Tx) (iter.U64, error) { + return ac.logTopics.IdxRange(topic, startTxNum, endTxNum, order.Asc, -1, roTx) +} + +func (ac *AggregatorContext) TraceFromIterator(addr []byte, startTxNum, endTxNum int, roTx kv.Tx) (iter.U64, error) { + return ac.tracesFrom.IdxRange(addr, startTxNum, endTxNum, order.Asc, -1, roTx) +} + +func (ac *AggregatorContext) TraceToIterator(addr []byte, startTxNum, endTxNum int, roTx kv.Tx) (iter.U64, error) { + return ac.tracesTo.IdxRange(addr, startTxNum, endTxNum, order.Asc, -1, roTx) +} + func (ac *AggregatorContext) Close() { ac.accounts.Close() ac.storage.Close() @@ -1156,7 +1338,7 @@ func EncodeAccountBytes(nonce uint64, balance *uint256.Int, hash []byte, incarna } else { value[pos] = 32 pos++ - copy(value[pos:pos+32], hash[:]) + copy(value[pos:pos+32], hash) pos += 32 } if incarnation == 0 { @@ -1172,3 +1354,13 @@ func EncodeAccountBytes(nonce uint64, balance *uint256.Int, hash []byte, incarna } return value } + +func bytesToUint64(buf []byte) (x uint64) { + for i, b := range buf { + x = x<<8 + uint64(b) + if i == 7 { + return + } + } + return +} diff --git a/state/aggregator_bench_test.go b/state/aggregator_bench_test.go index 830392cd3..b0107df39 100644 --- a/state/aggregator_bench_test.go +++ b/state/aggregator_bench_test.go @@ -1,20 +1,29 @@ package state import ( + "bytes" "context" + "fmt" "math/rand" "os" + "path" + "path/filepath" "testing" + "time" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" + "github.com/ledgerwatch/erigon-lib/commitment" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" + "github.com/ledgerwatch/erigon-lib/compress" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/ledgerwatch/erigon-lib/recsplit" ) -func testDbAndAggregatorBench(b *testing.B, prefixLen int, aggStep uint64) (string, kv.RwDB, *Aggregator) { +func testDbAndAggregatorBench(b *testing.B, aggStep uint64) (string, kv.RwDB, *Aggregator) { b.Helper() path := b.TempDir() b.Cleanup(func() { os.RemoveAll(path) }) @@ -23,7 +32,7 @@ func testDbAndAggregatorBench(b *testing.B, prefixLen int, aggStep uint64) (stri return kv.ChaindataTablesCfg }).MustOpen() b.Cleanup(db.Close) - agg, err := 
NewAggregator(path, path, aggStep) + agg, err := NewAggregator(path, path, aggStep, CommitmentModeDirect, commitment.VariantHexPatriciaTrie) require.NoError(b, err) b.Cleanup(agg.Close) return path, db, agg @@ -33,12 +42,11 @@ func BenchmarkAggregator_Processing(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - //keys := queueKeys(ctx, 42, length.Addr) longKeys := queueKeys(ctx, 64, length.Addr+length.Hash) vals := queueKeys(ctx, 53, length.Hash) - aggStep := uint64(100_000) - _, db, agg := testDbAndAggregatorBench(b, length.Addr, aggStep) + aggStep := uint64(100_00) + _, db, agg := testDbAndAggregatorBench(b, aggStep) tx, err := db.BeginRw(ctx) require.NoError(b, err) @@ -46,49 +54,20 @@ func BenchmarkAggregator_Processing(b *testing.B) { if tx != nil { tx.Rollback() } - if agg != nil { - agg.Close() - } }() - commit := func(txN uint64) (err error) { - err = tx.Commit() - require.NoError(b, err) - if err != nil { - return err - } - - tx = nil - tx, err = db.BeginRw(ctx) - require.NoError(b, err) - if err != nil { - return err - } - agg.SetTx(tx) - return nil - } - agg.SetCommitFn(commit) agg.SetTx(tx) defer agg.StartWrites().FinishWrites() require.NoError(b, err) - agg.StartWrites() - defer agg.FinishWrites() b.ReportAllocs() b.ResetTimer() - //keyList := make([][]byte, 20000) + for i := 0; i < b.N; i++ { - //var key []byte - //if i >= len(keyList) { - // pi := i % (len(keyList)) - // key = keyList[pi] - //} else { - // key = <-longKeys - // keyList[i] = key - //} key := <-longKeys val := <-vals - agg.SetTxNum(uint64(i)) + txNum := uint64(i) + agg.SetTxNum(txNum) err := agg.WriteAccountStorage(key[:length.Addr], key[length.Addr:], val) require.NoError(b, err) err = agg.FinishTx() @@ -113,3 +92,161 @@ func queueKeys(ctx context.Context, seed, ofSize uint64) <-chan []byte { }() return keys } + +func Benchmark_BtreeIndex_Allocation(b *testing.B) { + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := 0; i < b.N; i++ { + now := time.Now() + count := rnd.Intn(1000000000) + bt := newBtAlloc(uint64(count), uint64(1<<12), true) + bt.traverseDfs() + fmt.Printf("alloc %v\n", time.Since(now)) + } +} + +func Benchmark_BtreeIndex_Search(b *testing.B) { + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + tmp := b.TempDir() + defer os.RemoveAll(tmp) + dataPath := "../../data/storage.256-288.kv" + + indexPath := path.Join(tmp, filepath.Base(dataPath)+".bti") + err := BuildBtreeIndex(dataPath, indexPath) + require.NoError(b, err) + + M := 1024 + bt, err := OpenBtreeIndex(indexPath, dataPath, uint64(M)) + + require.NoError(b, err) + + idx := NewBtIndexReader(bt) + + keys, err := pivotKeysFromKV(dataPath) + require.NoError(b, err) + + for i := 0; i < b.N; i++ { + p := rnd.Intn(len(keys)) + cur, err := idx.Seek(keys[p]) + require.NoErrorf(b, err, "i=%d", i) + require.EqualValues(b, keys[p], cur.key) + require.NotEmptyf(b, cur.Value(), "i=%d", i) + } + + bt.Close() +} + +func benchInitBtreeIndex(b *testing.B, M uint64) (*BtIndex, [][]byte, string) { + b.Helper() + + tmp := b.TempDir() + b.Cleanup(func() { os.RemoveAll(tmp) }) + + dataPath := generateCompressedKV(b, tmp, 52, 10, 1000000) + indexPath := path.Join(tmp, filepath.Base(dataPath)+".bt") + bt, err := CreateBtreeIndex(indexPath, dataPath, M) + require.NoError(b, err) + + keys, err := pivotKeysFromKV(dataPath) + require.NoError(b, err) + return bt, keys, dataPath +} + +func Benchmark_BTree_Seek(b *testing.B) { + M := uint64(1024) + bt, keys, _ := benchInitBtreeIndex(b, M) + defer bt.Close() + 
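+	// time-based seed: every benchmark run probes a different random sample of keys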
+ rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + + b.Run("seek_only", func(b *testing.B) { + for i := 0; i < b.N; i++ { + p := rnd.Intn(len(keys)) + + cur, err := bt.Seek(keys[p]) + require.NoError(b, err) + + require.EqualValues(b, keys[p], cur.key) + } + }) + + b.Run("seek_then_next", func(b *testing.B) { + for i := 0; i < b.N; i++ { + p := rnd.Intn(len(keys)) + + cur, err := bt.Seek(keys[p]) + require.NoError(b, err) + + require.EqualValues(b, keys[p], cur.key) + + prevKey := common.Copy(keys[p]) + ntimer := time.Duration(0) + nextKeys := 5000 + for j := 0; j < nextKeys; j++ { + ntime := time.Now() + + if !cur.Next() { + break + } + ntimer += time.Since(ntime) + + nk := cur.Key() + if bytes.Compare(prevKey, nk) > 0 { + b.Fatalf("prev %s cur %s, next key should be greater", prevKey, nk) + } + prevKey = nk + } + if i%1000 == 0 { + fmt.Printf("next_access_last[of %d keys] %v\n", nextKeys, ntimer/time.Duration(nextKeys)) + } + + } + }) +} + +// requires existing KV index file at ../../data/storage.kv +func Benchmark_Recsplit_Find_ExternalFile(b *testing.B) { + dataPath := "../../data/storage.kv" + f, err := os.Stat(dataPath) + if err != nil || f.IsDir() { + b.Skip("requires existing KV index file at ../../data/storage.kv") + } + + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + tmp := b.TempDir() + + defer os.RemoveAll(tmp) + + indexPath := dataPath + "i" + idx, err := recsplit.OpenIndex(indexPath) + require.NoError(b, err) + idxr := recsplit.NewIndexReader(idx) + + decomp, err := compress.NewDecompressor(dataPath) + require.NoError(b, err) + defer decomp.Close() + + getter := decomp.MakeGetter() + + keys, err := pivotKeysFromKV(dataPath) + require.NoError(b, err) + + for i := 0; i < b.N; i++ { + p := rnd.Intn(len(keys)) + + offset := idxr.Lookup(keys[p]) + getter.Reset(offset) + + require.True(b, getter.HasNext()) + + key, pa := getter.Next(nil) + require.NotEmpty(b, key) + + value, pb := getter.Next(nil) + if pb-pa != 1 { + require.NotEmpty(b, value) + } + + require.NoErrorf(b, err, "i=%d", i) + require.EqualValues(b, keys[p], key) + } +} diff --git a/state/aggregator_fuzz_test.go b/state/aggregator_fuzz_test.go new file mode 100644 index 000000000..0b471a923 --- /dev/null +++ b/state/aggregator_fuzz_test.go @@ -0,0 +1,24 @@ +//go:build !nofuzz + +package state + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Fuzz_BtreeIndex_Allocation(f *testing.F) { + f.Add(uint64(1_000_000), uint64(1024)) + f.Fuzz(func(t *testing.T, keyCount, M uint64) { + if keyCount < M*4 || M < 4 { + t.Skip() + } + bt := newBtAlloc(keyCount, M, false) + bt.traverseDfs() + require.GreaterOrEqual(t, bt.N, keyCount) + + require.LessOrEqual(t, float64(bt.N-keyCount)/float64(bt.N), 0.05) + + }) +} diff --git a/state/aggregator_test.go b/state/aggregator_test.go index 5861fa7a3..1839aeb3a 100644 --- a/state/aggregator_test.go +++ b/state/aggregator_test.go @@ -6,36 +6,82 @@ import ( "fmt" "math/rand" "os" + "path" "path/filepath" "sync/atomic" "testing" + "time" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" + "github.com/ledgerwatch/erigon-lib/commitment" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" + "github.com/ledgerwatch/erigon-lib/compress" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" ) -func testDbAndAggregator(t *testing.T, prefixLen int, aggStep uint64) (string, kv.RwDB, 
*Aggregator) { +func testDbAndAggregator(t *testing.T, aggStep uint64) (string, kv.RwDB, *Aggregator) { t.Helper() path := t.TempDir() - t.Cleanup(func() { os.RemoveAll(path) }) logger := log.New() db := mdbx.NewMDBX(logger).InMem(filepath.Join(path, "db4")).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.ChaindataTablesCfg }).MustOpen() t.Cleanup(db.Close) - agg, err := NewAggregator(path, path, aggStep) + agg, err := NewAggregator(filepath.Join(path, "e4"), filepath.Join(path, "e4tmp"), aggStep, CommitmentModeDirect, commitment.VariantHexPatriciaTrie) require.NoError(t, err) - t.Cleanup(agg.Close) return path, db, agg } +func TestAggregator_WinAccess(t *testing.T) { + _, db, agg := testDbAndAggregator(t, 100) + defer agg.Close() + + tx, err := db.BeginRwNosync(context.Background()) + require.NoError(t, err) + defer func() { + if tx != nil { + tx.Rollback() + } + }() + agg.SetTx(tx) + + agg.StartWrites() + + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + for txNum := uint64(1); txNum <= 100; txNum++ { + agg.SetTxNum(txNum) + + addr := make([]byte, length.Addr) + n, err := rnd.Read(addr) + require.NoError(t, err) + require.EqualValues(t, length.Addr, n) + + buf := EncodeAccountBytes(1, uint256.NewInt(uint64(rand.Intn(10e9))), nil, 0) + err = agg.UpdateAccountData(addr, buf) + require.NoError(t, err) + + var v [8]byte + binary.BigEndian.PutUint64(v[:], txNum) + require.NoError(t, err) + require.NoError(t, agg.FinishTx()) + } + agg.FinishWrites() + + require.NoError(t, err) + err = tx.Commit() + require.NoError(t, err) + tx = nil +} + func TestAggregator_Merge(t *testing.T) { - _, db, agg := testDbAndAggregator(t, 0, 100) + _, db, agg := testDbAndAggregator(t, 1000) + defer agg.Close() tx, err := db.BeginRwNosync(context.Background()) require.NoError(t, err) @@ -46,17 +92,37 @@ func TestAggregator_Merge(t *testing.T) { }() agg.SetTx(tx) - defer agg.StartWrites().FinishWrites() + agg.StartWrites() + txs := uint64(10000) + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) // keys are encodings of numbers 1..31 // each key changes value on every txNum which is multiple of the key var maxWrite, otherMaxWrite uint64 for txNum := uint64(1); txNum <= txs; txNum++ { agg.SetTxNum(txNum) + + addr, loc := make([]byte, length.Addr), make([]byte, length.Hash) + + n, err := rnd.Read(addr) + require.NoError(t, err) + require.EqualValues(t, length.Addr, n) + + n, err = rnd.Read(loc) + require.NoError(t, err) + require.EqualValues(t, length.Hash, n) + //keys[txNum-1] = append(addr, loc...) 
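+	// minimal account record: nonce=1, zero balance, empty code hash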
+ + buf := EncodeAccountBytes(1, uint256.NewInt(0), nil, 0) + err = agg.UpdateAccountData(addr, buf) + require.NoError(t, err) + + err = agg.WriteAccountStorage(addr, loc, []byte{addr[0], loc[0]}) + require.NoError(t, err) + var v [8]byte binary.BigEndian.PutUint64(v[:], txNum) - var err error if txNum%135 == 0 { err = agg.UpdateCommitmentData([]byte("otherroothash"), v[:]) otherMaxWrite = txNum @@ -67,7 +133,7 @@ func TestAggregator_Merge(t *testing.T) { require.NoError(t, err) require.NoError(t, agg.FinishTx()) } - err = agg.Flush(context.Background()) + agg.FinishWrites() require.NoError(t, err) err = tx.Commit() require.NoError(t, err) @@ -79,7 +145,7 @@ func TestAggregator_Merge(t *testing.T) { defer roTx.Rollback() dc := agg.MakeContext() - defer dc.Close() + v, err := dc.ReadCommitment([]byte("roothash"), roTx) require.NoError(t, err) @@ -87,6 +153,7 @@ func TestAggregator_Merge(t *testing.T) { v, err = dc.ReadCommitment([]byte("otherroothash"), roTx) require.NoError(t, err) + dc.Close() require.EqualValues(t, otherMaxWrite, binary.BigEndian.Uint64(v[:])) } @@ -98,7 +165,7 @@ func TestAggregator_Merge(t *testing.T) { // - new aggregator SeekCommitment must return txNum equal to amount of total txns func TestAggregator_RestartOnDatadir(t *testing.T) { aggStep := uint64(50) - path, db, agg := testDbAndAggregator(t, 0, aggStep) + path, db, agg := testDbAndAggregator(t, aggStep) tx, err := db.BeginRw(context.Background()) require.NoError(t, err) @@ -106,28 +173,13 @@ func TestAggregator_RestartOnDatadir(t *testing.T) { if tx != nil { tx.Rollback() } - if agg != nil { - agg.Close() - } }() agg.SetTx(tx) - defer agg.StartWrites().FinishWrites() + agg.StartWrites() var latestCommitTxNum uint64 - commit := func(txn uint64) error { - err = agg.Flush(context.Background()) - require.NoError(t, err) - err = tx.Commit() - require.NoError(t, err) - tx, err = db.BeginRw(context.Background()) - require.NoError(t, err) - t.Logf("commit to db txn=%d", txn) - atomic.StoreUint64(&latestCommitTxNum, txn) - agg.SetTx(tx) - return nil - } - agg.SetCommitFn(commit) + rnd := rand.New(rand.NewSource(time.Now().Unix())) txs := (aggStep / 2) * 19 t.Logf("step=%d tx_count=%d", aggStep, txs) @@ -139,20 +191,38 @@ func TestAggregator_RestartOnDatadir(t *testing.T) { agg.SetTxNum(txNum) binary.BigEndian.PutUint64(aux[:], txNum) + addr, loc := make([]byte, length.Addr), make([]byte, length.Hash) + n, err := rnd.Read(addr) + require.NoError(t, err) + require.EqualValues(t, length.Addr, n) + + n, err = rnd.Read(loc) + require.NoError(t, err) + require.EqualValues(t, length.Hash, n) + //keys[txNum-1] = append(addr, loc...) 
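+	// each txNum writes one account, one storage cell and the "key" commitment record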
+ + buf := EncodeAccountBytes(1, uint256.NewInt(0), nil, 0) + err = agg.UpdateAccountData(addr, buf) + require.NoError(t, err) + + err = agg.WriteAccountStorage(addr, loc, []byte{addr[0], loc[0]}) + require.NoError(t, err) + err = agg.UpdateCommitmentData([]byte("key"), aux[:]) + require.NoError(t, err) maxWrite = txNum require.NoError(t, agg.FinishTx()) } - err = agg.Flush(context.Background()) - require.NoError(t, err) + agg.FinishWrites() + agg.Close() + err = tx.Commit() require.NoError(t, err) - agg.Close() - tx, agg = nil, nil + tx = nil // Start another aggregator on same datadir - anotherAgg, err := NewAggregator(path, path, aggStep) + anotherAgg, err := NewAggregator(filepath.Join(path, "e4"), filepath.Join(path, "e4tmp"), aggStep, CommitmentModeDirect, commitment.VariantHexPatriciaTrie) require.NoError(t, err) require.NoError(t, anotherAgg.ReopenFolder()) @@ -168,7 +238,7 @@ func TestAggregator_RestartOnDatadir(t *testing.T) { anotherAgg.SetTx(rwTx) startTx := anotherAgg.EndTxNumMinimax() - sstartTx, err := anotherAgg.SeekCommitment() + _, sstartTx, err := anotherAgg.SeekCommitment() require.NoError(t, err) require.GreaterOrEqual(t, sstartTx, startTx) require.GreaterOrEqual(t, sstartTx, latestCommitTxNum) @@ -182,19 +252,17 @@ func TestAggregator_RestartOnDatadir(t *testing.T) { defer roTx.Rollback() dc := anotherAgg.MakeContext() - defer dc.Close() v, err := dc.ReadCommitment([]byte("key"), roTx) require.NoError(t, err) + dc.Close() require.EqualValues(t, maxWrite, binary.BigEndian.Uint64(v[:])) } func TestAggregator_RestartOnFiles(t *testing.T) { - aggStep := uint64(1000) + aggStep := uint64(100) - path, db, agg := testDbAndAggregator(t, 0, aggStep) - defer db.Close() - _ = path + path, db, agg := testDbAndAggregator(t, aggStep) tx, err := db.BeginRw(context.Background()) require.NoError(t, err) @@ -202,29 +270,12 @@ func TestAggregator_RestartOnFiles(t *testing.T) { if tx != nil { tx.Rollback() } - if agg != nil { - agg.Close() - } }() agg.SetTx(tx) - defer agg.StartWrites().FinishWrites() - - var latestCommitTxNum uint64 - commit := func(txn uint64) error { - err = tx.Commit() - require.NoError(t, err) - tx, err = db.BeginRw(context.Background()) - require.NoError(t, err) - t.Logf("commit to db txn=%d", txn) - - atomic.StoreUint64(&latestCommitTxNum, txn) - agg.SetTx(tx) - return nil - } - agg.SetCommitFn(commit) + agg.StartWrites() txs := aggStep * 5 - t.Logf("step=%d tx_count=%d", aggStep, txs) + t.Logf("step=%d tx_count=%d\n", aggStep, txs) rnd := rand.New(rand.NewSource(0)) keys := make([][]byte, txs) @@ -253,13 +304,13 @@ func TestAggregator_RestartOnFiles(t *testing.T) { err = agg.FinishTx() require.NoError(t, err) } + agg.FinishWrites() err = tx.Commit() + require.NoError(t, err) tx = nil db.Close() - db = nil agg.Close() - agg = nil require.NoError(t, os.RemoveAll(filepath.Join(path, "db4"))) @@ -273,27 +324,26 @@ func TestAggregator_RestartOnFiles(t *testing.T) { require.NoError(t, err) defer newTx.Rollback() - newAgg, err := NewAggregator(path, path, aggStep) + newAgg, err := NewAggregator(path, path, aggStep, CommitmentModeDirect, commitment.VariantHexPatriciaTrie) require.NoError(t, err) require.NoError(t, newAgg.ReopenFolder()) - defer newAgg.Close() newAgg.SetTx(newTx) + newAgg.StartWrites() - latestTx, err := newAgg.SeekCommitment() + _, latestTx, err := newAgg.SeekCommitment() require.NoError(t, err) t.Logf("seek to latest_tx=%d", latestTx) - ctx := newAgg.MakeContext() - defer ctx.Close() + ctx := newAgg.defaultCtx miss := uint64(0) for i, key := range keys { 
+		if uint64(i+1) >= txs-aggStep {
+			continue // FinishTx always stores the last aggregation step in the DB, which we deleted, so missing values that were not aggregated are expected
+		}
 		stored, err := ctx.ReadAccountData(key[:length.Addr], newTx)
 		require.NoError(t, err)
 		if len(stored) == 0 {
-			if uint64(i+1) >= txs-aggStep {
-				continue // finishtx always stores last agg step in db which we deleted, so miss is expected
-			}
 			miss++
 			fmt.Printf("%x [%d/%d]", key, miss, i+1) // txnum starts from 1
 			continue
@@ -307,16 +357,18 @@
 		require.EqualValues(t, key[0], storedV[0])
 		require.EqualValues(t, key[length.Addr], storedV[1])
 	}
-	require.NoError(t, err)
+	newAgg.FinishWrites()
+	ctx.Close()
+	newAgg.Close()
 
+	require.NoError(t, err)
 }
 
 func TestAggregator_ReplaceCommittedKeys(t *testing.T) {
-	aggStep := uint64(1000)
+	aggStep := uint64(500)
 
-	path, db, agg := testDbAndAggregator(t, 0, aggStep)
-	defer db.Close()
-	_ = path
+	_, db, agg := testDbAndAggregator(t, aggStep)
+	t.Cleanup(agg.Close)
 
 	tx, err := db.BeginRw(context.Background())
 	require.NoError(t, err)
@@ -324,9 +376,6 @@
 		if tx != nil {
 			tx.Rollback()
 		}
-		if agg != nil {
-			agg.Close()
-		}
 	}()
 	agg.SetTx(tx)
 	defer agg.StartWrites().FinishWrites()
@@ -343,9 +392,9 @@
 		agg.SetTx(tx)
 		return nil
 	}
-	agg.SetCommitFn(commit)
-	txs := aggStep / 2 * 20
+	roots := agg.AggregatedRoots()
+	txs := (aggStep) * StepsInBiggestFile
 	t.Logf("step=%d tx_count=%d", aggStep, txs)
 
 	rnd := rand.New(rand.NewSource(0))
@@ -373,6 +422,12 @@
 		err = agg.FinishTx()
 		require.NoError(t, err)
+		select {
+		case <-roots:
+			require.NoError(t, commit(txNum))
+		default:
+			continue
+		}
 	}
 
 	half := txs / 2
@@ -394,18 +449,14 @@
 	tx, err = db.BeginRw(context.Background())
 	require.NoError(t, err)
 
-	ctx := agg.storage.MakeContext()
-	defer ctx.Close()
+	ctx := agg.defaultCtx
 	for _, key := range keys {
-		storedV, err := ctx.Get(key[:length.Addr], key[length.Addr:], tx)
+		storedV, err := ctx.ReadAccountStorage(key[:length.Addr], key[length.Addr:], tx)
 		require.NoError(t, err)
 		require.EqualValues(t, key[0], storedV[0])
 		require.EqualValues(t, key[length.Addr], storedV[1])
 	}
 	require.NoError(t, err)
-
-	agg.Close()
-	agg = nil
 }
 
 func Test_EncodeCommitmentState(t *testing.T) {
@@ -427,3 +478,157 @@ func Test_EncodeCommitmentState(t *testing.T) {
 	require.EqualValues(t, cs.txNum, dec.txNum)
 	require.EqualValues(t, cs.trieState, dec.trieState)
 }
+
+func Test_BtreeIndex_Seek(t *testing.T) {
+	tmp := t.TempDir()
+
+	keyCount, M := 120000, 1024
+	dataPath := generateCompressedKV(t, tmp, 52, 180 /*val size*/, keyCount)
+	defer os.RemoveAll(tmp)
+
+	indexPath := path.Join(tmp, filepath.Base(dataPath)+".bti")
+	err := BuildBtreeIndex(dataPath, indexPath)
+	require.NoError(t, err)
+
+	bt, err := OpenBtreeIndex(indexPath, dataPath, uint64(M))
+	require.NoError(t, err)
+	require.EqualValues(t, bt.KeyCount(), keyCount)
+
+	keys, err := pivotKeysFromKV(dataPath)
+	require.NoError(t, err)
+
+	for i := 0; i < len(keys); i++ {
+		cur, err := bt.Seek(keys[i])
+		require.NoErrorf(t, err, "i=%d", i)
+		require.EqualValues(t, keys[i], cur.key)
+		require.NotEmptyf(t, cur.Value(), "i=%d", i)
+		// require.EqualValues(t, uint64(i), cur.Value())
+	}
+	for i := 1; i < len(keys); i++ {
+		alt := common.Copy(keys[i])
+		for j := len(alt) - 1; j >= 0; j-- {
+			if alt[j] > 0 {
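+				// lower the last non-zero byte so alt sorts immediately before keys[i]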
+ alt[j] -= 1 + break + } + } + cur, err := bt.Seek(keys[i]) + require.NoError(t, err) + require.EqualValues(t, keys[i], cur.Key()) + } + + bt.Close() +} + +func pivotKeysFromKV(dataPath string) ([][]byte, error) { + decomp, err := compress.NewDecompressor(dataPath) + if err != nil { + return nil, err + } + + getter := decomp.MakeGetter() + getter.Reset(0) + + key := make([]byte, 0, 64) + + listing := make([][]byte, 0, 1000) + + for getter.HasNext() { + if len(listing) > 100000 { + break + } + key, _ := getter.Next(key[:0]) + listing = append(listing, common.Copy(key)) + getter.Skip() + } + decomp.Close() + + return listing, nil +} + +func generateCompressedKV(tb testing.TB, tmp string, keySize, valueSize, keyCount int) string { + tb.Helper() + + args := BtIndexWriterArgs{ + IndexFile: path.Join(tmp, fmt.Sprintf("%dk.bt", keyCount/1000)), + TmpDir: tmp, + KeyCount: 12, + } + + iw, err := NewBtIndexWriter(args) + require.NoError(tb, err) + + defer iw.Close() + rnd := rand.New(rand.NewSource(0)) + values := make([]byte, valueSize) + + dataPath := path.Join(tmp, fmt.Sprintf("%dk.kv", keyCount/1000)) + comp, err := compress.NewCompressor(context.Background(), "cmp", dataPath, tmp, compress.MinPatternScore, 1, log.LvlDebug) + require.NoError(tb, err) + + for i := 0; i < keyCount; i++ { + key := make([]byte, keySize) + n, err := rnd.Read(key[:]) + require.EqualValues(tb, keySize, n) + binary.BigEndian.PutUint64(key[keySize-8:], uint64(i)) + require.NoError(tb, err) + err = comp.AddWord(key[:]) + require.NoError(tb, err) + + n, err = rnd.Read(values[:rnd.Intn(valueSize)+1]) + require.NoError(tb, err) + + err = comp.AddWord(values[:n]) + require.NoError(tb, err) + } + + err = comp.Compress() + require.NoError(tb, err) + comp.Close() + + decomp, err := compress.NewDecompressor(dataPath) + require.NoError(tb, err) + + getter := decomp.MakeGetter() + getter.Reset(0) + + var pos uint64 + key := make([]byte, keySize) + for i := 0; i < keyCount; i++ { + if !getter.HasNext() { + tb.Fatalf("not enough values at %d", i) + break + } + + keys, _ := getter.Next(key[:0]) + err = iw.AddKey(keys[:], pos) + + pos = getter.Skip() + require.NoError(tb, err) + } + decomp.Close() + + require.NoError(tb, iw.Build()) + iw.Close() + + return decomp.FilePath() +} + +func Test_InitBtreeIndex(t *testing.T) { + tmp := t.TempDir() + defer os.RemoveAll(tmp) + + keyCount, M := 100, uint64(4) + compPath := generateCompressedKV(t, tmp, 52, 300, keyCount) + decomp, err := compress.NewDecompressor(compPath) + require.NoError(t, err) + defer decomp.Close() + + err = BuildBtreeIndexWithDecompressor(tmp+".bt", decomp, &background.Progress{}) + require.NoError(t, err) + + bt, err := OpenBtreeIndexWithDecompressor(tmp+".bt", M, decomp) + require.NoError(t, err) + require.EqualValues(t, bt.KeyCount(), keyCount) + bt.Close() +} diff --git a/state/aggregator_v3.go b/state/aggregator_v3.go index 033acb0ac..e2f04d4e5 100644 --- a/state/aggregator_v3.go +++ b/state/aggregator_v3.go @@ -25,21 +25,21 @@ import ( "runtime" "strings" "sync" + "sync/atomic" "time" "github.com/RoaringBitmap/roaring/roaring64" - "github.com/ledgerwatch/erigon-lib/common/dbg" - "github.com/ledgerwatch/erigon-lib/kv/order" - "github.com/ledgerwatch/log/v3" - "go.uber.org/atomic" - "golang.org/x/sync/errgroup" - "golang.org/x/sync/semaphore" - common2 "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/erigon-lib/common/cmp" + "github.com/ledgerwatch/erigon-lib/common/dbg" 
"github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/bitmapdb" + "github.com/ledgerwatch/erigon-lib/kv/iter" + "github.com/ledgerwatch/erigon-lib/kv/order" + "github.com/ledgerwatch/log/v3" + "golang.org/x/sync/errgroup" ) type AggregatorV3 struct { @@ -61,71 +61,109 @@ type AggregatorV3 struct { keepInDB uint64 maxTxNum atomic.Uint64 - openCloseLock sync.Mutex + filesMutationLock sync.Mutex working atomic.Bool workingMerge atomic.Bool workingOptionalIndices atomic.Bool - warmupWorking atomic.Bool - ctx context.Context - ctxCancel context.CancelFunc + //warmupWorking atomic.Bool + ctx context.Context + ctxCancel context.CancelFunc - wg sync.WaitGroup + needSaveFilesListInDB atomic.Bool + wg sync.WaitGroup + + onFreeze OnFreezeFunc + walLock sync.RWMutex + + ps *background.ProgressSet } +type OnFreezeFunc func(frozenFileNames []string) + func NewAggregatorV3(ctx context.Context, dir, tmpdir string, aggregationStep uint64, db kv.RoDB) (*AggregatorV3, error) { ctx, ctxCancel := context.WithCancel(ctx) - a := &AggregatorV3{ctx: ctx, ctxCancel: ctxCancel, dir: dir, tmpdir: tmpdir, aggregationStep: aggregationStep, backgroundResult: &BackgroundResult{}, db: db, keepInDB: 2 * aggregationStep} + a := &AggregatorV3{ctx: ctx, ctxCancel: ctxCancel, ps: background.NewProgressSet(), onFreeze: func(frozenFileNames []string) {}, dir: dir, tmpdir: tmpdir, aggregationStep: aggregationStep, backgroundResult: &BackgroundResult{}, db: db, keepInDB: 2 * aggregationStep} var err error - if a.accounts, err = NewHistory(dir, a.tmpdir, aggregationStep, "accounts", kv.AccountHistoryKeys, kv.AccountIdx, kv.AccountHistoryVals, kv.AccountSettings, false /* compressVals */, nil); err != nil { - return nil, fmt.Errorf("ReopenFolder: %w", err) + if a.accounts, err = NewHistory(dir, a.tmpdir, aggregationStep, "accounts", kv.AccountHistoryKeys, kv.AccountIdx, kv.AccountHistoryVals, false, nil, false); err != nil { + return nil, err } - if a.storage, err = NewHistory(dir, a.tmpdir, aggregationStep, "storage", kv.StorageHistoryKeys, kv.StorageIdx, kv.StorageHistoryVals, kv.StorageSettings, false /* compressVals */, nil); err != nil { - return nil, fmt.Errorf("ReopenFolder: %w", err) + if a.storage, err = NewHistory(dir, a.tmpdir, aggregationStep, "storage", kv.StorageHistoryKeys, kv.StorageIdx, kv.StorageHistoryVals, false, nil, false); err != nil { + return nil, err } - if a.code, err = NewHistory(dir, a.tmpdir, aggregationStep, "code", kv.CodeHistoryKeys, kv.CodeIdx, kv.CodeHistoryVals, kv.CodeSettings, true /* compressVals */, nil); err != nil { - return nil, fmt.Errorf("ReopenFolder: %w", err) + if a.code, err = NewHistory(dir, a.tmpdir, aggregationStep, "code", kv.CodeHistoryKeys, kv.CodeIdx, kv.CodeHistoryVals, true, nil, true); err != nil { + return nil, err } if a.logAddrs, err = NewInvertedIndex(dir, a.tmpdir, aggregationStep, "logaddrs", kv.LogAddressKeys, kv.LogAddressIdx, false, nil); err != nil { - return nil, fmt.Errorf("ReopenFolder: %w", err) + return nil, err } if a.logTopics, err = NewInvertedIndex(dir, a.tmpdir, aggregationStep, "logtopics", kv.LogTopicsKeys, kv.LogTopicsIdx, false, nil); err != nil { - return nil, fmt.Errorf("ReopenFolder: %w", err) + return nil, err } if a.tracesFrom, err = NewInvertedIndex(dir, a.tmpdir, aggregationStep, "tracesfrom", kv.TracesFromKeys, kv.TracesFromIdx, false, nil); err != nil { - return nil, fmt.Errorf("ReopenFolder: %w", err) + return nil, err } if a.tracesTo, err = NewInvertedIndex(dir, a.tmpdir, 
aggregationStep, "tracesto", kv.TracesToKeys, kv.TracesToIdx, false, nil); err != nil { - return nil, fmt.Errorf("ReopenFolder: %w", err) + return nil, err } a.recalcMaxTxNum() return a, nil } +func (a *AggregatorV3) OnFreeze(f OnFreezeFunc) { a.onFreeze = f } -func (a *AggregatorV3) ReopenFolder() error { - a.openCloseLock.Lock() - defer a.openCloseLock.Unlock() +func (a *AggregatorV3) OpenFolder() error { + a.filesMutationLock.Lock() + defer a.filesMutationLock.Unlock() var err error - if err = a.accounts.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.accounts.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) } - if err = a.storage.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.storage.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) } - if err = a.code.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.code.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) } - if err = a.logAddrs.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.logAddrs.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) } - if err = a.logTopics.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.logTopics.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) } - if err = a.tracesFrom.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.tracesFrom.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) } - if err = a.tracesTo.reOpenFolder(); err != nil { - return fmt.Errorf("ReopenFolder: %w", err) + if err = a.tracesTo.OpenFolder(); err != nil { + return fmt.Errorf("OpenFolder: %w", err) + } + a.recalcMaxTxNum() + return nil +} +func (a *AggregatorV3) OpenList(fNames []string) error { + a.filesMutationLock.Lock() + defer a.filesMutationLock.Unlock() + + var err error + if err = a.accounts.OpenList(fNames); err != nil { + return err + } + if err = a.storage.OpenList(fNames); err != nil { + return err + } + if err = a.code.OpenList(fNames); err != nil { + return err + } + if err = a.logAddrs.OpenList(fNames); err != nil { + return err + } + if err = a.logTopics.OpenList(fNames); err != nil { + return err + } + if err = a.tracesFrom.OpenList(fNames); err != nil { + return err + } + if err = a.tracesTo.OpenList(fNames); err != nil { + return err } a.recalcMaxTxNum() return nil @@ -135,8 +173,8 @@ func (a *AggregatorV3) Close() { a.ctxCancel() a.wg.Wait() - a.openCloseLock.Lock() - defer a.openCloseLock.Unlock() + a.filesMutationLock.Lock() + defer a.filesMutationLock.Unlock() a.accounts.Close() a.storage.Close() @@ -170,9 +208,12 @@ func (a *AggregatorV3) SetWorkers(i int) { a.tracesTo.compressWorkers = i } +func (a *AggregatorV3) HasBackgroundFilesBuild() bool { return a.ps.Has() } +func (a *AggregatorV3) BackgroundProgress() string { return a.ps.String() } + func (a *AggregatorV3) Files() (res []string) { - a.openCloseLock.Lock() - defer a.openCloseLock.Unlock() + a.filesMutationLock.Lock() + defer a.filesMutationLock.Unlock() res = append(res, a.accounts.Files()...) res = append(res, a.storage.Files()...) 
@@ -184,16 +225,17 @@ func (a *AggregatorV3) Files() (res []string) { return res } func (a *AggregatorV3) BuildOptionalMissedIndicesInBackground(ctx context.Context, workers int) { - if a.workingOptionalIndices.Load() { + if ok := a.workingOptionalIndices.CompareAndSwap(false, true); !ok { return } - a.workingOptionalIndices.Store(true) - a.wg.Add(1) go func() { - a.wg.Done() + defer a.wg.Done() defer a.workingOptionalIndices.Store(false) if err := a.BuildOptionalMissedIndices(ctx, workers); err != nil { + if errors.Is(err, context.Canceled) { + return + } log.Warn("merge", "err", err) } }() @@ -214,34 +256,45 @@ func (a *AggregatorV3) BuildOptionalMissedIndices(ctx context.Context, workers i return g.Wait() } -func (a *AggregatorV3) BuildMissedIndices(ctx context.Context, sem *semaphore.Weighted) error { - g, ctx := errgroup.WithContext(ctx) - if a.accounts != nil { - g.Go(func() error { return a.accounts.BuildMissedIndices(ctx, sem) }) - } - if a.storage != nil { - g.Go(func() error { return a.storage.BuildMissedIndices(ctx, sem) }) - } - if a.code != nil { - g.Go(func() error { return a.code.BuildMissedIndices(ctx, sem) }) - } - if a.logAddrs != nil { - g.Go(func() error { return a.logAddrs.BuildMissedIndices(ctx, sem) }) - } - if a.logTopics != nil { - g.Go(func() error { return a.logTopics.BuildMissedIndices(ctx, sem) }) - } - if a.tracesFrom != nil { - g.Go(func() error { return a.tracesFrom.BuildMissedIndices(ctx, sem) }) - } - if a.tracesTo != nil { - g.Go(func() error { return a.tracesTo.BuildMissedIndices(ctx, sem) }) - } +func (a *AggregatorV3) BuildMissedIndices(ctx context.Context, workers int) error { + startIndexingTime := time.Now() + { + ps := background.NewProgressSet() - if err := g.Wait(); err != nil { - return err + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(workers) + go func() { + logEvery := time.NewTicker(20 * time.Second) + defer logEvery.Stop() + for { + select { + case <-ctx.Done(): + return + case <-logEvery.C: + var m runtime.MemStats + dbg.ReadMemStats(&m) + log.Info("[snapshots] Indexing", "progress", ps.String(), "total-indexing-time", time.Since(startIndexingTime).Round(time.Second).String(), "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) + } + } + }() + + a.accounts.BuildMissedIndices(ctx, g, ps) + a.storage.BuildMissedIndices(ctx, g, ps) + a.code.BuildMissedIndices(ctx, g, ps) + a.logAddrs.BuildMissedIndices(ctx, g, ps) + a.logTopics.BuildMissedIndices(ctx, g, ps) + a.tracesFrom.BuildMissedIndices(ctx, g, ps) + a.tracesTo.BuildMissedIndices(ctx, g, ps) + + if err := g.Wait(); err != nil { + return err + } + if err := a.OpenFolder(); err != nil { + return err + } } - return a.BuildOptionalMissedIndices(ctx, 4) + + return a.BuildOptionalMissedIndices(ctx, workers) } func (a *AggregatorV3) SetLogPrefix(v string) { a.logPrefix = v } @@ -297,12 +350,12 @@ func (c AggV3Collation) Close() { } } -func (a *AggregatorV3) buildFiles(ctx context.Context, step uint64, txFrom, txTo uint64, db kv.RoDB) (AggV3StaticFiles, error) { - logEvery := time.NewTicker(60 * time.Second) - defer logEvery.Stop() - defer func(t time.Time) { - log.Info(fmt.Sprintf("[snapshot] build %d-%d", step, step+1), "took", time.Since(t)) - }(time.Now()) +func (a *AggregatorV3) buildFiles(ctx context.Context, step, txFrom, txTo uint64) (AggV3StaticFiles, error) { + //logEvery := time.NewTicker(60 * time.Second) + //defer logEvery.Stop() + //defer func(t time.Time) { + // log.Info(fmt.Sprintf("[snapshot] build %d-%d", step, step+1), "took", time.Since(t)) + 
//}(time.Now()) var sf AggV3StaticFiles var ac AggV3Collation closeColl := true @@ -317,15 +370,15 @@ func (a *AggregatorV3) buildFiles(ctx context.Context, step uint64, txFrom, txTo //go func() { // defer wg.Done() var err error - if err = db.View(ctx, func(tx kv.Tx) error { - ac.accounts, err = a.accounts.collate(step, txFrom, txTo, tx, logEvery) + if err = a.db.View(ctx, func(tx kv.Tx) error { + ac.accounts, err = a.accounts.collate(step, txFrom, txTo, tx) return err }); err != nil { return sf, err //errCh <- err } - if sf.accounts, err = a.accounts.buildFiles(ctx, step, ac.accounts); err != nil { + if sf.accounts, err = a.accounts.buildFiles(ctx, step, ac.accounts, a.ps); err != nil { return sf, err //errCh <- err } @@ -334,15 +387,15 @@ func (a *AggregatorV3) buildFiles(ctx context.Context, step uint64, txFrom, txTo //go func() { // defer wg.Done() // var err error - if err = db.View(ctx, func(tx kv.Tx) error { - ac.storage, err = a.storage.collate(step, txFrom, txTo, tx, logEvery) + if err = a.db.View(ctx, func(tx kv.Tx) error { + ac.storage, err = a.storage.collate(step, txFrom, txTo, tx) return err }); err != nil { return sf, err //errCh <- err } - if sf.storage, err = a.storage.buildFiles(ctx, step, ac.storage); err != nil { + if sf.storage, err = a.storage.buildFiles(ctx, step, ac.storage, a.ps); err != nil { return sf, err //errCh <- err } @@ -350,15 +403,15 @@ func (a *AggregatorV3) buildFiles(ctx context.Context, step uint64, txFrom, txTo //go func() { // defer wg.Done() // var err error - if err = db.View(ctx, func(tx kv.Tx) error { - ac.code, err = a.code.collate(step, txFrom, txTo, tx, logEvery) + if err = a.db.View(ctx, func(tx kv.Tx) error { + ac.code, err = a.code.collate(step, txFrom, txTo, tx) return err }); err != nil { return sf, err //errCh <- err } - if sf.code, err = a.code.buildFiles(ctx, step, ac.code); err != nil { + if sf.code, err = a.code.buildFiles(ctx, step, ac.code, a.ps); err != nil { return sf, err //errCh <- err } @@ -366,15 +419,15 @@ func (a *AggregatorV3) buildFiles(ctx context.Context, step uint64, txFrom, txTo //go func() { // defer wg.Done() // var err error - if err = db.View(ctx, func(tx kv.Tx) error { - ac.logAddrs, err = a.logAddrs.collate(ctx, txFrom, txTo, tx, logEvery) + if err = a.db.View(ctx, func(tx kv.Tx) error { + ac.logAddrs, err = a.logAddrs.collate(ctx, txFrom, txTo, tx) return err }); err != nil { return sf, err //errCh <- err } - if sf.logAddrs, err = a.logAddrs.buildFiles(ctx, step, ac.logAddrs); err != nil { + if sf.logAddrs, err = a.logAddrs.buildFiles(ctx, step, ac.logAddrs, a.ps); err != nil { return sf, err //errCh <- err } @@ -382,15 +435,15 @@ func (a *AggregatorV3) buildFiles(ctx context.Context, step uint64, txFrom, txTo //go func() { // defer wg.Done() // var err error - if err = db.View(ctx, func(tx kv.Tx) error { - ac.logTopics, err = a.logTopics.collate(ctx, txFrom, txTo, tx, logEvery) + if err = a.db.View(ctx, func(tx kv.Tx) error { + ac.logTopics, err = a.logTopics.collate(ctx, txFrom, txTo, tx) return err }); err != nil { return sf, err //errCh <- err } - if sf.logTopics, err = a.logTopics.buildFiles(ctx, step, ac.logTopics); err != nil { + if sf.logTopics, err = a.logTopics.buildFiles(ctx, step, ac.logTopics, a.ps); err != nil { return sf, err //errCh <- err } @@ -398,15 +451,15 @@ func (a *AggregatorV3) buildFiles(ctx context.Context, step uint64, txFrom, txTo //go func() { // defer wg.Done() // var err error - if err = db.View(ctx, func(tx kv.Tx) error { - ac.tracesFrom, err = a.tracesFrom.collate(ctx, txFrom, 
txTo, tx, logEvery) + if err = a.db.View(ctx, func(tx kv.Tx) error { + ac.tracesFrom, err = a.tracesFrom.collate(ctx, txFrom, txTo, tx) return err }); err != nil { return sf, err //errCh <- err } - if sf.tracesFrom, err = a.tracesFrom.buildFiles(ctx, step, ac.tracesFrom); err != nil { + if sf.tracesFrom, err = a.tracesFrom.buildFiles(ctx, step, ac.tracesFrom, a.ps); err != nil { return sf, err //errCh <- err } @@ -414,15 +467,15 @@ func (a *AggregatorV3) buildFiles(ctx context.Context, step uint64, txFrom, txTo //go func() { // defer wg.Done() // var err error - if err = db.View(ctx, func(tx kv.Tx) error { - ac.tracesTo, err = a.tracesTo.collate(ctx, txFrom, txTo, tx, logEvery) + if err = a.db.View(ctx, func(tx kv.Tx) error { + ac.tracesTo, err = a.tracesTo.collate(ctx, txFrom, txTo, tx) return err }); err != nil { return sf, err //errCh <- err } - if sf.tracesTo, err = a.tracesTo.buildFiles(ctx, step, ac.tracesTo); err != nil { + if sf.tracesTo, err = a.tracesTo.buildFiles(ctx, step, ac.tracesTo, a.ps); err != nil { return sf, err // errCh <- err } @@ -474,7 +527,7 @@ func (a *AggregatorV3) BuildFiles(ctx context.Context, db kv.RoDB) (err error) { // - during files build, may happen commit of new data. on each loop step getting latest id in db step := a.EndTxNumMinimax() / a.aggregationStep for ; step < lastIdInDB(db, a.accounts.indexKeysTable)/a.aggregationStep; step++ { - if err := a.buildFilesInBackground(ctx, step, db); err != nil { + if err := a.buildFilesInBackground(ctx, step); err != nil { if !errors.Is(err, context.Canceled) { log.Warn("buildFilesInBackground", "err", err) } @@ -484,10 +537,10 @@ func (a *AggregatorV3) BuildFiles(ctx context.Context, db kv.RoDB) (err error) { return nil } -func (a *AggregatorV3) buildFilesInBackground(ctx context.Context, step uint64, db kv.RoDB) (err error) { +func (a *AggregatorV3) buildFilesInBackground(ctx context.Context, step uint64) (err error) { closeAll := true - log.Info("[snapshots] history build", "step", fmt.Sprintf("%d-%d", step, step+1)) - sf, err := a.buildFiles(ctx, step, step*a.aggregationStep, (step+1)*a.aggregationStep, db) + //log.Info("[snapshots] history build", "step", fmt.Sprintf("%d-%d", step, step+1)) + sf, err := a.buildFiles(ctx, step, step*a.aggregationStep, (step+1)*a.aggregationStep) if err != nil { return err } @@ -497,6 +550,7 @@ func (a *AggregatorV3) buildFilesInBackground(ctx context.Context, step uint64, } }() a.integrateFiles(sf, step*a.aggregationStep, (step+1)*a.aggregationStep) + //a.notifyAboutNewSnapshots() closeAll = false return nil @@ -533,7 +587,7 @@ func (a *AggregatorV3) mergeLoopStep(ctx context.Context, workers int) (somethin } }() a.integrateMergedFiles(outs, in) - a.cleanAfterFreeze(in) + a.onFreeze(in.FrozenList()) closeAll = false return true, nil } @@ -550,6 +604,10 @@ func (a *AggregatorV3) MergeLoop(ctx context.Context, workers int) error { } func (a *AggregatorV3) integrateFiles(sf AggV3StaticFiles, txNumFrom, txNumTo uint64) { + a.filesMutationLock.Lock() + defer a.filesMutationLock.Unlock() + defer a.needSaveFilesListInDB.Store(true) + defer a.recalcMaxTxNum() a.accounts.integrateFiles(sf.accounts, txNumFrom, txNumTo) a.storage.integrateFiles(sf.storage, txNumFrom, txNumTo) a.code.integrateFiles(sf.code, txNumFrom, txNumTo) @@ -557,7 +615,10 @@ func (a *AggregatorV3) integrateFiles(sf AggV3StaticFiles, txNumFrom, txNumTo ui a.logTopics.integrateFiles(sf.logTopics, txNumFrom, txNumTo) a.tracesFrom.integrateFiles(sf.tracesFrom, txNumFrom, txNumTo) a.tracesTo.integrateFiles(sf.tracesTo, 
txNumFrom, txNumTo) - a.recalcMaxTxNum() +} + +func (a *AggregatorV3) NeedSaveFilesListInDB() bool { + return a.needSaveFilesListInDB.CompareAndSwap(true, false) } func (a *AggregatorV3) Unwind(ctx context.Context, txUnwindTo uint64, stateLoad etl.LoadFunc) error { @@ -594,55 +655,40 @@ func (a *AggregatorV3) Unwind(ctx context.Context, txUnwindTo uint64, stateLoad return nil } -func (a *AggregatorV3) Warmup(ctx context.Context, txFrom, limit uint64) { +func (a *AggregatorV3) Warmup(ctx context.Context, txFrom, limit uint64) error { if a.db == nil { - return - } - if limit < 10_000 { - return - } - if a.warmupWorking.Load() { - return + return nil } - a.warmupWorking.Store(true) - a.wg.Add(1) - go func() { - defer a.wg.Done() - defer a.warmupWorking.Store(false) - if err := a.db.View(ctx, func(tx kv.Tx) error { - if err := a.accounts.warmup(ctx, txFrom, limit, tx); err != nil { - return err - } - if err := a.storage.warmup(ctx, txFrom, limit, tx); err != nil { - return err - } - if err := a.code.warmup(ctx, txFrom, limit, tx); err != nil { - return err - } - if err := a.logAddrs.warmup(txFrom, limit, tx); err != nil { - return err - } - if err := a.logTopics.warmup(txFrom, limit, tx); err != nil { - return err - } - if err := a.tracesFrom.warmup(txFrom, limit, tx); err != nil { - return err - } - if err := a.tracesTo.warmup(txFrom, limit, tx); err != nil { - return err - } - return nil - }); err != nil { - log.Warn("[snapshots] prune warmup", "err", err) - } - }() + e, ctx := errgroup.WithContext(ctx) + e.Go(func() error { + return a.db.View(ctx, func(tx kv.Tx) error { return a.accounts.warmup(ctx, txFrom, limit, tx) }) + }) + e.Go(func() error { + return a.db.View(ctx, func(tx kv.Tx) error { return a.storage.warmup(ctx, txFrom, limit, tx) }) + }) + e.Go(func() error { + return a.db.View(ctx, func(tx kv.Tx) error { return a.code.warmup(ctx, txFrom, limit, tx) }) + }) + e.Go(func() error { + return a.db.View(ctx, func(tx kv.Tx) error { return a.logAddrs.warmup(ctx, txFrom, limit, tx) }) + }) + e.Go(func() error { + return a.db.View(ctx, func(tx kv.Tx) error { return a.logTopics.warmup(ctx, txFrom, limit, tx) }) + }) + e.Go(func() error { + return a.db.View(ctx, func(tx kv.Tx) error { return a.tracesFrom.warmup(ctx, txFrom, limit, tx) }) + }) + e.Go(func() error { + return a.db.View(ctx, func(tx kv.Tx) error { return a.tracesTo.warmup(ctx, txFrom, limit, tx) }) + }) + return e.Wait() } // StartWrites - pattern: `defer agg.StartWrites().FinishWrites()` func (a *AggregatorV3) DiscardHistory() *AggregatorV3 { - a.accounts.DiscardHistory(a.tmpdir) - a.storage.DiscardHistory(a.tmpdir) - a.code.DiscardHistory(a.tmpdir) + a.accounts.DiscardHistory() + a.storage.DiscardHistory() + a.code.DiscardHistory() a.logAddrs.DiscardHistory(a.tmpdir) a.logTopics.DiscardHistory(a.tmpdir) a.tracesFrom.DiscardHistory(a.tmpdir) @@ -652,16 +698,32 @@ func (a *AggregatorV3) DiscardHistory() *AggregatorV3 { // StartWrites - pattern: `defer agg.StartWrites().FinishWrites()` func (a *AggregatorV3) StartWrites() *AggregatorV3 { - a.accounts.StartWrites(a.tmpdir) - a.storage.StartWrites(a.tmpdir) - a.code.StartWrites(a.tmpdir) - a.logAddrs.StartWrites(a.tmpdir) - a.logTopics.StartWrites(a.tmpdir) - a.tracesFrom.StartWrites(a.tmpdir) - a.tracesTo.StartWrites(a.tmpdir) + a.walLock.Lock() + defer a.walLock.Unlock() + a.accounts.StartWrites() + a.storage.StartWrites() + a.code.StartWrites() + a.logAddrs.StartWrites() + a.logTopics.StartWrites() + a.tracesFrom.StartWrites() + a.tracesTo.StartWrites() + return a +} +func (a 
*AggregatorV3) StartUnbufferedWrites() *AggregatorV3 { + a.walLock.Lock() + defer a.walLock.Unlock() + a.accounts.StartWrites() + a.storage.StartWrites() + a.code.StartWrites() + a.logAddrs.StartWrites() + a.logTopics.StartWrites() + a.tracesFrom.StartWrites() + a.tracesTo.StartWrites() return a } func (a *AggregatorV3) FinishWrites() { + a.walLock.Lock() + defer a.walLock.Unlock() a.accounts.FinishWrites() a.storage.FinishWrites() a.code.FinishWrites() @@ -676,6 +738,7 @@ type flusher interface { } func (a *AggregatorV3) Flush(ctx context.Context, tx kv.RwTx) error { + a.walLock.Lock() flushers := []flusher{ a.accounts.Rotate(), a.storage.Rotate(), @@ -685,6 +748,7 @@ func (a *AggregatorV3) Flush(ctx context.Context, tx kv.RwTx) error { a.tracesFrom.Rotate(), a.tracesTo.Rotate(), } + a.walLock.Unlock() defer func(t time.Time) { log.Debug("[snapshots] history flush", "took", time.Since(t)) }(time.Now()) for _, f := range flushers { if err := f.Flush(ctx, tx); err != nil { @@ -717,11 +781,16 @@ func (a *AggregatorV3) PruneWithTiemout(ctx context.Context, timeout time.Durati } func (a *AggregatorV3) Prune(ctx context.Context, limit uint64) error { - //ctx, cancel := context.WithCancel(ctx) - //defer cancel() - //go func() { - // a.Warmup(ctx, 0, cmp.Max(a.aggregationStep, limit)) // warmup is asyn and moving faster than data deletion - //}() + //if limit/a.aggregationStep > StepsInBiggestFile { + // ctx, cancel := context.WithCancel(ctx) + // defer cancel() + // + // a.wg.Add(1) + // go func() { + // defer a.wg.Done() + // _ = a.Warmup(ctx, 0, cmp.Max(a.aggregationStep, limit)) // warmup is asyn and moving faster than data deletion + // }() + //} return a.prune(ctx, 0, a.maxTxNum.Load(), limit) } @@ -783,7 +852,7 @@ func (a *AggregatorV3) LogStats(tx kv.Tx, tx2block func(endTxNumMinimax uint64) var m runtime.MemStats dbg.ReadMemStats(&m) - log.Info("[Snapshots] History Stat", + log.Info("[snapshots] History Stat", "blocks", fmt.Sprintf("%dk", (histBlockNumProgress+1)/1000), "txs", fmt.Sprintf("%dm", a.maxTxNum.Load()/1_000_000), "txNum2blockNum", strings.Join(str, ","), @@ -940,6 +1009,42 @@ type MergedFilesV3 struct { tracesTo *filesItem } +func (mf MergedFilesV3) FrozenList() (frozen []string) { + if mf.accountsHist != nil && mf.accountsHist.frozen { + frozen = append(frozen, mf.accountsHist.decompressor.FileName()) + } + if mf.accountsIdx != nil && mf.accountsIdx.frozen { + frozen = append(frozen, mf.accountsIdx.decompressor.FileName()) + } + + if mf.storageHist != nil && mf.storageHist.frozen { + frozen = append(frozen, mf.storageHist.decompressor.FileName()) + } + if mf.storageIdx != nil && mf.storageIdx.frozen { + frozen = append(frozen, mf.storageIdx.decompressor.FileName()) + } + + if mf.codeHist != nil && mf.codeHist.frozen { + frozen = append(frozen, mf.codeHist.decompressor.FileName()) + } + if mf.codeIdx != nil && mf.codeIdx.frozen { + frozen = append(frozen, mf.codeIdx.decompressor.FileName()) + } + + if mf.logAddrs != nil && mf.logAddrs.frozen { + frozen = append(frozen, mf.logAddrs.decompressor.FileName()) + } + if mf.logTopics != nil && mf.logTopics.frozen { + frozen = append(frozen, mf.logTopics.decompressor.FileName()) + } + if mf.tracesFrom != nil && mf.tracesFrom.frozen { + frozen = append(frozen, mf.tracesFrom.decompressor.FileName()) + } + if mf.tracesTo != nil && mf.tracesTo.frozen { + frozen = append(frozen, mf.tracesTo.decompressor.FileName()) + } + return frozen +} func (mf MergedFilesV3) Close() { for _, item := range []*filesItem{mf.accountsIdx, mf.accountsHist, 
mf.storageIdx, mf.storageHist, mf.codeIdx, mf.codeHist, mf.logAddrs, mf.logTopics, mf.tracesFrom, mf.tracesTo} { @@ -967,7 +1072,7 @@ func (a *AggregatorV3) mergeFiles(ctx context.Context, files SelectedStaticFiles if r.accounts.any() { g.Go(func() error { var err error - mf.accountsIdx, mf.accountsHist, err = a.accounts.mergeFiles(ctx, files.accountsIdx, files.accountsHist, r.accounts, workers) + mf.accountsIdx, mf.accountsHist, err = a.accounts.mergeFiles(ctx, files.accountsIdx, files.accountsHist, r.accounts, workers, a.ps) return err }) } @@ -975,42 +1080,42 @@ func (a *AggregatorV3) mergeFiles(ctx context.Context, files SelectedStaticFiles if r.storage.any() { g.Go(func() error { var err error - mf.storageIdx, mf.storageHist, err = a.storage.mergeFiles(ctx, files.storageIdx, files.storageHist, r.storage, workers) + mf.storageIdx, mf.storageHist, err = a.storage.mergeFiles(ctx, files.storageIdx, files.storageHist, r.storage, workers, a.ps) return err }) } if r.code.any() { g.Go(func() error { var err error - mf.codeIdx, mf.codeHist, err = a.code.mergeFiles(ctx, files.codeIdx, files.codeHist, r.code, workers) + mf.codeIdx, mf.codeHist, err = a.code.mergeFiles(ctx, files.codeIdx, files.codeHist, r.code, workers, a.ps) return err }) } if r.logAddrs { g.Go(func() error { var err error - mf.logAddrs, err = a.logAddrs.mergeFiles(ctx, files.logAddrs, r.logAddrsStartTxNum, r.logAddrsEndTxNum, workers) + mf.logAddrs, err = a.logAddrs.mergeFiles(ctx, files.logAddrs, r.logAddrsStartTxNum, r.logAddrsEndTxNum, workers, a.ps) return err }) } if r.logTopics { g.Go(func() error { var err error - mf.logTopics, err = a.logTopics.mergeFiles(ctx, files.logTopics, r.logTopicsStartTxNum, r.logTopicsEndTxNum, workers) + mf.logTopics, err = a.logTopics.mergeFiles(ctx, files.logTopics, r.logTopicsStartTxNum, r.logTopicsEndTxNum, workers, a.ps) return err }) } if r.tracesFrom { g.Go(func() error { var err error - mf.tracesFrom, err = a.tracesFrom.mergeFiles(ctx, files.tracesFrom, r.tracesFromStartTxNum, r.tracesFromEndTxNum, workers) + mf.tracesFrom, err = a.tracesFrom.mergeFiles(ctx, files.tracesFrom, r.tracesFromStartTxNum, r.tracesFromEndTxNum, workers, a.ps) return err }) } if r.tracesTo { g.Go(func() error { var err error - mf.tracesTo, err = a.tracesTo.mergeFiles(ctx, files.tracesTo, r.tracesToStartTxNum, r.tracesToEndTxNum, workers) + mf.tracesTo, err = a.tracesTo.mergeFiles(ctx, files.tracesTo, r.tracesToStartTxNum, r.tracesToEndTxNum, workers, a.ps) return err }) } @@ -1021,7 +1126,11 @@ func (a *AggregatorV3) mergeFiles(ctx context.Context, files SelectedStaticFiles return mf, err } -func (a *AggregatorV3) integrateMergedFiles(outs SelectedStaticFilesV3, in MergedFilesV3) { +func (a *AggregatorV3) integrateMergedFiles(outs SelectedStaticFilesV3, in MergedFilesV3) (frozen []string) { + a.filesMutationLock.Lock() + defer a.filesMutationLock.Unlock() + defer a.needSaveFilesListInDB.Store(true) + defer a.recalcMaxTxNum() a.accounts.integrateMergedFiles(outs.accountsIdx, outs.accountsHist, in.accountsIdx, in.accountsHist) a.storage.integrateMergedFiles(outs.storageIdx, outs.storageHist, in.storageIdx, in.storageHist) a.code.integrateMergedFiles(outs.codeIdx, outs.codeHist, in.codeIdx, in.codeHist) @@ -1029,42 +1138,42 @@ func (a *AggregatorV3) integrateMergedFiles(outs SelectedStaticFilesV3, in Merge a.logTopics.integrateMergedFiles(outs.logTopics, in.logTopics) a.tracesFrom.integrateMergedFiles(outs.tracesFrom, in.tracesFrom) a.tracesTo.integrateMergedFiles(outs.tracesTo, in.tracesTo) + 
a.cleanFrozenParts(in) + return frozen } -func (a *AggregatorV3) cleanAfterFreeze(in MergedFilesV3) { - a.accounts.cleanAfterFreeze(in.accountsHist) - a.storage.cleanAfterFreeze(in.storageHist) - a.code.cleanAfterFreeze(in.codeHist) - a.logAddrs.cleanAfterFreeze(in.logAddrs) - a.logTopics.cleanAfterFreeze(in.logTopics) - a.tracesFrom.cleanAfterFreeze(in.tracesFrom) - a.tracesTo.cleanAfterFreeze(in.tracesTo) +func (a *AggregatorV3) cleanFrozenParts(in MergedFilesV3) { + a.accounts.cleanFrozenParts(in.accountsHist) + a.storage.cleanFrozenParts(in.storageHist) + a.code.cleanFrozenParts(in.codeHist) + a.logAddrs.cleanFrozenParts(in.logAddrs) + a.logTopics.cleanFrozenParts(in.logTopics) + a.tracesFrom.cleanFrozenParts(in.tracesFrom) + a.tracesTo.cleanFrozenParts(in.tracesTo) } // KeepInDB - usually equal to one a.aggregationStep, but when we exec blocks from snapshots // we can set it to 0, because no re-org on this blocks are possible func (a *AggregatorV3) KeepInDB(v uint64) { a.keepInDB = v } -func (a *AggregatorV3) BuildFilesInBackground(db kv.RoDB) error { +func (a *AggregatorV3) BuildFilesInBackground() { if (a.txNum.Load() + 1) <= a.maxTxNum.Load()+a.aggregationStep+a.keepInDB { // Leave one step worth in the DB - return nil + return } step := a.maxTxNum.Load() / a.aggregationStep - if a.working.Load() { - return nil + if ok := a.working.CompareAndSwap(false, true); !ok { + return } toTxNum := (step + 1) * a.aggregationStep hasData := false - - a.working.Store(true) a.wg.Add(1) go func() { defer a.wg.Done() defer a.working.Store(false) // check if db has enough data (maybe we didn't commit them yet) - lastInDB := lastIdInDB(db, a.accounts.indexKeysTable) + lastInDB := lastIdInDB(a.db, a.accounts.indexKeysTable) hasData = lastInDB >= toTxNum if !hasData { return @@ -1074,8 +1183,8 @@ func (a *AggregatorV3) BuildFilesInBackground(db kv.RoDB) error { // - to reduce amount of small merges // - to remove old data from db as early as possible // - during files build, may happen commit of new data. 
on each loop step getting latest id in db - for step < lastIdInDB(db, a.accounts.indexKeysTable)/a.aggregationStep { - if err := a.buildFilesInBackground(a.ctx, step, db); err != nil { + for step < lastIdInDB(a.db, a.accounts.indexKeysTable)/a.aggregationStep { + if err := a.buildFilesInBackground(a.ctx, step); err != nil { if errors.Is(err, context.Canceled) { return } @@ -1085,48 +1194,44 @@ func (a *AggregatorV3) BuildFilesInBackground(db kv.RoDB) error { step++ } - if a.workingMerge.Load() { + if ok := a.workingMerge.CompareAndSwap(false, true); !ok { return } - a.workingMerge.Store(true) a.wg.Add(1) go func() { defer a.wg.Done() defer a.workingMerge.Store(false) if err := a.MergeLoop(a.ctx, 1); err != nil { + if errors.Is(err, context.Canceled) { + return + } log.Warn("merge", "err", err) } a.BuildOptionalMissedIndicesInBackground(a.ctx, 1) }() }() +} - //if err := a.prune(0, a.maxTxNum.Load(), a.aggregationStep); err != nil { - // return err - //} - return nil +func (a *AggregatorV3) BatchHistoryWriteStart() *AggregatorV3 { + a.walLock.RLock() + return a +} +func (a *AggregatorV3) BatchHistoryWriteEnd() { + a.walLock.RUnlock() } func (a *AggregatorV3) AddAccountPrev(addr []byte, prev []byte) error { - if err := a.accounts.AddPrevValue(addr, nil, prev); err != nil { - return err - } - return nil + return a.accounts.AddPrevValue(addr, nil, prev) } func (a *AggregatorV3) AddStoragePrev(addr []byte, loc []byte, prev []byte) error { - if err := a.storage.AddPrevValue(addr, loc, prev); err != nil { - return err - } - return nil + return a.storage.AddPrevValue(addr, loc, prev) } // AddCodePrev - addr+inc => code func (a *AggregatorV3) AddCodePrev(addr []byte, prev []byte) error { - if err := a.code.AddPrevValue(addr, nil, prev); err != nil { - return err - } - return nil + return a.code.AddPrevValue(addr, nil, prev) } func (a *AggregatorV3) AddTraceFrom(addr []byte) error { @@ -1187,29 +1292,29 @@ func (a *AggregatorV3) EnableMadvNormal() *AggregatorV3 { } // -- range -func (ac *AggregatorV3Context) LogAddrIterator(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (*InvertedIterator, error) { - return ac.logAddrs.IterateRange(addr, startTxNum, endTxNum, asc, limit, tx) +func (ac *AggregatorV3Context) LogAddrRange(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.U64, error) { + return ac.logAddrs.IdxRange(addr, startTxNum, endTxNum, asc, limit, tx) } -func (ac *AggregatorV3Context) LogTopicIterator(topic []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (*InvertedIterator, error) { - return ac.logTopics.IterateRange(topic, startTxNum, endTxNum, asc, limit, tx) +func (ac *AggregatorV3Context) LogTopicRange(topic []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.U64, error) { + return ac.logTopics.IdxRange(topic, startTxNum, endTxNum, asc, limit, tx) } -func (ac *AggregatorV3Context) TraceFromIterator(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (*InvertedIterator, error) { - return ac.tracesFrom.IterateRange(addr, startTxNum, endTxNum, asc, limit, tx) +func (ac *AggregatorV3Context) TraceFromRange(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.U64, error) { + return ac.tracesFrom.IdxRange(addr, startTxNum, endTxNum, asc, limit, tx) } -func (ac *AggregatorV3Context) TraceToIterator(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (*InvertedIterator, error) { - return ac.tracesTo.IterateRange(addr, startTxNum, 
endTxNum, asc, limit, tx) +func (ac *AggregatorV3Context) TraceToRange(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.U64, error) { + return ac.tracesTo.IdxRange(addr, startTxNum, endTxNum, asc, limit, tx) } -func (ac *AggregatorV3Context) AccountHistoyIdxIterator(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (*InvertedIterator, error) { - return ac.accounts.ic.IterateRange(addr, startTxNum, endTxNum, asc, limit, tx) +func (ac *AggregatorV3Context) AccountHistoyIdxRange(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.U64, error) { + return ac.accounts.IdxRange(addr, startTxNum, endTxNum, asc, limit, tx) } -func (ac *AggregatorV3Context) StorageHistoyIdxIterator(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (*InvertedIterator, error) { - return ac.storage.ic.IterateRange(addr, startTxNum, endTxNum, asc, limit, tx) +func (ac *AggregatorV3Context) StorageHistoyIdxRange(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.U64, error) { + return ac.storage.IdxRange(addr, startTxNum, endTxNum, asc, limit, tx) } -func (ac *AggregatorV3Context) CodeHistoyIdxIterator(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (*InvertedIterator, error) { - return ac.code.ic.IterateRange(addr, startTxNum, endTxNum, asc, limit, tx) +func (ac *AggregatorV3Context) CodeHistoyIdxRange(addr []byte, startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.U64, error) { + return ac.code.IdxRange(addr, startTxNum, endTxNum, asc, limit, tx) } // -- range end @@ -1269,27 +1374,27 @@ func (ac *AggregatorV3Context) ReadAccountCodeSizeNoState(addr []byte, txNum uin return len(code), noState, nil } -func (ac *AggregatorV3Context) AccountHistoryIterateChanged(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) *HistoryChangesIter { - return ac.accounts.IterateChanged(startTxNum, endTxNum, asc, limit, tx) +func (ac *AggregatorV3Context) AccountHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) { + return ac.accounts.HistoryRange(startTxNum, endTxNum, asc, limit, tx) } -func (ac *AggregatorV3Context) StorageHistoryIterateChanged(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) *HistoryChangesIter { - return ac.storage.IterateChanged(startTxNum, endTxNum, asc, limit, tx) +func (ac *AggregatorV3Context) StorageHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) { + return ac.storage.HistoryRange(startTxNum, endTxNum, asc, limit, tx) } -func (ac *AggregatorV3Context) CodeHistoryIterateChanged(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) *HistoryChangesIter { - return ac.code.IterateChanged(startTxNum, endTxNum, asc, limit, tx) +func (ac *AggregatorV3Context) CodeHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) { + return ac.code.HistoryRange(startTxNum, endTxNum, asc, limit, tx) } -func (ac *AggregatorV3Context) AccountHistoricalStateRange(startTxNum uint64, from, to []byte, limit int, tx kv.Tx) *StateAsOfIter { +func (ac *AggregatorV3Context) AccountHistoricalStateRange(startTxNum uint64, from, to []byte, limit int, tx kv.Tx) iter.KV { return ac.accounts.WalkAsOf(startTxNum, from, to, tx, limit) } -func (ac *AggregatorV3Context) StorageHistoricalStateRange(startTxNum uint64, from, to []byte, limit int, tx kv.Tx) *StateAsOfIter { +func (ac *AggregatorV3Context) 
StorageHistoricalStateRange(startTxNum uint64, from, to []byte, limit int, tx kv.Tx) iter.KV { return ac.storage.WalkAsOf(startTxNum, from, to, tx, limit) } -func (ac *AggregatorV3Context) CodeHistoricalStateRange(startTxNum uint64, from, to []byte, limit int, tx kv.Tx) *StateAsOfIter { +func (ac *AggregatorV3Context) CodeHistoricalStateRange(startTxNum uint64, from, to []byte, limit int, tx kv.Tx) iter.KV { return ac.code.WalkAsOf(startTxNum, from, to, tx, limit) } diff --git a/state/btree_index.go b/state/btree_index.go new file mode 100644 index 000000000..39ab4a4c6 --- /dev/null +++ b/state/btree_index.go @@ -0,0 +1,1086 @@ +package state + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "fmt" + "math" + "math/bits" + "os" + "path" + "path/filepath" + "time" + + "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/common/background" + "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/length" + "github.com/ledgerwatch/erigon-lib/compress" + "github.com/ledgerwatch/erigon-lib/etl" + "github.com/ledgerwatch/erigon-lib/mmap" +) + +func logBase(n, base uint64) uint64 { + return uint64(math.Ceil(math.Log(float64(n)) / math.Log(float64(base)))) +} + +func min64(a, b uint64) uint64 { + if a < b { + return a + } + return b +} + +type markupCursor struct { + l, p, di, si uint64 + //l - level + //p - pos inside level + //si - current, actual son index + //di - data array index +} + +type node struct { + p, d, s, fc uint64 + key []byte + val []byte +} + +type Cursor struct { + ctx context.Context + ix *btAlloc + + key []byte + value []byte + d uint64 +} + +func (a *btAlloc) newCursor(ctx context.Context, k, v []byte, d uint64) *Cursor { + return &Cursor{ + ctx: ctx, + key: common.Copy(k), + value: common.Copy(v), + d: d, + ix: a, + } +} + +func (c *Cursor) Key() []byte { + return c.key +} + +func (c *Cursor) Ordinal() uint64 { + return c.d +} + +func (c *Cursor) Value() []byte { + return c.value +} + +func (c *Cursor) Next() bool { + if c.d > c.ix.K-1 { + return false + } + k, v, err := c.ix.dataLookup(c.d + 1) + if err != nil { + return false + } + c.key = common.Copy(k) + c.value = common.Copy(v) + c.d++ + return true +} + +type btAlloc struct { + d uint64 // depth + M uint64 // child limit of any node + N uint64 + K uint64 + vx []uint64 // vertex count on level + sons [][]uint64 // i - level; 0 <= i < d; j_k - amount, j_k+1 - child count + cursors []markupCursor + nodes [][]node + naccess uint64 + trace bool + + dataLookup func(di uint64) ([]byte, []byte, error) +} + +func newBtAlloc(k, M uint64, trace bool) *btAlloc { + if k == 0 { + return nil + } + + d := logBase(k, M) + a := &btAlloc{ + vx: make([]uint64, d+1), + sons: make([][]uint64, d+1), + cursors: make([]markupCursor, d), + nodes: make([][]node, d), + M: M, + K: k, + d: d, + trace: trace, + } + if trace { + fmt.Printf("k=%d d=%d, M=%d\n", k, d, M) + } + a.vx[0], a.vx[d] = 1, k + + if k < M/2 { + a.N = k + a.nodes = make([][]node, 1) + return a + } + + //nnc := func(vx uint64) uint64 { + // return uint64(math.Ceil(float64(vx) / float64(M))) + //} + nvc := func(vx uint64) uint64 { + return uint64(math.Ceil(float64(vx) / float64(M>>1))) + } + + for i := a.d - 1; i > 0; i-- { + nnc := uint64(math.Ceil(float64(a.vx[i+1]) / float64(M))) + //nvc := uint64(math.Floor(float64(a.vx[i+1]) / float64(m))-1) + //nnc := a.vx[i+1] / M + //nvc := a.vx[i+1] / m + //bvc := a.vx[i+1] / (m + (m >> 1)) + a.vx[i] = min64(uint64(math.Pow(float64(M), 
float64(i))), nnc)
+	}
+
+	ncount := uint64(0)
+	pnv := uint64(0)
+	for l := a.d - 1; l > 0; l-- {
+		//s := nnc(a.vx[l+1])
+		sh := nvc(a.vx[l+1])
+
+		if sh&1 == 1 {
+			a.sons[l] = append(a.sons[l], sh>>1, M, 1, M>>1)
+		} else {
+			a.sons[l] = append(a.sons[l], sh>>1, M)
+		}
+
+		for ik := 0; ik < len(a.sons[l]); ik += 2 {
+			ncount += a.sons[l][ik] * a.sons[l][ik+1]
+			if l == 1 {
+				pnv += a.sons[l][ik]
+			}
+		}
+	}
+	a.sons[0] = []uint64{1, pnv}
+	ncount += a.sons[0][0] * a.sons[0][1] // last one
+	a.N = ncount
+
+	if trace {
+		for i, v := range a.sons {
+			fmt.Printf("L%d=%v\n", i, v)
+		}
+	}
+
+	return a
+}
+
+// nolint
+// another implementation of traverseDfs; supposed to be a bit cleaner, but still buggy
+func (a *btAlloc) traverseTrick() {
+	for l := 0; l < len(a.sons)-1; l++ {
+		if len(a.sons[l]) < 2 {
+			panic("invalid btree allocation markup")
+		}
+		a.cursors[l] = markupCursor{uint64(l), 1, 0, 0}
+		a.nodes[l] = make([]node, 0)
+	}
+
+	lf := a.cursors[len(a.cursors)-1]
+	c := a.cursors[(len(a.cursors) - 2)]
+
+	var d uint64
+	var fin bool
+
+	lf.di = d
+	lf.si++
+	d++
+	a.cursors[len(a.cursors)-1] = lf
+
+	moved := true
+	for int(c.p) <= len(a.sons[c.l]) {
+		if fin || d > a.K {
+			break
+		}
+		c, lf = a.cursors[c.l], a.cursors[lf.l]
+
+		c.di = d
+		c.si++
+
+		sons := a.sons[lf.l][lf.p]
+		for i := uint64(1); i < sons; i++ {
+			lf.si++
+			d++
+		}
+		lf.di = d
+		d++
+
+		a.nodes[lf.l] = append(a.nodes[lf.l], node{p: lf.p, s: lf.si, d: lf.di})
+		a.nodes[c.l] = append(a.nodes[c.l], node{p: c.p, s: c.si, d: c.di})
+		a.cursors[lf.l] = lf
+		a.cursors[c.l] = c
+
+		for l := lf.l; l >= 0; l-- {
+			sc := a.cursors[l]
+			sons, gsons := a.sons[sc.l][sc.p-1], a.sons[sc.l][sc.p]
+			if l < c.l && moved {
+				sc.di = d
+				a.nodes[sc.l] = append(a.nodes[sc.l], node{d: sc.di})
+				sc.si++
+				d++
+			}
+			moved = (sc.si-1)/gsons != sc.si/gsons
+			if sc.si/gsons >= sons {
+				sz := uint64(len(a.sons[sc.l]) - 1)
+				if sc.p+2 > sz {
+					fin = l == lf.l
+					break
+				} else {
+					sc.p += 2
+					sc.si, sc.di = 0, 0
+				}
+				//moved = true
+			}
+			if l == lf.l {
+				sc.si++
+				sc.di = d
+				d++
+			}
+			a.cursors[l] = sc
+			if l == 0 {
+				break
+			}
+		}
+		moved = false
+	}
+}
+
+func (a *btAlloc) traverseDfs() {
+	for l := 0; l < len(a.sons)-1; l++ {
+		a.cursors[l] = markupCursor{uint64(l), 1, 0, 0}
+		a.nodes[l] = make([]node, 0)
+	}
+
+	if len(a.cursors) <= 1 {
+		if a.nodes[0] == nil {
+			a.nodes[0] = make([]node, 0)
+		}
+		a.nodes[0] = append(a.nodes[0], node{d: a.K})
+		a.N = a.K
+		if a.trace {
+			fmt.Printf("ncount=%d ∂%.5f\n", a.N, float64(a.N-a.K)/float64(a.N))
+		}
+		return
+	}
+
+	c := a.cursors[len(a.cursors)-1]
+	pc := a.cursors[(len(a.cursors) - 2)]
+	root := new(node)
+	trace := false
+
+	var di uint64
+	for stop := false; !stop; {
+		// fill leaves, marking parents as needed (walking up until all grandparents, up to the root, are marked)
+		// check if the eldest parent has brothers
+		//     -- has bros -> fill their leaves from the bottom
+		//     -- no bros -> shift cursor (tricky)
+		if di > a.K {
+			a.N = di - 1 // actually filled node count
+			if a.trace {
+				fmt.Printf("ncount=%d ∂%.5f\n", a.N, float64(a.N-a.K)/float64(a.N))
+			}
+			break
+		}
+
+		bros, parents := a.sons[c.l][c.p], a.sons[c.l][c.p-1]
+		for i := uint64(0); i < bros; i++ {
+			c.di = di
+			if trace {
+				fmt.Printf("L%d |%d| d %2d s %2d\n", c.l, c.p, c.di, c.si)
+			}
+			c.si++
+			di++
+
+			if i == 0 {
+				pc.di = di
+				if trace {
+					fmt.Printf("P%d |%d| d %2d s %2d\n", pc.l, pc.p, pc.di, pc.si)
+				}
+				pc.si++
+				di++
+			}
+			if di > a.K {
+				a.N = di - 1 // actually filled node count
+				stop = true
+				break
+			}
+		}
+
+		a.nodes[c.l] =
append(a.nodes[c.l], node{p: c.p, d: c.di, s: c.si}) + a.nodes[pc.l] = append(a.nodes[pc.l], node{p: pc.p, d: pc.di, s: pc.si, fc: uint64(len(a.nodes[c.l]) - 1)}) + + pid := c.si / bros + if pid >= parents { + if c.p+2 >= uint64(len(a.sons[c.l])) { + stop = true // end of row + if trace { + fmt.Printf("F%d |%d| d %2d\n", c.l, c.p, c.di) + } + } else { + c.p += 2 + c.si = 0 + c.di = 0 + } + } + a.cursors[c.l] = c + a.cursors[pc.l] = pc + + //nolint + for l := pc.l; l >= 0; l-- { + pc := a.cursors[l] + uncles := a.sons[pc.l][pc.p] + grands := a.sons[pc.l][pc.p-1] + + pi1 := pc.si / uncles + pc.si++ + pc.di = 0 + + pi2 := pc.si / uncles + moved := pi2-pi1 != 0 + + switch { + case pc.l > 0: + gp := a.cursors[pc.l-1] + if gp.di == 0 { + gp.di = di + di++ + if trace { + fmt.Printf("P%d |%d| d %2d s %2d\n", gp.l, gp.p, gp.di, gp.si) + } + a.nodes[gp.l] = append(a.nodes[gp.l], node{p: gp.p, d: gp.di, s: gp.si, fc: uint64(len(a.nodes[l]) - 1)}) + a.cursors[gp.l] = gp + } + default: + if root.d == 0 { + root.d = di + //di++ + if trace { + fmt.Printf("ROOT | d %2d\n", root.d) + } + } + } + + //fmt.Printf("P%d |%d| d %2d s %2d pid %d\n", pc.l, pc.p, pc.di, pc.si-1) + if pi2 >= grands { // skip one step of si due to different parental filling order + if pc.p+2 >= uint64(len(a.sons[pc.l])) { + if trace { + fmt.Printf("EoRow %d |%d|\n", pc.l, pc.p) + } + break // end of row + } + //fmt.Printf("N %d d%d s%d\n", pc.l, pc.di, pc.si) + //fmt.Printf("P%d |%d| d %2d s %2d pid %d\n", pc.l, pc.p, pc.di, pc.si, pid) + pc.p += 2 + pc.si = 0 + pc.di = 0 + } + a.cursors[pc.l] = pc + + if !moved { + break + } + } + } + + if a.trace { + fmt.Printf("ncount=%d ∂%.5f\n", a.N, float64(a.N-a.K)/float64(a.N)) + } +} + +func (a *btAlloc) bsKey(x []byte, l, r uint64) (*Cursor, error) { + for l <= r { + di := (l + r) >> 1 + + mk, value, err := a.dataLookup(di) + a.naccess++ + + cmp := bytes.Compare(mk, x) + switch { + case err != nil: + return nil, err + case cmp == 0: + return a.newCursor(context.TODO(), mk, value, di), nil + case cmp == -1: + l = di + 1 + default: + r = di + } + if l == r { + break + } + } + k, v, err := a.dataLookup(l) + if err != nil { + return nil, fmt.Errorf("key >= %x was not found at pos %d", x, l) + } + return a.newCursor(context.TODO(), k, v, l), nil +} + +func (a *btAlloc) bsNode(i, l, r uint64, x []byte) (n node, lm int64, rm int64) { + n, lm, rm = node{}, -1, -1 + + for l < r { + m := (l + r) >> 1 + + n = a.nodes[i][m] + a.naccess++ + + cmp := bytes.Compare(n.key, x) + switch { + case cmp == 0: + return n, int64(m), int64(m) + case cmp > 0: + r = m + rm = int64(m) + case cmp < 0: + lm = int64(m) + l = m + 1 + default: + panic(fmt.Errorf("compare error %d, %x ? 
%x", cmp, n.key, x))
+		}
+	}
+	return n, lm, rm
+}
+
+// seekLeast returns the position of the first node with node.d >= d at level lvl
+func (a *btAlloc) seekLeast(lvl, d uint64) uint64 {
+	for i, node := range a.nodes[lvl] {
+		if node.d >= d {
+			return uint64(i)
+		}
+	}
+	return uint64(len(a.nodes[lvl]))
+}
+
+func (a *btAlloc) Seek(ik []byte) (*Cursor, error) {
+	if a.trace {
+		fmt.Printf("seek key %x\n", ik)
+	}
+
+	var (
+		lm, rm     int64
+		L, R       = uint64(0), uint64(len(a.nodes[0]) - 1)
+		minD, maxD = uint64(0), a.K
+		ln         node
+	)
+
+	for l, level := range a.nodes {
+		if len(level) == 1 && l == 0 {
+			ln = a.nodes[0][0]
+			maxD = ln.d
+			break
+		}
+		ln, lm, rm = a.bsNode(uint64(l), L, R, ik)
+		if ln.key == nil { // bsNode should return the node nearest to the key from the left, so it is never nil
+			if a.trace {
+				fmt.Printf("found nil key %x pos_range[%d-%d] naccess_ram=%d\n", l, lm, rm, a.naccess)
+			}
+			panic(fmt.Errorf("bt index nil node at level %d", l))
+		}
+
+		switch bytes.Compare(ln.key, ik) {
+		case 1: // key > ik
+			maxD = ln.d
+		case -1: // key < ik
+			minD = ln.d
+		case 0:
+			if a.trace {
+				fmt.Printf("found key %x v=%x naccess_ram=%d\n", ik, ln.val /*level[m].d,*/, a.naccess)
+			}
+			return a.newCursor(context.TODO(), common.Copy(ln.key), common.Copy(ln.val), ln.d), nil
+		}
+
+		if rm-lm == 1 {
+			break
+		}
+		if lm >= 0 {
+			minD = a.nodes[l][lm].d
+			L = level[lm].fc
+		} else if l+1 != len(a.nodes) {
+			L = a.seekLeast(uint64(l+1), minD)
+			if L == uint64(len(a.nodes[l+1])) {
+				L--
+			}
+		}
+		if rm >= 0 {
+			maxD = a.nodes[l][rm].d
+			R = level[rm].fc
+		} else if l+1 != len(a.nodes) {
+			R = a.seekLeast(uint64(l+1), maxD)
+			if R == uint64(len(a.nodes[l+1])) {
+				R--
+			}
+		}
+
+		if a.trace {
+			fmt.Printf("range={%x d=%d p=%d} (%d, %d) L=%d naccess_ram=%d\n", ln.key, ln.d, ln.p, minD, maxD, l, a.naccess)
+		}
+	}
+
+	a.naccess = 0 // reset count before actually going to disk
+	cursor, err := a.bsKey(ik, minD, maxD)
+	if err != nil {
+		if a.trace {
+			fmt.Printf("key %x not found\n", ik)
+		}
+		return nil, err
+	}
+
+	if a.trace {
+		fmt.Printf("finally found key %x v=%x naccess_disk=%d\n", cursor.key, cursor.value, a.naccess)
+	}
+	return cursor, nil
+}
+
+func (a *btAlloc) fillSearchMx() {
+	for i, n := range a.nodes {
+		if a.trace {
+			fmt.Printf("D%d |%d| ", i, len(n))
+		}
+		for j, s := range n {
+			if a.trace {
+				fmt.Printf("%d ", s.d)
+			}
+			if s.d >= a.K {
+				break
+			}
+
+			kb, v, err := a.dataLookup(s.d)
+			if err != nil {
+				fmt.Printf("d %d not found %v\n", s.d, err)
+			}
+			a.nodes[i][j].key = common.Copy(kb)
+			a.nodes[i][j].val = common.Copy(v)
+		}
+		if a.trace {
+			fmt.Printf("\n")
+		}
+	}
+}
+
+// Deprecated: use BtIndex directly.
+type BtIndexReader struct {
+	index *BtIndex
+}
+
+func NewBtIndexReader(index *BtIndex) *BtIndexReader {
+	return &BtIndexReader{
+		index: index,
+	}
+}
+
+// Lookup wraps index Lookup
+func (r *BtIndexReader) Lookup(key []byte) uint64 {
+	if r.index != nil {
+		return r.index.Lookup(key)
+	}
+	return 0
+}
+
+func (r *BtIndexReader) Lookup2(key1, key2 []byte) uint64 {
+	fk := make([]byte, 52)
+	copy(fk[:length.Addr], key1)
+	copy(fk[length.Addr:], key2)
+
+	if r.index != nil {
+		return r.index.Lookup(fk)
+	}
+	return 0
+}
+
+func (r *BtIndexReader) Seek(x []byte) (*Cursor, error) {
+	if r.index != nil {
+		cursor, err := r.index.alloc.Seek(x)
+		if err != nil {
+			return nil, fmt.Errorf("seek key %x: %w", x, err)
+		}
+		return cursor, nil
+	}
+	return nil, fmt.Errorf("seek failed: btree index is nil")
+}
+
+func (r *BtIndexReader) Empty() bool {
+	return r.index.Empty()
+}
+
+type BtIndexWriter struct {
+	built           bool
+	lvl             log.Lvl
+	maxOffset       uint64
+	prevOffset      uint64
+	minDelta        uint64
+	indexW          *bufio.Writer
+	indexF          *os.File
+	bucketCollector *etl.Collector // Collector that sorts by buckets
+	indexFileName   string
+	indexFile       string
+	tmpDir          string
+	numBuf          [8]byte
+	keyCount        uint64
+	etlBufLimit     datasize.ByteSize
+	bytesPerRec     int
+}
+
+type BtIndexWriterArgs struct {
+	IndexFile   string // File name where the btree index will be written to
+	TmpDir      string
+	KeyCount    int
+	EtlBufLimit datasize.ByteSize
+}
+
+const BtreeLogPrefix = "btree"
+
+// NewBtIndexWriter creates a new BtIndexWriter instance with a given number of keys.
+// Typical bucket size is 100-2048; larger bucket sizes result in smaller representations of hash functions, at the cost of slower access.
+// The salt parameter is used to randomise the hash function construction, to ensure that different Erigon instances (nodes)
+// are likely to use different hash functions, so collision attacks are unlikely to slow down any meaningful number of nodes at the same time
+func NewBtIndexWriter(args BtIndexWriterArgs) (*BtIndexWriter, error) {
+	btw := &BtIndexWriter{lvl: log.LvlDebug}
+	btw.tmpDir = args.TmpDir
+	btw.indexFile = args.IndexFile
+
+	_, fname := filepath.Split(btw.indexFile)
+	btw.indexFileName = fname
+	btw.etlBufLimit = args.EtlBufLimit
+	if btw.etlBufLimit == 0 {
+		btw.etlBufLimit = etl.BufferOptimalSize
+	}
+
+	btw.bucketCollector = etl.NewCollector(BtreeLogPrefix+" "+fname, btw.tmpDir, etl.NewSortableBuffer(btw.etlBufLimit))
+	btw.bucketCollector.LogLvl(log.LvlDebug)
+
+	btw.maxOffset = 0
+	return btw, nil
+}
+
+// loadFuncBucket is required to satisfy the etl.LoadFunc type, to use with collector.Load
+func (btw *BtIndexWriter) loadFuncBucket(k, v []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error {
+	// k is the BigEndian encoding of the bucket number, and v is the key assigned to that bucket
+	//if uint64(len(btw.vals)) >= btw.batchSizeLimit {
+	//	if err := btw.drainBatch(); err != nil {
+	//		return err
+	//	}
+	//}
+
+	// if _, err := btw.indexW.Write(k); err != nil {
+	// 	return err
+	// }
+	if _, err := btw.indexW.Write(v[8-btw.bytesPerRec:]); err != nil {
+		return err
+	}
+
+	//btw.keys = append(btw.keys, binary.BigEndian.Uint64(k), binary.BigEndian.Uint64(k[8:]))
+	//btw.vals = append(btw.vals, binary.BigEndian.Uint64(v))
+	return nil
+}
+
+// Build has to be called after all the keys have been added, and it initiates the process
+// of building the btree index and writing it into a file
+func (btw *BtIndexWriter) Build() error {
+	tmpIdxFilePath := btw.indexFile + ".tmp"
+
+	if btw.built {
+		return fmt.Errorf("already built")
+	}
+	//if btw.keysAdded != btw.keyCount {
+	//	return fmt.Errorf("expected keys %d, got %d", btw.keyCount, btw.keysAdded)
+	//}
+	var err error
+	if btw.indexF, err = os.Create(tmpIdxFilePath); err != nil {
+		return fmt.Errorf("create index file %s: %w", btw.indexFile, err)
+	}
+	defer btw.indexF.Sync()
+	defer btw.indexF.Close()
+	btw.indexW = bufio.NewWriterSize(btw.indexF, etl.BufIOSize)
+	defer btw.indexW.Flush()
+
+	// Write number of keys
+	binary.BigEndian.PutUint64(btw.numBuf[:], btw.keyCount)
+	if _, err = btw.indexW.Write(btw.numBuf[:]); err != nil {
+		return fmt.Errorf("write number of keys: %w", err)
+	}
+	// Write number of bytes per index record
+	btw.bytesPerRec = (bits.Len64(btw.maxOffset) + 7) / 8
+	if err = btw.indexW.WriteByte(byte(btw.bytesPerRec)); err != nil {
+		return fmt.Errorf("write bytes per record: %w", err)
+	}
+
+	defer btw.bucketCollector.Close()
+	log.Log(btw.lvl, "[index] calculating", "file", btw.indexFileName)
+	if err := btw.bucketCollector.Load(nil, "", btw.loadFuncBucket, etl.TransformArgs{}); err != nil {
+		return err
+	}
+
+	log.Log(btw.lvl, "[index] write", "file", btw.indexFileName)
+	btw.built = true
+
+	_ = btw.indexW.Flush()
+	_ = btw.indexF.Sync()
+	_ = btw.indexF.Close()
+	_ = os.Rename(tmpIdxFilePath, btw.indexFile)
+	return nil
+}
+
+func (btw *BtIndexWriter) Close() {
+	if btw.indexF != nil {
+		btw.indexF.Close()
+	}
+	if btw.bucketCollector != nil {
+		btw.bucketCollector.Close()
+	}
+	//if btw.offsetCollector != nil {
+	//	btw.offsetCollector.Close()
+	//}
+}
+
+func (btw *BtIndexWriter) AddKey(key []byte, offset uint64) error {
+	if btw.built {
+		return fmt.Errorf("cannot add keys after the index has been built")
+	}
+
+	binary.BigEndian.PutUint64(btw.numBuf[:], offset)
+	if offset > btw.maxOffset {
+		btw.maxOffset = offset
+	}
+	if btw.keyCount > 0 {
+		delta := offset - btw.prevOffset
+		if btw.keyCount == 1 || delta < btw.minDelta {
+			btw.minDelta = delta
+		}
+	}
+
+	if err := btw.bucketCollector.Collect(key, btw.numBuf[:]); err != nil {
+		return err
+	}
+	btw.keyCount++
+	btw.prevOffset = offset
+	return nil
+}
+
+type BtIndex struct {
+	alloc        *btAlloc
+	mmapWin      *[mmap.MaxMapSize]byte
+	mmapUnix     []byte
+	data         []byte
+	file         *os.File
+	size         int64
+	modTime      time.Time
+	filePath     string
+	keyCount     uint64
+	bytesPerRec  int
+	dataoffset   uint64
+	auxBuf       []byte
+	decompressor *compress.Decompressor
+	getter       *compress.Getter
+}
+
+func CreateBtreeIndex(indexPath, dataPath string, M uint64) (*BtIndex, error) {
+	err := BuildBtreeIndex(dataPath, indexPath)
+	if err != nil {
+		return nil, err
+	}
+	return OpenBtreeIndex(indexPath, dataPath, M)
+}
+
+var DefaultBtreeM = uint64(2048)
+
+func CreateBtreeIndexWithDecompressor(indexPath string, M uint64, decompressor *compress.Decompressor, p *background.Progress) (*BtIndex, error) {
+	err := BuildBtreeIndexWithDecompressor(indexPath, decompressor, p)
+	if err != nil {
+		return nil, err
+	}
+	return OpenBtreeIndexWithDecompressor(indexPath, M, decompressor)
+}
+
+func BuildBtreeIndexWithDecompressor(indexPath string, kv *compress.Decompressor, p *background.Progress) error {
+	args := BtIndexWriterArgs{
+		IndexFile: indexPath,
+		TmpDir:    filepath.Dir(indexPath),
+	}
+
+	iw, err := NewBtIndexWriter(args)
+	if err != nil {
+		return err
+	}
+
+	getter := kv.MakeGetter()
+	getter.Reset(0)
+
+	key := make([]byte, 0, 64)
+	ks := make(map[int]int)
+
+	var pos uint64
+	emptys := 0
+	for getter.HasNext() {
+		p.Processed.Add(1)
+		key, kp := getter.Next(key[:0])
+		err = iw.AddKey(key, pos)
+		if err != nil {
+			return err
+		}
+
+		pos = getter.Skip()
+		if pos-kp == 1 {
+			ks[len(key)]++
+			emptys++
+		}
+	}
+	//fmt.Printf("emptys %d %#+v\n", emptys, ks)
+
+	if err := iw.Build(); err != nil {
+		return err
+	}
+	iw.Close()
+	return nil
+}
+
+// BuildBtreeIndex opens the .kv file at dataPath and generates a btree index over it, into file 'indexPath'
+func BuildBtreeIndex(dataPath, indexPath string) error {
+	decomp, err := compress.NewDecompressor(dataPath)
+	if err != nil {
+		return err
+	}
+
+	args := BtIndexWriterArgs{
+		IndexFile: indexPath,
+		TmpDir:    filepath.Dir(indexPath),
+	}
+
+	iw, err := NewBtIndexWriter(args)
+	if err != nil {
+		return err
+	}
+
+	getter := decomp.MakeGetter()
+	getter.Reset(0)
+
+	key := make([]byte, 0, 64)
+
+	var pos uint64
+	for getter.HasNext() {
+		key, _ := getter.Next(key[:0])
+		err = iw.AddKey(key, pos)
+		if err != nil {
+			return err
+		}
+
+		pos = getter.Skip()
getter.Skip()
+	}
+	decomp.Close()
+
+	if err := iw.Build(); err != nil {
+		return err
+	}
+	iw.Close()
+	return nil
+}
+
+func OpenBtreeIndexWithDecompressor(indexPath string, M uint64, kv *compress.Decompressor) (*BtIndex, error) {
+	s, err := os.Stat(indexPath)
+	if err != nil {
+		return nil, err
+	}
+
+	idx := &BtIndex{
+		filePath: indexPath,
+		size:     s.Size(),
+		modTime:  s.ModTime(),
+		auxBuf:   make([]byte, 64),
+	}
+
+	idx.file, err = os.Open(indexPath)
+	if err != nil {
+		return nil, err
+	}
+
+	if idx.mmapUnix, idx.mmapWin, err = mmap.Mmap(idx.file, int(idx.size)); err != nil {
+		return nil, err
+	}
+	idx.data = idx.mmapUnix[:idx.size]
+
+	// Read number of keys and bytes per record
+	pos := 8
+	idx.keyCount = binary.BigEndian.Uint64(idx.data[:pos])
+	if idx.keyCount == 0 {
+		return idx, nil
+	}
+	idx.bytesPerRec = int(idx.data[pos])
+	pos += 1
+
+	//p := (*[]byte)(unsafe.Pointer(&idx.data[pos]))
+	//l := int(idx.keyCount)*idx.bytesPerRec + (16 * int(idx.keyCount))
+
+	idx.getter = kv.MakeGetter()
+
+	idx.alloc = newBtAlloc(idx.keyCount, M, false)
+	idx.alloc.dataLookup = idx.dataLookup
+	idx.dataoffset = uint64(pos)
+	idx.alloc.traverseDfs()
+	idx.alloc.fillSearchMx()
+	return idx, nil
+}
+
+func OpenBtreeIndex(indexPath, dataPath string, M uint64) (*BtIndex, error) {
+	s, err := os.Stat(indexPath)
+	if err != nil {
+		return nil, err
+	}
+
+	idx := &BtIndex{
+		filePath: indexPath,
+		size:     s.Size(),
+		modTime:  s.ModTime(),
+		auxBuf:   make([]byte, 64),
+	}
+
+	idx.file, err = os.Open(indexPath)
+	if err != nil {
+		return nil, err
+	}
+
+	if idx.mmapUnix, idx.mmapWin, err = mmap.Mmap(idx.file, int(idx.size)); err != nil {
+		return nil, err
+	}
+	idx.data = idx.mmapUnix[:idx.size]
+
+	// Read number of keys and bytes per record
+	pos := 8
+	idx.keyCount = binary.BigEndian.Uint64(idx.data[:pos])
+	idx.bytesPerRec = int(idx.data[pos])
+	pos += 1
+
+	// offset := int(idx.keyCount) * idx.bytesPerRec //+ (idx.keySize * int(idx.keyCount))
+	// if offset < 0 {
+	// 	return nil, fmt.Errorf("offset is: %d which is below zero, the file: %s is broken", offset, indexPath)
+	// }
+
+	//p := (*[]byte)(unsafe.Pointer(&idx.data[pos]))
+	//l := int(idx.keyCount)*idx.bytesPerRec + (16 * int(idx.keyCount))
+
+	idx.decompressor, err = compress.NewDecompressor(dataPath)
+	if err != nil {
+		idx.Close()
+		return nil, err
+	}
+	idx.getter = idx.decompressor.MakeGetter()
+
+	idx.alloc = newBtAlloc(idx.keyCount, M, false)
+	idx.alloc.dataLookup = idx.dataLookup
+	idx.dataoffset = uint64(pos)
+	idx.alloc.traverseDfs()
+	idx.alloc.fillSearchMx()
+	return idx, nil
+}
+
+func (b *BtIndex) dataLookup(di uint64) ([]byte, []byte, error) {
+	if di >= b.keyCount {
+		return nil, nil, fmt.Errorf("di %d is out of range, key count in index is %d", di, b.keyCount)
+	}
+
+	p := b.dataoffset + di*uint64(b.bytesPerRec)
+	if uint64(len(b.data)) < p+uint64(b.bytesPerRec) {
+		return nil, nil, fmt.Errorf("data lookup gone too far (%d after %d)", p+uint64(b.bytesPerRec)-uint64(len(b.data)), len(b.data))
+	}
+
+	offt := b.data[p : p+uint64(b.bytesPerRec)]
+	var aux [8]byte
+	copy(aux[8-len(offt):], offt)
+
+	offset := binary.BigEndian.Uint64(aux[:])
+	b.getter.Reset(offset)
+	if !b.getter.HasNext() {
+		return nil, nil, fmt.Errorf("pair %d not found", di)
+	}
+
+	key, kp := b.getter.Next(nil)
+
+	if !b.getter.HasNext() {
+		return nil, nil, fmt.Errorf("pair %d not found", di)
+	}
+	val, vp := b.getter.Next(nil)
+	_, _ = kp, vp
+	return key, val, nil
+}
+
+func (b *BtIndex) Size() int64 { return b.size }
+
+func (b *BtIndex) ModTime() time.Time { return b.modTime }
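+// Editorial usage sketch, not part of the original change (file names are
+// hypothetical): open the .bt index next to its .kv data file and look up
+// a key through the b-tree cursor:
+//
+//	idx, err := OpenBtreeIndex("accounts.0-32.bt", "accounts.0-32.kv", DefaultBtreeM)
+//	if err != nil { /* handle */ }
+//	defer idx.Close()
+//	cur, err := idx.Seek(key) // cursor positioned at the first key >= key
+//	if err == nil && bytes.Equal(cur.Key(), key) {
+//		val := cur.Value() // value stored next to the key in the .kv file
+//	}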
+
+func (b *BtIndex) FilePath() string { return b.filePath }
+
+func (b *BtIndex) FileName() string { return path.Base(b.filePath) }
+
+func (b *BtIndex) Empty() bool { return b.keyCount == 0 }
+
+func (b *BtIndex) KeyCount() uint64 { return b.keyCount }
+
+func (b *BtIndex) Close() error {
+	if b == nil {
+		return nil
+	}
+	if err := mmap.Munmap(b.mmapUnix, b.mmapWin); err != nil {
+		return err
+	}
+	if err := b.file.Close(); err != nil {
+		return err
+	}
+	if b.decompressor != nil {
+		if err := b.decompressor.Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (b *BtIndex) Seek(x []byte) (*Cursor, error) {
+	if b.alloc != nil {
+		cursor, err := b.alloc.Seek(x)
+		if err != nil {
+			return nil, fmt.Errorf("seek key %x: %w", x, err)
+		}
+		return cursor, nil
+	}
+	return nil, fmt.Errorf("seek failed")
+}
+
+// Deprecated: prefer Seek, which returns an error instead of panicking
+func (b *BtIndex) Lookup(key []byte) uint64 {
+	cursor, err := b.alloc.Seek(key)
+	if err != nil {
+		panic(err)
+	}
+	return binary.BigEndian.Uint64(cursor.value)
+}
+
+func (b *BtIndex) OrdinalLookup(i uint64) *Cursor {
+	if i > b.alloc.K {
+		return nil
+	}
+	k, v, err := b.dataLookup(i)
+	if err != nil {
+		return nil
+	}
+
+	return &Cursor{
+		key: k, value: v, d: i, ix: b.alloc,
+	}
+}
diff --git a/state/domain.go b/state/domain.go
index 46f3f9d88..6046ff117 100644
--- a/state/domain.go
+++ b/state/domain.go
@@ -22,50 +22,47 @@ import (
 	"context"
 	"encoding/binary"
 	"fmt"
-	"io/fs"
 	"math"
 	"os"
 	"path/filepath"
 	"regexp"
 	"strconv"
+	"strings"
 	"sync/atomic"
 	"time"
 
 	"github.com/RoaringBitmap/roaring/roaring64"
-	"github.com/ledgerwatch/erigon-lib/kv/bitmapdb"
-	"github.com/ledgerwatch/log/v3"
+	"github.com/ledgerwatch/erigon-lib/common/background"
 	btree2 "github.com/tidwall/btree"
-	atomic2 "go.uber.org/atomic"
-	"golang.org/x/sync/semaphore"
+	"golang.org/x/sync/errgroup"
+
+	"github.com/ledgerwatch/log/v3"
 
 	"github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/common/dir"
 	"github.com/ledgerwatch/erigon-lib/compress"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon-lib/kv/bitmapdb"
 	"github.com/ledgerwatch/erigon-lib/recsplit"
-	"github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32"
-)
-
-var (
-	historyValCountKey = []byte("ValCount")
 )
 
 // filesItem corresponding to a pair of files (.dat and .idx)
 type filesItem struct {
 	decompressor *compress.Decompressor
 	index        *recsplit.Index
+	bindex       *BtIndex
 	startTxNum   uint64
 	endTxNum     uint64
 	// Frozen: file of size StepsInBiggestFile. Completely immutable.
 	// Cold: file of size < StepsInBiggestFile. Immutable, but can be closed/removed after merge to bigger file.
 	// Hot: Stored in DB. Providing Snapshot-Isolation by CopyOnWrite.
-	frozen   bool           // immutable, don't need atomic
-	refcount atomic2.Uint64 // only for `frozen=false`
+	frozen   bool         // immutable, don't need atomic
+	refcount atomic.Int32 // only for `frozen=false`
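	// Editorial sketch, not part of the change: the refcount lifecycle below
	// mirrors MakeContext/Close later in this file:
	//
	//	item.refcount.Add(1)   // a reader acquires the file in MakeContext
	//	...
	//	if item.refcount.Add(-1) == 0 && item.canDelete.Load() {
	//		item.closeFilesAndRemove() // the last reader closes and deletes it
	//	}
	// file can be deleted in 2 cases: 1. when `refcount == 0 && canDelete == true` 2. 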
on app startup when `file.isSubsetOfFrozenFile()` // other processes (which also reading files, may have same logic) - canDelete atomic2.Bool + canDelete atomic.Bool } func (i *filesItem) isSubsetOf(j *filesItem) bool { @@ -97,11 +94,28 @@ func (i *filesItem) closeFilesAndRemove() { } i.index = nil } + if i.bindex != nil { + if err := i.bindex.Close(); err != nil { + log.Trace("close", "err", err, "file", i.bindex.FileName()) + } + if err := os.Remove(i.bindex.FilePath()); err != nil { + log.Trace("close", "err", err, "file", i.bindex.FileName()) + } + i.bindex = nil + } } type DomainStats struct { - MergesCount uint64 - HistoryQueries uint64 + MergesCount uint64 + LastCollationTook time.Duration + LastPruneTook time.Duration + LastPruneHistTook time.Duration + LastFileBuildingTook time.Duration + LastCollationSize uint64 + LastPruneSize uint64 + + HistoryQueries *atomic.Uint64 + TotalQueries *atomic.Uint64 EfSearchTime time.Duration DataSize uint64 IndexSize uint64 @@ -109,7 +123,8 @@ type DomainStats struct { } func (ds *DomainStats) Accumulate(other DomainStats) { - ds.HistoryQueries += other.HistoryQueries + ds.HistoryQueries.Add(other.HistoryQueries.Load()) + ds.TotalQueries.Add(other.TotalQueries.Load()) ds.EfSearchTime += other.EfSearchTime ds.IndexSize += other.IndexSize ds.DataSize += other.DataSize @@ -123,51 +138,69 @@ type Domain struct { files *btree2.BTreeG[*filesItem] // thread-safe, but maybe need 1 RWLock for all trees in AggregatorV3 // roFiles derivative from field `file`, but without garbage (canDelete=true, overlaps, etc...) // MakeContext() using this field in zero-copy way - roFiles atomic2.Pointer[[]ctxItem] + roFiles atomic.Pointer[[]ctxItem] defaultDc *DomainContext keysTable string // key -> invertedStep , invertedStep = ^(txNum / aggregationStep), Needs to be table with DupSort valsTable string // key + invertedStep -> values stats DomainStats - prefixLen int // Number of bytes in the keys that can be used for prefix iteration mergesCount uint64 } -func NewDomain( - dir, tmpdir string, - aggregationStep uint64, - filenameBase string, - keysTable string, - valsTable string, - indexKeysTable string, - historyValsTable string, - settingsTable string, - indexTable string, - prefixLen int, - compressVals bool, -) (*Domain, error) { +func NewDomain(dir, tmpdir string, aggregationStep uint64, + filenameBase, keysTable, valsTable, indexKeysTable, historyValsTable, indexTable string, + compressVals, largeValues bool) (*Domain, error) { d := &Domain{ keysTable: keysTable, valsTable: valsTable, - prefixLen: prefixLen, files: btree2.NewBTreeGOptions[*filesItem](filesItemLess, btree2.Options{Degree: 128, NoLocks: false}), - roFiles: *atomic2.NewPointer(&[]ctxItem{}), + stats: DomainStats{HistoryQueries: &atomic.Uint64{}, TotalQueries: &atomic.Uint64{}}, } + d.roFiles.Store(&[]ctxItem{}) var err error - if d.History, err = NewHistory(dir, tmpdir, aggregationStep, filenameBase, indexKeysTable, indexTable, historyValsTable, settingsTable, compressVals, []string{"kv"}); err != nil { + if d.History, err = NewHistory(dir, tmpdir, aggregationStep, filenameBase, indexKeysTable, indexTable, historyValsTable, compressVals, []string{"kv"}, largeValues); err != nil { return nil, err } - files, err := os.ReadDir(dir) - if err != nil { - return nil, err + + return d, nil +} + +func (d *Domain) StartWrites() { + d.defaultDc = d.MakeContext() + d.History.StartWrites() +} + +func (d *Domain) FinishWrites() { + d.defaultDc.Close() + d.History.FinishWrites() +} + +// OpenList - main method to 
open a list of files.
+// It's ok if some files were opened earlier.
+// If a file is already open: noop.
+// If a file is open but not in the provided list: close it and remove it from the `files` field.
+func (d *Domain) OpenList(fNames []string) error {
+	if err := d.History.OpenList(fNames); err != nil {
+		return err
 	}
-	_ = d.scanStateFiles(files)
+	return d.openList(fNames)
+}
 
-	if err = d.openFiles(); err != nil {
-		return nil, err
+func (d *Domain) openList(fNames []string) error {
+	d.closeWhatNotInList(fNames)
+	_ = d.scanStateFiles(fNames)
+	if err := d.openFiles(); err != nil {
+		return fmt.Errorf("Domain.openList: %s, %w", d.filenameBase, err)
 	}
-	d.defaultDc = d.MakeContext()
-	return d, nil
+	return nil
+}
+
+func (d *Domain) OpenFolder() error {
+	files, err := d.fileNamesOnDisk()
+	if err != nil {
+		return err
+	}
+	return d.OpenList(files)
 }
 
 func (d *Domain) GetAndResetStats() DomainStats {
@@ -178,14 +211,10 @@ func (d *Domain) GetAndResetStats() DomainStats {
 	return r
 }
 
-func (d *Domain) scanStateFiles(files []fs.DirEntry) (uselessFiles []string) {
+func (d *Domain) scanStateFiles(fileNames []string) (uselessFiles []string) {
 	re := regexp.MustCompile("^" + d.filenameBase + ".([0-9]+)-([0-9]+).kv$")
 	var err error
-	for _, f := range files {
-		if !f.Type().IsRegular() {
-			continue
-		}
-		name := f.Name()
+	for _, name := range fileNames {
 		subs := re.FindStringSubmatch(name)
 		if len(subs) != 3 {
 			if len(subs) != 0 {
@@ -209,6 +238,10 @@ func (d *Domain) scanStateFiles(files []fs.DirEntry) (uselessFiles []string) {
 		startTxNum, endTxNum := startStep*d.aggregationStep, endStep*d.aggregationStep
 
 		var newFile = &filesItem{startTxNum: startTxNum, endTxNum: endTxNum, frozen: endStep-startStep == StepsInBiggestFile}
+		if _, has := d.files.Get(newFile); has {
+			continue
+		}
+
 		{
 			var subSets []*filesItem
 			var superSet *filesItem
@@ -239,19 +272,17 @@ func (d *Domain) scanStateFiles(files []fs.DirEntry) (uselessFiles []string) {
 		}
 		d.files.Set(newFile)
 	}
-	d.reCalcRoFiles()
 	return uselessFiles
 }
 
-func (d *Domain) openFiles() error {
-	var err error
+func (d *Domain) openFiles() (err error) {
 	var totalKeys uint64
 
 	invalidFileItems := make([]*filesItem, 0)
 	d.files.Walk(func(items []*filesItem) bool {
 		for _, item := range items {
 			if item.decompressor != nil {
-				item.decompressor.Close()
+				continue
 			}
 			fromStep, toStep := item.startTxNum/d.aggregationStep, item.endTxNum/d.aggregationStep
 			datPath := filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.kv", d.filenameBase, fromStep, toStep))
@@ -263,15 +294,24 @@ func (d *Domain) openFiles() error {
 				return false
 			}
 
-			if item.index == nil {
-				idxPath := filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.kvi", d.filenameBase, fromStep, toStep))
-				if dir.FileExist(idxPath) {
-					if item.index, err = recsplit.OpenIndex(idxPath); err != nil {
-						log.Debug("InvertedIndex.openFiles: %w, %s", err, idxPath)
-						return false
-					}
-					totalKeys += item.index.KeyCount()
+			if item.index != nil {
+				continue
+			}
+			idxPath := filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.kvi", d.filenameBase, fromStep, toStep))
+			if dir.FileExist(idxPath) {
+				if item.index, err = recsplit.OpenIndex(idxPath); err != nil {
+					log.Debug("Domain.openFiles: %w, %s", err, idxPath)
+					return false
+				}
+				totalKeys += item.index.KeyCount()
+			}
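			// Editorial note, not part of the change: for a domain with base
			// name <base> covering steps [from, to), the files on disk are:
			//	<base>.<from>-<to>.kv  - compressed key/value data
			//	<base>.<from>-<to>.kvi - recsplit (perfect-hash) index over the .kv
			//	<base>.<from>-<to>.bt  - b-tree index over the same .kv
+			if item.bindex == nil {
+				bidxPath := filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.bt", d.filenameBase, fromStep, toStep))
+				if item.bindex, err = OpenBtreeIndexWithDecompressor(bidxPath, 2048, item.decompressor); err != nil {
+					log.Debug("Domain.openFiles: %w, %s", err, 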
bidxPath) + return false } + //totalKeys += item.bindex.KeyCount() } } return true @@ -282,29 +322,46 @@ func (d *Domain) openFiles() error { for _, item := range invalidFileItems { d.files.Delete(item) } + + d.reCalcRoFiles() return nil } -func (d *Domain) closeFiles() { +func (d *Domain) closeWhatNotInList(fNames []string) { + var toDelete []*filesItem d.files.Walk(func(items []*filesItem) bool { + Loop1: for _, item := range items { - if item.decompressor != nil { - if err := item.decompressor.Close(); err != nil { - log.Trace("close", "err", err, "file", item.index.FileName()) + for _, protectName := range fNames { + if item.decompressor != nil && item.decompressor.FileName() == protectName { + continue Loop1 } - item.decompressor = nil - } - if item.index != nil { - if err := item.index.Close(); err != nil { - log.Trace("close", "err", err, "file", item.index.FileName()) - } - item.index = nil } + toDelete = append(toDelete, item) } return true }) - d.files.Clear() - d.reCalcRoFiles() + for _, item := range toDelete { + if item.decompressor != nil { + if err := item.decompressor.Close(); err != nil { + log.Trace("close", "err", err, "file", item.decompressor.FileName()) + } + item.decompressor = nil + } + if item.index != nil { + if err := item.index.Close(); err != nil { + log.Trace("close", "err", err, "file", item.index.FileName()) + } + item.index = nil + } + if item.bindex != nil { + if err := item.bindex.Close(); err != nil { + log.Trace("close", "err", err, "file", item.bindex.FileName()) + } + item.bindex = nil + } + d.files.Delete(item) + } } func (d *Domain) reCalcRoFiles() { @@ -349,13 +406,15 @@ func (d *Domain) reCalcRoFiles() { } func (d *Domain) Close() { - // Closing state files only after background aggregation goroutine is finished d.History.Close() - d.closeFiles() + d.closeWhatNotInList([]string{}) + d.reCalcRoFiles() } func (dc *DomainContext) get(key []byte, fromTxNum uint64, roTx kv.Tx) ([]byte, bool, error) { //var invertedStep [8]byte + dc.d.stats.TotalQueries.Add(1) + invertedStep := dc.numBuf binary.BigEndian.PutUint64(invertedStep[:], ^(fromTxNum / dc.d.aggregationStep)) keyCursor, err := roTx.CursorDupSort(dc.d.keysTable) @@ -368,7 +427,7 @@ func (dc *DomainContext) get(key []byte, fromTxNum uint64, roTx kv.Tx) ([]byte, return nil, false, err } if len(foundInvStep) == 0 { - atomic.AddUint64(&dc.d.stats.HistoryQueries, 1) + dc.d.stats.HistoryQueries.Add(1) v, found := dc.readFromFiles(key, fromTxNum) return v, found, nil } @@ -522,11 +581,10 @@ type ctxItem struct { src *filesItem } -type ctxLocalityItem struct { +type ctxLocalityIdx struct { reader *recsplit.IndexReader bm *bitmapdb.FixedSizeBitmaps - - file *filesItem + file *ctxItem } func ctxItemLess(i, j ctxItem) bool { //nolint @@ -541,7 +599,7 @@ type DomainContext struct { d *Domain files []ctxItem getters []*compress.Getter - readers []*recsplit.IndexReader + readers []*BtIndex hc *HistoryContext keyBuf [60]byte // 52b key and 8b for inverted step numBuf [8]byte @@ -558,17 +616,19 @@ func (dc *DomainContext) statelessGetter(i int) *compress.Getter { } return r } -func (dc *DomainContext) statelessIdxReader(i int) *recsplit.IndexReader { + +func (dc *DomainContext) statelessBtree(i int) *BtIndex { if dc.readers == nil { - dc.readers = make([]*recsplit.IndexReader, len(dc.files)) + dc.readers = make([]*BtIndex, len(dc.files)) } r := dc.readers[i] if r == nil { - r = recsplit.NewIndexReader(dc.files[i].src.index) + r = dc.files[i].src.bindex dc.readers[i] = r } return r } + func (d *Domain) 
collectFilesStats() (datsz, idxsz, files uint64) {
 	d.History.files.Walk(func(items []*filesItem) bool {
 		for _, item := range items {
@@ -589,7 +649,8 @@ func (d *Domain) collectFilesStats() (datsz, idxsz, files uint64) {
 			}
 			datsz += uint64(item.decompressor.Size())
 			idxsz += uint64(item.index.Size())
-			files += 2
+			idxsz += uint64(item.bindex.Size())
+			files += 3
 		}
 		return true
 	})
@@ -609,7 +670,7 @@ func (d *Domain) MakeContext() *DomainContext {
 	}
 	for _, item := range dc.files {
 		if !item.src.frozen {
-			item.src.refcount.Inc()
+			item.src.refcount.Add(1)
 		}
 	}
 
@@ -621,7 +682,7 @@ func (dc *DomainContext) Close() {
 		if item.src.frozen {
 			continue
 		}
-		refCnt := item.src.refcount.Dec()
+		refCnt := item.src.refcount.Add(-1)
 		//GC: the last reader is responsible for removing useless files: close and delete them
 		if refCnt == 0 && item.src.canDelete.Load() {
 			item.src.closeFilesAndRemove()
@@ -631,14 +692,12 @@ func (dc *DomainContext) Close() {
 }
 
 // IteratePrefix iterates over key-value pairs of the domain that start with the given prefix
-// The length of the prefix has to match the `prefixLen` parameter used to create the domain
 // Such iteration is not intended to be used in public API, therefore it uses read-write transaction
 // inside the domain. Another version of this for public API use needs to be created, that uses
 // roTx instead and supports ending the iterations before it reaches the end.
 func (dc *DomainContext) IteratePrefix(prefix []byte, it func(k, v []byte)) error {
-	if len(prefix) != dc.d.prefixLen {
-		return fmt.Errorf("wrong prefix length, this %s domain supports prefixLen %d, given [%x]", dc.d.filenameBase, dc.d.prefixLen, prefix)
-	}
+	dc.d.stats.HistoryQueries.Add(1)
+
 	var cp CursorHeap
 	heap.Init(&cp)
 	var k, v []byte
@@ -663,26 +722,21 @@ func (dc *DomainContext) IteratePrefix(prefix []byte, it func(k, v []byte)) erro
 		heap.Push(&cp, &CursorItem{t: DB_CURSOR, key: common.Copy(k), val: common.Copy(v), c: keysCursor, endTxNum: txNum, reverse: true})
 	}
 	for i, item := range dc.files {
-		reader := dc.statelessIdxReader(i)
-		if reader.Empty() {
+		bg := dc.statelessBtree(i)
+		if bg.Empty() {
 			continue
 		}
-		offset := reader.Lookup(prefix)
-		// Creating dedicated getter because the one in the item may be used to delete storage, for example
-		g := dc.statelessGetter(i)
-		g.Reset(offset)
-		if g.HasNext() {
-			if keyMatch, _ := g.Match(prefix); !keyMatch {
-				continue
-			}
-			g.Skip()
+
+		cursor, err := bg.Seek(prefix)
+		if err != nil {
+			continue
 		}
-		if g.HasNext() {
-			key, _ := g.Next(nil)
-			if bytes.HasPrefix(key, prefix) {
-				val, _ := g.Next(nil)
-				heap.Push(&cp, &CursorItem{t: FILE_CURSOR, key: key, val: val, dg: g, endTxNum: item.endTxNum, reverse: true})
-			}
+
+		g := dc.statelessGetter(i)
+		key := cursor.Key()
+		if bytes.HasPrefix(key, prefix) {
+			val := cursor.Value()
+			heap.Push(&cp, &CursorItem{t: FILE_CURSOR, key: key, val: val, dg: g, endTxNum: item.endTxNum, reverse: true})
 		}
 	}
 	for cp.Len() > 0 {
@@ -751,11 +805,176 @@ func (c Collation) Close() {
 	}
 }
 
+type kvpair struct {
+	k, v []byte
+}
+
+func (d *Domain) writeCollationPair(valuesComp *compress.Compressor, pairs chan kvpair) (count int, err error) {
+	for kv := range pairs {
+		if err = valuesComp.AddUncompressedWord(kv.k); err != nil {
+			return count, fmt.Errorf("add %s values key [%x]: %w", d.filenameBase, kv.k, err)
+		}
+		mxCollationSize.Inc()
+		count++ // Only counting keys, not values
+		if err = valuesComp.AddUncompressedWord(kv.v); err != nil {
+			return count, fmt.Errorf("add %s values val [%x]=>[%x]: %w", d.filenameBase, kv.k, kv.v, err)
+		}
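+		// Editorial note, not part of the change: this loop is the consumer
+		// half of the collation pipeline. collateStream below runs it in an
+		// errgroup goroutine, feeds it one kvpair per selected key through
+		// the `pairs` channel, and closes the channel when its scan finishes.
+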
} + return count, nil +} + +// nolint +func (d *Domain) aggregate(ctx context.Context, step uint64, txFrom, txTo uint64, tx kv.Tx, ps *background.ProgressSet) (err error) { + mxRunningCollations.Inc() + start := time.Now() + collation, err := d.collateStream(ctx, step, txFrom, txTo, tx) + mxRunningCollations.Dec() + mxCollateTook.UpdateDuration(start) + + mxCollationSize.Set(uint64(collation.valuesComp.Count())) + mxCollationSizeHist.Set(uint64(collation.historyComp.Count())) + + if err != nil { + collation.Close() + //return fmt.Errorf("domain collation %q has failed: %w", d.filenameBase, err) + return err + } + + mxRunningMerges.Inc() + + start = time.Now() + sf, err := d.buildFiles(ctx, step, collation, ps) + collation.Close() + defer sf.Close() + + if err != nil { + sf.Close() + mxRunningMerges.Dec() + return + } + + mxRunningMerges.Dec() + + d.integrateFiles(sf, step*d.aggregationStep, (step+1)*d.aggregationStep) + d.stats.LastFileBuildingTook = time.Since(start) + return nil +} + +// collate gathers domain changes over the specified step, using read-only transaction, +// and returns compressors, elias fano, and bitmaps +// [txFrom; txTo) +func (d *Domain) collateStream(ctx context.Context, step, txFrom, txTo uint64, roTx kv.Tx) (Collation, error) { + started := time.Now() + defer func() { + d.stats.LastCollationTook = time.Since(started) + }() + + hCollation, err := d.History.collate(step, txFrom, txTo, roTx) + if err != nil { + return Collation{}, err + } + + var valuesComp *compress.Compressor + closeComp := true + defer func() { + if closeComp { + if valuesComp != nil { + valuesComp.Close() + } + } + }() + + valuesPath := filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.kv", d.filenameBase, step, step+1)) + if valuesComp, err = compress.NewCompressor(context.Background(), "collate values", valuesPath, d.tmpdir, compress.MinPatternScore, 1, log.LvlTrace); err != nil { + return Collation{}, fmt.Errorf("create %s values compressor: %w", d.filenameBase, err) + } + + keysCursor, err := roTx.CursorDupSort(d.keysTable) + if err != nil { + return Collation{}, fmt.Errorf("create %s keys cursor: %w", d.filenameBase, err) + } + defer keysCursor.Close() + + var ( + k, v []byte + pos uint64 + valCount int + pairs = make(chan kvpair, 1024) + ) + + //totalKeys, err := keysCursor.Count() + //if err != nil { + // return Collation{}, fmt.Errorf("failed to obtain keys count for domain %q", d.filenameBase) + //} + + eg, _ := errgroup.WithContext(ctx) + eg.Go(func() error { + valCount, err = d.writeCollationPair(valuesComp, pairs) + return err + }) + + var ( + stepBytes = make([]byte, 8) + keySuffix = make([]byte, 256+8) + ) + binary.BigEndian.PutUint64(stepBytes, ^step) + + for k, _, err = keysCursor.First(); err == nil && k != nil; k, _, err = keysCursor.NextNoDup() { + pos++ + + if v, err = keysCursor.LastDup(); err != nil { + return Collation{}, fmt.Errorf("find last %s key for aggregation step k=[%x]: %w", d.filenameBase, k, err) + } + if bytes.Equal(v, stepBytes) { + copy(keySuffix, k) + copy(keySuffix[len(k):], v) + ks := len(k) + len(v) + + v, err := roTx.GetOne(d.valsTable, keySuffix[:ks]) + if err != nil { + return Collation{}, fmt.Errorf("find last %s value for aggregation step k=[%x]: %w", d.filenameBase, k, err) + } + + select { + case <-ctx.Done(): + return Collation{}, ctx.Err() + default: + } + + pairs <- kvpair{k: k, v: v} + } + } + close(pairs) + if err != nil { + return Collation{}, fmt.Errorf("iterate over %s keys cursor: %w", d.filenameBase, err) + } + + if err := eg.Wait(); err != nil 
{
+		return Collation{}, fmt.Errorf("collate over %s keys cursor: %w", d.filenameBase, err)
+	}
+
+	closeComp = false
+	return Collation{
+		valuesPath:   valuesPath,
+		valuesComp:   valuesComp,
+		valuesCount:  valCount,
+		historyPath:  hCollation.historyPath,
+		historyComp:  hCollation.historyComp,
+		historyCount: hCollation.historyCount,
+		indexBitmaps: hCollation.indexBitmaps,
+	}, nil
+}
+
 // collate gathers domain changes over the specified step, using read-only transaction,
 // and returns compressors, elias fano, and bitmaps
 // [txFrom; txTo)
 func (d *Domain) collate(ctx context.Context, step, txFrom, txTo uint64, roTx kv.Tx, logEvery *time.Ticker) (Collation, error) {
-	hCollation, err := d.History.collate(step, txFrom, txTo, roTx, logEvery)
+	started := time.Now()
+	defer func() {
+		d.stats.LastCollationTook = time.Since(started)
+	}()
+
+	hCollation, err := d.History.collate(step, txFrom, txTo, roTx)
 	if err != nil {
 		return Collation{}, err
 	}
@@ -780,26 +999,25 @@ func (d *Domain) collate(ctx context.Context, step, txFrom, txTo uint64, roTx kv
 	defer keysCursor.Close()
 
 	var (
-		prefix []byte // Track prefix to insert it before entries
 		k, v        []byte
 		pos         uint64
 		valuesCount uint
 	)
 
-	totalKeys, err := keysCursor.Count()
-	if err != nil {
-		return Collation{}, fmt.Errorf("failed to obtain keys count for domain %q", d.filenameBase)
-	}
+	//TODO: use ProgressSet
+	//totalKeys, err := keysCursor.Count()
+	//if err != nil {
+	//	return Collation{}, fmt.Errorf("failed to obtain keys count for domain %q", d.filenameBase)
+	//}
 	for k, _, err = keysCursor.First(); err == nil && k != nil; k, _, err = keysCursor.NextNoDup() {
+		if err != nil {
+			return Collation{}, err
+		}
 		pos++
 		select {
-		case <-logEvery.C:
-			log.Info("[snapshots] collate domain", "name", d.filenameBase,
-				"range", fmt.Sprintf("%.2f-%.2f", float64(txFrom)/float64(d.aggregationStep), float64(txTo)/float64(d.aggregationStep)),
-				"progress", fmt.Sprintf("%.2f%%", float64(pos)/float64(totalKeys)*100))
 		case <-ctx.Done():
 			log.Warn("[snapshots] collate domain cancelled", "name", d.filenameBase, "err", ctx.Err())
-			return Collation{}, err
+			return Collation{}, ctx.Err()
 		default:
 		}
@@ -815,16 +1033,6 @@ func (d *Domain) collate(ctx context.Context, step, txFrom, txTo uint64, roTx kv
 		if err != nil {
 			return Collation{}, fmt.Errorf("find last %s value for aggregation step k=[%x]: %w", d.filenameBase, k, err)
 		}
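		// Editorial note, not part of the change: values in the keys table are
		// inverted steps, ^(txNum/aggregationStep) stored big-endian, so among
		// DupSort duplicates the most recent step sorts first:
		//
		//	binary.BigEndian.PutUint64(buf, ^step) // encode
		//	step := ^binary.BigEndian.Uint64(buf)  // decode (as in prune below)
-		if d.prefixLen > 0 && (prefix == nil || !bytes.HasPrefix(k, prefix)) {
-			prefix = append(prefix[:0], k[:d.prefixLen]...)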
- if err = valuesComp.AddUncompressedWord(prefix); err != nil { - return Collation{}, fmt.Errorf("add %s values prefix [%x]: %w", d.filenameBase, prefix, err) - } - if err = valuesComp.AddUncompressedWord(nil); err != nil { - return Collation{}, fmt.Errorf("add %s values prefix val [%x]: %w", d.filenameBase, prefix, err) - } - valuesCount++ - } if err = valuesComp.AddUncompressedWord(k); err != nil { return Collation{}, fmt.Errorf("add %s values key [%x]: %w", d.filenameBase, k, err) } @@ -852,6 +1060,7 @@ func (d *Domain) collate(ctx context.Context, step, txFrom, txTo uint64, roTx kv type StaticFiles struct { valuesDecomp *compress.Decompressor valuesIdx *recsplit.Index + valuesBt *BtIndex historyDecomp *compress.Decompressor historyIdx *recsplit.Index efHistoryDecomp *compress.Decompressor @@ -865,6 +1074,9 @@ func (sf StaticFiles) Close() { if sf.valuesIdx != nil { sf.valuesIdx.Close() } + if sf.valuesBt != nil { + sf.valuesBt.Close() + } if sf.historyDecomp != nil { sf.historyDecomp.Close() } @@ -881,13 +1093,13 @@ func (sf StaticFiles) Close() { // buildFiles performs potentially resource intensive operations of creating // static files and their indices -func (d *Domain) buildFiles(ctx context.Context, step uint64, collation Collation) (StaticFiles, error) { +func (d *Domain) buildFiles(ctx context.Context, step uint64, collation Collation, ps *background.ProgressSet) (StaticFiles, error) { hStaticFiles, err := d.History.buildFiles(ctx, step, HistoryCollation{ historyPath: collation.historyPath, historyComp: collation.historyComp, historyCount: collation.historyCount, indexBitmaps: collation.indexBitmaps, - }) + }, ps) if err != nil { return StaticFiles{}, err } @@ -909,7 +1121,6 @@ func (d *Domain) buildFiles(ctx context.Context, step uint64, collation Collatio } } }() - valuesIdxPath := filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.kvi", d.filenameBase, step, step+1)) if err = valuesComp.Compress(); err != nil { return StaticFiles{}, fmt.Errorf("compress %s values: %w", d.filenameBase, err) } @@ -918,13 +1129,34 @@ func (d *Domain) buildFiles(ctx context.Context, step uint64, collation Collatio if valuesDecomp, err = compress.NewDecompressor(collation.valuesPath); err != nil { return StaticFiles{}, fmt.Errorf("open %s values decompressor: %w", d.filenameBase, err) } - if valuesIdx, err = buildIndex(ctx, valuesDecomp, valuesIdxPath, d.tmpdir, collation.valuesCount, false); err != nil { - return StaticFiles{}, fmt.Errorf("build %s values idx: %w", d.filenameBase, err) + + valuesIdxFileName := fmt.Sprintf("%s.%d-%d.kvi", d.filenameBase, step, step+1) + valuesIdxPath := filepath.Join(d.dir, valuesIdxFileName) + { + p := ps.AddNew(valuesIdxFileName, uint64(valuesDecomp.Count()*2)) + defer ps.Delete(p) + if valuesIdx, err = buildIndexThenOpen(ctx, valuesDecomp, valuesIdxPath, d.tmpdir, collation.valuesCount, false, p); err != nil { + return StaticFiles{}, fmt.Errorf("build %s values idx: %w", d.filenameBase, err) + } + } + + var bt *BtIndex + { + btFileName := strings.TrimSuffix(valuesIdxFileName, "kvi") + "bt" + btPath := filepath.Join(d.dir, btFileName) + p := ps.AddNew(btFileName, uint64(valuesDecomp.Count()*2)) + defer ps.Delete(p) + bt, err = CreateBtreeIndexWithDecompressor(btPath, DefaultBtreeM, valuesDecomp, p) + if err != nil { + return StaticFiles{}, fmt.Errorf("build %s values bt idx: %w", d.filenameBase, err) + } } + closeComp = false return StaticFiles{ valuesDecomp: valuesDecomp, valuesIdx: valuesIdx, + valuesBt: bt, historyDecomp: hStaticFiles.historyDecomp, historyIdx: 
hStaticFiles.historyIdx, efHistoryDecomp: hStaticFiles.efHistoryDecomp, @@ -936,7 +1168,7 @@ func (d *Domain) missedIdxFiles() (l []*filesItem) { d.files.Walk(func(items []*filesItem) bool { // don't run slow logic while iterating on btree for _, item := range items { fromStep, toStep := item.startTxNum/d.aggregationStep, item.endTxNum/d.aggregationStep - if !dir.FileExist(filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.kvi", d.filenameBase, fromStep, toStep))) { + if !dir.FileExist(filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.bt", d.filenameBase, fromStep, toStep))) { l = append(l, item) } } @@ -946,18 +1178,35 @@ func (d *Domain) missedIdxFiles() (l []*filesItem) { } // BuildMissedIndices - produce .efi/.vi/.kvi from .ef/.v/.kv -func (d *Domain) BuildMissedIndices(ctx context.Context, sem *semaphore.Weighted) (err error) { - if err := d.History.BuildMissedIndices(ctx, sem); err != nil { - return err - } +func (d *Domain) BuildMissedIndices(ctx context.Context, g *errgroup.Group, ps *background.ProgressSet) (err error) { + d.History.BuildMissedIndices(ctx, g, ps) + d.InvertedIndex.BuildMissedIndices(ctx, g, ps) for _, item := range d.missedIdxFiles() { //TODO: build .kvi - _ = item + fitem := item + g.Go(func() error { + idxPath := filepath.Join(fitem.decompressor.FilePath(), fitem.decompressor.FileName()) + idxPath = strings.TrimSuffix(idxPath, "kv") + "bt" + + p := ps.AddNew("fixme", uint64(fitem.decompressor.Count())) + defer ps.Delete(p) + if err := BuildBtreeIndexWithDecompressor(idxPath, fitem.decompressor, p); err != nil { + return fmt.Errorf("failed to build btree index for %s: %w", fitem.decompressor.FileName(), err) + } + return nil + }) + } + return nil +} + +func buildIndexThenOpen(ctx context.Context, d *compress.Decompressor, idxPath, tmpdir string, count int, values bool, p *background.Progress) (*recsplit.Index, error) { + if err := buildIndex(ctx, d, idxPath, tmpdir, count, values, p); err != nil { + return nil, err } - return d.openFiles() + return recsplit.OpenIndex(idxPath) } -func buildIndex(ctx context.Context, d *compress.Decompressor, idxPath, tmpdir string, count int, values bool) (*recsplit.Index, error) { +func buildIndex(ctx context.Context, d *compress.Decompressor, idxPath, tmpdir string, count int, values bool, p *background.Progress) error { var rs *recsplit.RecSplit var err error if rs, err = recsplit.NewRecSplit(recsplit.RecSplitArgs{ @@ -968,7 +1217,7 @@ func buildIndex(ctx context.Context, d *compress.Decompressor, idxPath, tmpdir s TmpDir: tmpdir, IndexFile: idxPath, }); err != nil { - return nil, fmt.Errorf("create recsplit: %w", err) + return fmt.Errorf("create recsplit: %w", err) } defer rs.Close() rs.LogLvl(log.LvlTrace) @@ -980,39 +1229,37 @@ func buildIndex(ctx context.Context, d *compress.Decompressor, idxPath, tmpdir s for { if err := ctx.Err(); err != nil { log.Warn("recsplit index building cancelled", "err", err) - return nil, err + return err } g.Reset(0) for g.HasNext() { word, valPos = g.Next(word[:0]) if values { if err = rs.AddKey(word, valPos); err != nil { - return nil, fmt.Errorf("add idx key [%x]: %w", word, err) + return fmt.Errorf("add idx key [%x]: %w", word, err) } } else { if err = rs.AddKey(word, keyPos); err != nil { - return nil, fmt.Errorf("add idx key [%x]: %w", word, err) + return fmt.Errorf("add idx key [%x]: %w", word, err) } } // Skip value keyPos = g.Skip() + + p.Processed.Add(1) } if err = rs.Build(); err != nil { if rs.Collision() { log.Info("Building recsplit. Collision happened. It's ok. 
Restarting...") rs.ResetNextSalt() } else { - return nil, fmt.Errorf("build idx: %w", err) + return fmt.Errorf("build idx: %w", err) } } else { break } } - var idx *recsplit.Index - if idx, err = recsplit.OpenIndex(idxPath); err != nil { - return nil, fmt.Errorf("open idx: %w", err) - } - return idx, nil + return nil } func (d *Domain) integrateFiles(sf StaticFiles, txNumFrom, txNumTo uint64) { @@ -1028,103 +1275,156 @@ func (d *Domain) integrateFiles(sf StaticFiles, txNumFrom, txNumTo uint64) { endTxNum: txNumTo, decompressor: sf.valuesDecomp, index: sf.valuesIdx, + bindex: sf.valuesBt, }) d.reCalcRoFiles() } // [txFrom; txTo) func (d *Domain) prune(ctx context.Context, step uint64, txFrom, txTo, limit uint64, logEvery *time.Ticker) error { - // It is important to clean up tables in a specific order - // First keysTable, because it is the first one access in the `get` function, i.e. if the record is canDelete from there, other tables will not be accessed + defer func(t time.Time) { d.stats.LastPruneTook = time.Since(t) }(time.Now()) + mxPruningProgress.Inc() + defer mxPruningProgress.Dec() + + var ( + _state = "scan steps" + pos atomic.Uint64 + totalKeys uint64 + ) + keysCursor, err := d.tx.RwCursorDupSort(d.keysTable) if err != nil { return fmt.Errorf("%s keys cursor: %w", d.filenameBase, err) } defer keysCursor.Close() - var k, v []byte - keyMaxSteps := make(map[string]uint64) - for k, v, err = keysCursor.First(); err == nil && k != nil; k, v, err = keysCursor.Next() { - select { - case <-logEvery.C: - log.Info("[snapshots] prune domain", "name", d.filenameBase, "stage", "collect keys", "range", fmt.Sprintf("%.2f-%.2f", float64(txFrom)/float64(d.aggregationStep), float64(txTo)/float64(d.aggregationStep))) - case <-ctx.Done(): - log.Warn("[snapshots] prune domain cancelled", "name", d.filenameBase, "err", ctx.Err()) - return err - default: - } - - s := ^binary.BigEndian.Uint64(v) - if maxS, seen := keyMaxSteps[string(k)]; !seen || s > maxS { - keyMaxSteps[string(k)] = s - } - } + totalKeys, err = keysCursor.Count() if err != nil { - return fmt.Errorf("iterate of %s keys: %w", d.filenameBase, err) + return fmt.Errorf("get count of %s keys: %w", d.filenameBase, err) } - for k, v, err = keysCursor.First(); err == nil && k != nil; k, v, err = keysCursor.Next() { - select { - case <-logEvery.C: - log.Info("[snapshots] prune domain", "name", d.filenameBase, "stage", "prune keys", "range", fmt.Sprintf("%.2f-%.2f", float64(txFrom)/float64(d.aggregationStep), float64(txTo)/float64(d.aggregationStep))) - case <-ctx.Done(): - log.Warn("[snapshots] prune domain cancelled", "name", d.filenameBase, "err", ctx.Err()) - return err - default: - } + var ( + k, v, stepBytes []byte + keyMaxSteps = make(map[string]uint64) + c = 0 + ) + stepBytes = make([]byte, 8) + binary.BigEndian.PutUint64(stepBytes, ^step) - s := ^binary.BigEndian.Uint64(v) - if s == step { - if maxS := keyMaxSteps[string(k)]; maxS <= step { + for k, v, err = keysCursor.First(); err == nil && k != nil; k, v, err = keysCursor.Next() { + if bytes.Equal(v, stepBytes) { + c++ + kl, vl, err := keysCursor.PrevDup() + if err != nil { + break + } + if kl == nil && vl == nil { continue } - if err = keysCursor.DeleteCurrent(); err != nil { - return fmt.Errorf("clean up %s for [%x]=>[%x]: %w", d.filenameBase, k, v, err) + s := ^binary.BigEndian.Uint64(vl) + if s > step { + kn, vn, err := keysCursor.NextDup() + if err != nil { + break + } + if bytes.Equal(kn, k) && bytes.Equal(vn, stepBytes) { + if err := keysCursor.DeleteCurrent(); err != nil { + 
return fmt.Errorf("prune key %x: %w", k, err) + } + mxPruneSize.Inc() + keyMaxSteps[string(k)] = s + } } + } + pos.Add(1) - if bytes.HasPrefix(k, keyCommitmentState) { - fmt.Printf("domain prune key %x [s%d] txn=%d\n", string(k), s, ^binary.BigEndian.Uint64(v)) - } + if ctx.Err() != nil { + log.Warn("[snapshots] prune domain cancelled", "name", d.filenameBase, "err", ctx.Err()) + return ctx.Err() + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-logEvery.C: + log.Info("[snapshots] prune domain", "name", d.filenameBase, + "stage", _state, + "range", fmt.Sprintf("%.2f-%.2f", float64(txFrom)/float64(d.aggregationStep), float64(txTo)/float64(d.aggregationStep)), + "progress", fmt.Sprintf("%.2f%%", (float64(pos.Load())/float64(totalKeys))*100)) + default: } } if err != nil { return fmt.Errorf("iterate of %s keys: %w", d.filenameBase, err) } + + _state = "delete vals" + pos.Store(0) + // It is important to clean up tables in a specific order + // First keysTable, because it is the first one access in the `get` function, i.e. if the record is deleted from there, other tables will not be accessed var valsCursor kv.RwCursor if valsCursor, err = d.tx.RwCursor(d.valsTable); err != nil { return fmt.Errorf("%s vals cursor: %w", d.filenameBase, err) } defer valsCursor.Close() - for k, _, err = valsCursor.First(); err == nil && k != nil; k, _, err = valsCursor.Next() { - select { - case <-logEvery.C: - log.Info("[snapshots] prune domain", "name", d.filenameBase, "stage", "prune values", "range", fmt.Sprintf("%.2f-%.2f", float64(txFrom)/float64(d.aggregationStep), float64(txTo)/float64(d.aggregationStep))) - case <-ctx.Done(): - log.Warn("[snapshots] prune domain cancelled", "name", d.filenameBase, "err", ctx.Err()) - return err - default: - } - s := ^binary.BigEndian.Uint64(k[len(k)-8:]) - if s == step { - if maxS := keyMaxSteps[string(k[:len(k)-8])]; maxS <= step { + + totalKeys, err = valsCursor.Count() + if err != nil { + return fmt.Errorf("count of %s keys: %w", d.filenameBase, err) + } + + for k, _, err := valsCursor.First(); err == nil && k != nil; k, _, err = valsCursor.Next() { + if bytes.HasSuffix(k, stepBytes) { + if _, ok := keyMaxSteps[string(k)]; !ok { continue } - if err = valsCursor.DeleteCurrent(); err != nil { - return fmt.Errorf("clean up %s for [%x]: %w", d.filenameBase, k, err) + if err := valsCursor.DeleteCurrent(); err != nil { + return fmt.Errorf("prune val %x: %w", k, err) } - //fmt.Printf("domain prune value for %x (invs %x) [s%d]\n", string(k),k[len(k)-8):], s) + mxPruneSize.Inc() + } + pos.Add(1) + //_prog = 100 * (float64(pos) / float64(totalKeys)) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-logEvery.C: + log.Info("[snapshots] prune domain", "name", d.filenameBase, + "stage", _state, + "range", fmt.Sprintf("%.2f-%.2f", float64(txFrom)/float64(d.aggregationStep), float64(txTo)/float64(d.aggregationStep)), + "progress", fmt.Sprintf("%.2f%%", (float64(pos.Load())/float64(totalKeys))*100)) + default: } } if err != nil { return fmt.Errorf("iterate over %s vals: %w", d.filenameBase, err) } + defer func(t time.Time) { d.stats.LastPruneHistTook = time.Since(t) }(time.Now()) + if err = d.History.prune(ctx, txFrom, txTo, limit, logEvery); err != nil { return fmt.Errorf("prune history at step %d [%d, %d): %w", step, txFrom, txTo, err) } return nil } +func (d *Domain) isEmpty(tx kv.Tx) (bool, error) { + k, err := kv.FirstKey(tx, d.keysTable) + if err != nil { + return false, err + } + k2, err := kv.FirstKey(tx, d.valsTable) + if err != nil { + return 
false, err
+	}
+	isEmptyHist, err := d.History.isEmpty(tx)
+	if err != nil {
+		return false, err
+	}
+	return k == nil && k2 == nil && isEmptyHist, nil
+}
+
 // nolint
 func (d *Domain) warmup(ctx context.Context, txFrom, limit uint64, tx kv.Tx) error {
 	domainKeysCursor, err := tx.CursorDupSort(d.keysTable)
@@ -1163,6 +1463,12 @@ func (d *Domain) warmup(ctx context.Context, txFrom, limit uint64, tx kv.Tx) err
 		}
 		_, _, _ = valsC.Seek(v[len(v)-8:])
 		_, _ = idxC.SeekBothRange(v[:len(v)-8], k)
+
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
 	}
 	if err != nil {
 		return fmt.Errorf("iterate over %s domain keys: %w", d.filenameBase, err)
@@ -1171,6 +1477,8 @@ func (d *Domain) warmup(ctx context.Context, txFrom, limit uint64, tx kv.Tx) err
 	return d.History.warmup(ctx, txFrom, limit, tx)
 }
 
+var COMPARE_INDEXES = false // if true, will compare values from Btree and InvertedIndex
+
 func (dc *DomainContext) readFromFiles(filekey []byte, fromTxNum uint64) ([]byte, bool) {
 	var val []byte
 	var found bool
@@ -1179,19 +1487,37 @@ func (dc *DomainContext) readFromFiles(filekey []byte, fromTxNum uint64) ([]byte
 		if dc.files[i].endTxNum < fromTxNum {
 			break
 		}
-		reader := dc.statelessIdxReader(i)
+		reader := dc.statelessBtree(i)
 		if reader.Empty() {
 			continue
 		}
-		offset := reader.Lookup(filekey)
-		g := dc.statelessGetter(i)
-		g.Reset(offset)
-		if g.HasNext() {
-			if keyMatch, _ := g.Match(filekey); keyMatch {
-				val, _ = g.Next(nil)
-				found = true
-				break
+		cur, err := reader.Seek(filekey)
+		if err != nil {
+			log.Warn("failed to read from file", "file", reader.FileName(), "err", err)
+			continue
+		}
+
+		if bytes.Equal(cur.Key(), filekey) {
+			val = cur.Value()
+			found = true
+
+			if COMPARE_INDEXES {
+				rd := recsplit.NewIndexReader(dc.files[i].src.index)
+				oft := rd.Lookup(filekey)
+				gt := dc.statelessGetter(i)
+				gt.Reset(oft)
+				var k, v []byte
+				if gt.HasNext() {
+					k, _ = gt.Next(nil)
+					v, _ = gt.Next(nil)
+				}
+				fmt.Printf("key: %x, val: %x\n", k, v)
+				if !bytes.Equal(v, val) {
+					panic("not equal")
+				}
+			}
+			break
 		}
 	}
 	return val, found
@@ -1200,127 +1526,55 @@ func (dc *DomainContext) readFromFiles(filekey []byte, fromTxNum uint64) ([]byte
 // historyBeforeTxNum searches history for a value of specified key before txNum
 // second return value is true if the value is found in the history (even if it is nil)
 func (dc *DomainContext) historyBeforeTxNum(key []byte, txNum uint64, roTx kv.Tx) ([]byte, bool, error) {
-	var foundTxNum uint64
-	var foundEndTxNum uint64
-	var foundStartTxNum uint64
-	var found bool
-	var anyItem bool // Whether any filesItem has been looked at in the loop below
+	dc.d.stats.HistoryQueries.Add(1)
+
+	v, found, err := dc.hc.GetNoState(key, txNum)
+	if err != nil {
+		return nil, false, err
+	}
+	if found {
+		return v, true, nil
+	}
+
+	var anyItem bool
 	var topState ctxItem
 	for _, item := range dc.hc.ic.files {
 		if item.endTxNum < txNum {
 			continue
 		}
+		anyItem = true
 		topState = item
 		break
 	}
-	for _, item := range dc.hc.ic.files {
-		if item.endTxNum < txNum {
-			continue
-		}
-		anyItem = true
-		reader := dc.hc.ic.statelessIdxReader(item.i)
-		offset := reader.Lookup(key)
-		g := dc.hc.ic.statelessGetter(item.i)
-		g.Reset(offset)
-		if k, _ := g.NextUncompressed(); bytes.Equal(k, key) {
-			eliasVal, _ := g.NextUncompressed()
-			ef, _ := eliasfano32.ReadEliasFano(eliasVal)
-			//start := time.Now()
-			n, ok := ef.Search(txNum)
-			//d.stats.EfSearchTime += time.Since(start)
-			if ok {
-				foundTxNum = n
-				foundEndTxNum = item.endTxNum
-				foundStartTxNum = item.startTxNum
-				found = true
-				break
-			} else if 
item.endTxNum > txNum && item.endTxNum >= topState.endTxNum { - break - } - } - } - if !found { - if anyItem { - // If there were no changes but there were history files, the value can be obtained from value files - var val []byte - for i := len(dc.files) - 1; i >= 0; i-- { - if dc.files[i].startTxNum > topState.startTxNum { - continue - } - reader := dc.statelessIdxReader(i) - if reader.Empty() { - continue - } - offset := reader.Lookup(key) - g := dc.statelessGetter(i) - g.Reset(offset) - if g.HasNext() { - if k, _ := g.NextUncompressed(); bytes.Equal(k, key) { - if dc.d.compressVals { - val, _ = g.Next(nil) - } else { - val, _ = g.NextUncompressed() - } - break - } - } - } - return val, true, nil - } - // Value not found in history files, look in the recent history - if roTx == nil { - return nil, false, fmt.Errorf("roTx is nil") - } - indexCursor, err := roTx.CursorDupSort(dc.d.indexTable) - if err != nil { - return nil, false, err - } - defer indexCursor.Close() - var txKey [8]byte - binary.BigEndian.PutUint64(txKey[:], txNum) - var foundTxNumVal []byte - if foundTxNumVal, err = indexCursor.SeekBothRange(key, txKey[:]); err != nil { - return nil, false, err - } - if foundTxNumVal != nil { - var historyKeysCursor kv.CursorDupSort - if historyKeysCursor, err = roTx.CursorDupSort(dc.d.indexKeysTable); err != nil { - return nil, false, err + if anyItem { + // If there were no changes but there were history files, the value can be obtained from value files + var val []byte + for i := len(dc.files) - 1; i >= 0; i-- { + if dc.files[i].startTxNum > topState.startTxNum { + continue } - defer historyKeysCursor.Close() - var vn []byte - if vn, err = historyKeysCursor.SeekBothRange(foundTxNumVal, key); err != nil { - return nil, false, err + reader := dc.statelessBtree(i) + if reader.Empty() { + continue } - valNum := binary.BigEndian.Uint64(vn[len(vn)-8:]) - if valNum == 0 { - // This is special valNum == 0, which is empty value - return nil, true, nil + cur, err := reader.Seek(key) + if err != nil { + log.Warn("failed to read history before from file", "key", key, "err", err) + continue } - var v []byte - if v, err = roTx.GetOne(dc.d.historyValsTable, vn[len(vn)-8:]); err != nil { - return nil, false, err + + if bytes.Equal(cur.Key(), key) { + val = cur.Value() + break } - return v, true, nil } - return nil, false, nil + return val, true, nil } - var txKey [8]byte - binary.BigEndian.PutUint64(txKey[:], foundTxNum) - historyItem, ok := dc.hc.getFile(foundStartTxNum, foundEndTxNum) - if !ok { - return nil, false, fmt.Errorf("no %s file found for [%x]", dc.d.filenameBase, key) - } - reader := dc.hc.statelessIdxReader(historyItem.i) - offset := reader.Lookup2(txKey[:], key) - g := dc.hc.statelessGetter(historyItem.i) - g.Reset(offset) - if dc.d.compressVals { - v, _ := g.Next(nil) - return v, true, nil + // Value not found in history files, look in the recent history + if roTx == nil { + return nil, false, fmt.Errorf("roTx is nil") } - v, _ := g.NextUncompressed() - return v, true, nil + return dc.hc.getNoStateFromDB(key, txNum, roTx) } // GetBeforeTxNum does not always require usage of roTx. 
If it is possible to determine @@ -1331,6 +1585,11 @@ func (dc *DomainContext) GetBeforeTxNum(key []byte, txNum uint64, roTx kv.Tx) ([ return nil, err } if hOk { + // if history returned marker of key creation + // domain must return nil + if len(v) == 0 { + return nil, nil + } return v, nil } if v, _, err = dc.get(key, txNum-1, roTx); err != nil { diff --git a/state/domain_committed.go b/state/domain_committed.go index 198f8c025..81f9f6cc2 100644 --- a/state/domain_committed.go +++ b/state/domain_committed.go @@ -24,8 +24,11 @@ import ( "fmt" "hash" "path/filepath" + "strings" + "time" "github.com/google/btree" + "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/log/v3" "golang.org/x/crypto/sha3" @@ -33,7 +36,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/compress" - "github.com/ledgerwatch/erigon-lib/recsplit" ) // Defines how to evaluate commitments @@ -45,6 +47,32 @@ const ( CommitmentModeUpdate CommitmentMode = 2 ) +func (m CommitmentMode) String() string { + switch m { + case CommitmentModeDisabled: + return "disabled" + case CommitmentModeDirect: + return "direct" + case CommitmentModeUpdate: + return "update" + default: + return "unknown" + } +} + +func ParseCommitmentMode(s string) CommitmentMode { + var mode CommitmentMode + switch s { + case "off": + mode = CommitmentModeDisabled + case "update": + mode = CommitmentModeUpdate + default: + mode = CommitmentModeDirect + } + return mode +} + type ValueMerger func(prev, current []byte) (merged []byte, err error) type DomainCommitted struct { @@ -53,15 +81,17 @@ type DomainCommitted struct { trace bool commTree *btree.BTreeG[*CommitmentItem] keccak hash.Hash - patriciaTrie *commitment.HexPatriciaHashed - keyReplaceFn ValueMerger // defines logic performed with stored values during files merge + patriciaTrie commitment.Trie branchMerger *commitment.BranchMerger + + comKeys uint64 + comTook time.Duration } -func NewCommittedDomain(d *Domain, mode CommitmentMode) *DomainCommitted { +func NewCommittedDomain(d *Domain, mode CommitmentMode, trieVariant commitment.TrieVariant) *DomainCommitted { return &DomainCommitted{ Domain: d, - patriciaTrie: commitment.NewHexPatriciaHashed(length.Addr, nil, nil, nil), + patriciaTrie: commitment.InitializeTrie(trieVariant), commTree: btree.NewG[*CommitmentItem](32, commitmentItemLess), keccak: sha3.NewLegacyKeccak256(), mode: mode, @@ -69,8 +99,6 @@ func NewCommittedDomain(d *Domain, mode CommitmentMode) *DomainCommitted { } } -func (d *DomainCommitted) SetKeyReplacer(vm ValueMerger) { d.keyReplaceFn = vm } - func (d *DomainCommitted) SetCommitmentMode(m CommitmentMode) { d.mode = m } // TouchPlainKey marks plainKey as updated and applies different fn for different key types @@ -88,17 +116,17 @@ func (d *DomainCommitted) TouchPlainKey(key, val []byte, fn func(c *CommitmentIt func (d *DomainCommitted) TouchPlainKeyAccount(c *CommitmentItem, val []byte) { if len(val) == 0 { - c.update.Flags = commitment.DELETE_UPDATE + c.update.Flags = commitment.DeleteUpdate return } c.update.DecodeForStorage(val) - c.update.Flags = commitment.BALANCE_UPDATE | commitment.NONCE_UPDATE + c.update.Flags = commitment.BalanceUpdate | commitment.NonceUpdate item, found := d.commTree.Get(&CommitmentItem{hashedKey: c.hashedKey}) if !found { return } - if item.update.Flags&commitment.CODE_UPDATE != 0 { - c.update.Flags |= commitment.CODE_UPDATE + if item.update.Flags&commitment.CodeUpdate != 0 { + 
c.update.Flags |= commitment.CodeUpdate copy(c.update.CodeHashOrStorage[:], item.update.CodeHashOrStorage[:]) } } @@ -106,15 +134,15 @@ func (d *DomainCommitted) TouchPlainKeyAccount(c *CommitmentItem, val []byte) { func (d *DomainCommitted) TouchPlainKeyStorage(c *CommitmentItem, val []byte) { c.update.ValLength = len(val) if len(val) == 0 { - c.update.Flags = commitment.DELETE_UPDATE + c.update.Flags = commitment.DeleteUpdate } else { - c.update.Flags = commitment.STORAGE_UPDATE + c.update.Flags = commitment.StorageUpdate copy(c.update.CodeHashOrStorage[:], val) } } func (d *DomainCommitted) TouchPlainKeyCode(c *CommitmentItem, val []byte) { - c.update.Flags = commitment.CODE_UPDATE + c.update.Flags = commitment.CodeUpdate item, found := d.commTree.Get(c) if !found { d.keccak.Reset() @@ -122,16 +150,16 @@ func (d *DomainCommitted) TouchPlainKeyCode(c *CommitmentItem, val []byte) { copy(c.update.CodeHashOrStorage[:], d.keccak.Sum(nil)) return } - if item.update.Flags&commitment.BALANCE_UPDATE != 0 { - c.update.Flags |= commitment.BALANCE_UPDATE + if item.update.Flags&commitment.BalanceUpdate != 0 { + c.update.Flags |= commitment.BalanceUpdate c.update.Balance.Set(&item.update.Balance) } - if item.update.Flags&commitment.NONCE_UPDATE != 0 { - c.update.Flags |= commitment.NONCE_UPDATE + if item.update.Flags&commitment.NonceUpdate != 0 { + c.update.Flags |= commitment.NonceUpdate c.update.Nonce = item.update.Nonce } - if item.update.Flags == commitment.DELETE_UPDATE && len(val) == 0 { - c.update.Flags = commitment.DELETE_UPDATE + if item.update.Flags == commitment.DeleteUpdate && len(val) == 0 { + c.update.Flags = commitment.DeleteUpdate } else { d.keccak.Reset() d.keccak.Write(val) @@ -192,9 +220,17 @@ func (d *DomainCommitted) hashAndNibblizeKey(key []byte) []byte { } func (d *DomainCommitted) storeCommitmentState(blockNum, txNum uint64) error { - state, err := d.patriciaTrie.EncodeCurrentState(nil) - if err != nil { - return err + var state []byte + var err error + + switch trie := (d.patriciaTrie).(type) { + case *commitment.HexPatriciaHashed: + state, err = trie.EncodeCurrentState(nil) + if err != nil { + return err + } + default: + return fmt.Errorf("unsupported state storing for patricia trie type: %T", d.patriciaTrie) } cs := &commitmentState{txNum: txNum, trieState: state, blockNum: blockNum} encoded, err := cs.Encode() @@ -216,54 +252,44 @@ func (d *DomainCommitted) replaceKeyWithReference(fullKey, shortKey []byte, type numBuf := [2]byte{} var found bool for _, item := range list { - g := item.decompressor.MakeGetter() - index := recsplit.NewIndexReader(item.index) + //g := item.decompressor.MakeGetter() + //index := recsplit.NewIndexReader(item.index) - offset := index.Lookup(fullKey) - g.Reset(offset) - if !g.HasNext() { + cur, err := item.bindex.Seek(fullKey) + if err != nil { continue } - if keyMatch, _ := g.Match(fullKey); keyMatch { - step := uint16(item.endTxNum / d.aggregationStep) - binary.BigEndian.PutUint16(numBuf[:], step) + step := uint16(item.endTxNum / d.aggregationStep) + binary.BigEndian.PutUint16(numBuf[:], step) - shortKey = encodeU64(offset, numBuf[:]) + shortKey = encodeU64(cur.Ordinal(), numBuf[:]) - if d.trace { - fmt.Printf("replacing %s [%x] => {%x} [step=%d, offset=%d, file=%s.%d-%d]\n", typeAS, fullKey, shortKey, step, offset, typeAS, item.startTxNum, item.endTxNum) - } - found = true - break + if d.trace { + fmt.Printf("replacing %s [%x] => {%x} [step=%d, offset=%d, file=%s.%d-%d]\n", typeAS, fullKey, shortKey, step, cur.Ordinal(), typeAS, 
item.startTxNum, item.endTxNum) } + found = true + break } + //if !found { + // log.Warn("bt index key replacement seek failed", "key", fmt.Sprintf("%x", fullKey)) + //} return found } +// nolint func (d *DomainCommitted) lookupShortenedKey(shortKey, fullKey []byte, typAS string, list []*filesItem) bool { fileStep, offset := shortenedKey(shortKey) expected := uint64(fileStep) * d.aggregationStep - var size uint64 - switch typAS { - case "account": - size = length.Addr - case "storage": - size = length.Addr + length.Hash - default: - return false - } var found bool for _, item := range list { if item.startTxNum > expected || item.endTxNum < expected { continue } - g := item.decompressor.MakeGetter() - if uint64(g.Size()) <= offset+size { - continue - } - g.Reset(offset) - fullKey, _ = g.Next(fullKey[:0]) + + cur := item.bindex.OrdinalLookup(offset) + //nolint + fullKey = cur.Key() if d.trace { fmt.Printf("offsetToKey %s [%x]=>{%x} step=%d offset=%d, file=%s.%d-%d.kv\n", typAS, fullKey, shortKey, fileStep, offset, typAS, item.startTxNum, item.endTxNum) } @@ -325,12 +351,12 @@ func (d *DomainCommitted) commitmentValTransform(files *SelectedStaticFiles, mer return transValBuf, nil } -func (d *DomainCommitted) mergeFiles(ctx context.Context, oldFiles SelectedStaticFiles, mergedFiles MergedFiles, r DomainRanges, workers int) (valuesIn, indexIn, historyIn *filesItem, err error) { +func (d *DomainCommitted) mergeFiles(ctx context.Context, oldFiles SelectedStaticFiles, mergedFiles MergedFiles, r DomainRanges, workers int, ps *background.ProgressSet) (valuesIn, indexIn, historyIn *filesItem, err error) { if !r.any() { return } - valuesFiles := oldFiles.commitment + domainFiles := oldFiles.commitment indexFiles := oldFiles.commitmentIdx historyFiles := oldFiles.commitmentHist @@ -348,6 +374,9 @@ func (d *DomainCommitted) mergeFiles(ctx context.Context, oldFiles SelectedStati if indexIn.index != nil { indexIn.index.Close() } + if indexIn.bindex != nil { + indexIn.bindex.Close() + } } if historyIn != nil { if historyIn.decompressor != nil { @@ -356,6 +385,9 @@ func (d *DomainCommitted) mergeFiles(ctx context.Context, oldFiles SelectedStati if historyIn.index != nil { historyIn.index.Close() } + if historyIn.bindex != nil { + historyIn.bindex.Close() + } } if valuesIn != nil { if valuesIn.decompressor != nil { @@ -364,6 +396,9 @@ func (d *DomainCommitted) mergeFiles(ctx context.Context, oldFiles SelectedStati if valuesIn.index != nil { valuesIn.index.Close() } + if valuesIn.bindex != nil { + valuesIn.bindex.Close() + } } } }() @@ -374,17 +409,22 @@ func (d *DomainCommitted) mergeFiles(ctx context.Context, oldFiles SelectedStati history: r.history, indexStartTxNum: r.indexStartTxNum, indexEndTxNum: r.indexEndTxNum, - index: r.index}, workers); err != nil { + index: r.index}, workers, ps); err != nil { return nil, nil, nil, err } + if r.values { - datPath := filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.kv", d.filenameBase, r.valuesStartTxNum/d.aggregationStep, r.valuesEndTxNum/d.aggregationStep)) - if comp, err = compress.NewCompressor(context.Background(), "merge", datPath, d.dir, compress.MinPatternScore, workers, log.LvlTrace); err != nil { - return nil, nil, nil, fmt.Errorf("merge %s history compressor: %w", d.filenameBase, err) + datFileName := fmt.Sprintf("%s.%d-%d.kv", d.filenameBase, r.valuesStartTxNum/d.aggregationStep, r.valuesEndTxNum/d.aggregationStep) + datPath := filepath.Join(d.dir, datFileName) + p := ps.AddNew(datFileName, 1) + defer ps.Delete(p) + + if comp, err = 
compress.NewCompressor(ctx, "merge", datPath, d.dir, compress.MinPatternScore, workers, log.LvlTrace); err != nil {
+		return nil, nil, nil, fmt.Errorf("merge %s compressor: %w", d.filenameBase, err)
 	}
 	var cp CursorHeap
 	heap.Init(&cp)
-	for _, item := range valuesFiles {
+	for _, item := range domainFiles {
 		g := item.decompressor.MakeGetter()
 		g.Reset(0)
 		if g.HasNext() {
@@ -433,33 +473,20 @@ func (d *DomainCommitted) mergeFiles(ctx context.Context, oldFiles SelectedStati
 			heap.Pop(&cp)
 		}
 	}
-	var skip bool
-	if d.prefixLen > 0 {
-		skip = r.valuesStartTxNum == 0 && len(lastVal) == 0 && len(lastKey) != d.prefixLen
-	} else {
-		// For the rest of types, empty value means deletion
-		skip = r.valuesStartTxNum == 0 && len(lastVal) == 0
-	}
+	// For the rest of types, empty value means deletion
+	skip := r.valuesStartTxNum == 0 && len(lastVal) == 0
 	if !skip {
-		if keyBuf != nil && (d.prefixLen == 0 || len(keyBuf) != d.prefixLen || bytes.HasPrefix(lastKey, keyBuf)) {
+		if keyBuf != nil {
 			if err = comp.AddUncompressedWord(keyBuf); err != nil {
 				return nil, nil, nil, err
 			}
 			keyCount++ // Only counting keys, not values
-
-			if d.trace {
-				fmt.Printf("merge: multi-way key %x, total keys %d\n", keyBuf, keyCount)
-			}
-
-			valBuf, err = d.commitmentValTransform(&oldFiles, &mergedFiles, valBuf)
-			if err != nil {
-				return nil, nil, nil, fmt.Errorf("merge: valTransform [%x] %w", valBuf, err)
-			}
-			if d.compressVals {
+			switch d.compressVals {
+			case true:
 				if err = comp.AddWord(valBuf); err != nil {
 					return nil, nil, nil, err
 				}
-			} else {
+			default:
 				if err = comp.AddUncompressedWord(valBuf); err != nil {
 					return nil, nil, nil, err
 				}
@@ -494,14 +521,26 @@ func (d *DomainCommitted) mergeFiles(ctx context.Context, oldFiles SelectedStati
 		}
 		comp.Close()
 		comp = nil
-		idxPath := filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.kvi", d.filenameBase, r.valuesStartTxNum/d.aggregationStep, r.valuesEndTxNum/d.aggregationStep))
 		valuesIn = &filesItem{startTxNum: r.valuesStartTxNum, endTxNum: r.valuesEndTxNum, frozen: (r.valuesEndTxNum-r.valuesStartTxNum)/d.aggregationStep == StepsInBiggestFile}
 		if valuesIn.decompressor, err = compress.NewDecompressor(datPath); err != nil {
 			return nil, nil, nil, fmt.Errorf("merge %s decompressor [%d-%d]: %w", d.filenameBase, r.valuesStartTxNum, r.valuesEndTxNum, err)
 		}
-		if valuesIn.index, err = buildIndex(ctx, valuesIn.decompressor, idxPath, d.dir, keyCount, false /* values */); err != nil {
+		ps.Delete(p)
+
+		idxFileName := fmt.Sprintf("%s.%d-%d.kvi", d.filenameBase, r.valuesStartTxNum/d.aggregationStep, r.valuesEndTxNum/d.aggregationStep)
+		idxPath := filepath.Join(d.dir, idxFileName)
+
+		p = ps.AddNew(idxFileName, uint64(keyCount))
+		defer ps.Delete(p)
+		if valuesIn.index, err = buildIndexThenOpen(ctx, valuesIn.decompressor, idxPath, d.dir, keyCount, false /* values */, p); err != nil {
 			return nil, nil, nil, fmt.Errorf("merge %s buildIndex [%d-%d]: %w", d.filenameBase, r.valuesStartTxNum, r.valuesEndTxNum, err)
 		}
+
+		btPath := strings.TrimSuffix(idxPath, "kvi") + "bt"
+		valuesIn.bindex, err = CreateBtreeIndexWithDecompressor(btPath, 2048, valuesIn.decompressor, p)
+		if err != nil {
+			return nil, nil, nil, fmt.Errorf("create btindex %s [%d-%d]: %w", d.filenameBase, r.valuesStartTxNum, r.valuesEndTxNum, err)
+		}
 	}
 	closeItem = false
 	d.stats.MergesCount++
@@ -511,7 +550,11 @@ func (d *DomainCommitted) mergeFiles(ctx context.Context, oldFiles SelectedStati
 
// Evaluates commitment for processed state.
// Commit=true - store trie state after evaluation
 func (d *DomainCommitted) ComputeCommitment(trace bool) (rootHash []byte, branchNodeUpdates map[string]commitment.BranchData, err error) {
+	defer func(s time.Time) { d.comTook = time.Since(s) }(time.Now())
+	touchedKeys, hashedKeys, updates := d.TouchedKeyList()
+	d.comKeys = uint64(len(touchedKeys))
+
 	if len(touchedKeys) == 0 {
 		rootHash, err = d.patriciaTrie.RootHash()
 		return rootHash, nil, err
@@ -532,6 +575,8 @@ func (d *DomainCommitted) ComputeCommitment(trace bool) (rootHash []byte, branch
 		if err != nil {
 			return nil, nil, err
 		}
+	case CommitmentModeDisabled:
+		return nil, nil, nil
 	default:
 		return nil, nil, fmt.Errorf("invalid commitment mode: %d", d.mode)
 	}
@@ -542,22 +587,29 @@ var keyCommitmentState = []byte("state")
 
 // SeekCommitment searches for the last encoded state from DomainCommitted
 // and, if the state is found, sets it up in the current domain
-func (d *DomainCommitted) SeekCommitment(aggStep, sinceTx uint64) (uint64, error) {
+func (d *DomainCommitted) SeekCommitment(aggStep, sinceTx uint64) (blockNum, txNum uint64, err error) {
+	if d.patriciaTrie.Variant() != commitment.VariantHexPatriciaTrie {
+		return 0, 0, fmt.Errorf("state storing is only supported for hex patricia trie")
+	}
+	// TODO: add support for bin state dumping
+
 	var (
 		latestState []byte
 		stepbuf     [2]byte
-		step        uint16 = uint16(sinceTx/aggStep) - 1
+		step               = uint16(sinceTx/aggStep) - 1
 		latestTxNum uint64 = sinceTx - 1
 	)
+	d.SetTxNum(latestTxNum)
 	ctx := d.MakeContext()
+	defer ctx.Close()
 	for {
 		binary.BigEndian.PutUint16(stepbuf[:], step)
 		s, err := ctx.Get(keyCommitmentState, stepbuf[:], d.tx)
 		if err != nil {
-			return 0, err
+			return 0, 0, err
 		}
 		if len(s) < 8 {
 			break
@@ -567,20 +619,25 @@ func (d *DomainCommitted) SeekCommitment(aggStep, sinceTx uint64) (uint64, error
 			break
 		}
 		latestTxNum, latestState = v, s
-		lookupTxN := latestTxNum + aggStep // - 1
+		lookupTxN := latestTxNum + aggStep
 		step = uint16(latestTxNum/aggStep) + 1
 		d.SetTxNum(lookupTxN)
 	}
 
 	var latest commitmentState
 	if err := latest.Decode(latestState); err != nil {
-		return 0, nil
+		return 0, 0, nil
 	}
-	if err := d.patriciaTrie.SetState(latest.trieState); err != nil {
-		return 0, err
+	if hext, ok := d.patriciaTrie.(*commitment.HexPatriciaHashed); ok {
+		if err := hext.SetState(latest.trieState); err != nil {
+			return 0, 0, err
+		}
+	} else {
+		return 0, 0, fmt.Errorf("state storing is only supported for hex patricia trie")
 	}
-	return latest.txNum, nil
+
+	return latest.blockNum, latest.txNum, nil
 }
 
 type commitmentState struct {
diff --git a/state/domain_test.go b/state/domain_test.go
index 8c5a530e6..9fcf9c60b 100644
--- a/state/domain_test.go
+++ b/state/domain_test.go
@@ -24,9 +24,9 @@ import (
 	"os"
 	"strings"
 	"testing"
-	"testing/fstest"
 	"time"
 
+	"github.com/ledgerwatch/erigon-lib/common/background"
 	"github.com/ledgerwatch/log/v3"
 	"github.com/stretchr/testify/require"
 	btree2 "github.com/tidwall/btree"
@@ -36,10 +36,9 @@ import (
 	"github.com/ledgerwatch/erigon-lib/recsplit"
 )
 
-func testDbAndDomain(t *testing.T, prefixLen int) (string, kv.RwDB, *Domain) {
+func testDbAndDomain(t *testing.T) (string, kv.RwDB, *Domain) {
 	t.Helper()
 	path := t.TempDir()
-	t.Cleanup(func() { os.RemoveAll(path) })
 	logger := log.New()
 	keysTable := "Keys"
 	valsTable := "Vals"
@@ -52,29 +51,31 @@ func testDbAndDomain(t *testing.T, prefixLen int) (string, kv.RwDB, *Domain) {
 			keysTable:        kv.TableCfgItem{Flags: kv.DupSort},
 			valsTable:        kv.TableCfgItem{},
 			historyKeysTable: kv.TableCfgItem{Flags: kv.DupSort},
-			historyValsTable: kv.TableCfgItem{},
+
historyValsTable: kv.TableCfgItem{Flags: kv.DupSort}, settingsTable: kv.TableCfgItem{}, indexTable: kv.TableCfgItem{Flags: kv.DupSort}, } }).MustOpen() t.Cleanup(db.Close) - d, err := NewDomain(path, path, 16 /* aggregationStep */, "base" /* filenameBase */, keysTable, valsTable, historyKeysTable, historyValsTable, settingsTable, indexTable, prefixLen, true /* compressVals */) + d, err := NewDomain(path, path, 16, "base", keysTable, valsTable, historyKeysTable, historyValsTable, indexTable, true, false) require.NoError(t, err) t.Cleanup(d.Close) return path, db, d } +// btree index should work correctly if K < m func TestCollationBuild(t *testing.T) { logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() - _, db, d := testDbAndDomain(t, 0 /* prefixLen */) + _, db, d := testDbAndDomain(t) ctx := context.Background() + defer d.Close() tx, err := db.BeginRw(ctx) require.NoError(t, err) defer tx.Rollback() d.SetTx(tx) - d.StartWrites("") + d.StartWrites() defer d.FinishWrites() d.SetTxNum(2) @@ -93,6 +94,7 @@ func TestCollationBuild(t *testing.T) { require.NoError(t, err) c, err := d.collate(ctx, 0, 0, 7, tx, logEvery) + require.NoError(t, err) require.True(t, strings.HasSuffix(c.valuesPath, "base.0-1.kv")) require.Equal(t, 2, c.valuesCount) @@ -102,9 +104,11 @@ func TestCollationBuild(t *testing.T) { require.Equal(t, []uint64{3}, c.indexBitmaps["key2"].ToArray()) require.Equal(t, []uint64{2, 6}, c.indexBitmaps["key1"].ToArray()) - sf, err := d.buildFiles(ctx, 0, c) + sf, err := d.buildFiles(ctx, 0, c, background.NewProgressSet()) require.NoError(t, err) defer sf.Close() + c.Close() + g := sf.valuesDecomp.MakeGetter() g.Reset(0) var words []string @@ -115,7 +119,9 @@ func TestCollationBuild(t *testing.T) { require.Equal(t, []string{"key1", "value1.2", "key2", "value2.1"}, words) // Check index require.Equal(t, 2, int(sf.valuesIdx.KeyCount())) + r := recsplit.NewIndexReader(sf.valuesIdx) + defer r.Close() for i := 0; i < len(words); i += 2 { offset := r.Lookup([]byte(words[i])) g.Reset(offset) @@ -127,13 +133,13 @@ func TestCollationBuild(t *testing.T) { } func TestIterationBasic(t *testing.T) { - _, db, d := testDbAndDomain(t, 5 /* prefixLen */) + _, db, d := testDbAndDomain(t) ctx := context.Background() tx, err := db.BeginRw(ctx) require.NoError(t, err) defer tx.Rollback() d.SetTx(tx) - d.StartWrites("") + d.StartWrites() defer d.FinishWrites() d.SetTxNum(2) @@ -167,14 +173,14 @@ func TestIterationBasic(t *testing.T) { func TestAfterPrune(t *testing.T) { logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() - _, db, d := testDbAndDomain(t, 0 /* prefixLen */) + _, db, d := testDbAndDomain(t) ctx := context.Background() tx, err := db.BeginRw(ctx) require.NoError(t, err) defer tx.Rollback() d.SetTx(tx) - d.StartWrites("") + d.StartWrites() defer d.FinishWrites() d.SetTxNum(2) @@ -203,7 +209,7 @@ func TestAfterPrune(t *testing.T) { c, err := d.collate(ctx, 0, 0, 16, tx, logEvery) require.NoError(t, err) - sf, err := d.buildFiles(ctx, 0, c) + sf, err := d.buildFiles(ctx, 0, c, background.NewProgressSet()) require.NoError(t, err) d.integrateFiles(sf, 0, 16) @@ -220,16 +226,9 @@ func TestAfterPrune(t *testing.T) { err = d.prune(ctx, 0, 0, 16, math.MaxUint64, logEvery) require.NoError(t, err) - for _, table := range []string{d.keysTable, d.valsTable, d.indexKeysTable, d.historyValsTable, d.indexTable} { - var cur kv.Cursor - cur, err = tx.Cursor(table) - require.NoError(t, err) - defer cur.Close() - var k []byte - k, _, err = cur.First() - require.NoError(t, err) - 
require.NotNilf(t, k, table, string(k)) - } + isEmpty, err := d.isEmpty(tx) + require.NoError(t, err) + require.False(t, isEmpty) v, err = dc.Get([]byte("key1"), nil, tx) require.NoError(t, err) @@ -241,13 +240,13 @@ func TestAfterPrune(t *testing.T) { func filledDomain(t *testing.T) (string, kv.RwDB, *Domain, uint64) { t.Helper() - path, db, d := testDbAndDomain(t, 0 /* prefixLen */) + path, db, d := testDbAndDomain(t) ctx := context.Background() tx, err := db.BeginRw(ctx) require.NoError(t, err) defer tx.Rollback() d.SetTx(tx) - d.StartWrites("") + d.StartWrites() defer d.FinishWrites() txs := uint64(1000) @@ -332,7 +331,7 @@ func TestHistory(t *testing.T) { func() { c, err := d.collate(ctx, step, step*d.aggregationStep, (step+1)*d.aggregationStep, tx, logEvery) require.NoError(t, err) - sf, err := d.buildFiles(ctx, step, c) + sf, err := d.buildFiles(ctx, step, c, background.NewProgressSet()) require.NoError(t, err) d.integrateFiles(sf, step*d.aggregationStep, (step+1)*d.aggregationStep) @@ -348,13 +347,13 @@ func TestHistory(t *testing.T) { func TestIterationMultistep(t *testing.T) { logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() - _, db, d := testDbAndDomain(t, 5 /* prefixLen */) + _, db, d := testDbAndDomain(t) ctx := context.Background() tx, err := db.BeginRw(ctx) require.NoError(t, err) defer tx.Rollback() d.SetTx(tx) - d.StartWrites("") + d.StartWrites() defer d.FinishWrites() d.SetTxNum(2) @@ -394,7 +393,7 @@ func TestIterationMultistep(t *testing.T) { func() { c, err := d.collate(ctx, step, step*d.aggregationStep, (step+1)*d.aggregationStep, tx, logEvery) require.NoError(t, err) - sf, err := d.buildFiles(ctx, step, c) + sf, err := d.buildFiles(ctx, step, c, background.NewProgressSet()) require.NoError(t, err) d.integrateFiles(sf, step*d.aggregationStep, (step+1)*d.aggregationStep) err = d.prune(ctx, step, step*d.aggregationStep, (step+1)*d.aggregationStep, math.MaxUint64, logEvery) @@ -433,7 +432,7 @@ func collateAndMerge(t *testing.T, db kv.RwDB, tx kv.RwTx, d *Domain, txs uint64 for step := uint64(0); step < txs/d.aggregationStep-1; step++ { c, err := d.collate(ctx, step, step*d.aggregationStep, (step+1)*d.aggregationStep, tx, logEvery) require.NoError(t, err) - sf, err := d.buildFiles(ctx, step, c) + sf, err := d.buildFiles(ctx, step, c, background.NewProgressSet()) require.NoError(t, err) d.integrateFiles(sf, step*d.aggregationStep, (step+1)*d.aggregationStep) err = d.prune(ctx, step, step*d.aggregationStep, (step+1)*d.aggregationStep, math.MaxUint64, logEvery) @@ -447,7 +446,7 @@ func collateAndMerge(t *testing.T, db kv.RwDB, tx kv.RwTx, d *Domain, txs uint64 dc := d.MakeContext() defer dc.Close() valuesOuts, indexOuts, historyOuts, _ := d.staticFilesInRange(r, dc) - valuesIn, indexIn, historyIn, err := d.mergeFiles(ctx, valuesOuts, indexOuts, historyOuts, r, 1) + valuesIn, indexIn, historyIn, err := d.mergeFiles(ctx, valuesOuts, indexOuts, historyOuts, r, 1, background.NewProgressSet()) require.NoError(t, err) d.integrateMergedFiles(valuesOuts, indexOuts, historyOuts, valuesIn, indexIn, historyIn) }() @@ -468,7 +467,7 @@ func collateAndMergeOnce(t *testing.T, d *Domain, step uint64) { c, err := d.collate(ctx, step, txFrom, txTo, d.tx, logEvery) require.NoError(t, err) - sf, err := d.buildFiles(ctx, step, c) + sf, err := d.buildFiles(ctx, step, c, background.NewProgressSet()) require.NoError(t, err) d.integrateFiles(sf, txFrom, txTo) @@ -481,7 +480,7 @@ func collateAndMergeOnce(t *testing.T, d *Domain, step uint64) { for r = d.findMergeRange(maxEndTxNum, 
maxSpan); r.any(); r = d.findMergeRange(maxEndTxNum, maxSpan) { dc := d.MakeContext() valuesOuts, indexOuts, historyOuts, _ := d.staticFilesInRange(r, dc) - valuesIn, indexIn, historyIn, err := d.mergeFiles(ctx, valuesOuts, indexOuts, historyOuts, r, 1) + valuesIn, indexIn, historyIn, err := d.mergeFiles(ctx, valuesOuts, indexOuts, historyOuts, r, 1, background.NewProgressSet()) require.NoError(t, err) d.integrateMergedFiles(valuesOuts, indexOuts, historyOuts, valuesIn, indexIn, historyIn) @@ -498,30 +497,26 @@ func TestMergeFiles(t *testing.T) { func TestScanFiles(t *testing.T) { path, db, d, txs := filledDomain(t) - + _ = path collateAndMerge(t, db, nil, d, txs) // Recreate domain and re-scan the files txNum := d.txNum - d.Close() + d.closeWhatNotInList([]string{}) + d.OpenFolder() - var err error - d, err = NewDomain(path, path, d.aggregationStep, d.filenameBase, d.keysTable, d.valsTable, d.indexKeysTable, d.historyValsTable, d.settingsTable, d.indexTable, d.prefixLen, d.compressVals) - require.NoError(t, err) - require.NoError(t, d.reOpenFolder()) - defer d.Close() d.SetTxNum(txNum) // Check the history checkHistory(t, db, d, txs) } func TestDelete(t *testing.T) { - _, db, d := testDbAndDomain(t, 0 /* prefixLen */) - ctx := context.Background() + _, db, d := testDbAndDomain(t) + ctx, require := context.Background(), require.New(t) tx, err := db.BeginRw(ctx) - require.NoError(t, err) + require.NoError(err) defer tx.Rollback() d.SetTx(tx) - d.StartWrites("") + d.StartWrites() defer d.FinishWrites() // Put on even txNum, delete on odd txNum @@ -532,38 +527,42 @@ func TestDelete(t *testing.T) { } else { err = d.Delete([]byte("key1"), nil) } - require.NoError(t, err) + require.NoError(err) } err = d.Rotate().Flush(ctx, tx) - require.NoError(t, err) + require.NoError(err) collateAndMerge(t, db, tx, d, 1000) // Check the history dc := d.MakeContext() defer dc.Close() for txNum := uint64(0); txNum < 1000; txNum++ { - val, err := dc.GetBeforeTxNum([]byte("key1"), txNum+1, tx) - require.NoError(t, err) label := fmt.Sprintf("txNum=%d", txNum) - if txNum%2 == 0 { - require.Equal(t, []byte("value1"), val, label) - } else { - require.Nil(t, val, label) - } - val, err = dc.GetBeforeTxNum([]byte("key2"), txNum+1, tx) - require.NoError(t, err) - require.Nil(t, val, label) + //val, ok, err := dc.GetBeforeTxNum([]byte("key1"), txNum+1, tx) + //require.NoError(err) + //require.True(ok) + //if txNum%2 == 0 { + // require.Equal([]byte("value1"), val, label) + //} else { + // require.Nil(val, label) + //} + //if txNum == 976 { + val, err := dc.GetBeforeTxNum([]byte("key2"), txNum+1, tx) + require.NoError(err) + //require.False(ok, label) + require.Nil(val, label) + //} } } func filledDomainFixedSize(t *testing.T, keysCount, txCount uint64) (string, kv.RwDB, *Domain, map[string][]bool) { t.Helper() - path, db, d := testDbAndDomain(t, 0 /* prefixLen */) + path, db, d := testDbAndDomain(t) ctx := context.Background() tx, err := db.BeginRw(ctx) require.NoError(t, err) defer tx.Rollback() d.SetTx(tx) - d.StartWrites("") + d.StartWrites() defer d.FinishWrites() // keys are encodings of numbers 1..31 @@ -656,7 +655,7 @@ func TestDomain_Prune_AfterAllWrites(t *testing.T) { func TestDomain_PruneOnWrite(t *testing.T) { keysCount, txCount := uint64(16), uint64(64) - path, db, d := testDbAndDomain(t, 0 /* prefixLen */) + path, db, d := testDbAndDomain(t) ctx := context.Background() defer os.Remove(path) @@ -664,7 +663,7 @@ func TestDomain_PruneOnWrite(t *testing.T) { require.NoError(t, err) defer tx.Rollback() 
d.SetTx(tx) - d.StartWrites("") + d.StartWrites() defer d.FinishWrites() // keys are encodings of numbers 1..31 @@ -718,6 +717,7 @@ func TestDomain_PruneOnWrite(t *testing.T) { binary.BigEndian.PutUint64(v[:], valNum) val, err := dc.GetBeforeTxNum(k[:], txNum+1, tx) + require.NoError(t, err) if keyNum == txNum%d.aggregationStep { if txNum > 1 { binary.BigEndian.PutUint64(v[:], txNum-1) @@ -751,16 +751,14 @@ func TestScanStaticFilesD(t *testing.T) { ii := &Domain{History: &History{InvertedIndex: &InvertedIndex{filenameBase: "test", aggregationStep: 1}}, files: btree2.NewBTreeG[*filesItem](filesItemLess), } - ffs := fstest.MapFS{ - "test.0-1.kv": {}, - "test.1-2.kv": {}, - "test.0-4.kv": {}, - "test.2-3.kv": {}, - "test.3-4.kv": {}, - "test.4-5.kv": {}, + files := []string{ + "test.0-1.kv", + "test.1-2.kv", + "test.0-4.kv", + "test.2-3.kv", + "test.3-4.kv", + "test.4-5.kv", } - files, err := ffs.ReadDir(".") - require.NoError(t, err) ii.scanStateFiles(files) var found []string ii.files.Walk(func(items []*filesItem) bool { diff --git a/state/gc_test.go b/state/gc_test.go index d5bfe7596..2ef6da87f 100644 --- a/state/gc_test.go +++ b/state/gc_test.go @@ -3,80 +3,170 @@ package state import ( "context" "testing" + "time" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/stretchr/testify/require" ) func TestGCReadAfterRemoveFile(t *testing.T) { - require := require.New(t) - _, db, h, txs := filledHistory(t) - collateAndMergeHistory(t, db, h, txs) + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() ctx := context.Background() - t.Run("read after: remove when have reader", func(t *testing.T) { - tx, err := db.BeginRo(ctx) - require.NoError(err) - defer tx.Rollback() - - // - create immutable view - // - del cold file - // - read from canDelete file - // - close view - // - open new view - // - make sure there is no canDelete file - hc := h.MakeContext() - _ = hc - lastOnFs, _ := h.files.Max() - require.False(lastOnFs.frozen) // prepared dataset must have some non-frozen files. or it's bad dataset. 
- h.integrateMergedFiles(nil, []*filesItem{lastOnFs}, nil, nil) - require.NotNil(lastOnFs.decompressor) - - lastInView := hc.files[len(hc.files)-1] - g := lastInView.src.decompressor.MakeGetter() - require.Equal(lastInView.startTxNum, lastOnFs.startTxNum) - require.Equal(lastInView.endTxNum, lastOnFs.endTxNum) - if g.HasNext() { - k, _ := g.Next(nil) - require.Equal(8, len(k)) - v, _ := g.Next(nil) - require.Equal(8, len(v)) - } - - require.NotNil(lastOnFs.decompressor) - loc := hc.ic.loc // replace of locality index must not affect current HistoryContext, but expect to be closed after last reader - h.localityIndex.integrateFiles(LocalityIndexFiles{}, 0, 0) - require.NotNil(loc.file) - hc.Close() - require.Nil(lastOnFs.decompressor) - require.NotNil(loc.file) - - nonDeletedOnFs, _ := h.files.Max() - require.False(nonDeletedOnFs.frozen) - require.NotNil(nonDeletedOnFs.decompressor) // non-canDelete files are not closed - - hc = h.MakeContext() - newLastInView := hc.files[len(hc.files)-1] - require.False(lastOnFs.frozen) - require.False(lastInView.startTxNum == newLastInView.startTxNum && lastInView.endTxNum == newLastInView.endTxNum) - - hc.Close() - }) + test := func(t *testing.T, h *History, db kv.RwDB, txs uint64) { + t.Helper() + require := require.New(t) + collateAndMergeHistory(t, db, h, txs) + + t.Run("read after: remove when have reader", func(t *testing.T) { + tx, err := db.BeginRo(ctx) + require.NoError(err) + defer tx.Rollback() + + // - create immutable view + // - del cold file + // - read from canDelete file + // - close view + // - open new view + // - make sure there is no canDelete file + hc := h.MakeContext() + _ = hc + lastOnFs, _ := h.files.Max() + require.False(lastOnFs.frozen) // prepared dataset must have some non-frozen files. or it's bad dataset. + h.integrateMergedFiles(nil, []*filesItem{lastOnFs}, nil, nil) + require.NotNil(lastOnFs.decompressor) + + lastInView := hc.files[len(hc.files)-1] + g := lastInView.src.decompressor.MakeGetter() + require.Equal(lastInView.startTxNum, lastOnFs.startTxNum) + require.Equal(lastInView.endTxNum, lastOnFs.endTxNum) + if g.HasNext() { + k, _ := g.Next(nil) + require.Equal(8, len(k)) + v, _ := g.Next(nil) + require.Equal(8, len(v)) + } + + require.NotNil(lastOnFs.decompressor) + loc := hc.ic.loc // replace of locality index must not affect current HistoryContext, but expect to be closed after last reader + h.localityIndex.integrateFiles(LocalityIndexFiles{}, 0, 0) + require.NotNil(loc.file) + hc.Close() + require.Nil(lastOnFs.decompressor) + require.NotNil(loc.file) + + nonDeletedOnFs, _ := h.files.Max() + require.False(nonDeletedOnFs.frozen) + require.NotNil(nonDeletedOnFs.decompressor) // non-canDelete files are not closed - t.Run("read after: remove when no readers", func(t *testing.T) { - tx, err := db.BeginRo(ctx) - require.NoError(err) - defer tx.Rollback() - - // - del cold file - // - new reader must not see canDelete file - hc := h.MakeContext() - lastOnFs, _ := h.files.Max() - require.False(lastOnFs.frozen) // prepared dataset must have some non-frozen files. or it's bad dataset. 
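+			// note: the merge below only marks the file canDelete; the view opened
+			// above must still be able to read it until the last reader closes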
- h.integrateMergedFiles(nil, []*filesItem{lastOnFs}, nil, nil) - - require.NotNil(lastOnFs.decompressor) - hc.Close() - require.Nil(lastOnFs.decompressor) + hc = h.MakeContext() + newLastInView := hc.files[len(hc.files)-1] + require.False(lastOnFs.frozen) + require.False(lastInView.startTxNum == newLastInView.startTxNum && lastInView.endTxNum == newLastInView.endTxNum) + + hc.Close() + }) + + t.Run("read after: remove when no readers", func(t *testing.T) { + tx, err := db.BeginRo(ctx) + require.NoError(err) + defer tx.Rollback() + + // - del cold file + // - new reader must not see canDelete file + hc := h.MakeContext() + lastOnFs, _ := h.files.Max() + require.False(lastOnFs.frozen) // prepared dataset must have some non-frozen files. or it's bad dataset. + h.integrateMergedFiles(nil, []*filesItem{lastOnFs}, nil, nil) + + require.NotNil(lastOnFs.decompressor) + hc.Close() + require.Nil(lastOnFs.decompressor) + }) + } + t.Run("large_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, true) + test(t, h, db, txs) + }) + t.Run("small_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, false) + test(t, h, db, txs) }) +} + +func TestDomainGCReadAfterRemoveFile(t *testing.T) { + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() + ctx := context.Background() + + test := func(t *testing.T, h *Domain, db kv.RwDB, txs uint64) { + t.Helper() + require := require.New(t) + collateAndMerge(t, db, nil, h, txs) + + t.Run("read after: remove when have reader", func(t *testing.T) { + tx, err := db.BeginRo(ctx) + require.NoError(err) + defer tx.Rollback() + + // - create immutable view + // - del cold file + // - read from canDelete file + // - close view + // - open new view + // - make sure there is no canDelete file + hc := h.MakeContext() + _ = hc + lastOnFs, _ := h.files.Max() + require.False(lastOnFs.frozen) // prepared dataset must have some non-frozen files. or it's bad dataset. + h.integrateMergedFiles([]*filesItem{lastOnFs}, nil, nil, nil, nil, nil) + require.NotNil(lastOnFs.decompressor) + + lastInView := hc.files[len(hc.files)-1] + g := lastInView.src.decompressor.MakeGetter() + require.Equal(lastInView.startTxNum, lastOnFs.startTxNum) + require.Equal(lastInView.endTxNum, lastOnFs.endTxNum) + if g.HasNext() { + k, _ := g.Next(nil) + require.Equal(8, len(k)) + v, _ := g.Next(nil) + require.Equal(8, len(v)) + } + + require.NotNil(lastOnFs.decompressor) + hc.Close() + require.Nil(lastOnFs.decompressor) + + nonDeletedOnFs, _ := h.files.Max() + require.False(nonDeletedOnFs.frozen) + require.NotNil(nonDeletedOnFs.decompressor) // non-canDelete files are not closed + + hc = h.MakeContext() + newLastInView := hc.files[len(hc.files)-1] + require.False(lastOnFs.frozen) + require.False(lastInView.startTxNum == newLastInView.startTxNum && lastInView.endTxNum == newLastInView.endTxNum) + + hc.Close() + }) + + t.Run("read after: remove when no readers", func(t *testing.T) { + tx, err := db.BeginRo(ctx) + require.NoError(err) + defer tx.Rollback() + + // - del cold file + // - new reader must not see canDelete file + hc := h.MakeContext() + lastOnFs, _ := h.files.Max() + require.False(lastOnFs.frozen) // prepared dataset must have some non-frozen files. or it's bad dataset. 
+ h.integrateMergedFiles([]*filesItem{lastOnFs}, nil, nil, nil, nil, nil) + require.NotNil(lastOnFs.decompressor) + hc.Close() + require.Nil(lastOnFs.decompressor) + }) + } + _, db, d, txs := filledDomain(t) + test(t, d, db, txs) } diff --git a/state/history.go b/state/history.go index 2dba97057..943bfaca9 100644 --- a/state/history.go +++ b/state/history.go @@ -22,23 +22,22 @@ import ( "context" "encoding/binary" "fmt" - "io/fs" "math" "os" "path/filepath" "regexp" "strconv" - "sync" + "sync/atomic" "time" "github.com/RoaringBitmap/roaring/roaring64" - "github.com/ledgerwatch/erigon-lib/kv/order" + "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/log/v3" btree2 "github.com/tidwall/btree" - atomic2 "go.uber.org/atomic" "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" - "golang.org/x/sync/semaphore" + + "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" @@ -47,6 +46,7 @@ import ( "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/bitmapdb" + "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/recsplit" "github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32" ) @@ -61,75 +61,73 @@ type History struct { // roFiles derivative from field `file`, but without garbage (canDelete=true, overlaps, etc...) // MakeContext() using this field in zero-copy way - roFiles atomic2.Pointer[[]ctxItem] + roFiles atomic.Pointer[[]ctxItem] historyValsTable string // key1+key2+txnNum -> oldValue , stores values BEFORE change - settingsTable string compressWorkers int compressVals bool integrityFileExtensions []string + largeValues bool // can't use DupSort optimization (aka. prefix-compression) if values size > 4kb + + wal *historyWAL +} - wal *historyWAL - walLock sync.RWMutex -} - -func NewHistory( - dir, tmpdir string, - aggregationStep uint64, - filenameBase string, - indexKeysTable string, - indexTable string, - historyValsTable string, - settingsTable string, - compressVals bool, - integrityFileExtensions []string, -) (*History, error) { +func NewHistory(dir, tmpdir string, aggregationStep uint64, + filenameBase, indexKeysTable, indexTable, historyValsTable string, + compressVals bool, integrityFileExtensions []string, largeValues bool) (*History, error) { h := History{ files: btree2.NewBTreeGOptions[*filesItem](filesItemLess, btree2.Options{Degree: 128, NoLocks: false}), - roFiles: *atomic2.NewPointer(&[]ctxItem{}), historyValsTable: historyValsTable, - settingsTable: settingsTable, compressVals: compressVals, compressWorkers: 1, integrityFileExtensions: integrityFileExtensions, + largeValues: largeValues, } - + h.roFiles.Store(&[]ctxItem{}) var err error h.InvertedIndex, err = NewInvertedIndex(dir, tmpdir, aggregationStep, filenameBase, indexKeysTable, indexTable, true, append(slices.Clone(h.integrityFileExtensions), "v")) if err != nil { return nil, fmt.Errorf("NewHistory: %s, %w", filenameBase, err) } - //if err := h.reOpenFolder(); err != nil { - // return nil, err - //} return &h, nil } -func (h *History) reOpenFolder() error { - h.closeFiles() - files, err := os.ReadDir(h.dir) - if err != nil { + +// OpenList - main method to open list of files. +// It's ok if some files was open earlier. +// If some file already open: noop. +// If some file already open but not in provided list: close and remove from `files` field. 
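+//
+// Minimal usage sketch (the file names here are hypothetical, for illustration only):
+//
+//	_ = h.OpenList([]string{"accounts.0-16.v", "accounts.16-32.v"}) // open these two, close anything else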
+func (h *History) OpenList(fNames []string) error {
+	if err := h.InvertedIndex.OpenList(fNames); err != nil {
 		return err
 	}
-	_ = h.scanStateFiles(files, h.integrityFileExtensions)
-	if err = h.openFiles(); err != nil {
-		return fmt.Errorf("NewHistory.openFiles: %s, %w", h.filenameBase, err)
+	return h.openList(fNames)
+}
+
+func (h *History) openList(fNames []string) error {
+	h.closeWhatNotInList(fNames)
+	_ = h.scanStateFiles(fNames)
+	if err := h.openFiles(); err != nil {
+		return fmt.Errorf("History.OpenList: %s, %w", h.filenameBase, err)
+	}
+	return nil
+}
+
+func (h *History) OpenFolder() error {
+	files, err := h.fileNamesOnDisk()
+	if err != nil {
+		return err
 	}
-	return h.InvertedIndex.reOpenFolder()
+	return h.OpenList(files)
 }
 
 // scanStateFiles returns `uselessFiles`: a file "is useless" when it is a subset
 // of some frozen file; such files can be safely deleted. A subset of a non-frozen
 // file may still be useful.
-func (h *History) scanStateFiles(files []fs.DirEntry, integrityFileExtensions []string) (uselessFiles []*filesItem) {
+func (h *History) scanStateFiles(fNames []string) (uselessFiles []*filesItem) {
 	re := regexp.MustCompile("^" + h.filenameBase + ".([0-9]+)-([0-9]+).v$")
 	var err error
 Loop:
-	for _, f := range files {
-		if !f.Type().IsRegular() {
-			continue
-		}
-
-		name := f.Name()
+	for _, name := range fNames {
 		subs := re.FindStringSubmatch(name)
 		if len(subs) != 3 {
 			if len(subs) != 0 {
@@ -154,7 +152,7 @@ Loop:
 		startTxNum, endTxNum := startStep*h.aggregationStep, endStep*h.aggregationStep
 		frozen := endStep-startStep == StepsInBiggestFile
 
-		for _, ext := range integrityFileExtensions {
+		for _, ext := range h.integrityFileExtensions {
 			requiredFile := fmt.Sprintf("%s.%d-%d.%s", h.filenameBase, startStep, endStep, ext)
 			if !dir.FileExist(filepath.Join(h.dir, requiredFile)) {
 				log.Debug(fmt.Sprintf("[snapshots] skip %s because %s doesn't exist", name, requiredFile))
@@ -163,6 +161,10 @@ Loop:
 		}
 
 		var newFile = &filesItem{startTxNum: startTxNum, endTxNum: endTxNum, frozen: frozen}
+		if _, has := h.files.Get(newFile); has {
+			continue
+		}
+
 		addNewFile := true
 		var subSets []*filesItem
 		h.files.Walk(func(items []*filesItem) bool {
@@ -189,19 +191,17 @@ Loop:
 			h.files.Set(newFile)
 		}
 	}
-	h.reCalcRoFiles()
 	return uselessFiles
 }
 
 func (h *History) openFiles() error {
 	var totalKeys uint64
 	var err error
-	invalidFileItems := make([]*filesItem, 0)
 	h.files.Walk(func(items []*filesItem) bool {
 		for _, item := range items {
 			if item.decompressor != nil {
-				item.decompressor.Close()
+				continue
 			}
 			fromStep, toStep := item.startTxNum/h.aggregationStep, item.endTxNum/h.aggregationStep
 			datPath := filepath.Join(h.dir, fmt.Sprintf("%s.%d-%d.v", h.filenameBase, fromStep, toStep))
@@ -213,15 +213,17 @@ func (h *History) openFiles() error {
 				log.Debug("History.openFiles: %w, %s", err, datPath)
 				return false
 			}
-			if item.index == nil {
-				idxPath := filepath.Join(h.dir, fmt.Sprintf("%s.%d-%d.vi", h.filenameBase, fromStep, toStep))
-				if dir.FileExist(idxPath) {
-					if item.index, err = recsplit.OpenIndex(idxPath); err != nil {
-						log.Debug(fmt.Errorf("History.openFiles: %w, %s", err, idxPath).Error())
-						return false
-					}
-					totalKeys += item.index.KeyCount()
+
+			if item.index != nil {
+				continue
+			}
+			idxPath := filepath.Join(h.dir, fmt.Sprintf("%s.%d-%d.vi", h.filenameBase, fromStep, toStep))
+			if dir.FileExist(idxPath) {
+				if item.index, err = recsplit.OpenIndex(idxPath); err != nil {
+					log.Debug(fmt.Errorf("History.openFiles: %w, %s", err, idxPath).Error())
+					return false
 				}
+				totalKeys += item.index.KeyCount()
 			}
 		}
 		return true
@@ -233,42 +235,52 @@ func (h *History) openFiles() error {
 			h.files.Delete(item)
 		}
 	}
+	h.reCalcRoFiles()
 	return nil
 }
 
-func (h *History) closeFiles() {
+func (h *History) closeWhatNotInList(fNames []string) {
+	var toDelete []*filesItem
 	h.files.Walk(func(items []*filesItem) bool {
+	Loop1:
 		for _, item := range items {
-			if item.decompressor != nil {
-				if err := item.decompressor.Close(); err != nil {
-					log.Trace("close", "err", err, "file", item.decompressor.FileName())
-				}
-				item.decompressor = nil
-			}
-			if item.index != nil {
-				if err := item.index.Close(); err != nil {
-					log.Trace("close", "err", err, "file", item.index.FileName())
+			for _, protectName := range fNames {
+				if item.decompressor != nil && item.decompressor.FileName() == protectName {
+					continue Loop1
 				}
-				item.index = nil
 			}
+			toDelete = append(toDelete, item)
 		}
 		return true
 	})
-	h.files.Clear()
-	h.reCalcRoFiles()
+	for _, item := range toDelete {
+		if item.decompressor != nil {
+			if err := item.decompressor.Close(); err != nil {
+				log.Trace("close", "err", err, "file", item.decompressor.FileName())
+			}
+			item.decompressor = nil
+		}
+		if item.index != nil {
+			if err := item.index.Close(); err != nil {
+				log.Trace("close", "err", err, "file", item.index.FileName())
+			}
+			item.index = nil
+		}
+		h.files.Delete(item)
+	}
 }
 
 func (h *History) Close() {
 	h.InvertedIndex.Close()
-	h.closeFiles()
+	h.closeWhatNotInList([]string{})
+	h.reCalcRoFiles()
 }
 
 func (h *History) Files() (res []string) {
 	h.files.Walk(func(items []*filesItem) bool {
 		for _, item := range items {
 			if item.decompressor != nil {
-				_, fName := filepath.Split(item.decompressor.FilePath())
-				res = append(res, filepath.Join("history", fName))
+				res = append(res, item.decompressor.FileName())
 			}
 		}
 		return true
@@ -295,45 +307,44 @@ func (h *History) BuildOptionalMissedIndices(ctx context.Context) (err error) {
 	return h.localityIndex.BuildMissedIndices(ctx, h.InvertedIndex)
 }
 
-func (h *History) BuildMissedIndices(ctx context.Context, sem *semaphore.Weighted) (err error) {
-	if err := h.InvertedIndex.BuildMissedIndices(ctx, sem); err != nil {
+func (h *History) buildVi(ctx context.Context, item *filesItem, p *background.Progress) (err error) {
+	search := &filesItem{startTxNum: item.startTxNum, endTxNum: item.endTxNum}
+	iiItem, ok := h.InvertedIndex.files.Get(search)
+	if !ok {
+		return nil
+	}
+
+	fromStep, toStep := item.startTxNum/h.aggregationStep, item.endTxNum/h.aggregationStep
+	fName :=
fmt.Sprintf("%s.%d-%d.vi", h.filenameBase, fromStep, toStep) - idxPath := filepath.Join(h.dir, fName) - log.Info("[snapshots] build idx", "file", fName) - count, err := iterateForVi(item, iiItem, h.compressVals, func(v []byte) error { return nil }) - if err != nil { - return err - } - return buildVi(item, iiItem, idxPath, h.tmpdir, count, false /* values */, h.compressVals) + p := &background.Progress{} + ps.Add(p) + defer ps.Delete(p) + return h.buildVi(ctx, item, p) }) } - if err := g.Wait(); err != nil { - return err - } - - return h.openFiles() } -func iterateForVi(historyItem, iiItem *filesItem, compressVals bool, f func(v []byte) error) (count int, err error) { +func iterateForVi(historyItem, iiItem *filesItem, p *background.Progress, compressVals bool, f func(v []byte) error) (count int, err error) { var cp CursorHeap heap.Init(&cp) g := iiItem.decompressor.MakeGetter() @@ -384,14 +395,14 @@ func iterateForVi(historyItem, iiItem *filesItem, compressVals bool, f func(v [] } else { heap.Remove(&cp, 0) } + + p.Processed.Add(1) } } return count, nil } -func buildVi(historyItem, iiItem *filesItem, historyIdxPath, tmpdir string, count int, values, compressVals bool) error { - _, fName := filepath.Split(historyIdxPath) - log.Debug("[snapshots] build idx", "file", fName) +func buildVi(ctx context.Context, historyItem, iiItem *filesItem, historyIdxPath, tmpdir string, count int, p *background.Progress, compressVals bool) error { rs, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{ KeyCount: count, Enums: false, @@ -421,6 +432,12 @@ func buildVi(historyItem, iiItem *filesItem, historyIdxPath, tmpdir string, coun g2.Reset(0) valOffset = 0 for g.HasNext() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + keyBuf, _ = g.NextUncompressed() valBuf, _ = g.NextUncompressed() ef, _ := eliasfano32.ReadEliasFano(valBuf) @@ -438,6 +455,8 @@ func buildVi(historyItem, iiItem *filesItem, historyIdxPath, tmpdir string, coun valOffset = g2.SkipUncompressed() } } + + p.Processed.Add(1) } if err = rs.Build(); err != nil { if rs.Collision() { @@ -454,40 +473,30 @@ func buildVi(historyItem, iiItem *filesItem, historyIdxPath, tmpdir string, coun } func (h *History) AddPrevValue(key1, key2, original []byte) (err error) { - h.walLock.RLock() // read-lock for reading fielw `w` and writing into it, write-lock for setting new `w` - err = h.wal.addPrevValue(key1, key2, original) - h.walLock.RUnlock() - return err + if original == nil { + original = []byte{} + } + return h.wal.addPrevValue(key1, key2, original) } -func (h *History) DiscardHistory(tmpdir string) { - h.InvertedIndex.StartWrites(tmpdir) - h.walLock.Lock() - defer h.walLock.Unlock() - h.wal = h.newWriter(tmpdir, false, true) +func (h *History) DiscardHistory() { + h.InvertedIndex.StartWrites() + h.wal = h.newWriter(h.tmpdir, false, true) } -func (h *History) StartWrites(tmpdir string) { - h.InvertedIndex.StartWrites(tmpdir) - h.walLock.Lock() - defer h.walLock.Unlock() - h.wal = h.newWriter(tmpdir, true, false) +func (h *History) StartWrites() { + h.InvertedIndex.StartWrites() + h.wal = h.newWriter(h.tmpdir, true, false) } func (h *History) FinishWrites() { h.InvertedIndex.FinishWrites() - h.walLock.Lock() - defer h.walLock.Unlock() h.wal.close() h.wal = nil } func (h *History) Rotate() historyFlusher { - h.walLock.Lock() - defer h.walLock.Unlock() - if h.wal != nil { - h.wal.historyValsFlushing, h.wal.historyVals = h.wal.historyVals, h.wal.historyValsFlushing - h.wal.autoIncrementFlush = h.wal.autoIncrement - } - return 
historyFlusher{h.wal, h.InvertedIndex.Rotate()} + w := h.wal + h.wal = h.newWriter(h.wal.tmpdir, h.wal.buffered, h.wal.discard) + return historyFlusher{w, h.InvertedIndex.Rotate()} } type historyFlusher struct { @@ -506,16 +515,14 @@ func (f historyFlusher) Flush(ctx context.Context, tx kv.RwTx) error { } type historyWAL struct { - h *History - historyVals *etl.Collector - historyValsFlushing *etl.Collector - tmpdir string - autoIncrementBuf []byte - historyKey []byte - autoIncrement uint64 - autoIncrementFlush uint64 - buffered bool - discard bool + h *History + historyVals *etl.Collector + tmpdir string + autoIncrementBuf []byte + historyKey []byte + buffered bool + discard bool + largeValues bool } func (h *historyWAL) close() { @@ -535,24 +542,12 @@ func (h *History) newWriter(tmpdir string, buffered, discard bool) *historyWAL { autoIncrementBuf: make([]byte, 8), historyKey: make([]byte, 0, 128), + largeValues: h.largeValues, } if buffered { - w.historyVals = etl.NewCollector(h.historyValsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRam)) - w.historyValsFlushing = etl.NewCollector(h.historyValsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRam)) + w.historyVals = etl.NewCollector(h.historyValsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM)) w.historyVals.LogLvl(log.LvlTrace) - w.historyValsFlushing.LogLvl(log.LvlTrace) } - - val, err := h.tx.GetOne(h.settingsTable, historyValCountKey) - if err != nil { - panic(err) - //return err - } - var valNum uint64 - if len(val) > 0 { - valNum = binary.BigEndian.Uint64(val) - } - w.autoIncrement = valNum return w } @@ -560,13 +555,10 @@ func (h *historyWAL) flush(ctx context.Context, tx kv.RwTx) error { if h.discard { return nil } - binary.BigEndian.PutUint64(h.autoIncrementBuf, h.autoIncrementFlush) - if err := tx.Put(h.h.settingsTable, historyValCountKey, h.autoIncrementBuf); err != nil { - return err - } - if err := h.historyValsFlushing.Load(tx, h.h.historyValsTable, loadFunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + if err := h.historyVals.Load(tx, h.h.historyValsTable, loadFunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { return err } + h.close() return nil } @@ -575,60 +567,47 @@ func (h *historyWAL) addPrevValue(key1, key2, original []byte) error { return nil } - /* + ii := h.h.InvertedIndex + if h.largeValues { lk := len(key1) + len(key2) - historyKey := make([]byte, lk+8) + historyKey := h.historyKey[:lk+8] copy(historyKey, key1) if len(key2) > 0 { copy(historyKey[len(key1):], key2) } - if len(original) > 0 { - val, err := h.h.tx.GetOne(h.h.settingsTable, historyValCountKey) - if err != nil { - return err - } - var valNum uint64 - if len(val) > 0 { - valNum = binary.BigEndian.Uint64(val) - } - valNum++ - binary.BigEndian.PutUint64(historyKey[lk:], valNum) - if err = h.h.tx.Put(h.h.settingsTable, historyValCountKey, historyKey[lk:]); err != nil { + copy(historyKey[lk:], h.h.InvertedIndex.txNumBytes[:]) + + if !h.buffered { + if err := h.h.tx.Put(h.h.historyValsTable, historyKey, original); err != nil { return err } - if err = h.h.tx.Put(h.h.historyValsTable, historyKey[lk:], original); err != nil { + if err := ii.tx.Put(ii.indexKeysTable, ii.txNumBytes[:], historyKey[:lk]); err != nil { return err } + return nil + } + if err := h.historyVals.Collect(historyKey, original); err != nil { + return err + } + if err := ii.wal.indexKeys.Collect(ii.txNumBytes[:], historyKey[:lk]); err != nil { + return err } - */ + return nil + } lk := len(key1) + len(key2) - historyKey := h.historyKey[:lk+8] + historyKey := 
h.historyKey[:lk+8+len(original)] copy(historyKey, key1) - if len(key2) > 0 { - copy(historyKey[len(key1):], key2) - } - if len(original) > 0 { - h.autoIncrement++ - binary.BigEndian.PutUint64(historyKey[lk:], h.autoIncrement) - //if err := h.h.tx.Put(h.h.settingsTable, historyValCountKey, historyKey[lk:]); err != nil { - // return err - //} - - if h.buffered { - if err := h.historyVals.Collect(historyKey[lk:], original); err != nil { - return err - } - } else { - if err := h.h.tx.Put(h.h.historyValsTable, historyKey[lk:], original); err != nil { - return err - } - } - } else { - binary.BigEndian.PutUint64(historyKey[lk:], 0) + copy(historyKey[len(key1):], key2) + copy(historyKey[lk:], h.h.InvertedIndex.txNumBytes[:]) + copy(historyKey[lk+8:], original) + historyKey1 := historyKey[:lk] + historyVal := historyKey[lk:] + invIdxVal := historyKey[:lk] + if err := h.historyVals.Collect(historyKey1, historyVal); err != nil { + return err } - - if err := h.h.InvertedIndex.add(historyKey, historyKey[:lk]); err != nil { + if err := ii.wal.indexKeys.Collect(ii.txNumBytes[:], invIdxVal); err != nil { return err } return nil @@ -650,7 +629,7 @@ func (c HistoryCollation) Close() { } } -func (h *History) collate(step, txFrom, txTo uint64, roTx kv.Tx, logEvery *time.Ticker) (HistoryCollation, error) { +func (h *History) collate(step, txFrom, txTo uint64, roTx kv.Tx) (HistoryCollation, error) { var historyComp *compress.Compressor var err error closeComp := true @@ -673,7 +652,6 @@ func (h *History) collate(step, txFrom, txTo uint64, roTx kv.Tx, logEvery *time. indexBitmaps := map[string]*roaring64.Bitmap{} var txKey [8]byte binary.BigEndian.PutUint64(txKey[:], txFrom) - var val []byte var k, v []byte for k, v, err = keysCursor.Seek(txKey[:]); err == nil && k != nil; k, v, err = keysCursor.Next() { txNum := binary.BigEndian.Uint64(k) @@ -682,17 +660,11 @@ func (h *History) collate(step, txFrom, txTo uint64, roTx kv.Tx, logEvery *time. } var bitmap *roaring64.Bitmap var ok bool - if bitmap, ok = indexBitmaps[string(v[:len(v)-8])]; !ok { + if bitmap, ok = indexBitmaps[string(v)]; !ok { bitmap = bitmapdb.NewBitmap64() - indexBitmaps[string(v[:len(v)-8])] = bitmap + indexBitmaps[string(v)] = bitmap } bitmap.Add(txNum) - select { - case <-logEvery.C: - log.Info("[snapshots] collate history", "name", h.filenameBase, "range", fmt.Sprintf("%.2f-%.2f", float64(txNum)/float64(h.aggregationStep), float64(txTo)/float64(h.aggregationStep))) - bitmap.RunOptimize() - default: - } } if err != nil { return HistoryCollation{}, fmt.Errorf("iterate over %s history cursor: %w", h.filenameBase, err) @@ -703,29 +675,56 @@ func (h *History) collate(step, txFrom, txTo uint64, roTx kv.Tx, logEvery *time. 
} slices.Sort(keys) historyCount := 0 + keyBuf := make([]byte, 256) + + var c kv.Cursor + var cd kv.CursorDupSort + if h.largeValues { + c, err = roTx.Cursor(h.historyValsTable) + if err != nil { + return HistoryCollation{}, err + } + defer c.Close() + } else { + cd, err = roTx.CursorDupSort(h.historyValsTable) + if err != nil { + return HistoryCollation{}, err + } + defer cd.Close() + } for _, key := range keys { bitmap := indexBitmaps[key] it := bitmap.Iterator() + copy(keyBuf, key) + keyBuf = keyBuf[:len(key)+8] for it.HasNext() { txNum := it.Next() - binary.BigEndian.PutUint64(txKey[:], txNum) - v, err := keysCursor.SeekBothRange(txKey[:], []byte(key)) - if err != nil { - return HistoryCollation{}, err - } - if !bytes.HasPrefix(v, []byte(key)) { - continue - } - valNum := binary.BigEndian.Uint64(v[len(v)-8:]) - if valNum == 0 { - val = nil + binary.BigEndian.PutUint64(keyBuf[len(key):], txNum) + //TODO: use cursor range + if h.largeValues { + val, err := roTx.GetOne(h.historyValsTable, keyBuf) + if err != nil { + return HistoryCollation{}, fmt.Errorf("get %s history val [%x]: %w", h.filenameBase, k, err) + } + if len(val) == 0 { + val = nil + } + if err = historyComp.AddUncompressedWord(val); err != nil { + return HistoryCollation{}, fmt.Errorf("add %s history val [%x]=>[%x]: %w", h.filenameBase, k, val, err) + } } else { - if val, err = roTx.GetOne(h.historyValsTable, v[len(v)-8:]); err != nil { - return HistoryCollation{}, fmt.Errorf("get %s history val [%x]=>%d: %w", h.filenameBase, k, valNum, err) + val, err := cd.SeekBothRange(keyBuf[:len(key)], keyBuf[len(key):]) + if err != nil { + return HistoryCollation{}, err + } + if val != nil && binary.BigEndian.Uint64(val) == txNum { + val = val[8:] + } else { + val = nil + } + if err = historyComp.AddUncompressedWord(val); err != nil { + return HistoryCollation{}, fmt.Errorf("add %s history val [%x]=>[%x]: %w", h.filenameBase, k, val, err) } - } - if err = historyComp.AddUncompressedWord(val); err != nil { - return HistoryCollation{}, fmt.Errorf("add %s history val [%x]=>[%x]: %w", h.filenameBase, k, val, err) } historyCount++ } @@ -803,7 +802,7 @@ func (h *History) reCalcRoFiles() { // buildFiles performs potentially resource intensive operations of creating // static files and their indices -func (h *History) buildFiles(ctx context.Context, step uint64, collation HistoryCollation) (HistoryFiles, error) { +func (h *History) buildFiles(ctx context.Context, step uint64, collation HistoryCollation, ps *background.ProgressSet) (HistoryFiles, error) { historyComp := collation.historyComp var historyDecomp, efHistoryDecomp *compress.Decompressor var historyIdx, efHistoryIdx *recsplit.Index @@ -835,55 +834,79 @@ func (h *History) buildFiles(ctx context.Context, step uint64, collation History } } }() - historyIdxPath := filepath.Join(h.dir, fmt.Sprintf("%s.%d-%d.vi", h.filenameBase, step, step+1)) - if err := historyComp.Compress(); err != nil { - return HistoryFiles{}, fmt.Errorf("compress %s history: %w", h.filenameBase, err) - } - historyComp.Close() - historyComp = nil - var err error - if historyDecomp, err = compress.NewDecompressor(collation.historyPath); err != nil { - return HistoryFiles{}, fmt.Errorf("open %s history decompressor: %w", h.filenameBase, err) - } - // Build history ef - efHistoryPath := filepath.Join(h.dir, fmt.Sprintf("%s.%d-%d.ef", h.filenameBase, step, step+1)) - efHistoryComp, err = compress.NewCompressor(ctx, "ef history", efHistoryPath, h.tmpdir, compress.MinPatternScore, h.compressWorkers, log.LvlTrace) - if err 
!= nil { - return HistoryFiles{}, fmt.Errorf("create %s ef history compressor: %w", h.filenameBase, err) + + var historyIdxPath, efHistoryPath string + + { + historyIdxFileName := fmt.Sprintf("%s.%d-%d.vi", h.filenameBase, step, step+1) + p := ps.AddNew(historyIdxFileName, 1) + defer ps.Delete(p) + historyIdxPath = filepath.Join(h.dir, historyIdxFileName) + if err := historyComp.Compress(); err != nil { + return HistoryFiles{}, fmt.Errorf("compress %s history: %w", h.filenameBase, err) + } + historyComp.Close() + historyComp = nil + ps.Delete(p) } - var buf []byte + keys := make([]string, 0, len(collation.indexBitmaps)) for key := range collation.indexBitmaps { keys = append(keys, key) } slices.Sort(keys) - for _, key := range keys { - if err = efHistoryComp.AddUncompressedWord([]byte(key)); err != nil { - return HistoryFiles{}, fmt.Errorf("add %s ef history key [%x]: %w", h.InvertedIndex.filenameBase, key, err) + + { + var err error + if historyDecomp, err = compress.NewDecompressor(collation.historyPath); err != nil { + return HistoryFiles{}, fmt.Errorf("open %s history decompressor: %w", h.filenameBase, err) } - bitmap := collation.indexBitmaps[key] - ef := eliasfano32.NewEliasFano(bitmap.GetCardinality(), bitmap.Maximum()) - it := bitmap.Iterator() - for it.HasNext() { - txNum := it.Next() - ef.AddOffset(txNum) + + // Build history ef + efHistoryFileName := fmt.Sprintf("%s.%d-%d.ef", h.filenameBase, step, step+1) + + p := ps.AddNew(efHistoryFileName, 1) + defer ps.Delete(p) + efHistoryPath = filepath.Join(h.dir, efHistoryFileName) + efHistoryComp, err = compress.NewCompressor(ctx, "ef history", efHistoryPath, h.tmpdir, compress.MinPatternScore, h.compressWorkers, log.LvlTrace) + if err != nil { + return HistoryFiles{}, fmt.Errorf("create %s ef history compressor: %w", h.filenameBase, err) + } + var buf []byte + for _, key := range keys { + if err = efHistoryComp.AddUncompressedWord([]byte(key)); err != nil { + return HistoryFiles{}, fmt.Errorf("add %s ef history key [%x]: %w", h.InvertedIndex.filenameBase, key, err) + } + bitmap := collation.indexBitmaps[key] + ef := eliasfano32.NewEliasFano(bitmap.GetCardinality(), bitmap.Maximum()) + it := bitmap.Iterator() + for it.HasNext() { + txNum := it.Next() + ef.AddOffset(txNum) + } + ef.Build() + buf = ef.AppendBytes(buf[:0]) + if err = efHistoryComp.AddUncompressedWord(buf); err != nil { + return HistoryFiles{}, fmt.Errorf("add %s ef history val: %w", h.filenameBase, err) + } } - ef.Build() - buf = ef.AppendBytes(buf[:0]) - if err = efHistoryComp.AddUncompressedWord(buf); err != nil { - return HistoryFiles{}, fmt.Errorf("add %s ef history val: %w", h.filenameBase, err) + if err = efHistoryComp.Compress(); err != nil { + return HistoryFiles{}, fmt.Errorf("compress %s ef history: %w", h.filenameBase, err) } + efHistoryComp.Close() + efHistoryComp = nil + ps.Delete(p) } - if err = efHistoryComp.Compress(); err != nil { - return HistoryFiles{}, fmt.Errorf("compress %s ef history: %w", h.filenameBase, err) - } - efHistoryComp.Close() - efHistoryComp = nil + + var err error if efHistoryDecomp, err = compress.NewDecompressor(efHistoryPath); err != nil { return HistoryFiles{}, fmt.Errorf("open %s ef history decompressor: %w", h.filenameBase, err) } - efHistoryIdxPath := filepath.Join(h.dir, fmt.Sprintf("%s.%d-%d.efi", h.filenameBase, step, step+1)) - if efHistoryIdx, err = buildIndex(ctx, efHistoryDecomp, efHistoryIdxPath, h.tmpdir, len(keys), false /* values */); err != nil { + efHistoryIdxFileName := fmt.Sprintf("%s.%d-%d.efi", h.filenameBase, 
step, step+1) + efHistoryIdxPath := filepath.Join(h.dir, efHistoryIdxFileName) + p := ps.AddNew(efHistoryIdxFileName, uint64(len(keys)*2)) + defer ps.Delete(p) + if efHistoryIdx, err = buildIndexThenOpen(ctx, efHistoryDecomp, efHistoryIdxPath, h.tmpdir, len(keys), false /* values */, p); err != nil { return HistoryFiles{}, fmt.Errorf("build %s ef history idx: %w", h.filenameBase, err) } if rs, err = recsplit.NewRecSplit(recsplit.RecSplitArgs{ @@ -965,11 +988,6 @@ func (h *History) warmup(ctx context.Context, txFrom, limit uint64, tx kv.Tx) er defer historyKeysCursor.Close() var txKey [8]byte binary.BigEndian.PutUint64(txKey[:], txFrom) - idxC, err := tx.CursorDupSort(h.indexTable) - if err != nil { - return err - } - defer idxC.Close() valsC, err := tx.Cursor(h.historyValsTable) if err != nil { return err @@ -987,19 +1005,24 @@ func (h *History) warmup(ctx context.Context, txFrom, limit uint64, tx kv.Tx) er if limit != math.MaxUint64 && limit != 0 { txTo = txFrom + limit } + keyBuf := make([]byte, 256) for ; err == nil && k != nil; k, v, err = historyKeysCursor.Next() { if err != nil { return err } - if err = ctx.Err(); err != nil { - return err - } txNum := binary.BigEndian.Uint64(k) if txNum >= txTo { break } - _, _, _ = valsC.Seek(v[len(v)-8:]) - _, _ = idxC.SeekBothRange(v[:len(v)-8], k) + copy(keyBuf, v) + binary.BigEndian.PutUint64(keyBuf[len(v):], txNum) + _, _, _ = valsC.Seek(keyBuf) + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } } if err != nil { return fmt.Errorf("iterate over %s history keys: %w", h.filenameBase, err) @@ -1008,6 +1031,29 @@ func (h *History) warmup(ctx context.Context, txFrom, limit uint64, tx kv.Tx) er return nil } +func (h *History) isEmpty(tx kv.Tx) (bool, error) { + if h.largeValues { + k, err := kv.FirstKey(tx, h.historyValsTable) + if err != nil { + return false, err + } + k2, err := kv.FirstKey(tx, h.indexKeysTable) + if err != nil { + return false, err + } + return k == nil && k2 == nil, nil + } + k, err := kv.FirstKey(tx, h.historyValsTable) + if err != nil { + return false, err + } + k2, err := kv.FirstKey(tx, h.indexKeysTable) + if err != nil { + return false, err + } + return k == nil && k2 == nil, nil +} + func (h *History) prune(ctx context.Context, txFrom, txTo, limit uint64, logEvery *time.Ticker) error { historyKeysCursor, err := h.tx.RwCursorDupSort(h.indexKeysTable) if err != nil { @@ -1032,16 +1078,8 @@ func (h *History) prune(ctx context.Context, txFrom, txTo, limit uint64, logEver return nil } - valsC, err := h.tx.RwCursor(h.historyValsTable) - if err != nil { - return err - } - defer valsC.Close() - idxC, err := h.tx.RwCursorDupSort(h.indexTable) - if err != nil { - return err - } - defer idxC.Close() + collector := etl.NewCollector("snapshots", h.tmpdir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize)) + defer collector.Close() // Invariant: if some `txNum=N` pruned - it's pruned Fully // Means: can use DeleteCurrentDuplicates all values of given `txNum` @@ -1051,41 +1089,86 @@ func (h *History) prune(ctx context.Context, txFrom, txTo, limit uint64, logEver break } for ; err == nil && k != nil; k, v, err = historyKeysCursor.NextDup() { - if err = valsC.Delete(v[len(v)-8:]); err != nil { - return err - } - - if err = idxC.DeleteExact(v[:len(v)-8], k); err != nil { + if err := collector.Collect(v, nil); err != nil { return err } - //for vv, err := idxC.SeekBothRange(v[:len(v)-8], k); vv != nil; _, vv, err = idxC.NextDup() { - // if err != nil { - // return err - // } - // if binary.BigEndian.Uint64(vv) >= txTo { - // 
break - // } - // if err = idxC.DeleteCurrent(); err != nil { - // return err - // } - //} } // This DeleteCurrent needs to the last in the loop iteration, because it invalidates k and v if err = historyKeysCursor.DeleteCurrentDuplicates(); err != nil { return err } + } + + if h.largeValues { + valsC, err := h.tx.RwCursor(h.historyValsTable) + if err != nil { + return err + } + defer valsC.Close() - select { - case <-ctx.Done(): + if err := collector.Load(h.tx, "", func(key, _ []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + for k, _, err := valsC.Seek(key); k != nil; k, _, err = valsC.Next() { + if err != nil { + return err + } + if !bytes.HasPrefix(k, key) { + break + } + txNum := binary.BigEndian.Uint64(k[len(k)-8:]) + if txNum >= txTo { + break + } + if err = valsC.DeleteCurrent(); err != nil { + return err + } + + select { + case <-logEvery.C: + log.Info("[snapshots] prune history", "name", h.filenameBase, "to_step", fmt.Sprintf("%.2f", float64(txTo)/float64(h.aggregationStep)), "prefix", fmt.Sprintf("%x", key[:8])) + default: + } + } return nil - case <-logEvery.C: - log.Info("[snapshots] prune history", "name", h.filenameBase, "range", fmt.Sprintf("%.2f-%.2f", float64(txNum)/float64(h.aggregationStep), float64(txTo)/float64(h.aggregationStep))) - default: + }, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + return err + } + if err != nil { + return fmt.Errorf("iterate over %s history keys: %w", h.filenameBase, err) + } + } else { + valsC, err := h.tx.RwCursorDupSort(h.historyValsTable) + if err != nil { + return err + } + defer valsC.Close() + + if err := collector.Load(h.tx, "", func(key, _ []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + for k, v, err := valsC.SeekExact(key); k != nil; k, v, err = valsC.NextDup() { + if err != nil { + return err + } + txNum := binary.BigEndian.Uint64(v) + if txNum >= txTo { + break + } + if err = valsC.DeleteCurrent(); err != nil { + return err + } + + select { + case <-logEvery.C: + log.Info("[snapshots] prune history", "name", h.filenameBase, "to_step", fmt.Sprintf("%.2f", float64(txTo)/float64(h.aggregationStep)), "prefix", fmt.Sprintf("%x", key[:8])) + default: + } + } + return nil + }, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + return err + } + if err != nil { + return fmt.Errorf("iterate over %s history keys: %w", h.filenameBase, err) } - } - if err != nil { - return fmt.Errorf("iterate over %s history keys: %w", h.filenameBase, err) } return nil } @@ -1099,28 +1182,34 @@ func (h *History) pruneF(txFrom, txTo uint64, f func(txNum uint64, k, v []byte) var txKey [8]byte binary.BigEndian.PutUint64(txKey[:], txFrom) var k, v []byte - idxC, err := h.tx.RwCursorDupSort(h.indexTable) - if err != nil { - return err - } - defer idxC.Close() - valsC, err := h.tx.RwCursor(h.historyValsTable) - if err != nil { - return err + var valsC kv.RwCursor + var valsCDup kv.RwCursorDupSort + if h.largeValues { + valsC, err = h.tx.RwCursor(h.historyValsTable) + if err != nil { + return err + } + defer valsC.Close() + } else { + valsCDup, err = h.tx.RwCursorDupSort(h.historyValsTable) + if err != nil { + return err + } + defer valsCDup.Close() } - defer valsC.Close() for k, v, err = historyKeysCursor.Seek(txKey[:]); err == nil && k != nil; k, v, err = historyKeysCursor.Next() { txNum := binary.BigEndian.Uint64(k) if txNum >= txTo { break } - key, txnNumBytes := v[:len(v)-8], v[len(v)-8:] - { - kk, vv, err := valsC.SeekExact(txnNumBytes) + + if h.largeValues { + seek := append(common.Copy(v), k...) 
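
The prune/pruneF rewrite has to handle two on-disk encodings of the same history: the large-values layout, where each version is its own table entry keyed by key ++ bigEndian(txNum), and the dup-sorted layout, where the table key is the plain key and every duplicate value begins with the 8-byte txNum. Both shapes are inferred from the seek/strip arithmetic in this diff (seek := append(common.Copy(v), k...) on one side, vv[8:] on the other); a minimal standalone sketch of the two encodings:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	key, txNum, val := []byte("key1"), uint64(6), []byte("value1.1")

	// largeValues layout: one table entry per version,
	// table key = key ++ bigEndian(txNum), table value = the prior value.
	large := make([]byte, len(key)+8)
	copy(large, key)
	binary.BigEndian.PutUint64(large[len(key):], txNum)
	fmt.Printf("large: %x -> %q\n", large, val)

	// dup-sorted layout: table key = key,
	// duplicate value = bigEndian(txNum) ++ the prior value.
	dup := make([]byte, 8+len(val))
	binary.BigEndian.PutUint64(dup, txNum)
	copy(dup[8:], val)
	fmt.Printf("dup:   txNum=%d val=%q\n",
		binary.BigEndian.Uint64(dup[:8]), dup[8:]) // dup[8:] is what vv[8:] strips
}
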
+ kk, vv, err := valsC.SeekExact(seek) if err != nil { return err } - if err := f(txNum, key, vv); err != nil { + if err := f(txNum, kk[:len(kk)-8], vv); err != nil { return err } if kk != nil { @@ -1128,10 +1217,22 @@ func (h *History) pruneF(txFrom, txTo uint64, f func(txNum uint64, k, v []byte) return err } } + } else { + vv, err := valsCDup.SeekBothRange(v, k) + if err != nil { + return err + } + if binary.BigEndian.Uint64(vv) != txNum { + continue + } + if err := f(txNum, v, vv[8:]); err != nil { + return err + } + if err = valsCDup.DeleteCurrent(); err != nil { + return err + } } - if err = idxC.DeleteExact(key, k); err != nil { - return err - } + // This DeleteCurrent needs to the last in the loop iteration, because it invalidates k and v if err = historyKeysCursor.DeleteCurrent(); err != nil { return err @@ -1164,7 +1265,7 @@ func (h *History) MakeContext() *HistoryContext { } for _, item := range hc.files { if !item.src.frozen { - item.src.refcount.Inc() + item.src.refcount.Add(1) } } @@ -1188,7 +1289,7 @@ func (hc *HistoryContext) statelessIdxReader(i int) *recsplit.IndexReader { } r := hc.readers[i] if r == nil { - r = recsplit.NewIndexReader(hc.files[i].src.index) + r = hc.files[i].src.index.GetReaderFromPool() hc.readers[i] = r } return r @@ -1200,12 +1301,16 @@ func (hc *HistoryContext) Close() { if item.src.frozen { continue } - refCnt := item.src.refcount.Dec() + refCnt := item.src.refcount.Add(-1) //GC: last reader responsible to remove useles files: close it and delete if refCnt == 0 && item.src.canDelete.Load() { item.src.closeFilesAndRemove() } } + for _, r := range hc.readers { + r.Close() + } + } func (hc *HistoryContext) getFile(from, to uint64) (it ctxItem, ok bool) { @@ -1218,7 +1323,7 @@ func (hc *HistoryContext) getFile(from, to uint64) (it ctxItem, ok bool) { } func (hc *HistoryContext) GetNoState(key []byte, txNum uint64) ([]byte, bool, error) { - exactStep1, exactStep2, lastIndexedTxNum, foundExactShard1, foundExactShard2 := hc.h.localityIndex.lookupIdxFiles(hc.ic.loc.reader, hc.ic.loc.bm, hc.ic.loc.file, key, txNum) + exactStep1, exactStep2, lastIndexedTxNum, foundExactShard1, foundExactShard2 := hc.h.localityIndex.lookupIdxFiles(hc.ic.loc, key, txNum) //fmt.Printf("GetNoState [%x] %d\n", key, txNum) var foundTxNum uint64 @@ -1391,67 +1496,55 @@ func (hc *HistoryContext) GetNoStateWithRecent(key []byte, txNum uint64, roTx kv if roTx == nil { return nil, false, fmt.Errorf("roTx is nil") } - v, ok, err = hc.getNoStateFromDB(key, txNum, roTx) - if err != nil { - return nil, ok, err - } - if ok { - return v, true, nil - } - return nil, false, err + return hc.getNoStateFromDB(key, txNum, roTx) } func (hc *HistoryContext) getNoStateFromDB(key []byte, txNum uint64, tx kv.Tx) ([]byte, bool, error) { - indexCursor, err := tx.CursorDupSort(hc.h.indexTable) - if err != nil { - return nil, false, err - } - defer indexCursor.Close() - var txKey [8]byte - binary.BigEndian.PutUint64(txKey[:], txNum) - var foundTxNumVal []byte - if foundTxNumVal, err = indexCursor.SeekBothRange(key, txKey[:]); err != nil { - return nil, false, err - } - if foundTxNumVal != nil { - if hc.trace { - _, vv, _ := indexCursor.NextDup() - indexCursor.Prev() - _, prevV, _ := indexCursor.Prev() - fmt.Printf("hist: db: %s, %d<-%d->%d->%d, %x\n", hc.h.filenameBase, u64or0(prevV), txNum, u64or0(foundTxNumVal), u64or0(vv), key) - } - - var historyKeysCursor kv.CursorDupSort - if historyKeysCursor, err = tx.CursorDupSort(hc.h.indexKeysTable); err != nil { + if hc.h.largeValues { + c, err := 
tx.Cursor(hc.h.historyValsTable) + if err != nil { return nil, false, err } - defer historyKeysCursor.Close() - var vn []byte - if vn, err = historyKeysCursor.SeekBothRange(foundTxNumVal, key); err != nil { + defer c.Close() + seek := make([]byte, len(key)+8) + copy(seek, key) + binary.BigEndian.PutUint64(seek[len(key):], txNum) + kAndTxNum, val, err := c.Seek(seek) + if err != nil { return nil, false, err } - valNum := binary.BigEndian.Uint64(vn[len(vn)-8:]) - if valNum == 0 { - // This is special valNum == 0, which is empty value - return nil, true, nil - } - var v []byte - if v, err = tx.GetOne(hc.h.historyValsTable, vn[len(vn)-8:]); err != nil { - return nil, false, err + if kAndTxNum == nil || !bytes.Equal(kAndTxNum[:len(kAndTxNum)-8], key) { + return nil, false, nil } - return v, true, nil + // val == []byte{},m eans key was created in this txNum and doesn't exists before. + return val, true, nil } - return nil, false, nil + c, err := tx.CursorDupSort(hc.h.historyValsTable) + if err != nil { + return nil, false, err + } + defer c.Close() + seek := make([]byte, len(key)+8) + copy(seek, key) + binary.BigEndian.PutUint64(seek[len(key):], txNum) + val, err := c.SeekBothRange(key, seek[len(key):]) + if err != nil { + return nil, false, err + } + if val == nil { + return nil, false, nil + } + // `val == []byte{}` means key was created in this txNum and doesn't exists before. + return val[8:], true, nil } -func (hc *HistoryContext) WalkAsOf(startTxNum uint64, from, to []byte, roTx kv.Tx, amount int) *StateAsOfIter { - hi := StateAsOfIter{ - hasNextInDb: true, - roTx: roTx, - indexTable: hc.h.indexTable, - idxKeysTable: hc.h.indexKeysTable, - valsTable: hc.h.historyValsTable, - from: from, to: to, limit: amount, +func (hc *HistoryContext) WalkAsOf(startTxNum uint64, from, to []byte, roTx kv.Tx, limit int) iter.KV { + hi := &StateAsOfIterF{ + from: from, to: to, limit: limit, + + hc: hc, + compressVals: hc.h.compressVals, + startTxNum: startTxNum, } for _, item := range hc.ic.files { if item.endTxNum <= startTxNum { @@ -1463,66 +1556,72 @@ func (hc *HistoryContext) WalkAsOf(startTxNum uint64, from, to []byte, roTx kv.T if g.HasNext() { key, offset := g.NextUncompressed() heap.Push(&hi.h, &ReconItem{g: g, key: key, startTxNum: item.startTxNum, endTxNum: item.endTxNum, txNum: item.endTxNum, startOffset: offset, lastOffset: offset}) - hi.hasNextInFiles = true } - hi.total += uint64(item.getter.Size()) } - hi.hc = hc - hi.compressVals = hc.h.compressVals - hi.startTxNum = startTxNum binary.BigEndian.PutUint64(hi.startTxKey[:], startTxNum) - hi.advanceInDb() - hi.advanceInFiles() - hi.advance() - return &hi + if err := hi.advanceInFiles(); err != nil { + panic(err) + } + + var dbit iter.KV + if hc.h.largeValues { + dbi := &StateAsOfIterDB{ + roTx: roTx, + indexTable: hc.h.indexTable, + idxKeysTable: hc.h.indexKeysTable, + valsTable: hc.h.historyValsTable, + from: from, to: to, limit: limit, + + hc: hc, + startTxNum: startTxNum, + } + binary.BigEndian.PutUint64(dbi.startTxKey[:], startTxNum) + if err := dbi.advance(); err != nil { + panic(err) + } + dbit = dbi + } else { + dbi := &StateAsOfIterDbDup{ + roTx: roTx, + indexTable: hc.h.indexTable, + idxKeysTable: hc.h.indexKeysTable, + valsTable: hc.h.historyValsTable, + from: from, to: to, limit: limit, + + hc: hc, + startTxNum: startTxNum, + } + binary.BigEndian.PutUint64(dbi.startTxKey[:], startTxNum) + if err := dbi.advanceInDb(); err != nil { + panic(err) + } + dbit = dbi + } + return iter.UnionKV(hi, dbit, limit) } -type StateAsOfIter struct { - 
roTx kv.Tx - txNum2kCursor kv.CursorDupSort - idxCursor kv.CursorDupSort - hc *HistoryContext - valsTable string - idxKeysTable string - indexTable string +// StateAsOfIter - returns state range at given time in history +type StateAsOfIterF struct { + hc *HistoryContext + limit int from, to []byte - limit int + nextVal []byte + nextKey []byte - nextFileKey []byte - nextDbKey []byte - nextDbVal []byte - nextFileVal []byte - nextVal []byte - nextKey []byte - - h ReconHeap - total uint64 - startTxNum uint64 - advFileCnt int - advDbCnt int - startTxKey [8]byte - txnKey [8]byte - hasNextInFiles bool - hasNextInDb bool - compressVals bool + h ReconHeap + startTxNum uint64 + startTxKey [8]byte + txnKey [8]byte + compressVals bool k, v, kBackup, vBackup []byte } -func (hi *StateAsOfIter) Stat() (int, int) { return hi.advDbCnt, hi.advFileCnt } - -func (hi *StateAsOfIter) Close() { - if hi.idxCursor != nil { - hi.idxCursor.Close() - } - if hi.txNum2kCursor != nil { - hi.txNum2kCursor.Close() - } +func (hi *StateAsOfIterF) Close() { } -func (hi *StateAsOfIter) advanceInFiles() { - hi.advFileCnt++ +func (hi *StateAsOfIterF) advanceInFiles() error { for hi.h.Len() > 0 { top := heap.Pop(&hi.h).(*ReconItem) key := top.key @@ -1547,7 +1646,7 @@ func (hi *StateAsOfIter) advanceInFiles() { continue } - if bytes.Equal(key, hi.nextFileKey) { + if bytes.Equal(key, hi.nextKey) { continue } ef, _ := eliasfano32.ReadEliasFano(idxVal) @@ -1556,168 +1655,264 @@ func (hi *StateAsOfIter) advanceInFiles() { continue } - hi.nextFileKey = key + hi.nextKey = key binary.BigEndian.PutUint64(hi.txnKey[:], n) historyItem, ok := hi.hc.getFile(top.startTxNum, top.endTxNum) if !ok { - panic(fmt.Errorf("no %s file found for [%x]", hi.hc.h.filenameBase, hi.nextFileKey)) + return fmt.Errorf("no %s file found for [%x]", hi.hc.h.filenameBase, hi.nextKey) } reader := hi.hc.statelessIdxReader(historyItem.i) - offset := reader.Lookup2(hi.txnKey[:], hi.nextFileKey) + offset := reader.Lookup2(hi.txnKey[:], hi.nextKey) g := hi.hc.statelessGetter(historyItem.i) g.Reset(offset) if hi.compressVals { - hi.nextFileVal, _ = g.Next(nil) + hi.nextVal, _ = g.Next(nil) } else { - hi.nextFileVal, _ = g.NextUncompressed() + hi.nextVal, _ = g.NextUncompressed() } - hi.nextFileKey = key - return + return nil } - hi.hasNextInFiles = false + hi.nextKey = nil + return nil } -func (hi *StateAsOfIter) advanceInDb() { - hi.advDbCnt++ - var k []byte +func (hi *StateAsOfIterF) HasNext() bool { + return hi.limit != 0 && hi.nextKey != nil +} + +func (hi *StateAsOfIterF) Next() ([]byte, []byte, error) { + hi.limit-- + hi.k, hi.v = append(hi.k[:0], hi.nextKey...), append(hi.v[:0], hi.nextVal...) 
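
Next() here copies the pending key/value into working buffers and then swaps them with backup buffers before advancing ("Satisfy iter.Dual Invariant 2"). As I read it, the point of the swap is that the slices handed to the caller must stay valid while the following advance/Next refills the working buffers; without it, the next append(hi.k[:0], ...) would overwrite bytes the caller still holds. A toy demonstration of the mechanism:

package main

import "fmt"

type pendingIter struct {
	next, k, kBackup []byte
}

// advance stands in for advanceInFiles/advance: it refills the pending key.
func (it *pendingIter) advance() { it.next = append(it.next[:0], 'z') }

func (it *pendingIter) Next() []byte {
	it.k = append(it.k[:0], it.next...) // fill the working buffer
	it.k, it.kBackup = it.kBackup, it.k // swap: caller receives the filled one
	it.advance()                        // free to reuse it.k and it.next now
	return it.kBackup
}

func main() {
	it := &pendingIter{next: []byte("a")}
	first := it.Next()
	second := it.Next()
	fmt.Println(string(first), string(second)) // "a z": first is still intact
}
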
+ + // Satisfy iter.Dual Invariant 2 + hi.k, hi.kBackup, hi.v, hi.vBackup = hi.kBackup, hi.k, hi.vBackup, hi.v + if err := hi.advanceInFiles(); err != nil { + return nil, nil, err + } + return hi.kBackup, hi.vBackup, nil +} + +// StateAsOfIterDB - returns state range at given time in history +type StateAsOfIterDB struct { + roTx kv.Tx + txNum2kCursor kv.CursorDupSort + valsC kv.Cursor + hc *HistoryContext + valsTable string + idxKeysTable string + indexTable string + + from, to []byte + limit int + + nextKey, nextVal []byte + + startTxNum uint64 + startTxKey [8]byte + + k, v, kBackup, vBackup []byte + err error +} + +func (hi *StateAsOfIterDB) Close() { + if hi.valsC != nil { + hi.valsC.Close() + } + if hi.txNum2kCursor != nil { + hi.txNum2kCursor.Close() + } +} + +func (hi *StateAsOfIterDB) advance() error { + var seek []byte var err error - if hi.idxCursor == nil { - if hi.idxCursor, err = hi.roTx.CursorDupSort(hi.indexTable); err != nil { - // TODO pass error properly around - panic(err) + if hi.txNum2kCursor == nil { + if hi.valsC, err = hi.roTx.Cursor(hi.valsTable); err != nil { + return err } if hi.txNum2kCursor, err = hi.roTx.CursorDupSort(hi.idxKeysTable); err != nil { - panic(err) + return err } - if k, _, err = hi.idxCursor.Seek(hi.from); err != nil { - // TODO pass error properly around - panic(err) + firstKey, _, err := hi.valsC.Seek(hi.from) + if err != nil { + return err } + if firstKey == nil { + hi.nextKey = nil + return nil + } + seek = append(common.Copy(firstKey[:len(firstKey)-8]), hi.startTxKey[:]...) } else { - if k, _, err = hi.idxCursor.NextNoDup(); err != nil { - panic(err) + next, ok := kv.NextSubtree(hi.nextKey) + if !ok { + hi.nextKey = nil + return nil } + + seek = append(next, hi.startTxKey[:]...) } - for ; k != nil; k, _, err = hi.idxCursor.NextNoDup() { + for k, v, err := hi.valsC.Seek(seek); k != nil; k, v, err = hi.valsC.Seek(seek) { if err != nil { - panic(err) + return err } - if hi.to != nil && bytes.Compare(k, hi.to) >= 0 { + if hi.to != nil && bytes.Compare(k[:len(k)-8], hi.to) >= 0 { break } + if !bytes.Equal(seek[:len(k)-8], k[:len(k)-8]) { + copy(seek[:len(k)-8], k[:len(k)-8]) + continue + } + hi.nextKey = k[:len(k)-8] + hi.nextVal = v + return nil + } + hi.nextKey = nil + return nil +} - foundTxNumVal, err := hi.idxCursor.SeekBothRange(k, hi.startTxKey[:]) - if err != nil { - panic(err) +func (hi *StateAsOfIterDB) HasNext() bool { + if hi.err != nil { + return true + } + return hi.limit != 0 && hi.nextKey != nil +} + +func (hi *StateAsOfIterDB) Next() ([]byte, []byte, error) { + if hi.err != nil { + return nil, nil, hi.err + } + hi.limit-- + hi.k, hi.v = hi.nextKey, hi.nextVal + + // Satisfy iter.Dual Invariant 2 + hi.k, hi.kBackup, hi.v, hi.vBackup = hi.kBackup, hi.k, hi.vBackup, hi.v + if err := hi.advance(); err != nil { + return nil, nil, err + } + return hi.kBackup, hi.vBackup, nil +} + +// StateAsOfIter - returns state range at given time in history +type StateAsOfIterDbDup struct { + roTx kv.Tx + txNum2kCursor kv.CursorDupSort + valsC kv.CursorDupSort + hc *HistoryContext + valsTable string + idxKeysTable string + indexTable string + + from, to []byte + limit int + + nextKey, nextVal []byte + + startTxNum uint64 + startTxKey [8]byte + + k, v, kBackup, vBackup []byte + err error +} + +func (hi *StateAsOfIterDbDup) Close() { + if hi.valsC != nil { + hi.valsC.Close() + } + if hi.txNum2kCursor != nil { + hi.txNum2kCursor.Close() + } +} + +func (hi *StateAsOfIterDbDup) advanceInDb() error { + var seek []byte + var err error + if hi.txNum2kCursor 
== nil { + if hi.valsC, err = hi.roTx.CursorDupSort(hi.valsTable); err != nil { + return err } - if foundTxNumVal == nil { - continue + if hi.txNum2kCursor, err = hi.roTx.CursorDupSort(hi.idxKeysTable); err != nil { + return err } - //txNum := binary.BigEndian.Uint64(foundTxNumVal) - //if txNum >= hi.endTxNum { - // continue - //} - hi.nextDbKey = append(hi.nextDbKey[:0], k...) - vn, err := hi.txNum2kCursor.SeekBothRange(foundTxNumVal, k) + seek = hi.from + } else { + next, ok := kv.NextSubtree(hi.nextKey) + if !ok { + hi.nextKey = nil + return nil + } + seek = next + } + for k, _, err := hi.valsC.Seek(seek); k != nil; k, _, err = hi.valsC.NextNoDup() { if err != nil { - panic(err) + return err } - valNum := binary.BigEndian.Uint64(vn[len(vn)-8:]) - if valNum == 0 { - // This is special valNum == 0, which is empty value - hi.nextDbVal = hi.nextDbVal[:0] - return + if hi.to != nil && bytes.Compare(k, hi.to) >= 0 { + break } - v, err := hi.roTx.GetOne(hi.valsTable, vn[len(vn)-8:]) + v, err := hi.valsC.SeekBothRange(k, hi.startTxKey[:]) if err != nil { - panic(err) + return err } - hi.nextDbVal = append(hi.nextDbVal[:0], v...) - return - } - hi.idxCursor.Close() - hi.idxCursor = nil - hi.hasNextInDb = false -} - -func (hi *StateAsOfIter) advance() { - if hi.hasNextInFiles { - if hi.hasNextInDb { - c := bytes.Compare(hi.nextFileKey, hi.nextDbKey) - if c < 0 { - hi.nextKey = append(hi.nextKey[:0], hi.nextFileKey...) - hi.nextVal = append(hi.nextVal[:0], hi.nextFileVal...) - hi.advanceInFiles() - } else if c > 0 { - hi.nextKey = append(hi.nextKey[:0], hi.nextDbKey...) - hi.nextVal = append(hi.nextVal[:0], hi.nextDbVal...) - hi.advanceInDb() - } else { - hi.nextKey = append(hi.nextKey[:0], hi.nextFileKey...) - hi.nextVal = append(hi.nextVal[:0], hi.nextFileVal...) - hi.advanceInDb() - hi.advanceInFiles() - } - } else { - hi.nextKey = append(hi.nextKey[:0], hi.nextFileKey...) - hi.nextVal = append(hi.nextVal[:0], hi.nextFileVal...) - hi.advanceInFiles() - } - } else if hi.hasNextInDb { - hi.nextKey = append(hi.nextKey[:0], hi.nextDbKey...) - hi.nextVal = append(hi.nextVal[:0], hi.nextDbVal...) - hi.advanceInDb() - } else { - hi.nextKey = nil - hi.nextVal = nil + if v == nil { + continue + } + hi.nextKey = k + hi.nextVal = v[8:] + return nil } + hi.nextKey = nil + return nil } -func (hi *StateAsOfIter) HasNext() bool { - return hi.limit != 0 && (hi.hasNextInFiles || hi.hasNextInDb || hi.nextKey != nil) +func (hi *StateAsOfIterDbDup) HasNext() bool { + if hi.err != nil { + return true + } + return hi.limit != 0 && hi.nextKey != nil } -func (hi *StateAsOfIter) Next() ([]byte, []byte, error) { +func (hi *StateAsOfIterDbDup) Next() ([]byte, []byte, error) { + if hi.err != nil { + return nil, nil, hi.err + } hi.limit-- - hi.k, hi.v = append(hi.k[:0], hi.nextKey...), append(hi.v[:0], hi.nextVal...) 
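
advanceInDb above hops from one key's version cluster to the next with kv.NextSubtree, which I understand to return the smallest key strictly greater than every key sharing the given prefix (and false when the prefix is all 0xff, so nothing can follow it). A standalone reimplementation of that contract, for illustration only:

package main

import "fmt"

// nextSubtree mirrors the assumed kv.NextSubtree contract: increment the last
// byte that is not 0xff and drop everything after it.
func nextSubtree(prefix []byte) ([]byte, bool) {
	out := append([]byte{}, prefix...)
	for i := len(out) - 1; i >= 0; i-- {
		if out[i] != 0xff {
			out[i]++
			return out[:i+1], true
		}
	}
	return nil, false // all 0xff: no key can follow this prefix
}

func main() {
	n, ok := nextSubtree([]byte{0x01, 0x02})
	fmt.Printf("%x %v\n", n, ok) // 0103 true
	n, ok = nextSubtree([]byte{0x01, 0xff})
	fmt.Printf("%x %v\n", n, ok) // 02 true
	_, ok = nextSubtree([]byte{0xff})
	fmt.Println(ok) // false
}
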
+ hi.k, hi.v = hi.nextKey, hi.nextVal // Satisfy iter.Dual Invariant 2 hi.k, hi.kBackup, hi.v, hi.vBackup = hi.kBackup, hi.k, hi.vBackup, hi.v - hi.advance() + if err := hi.advanceInDb(); err != nil { + return nil, nil, err + } return hi.kBackup, hi.vBackup, nil } -func (hc *HistoryContext) IterateChanged(fromTxNum, toTxNum int, asc order.By, limit int, roTx kv.Tx) *HistoryChangesIter { - if asc == order.Desc { +func (hc *HistoryContext) iterateChangedFrozen(fromTxNum, toTxNum int, asc order.By, limit int) (iter.KV, error) { + if asc == false { panic("not supported yet") } - if limit >= 0 { - panic("not supported yet") + if len(hc.ic.files) == 0 { + return iter.EmptyKV, nil } - if fromTxNum < 0 { - panic("not supported yet") - } - if toTxNum < 0 { - panic("not supported yet") - } - startTxNum, endTxNum := uint64(fromTxNum), uint64(toTxNum) - hi := HistoryChangesIter{ - hasNextInDb: true, - roTx: roTx, - indexTable: hc.h.indexTable, - idxKeysTable: hc.h.indexKeysTable, - valsTable: hc.h.historyValsTable, + if fromTxNum >= 0 && hc.ic.files[len(hc.ic.files)-1].endTxNum <= uint64(fromTxNum) { + return iter.EmptyKV, nil } + hi := &HistoryChangesIterF{ + hc: hc, + compressVals: hc.h.compressVals, + startTxNum: cmp.Max(0, uint64(fromTxNum)), + endTxNum: toTxNum, + limit: limit, + } + if fromTxNum >= 0 { + binary.BigEndian.PutUint64(hi.startTxKey[:], uint64(fromTxNum)) + } for _, item := range hc.ic.files { - if item.endTxNum >= endTxNum { - hi.hasNextInDb = false - } - if item.endTxNum <= startTxNum { + if fromTxNum >= 0 && item.endTxNum <= uint64(fromTxNum) { continue } - if item.startTxNum >= endTxNum { + if toTxNum >= 0 && item.startTxNum >= uint64(toTxNum) { break } g := item.src.decompressor.MakeGetter() @@ -1725,63 +1920,94 @@ func (hc *HistoryContext) IterateChanged(fromTxNum, toTxNum int, asc order.By, l if g.HasNext() { key, offset := g.NextUncompressed() heap.Push(&hi.h, &ReconItem{g: g, key: key, startTxNum: item.startTxNum, endTxNum: item.endTxNum, txNum: item.endTxNum, startOffset: offset, lastOffset: offset}) - hi.hasNextInFiles = true } - hi.total += uint64(g.Size()) } - hi.hc = hc - hi.compressVals = hc.h.compressVals - hi.startTxNum = startTxNum - hi.endTxNum = endTxNum - binary.BigEndian.PutUint64(hi.startTxKey[:], startTxNum) - hi.advanceInDb() - hi.advanceInFiles() - hi.advance() - return &hi -} - -type HistoryChangesIter struct { - roTx kv.Tx - txNum2kCursor kv.CursorDupSort - idxCursor kv.CursorDupSort - hc *HistoryContext - valsTable string - idxKeysTable string - indexTable string - nextFileKey []byte - nextDbKey []byte - nextDbVal []byte - nextFileVal []byte - nextVal []byte - nextKey []byte - h ReconHeap - total uint64 - endTxNum uint64 - startTxNum uint64 - advFileCnt int - advDbCnt int - startTxKey [8]byte - txnKey [8]byte - hasNextInFiles bool - hasNextInDb bool - compressVals bool - - k, v []byte -} - -func (hi *HistoryChangesIter) Stat() (int, int) { return hi.advDbCnt, hi.advFileCnt } - -func (hi *HistoryChangesIter) Close() { - if hi.idxCursor != nil { - hi.idxCursor.Close() + if err := hi.advance(); err != nil { + return nil, err } - if hi.txNum2kCursor != nil { - hi.txNum2kCursor.Close() + return hi, nil +} + +func (hc *HistoryContext) iterateChangedRecent(fromTxNum, toTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.KV, error) { + if asc == order.Desc { + panic("not supported yet") + } + rangeIsInFiles := toTxNum >= 0 && len(hc.ic.files) > 0 && hc.ic.files[len(hc.ic.files)-1].endTxNum >= uint64(toTxNum) + if rangeIsInFiles { + return iter.EmptyKV, nil + } 
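
Both WalkAsOf and HistoryRange (below) stitch the frozen-file iterator and the recent-DB iterator together with iter.UnionKV: a sorted merge in which a key present on both sides is emitted once. A slice-backed model of that merge; the tie-break on equal keys (the files side wins) is my assumption, not something this diff states:

package main

import "fmt"

type pair struct{ k, v string }

// unionKV merges two key-sorted streams; on equal keys the first stream's
// entry is kept and the second's is dropped (assumed tie-break). limit < 0
// means unlimited, matching the -1 convention used by the callers.
func unionKV(a, b []pair, limit int) []pair {
	var out []pair
	for (len(a) > 0 || len(b) > 0) && limit != 0 {
		switch {
		case len(b) == 0 || (len(a) > 0 && a[0].k < b[0].k):
			out, a = append(out, a[0]), a[1:]
		case len(a) == 0 || b[0].k < a[0].k:
			out, b = append(out, b[0]), b[1:]
		default: // equal keys: emit once
			out, a, b = append(out, a[0]), a[1:], b[1:]
		}
		limit--
	}
	return out
}

func main() {
	files := []pair{{"k1", "frozen"}, {"k3", "x"}}
	db := []pair{{"k1", "recent"}, {"k2", "y"}}
	fmt.Println(unionKV(files, db, -1)) // [{k1 frozen} {k2 y} {k3 x}]
}
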
+ if hc.h.largeValues { + dbi := &HistoryChangesIterDB{ + hc: hc, + endTxNum: toTxNum, + roTx: roTx, + indexTable: hc.h.indexTable, + idxKeysTable: hc.h.indexKeysTable, + valsTable: hc.h.historyValsTable, + limit: limit, + } + if fromTxNum >= 0 { + binary.BigEndian.PutUint64(dbi.startTxKey[:], uint64(fromTxNum)) + } + if err := dbi.advance(); err != nil { + return nil, err + } + return dbi, nil + } + dbi := &HistoryChangesIterDBDup{ + hc: hc, + endTxNum: toTxNum, + + roTx: roTx, + indexTable: hc.h.indexTable, + idxKeysTable: hc.h.indexKeysTable, + valsTable: hc.h.historyValsTable, + } + if fromTxNum >= 0 { + binary.BigEndian.PutUint64(dbi.startTxKey[:], uint64(fromTxNum)) } + if err := dbi.advance(); err != nil { + return nil, err + } + return dbi, nil +} + +func (hc *HistoryContext) HistoryRange(fromTxNum, toTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.KV, error) { + if asc == order.Desc { + panic("not supported yet") + } + itOnFiles, err := hc.iterateChangedFrozen(fromTxNum, toTxNum, asc, limit) + if err != nil { + return nil, err + } + itOnDB, err := hc.iterateChangedRecent(fromTxNum, toTxNum, asc, limit, roTx) + if err != nil { + return nil, err + } + + return iter.UnionKV(itOnFiles, itOnDB, limit), nil } -func (hi *HistoryChangesIter) advanceInFiles() { - hi.advFileCnt++ +type HistoryChangesIterF struct { + hc *HistoryContext + nextVal []byte + nextKey []byte + h ReconHeap + startTxNum uint64 + endTxNum int + startTxKey [8]byte + txnKey [8]byte + compressVals bool + + k, v, kBackup, vBackup []byte + err error + limit int +} + +func (hi *HistoryChangesIterF) Close() { +} + +func (hi *HistoryChangesIterF) advance() error { for hi.h.Len() > 0 { top := heap.Pop(&hi.h).(*ReconItem) key := top.key @@ -1800,267 +2026,263 @@ func (hi *HistoryChangesIter) advanceInFiles() { heap.Push(&hi.h, top) } - if bytes.Equal(key, hi.nextFileKey) { + if bytes.Equal(key, hi.nextKey) { continue } ef, _ := eliasfano32.ReadEliasFano(idxVal) - n, ok := ef.Search(hi.startTxNum) + n, ok := ef.Search(hi.startTxNum) //TODO: if startTxNum==0, can do ef.Get(0) if !ok { continue } - if n >= hi.endTxNum { + if int(n) >= hi.endTxNum { continue } - hi.nextFileKey = key + hi.nextKey = key binary.BigEndian.PutUint64(hi.txnKey[:], n) historyItem, ok := hi.hc.getFile(top.startTxNum, top.endTxNum) if !ok { - panic(fmt.Errorf("no %s file found for [%x]", hi.hc.h.filenameBase, hi.nextFileKey)) + return fmt.Errorf("HistoryChangesIterF: no %s file found for [%x]", hi.hc.h.filenameBase, hi.nextKey) } reader := hi.hc.statelessIdxReader(historyItem.i) - offset := reader.Lookup2(hi.txnKey[:], hi.nextFileKey) + offset := reader.Lookup2(hi.txnKey[:], hi.nextKey) g := hi.hc.statelessGetter(historyItem.i) g.Reset(offset) if hi.compressVals { - hi.nextFileVal, _ = g.Next(nil) + hi.nextVal, _ = g.Next(nil) } else { - hi.nextFileVal, _ = g.NextUncompressed() + hi.nextVal, _ = g.NextUncompressed() } - hi.nextFileKey = key - return + return nil } - hi.hasNextInFiles = false + hi.nextKey = nil + return nil } -func (hi *HistoryChangesIter) advanceInDb() { - hi.advDbCnt++ +func (hi *HistoryChangesIterF) HasNext() bool { + if hi.err != nil { // always true, then .Next() call will return this error + return true + } + if hi.limit == 0 { // limit reached + return false + } + if hi.nextKey == nil { // EndOfTable + return false + } + return true + //if hi.toPrefix == nil { // s.nextK == nil check is above + // return true + //} +} + +func (hi *HistoryChangesIterF) Next() ([]byte, []byte, error) { + if hi.err != nil { + return nil, nil, 
hi.err + } + hi.limit-- + hi.k, hi.v = append(hi.k[:0], hi.nextKey...), append(hi.v[:0], hi.nextVal...) + + // Satisfy iter.Dual Invariant 2 + hi.k, hi.kBackup, hi.v, hi.vBackup = hi.kBackup, hi.k, hi.vBackup, hi.v + if err := hi.advance(); err != nil { + return nil, nil, err + } + return hi.kBackup, hi.vBackup, nil +} + +type HistoryChangesIterDB struct { + roTx kv.Tx + txNum2kCursor kv.CursorDupSort + idxCursor kv.CursorDupSort + hc *HistoryContext + valsTable string + idxKeysTable string + indexTable string + endTxNum int + startTxKey [8]byte + + nextKey, nextVal []byte + k, v, kBackup, vBackup []byte + err error + limit int +} + +func (hi *HistoryChangesIterDB) Close() { + if hi.idxCursor != nil { + hi.idxCursor.Close() + } + if hi.txNum2kCursor != nil { + hi.txNum2kCursor.Close() + } +} + +func (hi *HistoryChangesIterDB) advance() (err error) { var k []byte - var err error if hi.idxCursor == nil { if hi.idxCursor, err = hi.roTx.CursorDupSort(hi.indexTable); err != nil { - // TODO pass error properly around - panic(err) + return err } if hi.txNum2kCursor, err = hi.roTx.CursorDupSort(hi.idxKeysTable); err != nil { - panic(err) + return err } if k, _, err = hi.idxCursor.First(); err != nil { - // TODO pass error properly around - panic(err) + return err } } else { if k, _, err = hi.idxCursor.NextNoDup(); err != nil { - panic(err) + return err } } for ; k != nil; k, _, err = hi.idxCursor.NextNoDup() { if err != nil { - panic(err) + return err } foundTxNumVal, err := hi.idxCursor.SeekBothRange(k, hi.startTxKey[:]) if err != nil { - panic(err) + return err } if foundTxNumVal == nil { continue } txNum := binary.BigEndian.Uint64(foundTxNumVal) - if txNum >= hi.endTxNum { + if hi.endTxNum >= 0 && int(txNum) >= hi.endTxNum { continue } - hi.nextDbKey = append(hi.nextDbKey[:0], k...) + hi.nextKey = k vn, err := hi.txNum2kCursor.SeekBothRange(foundTxNumVal, k) if err != nil { - panic(err) + return err } valNum := binary.BigEndian.Uint64(vn[len(vn)-8:]) if valNum == 0 { // This is special valNum == 0, which is empty value - hi.nextDbVal = hi.nextDbVal[:0] - return - } - v, err := hi.roTx.GetOne(hi.valsTable, vn[len(vn)-8:]) - if err != nil { - panic(err) + hi.nextVal = hi.nextVal[:0] + return err } - hi.nextDbVal = append(hi.nextDbVal[:0], v...) - return - } - hi.idxCursor.Close() - hi.idxCursor = nil - hi.hasNextInDb = false -} - -func (hi *HistoryChangesIter) advance() { - if hi.hasNextInFiles { - if hi.hasNextInDb { - c := bytes.Compare(hi.nextFileKey, hi.nextDbKey) - if c < 0 { - hi.nextKey = append(hi.nextKey[:0], hi.nextFileKey...) - hi.nextVal = append(hi.nextVal[:0], hi.nextFileVal...) - hi.advanceInFiles() - } else if c > 0 { - hi.nextKey = append(hi.nextKey[:0], hi.nextDbKey...) - hi.nextVal = append(hi.nextVal[:0], hi.nextDbVal...) - hi.advanceInDb() - } else { - hi.nextKey = append(hi.nextKey[:0], hi.nextFileKey...) - hi.nextVal = append(hi.nextVal[:0], hi.nextFileVal...) - hi.advanceInDb() - hi.advanceInFiles() - } - } else { - hi.nextKey = append(hi.nextKey[:0], hi.nextFileKey...) - hi.nextVal = append(hi.nextVal[:0], hi.nextFileVal...) - hi.advanceInFiles() - } - } else if hi.hasNextInDb { - hi.nextKey = append(hi.nextKey[:0], hi.nextDbKey...) - hi.nextVal = append(hi.nextVal[:0], hi.nextDbVal...) 
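
advance() above leans on CursorDupSort.SeekBothRange(key, startTxKey): position at the first duplicate of key that sorts at or after the given value, i.e. the first recorded change at or after startTxNum, or nil when there is none. A sorted-slice model of that call:

package main

import (
	"fmt"
	"sort"
)

// seekBothRange models kv.CursorDupSort.SeekBothRange over an in-memory
// index: per key, the duplicates are the sorted txNums of the key's changes.
func seekBothRange(index map[string][]uint64, key string, from uint64) (uint64, bool) {
	dups := index[key]
	i := sort.Search(len(dups), func(i int) bool { return dups[i] >= from })
	if i == len(dups) {
		return 0, false // cursor returns nil: no change at or after `from`
	}
	return dups[i], true
}

func main() {
	index := map[string][]uint64{"key2": {3, 6, 7}}
	fmt.Println(seekBothRange(index, "key2", 5)) // 6 true
	fmt.Println(seekBothRange(index, "key2", 8)) // 0 false
}
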
- hi.advanceInDb() - } else { - hi.nextKey = nil - hi.nextVal = nil + hi.nextVal, err = hi.roTx.GetOne(hi.valsTable, vn[len(vn)-8:]) + return err } + hi.nextKey = nil + return nil } -func (hi *HistoryChangesIter) HasNext() bool { - return hi.hasNextInFiles || hi.hasNextInDb || hi.nextKey != nil -} - -func (hi *HistoryChangesIter) Next() ([]byte, []byte, error) { - hi.k = append(hi.k[:0], hi.nextKey...) - hi.v = append(hi.v[:0], hi.nextVal...) - hi.advance() - return hi.k, hi.v, nil +func (hi *HistoryChangesIterDB) HasNext() bool { + if hi.err != nil { // always true, then .Next() call will return this error + return true + } + if hi.limit == 0 { // limit reached + return false + } + if hi.nextKey == nil { // EndOfTable + return false + } + return true } -func (hc *HistoryContext) IterateRecentlyChanged(startTxNum, endTxNum uint64, roTx kv.Tx, f func([]byte, []byte) error) error { - col := etl.NewCollector("", hc.h.tmpdir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize)) - defer col.Close() - col.LogLvl(log.LvlTrace) - - it := hc.IterateRecentlyChangedUnordered(startTxNum, endTxNum, roTx) - defer it.Close() - for it.HasNext() { - k, v, err := it.Next() - if err != nil { - return err - } - if err := col.Collect(k, v); err != nil { - return err - } +func (hi *HistoryChangesIterDB) Next() ([]byte, []byte, error) { + if hi.err != nil { + return nil, nil, hi.err } - return col.Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - return f(k, v) - }, etl.TransformArgs{}) -} + hi.limit-- + hi.k, hi.v = append(hi.k[:0], hi.nextKey...), append(hi.v[:0], hi.nextVal...) -func (hc *HistoryContext) IterateRecentlyChangedUnordered(startTxNum, endTxNum uint64, roTx kv.Tx) *HistoryIterator2 { - hi := HistoryIterator2{ - hasNext: true, - roTx: roTx, - idxKeysTable: hc.h.indexKeysTable, - valsTable: hc.h.historyValsTable, - hc: hc, - startTxNum: startTxNum, - endTxNum: endTxNum, + // Satisfy iter.Dual Invariant 2 + hi.k, hi.kBackup, hi.v, hi.vBackup = hi.kBackup, hi.k, hi.vBackup, hi.v + if err := hi.advance(); err != nil { + return nil, nil, err } - binary.BigEndian.PutUint64(hi.startTxKey[:], startTxNum) - hi.advanceInDb() - return &hi + return hi.kBackup, hi.vBackup, nil } -type HistoryIterator2 struct { +type HistoryChangesIterDBDup struct { roTx kv.Tx txNum2kCursor kv.CursorDupSort + valsCursor kv.CursorDupSort hc *HistoryContext - idxKeysTable string valsTable string - nextKey []byte - nextVal []byte - nextErr error - endTxNum uint64 - startTxNum uint64 - advDbCnt int + idxKeysTable string + indexTable string + endTxNum int startTxKey [8]byte - hasNext bool -} -func (hi *HistoryIterator2) Stat() int { return hi.advDbCnt } + nextKey, nextVal []byte + k, v []byte + err error +} -func (hi *HistoryIterator2) Close() { +func (hi *HistoryChangesIterDBDup) Close() { + if hi.valsCursor != nil { + hi.valsCursor.Close() + } if hi.txNum2kCursor != nil { hi.txNum2kCursor.Close() } } -func (hi *HistoryIterator2) advanceInDb() { - hi.advDbCnt++ - var k, v []byte - var err error +func (hi *HistoryChangesIterDBDup) advance() (err error) { + var k []byte if hi.txNum2kCursor == nil { + if hi.valsCursor, err = hi.roTx.CursorDupSort(hi.valsTable); err != nil { + return err + } if hi.txNum2kCursor, err = hi.roTx.CursorDupSort(hi.idxKeysTable); err != nil { - hi.nextErr, hi.hasNext = err, true - return + return err } - if k, v, err = hi.txNum2kCursor.Seek(hi.startTxKey[:]); err != nil { - hi.nextErr, hi.hasNext = err, true - return + + if k, _, err = hi.valsCursor.First(); err != nil 
{ + return err } } else { - if k, v, err = hi.txNum2kCursor.NextDup(); err != nil { - hi.nextErr, hi.hasNext = err, true - return - } - if k == nil { - k, v, err = hi.txNum2kCursor.NextNoDup() - if err != nil { - hi.nextErr, hi.hasNext = err, true - return - } - if k != nil && binary.BigEndian.Uint64(k) >= hi.endTxNum { - k = nil // end - } + if k, _, err = hi.valsCursor.NextNoDup(); err != nil { + return err } } - if k != nil { - hi.nextKey = v[:len(v)-8] - hi.hasNext = true - - valNum := v[len(v)-8:] - - if binary.BigEndian.Uint64(valNum) == 0 { - // This is special valNum == 0, which is empty value - hi.nextVal = []byte{} - return + for ; k != nil; k, _, err = hi.valsCursor.NextNoDup() { + if err != nil { + return err } - val, err := hi.roTx.GetOne(hi.valsTable, valNum) + foundTxNumVal, err := hi.valsCursor.SeekBothRange(k, hi.startTxKey[:]) if err != nil { - hi.nextErr, hi.hasNext = err, true - return + return err } - hi.nextVal = val - return + if foundTxNumVal == nil { + continue + } + txNum := binary.BigEndian.Uint64(foundTxNumVal) + if hi.endTxNum >= 0 && int(txNum) >= hi.endTxNum { + continue + } + hi.nextKey = k + hi.nextVal = foundTxNumVal[8:] + return nil } - hi.txNum2kCursor.Close() - hi.txNum2kCursor = nil - hi.hasNext = false + hi.nextKey = nil + return nil } -func (hi *HistoryIterator2) HasNext() bool { - return hi.hasNext +func (hi *HistoryChangesIterDBDup) HasNext() bool { + if hi.err != nil { // always true, then .Next() call will return this error + return true + } + //if hi.limit == 0 { // limit reached + // return false + //} + if hi.nextKey == nil { // EndOfTable + return false + } + return true } -func (hi *HistoryIterator2) Next() ([]byte, []byte, error) { - k, v, err := hi.nextKey, hi.nextVal, hi.nextErr - if err != nil { +func (hi *HistoryChangesIterDBDup) Next() ([]byte, []byte, error) { + if hi.err != nil { + return nil, nil, hi.err + } + hi.k, hi.v = hi.nextKey, hi.nextVal + if err := hi.advance(); err != nil { return nil, nil, err } - hi.advanceInDb() - return k, v, nil + return hi.k, hi.v, nil } func (h *History) DisableReadAhead() { @@ -2188,23 +2410,12 @@ func (hs *HistoryStep) Clone() *HistoryStep { } } -func u64or0(in []byte) (v uint64) { - if len(in) > 0 { - v = binary.BigEndian.Uint64(in) - } - return v -} - func (h *History) CleanupDir() { - files, err := os.ReadDir(h.dir) - if err != nil { - log.Warn("[clean] can't read dir", "err", err, "dir", h.dir) - return - } - uselessFiles := h.scanStateFiles(files, h.integrityFileExtensions) + files, _ := h.fileNamesOnDisk() + uselessFiles := h.scanStateFiles(files) for _, f := range uselessFiles { fName := fmt.Sprintf("%s.%d-%d.v", h.filenameBase, f.startTxNum/h.aggregationStep, f.endTxNum/h.aggregationStep) - err = os.Remove(filepath.Join(h.dir, fName)) + err := os.Remove(filepath.Join(h.dir, fName)) log.Debug("[clean] remove", "file", fName, "err", err) fIdxName := fmt.Sprintf("%s.%d-%d.vi", h.filenameBase, f.startTxNum/h.aggregationStep, f.endTxNum/h.aggregationStep) err = os.Remove(filepath.Join(h.dir, fIdxName)) @@ -2212,3 +2423,69 @@ func (h *History) CleanupDir() { } h.InvertedIndex.CleanupDir() } + +func (hc *HistoryContext) idxRangeRecent(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { + var dbIt iter.U64 + if hc.h.largeValues { + if asc { + from := make([]byte, len(key)+8) + copy(from, key) + var fromTxNum uint64 + if startTxNum >= 0 { + fromTxNum = uint64(startTxNum) + } + binary.BigEndian.PutUint64(from[len(key):], fromTxNum) + + to := 
common.Copy(from) + toTxNum := uint64(math.MaxUint64) + if endTxNum >= 0 { + toTxNum = uint64(endTxNum) + } + binary.BigEndian.PutUint64(to[len(key):], toTxNum) + + it, err := roTx.RangeAscend(hc.h.historyValsTable, from, to, limit) + if err != nil { + return nil, err + } + dbIt = iter.TransformKV2U64(it, func(k, _ []byte) (uint64, error) { + return binary.BigEndian.Uint64(k[len(k)-8:]), nil + }) + } else { + panic("implement me") + } + } else { + if asc { + var from, to []byte + if startTxNum >= 0 { + from = make([]byte, 8) + binary.BigEndian.PutUint64(from, uint64(startTxNum)) + } + if endTxNum >= 0 { + to = make([]byte, 8) + binary.BigEndian.PutUint64(to, uint64(endTxNum)) + } + it, err := roTx.RangeDupSort(hc.h.historyValsTable, key, from, to, asc, limit) + if err != nil { + return nil, err + } + dbIt = iter.TransformKV2U64(it, func(_, v []byte) (uint64, error) { + return binary.BigEndian.Uint64(v), nil + }) + } else { + panic("implement me") + } + } + + return dbIt, nil +} +func (hc *HistoryContext) IdxRange(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { + frozenIt, err := hc.ic.iterateRangeFrozen(key, startTxNum, endTxNum, asc, limit) + if err != nil { + return nil, err + } + recentIt, err := hc.idxRangeRecent(key, startTxNum, endTxNum, asc, limit, roTx) + if err != nil { + return nil, err + } + return iter.Union[uint64](frozenIt, recentIt, asc, limit), nil +} diff --git a/state/history_test.go b/state/history_test.go index d211a4ca0..9a2f044da 100644 --- a/state/history_test.go +++ b/state/history_test.go @@ -23,10 +23,12 @@ import ( "math" "strings" "testing" - "testing/fstest" "time" + "github.com/ledgerwatch/erigon-lib/common/background" + "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/recsplit" @@ -36,23 +38,23 @@ import ( btree2 "github.com/tidwall/btree" ) -func testDbAndHistory(tb testing.TB) (string, kv.RwDB, *History) { +func testDbAndHistory(tb testing.TB, largeValues bool) (string, kv.RwDB, *History) { tb.Helper() path := tb.TempDir() logger := log.New() - keysTable := "Keys" - indexTable := "Index" - valsTable := "Vals" + keysTable := "AccountKeys" + indexTable := "AccountIndex" + valsTable := "AccountVals" settingsTable := "Settings" db := mdbx.NewMDBX(logger).InMem(path).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.TableCfg{ keysTable: kv.TableCfgItem{Flags: kv.DupSort}, indexTable: kv.TableCfgItem{Flags: kv.DupSort}, - valsTable: kv.TableCfgItem{}, + valsTable: kv.TableCfgItem{Flags: kv.DupSort}, settingsTable: kv.TableCfgItem{}, } }).MustOpen() - h, err := NewHistory(path, path, 16 /* aggregationStep */, "hist" /* filenameBase */, keysTable, indexTable, valsTable, settingsTable, false /* compressVals */, nil) + h, err := NewHistory(path, path, 16, "hist", keysTable, indexTable, valsTable, false, nil, false) require.NoError(tb, err) tb.Cleanup(db.Close) tb.Cleanup(h.Close) @@ -60,181 +62,196 @@ func testDbAndHistory(tb testing.TB) (string, kv.RwDB, *History) { } func TestHistoryCollationBuild(t *testing.T) { - defer log.Root().SetHandler(log.Root().GetHandler()) - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StderrHandler)) - logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() - require := require.New(t) - _, db, h := testDbAndHistory(t) ctx := 
context.Background() - tx, err := db.BeginRw(ctx) - require.NoError(err) - defer tx.Rollback() - h.SetTx(tx) - h.StartWrites("") - defer h.FinishWrites() - h.SetTxNum(2) - err = h.AddPrevValue([]byte("key1"), nil, nil) - require.NoError(err) + test := func(t *testing.T, h *History, db kv.RwDB) { + t.Helper() + require := require.New(t) + tx, err := db.BeginRw(ctx) + require.NoError(err) + defer tx.Rollback() + h.SetTx(tx) + h.StartWrites() + defer h.FinishWrites() - h.SetTxNum(3) - err = h.AddPrevValue([]byte("key2"), nil, nil) - require.NoError(err) + h.SetTxNum(2) + err = h.AddPrevValue([]byte("key1"), nil, nil) + require.NoError(err) - h.SetTxNum(6) - err = h.AddPrevValue([]byte("key1"), nil, []byte("value1.1")) - require.NoError(err) - err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.1")) - require.NoError(err) + h.SetTxNum(3) + err = h.AddPrevValue([]byte("key2"), nil, nil) + require.NoError(err) - flusher := h.Rotate() + h.SetTxNum(6) + err = h.AddPrevValue([]byte("key1"), nil, []byte("value1.1")) + require.NoError(err) + err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.1")) + require.NoError(err) - h.SetTxNum(7) - err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.2")) - require.NoError(err) - err = h.AddPrevValue([]byte("key3"), nil, nil) - require.NoError(err) + flusher := h.Rotate() - err = flusher.Flush(ctx, tx) - require.NoError(err) + h.SetTxNum(7) + err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.2")) + require.NoError(err) + err = h.AddPrevValue([]byte("key3"), nil, nil) + require.NoError(err) - err = h.Rotate().Flush(ctx, tx) - require.NoError(err) + err = flusher.Flush(ctx, tx) + require.NoError(err) - c, err := h.collate(0, 0, 8, tx, logEvery) - require.NoError(err) - require.True(strings.HasSuffix(c.historyPath, "hist.0-1.v")) - require.Equal(6, c.historyCount) - require.Equal(3, len(c.indexBitmaps)) - require.Equal([]uint64{7}, c.indexBitmaps["key3"].ToArray()) - require.Equal([]uint64{3, 6, 7}, c.indexBitmaps["key2"].ToArray()) - require.Equal([]uint64{2, 6}, c.indexBitmaps["key1"].ToArray()) - - sf, err := h.buildFiles(ctx, 0, c) - require.NoError(err) - defer sf.Close() - var valWords []string - g := sf.historyDecomp.MakeGetter() - g.Reset(0) - for g.HasNext() { - w, _ := g.Next(nil) - valWords = append(valWords, string(w)) - } - require.Equal([]string{"", "value1.1", "", "value2.1", "value2.2", ""}, valWords) - require.Equal(6, int(sf.historyIdx.KeyCount())) - g = sf.efHistoryDecomp.MakeGetter() - g.Reset(0) - var keyWords []string - var intArrs [][]uint64 - for g.HasNext() { - w, _ := g.Next(nil) - keyWords = append(keyWords, string(w)) - w, _ = g.Next(w[:0]) - ef, _ := eliasfano32.ReadEliasFano(w) - var ints []uint64 - it := ef.Iterator() - for it.HasNext() { - v, _ := it.Next() - ints = append(ints, v) + err = h.Rotate().Flush(ctx, tx) + require.NoError(err) + + c, err := h.collate(0, 0, 8, tx) + require.NoError(err) + require.True(strings.HasSuffix(c.historyPath, "hist.0-1.v")) + require.Equal(6, c.historyCount) + require.Equal(3, len(c.indexBitmaps)) + require.Equal([]uint64{7}, c.indexBitmaps["key3"].ToArray()) + require.Equal([]uint64{3, 6, 7}, c.indexBitmaps["key2"].ToArray()) + require.Equal([]uint64{2, 6}, c.indexBitmaps["key1"].ToArray()) + + sf, err := h.buildFiles(ctx, 0, c, background.NewProgressSet()) + require.NoError(err) + defer sf.Close() + var valWords []string + g := sf.historyDecomp.MakeGetter() + g.Reset(0) + for g.HasNext() { + w, _ := g.Next(nil) + valWords = append(valWords, string(w)) } - intArrs = 
append(intArrs, ints) - } - require.Equal([]string{"key1", "key2", "key3"}, keyWords) - require.Equal([][]uint64{{2, 6}, {3, 6, 7}, {7}}, intArrs) - r := recsplit.NewIndexReader(sf.efHistoryIdx) - for i := 0; i < len(keyWords); i++ { - offset := r.Lookup([]byte(keyWords[i])) - g.Reset(offset) - w, _ := g.Next(nil) - require.Equal(keyWords[i], string(w)) - } - r = recsplit.NewIndexReader(sf.historyIdx) - g = sf.historyDecomp.MakeGetter() - var vi int - for i := 0; i < len(keyWords); i++ { - ints := intArrs[i] - for j := 0; j < len(ints); j++ { - var txKey [8]byte - binary.BigEndian.PutUint64(txKey[:], ints[j]) - offset := r.Lookup2(txKey[:], []byte(keyWords[i])) + require.Equal([]string{"", "value1.1", "", "value2.1", "value2.2", ""}, valWords) + require.Equal(6, int(sf.historyIdx.KeyCount())) + g = sf.efHistoryDecomp.MakeGetter() + g.Reset(0) + var keyWords []string + var intArrs [][]uint64 + for g.HasNext() { + w, _ := g.Next(nil) + keyWords = append(keyWords, string(w)) + w, _ = g.Next(w[:0]) + ef, _ := eliasfano32.ReadEliasFano(w) + ints, err := iter.ToU64Arr(ef.Iterator()) + require.NoError(err) + intArrs = append(intArrs, ints) + } + require.Equal([]string{"key1", "key2", "key3"}, keyWords) + require.Equal([][]uint64{{2, 6}, {3, 6, 7}, {7}}, intArrs) + r := recsplit.NewIndexReader(sf.efHistoryIdx) + for i := 0; i < len(keyWords); i++ { + offset := r.Lookup([]byte(keyWords[i])) g.Reset(offset) w, _ := g.Next(nil) - require.Equal(valWords[vi], string(w)) - vi++ + require.Equal(keyWords[i], string(w)) + } + r = recsplit.NewIndexReader(sf.historyIdx) + g = sf.historyDecomp.MakeGetter() + var vi int + for i := 0; i < len(keyWords); i++ { + ints := intArrs[i] + for j := 0; j < len(ints); j++ { + var txKey [8]byte + binary.BigEndian.PutUint64(txKey[:], ints[j]) + offset := r.Lookup2(txKey[:], []byte(keyWords[i])) + g.Reset(offset) + w, _ := g.Next(nil) + require.Equal(valWords[vi], string(w)) + vi++ + } } } + t.Run("large_values", func(t *testing.T) { + _, db, h := testDbAndHistory(t, true) + test(t, h, db) + }) + t.Run("small_values", func(t *testing.T) { + _, db, h := testDbAndHistory(t, false) + test(t, h, db) + }) } func TestHistoryAfterPrune(t *testing.T) { logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() - _, db, h := testDbAndHistory(t) ctx := context.Background() - tx, err := db.BeginRw(ctx) - require.NoError(t, err) - defer tx.Rollback() - h.SetTx(tx) - h.StartWrites("") - defer h.FinishWrites() + test := func(t *testing.T, h *History, db kv.RwDB) { + t.Helper() + require := require.New(t) + tx, err := db.BeginRw(ctx) + require.NoError(err) + defer tx.Rollback() + h.SetTx(tx) + h.StartWrites() + defer h.FinishWrites() - h.SetTxNum(2) - err = h.AddPrevValue([]byte("key1"), nil, nil) - require.NoError(t, err) + h.SetTxNum(2) + err = h.AddPrevValue([]byte("key1"), nil, nil) + require.NoError(err) - h.SetTxNum(3) - err = h.AddPrevValue([]byte("key2"), nil, nil) - require.NoError(t, err) + h.SetTxNum(3) + err = h.AddPrevValue([]byte("key2"), nil, nil) + require.NoError(err) - h.SetTxNum(6) - err = h.AddPrevValue([]byte("key1"), nil, []byte("value1.1")) - require.NoError(t, err) - err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.1")) - require.NoError(t, err) + h.SetTxNum(6) + err = h.AddPrevValue([]byte("key1"), nil, []byte("value1.1")) + require.NoError(err) + err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.1")) + require.NoError(err) - h.SetTxNum(7) - err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.2")) - require.NoError(t, err) - err = 
h.AddPrevValue([]byte("key3"), nil, nil) - require.NoError(t, err) + h.SetTxNum(7) + err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.2")) + require.NoError(err) + err = h.AddPrevValue([]byte("key3"), nil, nil) + require.NoError(err) - err = h.Rotate().Flush(ctx, tx) - require.NoError(t, err) + err = h.Rotate().Flush(ctx, tx) + require.NoError(err) - c, err := h.collate(0, 0, 16, tx, logEvery) - require.NoError(t, err) + c, err := h.collate(0, 0, 16, tx) + require.NoError(err) - sf, err := h.buildFiles(ctx, 0, c) - require.NoError(t, err) + sf, err := h.buildFiles(ctx, 0, c, background.NewProgressSet()) + require.NoError(err) - h.integrateFiles(sf, 0, 16) + h.integrateFiles(sf, 0, 16) - err = h.prune(ctx, 0, 16, math.MaxUint64, logEvery) - require.NoError(t, err) - h.SetTx(tx) + err = h.prune(ctx, 0, 16, math.MaxUint64, logEvery) + require.NoError(err) + h.SetTx(tx) - for _, table := range []string{h.indexKeysTable, h.historyValsTable, h.indexTable} { - var cur kv.Cursor - cur, err = tx.Cursor(table) - require.NoError(t, err) - defer cur.Close() - var k []byte - k, _, err = cur.First() - require.NoError(t, err) - require.Nil(t, k, table) + for _, table := range []string{h.indexKeysTable, h.historyValsTable, h.indexTable} { + var cur kv.Cursor + cur, err = tx.Cursor(table) + require.NoError(err) + defer cur.Close() + var k []byte + k, _, err = cur.First() + require.NoError(err) + require.Nil(k, table) + } } + t.Run("large_values", func(t *testing.T) { + _, db, h := testDbAndHistory(t, true) + test(t, h, db) + }) + t.Run("small_values", func(t *testing.T) { + _, db, h := testDbAndHistory(t, false) + test(t, h, db) + }) } -func filledHistory(tb testing.TB) (string, kv.RwDB, *History, uint64) { +func filledHistory(tb testing.TB, largeValues bool) (string, kv.RwDB, *History, uint64) { tb.Helper() - path, db, h := testDbAndHistory(tb) + path, db, h := testDbAndHistory(tb, largeValues) ctx := context.Background() tx, err := db.BeginRw(ctx) require.NoError(tb, err) defer tx.Rollback() h.SetTx(tx) - h.StartWrites("") + h.StartWrites() defer h.FinishWrites() txs := uint64(1000) @@ -270,7 +287,6 @@ func filledHistory(tb testing.TB) (string, kv.RwDB, *History, uint64) { if flusher != nil { err = flusher.Flush(ctx, tx) require.NoError(tb, err) - flusher = nil } err = h.Rotate().Flush(ctx, tx) require.NoError(tb, err) @@ -280,7 +296,7 @@ func filledHistory(tb testing.TB) (string, kv.RwDB, *History, uint64) { return path, db, h, txs } -func checkHistoryHistory(t *testing.T, db kv.RwDB, h *History, txs uint64) { +func checkHistoryHistory(t *testing.T, h *History, txs uint64) { t.Helper() // Check the history hc := h.MakeContext() @@ -313,26 +329,38 @@ func checkHistoryHistory(t *testing.T, db kv.RwDB, h *History, txs uint64) { func TestHistoryHistory(t *testing.T) { logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() - _, db, h, txs := filledHistory(t) ctx := context.Background() - tx, err := db.BeginRw(ctx) - require.NoError(t, err) - h.SetTx(tx) - defer tx.Rollback() - - // Leave the last 2 aggregation steps un-collated - for step := uint64(0); step < txs/h.aggregationStep-1; step++ { - func() { - c, err := h.collate(step, step*h.aggregationStep, (step+1)*h.aggregationStep, tx, logEvery) - require.NoError(t, err) - sf, err := h.buildFiles(ctx, step, c) - require.NoError(t, err) - h.integrateFiles(sf, step*h.aggregationStep, (step+1)*h.aggregationStep) - err = h.prune(ctx, step*h.aggregationStep, (step+1)*h.aggregationStep, math.MaxUint64, logEvery) - require.NoError(t, err) - }() 
+ test := func(t *testing.T, h *History, db kv.RwDB, txs uint64) { + t.Helper() + require := require.New(t) + tx, err := db.BeginRw(ctx) + require.NoError(err) + h.SetTx(tx) + defer tx.Rollback() + + // Leave the last 2 aggregation steps un-collated + for step := uint64(0); step < txs/h.aggregationStep-1; step++ { + func() { + c, err := h.collate(step, step*h.aggregationStep, (step+1)*h.aggregationStep, tx) + require.NoError(err) + sf, err := h.buildFiles(ctx, step, c, background.NewProgressSet()) + require.NoError(err) + h.integrateFiles(sf, step*h.aggregationStep, (step+1)*h.aggregationStep) + err = h.prune(ctx, step*h.aggregationStep, (step+1)*h.aggregationStep, math.MaxUint64, logEvery) + require.NoError(err) + }() + } + checkHistoryHistory(t, h, txs) } - checkHistoryHistory(t, db, h, txs) + t.Run("large_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, true) + test(t, h, db, txs) + }) + t.Run("small_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, false) + test(t, h, db, txs) + }) + } func collateAndMergeHistory(tb testing.TB, db kv.RwDB, h *History, txs uint64) { @@ -349,9 +377,9 @@ func collateAndMergeHistory(tb testing.TB, db kv.RwDB, h *History, txs uint64) { // Leave the last 2 aggregation steps un-collated for step := uint64(0); step < txs/h.aggregationStep-1; step++ { - c, err := h.collate(step, step*h.aggregationStep, (step+1)*h.aggregationStep, tx, logEvery) + c, err := h.collate(step, step*h.aggregationStep, (step+1)*h.aggregationStep, tx) require.NoError(err) - sf, err := h.buildFiles(ctx, step, c) + sf, err := h.buildFiles(ctx, step, c, background.NewProgressSet()) require.NoError(err) h.integrateFiles(sf, step*h.aggregationStep, (step+1)*h.aggregationStep) err = h.prune(ctx, step*h.aggregationStep, (step+1)*h.aggregationStep, math.MaxUint64, logEvery) @@ -370,7 +398,7 @@ func collateAndMergeHistory(tb testing.TB, db kv.RwDB, h *History, txs uint64) { indexOuts, historyOuts, _, err := h.staticFilesInRange(r, hc) require.NoError(err) - indexIn, historyIn, err := h.mergeFiles(ctx, indexOuts, historyOuts, r, 1) + indexIn, historyIn, err := h.mergeFiles(ctx, indexOuts, historyOuts, r, 1, background.NewProgressSet()) require.NoError(err) h.integrateMergedFiles(indexOuts, historyOuts, indexIn, historyIn) }() @@ -384,141 +412,75 @@ func collateAndMergeHistory(tb testing.TB, db kv.RwDB, h *History, txs uint64) { } func TestHistoryMergeFiles(t *testing.T) { - _, db, h, txs := filledHistory(t) + test := func(t *testing.T, h *History, db kv.RwDB, txs uint64) { + t.Helper() + collateAndMergeHistory(t, db, h, txs) + checkHistoryHistory(t, h, txs) + } - collateAndMergeHistory(t, db, h, txs) - checkHistoryHistory(t, db, h, txs) + t.Run("large_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, true) + test(t, h, db, txs) + }) + t.Run("small_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, false) + test(t, h, db, txs) + }) } func TestHistoryScanFiles(t *testing.T) { - _, db, h, txs := filledHistory(t) - var err error - - collateAndMergeHistory(t, db, h, txs) - // Recreate domain and re-scan the files - txNum := h.txNum - require.NoError(t, h.reOpenFolder()) - //h.Close() - //h, err = NewHistory(path, path, h.aggregationStep, h.filenameBase, h.indexKeysTable, h.indexTable, h.historyValsTable, h.settingsTable, h.compressVals, nil) - require.NoError(t, err) - h.SetTxNum(txNum) - // Check the history - checkHistoryHistory(t, db, h, txs) + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() + test := func(t 
*testing.T, h *History, db kv.RwDB, txs uint64) { + t.Helper() + require := require.New(t) + + collateAndMergeHistory(t, db, h, txs) + // Recreate domain and re-scan the files + txNum := h.txNum + require.NoError(h.OpenFolder()) + h.SetTxNum(txNum) + // Check the history + checkHistoryHistory(t, h, txs) + } + + t.Run("large_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, true) + test(t, h, db, txs) + }) + t.Run("small_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, false) + test(t, h, db, txs) + }) } func TestIterateChanged(t *testing.T) { - _, db, h, txs := filledHistory(t) - collateAndMergeHistory(t, db, h, txs) + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() ctx := context.Background() - tx, err := db.BeginRo(ctx) - require.NoError(t, err) - defer tx.Rollback() - var keys, vals []string - ic := h.MakeContext() - defer ic.Close() - - it := ic.IterateChanged(2, 20, order.Asc, -1, tx) - defer it.Close() - for it.HasNext() { - k, v, err := it.Next() - require.NoError(t, err) - keys = append(keys, fmt.Sprintf("%x", k)) - vals = append(vals, fmt.Sprintf("%x", v)) - } - it.Close() - require.Equal(t, []string{ - "0100000000000001", - "0100000000000002", - "0100000000000003", - "0100000000000004", - "0100000000000005", - "0100000000000006", - "0100000000000007", - "0100000000000008", - "0100000000000009", - "010000000000000a", - "010000000000000b", - "010000000000000c", - "010000000000000d", - "010000000000000e", - "010000000000000f", - "0100000000000010", - "0100000000000011", - "0100000000000012", - "0100000000000013"}, keys) - require.Equal(t, []string{ - "ff00000000000001", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - ""}, vals) - it = ic.IterateChanged(995, 1000, order.Asc, -1, tx) - keys, vals = keys[:0], vals[:0] - for it.HasNext() { - k, v, err := it.Next() - require.NoError(t, err) - keys = append(keys, fmt.Sprintf("%x", k)) - vals = append(vals, fmt.Sprintf("%x", v)) - } - it.Close() - require.Equal(t, []string{ - "0100000000000001", - "0100000000000002", - "0100000000000003", - "0100000000000004", - "0100000000000005", - "0100000000000006", - "0100000000000009", - "010000000000000c", - "010000000000001b", - }, keys) - - require.Equal(t, []string{ - "ff000000000003e2", - "ff000000000001f1", - "ff0000000000014b", - "ff000000000000f8", - "ff000000000000c6", - "ff000000000000a5", - "ff0000000000006e", - "ff00000000000052", - "ff00000000000024"}, vals) -} + test := func(t *testing.T, h *History, db kv.RwDB, txs uint64) { + t.Helper() + require := require.New(t) -func TestIterateChanged2(t *testing.T) { - _, db, h, txs := filledHistory(t) - ctx := context.Background() + collateAndMergeHistory(t, db, h, txs) - roTx, err := db.BeginRo(ctx) - require.NoError(t, err) - defer roTx.Rollback() - var keys, vals []string - t.Run("before merge", func(t *testing.T) { + tx, err := db.BeginRo(ctx) + require.NoError(err) + defer tx.Rollback() + var keys, vals []string ic := h.MakeContext() defer ic.Close() - err := ic.IterateRecentlyChanged(2, 20, roTx, func(k, v []byte) error { + it, err := ic.HistoryRange(2, 20, order.Asc, -1, tx) + require.NoError(err) + for it.HasNext() { + k, v, err := it.Next() + require.NoError(err) keys = append(keys, fmt.Sprintf("%x", k)) vals = append(vals, fmt.Sprintf("%x", v)) - return nil - }) - require.NoError(t, err) - require.Equal(t, []string{ + } + require.Equal([]string{ "0100000000000001", "0100000000000002", "0100000000000003", @@ -538,7 +500,7 @@ 
func TestIterateChanged2(t *testing.T) { "0100000000000011", "0100000000000012", "0100000000000013"}, keys) - require.Equal(t, []string{ + require.Equal([]string{ "ff00000000000001", "", "", @@ -558,14 +520,16 @@ func TestIterateChanged2(t *testing.T) { "", "", ""}, vals) + it, err = ic.HistoryRange(995, 1000, order.Asc, -1, tx) + require.NoError(err) keys, vals = keys[:0], vals[:0] - err = ic.IterateRecentlyChanged(995, 1000, roTx, func(k, v []byte) error { + for it.HasNext() { + k, v, err := it.Next() + require.NoError(err) keys = append(keys, fmt.Sprintf("%x", k)) vals = append(vals, fmt.Sprintf("%x", v)) - return nil - }) - require.NoError(t, err) - require.Equal(t, []string{ + } + require.Equal([]string{ "0100000000000001", "0100000000000002", "0100000000000003", @@ -577,7 +541,7 @@ func TestIterateChanged2(t *testing.T) { "010000000000001b", }, keys) - require.Equal(t, []string{ + require.Equal([]string{ "ff000000000003e2", "ff000000000001f1", "ff0000000000014b", @@ -587,38 +551,244 @@ func TestIterateChanged2(t *testing.T) { "ff0000000000006e", "ff00000000000052", "ff00000000000024"}, vals) - }) - t.Run("after merge", func(t *testing.T) { - collateAndMergeHistory(t, db, h, txs) - ic := h.MakeContext() - defer ic.Close() - keys = keys[:0] - err = ic.IterateRecentlyChanged(2, 20, roTx, func(k, _ []byte) error { + // no upper bound + it, err = ic.HistoryRange(995, -1, order.Asc, -1, tx) + require.NoError(err) + keys, vals = keys[:0], vals[:0] + for it.HasNext() { + k, v, err := it.Next() + require.NoError(err) keys = append(keys, fmt.Sprintf("%x", k)) - return nil - }) + vals = append(vals, fmt.Sprintf("%x", v)) + } + require.Equal([]string{"0100000000000001", "0100000000000002", "0100000000000003", "0100000000000004", "0100000000000005", "0100000000000006", "0100000000000008", "0100000000000009", "010000000000000a", "010000000000000c", "0100000000000014", "0100000000000019", "010000000000001b"}, keys) + require.Equal([]string{"ff000000000003e2", "ff000000000001f1", "ff0000000000014b", "ff000000000000f8", "ff000000000000c6", "ff000000000000a5", "ff0000000000007c", "ff0000000000006e", "ff00000000000063", "ff00000000000052", "ff00000000000031", "ff00000000000027", "ff00000000000024"}, vals) + + // no upper bound, limit=2 + it, err = ic.HistoryRange(995, -1, order.Asc, 2, tx) + require.NoError(err) + keys, vals = keys[:0], vals[:0] + for it.HasNext() { + k, v, err := it.Next() + require.NoError(err) + keys = append(keys, fmt.Sprintf("%x", k)) + vals = append(vals, fmt.Sprintf("%x", v)) + } + require.Equal([]string{"0100000000000001", "0100000000000002"}, keys) + require.Equal([]string{"ff000000000003e2", "ff000000000001f1"}, vals) + + // no lower bound, limit=2 + it, err = ic.HistoryRange(-1, 1000, order.Asc, 2, tx) + require.NoError(err) + keys, vals = keys[:0], vals[:0] + for it.HasNext() { + k, v, err := it.Next() + require.NoError(err) + keys = append(keys, fmt.Sprintf("%x", k)) + vals = append(vals, fmt.Sprintf("%x", v)) + } + require.Equal([]string{"0100000000000001", "0100000000000002"}, keys) + require.Equal([]string{"ff000000000003cf", "ff000000000001e7"}, vals) + } + t.Run("large_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, true) + test(t, h, db, txs) + }) + t.Run("small_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, false) + test(t, h, db, txs) + }) +} + +func TestIterateChanged2(t *testing.T) { + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() + ctx := context.Background() + + test := func(t *testing.T, h *History, 
db kv.RwDB, txs uint64) { + t.Helper() + roTx, err := db.BeginRo(ctx) require.NoError(t, err) - require.Equal(t, []string{ - "0100000000000001", - "0100000000000002", - "0100000000000003", - "0100000000000004", - "0100000000000005", - "0100000000000006", - "0100000000000007", - "0100000000000008", - "0100000000000009", - "010000000000000a", - "010000000000000b", - "010000000000000c", - "010000000000000d", - "010000000000000e", - "010000000000000f", - "0100000000000010", - "0100000000000011", - "0100000000000012", - "0100000000000013"}, keys) + defer roTx.Rollback() + + type testCase struct { + k, v string + txNum uint64 + } + testCases := []testCase{ + {txNum: 0, k: "0100000000000001", v: ""}, + {txNum: 900, k: "0100000000000001", v: "ff00000000000383"}, + {txNum: 1000, k: "0100000000000001", v: "ff000000000003e7"}, + } + var keys, vals []string + t.Run("before merge", func(t *testing.T) { + hc, require := h.MakeContext(), require.New(t) + defer hc.Close() + + it, err := hc.HistoryRange(2, 20, order.Asc, -1, roTx) + require.NoError(err) + for it.HasNext() { + k, v, err := it.Next() + require.NoError(err) + keys = append(keys, fmt.Sprintf("%x", k)) + vals = append(vals, fmt.Sprintf("%x", v)) + } + require.NoError(err) + require.Equal([]string{ + "0100000000000001", + "0100000000000002", + "0100000000000003", + "0100000000000004", + "0100000000000005", + "0100000000000006", + "0100000000000007", + "0100000000000008", + "0100000000000009", + "010000000000000a", + "010000000000000b", + "010000000000000c", + "010000000000000d", + "010000000000000e", + "010000000000000f", + "0100000000000010", + "0100000000000011", + "0100000000000012", + "0100000000000013"}, keys) + require.Equal([]string{ + "ff00000000000001", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + ""}, vals) + keys, vals = keys[:0], vals[:0] + + it, err = hc.HistoryRange(995, 1000, order.Asc, -1, roTx) + require.NoError(err) + for it.HasNext() { + k, v, err := it.Next() + require.NoError(err) + keys = append(keys, fmt.Sprintf("%x", k)) + vals = append(vals, fmt.Sprintf("%x", v)) + } + require.NoError(err) + require.Equal([]string{ + "0100000000000001", + "0100000000000002", + "0100000000000003", + "0100000000000004", + "0100000000000005", + "0100000000000006", + "0100000000000009", + "010000000000000c", + "010000000000001b", + }, keys) + + require.Equal([]string{ + "ff000000000003e2", + "ff000000000001f1", + "ff0000000000014b", + "ff000000000000f8", + "ff000000000000c6", + "ff000000000000a5", + "ff0000000000006e", + "ff00000000000052", + "ff00000000000024"}, vals) + + // single Get test-cases + tx, err := db.BeginRo(ctx) + require.NoError(err) + defer tx.Rollback() + + v, ok, err := hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 900, tx) + require.NoError(err) + require.True(ok) + require.Equal(hexutility.MustDecodeHex("ff00000000000383"), v) + v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 0, tx) + require.NoError(err) + require.True(ok) + require.Equal([]byte{}, v) + v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 1000, tx) + require.NoError(err) + require.True(ok) + require.Equal(hexutility.MustDecodeHex("ff000000000003e7"), v) + _ = testCases + }) + t.Run("after merge", func(t *testing.T) { + collateAndMergeHistory(t, db, h, txs) + hc, require := h.MakeContext(), require.New(t) + defer hc.Close() + + keys = keys[:0] + it, err := hc.HistoryRange(2, 20, order.Asc, -1, roTx) + 
require.NoError(err) + for it.HasNext() { + k, _, err := it.Next() + require.NoError(err) + keys = append(keys, fmt.Sprintf("%x", k)) + } + require.NoError(err) + require.Equal([]string{ + "0100000000000001", + "0100000000000002", + "0100000000000003", + "0100000000000004", + "0100000000000005", + "0100000000000006", + "0100000000000007", + "0100000000000008", + "0100000000000009", + "010000000000000a", + "010000000000000b", + "010000000000000c", + "010000000000000d", + "010000000000000e", + "010000000000000f", + "0100000000000010", + "0100000000000011", + "0100000000000012", + "0100000000000013"}, keys) + + // single Get test-cases + tx, err := db.BeginRo(ctx) + require.NoError(err) + defer tx.Rollback() + + v, ok, err := hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 900, tx) + require.NoError(err) + require.True(ok) + require.Equal(hexutility.MustDecodeHex("ff00000000000383"), v) + v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 0, tx) + require.NoError(err) + require.True(ok) + require.Equal([]byte{}, v) + v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 1000, tx) + require.NoError(err) + require.True(ok) + require.Equal(hexutility.MustDecodeHex("ff000000000003e7"), v) + }) + } + t.Run("large_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, true) + test(t, h, db, txs) + }) + t.Run("small_values", func(t *testing.T) { + _, db, h, txs := filledHistory(t, false) + test(t, h, db, txs) }) } @@ -626,28 +796,20 @@ func TestScanStaticFilesH(t *testing.T) { h := &History{InvertedIndex: &InvertedIndex{filenameBase: "test", aggregationStep: 1}, files: btree2.NewBTreeG[*filesItem](filesItemLess), } - ffs := fstest.MapFS{ - "test.0-1.v": {}, - "test.1-2.v": {}, - "test.0-4.v": {}, - "test.2-3.v": {}, - "test.3-4.v": {}, - "test.4-5.v": {}, + files := []string{ + "test.0-1.v", + "test.1-2.v", + "test.0-4.v", + "test.2-3.v", + "test.3-4.v", + "test.4-5.v", } - files, err := ffs.ReadDir(".") - require.NoError(t, err) - h.scanStateFiles(files, nil) - var found []string - h.files.Walk(func(items []*filesItem) bool { - for _, item := range items { - found = append(found, fmt.Sprintf("%d-%d", item.startTxNum, item.endTxNum)) - } - return true - }) - require.Equal(t, 6, len(found)) + h.scanStateFiles(files) + require.Equal(t, 6, h.files.Len()) h.files.Clear() - h.scanStateFiles(files, []string{"kv"}) + h.integrityFileExtensions = []string{"kv"} + h.scanStateFiles(files) require.Equal(t, 0, h.files.Len()) } diff --git a/state/inverted_index.go b/state/inverted_index.go index bb0ec7ac2..4f4afddfe 100644 --- a/state/inverted_index.go +++ b/state/inverted_index.go @@ -22,34 +22,31 @@ import ( "context" "encoding/binary" "fmt" - "io/fs" "math" "os" "path/filepath" "regexp" "strconv" - "sync" + "sync/atomic" "time" "github.com/RoaringBitmap/roaring/roaring64" "github.com/c2h5oh/datasize" - "github.com/ledgerwatch/erigon-lib/kv/iter" - "github.com/ledgerwatch/erigon-lib/kv/order" - "github.com/ledgerwatch/log/v3" - btree2 "github.com/tidwall/btree" - atomic2 "go.uber.org/atomic" - "golang.org/x/exp/slices" - "golang.org/x/sync/errgroup" - "golang.org/x/sync/semaphore" - + "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/compress" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/bitmapdb" + 
"github.com/ledgerwatch/erigon-lib/kv/iter" + "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/recsplit" "github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32" + "github.com/ledgerwatch/log/v3" + btree2 "github.com/tidwall/btree" + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" ) type InvertedIndex struct { @@ -57,7 +54,7 @@ type InvertedIndex struct { // roFiles derivative from field `file`, but without garbage (canDelete=true, overlaps, etc...) // MakeContext() using this field in zero-copy way - roFiles atomic2.Pointer[[]ctxItem] + roFiles atomic.Pointer[[]ctxItem] indexKeysTable string // txnNum_u64 -> key (k+auto_increment) indexTable string // k -> txnNum_u64 , Needs to be table with DupSort @@ -75,7 +72,6 @@ type InvertedIndex struct { txNum uint64 txNumBytes [8]byte wal *invertedIndexWAL - walLock sync.RWMutex } func NewInvertedIndex( @@ -91,7 +87,6 @@ func NewInvertedIndex( dir: dir, tmpdir: tmpdir, files: btree2.NewBTreeGOptions[*filesItem](filesItemLess, btree2.Options{Degree: 128, NoLocks: false}), - roFiles: *atomic2.NewPointer(&[]ctxItem{}), aggregationStep: aggregationStep, filenameBase: filenameBase, indexKeysTable: indexKeysTable, @@ -100,6 +95,8 @@ func NewInvertedIndex( integrityFileExtensions: integrityFileExtensions, withLocalityIndex: withLocalityIndex, } + ii.roFiles.Store(&[]ctxItem{}) + if ii.withLocalityIndex { var err error ii.localityIndex, err = NewLocalityIndex(ii.dir, ii.tmpdir, ii.aggregationStep, ii.filenameBase) @@ -107,35 +104,49 @@ func NewInvertedIndex( return nil, fmt.Errorf("NewHistory: %s, %w", ii.filenameBase, err) } } - //if err := ii.reOpenFolder(); err != nil { - // return nil, err - //} return &ii, nil } -func (ii *InvertedIndex) reOpenFolder() error { - ii.closeFiles() + +func (ii *InvertedIndex) fileNamesOnDisk() ([]string, error) { files, err := os.ReadDir(ii.dir) if err != nil { + return nil, err + } + filteredFiles := make([]string, 0, len(files)) + for _, f := range files { + if !f.Type().IsRegular() { + continue + } + filteredFiles = append(filteredFiles, f.Name()) + } + return filteredFiles, nil +} + +func (ii *InvertedIndex) OpenList(fNames []string) error { + if err := ii.localityIndex.OpenList(fNames); err != nil { return err } - _ = ii.scanStateFiles(files, ii.integrityFileExtensions) - if err = ii.openFiles(); err != nil { + ii.closeWhatNotInList(fNames) + _ = ii.scanStateFiles(fNames) + if err := ii.openFiles(); err != nil { return fmt.Errorf("NewHistory.openFiles: %s, %w", ii.filenameBase, err) } + return nil +} - return ii.localityIndex.reOpenFolder() +func (ii *InvertedIndex) OpenFolder() error { + files, err := ii.fileNamesOnDisk() + if err != nil { + return err + } + return ii.OpenList(files) } -func (ii *InvertedIndex) scanStateFiles(files []fs.DirEntry, integrityFileExtensions []string) (uselessFiles []*filesItem) { +func (ii *InvertedIndex) scanStateFiles(fileNames []string) (uselessFiles []*filesItem) { re := regexp.MustCompile("^" + ii.filenameBase + ".([0-9]+)-([0-9]+).ef$") var err error Loop: - for _, f := range files { - if !f.Type().IsRegular() { - continue - } - - name := f.Name() + for _, name := range fileNames { subs := re.FindStringSubmatch(name) if len(subs) != 3 { if len(subs) != 0 { @@ -160,7 +171,7 @@ Loop: startTxNum, endTxNum := startStep*ii.aggregationStep, endStep*ii.aggregationStep frozen := endStep-startStep == StepsInBiggestFile - for _, ext := range integrityFileExtensions { + for _, ext := range ii.integrityFileExtensions { requiredFile := 
fmt.Sprintf("%s.%d-%d.%s", ii.filenameBase, startStep, endStep, ext) if !dir.FileExist(filepath.Join(ii.dir, requiredFile)) { log.Debug(fmt.Sprintf("[snapshots] skip %s because %s doesn't exists", name, requiredFile)) @@ -169,6 +180,10 @@ Loop: } var newFile = &filesItem{startTxNum: startTxNum, endTxNum: endTxNum, frozen: frozen} + if _, has := ii.files.Get(newFile); has { + continue + } + addNewFile := true var subSets []*filesItem ii.files.Walk(func(items []*filesItem) bool { @@ -195,7 +210,7 @@ Loop: ii.files.Set(newFile) } } - ii.reCalcRoFiles() + return uselessFiles } @@ -253,32 +268,28 @@ func (ii *InvertedIndex) missedIdxFiles() (l []*filesItem) { return l } +func (ii *InvertedIndex) buildEfi(ctx context.Context, item *filesItem, p *background.Progress) (err error) { + fromStep, toStep := item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep + fName := fmt.Sprintf("%s.%d-%d.efi", ii.filenameBase, fromStep, toStep) + idxPath := filepath.Join(ii.dir, fName) + p.Name.Store(&fName) + p.Total.Store(uint64(item.decompressor.Count())) + //log.Info("[snapshots] build idx", "file", fName) + return buildIndex(ctx, item.decompressor, idxPath, ii.tmpdir, item.decompressor.Count()/2, false, p) +} + // BuildMissedIndices - produce .efi/.vi/.kvi from .ef/.v/.kv -func (ii *InvertedIndex) BuildMissedIndices(ctx context.Context, sem *semaphore.Weighted) (err error) { +func (ii *InvertedIndex) BuildMissedIndices(ctx context.Context, g *errgroup.Group, ps *background.ProgressSet) { missedFiles := ii.missedIdxFiles() - g, ctx := errgroup.WithContext(ctx) for _, item := range missedFiles { item := item g.Go(func() error { - if err := sem.Acquire(ctx, 1); err != nil { - return err - } - defer sem.Release(1) - fromStep, toStep := item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep - fName := fmt.Sprintf("%s.%d-%d.efi", ii.filenameBase, fromStep, toStep) - idxPath := filepath.Join(ii.dir, fName) - log.Info("[snapshots] build idx", "file", fName) - _, err := buildIndex(ctx, item.decompressor, idxPath, ii.tmpdir, item.decompressor.Count()/2, false) - if err != nil { - return err - } - return nil + p := &background.Progress{} + ps.Add(p) + defer ps.Delete(p) + return ii.buildEfi(ctx, item, p) }) } - if err := g.Wait(); err != nil { - return err - } - return ii.openFiles() } func (ii *InvertedIndex) openFiles() error { @@ -288,27 +299,30 @@ func (ii *InvertedIndex) openFiles() error { ii.files.Walk(func(items []*filesItem) bool { for _, item := range items { if item.decompressor != nil { - item.decompressor.Close() + continue } fromStep, toStep := item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep datPath := filepath.Join(ii.dir, fmt.Sprintf("%s.%d-%d.ef", ii.filenameBase, fromStep, toStep)) if !dir.FileExist(datPath) { invalidFileItems = append(invalidFileItems, item) + continue } + if item.decompressor, err = compress.NewDecompressor(datPath); err != nil { log.Debug("InvertedIndex.openFiles: %w, %s", err, datPath) continue } - if item.index == nil { - idxPath := filepath.Join(ii.dir, fmt.Sprintf("%s.%d-%d.efi", ii.filenameBase, fromStep, toStep)) - if dir.FileExist(idxPath) { - if item.index, err = recsplit.OpenIndex(idxPath); err != nil { - log.Debug("InvertedIndex.openFiles: %w, %s", err, idxPath) - return false - } - totalKeys += item.index.KeyCount() + if item.index != nil { + continue + } + idxPath := filepath.Join(ii.dir, fmt.Sprintf("%s.%d-%d.efi", ii.filenameBase, fromStep, toStep)) + if dir.FileExist(idxPath) { + if item.index, err = 
recsplit.OpenIndex(idxPath); err != nil { + log.Debug("InvertedIndex.openFiles: %w, %s", err, idxPath) + return false } + totalKeys += item.index.KeyCount() } } return true @@ -319,36 +333,46 @@ func (ii *InvertedIndex) openFiles() error { if err != nil { return err } + + ii.reCalcRoFiles() return nil } -func (ii *InvertedIndex) closeFiles() { +func (ii *InvertedIndex) closeWhatNotInList(fNames []string) { + var toDelete []*filesItem ii.files.Walk(func(items []*filesItem) bool { + Loop1: for _, item := range items { - if item.decompressor != nil { - if err := item.decompressor.Close(); err != nil { - log.Trace("close", "err", err, "file", item.index.FileName()) - } - item.decompressor = nil - } - if item.index != nil { - if err := item.index.Close(); err != nil { - log.Trace("close", "err", err, "file", item.index.FileName()) + for _, protectName := range fNames { + if item.decompressor != nil && item.decompressor.FileName() == protectName { + continue Loop1 } - item.index = nil } + toDelete = append(toDelete, item) } return true }) - if ii.localityIndex != nil { - ii.localityIndex.Close() + for _, item := range toDelete { + if item.decompressor != nil { + if err := item.decompressor.Close(); err != nil { + log.Trace("close", "err", err, "file", item.index.FileName()) + } + item.decompressor = nil + } + if item.index != nil { + if err := item.index.Close(); err != nil { + log.Trace("close", "err", err, "file", item.index.FileName()) + } + item.index = nil + } + ii.files.Delete(item) } - ii.files.Clear() - ii.reCalcRoFiles() } func (ii *InvertedIndex) Close() { - ii.closeFiles() + ii.localityIndex.Close() + ii.closeWhatNotInList([]string{}) + ii.reCalcRoFiles() } func (ii *InvertedIndex) Files() (res []string) { @@ -372,51 +396,40 @@ func (ii *InvertedIndex) SetTxNum(txNum uint64) { binary.BigEndian.PutUint64(ii.txNumBytes[:], ii.txNum) } -func (ii *InvertedIndex) add(key, indexKey []byte) (err error) { - ii.walLock.RLock() - err = ii.wal.add(key, indexKey) - ii.walLock.RUnlock() - return err -} - +// Add - !NotThreadSafe. 
Must use WalRLock/BatchHistoryWriteEnd func (ii *InvertedIndex) Add(key []byte) error { - return ii.add(key, key) + return ii.wal.add(key, key) +} +func (ii *InvertedIndex) add(key, indexKey []byte) error { //nolint + return ii.wal.add(key, indexKey) } func (ii *InvertedIndex) DiscardHistory(tmpdir string) { - ii.walLock.Lock() - defer ii.walLock.Unlock() ii.wal = ii.newWriter(tmpdir, false, true) } -func (ii *InvertedIndex) StartWrites(tmpdir string) { - ii.walLock.Lock() - defer ii.walLock.Unlock() - ii.wal = ii.newWriter(tmpdir, WALCollectorRam > 0, false) +func (ii *InvertedIndex) StartWrites() { + ii.wal = ii.newWriter(ii.tmpdir, WALCollectorRAM > 0, false) } func (ii *InvertedIndex) FinishWrites() { - ii.walLock.Lock() - defer ii.walLock.Unlock() ii.wal.close() ii.wal = nil } func (ii *InvertedIndex) Rotate() *invertedIndexWAL { - ii.walLock.Lock() - defer ii.walLock.Unlock() - if ii.wal != nil { - ii.wal.index, ii.wal.indexFlushing = ii.wal.indexFlushing, ii.wal.index - ii.wal.indexKeys, ii.wal.indexKeysFlushing = ii.wal.indexKeysFlushing, ii.wal.indexKeys + wal := ii.wal + if wal != nil { + ii.wal = ii.newWriter(ii.wal.tmpdir, ii.wal.buffered, ii.wal.discard) } - return ii.wal + return wal } type invertedIndexWAL struct { - ii *InvertedIndex - index, indexFlushing *etl.Collector - indexKeys, indexKeysFlushing *etl.Collector - tmpdir string - buffered bool - discard bool + ii *InvertedIndex + index *etl.Collector + indexKeys *etl.Collector + tmpdir string + buffered bool + discard bool } // loadFunc - is analog of etl.Identity, but it signaling to etl - use .Put instead of .AppendDup - to allow duplicates @@ -429,12 +442,13 @@ func (ii *invertedIndexWAL) Flush(ctx context.Context, tx kv.RwTx) error { if ii.discard { return nil } - if err := ii.indexFlushing.Load(tx, ii.ii.indexTable, loadFunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + if err := ii.index.Load(tx, ii.ii.indexTable, loadFunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { return err } - if err := ii.indexKeysFlushing.Load(tx, ii.ii.indexKeysTable, loadFunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + if err := ii.indexKeys.Load(tx, ii.ii.indexKeysTable, loadFunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil { return err } + ii.close() return nil } @@ -451,13 +465,13 @@ func (ii *invertedIndexWAL) close() { } // 3 history + 4 indices = 10 etl collectors, 10*256Mb/8 = 512mb - for all indices buffers -var WALCollectorRam = 2 * (etl.BufferOptimalSize / 8) +var WALCollectorRAM = 2 * (etl.BufferOptimalSize / 8) func init() { v, _ := os.LookupEnv("ERIGON_WAL_COLLETOR_RAM") if v != "" { var err error - WALCollectorRam, err = datasize.ParseString(v) + WALCollectorRAM, err = datasize.ParseString(v) if err != nil { panic(err) } @@ -472,14 +486,10 @@ func (ii *InvertedIndex) newWriter(tmpdir string, buffered, discard bool) *inver } if buffered { // etl collector doesn't fsync: means if have enough ram, all files produced by all collectors will be in ram - w.index = etl.NewCollector(ii.indexTable, tmpdir, etl.NewSortableBuffer(WALCollectorRam)) - w.indexFlushing = etl.NewCollector(ii.indexTable, tmpdir, etl.NewSortableBuffer(WALCollectorRam)) - w.indexKeys = etl.NewCollector(ii.indexKeysTable, tmpdir, etl.NewSortableBuffer(WALCollectorRam)) - w.indexKeysFlushing = etl.NewCollector(ii.indexKeysTable, tmpdir, etl.NewSortableBuffer(WALCollectorRam)) + w.index = etl.NewCollector(ii.indexTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM)) + w.indexKeys = etl.NewCollector(ii.indexKeysTable, tmpdir, 
etl.NewSortableBuffer(WALCollectorRAM)) w.index.LogLvl(log.LvlTrace) - w.indexFlushing.LogLvl(log.LvlTrace) w.indexKeys.LogLvl(log.LvlTrace) - w.indexKeysFlushing.LogLvl(log.LvlTrace) } return w } @@ -512,19 +522,11 @@ func (ii *InvertedIndex) MakeContext() *InvertedIndexContext { var ic = InvertedIndexContext{ ii: ii, files: *ii.roFiles.Load(), + loc: ii.localityIndex.MakeContext(), } for _, item := range ic.files { if !item.src.frozen { - item.src.refcount.Inc() - } - } - - if ic.ii.localityIndex != nil { - ic.loc.file = ic.ii.localityIndex.file - ic.loc.reader = ic.ii.localityIndex.NewIdxReader() - ic.loc.bm = ic.ii.localityIndex.bm - if ic.loc.file != nil { - ic.loc.file.refcount.Inc() + item.src.refcount.Add(1) } } return &ic @@ -534,57 +536,228 @@ func (ic *InvertedIndexContext) Close() { if item.src.frozen { continue } - refCnt := item.src.refcount.Dec() + refCnt := item.src.refcount.Add(-1) //GC: last reader responsible to remove useles files: close it and delete if refCnt == 0 && item.src.canDelete.Load() { item.src.closeFilesAndRemove() } } - if ic.loc.file != nil { - refCnt := ic.loc.file.refcount.Dec() - if refCnt == 0 && ic.loc.file.canDelete.Load() { - ic.ii.localityIndex.closeFilesAndRemove(ic.loc) - ic.loc.file, ic.loc.bm = nil, nil + + for _, r := range ic.readers { + r.Close() + } + + ic.loc.Close() +} + +type InvertedIndexContext struct { + ii *InvertedIndex + files []ctxItem // have no garbage (overlaps, etc...) + getters []*compress.Getter + readers []*recsplit.IndexReader + loc *ctxLocalityIdx +} + +func (ic *InvertedIndexContext) statelessGetter(i int) *compress.Getter { + if ic.getters == nil { + ic.getters = make([]*compress.Getter, len(ic.files)) + } + r := ic.getters[i] + if r == nil { + r = ic.files[i].src.decompressor.MakeGetter() + ic.getters[i] = r + } + return r +} +func (ic *InvertedIndexContext) statelessIdxReader(i int) *recsplit.IndexReader { + if ic.readers == nil { + ic.readers = make([]*recsplit.IndexReader, len(ic.files)) + } + r := ic.readers[i] + if r == nil { + r = ic.files[i].src.index.GetReaderFromPool() + ic.readers[i] = r + } + return r +} + +func (ic *InvertedIndexContext) getFile(from, to uint64) (it ctxItem, ok bool) { + for _, item := range ic.files { + if item.startTxNum == from && item.endTxNum == to { + return item, true + } + } + return it, false +} + +// IdxRange - return range of txNums for given `key` +// is to be used in public API, therefore it relies on read-only transaction +// so that iteration can be done even when the inverted index is being updated. 
+// [startTxNum; endTxNum) +func (ic *InvertedIndexContext) IdxRange(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { + frozenIt, err := ic.iterateRangeFrozen(key, startTxNum, endTxNum, asc, limit) + if err != nil { + return nil, err + } + recentIt, err := ic.recentIterateRange(key, startTxNum, endTxNum, asc, limit, roTx) + if err != nil { + return nil, err + } + return iter.Union[uint64](frozenIt, recentIt, asc, limit), nil +} + +func (ic *InvertedIndexContext) recentIterateRange(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { + //optimization: return empty pre-allocated iterator if range is frozen + if asc { + isFrozenRange := len(ic.files) > 0 && endTxNum >= 0 && ic.files[len(ic.files)-1].endTxNum >= uint64(endTxNum) + if isFrozenRange { + return iter.EmptyU64, nil } + } else { + isFrozenRange := len(ic.files) > 0 && startTxNum >= 0 && ic.files[len(ic.files)-1].endTxNum >= uint64(startTxNum) + if isFrozenRange { + return iter.EmptyU64, nil + } + } + + var from []byte + if startTxNum >= 0 { + from = make([]byte, 8) + binary.BigEndian.PutUint64(from, uint64(startTxNum)) + } + + var to []byte + if endTxNum >= 0 { + to = make([]byte, 8) + binary.BigEndian.PutUint64(to, uint64(endTxNum)) } + + it, err := roTx.RangeDupSort(ic.ii.indexTable, key, from, to, asc, limit) + if err != nil { + return nil, err + } + return iter.TransformKV2U64(it, func(_, v []byte) (uint64, error) { + return binary.BigEndian.Uint64(v), nil + }), nil } -// InvertedIterator allows iteration over range of tx numbers +// iterateRangeFrozen walks the frozen (immutable) files only; the still-mutable recent +// part of the range lives in the DB and is served by recentIterateRange above. +// [startTxNum; endTxNum) +func (ic *InvertedIndexContext) iterateRangeFrozen(key []byte, startTxNum, endTxNum int, asc order.By, limit int) (*FrozenInvertedIdxIter, error) { + if asc && (startTxNum >= 0 && endTxNum >= 0) && startTxNum > endTxNum { + return nil, fmt.Errorf("startTxNum=%d expected to be lower than endTxNum=%d", startTxNum, endTxNum) + } + if !asc && (startTxNum >= 0 && endTxNum >= 0) && startTxNum < endTxNum { + return nil, fmt.Errorf("startTxNum=%d expected to be bigger than endTxNum=%d", startTxNum, endTxNum) + } + + it := &FrozenInvertedIdxIter{ + key: key, + startTxNum: startTxNum, + endTxNum: endTxNum, + indexTable: ic.ii.indexTable, + orderAscend: asc, + limit: limit, + ef: eliasfano32.NewEliasFano(1, 1), + } + if asc { + for i := len(ic.files) - 1; i >= 0; i-- { + // [from,to) && from < to + if endTxNum >= 0 && int(ic.files[i].startTxNum) >= endTxNum { + continue + } + if startTxNum >= 0 && ic.files[i].endTxNum <= uint64(startTxNum) { + break + } + it.stack = append(it.stack, ic.files[i]) + it.stack[len(it.stack)-1].getter = it.stack[len(it.stack)-1].src.decompressor.MakeGetter() + it.stack[len(it.stack)-1].reader = it.stack[len(it.stack)-1].src.index.GetReaderFromPool() + it.hasNext = true + } + } else { + for i := 0; i < len(ic.files); i++ { + // [from,to) && from > to + if endTxNum >= 0 && int(ic.files[i].endTxNum) <= endTxNum { + continue + } + if startTxNum >= 0 && ic.files[i].startTxNum > uint64(startTxNum) { + break + } + + it.stack = append(it.stack, ic.files[i]) + it.stack[len(it.stack)-1].getter = it.stack[len(it.stack)-1].src.decompressor.MakeGetter() + it.stack[len(it.stack)-1].reader = it.stack[len(it.stack)-1].src.index.GetReaderFromPool() + it.hasNext = true + } + } + it.advance() + return it, nil +} +
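A quick usage sketch, not part of the patch: `IdxRange` above unions the frozen-file iterator with the recent-DB iterator, so a caller sees one ordered stream of txNums and never needs to know where the `.ef` files end and the DupSort table begins. Assuming an open `InvertedIndexContext` named `ic`, a key `k`, and a read-only tx `roTx`, as in the tests elsewhere in this patch:

	it, err := ic.IdxRange(k, 0, -1, order.Asc, -1, roTx) // whole history of k, ascending
	if err != nil {
		return err
	}
	for it.HasNext() {
		txNum, err := it.Next() // older txNums come from frozen files, newer ones from the DB
		if err != nil {
			return err
		}
		fmt.Println(txNum)
	}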
+// FrozenInvertedIdxIter allows iteration over range of tx numbers // Iteration is not implmented via callback function, because there is often // a requirement for interators to be composable (for example, to implement AND and OR for indices) -// InvertedIterator must be closed after use to prevent leaking of resources like cursor -type InvertedIterator struct { +// FrozenInvertedIdxIter must be closed after use to prevent leaking of resources like cursor +type FrozenInvertedIdxIter struct { key []byte startTxNum, endTxNum int limit int orderAscend order.By - roTx kv.Tx - cursor kv.CursorDupSort efIt iter.Unary[uint64] indexTable string stack []ctxItem - nextN uint64 - hasNextInDb, hasNextInFiles bool - nextErrInDB, nextErrInFile error + nextN uint64 + hasNext bool + err error - res []uint64 - bm *roaring64.Bitmap + ef *eliasfano32.EliasFano } -func (it *InvertedIterator) Close() { - if it.cursor != nil { - it.cursor.Close() +func (it *FrozenInvertedIdxIter) Close() { + for _, item := range it.stack { + item.reader.Close() } - bitmapdb.ReturnToPool64(it.bm) } -func (it *InvertedIterator) advanceInFiles() { +func (it *FrozenInvertedIdxIter) advance() { + if it.orderAscend { + if it.hasNext { + it.advanceInFiles() + } + } else { + if it.hasNext { + it.advanceInFiles() + } + } +} + +func (it *FrozenInvertedIdxIter) HasNext() bool { + if it.err != nil { // always true, then .Next() call will return this error + return true + } + if it.limit == 0 { // limit reached + return false + } + return it.hasNext +} + +func (it *FrozenInvertedIdxIter) Next() (uint64, error) { return it.next(), nil } + +func (it *FrozenInvertedIdxIter) next() uint64 { + it.limit-- + n := it.nextN + it.advance() + return n +} + +func (it *FrozenInvertedIdxIter) advanceInFiles() { for { for it.efIt == nil { //TODO: this loop may be optimized by LocalityIndex if len(it.stack) == 0 { - it.hasNextInFiles = false + it.hasNext = false return } item := it.stack[len(it.stack)-1] @@ -595,12 +768,15 @@ func (it *InvertedIterator) advanceInFiles() { k, _ := g.NextUncompressed() if bytes.Equal(k, it.key) { eliasVal, _ := g.NextUncompressed() - ef, _ := eliasfano32.ReadEliasFano(eliasVal) - + it.ef.Reset(eliasVal) if it.orderAscend { - it.efIt = ef.Iterator() + efiter := it.ef.Iterator() + if it.startTxNum > 0 { + efiter.Seek(uint64(it.startTxNum)) + } + it.efIt = efiter } else { - it.efIt = ef.ReverseIterator() + it.efIt = it.ef.ReverseIterator() } } } @@ -612,11 +788,11 @@ func (it *InvertedIterator) advanceInFiles() { for it.efIt.HasNext() { n, _ := it.efIt.Next() if it.endTxNum >= 0 && int(n) >= it.endTxNum { - it.hasNextInFiles = false + it.hasNext = false return } if int(n) >= it.startTxNum { - it.hasNextInFiles = true + it.hasNext = true it.nextN = n return } @@ -625,11 +801,11 @@ func (it *InvertedIterator) advanceInFiles() { for it.efIt.HasNext() { n, _ := it.efIt.Next() if int(n) <= it.endTxNum { - it.hasNextInFiles = false + it.hasNext = false return } if it.startTxNum >= 0 && int(n) <= it.startTxNum { - it.hasNextInFiles = true + it.hasNext = true it.nextN = n return }
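Two eliasfano32 details in the hot path above are worth calling out: the iterator now owns a single `EliasFano` that is `Reset` on each `.ef` value instead of allocating a new one per key (the old `ReadEliasFano` call), and ascending iteration `Seek`s straight to `startTxNum` rather than scanning from the first offset. A hedged round-trip sketch of that API, mirroring how `buildFiles` encodes and `advanceInFiles` decodes — the concrete txNums are invented for illustration:

	ef := eliasfano32.NewEliasFano(3, 900) // cardinality and max value, as in buildFiles
	for _, txNum := range []uint64{100, 500, 900} {
		ef.AddOffset(txNum)
	}
	ef.Build()
	buf := ef.AppendBytes(nil) // serialized form, stored as the value in the .ef file

	reader := eliasfano32.NewEliasFano(1, 1) // placeholder sizes; Reset overwrites them
	reader.Reset(buf)                        // reuse one instance across keys
	efIt := reader.Iterator()
	efIt.Seek(400) // jump to startTxNum instead of scanning past 100
	for efIt.HasNext() {
		n, _ := efIt.Next()
		fmt.Println(n) // 500, then 900
	}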
@@ -639,7 +815,34 @@ } } -func (it *InvertedIterator) advanceInDb() { +// RecentInvertedIdxIter allows iteration over range of tx numbers +// Iteration is not implemented via callback function, because there is often +// a requirement for iterators to be composable (for example, to implement AND and OR for indices) +type RecentInvertedIdxIter struct { + key []byte + startTxNum, endTxNum int + limit int + orderAscend order.By + + roTx kv.Tx + cursor kv.CursorDupSort + indexTable string + + nextN uint64 + hasNext bool + err error + + bm *roaring64.Bitmap +} + +func (it *RecentInvertedIdxIter) Close() { + if it.cursor != nil { + it.cursor.Close() + } + bitmapdb.ReturnToPool64(it.bm) +} + +func (it *RecentInvertedIdxIter) advanceInDB() { var v []byte var err error if it.cursor == nil { @@ -652,7 +855,7 @@ panic(err) } if k == nil { - it.hasNextInDb = false + it.hasNext = false return } //Asc: [from, to) AND from > to @@ -672,7 +875,7 @@ } } if v == nil { - it.hasNextInDb = false + it.hasNext = false return } } @@ -701,11 +904,11 @@ } n := binary.BigEndian.Uint64(v) if it.endTxNum >= 0 && int(n) >= it.endTxNum { - it.hasNextInDb = false + it.hasNext = false return } if int(n) >= it.startTxNum { - it.hasNextInDb = true + it.hasNext = true it.nextN = n return } @@ -718,173 +921,50 @@ } n := binary.BigEndian.Uint64(v) if int(n) <= it.endTxNum { - it.hasNextInDb = false + it.hasNext = false return } if it.startTxNum >= 0 && int(n) <= it.startTxNum { - it.hasNextInDb = true + it.hasNext = true it.nextN = n return } } } - it.hasNextInDb = false + it.hasNext = false } -func (it *InvertedIterator) advance() { +func (it *RecentInvertedIdxIter) advance() { if it.orderAscend { - if it.hasNextInFiles { - it.advanceInFiles() - } - if it.hasNextInDb && !it.hasNextInFiles { - it.advanceInDb() + if it.hasNext { + it.advanceInDB() } } else { - if it.hasNextInDb { - it.advanceInDb() - } - if it.hasNextInFiles && !it.hasNextInDb { - it.advanceInFiles() + if it.hasNext { + it.advanceInDB() } } } -func (it *InvertedIterator) HasNext() bool { - if it.nextErrInDB != nil || it.nextErrInFile != nil { // always true, then .Next() call will return this error +func (it *RecentInvertedIdxIter) HasNext() bool { + if it.err != nil { // always true, then .Next() call will return this error return true } if it.limit == 0 { // limit reached return false } - return it.hasNextInFiles || it.hasNextInDb + return it.hasNext } -func (it *InvertedIterator) Next() (uint64, error) { return it.next(), nil } -func (it *InvertedIterator) NextBatch() ([]uint64, error) { - it.res = append(it.res[:0], it.next()) - for it.HasNext() && len(it.res) < 128 { - it.res = append(it.res, it.next()) +func (it *RecentInvertedIdxIter) Next() (uint64, error) { + if it.err != nil { + return 0, it.err } - return it.res, nil -} - -func (it *InvertedIterator) next() uint64 { it.limit-- n := it.nextN it.advance() - return n -} -func (it *InvertedIterator) ToArray() (res []uint64) { - for it.HasNext() { - res = append(res, it.next()) - } - return res -} -func (it *InvertedIterator) ToBitmap() (*roaring64.Bitmap, error) { - it.bm = bitmapdb.NewBitmap64() - bm := it.bm - for it.HasNext() { - bm.Add(it.next()) - } - return bm, nil -} - -type InvertedIndexContext struct { - ii *InvertedIndex - files []ctxItem // have no garbage (overlaps, etc...) 
- getters []*compress.Getter - readers []*recsplit.IndexReader - loc ctxLocalityItem -} - -func (ic *InvertedIndexContext) statelessGetter(i int) *compress.Getter { - if ic.getters == nil { - ic.getters = make([]*compress.Getter, len(ic.files)) - } - r := ic.getters[i] - if r == nil { - r = ic.files[i].src.decompressor.MakeGetter() - ic.getters[i] = r - } - return r -} -func (ic *InvertedIndexContext) statelessIdxReader(i int) *recsplit.IndexReader { - if ic.readers == nil { - ic.readers = make([]*recsplit.IndexReader, len(ic.files)) - } - r := ic.readers[i] - if r == nil { - r = recsplit.NewIndexReader(ic.files[i].src.index) - ic.readers[i] = r - } - return r -} - -func (ic *InvertedIndexContext) getFile(from, to uint64) (it ctxItem, ok bool) { - for _, item := range ic.files { - if item.startTxNum == from && item.endTxNum == to { - return item, true - } - } - return it, false -} - -// IterateRange is to be used in public API, therefore it relies on read-only transaction -// so that iteration can be done even when the inverted index is being updated. -// [startTxNum; endNumTx) -func (ic *InvertedIndexContext) IterateRange(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (*InvertedIterator, error) { - if asc && (startTxNum >= 0 && endTxNum >= 0) && startTxNum > endTxNum { - return nil, fmt.Errorf("startTxNum=%d epected to be lower than endTxNum=%d", startTxNum, endTxNum) - } - if !asc && (startTxNum >= 0 && endTxNum >= 0) && startTxNum < endTxNum { - return nil, fmt.Errorf("startTxNum=%d epected to be bigger than endTxNum=%d", startTxNum, endTxNum) - } - - it := &InvertedIterator{ - key: key, - startTxNum: startTxNum, - endTxNum: endTxNum, - indexTable: ic.ii.indexTable, - roTx: roTx, - hasNextInDb: true, - orderAscend: asc, - limit: limit, - } - if asc { - for i := len(ic.files) - 1; i >= 0; i-- { - // [from,to) && from < to - if endTxNum >= 0 && int(ic.files[i].startTxNum) >= endTxNum { - continue - } - if startTxNum >= 0 && ic.files[i].endTxNum <= uint64(startTxNum) { - break - } - it.stack = append(it.stack, ic.files[i]) - it.stack[len(it.stack)-1].getter = it.stack[len(it.stack)-1].src.decompressor.MakeGetter() - it.stack[len(it.stack)-1].reader = recsplit.NewIndexReader(it.stack[len(it.stack)-1].src.index) - it.hasNextInFiles = true - } - it.hasNextInDb = len(it.stack) == 0 || endTxNum < 0 || it.stack[0].endTxNum < uint64(endTxNum) - } else { - for i := 0; i < len(ic.files); i++ { - // [from,to) && from > to - if endTxNum >= 0 && int(ic.files[i].endTxNum) <= endTxNum { - continue - } - if startTxNum >= 0 && ic.files[i].startTxNum > uint64(startTxNum) { - break - } - - it.stack = append(it.stack, ic.files[i]) - it.stack[len(it.stack)-1].getter = it.stack[len(it.stack)-1].src.decompressor.MakeGetter() - it.stack[len(it.stack)-1].reader = recsplit.NewIndexReader(it.stack[len(it.stack)-1].src.index) - it.hasNextInFiles = true - } - it.hasNextInDb = len(it.stack) == 0 || startTxNum < 0 || it.stack[len(it.stack)-1].endTxNum < uint64(startTxNum) - } - it.advance() - return it, nil + return n, nil } type InvertedIterator1 struct { @@ -1037,7 +1117,7 @@ func (ic *InvertedIndexContext) IterateChangedKeys(startTxNum, endTxNum uint64, return ii1 } -func (ii *InvertedIndex) collate(ctx context.Context, txFrom, txTo uint64, roTx kv.Tx, logEvery *time.Ticker) (map[string]*roaring64.Bitmap, error) { +func (ii *InvertedIndex) collate(ctx context.Context, txFrom, txTo uint64, roTx kv.Tx) (map[string]*roaring64.Bitmap, error) { keysCursor, err := 
roTx.CursorDupSort(ii.indexKeysTable) if err != nil { return nil, fmt.Errorf("create %s keys cursor: %w", ii.filenameBase, err) @@ -1061,12 +1141,8 @@ func (ii *InvertedIndex) collate(ctx context.Context, txFrom, txTo uint64, roTx bitmap.Add(txNum) select { - case <-logEvery.C: - log.Info("[snapshots] collate history", "name", ii.filenameBase, "range", fmt.Sprintf("%.2f-%.2f", float64(txNum)/float64(ii.aggregationStep), float64(txTo)/float64(ii.aggregationStep))) - bitmap.RunOptimize() case <-ctx.Done(): - err := ctx.Err() - return nil, err + return nil, ctx.Err() default: } } @@ -1090,7 +1166,7 @@ func (sf InvertedFiles) Close() { } } -func (ii *InvertedIndex) buildFiles(ctx context.Context, step uint64, bitmaps map[string]*roaring64.Bitmap) (InvertedFiles, error) { +func (ii *InvertedIndex) buildFiles(ctx context.Context, step uint64, bitmaps map[string]*roaring64.Bitmap, ps *background.ProgressSet) (InvertedFiles, error) { var decomp *compress.Decompressor var index *recsplit.Index var comp *compress.Compressor @@ -1111,43 +1187,53 @@ func (ii *InvertedIndex) buildFiles(ctx context.Context, step uint64, bitmaps ma }() txNumFrom := step * ii.aggregationStep txNumTo := (step + 1) * ii.aggregationStep - datPath := filepath.Join(ii.dir, fmt.Sprintf("%s.%d-%d.ef", ii.filenameBase, txNumFrom/ii.aggregationStep, txNumTo/ii.aggregationStep)) - comp, err = compress.NewCompressor(ctx, "ef", datPath, ii.tmpdir, compress.MinPatternScore, ii.compressWorkers, log.LvlTrace) - if err != nil { - return InvertedFiles{}, fmt.Errorf("create %s compressor: %w", ii.filenameBase, err) - } - var buf []byte + datFileName := fmt.Sprintf("%s.%d-%d.ef", ii.filenameBase, txNumFrom/ii.aggregationStep, txNumTo/ii.aggregationStep) + datPath := filepath.Join(ii.dir, datFileName) keys := make([]string, 0, len(bitmaps)) for key := range bitmaps { keys = append(keys, key) } slices.Sort(keys) - for _, key := range keys { - if err = comp.AddUncompressedWord([]byte(key)); err != nil { - return InvertedFiles{}, fmt.Errorf("add %s key [%x]: %w", ii.filenameBase, key, err) + { + p := ps.AddNew(datFileName, 1) + defer ps.Delete(p) + comp, err = compress.NewCompressor(ctx, "ef", datPath, ii.tmpdir, compress.MinPatternScore, ii.compressWorkers, log.LvlTrace) + if err != nil { + return InvertedFiles{}, fmt.Errorf("create %s compressor: %w", ii.filenameBase, err) } - bitmap := bitmaps[key] - ef := eliasfano32.NewEliasFano(bitmap.GetCardinality(), bitmap.Maximum()) - it := bitmap.Iterator() - for it.HasNext() { - ef.AddOffset(it.Next()) + var buf []byte + for _, key := range keys { + if err = comp.AddUncompressedWord([]byte(key)); err != nil { + return InvertedFiles{}, fmt.Errorf("add %s key [%x]: %w", ii.filenameBase, key, err) + } + bitmap := bitmaps[key] + ef := eliasfano32.NewEliasFano(bitmap.GetCardinality(), bitmap.Maximum()) + it := bitmap.Iterator() + for it.HasNext() { + ef.AddOffset(it.Next()) + } + ef.Build() + buf = ef.AppendBytes(buf[:0]) + if err = comp.AddUncompressedWord(buf); err != nil { + return InvertedFiles{}, fmt.Errorf("add %s val: %w", ii.filenameBase, err) + } } - ef.Build() - buf = ef.AppendBytes(buf[:0]) - if err = comp.AddUncompressedWord(buf); err != nil { - return InvertedFiles{}, fmt.Errorf("add %s val: %w", ii.filenameBase, err) + if err = comp.Compress(); err != nil { + return InvertedFiles{}, fmt.Errorf("compress %s: %w", ii.filenameBase, err) } + comp.Close() + comp = nil + ps.Delete(p) } - if err = comp.Compress(); err != nil { - return InvertedFiles{}, fmt.Errorf("compress %s: %w", 
ii.filenameBase, err) - } - comp.Close() - comp = nil if decomp, err = compress.NewDecompressor(datPath); err != nil { return InvertedFiles{}, fmt.Errorf("open %s decompressor: %w", ii.filenameBase, err) } - idxPath := filepath.Join(ii.dir, fmt.Sprintf("%s.%d-%d.efi", ii.filenameBase, txNumFrom/ii.aggregationStep, txNumTo/ii.aggregationStep)) - if index, err = buildIndex(ctx, decomp, idxPath, ii.tmpdir, len(keys), false /* values */); err != nil { + + idxFileName := fmt.Sprintf("%s.%d-%d.efi", ii.filenameBase, txNumFrom/ii.aggregationStep, txNumTo/ii.aggregationStep) + idxPath := filepath.Join(ii.dir, idxFileName) + p := ps.AddNew(idxFileName, uint64(decomp.Count()*2)) + defer ps.Delete(p) + if index, err = buildIndexThenOpen(ctx, decomp, idxPath, ii.tmpdir, len(keys), false /* values */, p); err != nil { return InvertedFiles{}, fmt.Errorf("build %s efi: %w", ii.filenameBase, err) } closeComp = false @@ -1165,7 +1251,7 @@ func (ii *InvertedIndex) integrateFiles(sf InvertedFiles, txNumFrom, txNumTo uin ii.reCalcRoFiles() } -func (ii *InvertedIndex) warmup(txFrom, limit uint64, tx kv.Tx) error { +func (ii *InvertedIndex) warmup(ctx context.Context, txFrom, limit uint64, tx kv.Tx) error { keysCursor, err := tx.CursorDupSort(ii.indexKeysTable) if err != nil { return fmt.Errorf("create %s keys cursor: %w", ii.filenameBase, err) @@ -1197,6 +1283,12 @@ func (ii *InvertedIndex) warmup(txFrom, limit uint64, tx kv.Tx) error { break } _, _ = idxC.SeekBothRange(v, k) + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } } if err != nil { return fmt.Errorf("iterate over %s keys: %w", ii.filenameBase, err) @@ -1228,6 +1320,9 @@ func (ii *InvertedIndex) prune(ctx context.Context, txFrom, txTo, limit uint64, return nil } + collector := etl.NewCollector("snapshots", ii.tmpdir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize)) + defer collector.Close() + idxC, err := ii.tx.RwCursorDupSort(ii.indexTable) if err != nil { return err @@ -1242,21 +1337,9 @@ func (ii *InvertedIndex) prune(ctx context.Context, txFrom, txTo, limit uint64, break } for ; err == nil && k != nil; k, v, err = keysCursor.NextDup() { - - if err = idxC.DeleteExact(v, k); err != nil { + if err := collector.Collect(v, nil); err != nil { return err } - //for vv, err := idxC.SeekBothRange(v, k); vv != nil; _, vv, err = idxC.NextDup() { - // if err != nil { - // return err - // } - // if binary.BigEndian.Uint64(vv) >= txTo { - // break - // } - // if err = idxC.DeleteCurrent(); err != nil { - // return err - // } - //} } // This DeleteCurrent needs to the last in the loop iteration, because it invalidates k and v @@ -1265,15 +1348,38 @@ func (ii *InvertedIndex) prune(ctx context.Context, txFrom, txTo, limit uint64, } select { case <-ctx.Done(): - return nil - case <-logEvery.C: - log.Info("[snapshots] prune history", "name", ii.filenameBase, "range", fmt.Sprintf("%.2f-%.2f", float64(txNum)/float64(ii.aggregationStep), float64(txTo)/float64(ii.aggregationStep))) + return ctx.Err() default: } } if err != nil { return fmt.Errorf("iterate over %s keys: %w", ii.filenameBase, err) } + + if err := collector.Load(ii.tx, "", func(key, _ []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + for v, err := idxC.SeekBothRange(key, txKey[:]); v != nil; _, v, err = idxC.NextDup() { + if err != nil { + return err + } + txNum := binary.BigEndian.Uint64(v) + if txNum >= txTo { + break + } + if err = idxC.DeleteCurrent(); err != nil { + return err + } + + select { + case <-logEvery.C: + log.Info("[snapshots] prune history", "name", 
ii.filenameBase, "to_step", fmt.Sprintf("%.2f", float64(txTo)/float64(ii.aggregationStep)), "prefix", fmt.Sprintf("%x", key[:8])) + default: + } + } + return nil + }, etl.TransformArgs{}); err != nil { + return err + } + return nil } @@ -1345,15 +1451,11 @@ func (ii *InvertedIndex) collectFilesStat() (filesCount, filesSize, idxSize uint } func (ii *InvertedIndex) CleanupDir() { - files, err := os.ReadDir(ii.dir) - if err != nil { - log.Warn("[clean] can't read dir", "err", err, "dir", ii.dir) - return - } - uselessFiles := ii.scanStateFiles(files, ii.integrityFileExtensions) + files, _ := ii.fileNamesOnDisk() + uselessFiles := ii.scanStateFiles(files) for _, f := range uselessFiles { fName := fmt.Sprintf("%s.%d-%d.ef", ii.filenameBase, f.startTxNum/ii.aggregationStep, f.endTxNum/ii.aggregationStep) - err = os.Remove(filepath.Join(ii.dir, fName)) + err := os.Remove(filepath.Join(ii.dir, fName)) log.Debug("[clean] remove", "file", fName, "err", err) fIdxName := fmt.Sprintf("%s.%d-%d.efi", ii.filenameBase, f.startTxNum/ii.aggregationStep, f.endTxNum/ii.aggregationStep) err = os.Remove(filepath.Join(ii.dir, fIdxName)) diff --git a/state/inverted_index_test.go b/state/inverted_index_test.go index ee7695a3c..3d56a25ef 100644 --- a/state/inverted_index_test.go +++ b/state/inverted_index_test.go @@ -23,9 +23,9 @@ import ( "math" "os" "testing" - "testing/fstest" "time" + "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/log/v3" @@ -67,7 +67,7 @@ func TestInvIndexCollationBuild(t *testing.T) { require.NoError(t, err) defer tx.Rollback() ii.SetTx(tx) - ii.StartWrites("") + ii.StartWrites() defer ii.FinishWrites() ii.SetTxNum(2) @@ -93,14 +93,14 @@ func TestInvIndexCollationBuild(t *testing.T) { require.NoError(t, err) defer roTx.Rollback() - bs, err := ii.collate(ctx, 0, 7, roTx, logEvery) + bs, err := ii.collate(ctx, 0, 7, roTx) require.NoError(t, err) require.Equal(t, 3, len(bs)) require.Equal(t, []uint64{3}, bs["key2"].ToArray()) require.Equal(t, []uint64{2, 6}, bs["key1"].ToArray()) require.Equal(t, []uint64{6}, bs["key3"].ToArray()) - sf, err := ii.buildFiles(ctx, 0, bs) + sf, err := ii.buildFiles(ctx, 0, bs, background.NewProgressSet()) require.NoError(t, err) defer sf.Close() @@ -145,7 +145,7 @@ func TestInvIndexAfterPrune(t *testing.T) { } }() ii.SetTx(tx) - ii.StartWrites("") + ii.StartWrites() defer ii.FinishWrites() ii.SetTxNum(2) @@ -171,10 +171,10 @@ func TestInvIndexAfterPrune(t *testing.T) { require.NoError(t, err) defer roTx.Rollback() - bs, err := ii.collate(ctx, 0, 16, roTx, logEvery) + bs, err := ii.collate(ctx, 0, 16, roTx) require.NoError(t, err) - sf, err := ii.buildFiles(ctx, 0, bs) + sf, err := ii.buildFiles(ctx, 0, bs, background.NewProgressSet()) require.NoError(t, err) tx, err = db.BeginRw(ctx) @@ -216,7 +216,7 @@ func filledInvIndexOfSize(tb testing.TB, txs, aggStep, module uint64) (string, k require.NoError(err) defer tx.Rollback() ii.SetTx(tx) - ii.StartWrites("") + ii.StartWrites() defer ii.FinishWrites() var flusher flusher @@ -262,9 +262,8 @@ func checkRanges(t *testing.T, db kv.RwDB, ii *InvertedIndex, txs uint64) { binary.BigEndian.PutUint64(k[:], keyNum) var values []uint64 t.Run("asc", func(t *testing.T) { - it, err := ic.IterateRange(k[:], 0, 976, order.Asc, -1, nil) + it, err := ic.IdxRange(k[:], 0, 976, order.Asc, -1, nil) require.NoError(t, err) - defer it.Close() for i := keyNum; i < 976; i += keyNum { label := fmt.Sprintf("keyNum=%d, 
txNum=%d", keyNum, i) require.True(t, it.HasNext(), label) @@ -277,34 +276,29 @@ func checkRanges(t *testing.T, db kv.RwDB, ii *InvertedIndex, txs uint64) { }) t.Run("desc", func(t *testing.T) { - reverseStream, err := ic.IterateRange(k[:], 976-1, 0, order.Desc, -1, nil) + reverseStream, err := ic.IdxRange(k[:], 976-1, 0, order.Desc, -1, nil) require.NoError(t, err) - defer reverseStream.Close() iter.ExpectEqualU64(t, iter.ReverseArray(values), reverseStream) }) t.Run("unbounded asc", func(t *testing.T) { - forwardLimited, err := ic.IterateRange(k[:], -1, 976, order.Asc, 2, nil) + forwardLimited, err := ic.IdxRange(k[:], -1, 976, order.Asc, 2, nil) require.NoError(t, err) - defer forwardLimited.Close() iter.ExpectEqualU64(t, iter.Array(values[:2]), forwardLimited) }) t.Run("unbounded desc", func(t *testing.T) { - reverseLimited, err := ic.IterateRange(k[:], 976-1, -1, order.Desc, 2, nil) + reverseLimited, err := ic.IdxRange(k[:], 976-1, -1, order.Desc, 2, nil) require.NoError(t, err) - defer reverseLimited.Close() iter.ExpectEqualU64(t, iter.ReverseArray(values[len(values)-2:]), reverseLimited) }) t.Run("tiny bound asc", func(t *testing.T) { - it, err := ic.IterateRange(k[:], 100, 102, order.Asc, -1, nil) + it, err := ic.IdxRange(k[:], 100, 102, order.Asc, -1, nil) require.NoError(t, err) - defer it.Close() expect := iter.FilterU64(iter.Array(values), func(k uint64) bool { return k >= 100 && k < 102 }) iter.ExpectEqualU64(t, expect, it) }) t.Run("tiny bound desc", func(t *testing.T) { - it, err := ic.IterateRange(k[:], 102, 100, order.Desc, -1, nil) + it, err := ic.IdxRange(k[:], 102, 100, order.Desc, -1, nil) require.NoError(t, err) - defer it.Close() expect := iter.FilterU64(iter.ReverseArray(values), func(k uint64) bool { return k <= 102 && k > 100 }) iter.ExpectEqualU64(t, expect, it) }) @@ -316,9 +310,8 @@ func checkRanges(t *testing.T, db kv.RwDB, ii *InvertedIndex, txs uint64) { for keyNum := uint64(1); keyNum <= uint64(31); keyNum++ { var k [8]byte binary.BigEndian.PutUint64(k[:], keyNum) - it, err := ic.IterateRange(k[:], 400, 1000, true, -1, roTx) + it, err := ic.IdxRange(k[:], 400, 1000, true, -1, roTx) require.NoError(t, err) - defer it.Close() var values []uint64 for i := keyNum * ((400 + keyNum - 1) / keyNum); i < txs; i += keyNum { label := fmt.Sprintf("keyNum=%d, txNum=%d", keyNum, i) @@ -330,10 +323,11 @@ func checkRanges(t *testing.T, db kv.RwDB, ii *InvertedIndex, txs uint64) { } require.False(t, it.HasNext()) - reverseStream, err := ic.IterateRange(k[:], 1000-1, 400-1, false, -1, roTx) + reverseStream, err := ic.IdxRange(k[:], 1000-1, 400-1, false, -1, roTx) require.NoError(t, err) - defer it.Close() - iter.ExpectEqualU64(t, iter.ReverseArray(values), reverseStream) + arr := iter.ToArrU64Must(reverseStream) + expect := iter.ToArrU64Must(iter.ReverseArray(values)) + require.Equal(t, expect, arr) } } @@ -351,9 +345,9 @@ func mergeInverted(tb testing.TB, db kv.RwDB, ii *InvertedIndex, txs uint64) { // Leave the last 2 aggregation steps un-collated for step := uint64(0); step < txs/ii.aggregationStep-1; step++ { func() { - bs, err := ii.collate(ctx, step*ii.aggregationStep, (step+1)*ii.aggregationStep, tx, logEvery) + bs, err := ii.collate(ctx, step*ii.aggregationStep, (step+1)*ii.aggregationStep, tx) require.NoError(tb, err) - sf, err := ii.buildFiles(ctx, step, bs) + sf, err := ii.buildFiles(ctx, step, bs, background.NewProgressSet()) require.NoError(tb, err) ii.integrateFiles(sf, step*ii.aggregationStep, (step+1)*ii.aggregationStep) err = ii.prune(ctx, 
step*ii.aggregationStep, (step+1)*ii.aggregationStep, math.MaxUint64, logEvery) @@ -365,7 +359,7 @@ func mergeInverted(tb testing.TB, db kv.RwDB, ii *InvertedIndex, txs uint64) { for found, startTxNum, endTxNum = ii.findMergeRange(maxEndTxNum, maxSpan); found; found, startTxNum, endTxNum = ii.findMergeRange(maxEndTxNum, maxSpan) { ic := ii.MakeContext() outs, _ := ii.staticFilesInRange(startTxNum, endTxNum, ic) - in, err := ii.mergeFiles(ctx, outs, startTxNum, endTxNum, 1) + in, err := ii.mergeFiles(ctx, outs, startTxNum, endTxNum, 1, background.NewProgressSet()) require.NoError(tb, err) ii.integrateMergedFiles(outs, in) require.NoError(tb, err) @@ -390,9 +384,9 @@ func TestInvIndexRanges(t *testing.T) { // Leave the last 2 aggregation steps un-collated for step := uint64(0); step < txs/ii.aggregationStep-1; step++ { func() { - bs, err := ii.collate(ctx, step*ii.aggregationStep, (step+1)*ii.aggregationStep, tx, logEvery) + bs, err := ii.collate(ctx, step*ii.aggregationStep, (step+1)*ii.aggregationStep, tx) require.NoError(t, err) - sf, err := ii.buildFiles(ctx, step, bs) + sf, err := ii.buildFiles(ctx, step, bs, background.NewProgressSet()) require.NoError(t, err) ii.integrateFiles(sf, step*ii.aggregationStep, (step+1)*ii.aggregationStep) err = ii.prune(ctx, step*ii.aggregationStep, (step+1)*ii.aggregationStep, math.MaxUint64, logEvery) @@ -425,17 +419,6 @@ func TestInvIndexScanFiles(t *testing.T) { checkRanges(t, db, ii, txs) } -func BenchmarkName(b *testing.B) { - _, db, ii, txs := filledInvIndex(b) - mergeInverted(b, db, ii, txs) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - ic := ii.MakeContext() - ic.Close() - } -} - func TestChangedKeysIterator(t *testing.T) { _, db, ii, txs := filledInvIndex(t) ctx := context.Background() @@ -501,39 +484,20 @@ func TestScanStaticFiles(t *testing.T) { ii := &InvertedIndex{filenameBase: "test", aggregationStep: 1, files: btree2.NewBTreeG[*filesItem](filesItemLess), } - ffs := fstest.MapFS{ - "test.0-1.ef": {}, - "test.1-2.ef": {}, - "test.0-4.ef": {}, - "test.2-3.ef": {}, - "test.3-4.ef": {}, - "test.4-5.ef": {}, + files := []string{ + "test.0-1.ef", + "test.1-2.ef", + "test.0-4.ef", + "test.2-3.ef", + "test.3-4.ef", + "test.4-5.ef", } - files, err := ffs.ReadDir(".") - require.NoError(t, err) - ii.scanStateFiles(files, nil) - var found []string - ii.files.Walk(func(items []*filesItem) bool { - for _, item := range items { - found = append(found, fmt.Sprintf("%d-%d", item.startTxNum, item.endTxNum)) - } - return true - }) - require.Equal(t, 6, len(found)) + ii.scanStateFiles(files) + require.Equal(t, 6, ii.files.Len()) + //integrity extension case ii.files.Clear() - ii.files.Walk(func(items []*filesItem) bool { - for _, item := range items { - fmt.Printf("%s\n", fmt.Sprintf("%d-%d", item.startTxNum, item.endTxNum)) - } - return true - }) - ii.scanStateFiles(files, []string{"v"}) - ii.files.Walk(func(items []*filesItem) bool { - for _, item := range items { - fmt.Printf("%s\n", fmt.Sprintf("%d-%d", item.startTxNum, item.endTxNum)) - } - return true - }) + ii.integrityFileExtensions = []string{"v"} + ii.scanStateFiles(files) require.Equal(t, 0, ii.files.Len()) }
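The locality_index.go hunks below finish the same migration applied to inverted_index.go above: `go.uber.org/atomic` is dropped in favour of the generic `atomic.Pointer` from Go 1.19's `sync/atomic`, and readers take an immutable snapshot (`roFiles`/`roBmFile`) instead of holding a lock. A minimal, self-contained sketch of that copy-on-read pattern — the `ctxItem` here is trimmed to two fields for illustration:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	type ctxItem struct{ startTxNum, endTxNum uint64 }

	func main() {
		var roFiles atomic.Pointer[[]ctxItem]
		roFiles.Store(&[]ctxItem{}) // publish an initial, empty snapshot

		// Writer: never mutates the published slice; builds a copy and swaps it in.
		cur := *roFiles.Load()
		next := append(append(make([]ctxItem, 0, len(cur)+1), cur...), ctxItem{0, 16})
		roFiles.Store(&next)

		// Reader (MakeContext-style): a single Load yields a stable view, no mutex held.
		files := *roFiles.Load()
		fmt.Println(len(files)) // 1
	}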
*filesItem bm *bitmapdb.FixedSizeBitmaps + + roFiles atomic.Pointer[ctxItem] + roBmFile atomic.Pointer[bitmapdb.FixedSizeBitmaps] } func NewLocalityIndex( @@ -62,32 +65,39 @@ } return li, nil } -func (li *LocalityIndex) reOpenFolder() error { - if li == nil { - return nil +func (li *LocalityIndex) closeWhatNotInList(fNames []string) { + if li == nil || li.bm == nil { + return } + for _, protectName := range fNames { + if li.bm.FileName() == protectName { + return + } + } li.closeFiles() - files, err := os.ReadDir(li.dir) - if err != nil { - return fmt.Errorf("LocalityIndex: %s, %w", li.filenameBase, err) +} + +func (li *LocalityIndex) OpenList(fNames []string) error { + if li == nil { + return nil } - _ = li.scanStateFiles(files) - if err = li.openFiles(); err != nil { - return fmt.Errorf("LocalityIndex: %s, %w", li.filenameBase, err) + li.closeWhatNotInList(fNames) + _ = li.scanStateFiles(fNames) + if err := li.openFiles(); err != nil { + return fmt.Errorf("LocalityIndex.openFiles: %s, %w", li.filenameBase, err) } return nil } -func (li *LocalityIndex) scanStateFiles(files []fs.DirEntry) (uselessFiles []*filesItem) { +func (li *LocalityIndex) scanStateFiles(fNames []string) (uselessFiles []*filesItem) { + if li == nil { + return nil + } + re := regexp.MustCompile("^" + li.filenameBase + ".([0-9]+)-([0-9]+).li$") var err error - for _, f := range files { - if !f.Type().IsRegular() { - continue - } - - name := f.Name() + for _, name := range fNames { subs := re.FindStringSubmatch(name) if len(subs) != 3 { if len(subs) != 0 { @@ -130,20 +140,30 @@ } func (li *LocalityIndex) openFiles() (err error) { - if li.file == nil { + if li == nil || li.file == nil { return nil } + fromStep, toStep := li.file.startTxNum/li.aggregationStep, li.file.endTxNum/li.aggregationStep - idxPath := filepath.Join(li.dir, fmt.Sprintf("%s.%d-%d.li", li.filenameBase, fromStep, toStep)) - li.file.index, err = recsplit.OpenIndex(idxPath) - if err != nil { - return fmt.Errorf("LocalityIndex.openFiles: %w, %s", err, idxPath) + if li.bm == nil { + dataPath := filepath.Join(li.dir, fmt.Sprintf("%s.%d-%d.l", li.filenameBase, fromStep, toStep)) + if dir.FileExist(dataPath) { + li.bm, err = bitmapdb.OpenFixedSizeBitmaps(dataPath, int((toStep-fromStep)/StepsInBiggestFile)) + if err != nil { + return err + } + } } - dataPath := filepath.Join(li.dir, fmt.Sprintf("%s.%d-%d.l", li.filenameBase, fromStep, toStep)) - li.bm, err = bitmapdb.OpenFixedSizeBitmaps(dataPath, int((toStep-fromStep)/StepsInBiggestFile)) - if err != nil { - return err + if li.file.index == nil { + idxPath := filepath.Join(li.dir, fmt.Sprintf("%s.%d-%d.li", li.filenameBase, fromStep, toStep)) + if dir.FileExist(idxPath) { + li.file.index, err = recsplit.OpenIndex(idxPath) + if err != nil { + return fmt.Errorf("LocalityIndex.openFiles: %w, %s", err, idxPath) + } + } } + li.reCalcRoFiles() return nil } @@ -160,13 +180,47 @@ } li.bm = nil } } +func (li *LocalityIndex) reCalcRoFiles() { + if li == nil || li.file == nil { + return + } + li.roFiles.Store(&ctxItem{ + startTxNum: li.file.startTxNum, + endTxNum: li.file.endTxNum, + i: 0, + src: li.file, + }) + li.roBmFile.Store(li.bm) +} -func (li *LocalityIndex) closeFilesAndRemove(i ctxLocalityItem) { +func (li *LocalityIndex) MakeContext() *ctxLocalityIdx { if li == nil { + return nil + } + x := &ctxLocalityIdx{ + file: li.roFiles.Load(), + bm: li.roBmFile.Load(), + } + if x.file
!= nil && x.file.src != nil { + x.file.src.refcount.Add(1) + } + return x +} + +func (out *ctxLocalityIdx) Close() { + if out == nil || out.file == nil || out.file.src == nil { return } - if i.file != nil { - i.file.closeFilesAndRemove() + refCnt := out.file.src.refcount.Add(-1) + if refCnt == 0 && out.file.src.canDelete.Load() { + closeLocalityIndexFilesAndRemove(out) + } +} + +func closeLocalityIndexFilesAndRemove(i *ctxLocalityIdx) { + if i.file.src != nil { + i.file.src.closeFilesAndRemove() + i.file.src = nil } if i.bm != nil { if err := i.bm.Close(); err != nil { @@ -175,10 +229,14 @@ func (li *LocalityIndex) closeFilesAndRemove(i ctxLocalityItem) { if err := os.Remove(i.bm.FilePath()); err != nil { log.Trace("os.Remove", "err", err, "file", i.bm.FileName()) } + i.bm = nil } } -func (li *LocalityIndex) Close() { li.closeFiles() } +func (li *LocalityIndex) Close() { + li.closeWhatNotInList([]string{}) + li.reCalcRoFiles() +} func (li *LocalityIndex) Files() (res []string) { return res } func (li *LocalityIndex) NewIdxReader() *recsplit.IndexReader { if li != nil && li.file != nil && li.file.index != nil { @@ -189,20 +247,24 @@ func (li *LocalityIndex) NewIdxReader() *recsplit.IndexReader { // LocalityIndex return exactly 2 file (step) // prevents searching key in many files -func (li *LocalityIndex) lookupIdxFiles(r *recsplit.IndexReader, bm *bitmapdb.FixedSizeBitmaps, file *filesItem, key []byte, fromTxNum uint64) (exactShard1, exactShard2 uint64, lastIndexedTxNum uint64, ok1, ok2 bool) { - if li == nil || r == nil || bm == nil || file == nil { +func (li *LocalityIndex) lookupIdxFiles(loc *ctxLocalityIdx, key []byte, fromTxNum uint64) (exactShard1, exactShard2 uint64, lastIndexedTxNum uint64, ok1, ok2 bool) { + if li == nil || loc == nil || loc.bm == nil { return 0, 0, 0, false, false } - if fromTxNum >= file.endTxNum { + if loc.reader == nil { + loc.reader = recsplit.NewIndexReader(loc.file.src.index) + } + + if fromTxNum >= loc.file.endTxNum { return 0, 0, fromTxNum, false, false } fromFileNum := fromTxNum / li.aggregationStep / StepsInBiggestFile - fn1, fn2, ok1, ok2, err := bm.First2At(r.Lookup(key), fromFileNum) + fn1, fn2, ok1, ok2, err := loc.bm.First2At(loc.reader.Lookup(key), fromFileNum) if err != nil { panic(err) } - return fn1 * StepsInBiggestFile, fn2 * StepsInBiggestFile, file.endTxNum, ok1, ok2 + return fn1 * StepsInBiggestFile, fn2 * StepsInBiggestFile, loc.file.endTxNum, ok1, ok2 } func (li *LocalityIndex) missedIdxFiles(ii *InvertedIndex) (toStep uint64, idxExists bool) { @@ -227,9 +289,10 @@ func (li *LocalityIndex) buildFiles(ctx context.Context, ii *InvertedIndex, toSt defer logEvery.Stop() fromStep := uint64(0) - + ic := ii.MakeContext() + defer ic.Close() count := 0 - it := ii.MakeContext().iterateKeysLocality(toStep * li.aggregationStep) + it := ic.iterateKeysLocality(toStep * li.aggregationStep) for it.HasNext() { _, _ = it.Next() count++ @@ -268,7 +331,7 @@ func (li *LocalityIndex) buildFiles(ctx context.Context, ii *InvertedIndex, toSt } defer dense.Close() - it = ii.MakeContext().iterateKeysLocality(toStep * li.aggregationStep) + it = ic.iterateKeysLocality(toStep * li.aggregationStep) for it.HasNext() { k, inFiles := it.Next() if err := dense.AddArray(i, inFiles); err != nil { @@ -283,7 +346,7 @@ func (li *LocalityIndex) buildFiles(ctx context.Context, ii *InvertedIndex, toSt case <-ctx.Done(): return nil, ctx.Err() case <-logEvery.C: - log.Debug("[LocalityIndex] build", "name", li.filenameBase, "progress", fmt.Sprintf("%.2f%%", 50+it.Progress()/2)) + 
log.Info("[LocalityIndex] build", "name", li.filenameBase, "progress", fmt.Sprintf("%.2f%%", 50+it.Progress()/2)) default: } } @@ -326,6 +389,7 @@ func (li *LocalityIndex) integrateFiles(sf LocalityIndexFiles, txNumFrom, txNumT frozen: false, } li.bm = sf.bm + li.reCalcRoFiles() } func (li *LocalityIndex) BuildMissedIndices(ctx context.Context, ii *InvertedIndex) error { @@ -447,18 +511,20 @@ func (li *LocalityIndex) CleanupDir() { if li == nil || li.dir == "" { return } - files, err := os.ReadDir(li.dir) - if err != nil { - log.Warn("[clean] can't read dir", "err", err, "dir", li.dir) - return - } - uselessFiles := li.scanStateFiles(files) - for _, f := range uselessFiles { - fName := fmt.Sprintf("%s.%d-%d.l", li.filenameBase, f.startTxNum/li.aggregationStep, f.endTxNum/li.aggregationStep) - err = os.Remove(filepath.Join(li.dir, fName)) - log.Debug("[clean] remove", "file", fName, "err", err) - fIdxName := fmt.Sprintf("%s.%d-%d.li", li.filenameBase, f.startTxNum/li.aggregationStep, f.endTxNum/li.aggregationStep) - err = os.Remove(filepath.Join(li.dir, fIdxName)) - log.Debug("[clean] remove", "file", fName, "err", err) - } + /* + files, err := os.ReadDir(li.dir) + if err != nil { + log.Warn("[clean] can't read dir", "err", err, "dir", li.dir) + return + } + uselessFiles := li.scanStateFiles(files) + for _, f := range uselessFiles { + fName := fmt.Sprintf("%s.%d-%d.l", li.filenameBase, f.startTxNum/li.aggregationStep, f.endTxNum/li.aggregationStep) + err = os.Remove(filepath.Join(li.dir, fName)) + log.Debug("[clean] remove", "file", fName, "err", err) + fIdxName := fmt.Sprintf("%s.%d-%d.li", li.filenameBase, f.startTxNum/li.aggregationStep, f.endTxNum/li.aggregationStep) + err = os.Remove(filepath.Join(li.dir, fIdxName)) + log.Debug("[clean] remove", "file", fName, "err", err) + } + */ } diff --git a/state/locality_index_test.go b/state/locality_index_test.go index 89d2df8b3..bdd51fd72 100644 --- a/state/locality_index_test.go +++ b/state/locality_index_test.go @@ -4,12 +4,27 @@ import ( "context" "encoding/binary" "math" + "sync/atomic" "testing" - "github.com/ledgerwatch/erigon-lib/recsplit" "github.com/stretchr/testify/require" ) +func BenchmarkName2(b *testing.B) { + b.Run("1", func(b *testing.B) { + j := atomic.Int32{} + for i := 0; i < b.N; i++ { + j.Add(1) + } + }) + b.Run("2", func(b *testing.B) { + j := &atomic.Int32{} + for i := 0; i < b.N; i++ { + j.Add(1) + } + }) +} + func TestLocality(t *testing.T) { ctx, require := context.Background(), require.New(t) const Module uint64 = 31 @@ -72,9 +87,11 @@ func TestLocality(t *testing.T) { require.Zero(snd) }) t.Run("locality index: lookup", func(t *testing.T) { + liCtx := li.MakeContext() + defer liCtx.Close() var k [8]byte binary.BigEndian.PutUint64(k[:], 1) - v1, v2, from, ok1, ok2 := li.lookupIdxFiles(recsplit.NewIndexReader(files.index), files.bm, li.file, k[:], 1*li.aggregationStep*StepsInBiggestFile) + v1, v2, from, ok1, ok2 := li.lookupIdxFiles(liCtx, k[:], 1*li.aggregationStep*StepsInBiggestFile) require.True(ok1) require.False(ok2) require.Equal(uint64(1*StepsInBiggestFile), v1) diff --git a/state/merge.go b/state/merge.go index 6cdb2000b..d835c9259 100644 --- a/state/merge.go +++ b/state/merge.go @@ -25,12 +25,14 @@ import ( "path/filepath" "strings" + "github.com/ledgerwatch/erigon-lib/common/background" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/compress" "github.com/ledgerwatch/erigon-lib/recsplit" 
"github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32" - "github.com/ledgerwatch/log/v3" ) func (d *Domain) endTxNumMinimax() uint64 { @@ -104,6 +106,26 @@ type DomainRanges struct { index bool } +func (r DomainRanges) String() string { + var b strings.Builder + if r.values { + b.WriteString(fmt.Sprintf("Values: [%d, %d)", r.valuesStartTxNum, r.valuesEndTxNum)) + } + if r.history { + if b.Len() > 0 { + b.WriteString(", ") + } + b.WriteString(fmt.Sprintf("History: [%d, %d)", r.historyStartTxNum, r.historyEndTxNum)) + } + if r.index { + if b.Len() > 0 { + b.WriteString(", ") + } + b.WriteString(fmt.Sprintf("Index: [%d, %d)", r.indexStartTxNum, r.indexEndTxNum)) + } + return b.String() +} + func (r DomainRanges) any() bool { return r.values || r.history || r.index } @@ -142,93 +164,6 @@ func (d *Domain) findMergeRange(maxEndTxNum, maxSpan uint64) DomainRanges { return r } -// nolint -type mergedDomainFiles struct { - values *filesItem - index *filesItem - history *filesItem -} - -// nolint -func (m *mergedDomainFiles) Close() { - for _, item := range []*filesItem{ - m.values, m.index, m.history, - } { - if item != nil { - if item.decompressor != nil { - item.decompressor.Close() - } - if item.decompressor != nil { - item.index.Close() - } - } - } -} - -// nolint -type staticFilesInRange struct { - valuesFiles []*filesItem - indexFiles []*filesItem - historyFiles []*filesItem - startJ int -} - -// nolint -func (s *staticFilesInRange) Close() { - for _, group := range [][]*filesItem{ - s.valuesFiles, s.indexFiles, s.historyFiles, - } { - for _, item := range group { - if item != nil { - if item.decompressor != nil { - item.decompressor.Close() - } - if item.index != nil { - item.index.Close() - } - } - } - } -} - -/* -// nolint - - func (d *Domain) mergeRangesUpTo(ctx context.Context, maxTxNum, maxSpan uint64, workers int) (err error) { - closeAll := true - for rng := d.findMergeRange(maxSpan, maxTxNum); rng.any(); rng = d.findMergeRange(maxTxNum, maxSpan) { - var sfr staticFilesInRange - sfr.valuesFiles, sfr.indexFiles, sfr.historyFiles, sfr.startJ = d.staticFilesInRange(rng) - defer func() { - if closeAll { - sfr.Close() - } - }() - - var mf mergedDomainFiles - if mf.values, mf.index, mf.history, err = d.mergeFiles(ctx, sfr.valuesFiles, sfr.indexFiles, sfr.historyFiles, rng, workers); err != nil { - return err - } - defer func() { - if closeAll { - mf.Close() - } - }() - - //defer func(t time.Time) { log.Info("[snapshots] merge", "took", time.Since(t)) }(time.Now()) - d.integrateMergedFiles(sfr.valuesFiles, sfr.indexFiles, sfr.historyFiles, mf.values, mf.index, mf.history) - - if err := d.deleteFiles(sfr.valuesFiles, sfr.indexFiles, sfr.historyFiles); err != nil { - return err - } - - log.Info(fmt.Sprintf("domain files mergedRange[%d, %d) name=%s span=%d \n", rng.valuesStartTxNum, rng.valuesEndTxNum, d.filenameBase, maxSpan)) - } - closeAll = false - return nil - } -*/ - // 0-1,1-2,2-3,3-4: allow merge 0-1 // 0-2,2-3,3-4: allow merge 0-4 // 0-2,2-4: allow merge 0-4 @@ -266,12 +201,10 @@ func (ii *InvertedIndex) findMergeRange(maxEndTxNum, maxSpan uint64) (bool, uint return minFound, startTxNum, endTxNum } -/* -// nolint -func (ii *InvertedIndex) mergeRangesUpTo(ctx context.Context, maxTxNum, maxSpan uint64, workers int) (err error) { +func (ii *InvertedIndex) mergeRangesUpTo(ctx context.Context, maxTxNum, maxSpan uint64, workers int, ictx *InvertedIndexContext, ps *background.ProgressSet) (err error) { closeAll := true for updated, startTx, endTx := ii.findMergeRange(maxSpan, maxTxNum); 
updated; updated, startTx, endTx = ii.findMergeRange(maxTxNum, maxSpan) { - staticFiles, startJ := ii.staticFilesInRange(startTx, endTx) + staticFiles, _ := ii.staticFilesInRange(startTx, endTx, ictx) defer func() { if closeAll { for _, i := range staticFiles { @@ -280,9 +213,8 @@ func (ii *InvertedIndex) mergeRangesUpTo(ctx context.Context, maxTxNum, maxSpan } } }() - _ = startJ - mergedIndex, err := ii.mergeFiles(ctx, staticFiles, startTx, endTx, workers) + mergedIndex, err := ii.mergeFiles(ctx, staticFiles, startTx, endTx, workers, ps) if err != nil { return err } @@ -293,19 +225,12 @@ func (ii *InvertedIndex) mergeRangesUpTo(ctx context.Context, maxTxNum, maxSpan } }() - //defer func(t time.Time) { log.Info("[snapshots] merge", "took", time.Since(t)) }(time.Now()) ii.integrateMergedFiles(staticFiles, mergedIndex) - - if err := ii.deleteFiles(staticFiles); err != nil { - return err - } - - log.Info(fmt.Sprintf("domain files mergedRange[%d, %d) name=%s span=%d \n", startTx, endTx, ii.filenameBase, maxSpan)) + ii.cleanFrozenParts(mergedIndex) } closeAll = false return nil } -*/ type HistoryRanges struct { historyStartTxNum uint64 @@ -536,32 +461,35 @@ func mergeEfs(preval, val, buf []byte) ([]byte, error) { efIt := ef.Iterator() newEf := eliasfano32.NewEliasFano(preef.Count()+ef.Count(), ef.Max()) for preIt.HasNext() { - v, _ := preIt.Next() + v, err := preIt.Next() + if err != nil { + return nil, err + } newEf.AddOffset(v) } for efIt.HasNext() { - v, _ := efIt.Next() + v, err := efIt.Next() + if err != nil { + return nil, err + } newEf.AddOffset(v) } newEf.Build() return newEf.AppendBytes(buf), nil } -func (d *Domain) mergeFiles(ctx context.Context, valuesFiles, indexFiles, historyFiles []*filesItem, r DomainRanges, workers int) (valuesIn, indexIn, historyIn *filesItem, err error) { +func (d *Domain) mergeFiles(ctx context.Context, valuesFiles, indexFiles, historyFiles []*filesItem, r DomainRanges, workers int, ps *background.ProgressSet) (valuesIn, indexIn, historyIn *filesItem, err error) { if !r.any() { return } var comp *compress.Compressor - //var decomp *compress.Decompressor - var closeItem = true + closeItem := true + defer func() { if closeItem { if comp != nil { comp.Close() } - //if decomp != nil { - // decomp.Close() - //} if indexIn != nil { if indexIn.decompressor != nil { indexIn.decompressor.Close() @@ -569,6 +497,9 @@ func (d *Domain) mergeFiles(ctx context.Context, valuesFiles, indexFiles, histor if indexIn.index != nil { indexIn.index.Close() } + if indexIn.bindex != nil { + indexIn.bindex.Close() + } } if historyIn != nil { if historyIn.decompressor != nil { @@ -577,6 +508,9 @@ func (d *Domain) mergeFiles(ctx context.Context, valuesFiles, indexFiles, histor if historyIn.index != nil { historyIn.index.Close() } + if historyIn.bindex != nil { + historyIn.bindex.Close() + } } if valuesIn != nil { if valuesIn.decompressor != nil { @@ -585,6 +519,9 @@ func (d *Domain) mergeFiles(ctx context.Context, valuesFiles, indexFiles, histor if valuesIn.index != nil { valuesIn.index.Close() } + if valuesIn.bindex != nil { + valuesIn.bindex.Close() + } } } }() @@ -595,19 +532,21 @@ func (d *Domain) mergeFiles(ctx context.Context, valuesFiles, indexFiles, histor history: r.history, indexStartTxNum: r.indexStartTxNum, indexEndTxNum: r.indexEndTxNum, - index: r.index}, workers); err != nil { + index: r.index}, workers, ps); err != nil { return nil, nil, nil, err } if r.values { - log.Info(fmt.Sprintf("[snapshots] merge: %s.%d-%d.kv", d.filenameBase, r.valuesStartTxNum/d.aggregationStep, 
r.valuesEndTxNum/d.aggregationStep)) for _, f := range valuesFiles { defer f.decompressor.EnableMadvNormal().DisableReadAhead() } - - datPath := filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.kv", d.filenameBase, r.valuesStartTxNum/d.aggregationStep, r.valuesEndTxNum/d.aggregationStep)) + datFileName := fmt.Sprintf("%s.%d-%d.kv", d.filenameBase, r.valuesStartTxNum/d.aggregationStep, r.valuesEndTxNum/d.aggregationStep) + datPath := filepath.Join(d.dir, datFileName) if comp, err = compress.NewCompressor(ctx, "merge", datPath, d.tmpdir, compress.MinPatternScore, workers, log.LvlTrace); err != nil { return nil, nil, nil, fmt.Errorf("merge %s history compressor: %w", d.filenameBase, err) } + p := ps.AddNew("merge "+datFileName, 1) + defer ps.Delete(p) + var cp CursorHeap heap.Init(&cp) for _, item := range valuesFiles { @@ -656,24 +595,21 @@ func (d *Domain) mergeFiles(ctx context.Context, valuesFiles, indexFiles, histor heap.Pop(&cp) } } - var skip bool - if d.prefixLen > 0 { - skip = r.valuesStartTxNum == 0 && len(lastVal) == 0 && len(lastKey) != d.prefixLen - } else { - // For the rest of types, empty value means deletion - skip = r.valuesStartTxNum == 0 && len(lastVal) == 0 - } - if !skip { - if keyBuf != nil && (d.prefixLen == 0 || len(keyBuf) != d.prefixLen || bytes.HasPrefix(lastKey, keyBuf)) { + + // empty value means deletion + deleted := r.valuesStartTxNum == 0 && len(lastVal) == 0 + if !deleted { + if keyBuf != nil { if err = comp.AddUncompressedWord(keyBuf); err != nil { return nil, nil, nil, err } keyCount++ // Only counting keys, not values - if d.compressVals { + switch d.compressVals { + case true: if err = comp.AddWord(valBuf); err != nil { return nil, nil, nil, err } - } else { + default: if err = comp.AddUncompressedWord(valBuf); err != nil { return nil, nil, nil, err } @@ -703,16 +639,38 @@ } comp.Close() comp = nil - idxPath := filepath.Join(d.dir, fmt.Sprintf("%s.%d-%d.kvi", d.filenameBase, r.valuesStartTxNum/d.aggregationStep, r.valuesEndTxNum/d.aggregationStep)) + ps.Delete(p) frozen := (r.valuesEndTxNum-r.valuesStartTxNum)/d.aggregationStep == StepsInBiggestFile valuesIn = &filesItem{startTxNum: r.valuesStartTxNum, endTxNum: r.valuesEndTxNum, frozen: frozen} if valuesIn.decompressor, err = compress.NewDecompressor(datPath); err != nil { return nil, nil, nil, fmt.Errorf("merge %s decompressor [%d-%d]: %w", d.filenameBase, r.valuesStartTxNum, r.valuesEndTxNum, err) } + + idxFileName := fmt.Sprintf("%s.%d-%d.kvi", d.filenameBase, r.valuesStartTxNum/d.aggregationStep, r.valuesEndTxNum/d.aggregationStep) + idxPath := filepath.Join(d.dir, idxFileName) + p = ps.AddNew("merge "+idxFileName, uint64(keyCount*2)) + defer ps.Delete(p) + // if valuesIn.index, err = buildIndex(valuesIn.decompressor, idxPath, d.dir, keyCount, false /* values */); err != nil { - if valuesIn.index, err = buildIndex(ctx, valuesIn.decompressor, idxPath, d.tmpdir, keyCount, false /* values */); err != nil { + if valuesIn.index, err = buildIndexThenOpen(ctx, valuesIn.decompressor, idxPath, d.tmpdir, keyCount, false /* values */, p); err != nil { return nil, nil, nil, fmt.Errorf("merge %s buildIndex [%d-%d]: %w", d.filenameBase, r.valuesStartTxNum, r.valuesEndTxNum, err) } + + btFileName := strings.TrimSuffix(idxFileName, "kvi") + "bt" + p = ps.AddNew(btFileName, uint64(keyCount*2)) + defer ps.Delete(p) + btPath := filepath.Join(d.dir, btFileName) + err = BuildBtreeIndexWithDecompressor(btPath, valuesIn.decompressor,
p) + if err != nil { + return nil, nil, nil, fmt.Errorf("merge %s btindex [%d-%d]: %w", d.filenameBase, r.valuesStartTxNum, r.valuesEndTxNum, err) + } + + bt, err := OpenBtreeIndexWithDecompressor(btPath, 2048, valuesIn.decompressor) + if err != nil { + return nil, nil, nil, fmt.Errorf("merge %s btindex2 [%d-%d]: %w", d.filenameBase, r.valuesStartTxNum, r.valuesEndTxNum, err) + } + valuesIn.bindex = bt } closeItem = false d.stats.MergesCount++ @@ -720,11 +678,10 @@ func (d *Domain) mergeFiles(ctx context.Context, valuesFiles, indexFiles, histor return } -func (ii *InvertedIndex) mergeFiles(ctx context.Context, files []*filesItem, startTxNum, endTxNum uint64, workers int) (*filesItem, error) { +func (ii *InvertedIndex) mergeFiles(ctx context.Context, files []*filesItem, startTxNum, endTxNum uint64, workers int, ps *background.ProgressSet) (*filesItem, error) { for _, h := range files { defer h.decompressor.EnableMadvNormal().DisableReadAhead() } - log.Info(fmt.Sprintf("[snapshots] merge: %s.%d-%d.ef", ii.filenameBase, startTxNum/ii.aggregationStep, endTxNum/ii.aggregationStep)) var outItem *filesItem var comp *compress.Compressor @@ -754,10 +711,14 @@ func (ii *InvertedIndex) mergeFiles(ctx context.Context, files []*filesItem, sta return nil, ctx.Err() } - datPath := filepath.Join(ii.dir, fmt.Sprintf("%s.%d-%d.ef", ii.filenameBase, startTxNum/ii.aggregationStep, endTxNum/ii.aggregationStep)) + datFileName := fmt.Sprintf("%s.%d-%d.ef", ii.filenameBase, startTxNum/ii.aggregationStep, endTxNum/ii.aggregationStep) + datPath := filepath.Join(ii.dir, datFileName) if comp, err = compress.NewCompressor(ctx, "Snapshots merge", datPath, ii.tmpdir, compress.MinPatternScore, workers, log.LvlTrace); err != nil { return nil, fmt.Errorf("merge %s inverted index compressor: %w", ii.filenameBase, err) } + p := ps.AddNew("merge "+datFileName, 1) + defer ps.Delete(p) + var cp CursorHeap heap.Init(&cp) for _, item := range files { @@ -836,20 +797,25 @@ func (ii *InvertedIndex) mergeFiles(ctx context.Context, files []*filesItem, sta } comp.Close() comp = nil - idxPath := filepath.Join(ii.dir, fmt.Sprintf("%s.%d-%d.efi", ii.filenameBase, startTxNum/ii.aggregationStep, endTxNum/ii.aggregationStep)) frozen := (endTxNum-startTxNum)/ii.aggregationStep == StepsInBiggestFile outItem = &filesItem{startTxNum: startTxNum, endTxNum: endTxNum, frozen: frozen} if outItem.decompressor, err = compress.NewDecompressor(datPath); err != nil { return nil, fmt.Errorf("merge %s decompressor [%d-%d]: %w", ii.filenameBase, startTxNum, endTxNum, err) } - if outItem.index, err = buildIndex(ctx, outItem.decompressor, idxPath, ii.tmpdir, keyCount, false /* values */); err != nil { + ps.Delete(p) + + idxFileName := fmt.Sprintf("%s.%d-%d.efi", ii.filenameBase, startTxNum/ii.aggregationStep, endTxNum/ii.aggregationStep) + idxPath := filepath.Join(ii.dir, idxFileName) + p = ps.AddNew("merge "+idxFileName, uint64(outItem.decompressor.Count()*2)) + defer ps.Delete(p) + if outItem.index, err = buildIndexThenOpen(ctx, outItem.decompressor, idxPath, ii.tmpdir, keyCount, false /* values */, p); err != nil { return nil, fmt.Errorf("merge %s buildIndex [%d-%d]: %w", ii.filenameBase, startTxNum, endTxNum, err) } closeItem = false return outItem, nil } -func (h *History) mergeFiles(ctx context.Context, indexFiles, historyFiles []*filesItem, r HistoryRanges, workers int) (indexIn, historyIn *filesItem, err error) { +func (h *History) mergeFiles(ctx context.Context, indexFiles, historyFiles []*filesItem, r HistoryRanges, workers int, ps 
*background.ProgressSet) (indexIn, historyIn *filesItem, err error) { if !r.any() { return nil, nil, nil } @@ -862,11 +828,10 @@ func (h *History) mergeFiles(ctx context.Context, indexFiles, historyFiles []*fi } } }() - if indexIn, err = h.InvertedIndex.mergeFiles(ctx, indexFiles, r.indexStartTxNum, r.indexEndTxNum, workers); err != nil { + if indexIn, err = h.InvertedIndex.mergeFiles(ctx, indexFiles, r.indexStartTxNum, r.indexEndTxNum, workers, ps); err != nil { return nil, nil, err } if r.history { - log.Info(fmt.Sprintf("[snapshots] merge: %s.%d-%d.v", h.filenameBase, r.historyStartTxNum/h.aggregationStep, r.historyEndTxNum/h.aggregationStep)) for _, f := range indexFiles { defer f.decompressor.EnableMadvNormal().DisableReadAhead() } @@ -903,11 +868,15 @@ func (h *History) mergeFiles(ctx context.Context, indexFiles, historyFiles []*fi } } }() - datPath := filepath.Join(h.dir, fmt.Sprintf("%s.%d-%d.v", h.filenameBase, r.historyStartTxNum/h.aggregationStep, r.historyEndTxNum/h.aggregationStep)) - idxPath := filepath.Join(h.dir, fmt.Sprintf("%s.%d-%d.vi", h.filenameBase, r.historyStartTxNum/h.aggregationStep, r.historyEndTxNum/h.aggregationStep)) + datFileName := fmt.Sprintf("%s.%d-%d.v", h.filenameBase, r.historyStartTxNum/h.aggregationStep, r.historyEndTxNum/h.aggregationStep) + idxFileName := fmt.Sprintf("%s.%d-%d.vi", h.filenameBase, r.historyStartTxNum/h.aggregationStep, r.historyEndTxNum/h.aggregationStep) + datPath := filepath.Join(h.dir, datFileName) + idxPath := filepath.Join(h.dir, idxFileName) if comp, err = compress.NewCompressor(ctx, "merge", datPath, h.tmpdir, compress.MinPatternScore, workers, log.LvlTrace); err != nil { return nil, nil, fmt.Errorf("merge %s history compressor: %w", h.filenameBase, err) } + p := ps.AddNew("merge "+datFileName, 1) + defer ps.Delete(p) var cp CursorHeap heap.Init(&cp) for _, item := range indexFiles { @@ -985,6 +954,10 @@ func (h *History) mergeFiles(ctx context.Context, indexFiles, historyFiles []*fi if decomp, err = compress.NewDecompressor(datPath); err != nil { return nil, nil, err } + ps.Delete(p) + + p = ps.AddNew("merge "+idxFileName, uint64(2*keyCount)) + defer ps.Delete(p) if rs, err = recsplit.NewRecSplit(recsplit.RecSplitArgs{ KeyCount: keyCount, Enums: false, @@ -1024,6 +997,7 @@ func (h *History) mergeFiles(ctx context.Context, indexFiles, historyFiles []*fi valOffset = g2.SkipUncompressed() } } + p.Processed.Add(1) } if err = rs.Build(); err != nil { if rs.Collision() { @@ -1057,7 +1031,7 @@ func (d *Domain) integrateMergedFiles(valuesOuts, indexOuts, historyOuts []*file // `kill -9` may leave some garbage // but it still may be useful for merges, until we finish merge frozen file - if historyIn.frozen { + if historyIn != nil && historyIn.frozen { d.files.Walk(func(items []*filesItem) bool { for _, item := range items { if item.frozen || item.endTxNum > valuesIn.endTxNum { @@ -1162,11 +1136,11 @@ func (d *Domain) cleanAfterFreeze(f *filesItem) { d.files.Delete(out) out.canDelete.Store(true) } - d.History.cleanAfterFreeze(f) + d.History.cleanFrozenParts(f) } -// cleanAfterFreeze - mark all small files before `f` as `canDelete=true` -func (h *History) cleanAfterFreeze(f *filesItem) { +// cleanFrozenParts - mark all small files before `f` as `canDelete=true` +func (h *History) cleanFrozenParts(f *filesItem) { if f == nil || !f.frozen { return } @@ -1190,11 +1164,11 @@ func (h *History) cleanAfterFreeze(f *filesItem) { h.files.Delete(out) out.canDelete.Store(true) } - h.InvertedIndex.cleanAfterFreeze(f) + 
h.InvertedIndex.cleanFrozenParts(f) } -// cleanAfterFreeze - mark all small files before `f` as `canDelete=true` -func (ii *InvertedIndex) cleanAfterFreeze(f *filesItem) { +// cleanFrozenParts - mark all small files before `f` as `canDelete=true` +func (ii *InvertedIndex) cleanFrozenParts(f *filesItem) { if f == nil || !f.frozen { return } diff --git a/state/read_indices.go b/state/read_indices.go index 90143b322..c42708a42 100644 --- a/state/read_indices.go +++ b/state/read_indices.go @@ -23,6 +23,7 @@ import ( "time" "github.com/RoaringBitmap/roaring/roaring64" + "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/erigon-lib/kv" ) @@ -34,6 +35,7 @@ type ReadIndices struct { keyBuf []byte aggregationStep uint64 txNum uint64 + ps *background.ProgressSet } func NewReadIndices( @@ -42,6 +44,7 @@ func NewReadIndices( ) (*ReadIndices, error) { ri := &ReadIndices{ aggregationStep: aggregationStep, + ps: background.NewProgressSet(), } closeIndices := true defer func() { @@ -98,10 +101,8 @@ type RCollation struct { func (c RCollation) Close() { } -func (ri *ReadIndices) collate(step uint64, txFrom, txTo uint64, roTx kv.Tx) (RCollation, error) { +func (ri *ReadIndices) collate(txFrom, txTo uint64, roTx kv.Tx) (RCollation, error) { - logEvery := time.NewTicker(30 * time.Second) - defer logEvery.Stop() var c RCollation var err error closeColl := true @@ -111,13 +112,13 @@ func (ri *ReadIndices) collate(step uint64, txFrom, txTo uint64, roTx kv.Tx) (RC } }() ctx := context.TODO() - if c.accounts, err = ri.accounts.collate(ctx, txFrom, txTo, roTx, logEvery); err != nil { + if c.accounts, err = ri.accounts.collate(ctx, txFrom, txTo, roTx); err != nil { return RCollation{}, err } - if c.storage, err = ri.storage.collate(ctx, txFrom, txTo, roTx, logEvery); err != nil { + if c.storage, err = ri.storage.collate(ctx, txFrom, txTo, roTx); err != nil { return RCollation{}, err } - if c.code, err = ri.code.collate(ctx, txFrom, txTo, roTx, logEvery); err != nil { + if c.code, err = ri.code.collate(ctx, txFrom, txTo, roTx); err != nil { return RCollation{}, err } closeColl = false @@ -150,21 +151,21 @@ func (ri *ReadIndices) buildFiles(ctx context.Context, step uint64, collation RC go func() { defer wg.Done() var err error - if sf.accounts, err = ri.accounts.buildFiles(ctx, step, collation.accounts); err != nil { + if sf.accounts, err = ri.accounts.buildFiles(ctx, step, collation.accounts, ri.ps); err != nil { errCh <- err } }() go func() { defer wg.Done() var err error - if sf.storage, err = ri.storage.buildFiles(ctx, step, collation.storage); err != nil { + if sf.storage, err = ri.storage.buildFiles(ctx, step, collation.storage, ri.ps); err != nil { errCh <- err } }() go func() { defer wg.Done() var err error - if sf.code, err = ri.code.buildFiles(ctx, step, collation.code); err != nil { + if sf.code, err = ri.code.buildFiles(ctx, step, collation.code, ri.ps); err != nil { errCh <- err } }() @@ -188,7 +189,7 @@ func (ri *ReadIndices) integrateFiles(sf RStaticFiles, txNumFrom, txNumTo uint64 ri.code.integrateFiles(sf.code, txNumFrom, txNumTo) } -func (ri *ReadIndices) prune(step uint64, txFrom, txTo uint64) error { +func (ri *ReadIndices) prune(txFrom, txTo uint64) error { ctx := context.TODO() logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() @@ -312,7 +313,7 @@ func (ri *ReadIndices) mergeFiles(ctx context.Context, files RSelectedStaticFile defer wg.Done() var err error if r.accounts { - if mf.accounts, err = ri.accounts.mergeFiles(ctx, files.accounts, 
r.accountsStartTxNum, r.accountsEndTxNum, workers); err != nil { + if mf.accounts, err = ri.accounts.mergeFiles(ctx, files.accounts, r.accountsStartTxNum, r.accountsEndTxNum, workers, ri.ps); err != nil { errCh <- err } } @@ -321,7 +322,7 @@ func (ri *ReadIndices) mergeFiles(ctx context.Context, files RSelectedStaticFile defer wg.Done() var err error if r.storage { - if mf.storage, err = ri.storage.mergeFiles(ctx, files.storage, r.storageStartTxNum, r.storageEndTxNum, workers); err != nil { + if mf.storage, err = ri.storage.mergeFiles(ctx, files.storage, r.storageStartTxNum, r.storageEndTxNum, workers, ri.ps); err != nil { errCh <- err } } @@ -330,7 +331,7 @@ func (ri *ReadIndices) mergeFiles(ctx context.Context, files RSelectedStaticFile defer wg.Done() var err error if r.code { - if mf.code, err = ri.code.mergeFiles(ctx, files.code, r.codeStartTxNum, r.codeEndTxNum, workers); err != nil { + if mf.code, err = ri.code.mergeFiles(ctx, files.code, r.codeStartTxNum, r.codeEndTxNum, workers, ri.ps); err != nil { errCh <- err } } @@ -384,7 +385,7 @@ func (ri *ReadIndices) FinishTx() error { } closeAll := true step := ri.txNum / ri.aggregationStep - collation, err := ri.collate(step, step*ri.aggregationStep, (step+1)*ri.aggregationStep, ri.rwTx) + collation, err := ri.collate(step*ri.aggregationStep, (step+1)*ri.aggregationStep, ri.rwTx) if err != nil { return err } @@ -403,7 +404,7 @@ func (ri *ReadIndices) FinishTx() error { } }() ri.integrateFiles(sf, step*ri.aggregationStep, (step+1)*ri.aggregationStep) - if err = ri.prune(step, step*ri.aggregationStep, (step+1)*ri.aggregationStep); err != nil { + if err = ri.prune(step*ri.aggregationStep, (step+1)*ri.aggregationStep); err != nil { return err } maxEndTxNum := ri.endTxNumMinimax() diff --git a/txpool/fetch_test.go b/txpool/fetch_test.go index 2b5a93c4c..0edfcbf7a 100644 --- a/txpool/fetch_test.go +++ b/txpool/fetch_test.go @@ -166,7 +166,7 @@ func TestOnNewBlock(t *testing.T) { } i++ return &remote.StateChangeBatch{ - StateVersionID: 1, + StateVersionId: 1, ChangeBatch: []*remote.StateChange{ {Txs: [][]byte{decodeHex(types3.TxParseMainnetTests[0].PayloadStr), decodeHex(types3.TxParseMainnetTests[1].PayloadStr), decodeHex(types3.TxParseMainnetTests[2].PayloadStr)}, BlockHeight: 1, BlockHash: gointerfaces.ConvertHashToH256([32]byte{})}, }, diff --git a/txpool/pool.go b/txpool/pool.go index bc1a0905f..d67c55d9c 100644 --- a/txpool/pool.go +++ b/txpool/pool.go @@ -29,16 +29,17 @@ import ( "runtime" "sort" "sync" + "sync/atomic" "time" "github.com/VictoriaMetrics/metrics" mapset "github.com/deckarep/golang-set/v2" "github.com/go-stack/stack" "github.com/google/btree" - "github.com/hashicorp/golang-lru/simplelru" + "github.com/hashicorp/golang-lru/v2/simplelru" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" "github.com/ledgerwatch/log/v3" - "go.uber.org/atomic" "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" @@ -71,38 +72,6 @@ var ( basefeeSubCounter = metrics.GetOrCreateCounter(`txpool_basefee`) ) -type Config struct { - DBDir string - TracedSenders []string // List of senders for which tx pool should print out debugging info - SyncToNewPeersEvery time.Duration - ProcessRemoteTxsEvery time.Duration - CommitEvery time.Duration - LogEvery time.Duration - PendingSubPoolLimit int - BaseFeeSubPoolLimit int - QueuedSubPoolLimit int - MinFeeCap uint64 - AccountSlots uint64 // Number of executable transaction slots guaranteed per account - PriceBump uint64 // Price bump percentage 
to replace an already existing transaction - OverrideShanghaiTime *big.Int -} - -var DefaultConfig = Config{ - SyncToNewPeersEvery: 2 * time.Minute, - ProcessRemoteTxsEvery: 100 * time.Millisecond, - CommitEvery: 15 * time.Second, - LogEvery: 30 * time.Second, - - PendingSubPoolLimit: 10_000, - BaseFeeSubPoolLimit: 10_000, - QueuedSubPoolLimit: 10_000, - - MinFeeCap: 1, - AccountSlots: 16, //TODO: to choose right value (16 to be compatible with Geth) - PriceBump: 10, // Price bump percentage to replace an already existing transaction - OverrideShanghaiTime: nil, -} - // Pool is interface for the transaction pool // This interface exists for the convenience of testing, and not yet because // there are multiple implementations @@ -311,18 +280,18 @@ type TxPool struct { // - batch notifications about new txs (reduce P2P spam to other nodes about txs propagation) // - and as a result reducing lock contention unprocessedRemoteTxs *types.TxSlots - unprocessedRemoteByHash map[string]int // to reject duplicates - byHash map[string]*metaTx // tx_hash => tx : only not committed to db yet records - discardReasonsLRU *simplelru.LRU // tx_hash => discard_reason : non-persisted + unprocessedRemoteByHash map[string]int // to reject duplicates + byHash map[string]*metaTx // tx_hash => tx : only not committed to db yet records + discardReasonsLRU *simplelru.LRU[string, DiscardReason] // tx_hash => discard_reason : non-persisted pending *PendingPool baseFee *SubPool queued *SubPool - isLocalLRU *simplelru.LRU // tx_hash => is_local : to restore isLocal flag of unwinded transactions - newPendingTxs chan types.Announcements // notifications about new txs in Pending sub-pool - all *BySenderAndNonce // senderID => (sorted map of tx nonce => *metaTx) - deletedTxs []*metaTx // list of discarded txs since last db commit + isLocalLRU *simplelru.LRU[string, struct{}] // tx_hash => is_local : to restore isLocal flag of unwinded transactions + newPendingTxs chan types.Announcements // notifications about new txs in Pending sub-pool + all *BySenderAndNonce // senderID => (sorted map of tx nonce => *metaTx) + deletedTxs []*metaTx // list of discarded txs since last db commit promoted types.Announcements - cfg Config + cfg txpoolcfg.Config chainID uint256.Int lastSeenBlock atomic.Uint64 started atomic.Bool @@ -332,12 +301,13 @@ type TxPool struct { isPostShanghai atomic.Bool } -func New(newTxs chan types.Announcements, coreDB kv.RoDB, cfg Config, cache kvcache.Cache, chainID uint256.Int, shanghaiTime *big.Int) (*TxPool, error) { - localsHistory, err := simplelru.NewLRU(10_000, nil) +func New(newTxs chan types.Announcements, coreDB kv.RoDB, cfg txpoolcfg.Config, cache kvcache.Cache, chainID uint256.Int, shanghaiTime *big.Int) (*TxPool, error) { + var err error + localsHistory, err := simplelru.NewLRU[string, struct{}](10_000, nil) if err != nil { return nil, err } - discardHistory, err := simplelru.NewLRU(10_000, nil) + discardHistory, err := simplelru.NewLRU[string, DiscardReason](10_000, nil) if err != nil { return nil, err } @@ -347,9 +317,9 @@ func New(newTxs chan types.Announcements, coreDB kv.RoDB, cfg Config, cache kvca search: &metaTx{Tx: &types.TxSlot{}}, senderIDTxnCount: map[uint64]int{}, } - tracedSenders := make(map[string]struct{}) + tracedSenders := make(map[common.Address]struct{}) for _, sender := range cfg.TracedSenders { - tracedSenders[sender] = struct{}{} + tracedSenders[common.BytesToAddress([]byte(sender))] = struct{}{} } return &TxPool{ lock: &sync.Mutex{}, @@ -449,23 +419,21 @@ func (p *TxPool) 
OnNewBlock(ctx context.Context, stateChanges *remote.StateChang //log.Debug("[txpool] new block", "unwinded", len(unwindTxs.txs), "mined", len(minedTxs.txs), "baseFee", baseFee, "blockHeight", blockHeight) - p.pending.resetAdded() - p.baseFee.resetAdded() - if err := addTxsOnNewBlock(p.lastSeenBlock.Load(), cacheView, stateChanges, p.senders, unwindTxs, + announcements, err := addTxsOnNewBlock(p.lastSeenBlock.Load(), cacheView, stateChanges, p.senders, unwindTxs, pendingBaseFee, stateChanges.BlockGasLimit, - p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked); err != nil { + p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked) + if err != nil { return err } p.pending.EnforceWorstInvariants() p.baseFee.EnforceInvariants() p.queued.EnforceInvariants() - promote(p.pending, p.baseFee, p.queued, pendingBaseFee, p.discardLocked) + promote(p.pending, p.baseFee, p.queued, pendingBaseFee, p.discardLocked, &announcements) p.pending.EnforceBestInvariants() p.promoted.Reset() - p.pending.appendAddedTo(&p.promoted) - p.baseFee.appendAddedTo(&p.promoted) + p.promoted.AppendOther(announcements) - if p.started.CAS(false, true) { + if p.started.CompareAndSwap(false, true) { log.Info("[txpool] Started") } @@ -516,15 +484,13 @@ func (p *TxPool) processRemoteTxs(ctx context.Context) error { return err } - p.pending.resetAdded() - p.baseFee.resetAdded() - if _, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, - p.pendingBaseFee.Load(), p.blockGasLimit.Load(), p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked); err != nil { + announcements, _, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, + p.pendingBaseFee.Load(), p.blockGasLimit.Load(), p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, true) + if err != nil { return err } p.promoted.Reset() - p.pending.appendAddedTo(&p.promoted) - p.baseFee.appendAddedTo(&p.promoted) + p.promoted.AppendOther(announcements) if p.promoted.Len() > 0 { select { @@ -541,19 +507,19 @@ func (p *TxPool) processRemoteTxs(ctx context.Context) error { //log.Info("[txpool] on new txs", "amount", len(newPendingTxs.txs), "in", time.Since(t)) return nil } -func (p *TxPool) getRlpLocked(tx kv.Tx, hash []byte) (rlpTxn []byte, sender []byte, isLocal bool, err error) { +func (p *TxPool) getRlpLocked(tx kv.Tx, hash []byte) (rlpTxn []byte, sender common.Address, isLocal bool, err error) { txn, ok := p.byHash[string(hash)] if ok && txn.Tx.Rlp != nil { return txn.Tx.Rlp, p.senders.senderID2Addr[txn.Tx.SenderID], txn.subPool&IsLocal > 0, nil } v, err := tx.GetOne(kv.PoolTransaction, hash) if err != nil { - return nil, nil, false, err + return nil, common.Address{}, false, err } if v == nil { - return nil, nil, false, nil + return nil, common.Address{}, false, nil } - return v[20:], v[:20], txn != nil && txn.subPool&IsLocal > 0, nil + return v[20:], *(*[20]byte)(v[:20]), txn != nil && txn.subPool&IsLocal > 0, nil } func (p *TxPool) GetRlp(tx kv.Tx, hash []byte) ([]byte, error) { p.lock.Lock() @@ -682,7 +648,7 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG } txs.Txs[count] = rlpTx - copy(txs.Senders.At(count), sender) + copy(txs.Senders.At(count), sender.Bytes()) txs.IsLocal[count] = isLocal toSkip.Add(mt.Tx.IDHash) count++ @@ -711,7 +677,7 @@ func (p *TxPool) YieldBest(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, avail } func (p *TxPool) PeekBest(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas, 
availableDataGas uint64) (bool, error) { - set := mapset.NewSet[[32]byte]() + set := mapset.NewThreadUnsafeSet[[32]byte]() onTime, _, err := p.best(n, txs, tx, onTopOf, availableGas, availableDataGas, set) return onTime, err } @@ -891,14 +857,14 @@ func (p *TxPool) punishSpammer(spammer uint64) { } } -func fillDiscardReasons(reasons []DiscardReason, newTxs types.TxSlots, discardReasonsLRU *simplelru.LRU) []DiscardReason { +func fillDiscardReasons(reasons []DiscardReason, newTxs types.TxSlots, discardReasonsLRU *simplelru.LRU[string, DiscardReason]) []DiscardReason { for i := range reasons { if reasons[i] != NotSet { continue } reason, ok := discardReasonsLRU.Get(string(newTxs.Txs[i].IDHash[:])) if ok { - reasons[i] = reason.(DiscardReason) + reasons[i] = reason } else { reasons[i] = Success } @@ -925,7 +891,7 @@ func (p *TxPool) AddLocalTxs(ctx context.Context, newTransactions types.TxSlots, if err := p.fromDB(ctx, tx, coreTx); err != nil { return nil, fmt.Errorf("loading txs from DB: %w", err) } - if p.started.CAS(false, true) { + if p.started.CompareAndSwap(false, true) { log.Info("[txpool] Started") } } @@ -939,10 +905,9 @@ func (p *TxPool) AddLocalTxs(ctx context.Context, newTransactions types.TxSlots, return nil, err } - p.pending.resetAdded() - p.baseFee.resetAdded() - if addReasons, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, - p.pendingBaseFee.Load(), p.blockGasLimit.Load(), p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked); err == nil { + announcements, addReasons, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, + p.pendingBaseFee.Load(), p.blockGasLimit.Load(), p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, true) + if err == nil { for i, reason := range addReasons { if reason != NotSet { reasons[i] = reason @@ -952,8 +917,7 @@ func (p *TxPool) AddLocalTxs(ctx context.Context, newTransactions types.TxSlots, return nil, err } p.promoted.Reset() - p.pending.appendAddedTo(&p.promoted) - p.baseFee.appendAddedTo(&p.promoted) + p.promoted.AppendOther(announcements) reasons = fillDiscardReasons(reasons, newTxs, p.discardReasonsLRU) for i, reason := range reasons { @@ -989,7 +953,7 @@ func (p *TxPool) cache() kvcache.Cache { func addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, newTxs types.TxSlots, pendingBaseFee, blockGasLimit uint64, pending *PendingPool, baseFee, queued *SubPool, - byNonce *BySenderAndNonce, byHash map[string]*metaTx, add func(*metaTx) DiscardReason, discard func(*metaTx, DiscardReason)) ([]DiscardReason, error) { + byNonce *BySenderAndNonce, byHash map[string]*metaTx, add func(*metaTx, *types.Announcements) DiscardReason, discard func(*metaTx, DiscardReason), collect bool) (types.Announcements, []DiscardReason, error) { protocolBaseFee := calcProtocolBaseFee(pendingBaseFee) if assert.Enable { for _, txn := range newTxs.Txs { @@ -1009,27 +973,18 @@ func addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, // time (up to some "immutability threshold"). 
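// Reviewer sketch (illustrative, not part of the patch): addTxs now collects
// announcements into a local value and returns it, instead of poking the
// pools' internal added/adding state. A minimal sketch of the new call shape,
// matching the callers above (names from this file; error handling elided):
//
//	announcements, reasons, err := addTxs(p.lastSeenBlock.Load(), cacheView,
//		p.senders, newTxs, p.pendingBaseFee.Load(), p.blockGasLimit.Load(),
//		p.pending, p.baseFee, p.queued, p.all, p.byHash,
//		p.addLocked, p.discardLocked, true /* collect announcements */)
//	if err != nil {
//		return err
//	}
//	_ = reasons // one DiscardReason per tx; NotSet is later mapped to Success
//	p.promoted.Reset()
//	p.promoted.AppendOther(announcements) // single place feeding newPendingTxs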
sendersWithChangedState := map[uint64]struct{}{} discardReasons := make([]DiscardReason, len(newTxs.Txs)) + announcements := types.Announcements{} for i, txn := range newTxs.Txs { if found, ok := byHash[string(txn.IDHash[:])]; ok { discardReasons[i] = DuplicateHash // In case if the transation is stuck, "poke" it to rebroadcast - // TODO refactor to return the list of promoted hashes instead of using added inside the pool - if newTxs.IsLocal[i] { - switch found.currentSubPool { - case PendingSubPool: - if pending.adding { - pending.added.Append(found.Tx.Type, found.Tx.Size, found.Tx.IDHash[:]) - } - case BaseFeeSubPool: - if baseFee.adding { - baseFee.added.Append(found.Tx.Type, found.Tx.Size, found.Tx.IDHash[:]) - } - } + if collect && newTxs.IsLocal[i] && (found.currentSubPool == PendingSubPool || found.currentSubPool == BaseFeeSubPool) { + announcements.Append(found.Tx.Type, found.Tx.Size, found.Tx.IDHash[:]) } continue } mt := newMetaTx(txn, newTxs.IsLocal[i], blockNum) - if reason := add(mt); reason != NotSet { + if reason := add(mt, &announcements); reason != NotSet { discardReasons[i] = reason continue } @@ -1043,21 +998,21 @@ func addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, for senderID := range sendersWithChangedState { nonce, balance, err := senders.info(cacheView, senderID) if err != nil { - return discardReasons, err + return announcements, discardReasons, err } onSenderStateChange(senderID, nonce, balance, byNonce, protocolBaseFee, blockGasLimit, pending, baseFee, queued, discard) } - promote(pending, baseFee, queued, pendingBaseFee, discard) + promote(pending, baseFee, queued, pendingBaseFee, discard, &announcements) pending.EnforceBestInvariants() - return discardReasons, nil + return announcements, discardReasons, nil } func addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, stateChanges *remote.StateChangeBatch, senders *sendersBatch, newTxs types.TxSlots, pendingBaseFee uint64, blockGasLimit uint64, pending *PendingPool, baseFee, queued *SubPool, - byNonce *BySenderAndNonce, byHash map[string]*metaTx, add func(*metaTx) DiscardReason, discard func(*metaTx, DiscardReason)) error { + byNonce *BySenderAndNonce, byHash map[string]*metaTx, add func(*metaTx, *types.Announcements) DiscardReason, discard func(*metaTx, DiscardReason)) (types.Announcements, error) { protocolBaseFee := calcProtocolBaseFee(pendingBaseFee) if assert.Enable { for _, txn := range newTxs.Txs { @@ -1076,12 +1031,13 @@ func addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, stateChanges // somehow the fact that certain transactions were local, needs to be remembered for some // time (up to some "immutability threshold"). 
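// Aside (sketch, not part of the patch): the pool also migrates from
// go.uber.org/atomic to the stdlib sync/atomic typed values (available since
// Go 1.19), which is why started.CAS(...) became started.CompareAndSwap(...):
//
//	import "sync/atomic"
//
//	var started atomic.Bool // zero value is ready to use, no constructor
//	if started.CompareAndSwap(false, true) {
//		log.Info("[txpool] Started") // executes exactly once, even under races
//	}
//
// The same stdlib generics back atomic.Pointer[ctxItem] in LocalityIndex.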
sendersWithChangedState := map[uint64]struct{}{} + announcements := types.Announcements{} for i, txn := range newTxs.Txs { if _, ok := byHash[string(txn.IDHash[:])]; ok { continue } mt := newMetaTx(txn, newTxs.IsLocal[i], blockNum) - if reason := add(mt); reason != NotSet { + if reason := add(mt, &announcements); reason != NotSet { discard(mt, reason) continue } @@ -1096,7 +1052,7 @@ func addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, stateChanges continue } addr := gointerfaces.ConvertH160toAddress(change.Address) - id, ok := senders.getID(addr[:]) + id, ok := senders.getID(addr) if !ok { continue } @@ -1108,13 +1064,13 @@ func addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, stateChanges for senderID := range sendersWithChangedState { nonce, balance, err := senders.info(cacheView, senderID) if err != nil { - return err + return announcements, err } onSenderStateChange(senderID, nonce, balance, byNonce, protocolBaseFee, blockGasLimit, pending, baseFee, queued, discard) } - return nil + return announcements, nil } func (p *TxPool) setBaseFee(baseFee uint64) (uint64, bool) { @@ -1126,7 +1082,7 @@ func (p *TxPool) setBaseFee(baseFee uint64) (uint64, bool) { return p.pendingBaseFee.Load(), changed } -func (p *TxPool) addLocked(mt *metaTx) DiscardReason { +func (p *TxPool) addLocked(mt *metaTx, announcements *types.Announcements) DiscardReason { // Insert to pending pool, if pool doesn't have txn with same Nonce and bigger Tip found := p.all.get(mt.Tx.SenderID, mt.Tx.Nonce) if found != nil { @@ -1139,18 +1095,8 @@ func (p *TxPool) addLocked(mt *metaTx) DiscardReason { if mt.Tx.Tip.Cmp(tipThreshold) < 0 || mt.Tx.FeeCap.Cmp(feecapThreshold) < 0 { // Both tip and feecap need to be larger than previously to replace the transaction // In case if the transation is stuck, "poke" it to rebroadcast - // TODO refactor to return the list of promoted hashes instead of using added inside the pool - if mt.subPool&IsLocal != 0 { - switch found.currentSubPool { - case PendingSubPool: - if p.pending.adding { - p.pending.added.Append(found.Tx.Type, found.Tx.Size, found.Tx.IDHash[:]) - } - case BaseFeeSubPool: - if p.baseFee.adding { - p.baseFee.added.Append(found.Tx.Type, found.Tx.Size, found.Tx.IDHash[:]) - } - } + if mt.subPool&IsLocal != 0 && (found.currentSubPool == PendingSubPool || found.currentSubPool == BaseFeeSubPool) { + announcements.Append(found.Tx.Type, found.Tx.Size, found.Tx.IDHash[:]) } if bytes.Equal(found.Tx.IDHash[:], mt.Tx.IDHash[:]) { return NotSet @@ -1200,7 +1146,7 @@ func (p *TxPool) discardLocked(mt *metaTx, reason DiscardReason) { func (p *TxPool) NonceFromAddress(addr [20]byte) (nonce uint64, inPool bool) { p.lock.Lock() defer p.lock.Unlock() - senderID, found := p.senders.getID(addr[:]) + senderID, found := p.senders.getID(addr) if !found { return 0, false } @@ -1377,11 +1323,13 @@ func onSenderStateChange(senderID uint64, senderNonce uint64, senderBalance uint // promote reasserts invariants of the subpool and returns the list of transactions that ended up // being promoted to the pending or basefee pool, for re-broadcasting -func promote(pending *PendingPool, baseFee, queued *SubPool, pendingBaseFee uint64, discard func(*metaTx, DiscardReason)) { +func promote(pending *PendingPool, baseFee, queued *SubPool, pendingBaseFee uint64, discard func(*metaTx, DiscardReason), announcements *types.Announcements) { // Demote worst transactions that do not qualify for pending sub pool anymore, to other sub pools, or discard for worst := pending.Worst(); 
pending.Len() > 0 && (worst.subPool < BaseFeePoolBits || worst.minFeeCap.Cmp(uint256.NewInt(pendingBaseFee)) < 0); worst = pending.Worst() { if worst.subPool >= BaseFeePoolBits { - baseFee.Add(pending.PopWorst()) + tx := pending.PopWorst() + announcements.Append(tx.Tx.Type, tx.Tx.Size, tx.Tx.IDHash[:]) + baseFee.Add(tx) } else if worst.subPool >= QueuedPoolBits { queued.Add(pending.PopWorst()) } else { @@ -1391,7 +1339,9 @@ func promote(pending *PendingPool, baseFee, queued *SubPool, pendingBaseFee uint // Promote best transactions from base fee pool to pending pool while they qualify for best := baseFee.Best(); baseFee.Len() > 0 && best.subPool >= BaseFeePoolBits && best.minFeeCap.Cmp(uint256.NewInt(pendingBaseFee)) >= 0; best = baseFee.Best() { - pending.Add(baseFee.PopBest()) + tx := baseFee.PopBest() + announcements.Append(tx.Tx.Type, tx.Tx.Size, tx.Tx.IDHash[:]) + pending.Add(tx) } // Demote worst transactions that do not qualify for base fee pool anymore, to queued sub pool, or discard @@ -1406,7 +1356,9 @@ func promote(pending *PendingPool, baseFee, queued *SubPool, pendingBaseFee uint // Promote best transactions from the queued pool to either pending or base fee pool, while they qualify for best := queued.Best(); queued.Len() > 0 && best.subPool >= BaseFeePoolBits; best = queued.Best() { if best.minFeeCap.Cmp(uint256.NewInt(pendingBaseFee)) >= 0 { - pending.Add(queued.PopBest()) + tx := queued.PopBest() + announcements.Append(tx.Tx.Type, tx.Tx.Size, tx.Tx.IDHash[:]) + pending.Add(tx) } else { baseFee.Add(queued.PopBest()) } @@ -1609,7 +1561,7 @@ func (p *TxPool) flushLocked(tx kv.RwTx) (err error) { addr, ok := p.senders.senderID2Addr[id] if ok { delete(p.senders.senderID2Addr, id) - delete(p.senders.senderIDs, string(addr)) + delete(p.senders.senderIDs, addr) } } //fmt.Printf("del:%d,%d,%d\n", mt.Tx.senderID, mt.Tx.nonce, mt.Tx.tip) @@ -1632,7 +1584,7 @@ func (p *TxPool) flushLocked(tx kv.RwTx) (err error) { } for i, txHash := range txHashes { binary.BigEndian.PutUint64(encID, uint64(i)) - if err := tx.Append(kv.RecentLocalTransaction, encID, []byte(txHash.(string))); err != nil { + if err := tx.Append(kv.RecentLocalTransaction, encID, []byte(txHash)); err != nil { return err } } @@ -1643,12 +1595,14 @@ func (p *TxPool) flushLocked(tx kv.RwTx) (err error) { continue } v = common.EnsureEnoughSize(v, 20+len(metaTx.Tx.Rlp)) - for addr, id := range p.senders.senderIDs { // no inverted index - tradeoff flush speed for memory usage - if id == metaTx.Tx.SenderID { - copy(v[:20], addr) - break - } + + addr, ok := p.senders.senderID2Addr[metaTx.Tx.SenderID] + if !ok { + log.Warn("[txpool] flush: sender address not found by ID", "senderID", metaTx.Tx.SenderID) + continue } + + copy(v[:20], addr.Bytes()) copy(v[20:], metaTx.Tx.Rlp) has, err := tx.Has(kv.PoolTransaction, []byte(txHash)) @@ -1717,7 +1671,7 @@ func (p *TxPool) fromDB(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error { if err != nil { return err } - addr, txRlp := v[:20], v[20:] + addr, txRlp := *(*[20]byte)(v[:20]), v[20:] txn := &types.TxSlot{} _, err = parseCtx.ParseTransaction(txRlp, 0, txn, nil, false /* hasEnvelope */, true /* networkVersion */, nil) @@ -1739,7 +1693,7 @@ func (p *TxPool) fromDB(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error { txs.Resize(uint(i + 1)) txs.Txs[i] = txn txs.IsLocal[i] = isLocalTx - copy(txs.Senders.At(i), addr) + copy(txs.Senders.At(i), addr[:]) i++ } @@ -1757,8 +1711,8 @@ func (p *TxPool) fromDB(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error { if err != nil { return err } - if _, 
err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, txs, - pendingBaseFee, math.MaxUint64 /* blockGasLimit */, p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked); err != nil { + if _, _, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, txs, + pendingBaseFee, math.MaxUint64 /* blockGasLimit */, p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, false); err != nil { return err } p.pendingBaseFee.Store(pendingBaseFee) @@ -1838,7 +1792,7 @@ func (p *TxPool) logStats() { var m runtime.MemStats dbg.ReadMemStats(&m) ctx := []interface{}{ - "block", p.lastSeenBlock.Load(), + //"block", p.lastSeenBlock.Load(), "pending", p.pending.Len(), "baseFee", p.baseFee.Len(), "queued", p.queued.Len(), @@ -1855,7 +1809,7 @@ func (p *TxPool) logStats() { } // Deprecated need switch to streaming-like -func (p *TxPool) deprecatedForEach(_ context.Context, f func(rlp, sender []byte, t SubPoolType), tx kv.Tx) { +func (p *TxPool) deprecatedForEach(_ context.Context, f func(rlp []byte, sender common.Address, t SubPoolType), tx kv.Tx) { p.lock.Lock() defer p.lock.Unlock() p.all.ascendAll(func(mt *metaTx) bool { @@ -2000,29 +1954,28 @@ func (sc *sendersBatch) printDebug(prefix string) { // flushing to db periodicaly. it doesn't play as read-cache (because db is small and memory-mapped - doesn't need cache) // non thread-safe type sendersBatch struct { - senderIDs map[string]uint64 - senderID2Addr map[uint64][]byte - tracedSenders map[string]struct{} + senderIDs map[common.Address]uint64 + senderID2Addr map[uint64]common.Address + tracedSenders map[common.Address]struct{} senderID uint64 } -func newSendersCache(tracedSenders map[string]struct{}) *sendersBatch { - return &sendersBatch{senderIDs: map[string]uint64{}, senderID2Addr: map[uint64][]byte{}, tracedSenders: tracedSenders} +func newSendersCache(tracedSenders map[common.Address]struct{}) *sendersBatch { + return &sendersBatch{senderIDs: map[common.Address]uint64{}, senderID2Addr: map[uint64]common.Address{}, tracedSenders: tracedSenders} } -func (sc *sendersBatch) getID(addr []byte) (uint64, bool) { - id, ok := sc.senderIDs[string(addr)] +func (sc *sendersBatch) getID(addr common.Address) (uint64, bool) { + id, ok := sc.senderIDs[addr] return id, ok } -func (sc *sendersBatch) getOrCreateID(addr []byte) (uint64, bool) { - _, traced := sc.tracedSenders[string(addr)] - id, ok := sc.senderIDs[string(addr)] +func (sc *sendersBatch) getOrCreateID(addr common.Address) (uint64, bool) { + _, traced := sc.tracedSenders[addr] + id, ok := sc.senderIDs[addr] if !ok { - copyAddr := common.Copy(addr) sc.senderID++ id = sc.senderID - sc.senderIDs[string(copyAddr)] = id - sc.senderID2Addr[id] = copyAddr + sc.senderIDs[addr] = id + sc.senderID2Addr[id] = addr if traced { log.Info(fmt.Sprintf("TX TRACING: allocated senderID %d to sender %x", id, addr)) } @@ -2034,7 +1987,7 @@ func (sc *sendersBatch) info(cacheView kvcache.CacheView, id uint64) (nonce uint if !ok { panic("must not happen") } - encoded, err := cacheView.Get(addr) + encoded, err := cacheView.Get(addr.Bytes()) if err != nil { return 0, emptySender.balance, err } @@ -2050,7 +2003,7 @@ func (sc *sendersBatch) info(cacheView kvcache.CacheView, id uint64) (nonce uint func (sc *sendersBatch) registerNewSenders(newTxs *types.TxSlots) (err error) { for i, txn := range newTxs.Txs { - txn.SenderID, txn.Traced = sc.getOrCreateID(newTxs.Senders.At(i)) + txn.SenderID, txn.Traced = sc.getOrCreateID(newTxs.Senders.AddressAt(i)) } return nil } @@ -2058,15 
+2011,15 @@ func (sc *sendersBatch) onNewBlock(stateChanges *remote.StateChangeBatch, unwind for _, diff := range stateChanges.ChangeBatch { for _, change := range diff.Changes { // merge state changes addrB := gointerfaces.ConvertH160toAddress(change.Address) - sc.getOrCreateID(addrB[:]) + sc.getOrCreateID(addrB) } for i, txn := range unwindTxs.Txs { - txn.SenderID, txn.Traced = sc.getOrCreateID(unwindTxs.Senders.At(i)) + txn.SenderID, txn.Traced = sc.getOrCreateID(unwindTxs.Senders.AddressAt(i)) } for i, txn := range minedTxs.Txs { - txn.SenderID, txn.Traced = sc.getOrCreateID(minedTxs.Senders.At(i)) + txn.SenderID, txn.Traced = sc.getOrCreateID(minedTxs.Senders.AddressAt(i)) } } return nil @@ -2175,27 +2128,16 @@ func (b *BySenderAndNonce) replaceOrInsert(mt *metaTx) *metaTx { // It's more expensive to maintain "slice sort" invariant, but it allow do cheap copy of // pending.best slice for mining (because we consider txs and metaTx are immutable) type PendingPool struct { - best *bestSlice - worst *WorstQueue - added types.Announcements - limit int - t SubPoolType - adding bool + best *bestSlice + worst *WorstQueue + limit int + t SubPoolType } func NewPendingSubPool(t SubPoolType, limit int) *PendingPool { return &PendingPool{limit: limit, t: t, best: &bestSlice{ms: []*metaTx{}}, worst: &WorstQueue{ms: []*metaTx{}}} } -func (p *PendingPool) resetAdded() { - p.added.Reset() - p.adding = true -} -func (p *PendingPool) appendAddedTo(a *types.Announcements) { - a.AppendOther(p.added) - p.adding = false -} - // bestSlice - is similar to best queue, but with O(n log n) complexity and // it maintains element.bestIndex field type bestSlice struct { @@ -2264,9 +2206,6 @@ func (p *PendingPool) Remove(i *metaTx) { } func (p *PendingPool) Add(i *metaTx) { - if p.adding { - p.added.Append(i.Tx.Type, i.Tx.Size, i.Tx.IDHash[:]) - } if i.Tx.Traced { log.Info(fmt.Sprintf("TX TRACING: moved to subpool %s, IdHash=%x, sender=%d", p.t, i.Tx.IDHash, i.Tx.SenderID)) } @@ -2284,27 +2223,16 @@ func (p *PendingPool) DebugPrint(prefix string) { } type SubPool struct { - best *BestQueue - worst *WorstQueue - added types.Announcements - limit int - t SubPoolType - adding bool + best *BestQueue + worst *WorstQueue + limit int + t SubPoolType } func NewSubPool(t SubPoolType, limit int) *SubPool { return &SubPool{limit: limit, t: t, best: &BestQueue{}, worst: &WorstQueue{}} } -func (p *SubPool) resetAdded() { - p.added.Reset() - p.adding = true -} -func (p *SubPool) appendAddedTo(a *types.Announcements) { - a.AppendOther(p.added) - p.adding = false -} - func (p *SubPool) EnforceInvariants() { heap.Init(p.worst) heap.Init(p.best) @@ -2333,9 +2261,6 @@ func (p *SubPool) PopWorst() *metaTx { //nolint } func (p *SubPool) Len() int { return p.best.Len() } func (p *SubPool) Add(i *metaTx) { - if p.adding { - p.added.Append(i.Tx.Type, i.Tx.Size, i.Tx.IDHash[:]) - } if i.Tx.Traced { log.Info(fmt.Sprintf("TX TRACING: moved to subpool %s, IdHash=%x, sender=%d", p.t, i.Tx.IDHash, i.Tx.SenderID)) } diff --git a/txpool/pool_fuzz_test.go b/txpool/pool_fuzz_test.go index b9249934c..489581391 100644 --- a/txpool/pool_fuzz_test.go +++ b/txpool/pool_fuzz_test.go @@ -8,6 +8,9 @@ import ( "encoding/binary" "testing" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" + "github.com/holiman/uint256" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/assert" @@ -164,7 +167,7 @@ func parseSenders(in []byte) (nonces []uint64, balances []uint256.Int) { return } -func 
poolsFromFuzzBytes(rawTxNonce, rawValues, rawTips, rawFeeCap, rawSender []byte) (sendersInfo map[uint64]*sender, senderIDs map[string]uint64, txs types.TxSlots, ok bool) { +func poolsFromFuzzBytes(rawTxNonce, rawValues, rawTips, rawFeeCap, rawSender []byte) (sendersInfo map[uint64]*sender, senderIDs map[common.Address]uint64, txs types.TxSlots, ok bool) { if len(rawTxNonce) < 1 || len(rawValues) < 1 || len(rawTips) < 1 || len(rawFeeCap) < 1 || len(rawSender) < 1+1 { return nil, nil, txs, false } @@ -187,13 +190,13 @@ func poolsFromFuzzBytes(rawTxNonce, rawValues, rawTips, rawFeeCap, rawSender []b } sendersInfo = map[uint64]*sender{} - senderIDs = map[string]uint64{} + senderIDs = map[common.Address]uint64{} senders := make(types.Addresses, 20*len(senderNonce)) for i := 0; i < len(senderNonce); i++ { senderID := uint64(i + 1) //non-zero expected binary.BigEndian.PutUint64(senders.At(i%senders.Len()), senderID) sendersInfo[senderID] = newSender(senderNonce[i], senderBalance[i%len(senderBalance)]) - senderIDs[string(senders.At(i%senders.Len()))] = senderID + senderIDs[senders.AddressAt(i%senders.Len())] = senderID } txs.Txs = make([]*types.TxSlot, len(txNonce)) parseCtx := types.NewTxParseContext(*u256.N1) @@ -309,13 +312,13 @@ func FuzzOnNewBlocks(f *testing.F) { ch := make(chan types.Announcements, 100) db, coreDB := memdb.NewTestPoolDB(t), memdb.NewTestDB(t) - cfg := DefaultConfig + cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil) assert.NoError(err) pool.senders.senderIDs = senderIDs for addr, id := range senderIDs { - pool.senders.senderID2Addr[id] = []byte(addr) + pool.senders.senderID2Addr[id] = addr } pool.senders.senderID = uint64(len(senderIDs)) check := func(unwindTxs, minedTxs types.TxSlots, msg string) { @@ -469,15 +472,14 @@ func FuzzOnNewBlocks(f *testing.F) { return nil }) change := &remote.StateChangeBatch{ - StateVersionID: txID, + StateVersionId: txID, PendingBlockBaseFee: pendingBaseFee, ChangeBatch: []*remote.StateChange{ {BlockHeight: 0, BlockHash: h0}, }, } for id, sender := range senders { - var addr [20]byte - copy(addr[:], pool.senders.senderID2Addr[id]) + addr := pool.senders.senderID2Addr[id] v := make([]byte, types.EncodeSenderLengthForStorage(sender.nonce, sender.balance)) types.EncodeSender(sender.nonce, sender.balance, v) change.ChangeBatch[0].Changes = append(change.ChangeBatch[0].Changes, &remote.AccountChange{ @@ -495,7 +497,7 @@ func FuzzOnNewBlocks(f *testing.F) { _, _, _ = p2pReceived, txs2, txs3 change = &remote.StateChangeBatch{ - StateVersionID: txID, + StateVersionId: txID, PendingBlockBaseFee: pendingBaseFee, ChangeBatch: []*remote.StateChange{ {BlockHeight: 1, BlockHash: h0}, @@ -508,7 +510,7 @@ func FuzzOnNewBlocks(f *testing.F) { // unwind everything and switch to new fork (need unwind mined now) change = &remote.StateChangeBatch{ - StateVersionID: txID, + StateVersionId: txID, PendingBlockBaseFee: pendingBaseFee, ChangeBatch: []*remote.StateChange{ {BlockHeight: 0, BlockHash: h0, Direction: remote.Direction_UNWIND}, @@ -520,7 +522,7 @@ func FuzzOnNewBlocks(f *testing.F) { checkNotify(txs2, types.TxSlots{}, "fork2") change = &remote.StateChangeBatch{ - StateVersionID: txID, + StateVersionId: txID, PendingBlockBaseFee: pendingBaseFee, ChangeBatch: []*remote.StateChange{ {BlockHeight: 1, BlockHash: h22}, @@ -543,7 +545,7 @@ func FuzzOnNewBlocks(f *testing.F) { check(p2pReceived, types.TxSlots{}, "after_flush") checkNotify(p2pReceived, 
types.TxSlots{}, "after_flush") - p2, err := New(ch, coreDB, DefaultConfig, sendersCache, *u256.N1, nil) + p2, err := New(ch, coreDB, txpoolcfg.DefaultConfig, sendersCache, *u256.N1, nil) assert.NoError(err) p2.senders = pool.senders // senders are not persisted err = coreDB.View(ctx, func(coreTx kv.Tx) error { return p2.fromDB(ctx, tx, coreTx) }) diff --git a/txpool/pool_test.go b/txpool/pool_test.go index b0fa8653e..330e1a5ab 100644 --- a/txpool/pool_test.go +++ b/txpool/pool_test.go @@ -18,20 +18,18 @@ package txpool import ( "bytes" - "container/heap" "context" "fmt" "math" "math/big" - "math/rand" "testing" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/fixedgas" "github.com/ledgerwatch/erigon-lib/common/u256" "github.com/ledgerwatch/erigon-lib/gointerfaces" @@ -42,61 +40,12 @@ import ( "github.com/ledgerwatch/erigon-lib/types" ) -func BenchmarkName(b *testing.B) { - txs := make([]*metaTx, 10_000) - p := NewSubPool(BaseFeeSubPool, 1024) - for i := 0; i < len(txs); i++ { - txs[i] = &metaTx{Tx: &types.TxSlot{}} - } - for i := 0; i < len(txs); i++ { - p.Add(txs[i]) - } - p.EnforceInvariants() - b.ResetTimer() - for i := 0; i < b.N; i++ { - txs[0].timestamp = 1 - heap.Fix(p.best, txs[0].bestIndex) - heap.Fix(p.worst, txs[0].worstIndex) - } -} - -func BenchmarkName2(b *testing.B) { - - var ( - a = rand.Uint64() - c = rand.Uint64() - d = rand.Uint64() - ) - b.ResetTimer() - var min1 uint64 - var min2 uint64 - var r uint64 - - for i := 0; i < b.N; i++ { - min1 = cmp.Min(min1, a) - min2 = cmp.Min(min2, c) - if d <= min1 { - r = cmp.Min(min1-d, min2) - } else { - r = 0 - } - // - //// 4. Dynamic fee requirement. Set to 1 if feeCap of the transaction is no less than - //// baseFee of the currently pending block. Set to 0 otherwise. 
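
A note on the promote() hunks above: announcement collection moves out of the subpools and into the promotion/demotion loops themselves, so each tx the loop relocates is appended to the caller-supplied types.Announcements at the exact decision point, instead of PendingPool/SubPool tracking their own "added" state. A minimal sketch of that shape, with simplified stand-in types (tx, announcements, promoteBest are illustrative, not the pool's real API):

```go
package main

import "fmt"

type tx struct {
	feeCap uint64
	hash   [32]byte
}

type announcements [][32]byte

func (a *announcements) Append(h [32]byte) { *a = append(*a, h) }

// promoteBest moves every tx whose feeCap clears the base fee from src to dst,
// announcing each move at the point the decision is made.
func promoteBest(src, dst *[]tx, baseFee uint64, a *announcements) {
	kept := (*src)[:0]
	for _, t := range *src {
		if t.feeCap >= baseFee {
			a.Append(t.hash) // announce here, not inside the pool's Add
			*dst = append(*dst, t)
		} else {
			kept = append(kept, t)
		}
	}
	*src = kept
}

func main() {
	src := []tx{{feeCap: 5}, {feeCap: 50}}
	var dst []tx
	var a announcements
	promoteBest(&src, &dst, 10, &a)
	fmt.Println(len(dst), len(a)) // 1 1
}
```

This keeps Add a pure data-structure operation and removes the resetAdded/appendAddedTo bookkeeping that the pools previously carried.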
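Keying sendersBatch by common.Address (a [20]byte) instead of string removes the string(addr) conversion and the defensive common.Copy on every lookup, and flushLocked can now resolve an ID through the senderID2Addr inverted index in O(1) instead of scanning senderIDs. fromDB likewise recovers the stored address with a Go 1.17+ slice-to-array conversion rather than a byte copy. A self-contained sketch under those assumptions (Address, senders, getOrCreateID are simplified stand-ins):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

type Address [20]byte // stand-in for common.Address

type senders struct {
	ids    map[Address]uint64 // addr -> id
	byID   map[uint64]Address // id -> addr: the inverted index flushLocked uses
	nextID uint64
}

func (s *senders) getOrCreateID(addr Address) uint64 {
	id, ok := s.ids[addr]
	if !ok {
		s.nextID++
		id = s.nextID
		s.ids[addr] = id // array keys copy by value: no common.Copy, no string(addr)
		s.byID[id] = addr
	}
	return id
}

func main() {
	s := &senders{ids: map[Address]uint64{}, byID: map[uint64]Address{}}
	var a Address
	binary.BigEndian.PutUint64(a[:8], 42)
	id := s.getOrCreateID(a)

	// fromDB-style decode: the stored value is a 20-byte address, then the tx RLP.
	v := make([]byte, 20, 32)
	copy(v, a[:])
	v = append(v, 0xc0)          // pretend RLP follows
	addr := *(*[20]byte)(v[:20]) // Go 1.17+ slice-to-array conversion, no copy loop
	fmt.Println(s.byID[id] == addr, len(v[20:])) // true 1
}
```

The same value semantics are what let deprecatedForEach hand callers a common.Address directly, so the gRPC server below can drop its senderArr copy.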
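Config and DefaultConfig move to the new txpool/txpoolcfg package, presumably so consumers can import pool configuration without importing the pool itself. DefaultConfig is a plain struct value, so the cfg := txpoolcfg.DefaultConfig pattern used throughout these tests takes a copy that can be tweaked locally; a usage sketch (field values per the new file):

```go
package main

import (
	"fmt"

	"github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg"
)

func main() {
	// DefaultConfig is a struct value: assignment copies it, so per-test
	// overrides never leak into the package-level default.
	cfg := txpoolcfg.DefaultConfig
	cfg.AccountSlots = 32
	fmt.Println(cfg.AccountSlots, txpoolcfg.DefaultConfig.AccountSlots) // 32 16
}
```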
- //mt.subPool &^= EnoughFeeCapBlock - //if mt.Tx.feeCap >= pendingBaseFee { - // mt.subPool |= EnoughFeeCapBlock - //} - } - _ = r -} - func TestNonceFromAddress(t *testing.T) { assert, require := assert.New(t), require.New(t) ch := make(chan types.Announcements, 100) db, coreDB := memdb.NewTestPoolDB(t), memdb.NewTestDB(t) - cfg := DefaultConfig + cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil) assert.NoError(err) @@ -107,7 +56,7 @@ func TestNonceFromAddress(t *testing.T) { // start blocks from 0, set empty hash - then kvcache will also work on this h1 := gointerfaces.ConvertHashToH256([32]byte{}) change := &remote.StateChangeBatch{ - StateVersionID: stateVersionID, + StateVersionId: stateVersionID, PendingBlockBaseFee: pendingBaseFee, BlockGasLimit: 1000000, ChangeBatch: []*remote.StateChange{ @@ -216,7 +165,7 @@ func TestReplaceWithHigherFee(t *testing.T) { ch := make(chan types.Announcements, 100) db, coreDB := memdb.NewTestPoolDB(t), memdb.NewTestDB(t) - cfg := DefaultConfig + cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil) assert.NoError(err) @@ -227,7 +176,7 @@ func TestReplaceWithHigherFee(t *testing.T) { // start blocks from 0, set empty hash - then kvcache will also work on this h1 := gointerfaces.ConvertHashToH256([32]byte{}) change := &remote.StateChangeBatch{ - StateVersionID: stateVersionID, + StateVersionId: stateVersionID, PendingBlockBaseFee: pendingBaseFee, BlockGasLimit: 1000000, ChangeBatch: []*remote.StateChange{ @@ -333,7 +282,7 @@ func TestReverseNonces(t *testing.T) { ch := make(chan types.Announcements, 100) db, coreDB := memdb.NewTestPoolDB(t), memdb.NewTestDB(t) - cfg := DefaultConfig + cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil) assert.NoError(err) @@ -344,7 +293,7 @@ func TestReverseNonces(t *testing.T) { // start blocks from 0, set empty hash - then kvcache will also work on this h1 := gointerfaces.ConvertHashToH256([32]byte{}) change := &remote.StateChangeBatch{ - StateVersionID: stateVersionID, + StateVersionId: stateVersionID, PendingBlockBaseFee: pendingBaseFee, BlockGasLimit: 1000000, ChangeBatch: []*remote.StateChange{ @@ -460,7 +409,7 @@ func TestTxPoke(t *testing.T) { ch := make(chan types.Announcements, 100) db, coreDB := memdb.NewTestPoolDB(t), memdb.NewTestDB(t) - cfg := DefaultConfig + cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) pool, err := New(ch, coreDB, cfg, sendersCache, *u256.N1, nil) assert.NoError(err) @@ -471,7 +420,7 @@ func TestTxPoke(t *testing.T) { // start blocks from 0, set empty hash - then kvcache will also work on this h1 := gointerfaces.ConvertHashToH256([32]byte{}) change := &remote.StateChangeBatch{ - StateVersionID: stateVersionID, + StateVersionId: stateVersionID, PendingBlockBaseFee: pendingBaseFee, BlockGasLimit: 1000000, ChangeBatch: []*remote.StateChange{ @@ -717,7 +666,7 @@ func TestShanghaiValidateTx(t *testing.T) { t.Run(name, func(t *testing.T) { ch := make(chan types.Announcements, 100) _, coreDB := memdb.NewTestPoolDB(t), memdb.NewTestDB(t) - cfg := DefaultConfig + cfg := txpoolcfg.DefaultConfig var shanghaiTime *big.Int if test.isShanghai { diff --git a/txpool/txpool_grpc_server.go b/txpool/txpool_grpc_server.go index de303b350..7a6b9f75f 100644 --- 
a/txpool/txpool_grpc_server.go +++ b/txpool/txpool_grpc_server.go @@ -54,7 +54,7 @@ type txPool interface { PeekBest(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas, availableDataGas uint64) (bool, error) GetRlp(tx kv.Tx, hash []byte) ([]byte, error) AddLocalTxs(ctx context.Context, newTxs types.TxSlots, tx kv.Tx) ([]DiscardReason, error) - deprecatedForEach(_ context.Context, f func(rlp, sender []byte, t SubPoolType), tx kv.Tx) + deprecatedForEach(_ context.Context, f func(rlp []byte, sender common.Address, t SubPoolType), tx kv.Tx) CountContent() (int, int, int) IdHashKnown(tx kv.Tx, hash []byte) (bool, error) NonceFromAddress(addr [20]byte) (nonce uint64, inPool bool) @@ -134,11 +134,9 @@ func (s *GrpcServer) All(ctx context.Context, _ *txpool_proto.AllRequest) (*txpo defer tx.Rollback() reply := &txpool_proto.AllReply{} reply.Txs = make([]*txpool_proto.AllReply_Tx, 0, 32) - var senderArr [20]byte - s.txPool.deprecatedForEach(ctx, func(rlp, sender []byte, t SubPoolType) { - copy(senderArr[:], sender) // TODO: optimize + s.txPool.deprecatedForEach(ctx, func(rlp []byte, sender common.Address, t SubPoolType) { reply.Txs = append(reply.Txs, &txpool_proto.AllReply_Tx{ - Sender: gointerfaces.ConvertAddressToH160(senderArr), + Sender: gointerfaces.ConvertAddressToH160(sender), TxnType: convertSubPoolType(t), RlpTx: common.Copy(rlp), }) diff --git a/txpool/txpoolcfg/txpoolcfg.go b/txpool/txpoolcfg/txpoolcfg.go new file mode 100644 index 000000000..9eb2bbd45 --- /dev/null +++ b/txpool/txpoolcfg/txpoolcfg.go @@ -0,0 +1,38 @@ +package txpoolcfg + +import ( + "math/big" + "time" +) + +type Config struct { + DBDir string + TracedSenders []string // List of senders for which tx pool should print out debugging info + SyncToNewPeersEvery time.Duration + ProcessRemoteTxsEvery time.Duration + CommitEvery time.Duration + LogEvery time.Duration + PendingSubPoolLimit int + BaseFeeSubPoolLimit int + QueuedSubPoolLimit int + MinFeeCap uint64 + AccountSlots uint64 // Number of executable transaction slots guaranteed per account + PriceBump uint64 // Price bump percentage to replace an already existing transaction + OverrideShanghaiTime *big.Int +} + +var DefaultConfig = Config{ + SyncToNewPeersEvery: 2 * time.Minute, + ProcessRemoteTxsEvery: 100 * time.Millisecond, + CommitEvery: 15 * time.Second, + LogEvery: 30 * time.Second, + + PendingSubPoolLimit: 10_000, + BaseFeeSubPoolLimit: 10_000, + QueuedSubPoolLimit: 10_000, + + MinFeeCap: 1, + AccountSlots: 16, //TODO: to choose right value (16 to be compatible with Geth) + PriceBump: 10, // Price bump percentage to replace an already existing transaction + OverrideShanghaiTime: nil, +} diff --git a/txpool/txpooluitl/all_components.go b/txpool/txpooluitl/all_components.go index e4f00c64d..f32ad7e61 100644 --- a/txpool/txpooluitl/all_components.go +++ b/txpool/txpooluitl/all_components.go @@ -23,6 +23,7 @@ import ( "github.com/c2h5oh/datasize" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" "github.com/ledgerwatch/log/v3" mdbx2 "github.com/torquem-ch/mdbx-go/mdbx" @@ -99,7 +100,7 @@ func SaveChainConfigIfNeed(ctx context.Context, coreDB kv.RoDB, txPoolDB kv.RwDB return cc, blockNum, nil } -func AllComponents(ctx context.Context, cfg txpool.Config, cache kvcache.Cache, newTxs chan types.Announcements, chainDB kv.RoDB, sentryClients []direct.SentryClient, stateChangesClient txpool.StateChangesClient) (kv.RwDB, *txpool.TxPool, *txpool.Fetch, *txpool.Send, *txpool.GrpcServer, error) { +func AllComponents(ctx 
context.Context, cfg txpoolcfg.Config, cache kvcache.Cache, newTxs chan types.Announcements, chainDB kv.RoDB, sentryClients []direct.SentryClient, stateChangesClient txpool.StateChangesClient) (kv.RwDB, *txpool.TxPool, *txpool.Fetch, *txpool.Send, *txpool.GrpcServer, error) { txPoolDB, err := mdbx.NewMDBX(log.New()).Label(kv.TxPoolDB).Path(cfg.DBDir). WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.TxpoolTablesCfg }). Flags(func(f uint) uint { return f ^ mdbx2.Durable | mdbx2.SafeNoSync }). diff --git a/types/txn.go b/types/txn.go index 6adb9739c..d040e1f2a 100644 --- a/types/txn.go +++ b/types/txn.go @@ -161,7 +161,7 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo // therefore we assign the first returned value of Prefix function (list) to legacy variable dataPos, dataLen, legacy, err := rlp.Prefix(payload, pos) if err != nil { - return 0, fmt.Errorf("%w: size Prefix: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: size Prefix: %s", ErrParseTxn, err) //nolint } // This handles the transactions coming from other Erigon peers of older versions, which add 0x80 (empty) transactions into packets if dataLen == 0 { @@ -186,10 +186,10 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo return len(payload), ctx.ParseBlobTransaction(payload[p:], slot, sender, networkVersion, validateHash) } if _, err = ctx.Keccak1.Write(payload[p : p+1]); err != nil { - return 0, fmt.Errorf("%w: computing IdHash (hashing type Prefix): %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: computing IdHash (hashing type Prefix): %s", ErrParseTxn, err) //nolint } if _, err = ctx.Keccak2.Write(payload[p : p+1]); err != nil { - return 0, fmt.Errorf("%w: computing signHash (hashing type Prefix): %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: computing signHash (hashing type Prefix): %s", ErrParseTxn, err) //nolint } p++ if p >= len(payload) { @@ -197,11 +197,11 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo } dataPos, dataLen, err = rlp.List(payload, p) if err != nil { - return 0, fmt.Errorf("%w: envelope Prefix: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: envelope Prefix: %s", ErrParseTxn, err) //nolint } // Hash the envelope, not the full payload if _, err = ctx.Keccak1.Write(payload[p : dataPos+dataLen]); err != nil { - return 0, fmt.Errorf("%w: computing IdHash (hashing the envelope): %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: computing IdHash (hashing the envelope): %s", ErrParseTxn, err) //nolint } // For legacy transaction, the entire payload in expected to be in "rlp" field // whereas for non-legacy, only the content of the envelope (start with position p) @@ -223,7 +223,7 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo if !legacy { p, err = rlp.U256(payload, p, &ctx.ChainID) if err != nil { - return 0, fmt.Errorf("%w: chainId len: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: chainId len: %s", ErrParseTxn, err) //nolint } if ctx.ChainID.IsZero() { // zero indicates that the chain ID was not specified in the tx. 
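
The blanket //nolint comments added throughout types/txn.go guard one deliberate pattern: ParseTransaction wraps the ErrParseTxn sentinel with %w (so errors.Is keeps working) but flattens the position-specific inner error with %s, keeping the wrap chain one level deep. The bugs/error linter presets enabled in .golangci.yml would otherwise flag the %s verb on an error value. A sketch of the trade-off (parseNonce and the error messages are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

var ErrParseTxn = errors.New("transaction parsing error") // stand-in sentinel

// parseNonce mimics the pattern kept in ParseTransaction: %w for the sentinel,
// %s for the positional detail, so only ErrParseTxn is in the error chain.
func parseNonce() error {
	inner := errors.New("unexpected EOF") // stand-in for an rlp.* error
	return fmt.Errorf("%w: nonce: %s", ErrParseTxn, inner) //nolint
}

func main() {
	err := parseNonce()
	fmt.Println(errors.Is(err, ErrParseTxn)) // true: the sentinel is wrapped
	fmt.Println(err)                         // transaction parsing error: nonce: unexpected EOF
}
```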
if ctx.chainIDRequired { @@ -238,13 +238,13 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo // Next follows the nonce, which we need to parse p, slot.Nonce, err = rlp.U64(payload, p) if err != nil { - return 0, fmt.Errorf("%w: nonce: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: nonce: %s", ErrParseTxn, err) //nolint } // Next follows gas price or tip // Although consensus rules specify that tip can be up to 256 bit long, we narrow it to 64 bit p, err = rlp.U256(payload, p, &slot.Tip) if err != nil { - return 0, fmt.Errorf("%w: tip: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: tip: %s", ErrParseTxn, err) //nolint } // Next follows feeCap, but only for dynamic fee transactions, for legacy transaction, it is // equal to tip @@ -254,18 +254,18 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo // Although consensus rules specify that feeCap can be up to 256 bit long, we narrow it to 64 bit p, err = rlp.U256(payload, p, &slot.FeeCap) if err != nil { - return 0, fmt.Errorf("%w: feeCap: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: feeCap: %s", ErrParseTxn, err) //nolint } } // Next follows gas p, slot.Gas, err = rlp.U64(payload, p) if err != nil { - return 0, fmt.Errorf("%w: gas: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: gas: %s", ErrParseTxn, err) //nolint } // Next follows the destination address (if present) dataPos, dataLen, err = rlp.String(payload, p) if err != nil { - return 0, fmt.Errorf("%w: to len: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: to len: %s", ErrParseTxn, err) //nolint } if dataLen != 0 && dataLen != 20 { return 0, fmt.Errorf("%w: unexpected length of to field: %d", ErrParseTxn, dataLen) @@ -277,12 +277,12 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo // Next follows value p, err = rlp.U256(payload, p, &slot.Value) if err != nil { - return 0, fmt.Errorf("%w: value: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: value: %s", ErrParseTxn, err) //nolint } // Next goes data, but we are only interesting in its length dataPos, dataLen, err = rlp.String(payload, p) if err != nil { - return 0, fmt.Errorf("%w: data len: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: data len: %s", ErrParseTxn, err) //nolint } slot.DataLen = dataLen @@ -300,31 +300,31 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo if !legacy { dataPos, dataLen, err = rlp.List(payload, p) if err != nil { - return 0, fmt.Errorf("%w: access list len: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: access list len: %s", ErrParseTxn, err) //nolint } tuplePos := dataPos var tupleLen int for tuplePos < dataPos+dataLen { tuplePos, tupleLen, err = rlp.List(payload, tuplePos) if err != nil { - return 0, fmt.Errorf("%w: tuple len: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: tuple len: %s", ErrParseTxn, err) //nolint } var addrPos int addrPos, err = rlp.StringOfLen(payload, tuplePos, 20) if err != nil { - return 0, fmt.Errorf("%w: tuple addr len: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: tuple addr len: %s", ErrParseTxn, err) //nolint } slot.AlAddrCount++ var storagePos, storageLen int storagePos, storageLen, err = rlp.List(payload, addrPos+20) if err != nil { - return 0, fmt.Errorf("%w: storage key list len: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: storage key list len: %s", ErrParseTxn, err) //nolint } skeyPos := storagePos for skeyPos < storagePos+storageLen { skeyPos, err = rlp.StringOfLen(payload, skeyPos, 32) if 
err != nil { - return 0, fmt.Errorf("%w: tuple storage key len: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: tuple storage key len: %s", ErrParseTxn, err) //nolint } slot.AlStorCount++ skeyPos += 32 @@ -348,7 +348,7 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo if legacy { p, err = rlp.U256(payload, p, &ctx.V) if err != nil { - return 0, fmt.Errorf("%w: V: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: V: %s", ErrParseTxn, err) //nolint } ctx.IsProtected = ctx.V.Eq(u256.N27) || ctx.V.Eq(u256.N28) // Compute chainId from V @@ -380,7 +380,7 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo var v uint64 p, v, err = rlp.U64(payload, p) if err != nil { - return 0, fmt.Errorf("%w: V: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: V: %s", ErrParseTxn, err) //nolint } if v > 1 { return 0, fmt.Errorf("%w: V is loo large: %d", ErrParseTxn, v) @@ -392,18 +392,18 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo // Next follows R of the signature p, err = rlp.U256(payload, p, &ctx.R) if err != nil { - return 0, fmt.Errorf("%w: R: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: R: %s", ErrParseTxn, err) //nolint } // New follows S of the signature p, err = rlp.U256(payload, p, &ctx.S) if err != nil { - return 0, fmt.Errorf("%w: S: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: S: %s", ErrParseTxn, err) //nolint } // For legacy transactions, hash the full payload if legacy { if _, err = ctx.Keccak1.Write(payload[pos:p]); err != nil { - return 0, fmt.Errorf("%w: computing IdHash: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: computing IdHash: %s", ErrParseTxn, err) //nolint } } //ctx.keccak1.Sum(slot.IdHash[:0]) @@ -427,25 +427,25 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo if sigHashLen < 56 { ctx.buf[0] = byte(sigHashLen) + 192 if _, err := ctx.Keccak2.Write(ctx.buf[:1]); err != nil { - return 0, fmt.Errorf("%w: computing signHash (hashing len Prefix): %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: computing signHash (hashing len Prefix): %s", ErrParseTxn, err) //nolint } } else { beLen := (bits.Len(sigHashLen) + 7) / 8 binary.BigEndian.PutUint64(ctx.buf[1:], uint64(sigHashLen)) ctx.buf[8-beLen] = byte(beLen) + 247 if _, err := ctx.Keccak2.Write(ctx.buf[8-beLen : 9]); err != nil { - return 0, fmt.Errorf("%w: computing signHash (hashing len Prefix): %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: computing signHash (hashing len Prefix): %s", ErrParseTxn, err) //nolint } } if _, err = ctx.Keccak2.Write(payload[sigHashPos:sigHashEnd]); err != nil { - return 0, fmt.Errorf("%w: computing signHash: %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: computing signHash: %s", ErrParseTxn, err) //nolint } if legacy { if chainIDLen > 0 { if chainIDBits <= 7 { ctx.buf[0] = byte(ctx.ChainID.Uint64()) if _, err := ctx.Keccak2.Write(ctx.buf[:1]); err != nil { - return 0, fmt.Errorf("%w: computing signHash (hashing legacy chainId): %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: computing signHash (hashing legacy chainId): %s", ErrParseTxn, err) //nolint } } else { binary.BigEndian.PutUint64(ctx.buf[1:9], ctx.ChainID[3]) @@ -454,14 +454,14 @@ func (ctx *TxParseContext) ParseTransaction(payload []byte, pos int, slot *TxSlo binary.BigEndian.PutUint64(ctx.buf[25:33], ctx.ChainID[0]) ctx.buf[32-chainIDLen] = 128 + byte(chainIDLen) if _, err = ctx.Keccak2.Write(ctx.buf[32-chainIDLen : 33]); err != nil { - return 0, fmt.Errorf("%w: computing 
signHash (hashing legacy chainId): %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: computing signHash (hashing legacy chainId): %s", ErrParseTxn, err) //nolint } } // Encode two zeros ctx.buf[0] = 128 ctx.buf[1] = 128 if _, err := ctx.Keccak2.Write(ctx.buf[:2]); err != nil { - return 0, fmt.Errorf("%w: computing signHash (hashing zeros after legacy chainId): %s", ErrParseTxn, err) + return 0, fmt.Errorf("%w: computing signHash (hashing zeros after legacy chainId): %s", ErrParseTxn, err) //nolint } } } @@ -642,7 +642,7 @@ func (a Announcements) DedupCopy() Announcements { sizes: make([]uint32, unique), hashes: make([]byte, unique*length.Hash), } - copy(c.hashes[:], a.hashes[0:length.Hash]) + copy(c.hashes, a.hashes[0:length.Hash]) c.ts[0] = a.ts[0] c.sizes[0] = a.sizes[0] dest := length.Hash @@ -707,6 +707,12 @@ func (a Announcements) Copy() Announcements { type Addresses []byte // flatten list of 20-byte addresses +// AddressAt returns an address at the given index in the flattened list. +// Use this method if you want to reduce memory allocations +func (h Addresses) AddressAt(i int) common.Address { + return *(*[20]byte)(h[i*length.Addr : (i+1)*length.Addr]) +} + func (h Addresses) At(i int) []byte { return h[i*length.Addr : (i+1)*length.Addr] } func (h Addresses) Len() int { return len(h) / length.Addr }
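
The new Addresses.AddressAt complements At: At returns a sub-slice aliasing the flattened buffer, while AddressAt copies the 20 bytes out as a common.Address value, which is what lets callers like getOrCreateID use it directly as a map key without allocating. A sketch of the difference (addrLen stands in for length.Addr):

```go
package main

import "fmt"

const addrLen = 20 // stand-in for length.Addr

type Addresses []byte // flattened list of 20-byte addresses

func (h Addresses) At(i int) []byte { return h[i*addrLen : (i+1)*addrLen] }

// AddressAt copies the i-th address out as a value via the Go 1.17+
// slice-to-array conversion; the result no longer aliases h.
func (h Addresses) AddressAt(i int) [20]byte {
	return *(*[20]byte)(h[i*addrLen : (i+1)*addrLen])
}

func main() {
	buf := make(Addresses, 2*addrLen)
	buf[0] = 0xaa
	alias := buf.At(0)        // shares backing memory with buf
	value := buf.AddressAt(0) // independent copy
	buf[0] = 0xbb
	fmt.Printf("%x %x\n", alias[0], value[0]) // bb aa
}
```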
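For context on the signHash hunks above: ParseTransaction re-derives an RLP list prefix in ctx.buf before feeding Keccak2, encoding lengths under 56 bytes as 0xC0+len and longer ones as 0xF7+lenOfLen followed by the big-endian length. A standalone sketch of that arithmetic (listPrefix is illustrative, not part of the package):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// listPrefix returns the RLP list header for a payload of the given length,
// mirroring the buf[0] = len+192 / buf[8-beLen] = beLen+247 logic above.
func listPrefix(payloadLen int) []byte {
	var buf [9]byte
	if payloadLen < 56 {
		buf[0] = byte(payloadLen) + 192 // short list: 0xC0..0xF7
		return buf[:1]
	}
	beLen := (bits.Len(uint(payloadLen)) + 7) / 8 // bytes needed for the length
	binary.BigEndian.PutUint64(buf[1:], uint64(payloadLen))
	buf[8-beLen] = byte(beLen) + 247 // long list: 0xF8..
	return buf[8-beLen : 9]
}

func main() {
	fmt.Printf("% x\n", listPrefix(3))    // c3
	fmt.Printf("% x\n", listPrefix(1024)) // f9 04 00
}
```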