From 8860f058ff643ebe1d51bcc3c9c2ad7172e8f0f3 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 21 Feb 2023 16:23:46 -0500 Subject: [PATCH 01/27] Remove `--whitelisted-subnets` (#2468) --- config/config.go | 17 +++++------------ config/flags.go | 1 - config/keys.go | 1 - 3 files changed, 5 insertions(+), 14 deletions(-) diff --git a/config/config.go b/config/config.go index 248d9b791bca..77f0dab56cf8 100644 --- a/config/config.go +++ b/config/config.go @@ -60,9 +60,7 @@ const ( ) var ( - deprecatedKeys = map[string]string{ - WhitelistedSubnetsKey: fmt.Sprintf("Use --%s instead", TrackSubnetsKey), - } + deprecatedKeys = map[string]string{} errInvalidStakerWeights = errors.New("staking weights must be positive") errStakingDisableOnPublicNetwork = errors.New("staking disabled on public network") @@ -842,15 +840,10 @@ func getGenesisData(v *viper.Viper, networkID uint32, stakingCfg *genesis.Stakin } func getTrackedSubnets(v *viper.Viper) (set.Set[ids.ID], error) { - var trackSubnets string - if v.IsSet(TrackSubnetsKey) { - trackSubnets = v.GetString(TrackSubnetsKey) - } else { - trackSubnets = v.GetString(WhitelistedSubnetsKey) - } - - trackedSubnetIDs := set.Set[ids.ID]{} - for _, subnet := range strings.Split(trackSubnets, ",") { + trackSubnetsStr := v.GetString(TrackSubnetsKey) + trackSubnetsStrs := strings.Split(trackSubnetsStr, ",") + trackedSubnetIDs := set.NewSet[ids.ID](len(trackSubnetsStrs)) + for _, subnet := range trackSubnetsStrs { if subnet == "" { continue } diff --git a/config/flags.go b/config/flags.go index 1f5c010dc512..bd5f4a937aab 100644 --- a/config/flags.go +++ b/config/flags.go @@ -263,7 +263,6 @@ func addNodeFlags(fs *flag.FlagSet) { fs.Duration(StakeMintingPeriodKey, genesis.LocalParams.RewardConfig.MintingPeriod, "Consumption period of the staking function") fs.Uint64(StakeSupplyCapKey, genesis.LocalParams.RewardConfig.SupplyCap, "Supply cap of the staking function") // Subnets - fs.String(WhitelistedSubnetsKey, "", fmt.Sprintf("[DEPRECATED] Use --%s", TrackSubnetsKey)) fs.String(TrackSubnetsKey, "", "List of subnets for the node to track. A node tracking a subnet will track the uptimes of the subnet validators and attempt to sync all the chains in the subnet. 
Before validating a subnet, a node should be tracking the subnet to avoid impacting their subnet validation uptime") // State syncing diff --git a/config/keys.go b/config/keys.go index 496871e7c5f5..8c65f20c9f2b 100644 --- a/config/keys.go +++ b/config/keys.go @@ -127,7 +127,6 @@ const ( SnowMaxTimeProcessingKey = "snow-max-time-processing" SnowMixedQueryNumPushVdrKey = "snow-mixed-query-num-push-vdr" SnowMixedQueryNumPushNonVdrKey = "snow-mixed-query-num-push-non-vdr" - WhitelistedSubnetsKey = "whitelisted-subnets" TrackSubnetsKey = "track-subnets" AdminAPIEnabledKey = "api-admin-enabled" InfoAPIEnabledKey = "api-info-enabled" From 513bd4431990456d90024f7aa1dab086d1702212 Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 21 Feb 2023 17:06:55 -0500 Subject: [PATCH 02/27] Update secp256k1 version to v4.1.0 (#1634) --- go.mod | 3 +-- go.sum | 7 ++----- utils/crypto/secp256k1/secp256k1.go | 4 ++-- utils/crypto/secp256k1/secp256k1_test.go | 2 +- 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index d1302dc3a343..029f9d73f04d 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/ava-labs/coreth v0.11.7-rc.3 github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 github.com/btcsuite/btcd/btcutil v1.1.3 - github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200627015759-01fd2de07837 + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 github.com/golang-jwt/jwt/v4 v4.3.0 github.com/golang/mock v1.6.0 github.com/google/btree v1.1.2 @@ -70,7 +70,6 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set v1.8.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf // indirect github.com/ethereum/go-ethereum v1.10.26 // indirect diff --git a/go.sum b/go.sum index 7f7a6177321a..19cd6c76f3a7 100644 --- a/go.sum +++ b/go.sum @@ -118,14 +118,11 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= -github.com/decred/dcrd/chaincfg/chainhash v1.0.2 h1:rt5Vlq/jM3ZawwiacWjPa+smINyLRN07EO0cNBV6DGU= -github.com/decred/dcrd/chaincfg/chainhash v1.0.2/go.mod h1:BpbrGgrPTr3YJYRN3Bm+D9NuaFd+zGyNeIKgrhCXK60= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200627015759-01fd2de07837 h1:g2cyFTu5FKWhCo7L4hVJ797Q506B4EywA7L9I6OebgA= -github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200627015759-01fd2de07837/go.mod h1:J70FGZSbzsjecRTiTzER+3f1KZLNaXkuv+yeFTKoxM8= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/decred/dcrd/lru 
v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= diff --git a/utils/crypto/secp256k1/secp256k1.go b/utils/crypto/secp256k1/secp256k1.go index 7c2e0640588d..46febd9cc9c6 100644 --- a/utils/crypto/secp256k1/secp256k1.go +++ b/utils/crypto/secp256k1/secp256k1.go @@ -10,9 +10,9 @@ import ( stdecdsa "crypto/ecdsa" - "github.com/decred/dcrd/dcrec/secp256k1/v3/ecdsa" + "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" - secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v3" + secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" diff --git a/utils/crypto/secp256k1/secp256k1_test.go b/utils/crypto/secp256k1/secp256k1_test.go index 0e0f8d646744..b8735fc84975 100644 --- a/utils/crypto/secp256k1/secp256k1_test.go +++ b/utils/crypto/secp256k1/secp256k1_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" - secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v3" + secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" From 08fb4f647b195c48e6a1dbad870ef0aa6635e748 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 22 Feb 2023 12:04:21 -0500 Subject: [PATCH 03/27] Simplify `app` package structure (#2631) --- app/app.go | 209 +++++++++++++++++++++++++++++++++++++++ app/process/process.go | 217 ----------------------------------------- app/runner/config.go | 9 -- app/runner/runner.go | 26 ----- config/config.go | 7 -- main/main.go | 15 ++- 6 files changed, 220 insertions(+), 263 deletions(-) delete mode 100644 app/process/process.go delete mode 100644 app/runner/config.go delete mode 100644 app/runner/runner.go diff --git a/app/app.go b/app/app.go index 90e28bacb874..3490e03564e1 100644 --- a/app/app.go +++ b/app/app.go @@ -4,11 +4,39 @@ package app import ( + "fmt" "os" "os/signal" + "sync" "syscall" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + + "github.com/ava-labs/avalanchego/nat" + "github.com/ava-labs/avalanchego/node" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/utils/ulimit" +) + +const ( + Header = ` _____ .__ .__ + / _ \___ _______ | | _____ ____ ____ | |__ ____ ,_ o + / /_\ \ \/ /\__ \ | | \__ \ / \_/ ___\| | \_/ __ \ / //\, + / | \ / / __ \| |__/ __ \| | \ \___| Y \ ___/ \>> | + \____|__ /\_/ (____ /____(____ /___| /\___ >___| /\___ > \\ + \/ \/ \/ \/ \/ \/ \/` +) + +var ( + stakingPortName = fmt.Sprintf("%s-staking", constants.AppName) + httpPortName = fmt.Sprintf("%s-http", constants.AppName) + + _ App = (*app)(nil) ) type App interface { @@ -26,6 +54,13 @@ type App interface { ExitCode() (int, error) } +func New(config node.Config) App { + return &app{ + config: config, + node: &node.Node{}, + } +} + func Run(app App) int { // start running the application if err := app.Start(); err != nil { @@ -61,3 +96,177 @@ func Run(app App) int { // return the exit code that the application reported return exitCode } + +// app is a wrapper around a node that runs in this process +type app struct { + config node.Config + node *node.Node + exitWG sync.WaitGroup +} + +// Start the business logic of the node 
(as opposed to config reading, etc). +// Does not block until the node is done. Errors returned from this method +// are not logged. +func (a *app) Start() error { + // Set the data directory permissions to be read write. + if err := perms.ChmodR(a.config.DatabaseConfig.Path, true, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to restrict the permissions of the database directory with: %w", err) + } + if err := perms.ChmodR(a.config.LoggingConfig.Directory, true, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to restrict the permissions of the log directory with: %w", err) + } + + // we want to create the logger after the plugin has started the app + logFactory := logging.NewFactory(a.config.LoggingConfig) + log, err := logFactory.Make("main") + if err != nil { + logFactory.Close() + return err + } + + // update fd limit + fdLimit := a.config.FdLimit + if err := ulimit.Set(fdLimit, log); err != nil { + log.Fatal("failed to set fd-limit", + zap.Error(err), + ) + logFactory.Close() + return err + } + + // Track if sybil control is enforced + if !a.config.EnableStaking { + log.Warn("sybil control is not enforced", + zap.String("reason", "staking is disabled"), + ) + } + + // TODO move this to config + // SupportsNAT() for NoRouter is false. + // Which means we tried to perform a NAT activity but we were not successful. + if a.config.AttemptedNATTraversal && !a.config.Nat.SupportsNAT() { + log.Warn("UPnP and NAT-PMP router attach failed, you may not be listening publicly. " + + "Please confirm the settings in your router") + } + + if ip := a.config.IPPort.IPPort().IP; ip.IsLoopback() || ip.IsPrivate() { + log.Warn("P2P IP is private, you will not be publicly discoverable", + zap.Stringer("ip", ip), + ) + } + + // An empty host is treated as a wildcard to match all addresses, so it is + // considered public. + hostIsPublic := a.config.HTTPHost == "" + if !hostIsPublic { + ip, err := ips.Lookup(a.config.HTTPHost) + if err != nil { + log.Fatal("failed to lookup HTTP host", + zap.String("host", a.config.HTTPHost), + zap.Error(err), + ) + logFactory.Close() + return err + } + hostIsPublic = !ip.IsLoopback() && !ip.IsPrivate() + + log.Debug("finished HTTP host lookup", + zap.String("host", a.config.HTTPHost), + zap.Stringer("ip", ip), + zap.Bool("isPublic", hostIsPublic), + ) + } + + mapper := nat.NewPortMapper(log, a.config.Nat) + + // Open staking port we want for NAT traversal to have the external port + // (config.IP.Port) to connect to our internal listening port + // (config.InternalStakingPort) which should be the same in most cases. + if port := a.config.IPPort.IPPort().Port; port != 0 { + mapper.Map( + port, + port, + stakingPortName, + a.config.IPPort, + a.config.IPResolutionFreq, + ) + } + + // Don't open the HTTP port if the HTTP server is private + if hostIsPublic { + log.Warn("HTTP server is binding to a potentially public host. "+ + "You may be vulnerable to a DoS attack if your HTTP port is publicly accessible", + zap.String("host", a.config.HTTPHost), + ) + + // For NAT traversal we want to route from the external port + // (config.ExternalHTTPPort) to our internal port (config.HTTPPort). + if a.config.HTTPPort != 0 { + mapper.Map( + a.config.HTTPPort, + a.config.HTTPPort, + httpPortName, + nil, + a.config.IPResolutionFreq, + ) + } + } + + // Regularly update our public IP. + // Note that if the node config said to not dynamically resolve and + // update our public IP, [p.config.IPUdater] is a no-op implementation. 
+ go a.config.IPUpdater.Dispatch(log) + + if err := a.node.Initialize(&a.config, log, logFactory); err != nil { + log.Fatal("error initializing node", + zap.Error(err), + ) + mapper.UnmapAllPorts() + a.config.IPUpdater.Stop() + log.Stop() + logFactory.Close() + return err + } + + // [p.ExitCode] will block until [p.exitWG.Done] is called + a.exitWG.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + fmt.Println("caught panic", r) + } + log.Stop() + logFactory.Close() + a.exitWG.Done() + }() + defer func() { + mapper.UnmapAllPorts() + a.config.IPUpdater.Stop() + + // If [p.node.Dispatch()] panics, then we should log the panic and + // then re-raise the panic. This is why the above defer is broken + // into two parts. + log.StopOnPanic() + }() + + err := a.node.Dispatch() + log.Debug("dispatch returned", + zap.Error(err), + ) + }() + return nil +} + +// Stop attempts to shutdown the currently running node. This function will +// return immediately. +func (a *app) Stop() error { + a.node.Shutdown(0) + return nil +} + +// ExitCode returns the exit code that the node is reporting. This function +// blocks until the node has been shut down. +func (a *app) ExitCode() (int, error) { + a.exitWG.Wait() + return a.node.ExitCode(), nil +} diff --git a/app/process/process.go b/app/process/process.go deleted file mode 100644 index 1e2e734077d0..000000000000 --- a/app/process/process.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package process - -import ( - "fmt" - "sync" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/app" - "github.com/ava-labs/avalanchego/nat" - "github.com/ava-labs/avalanchego/node" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/perms" - "github.com/ava-labs/avalanchego/utils/ulimit" -) - -const ( - Header = ` _____ .__ .__ - / _ \___ _______ | | _____ ____ ____ | |__ ____ ,_ o - / /_\ \ \/ /\__ \ | | \__ \ / \_/ ___\| | \_/ __ \ / //\, - / | \ / / __ \| |__/ __ \| | \ \___| Y \ ___/ \>> | - \____|__ /\_/ (____ /____(____ /___| /\___ >___| /\___ > \\ - \/ \/ \/ \/ \/ \/ \/` -) - -var ( - stakingPortName = fmt.Sprintf("%s-staking", constants.AppName) - httpPortName = fmt.Sprintf("%s-http", constants.AppName) - - _ app.App = (*process)(nil) -) - -// process is a wrapper around a node that runs in this process -type process struct { - config node.Config - node *node.Node - exitWG sync.WaitGroup -} - -func NewApp(config node.Config) app.App { - return &process{ - config: config, - node: &node.Node{}, - } -} - -// Start the business logic of the node (as opposed to config reading, etc). -// Does not block until the node is done. Errors returned from this method -// are not logged. -func (p *process) Start() error { - // Set the data directory permissions to be read write. 
- if err := perms.ChmodR(p.config.DatabaseConfig.Path, true, perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to restrict the permissions of the database directory with: %w", err) - } - if err := perms.ChmodR(p.config.LoggingConfig.Directory, true, perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to restrict the permissions of the log directory with: %w", err) - } - - // we want to create the logger after the plugin has started the app - logFactory := logging.NewFactory(p.config.LoggingConfig) - log, err := logFactory.Make("main") - if err != nil { - logFactory.Close() - return err - } - - // update fd limit - fdLimit := p.config.FdLimit - if err := ulimit.Set(fdLimit, log); err != nil { - log.Fatal("failed to set fd-limit", - zap.Error(err), - ) - logFactory.Close() - return err - } - - // Track if sybil control is enforced - if !p.config.EnableStaking { - log.Warn("sybil control is not enforced", - zap.String("reason", "staking is disabled"), - ) - } - - // TODO move this to config - // SupportsNAT() for NoRouter is false. - // Which means we tried to perform a NAT activity but we were not successful. - if p.config.AttemptedNATTraversal && !p.config.Nat.SupportsNAT() { - log.Warn("UPnP and NAT-PMP router attach failed, you may not be listening publicly. " + - "Please confirm the settings in your router") - } - - if ip := p.config.IPPort.IPPort().IP; ip.IsLoopback() || ip.IsPrivate() { - log.Warn("P2P IP is private, you will not be publicly discoverable", - zap.Stringer("ip", ip), - ) - } - - // An empty host is treated as a wildcard to match all addresses, so it is - // considered public. - hostIsPublic := p.config.HTTPHost == "" - if !hostIsPublic { - ip, err := ips.Lookup(p.config.HTTPHost) - if err != nil { - log.Fatal("failed to lookup HTTP host", - zap.String("host", p.config.HTTPHost), - zap.Error(err), - ) - logFactory.Close() - return err - } - hostIsPublic = !ip.IsLoopback() && !ip.IsPrivate() - - log.Debug("finished HTTP host lookup", - zap.String("host", p.config.HTTPHost), - zap.Stringer("ip", ip), - zap.Bool("isPublic", hostIsPublic), - ) - } - - mapper := nat.NewPortMapper(log, p.config.Nat) - - // Open staking port we want for NAT traversal to have the external port - // (config.IP.Port) to connect to our internal listening port - // (config.InternalStakingPort) which should be the same in most cases. - if port := p.config.IPPort.IPPort().Port; port != 0 { - mapper.Map( - port, - port, - stakingPortName, - p.config.IPPort, - p.config.IPResolutionFreq, - ) - } - - // Don't open the HTTP port if the HTTP server is private - if hostIsPublic { - log.Warn("HTTP server is binding to a potentially public host. "+ - "You may be vulnerable to a DoS attack if your HTTP port is publicly accessible", - zap.String("host", p.config.HTTPHost), - ) - - // For NAT traversal we want to route from the external port - // (config.ExternalHTTPPort) to our internal port (config.HTTPPort). - if p.config.HTTPPort != 0 { - mapper.Map( - p.config.HTTPPort, - p.config.HTTPPort, - httpPortName, - nil, - p.config.IPResolutionFreq, - ) - } - } - - // Regularly update our public IP. - // Note that if the node config said to not dynamically resolve and - // update our public IP, [p.config.IPUdater] is a no-op implementation. 
- go p.config.IPUpdater.Dispatch(log) - - if err := p.node.Initialize(&p.config, log, logFactory); err != nil { - log.Fatal("error initializing node", - zap.Error(err), - ) - mapper.UnmapAllPorts() - p.config.IPUpdater.Stop() - log.Stop() - logFactory.Close() - return err - } - - // [p.ExitCode] will block until [p.exitWG.Done] is called - p.exitWG.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - fmt.Println("caught panic", r) - } - log.Stop() - logFactory.Close() - p.exitWG.Done() - }() - defer func() { - mapper.UnmapAllPorts() - p.config.IPUpdater.Stop() - - // If [p.node.Dispatch()] panics, then we should log the panic and - // then re-raise the panic. This is why the above defer is broken - // into two parts. - log.StopOnPanic() - }() - - err := p.node.Dispatch() - log.Debug("dispatch returned", - zap.Error(err), - ) - }() - return nil -} - -// Stop attempts to shutdown the currently running node. This function will -// return immediately. -func (p *process) Stop() error { - p.node.Shutdown(0) - return nil -} - -// ExitCode returns the exit code that the node is reporting. This function -// blocks until the node has been shut down. -func (p *process) ExitCode() (int, error) { - p.exitWG.Wait() - return p.node.ExitCode(), nil -} diff --git a/app/runner/config.go b/app/runner/config.go deleted file mode 100644 index 49bb6eb0121b..000000000000 --- a/app/runner/config.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package runner - -type Config struct { - // If true, displays version and exits during startup - DisplayVersionAndExit bool -} diff --git a/app/runner/runner.go b/app/runner/runner.go deleted file mode 100644 index 883f9e7fc7b0..000000000000 --- a/app/runner/runner.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package runner - -import ( - "fmt" - "os" - - "golang.org/x/term" - - "github.com/ava-labs/avalanchego/app" - "github.com/ava-labs/avalanchego/app/process" - "github.com/ava-labs/avalanchego/node" -) - -// Run an AvalancheGo node. 
-func Run(nodeConfig node.Config) { - nodeApp := process.NewApp(nodeConfig) // Create node wrapper - if term.IsTerminal(int(os.Stdout.Fd())) { - fmt.Println(process.Header) - } - - exitCode := app.Run(nodeApp) - os.Exit(exitCode) -} diff --git a/config/config.go b/config/config.go index 77f0dab56cf8..e7f891f7a16e 100644 --- a/config/config.go +++ b/config/config.go @@ -19,7 +19,6 @@ import ( "github.com/spf13/viper" - "github.com/ava-labs/avalanchego/app/runner" "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" @@ -81,12 +80,6 @@ var ( errPluginDirNotADirectory = errors.New("plugin dir is not a directory") ) -func GetRunnerConfig(v *viper.Viper) runner.Config { - return runner.Config{ - DisplayVersionAndExit: v.GetBool(VersionKey), - } -} - func getConsensusConfig(v *viper.Viper) avalanche.Parameters { return avalanche.Parameters{ Parameters: snowball.Parameters{ diff --git a/main/main.go b/main/main.go index afbeb9d93f1e..266af2b550d4 100644 --- a/main/main.go +++ b/main/main.go @@ -10,7 +10,9 @@ import ( "github.com/spf13/pflag" - "github.com/ava-labs/avalanchego/app/runner" + "golang.org/x/term" + + "github.com/ava-labs/avalanchego/app" "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/version" ) @@ -28,8 +30,7 @@ func main() { os.Exit(1) } - runnerConfig := config.GetRunnerConfig(v) - if runnerConfig.DisplayVersionAndExit { + if v.GetBool(config.VersionKey) { fmt.Print(version.String) os.Exit(0) } @@ -40,5 +41,11 @@ func main() { os.Exit(1) } - runner.Run(nodeConfig) + nodeApp := app.New(nodeConfig) // Create node wrapper + if term.IsTerminal(int(os.Stdout.Fd())) { + fmt.Println(app.Header) + } + + exitCode := app.Run(nodeApp) + os.Exit(exitCode) } From 92c098f5641a195c37aefa161ae7f177c740a954 Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 23 Feb 2023 01:20:37 +0800 Subject: [PATCH 04/27] Rename Avalanche consensus metrics to `avalanche_{chainID}_avalanche` (#2588) --- chains/manager.go | 20 ++++++++++++++----- snow/consensus/avalanche/consensus_test.go | 6 +++--- snow/consensus/avalanche/topological.go | 2 +- snow/consensus/snowstorm/consensus_test.go | 8 ++++---- snow/consensus/snowstorm/directed.go | 10 +++++----- snow/context.go | 17 ++++++++++++---- .../avalanche/bootstrap/bootstrapper.go | 2 +- .../avalanche/bootstrap/bootstrapper_test.go | 4 ++-- snow/engine/avalanche/getter/getter.go | 2 +- snow/engine/avalanche/transitive.go | 4 ++-- snow/engine/avalanche/transitive_test.go | 2 +- snow/networking/sender/sender.go | 15 ++++++++++++-- snow/networking/sender/sender_test.go | 19 ++++++++++++------ tests/e2e/x/transfer/virtuous.go | 6 +++--- tests/e2e/x/whitelist-vtx/suites.go | 14 ++++++------- 15 files changed, 84 insertions(+), 47 deletions(-) diff --git a/chains/manager.go b/chains/manager.go index aa9719d42873..b5263b5db985 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -427,6 +427,15 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c return nil, fmt.Errorf("error while registering chain's metrics %w", err) } + // This converts the prefix for all the Avalanche consensus metrics from + // `avalanche_{chainID}_` into `avalanche_{chainID}_avalanche_` so that + // there are no conflicts when registering the Snowman consensus metrics. 
+ avalancheConsensusMetrics := prometheus.NewRegistry() + avalancheDAGNamespace := fmt.Sprintf("%s_avalanche", chainNamespace) + if err := m.Metrics.Register(avalancheDAGNamespace, avalancheConsensusMetrics); err != nil { + return nil, fmt.Errorf("error while registering DAG metrics %w", err) + } + vmMetrics := metrics.NewOptionalGatherer() vmNamespace := fmt.Sprintf("%s_vm", chainNamespace) if err := m.Metrics.Register(vmNamespace, vmMetrics); err != nil { @@ -455,9 +464,10 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c ValidatorState: m.validatorState, ChainDataDir: chainDataDir, }, - DecisionAcceptor: m.DecisionAcceptorGroup, - ConsensusAcceptor: m.ConsensusAcceptorGroup, - Registerer: consensusMetrics, + DecisionAcceptor: m.DecisionAcceptorGroup, + ConsensusAcceptor: m.ConsensusAcceptorGroup, + Registerer: consensusMetrics, + AvalancheRegisterer: avalancheConsensusMetrics, } // Get a factory for the vm we want to use on our chain @@ -583,11 +593,11 @@ func (m *manager) createAvalancheChain( vertexBootstrappingDB := prefixdb.New([]byte("vertex_bs"), db.Database) txBootstrappingDB := prefixdb.New([]byte("tx_bs"), db.Database) - vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.Registerer) + vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.AvalancheRegisterer) if err != nil { return nil, err } - txBlocker, err := queue.New(txBootstrappingDB, "tx", ctx.Registerer) + txBlocker, err := queue.New(txBootstrappingDB, "tx", ctx.AvalancheRegisterer) if err != nil { return nil, err } diff --git a/snow/consensus/avalanche/consensus_test.go b/snow/consensus/avalanche/consensus_test.go index 604353de75a5..cef48914f1e7 100644 --- a/snow/consensus/avalanche/consensus_test.go +++ b/snow/consensus/avalanche/consensus_test.go @@ -89,7 +89,7 @@ func MetricsTest(t *testing.T, factory Factory) { Parents: 2, BatchSize: 1, } - err := ctx.Registerer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ Name: "vtx_processing", })) if err != nil { @@ -113,7 +113,7 @@ func MetricsTest(t *testing.T, factory Factory) { Parents: 2, BatchSize: 1, } - err := ctx.Registerer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ Name: "vtx_accepted", })) if err != nil { @@ -137,7 +137,7 @@ func MetricsTest(t *testing.T, factory Factory) { Parents: 2, BatchSize: 1, } - err := ctx.Registerer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ Name: "vtx_rejected", })) if err != nil { diff --git a/snow/consensus/avalanche/topological.go b/snow/consensus/avalanche/topological.go index f2c1ef8afe66..84cd47c1266d 100644 --- a/snow/consensus/avalanche/topological.go +++ b/snow/consensus/avalanche/topological.go @@ -122,7 +122,7 @@ func (ta *Topological) Initialize( ta.votes = bag.UniqueBag[ids.ID]{} ta.kahnNodes = make(map[ids.ID]kahnNode) - latencyMetrics, err := metrics.NewLatency("vtx", "vertex/vertices", chainCtx.Log, "", chainCtx.Registerer) + latencyMetrics, err := metrics.NewLatency("vtx", "vertex/vertices", chainCtx.Log, "", chainCtx.AvalancheRegisterer) if err != nil { return err } diff --git a/snow/consensus/snowstorm/consensus_test.go b/snow/consensus/snowstorm/consensus_test.go index 43e390f92adf..5e8474aca7cf 100644 --- a/snow/consensus/snowstorm/consensus_test.go +++ 
b/snow/consensus/snowstorm/consensus_test.go @@ -132,7 +132,7 @@ func MetricsTest(t *testing.T, factory Factory) { BetaRogue: 2, ConcurrentRepolls: 1, } - err := ctx.Registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_processing", })) if err != nil { @@ -152,7 +152,7 @@ func MetricsTest(t *testing.T, factory Factory) { BetaRogue: 2, ConcurrentRepolls: 1, } - err := ctx.Registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_accepted", })) if err != nil { @@ -172,7 +172,7 @@ func MetricsTest(t *testing.T, factory Factory) { BetaRogue: 2, ConcurrentRepolls: 1, } - err := ctx.Registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_rejected", })) if err != nil { @@ -605,7 +605,7 @@ func AddNonEmptyWhitelistTest(t *testing.T, factory Factory) { } ctx := snow.DefaultConsensusContextTest() reg := prometheus.NewRegistry() - ctx.Registerer = reg + ctx.AvalancheRegisterer = reg err := graph.Initialize(ctx, params) if err != nil { t.Fatal(err) diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go index 0ea5e5183d50..17b494d28519 100644 --- a/snow/consensus/snowstorm/directed.go +++ b/snow/consensus/snowstorm/directed.go @@ -110,17 +110,17 @@ func (dg *Directed) Initialize( dg.params = params var err error - dg.Polls, err = metrics.NewPolls("", ctx.Registerer) + dg.Polls, err = metrics.NewPolls("", ctx.AvalancheRegisterer) if err != nil { return fmt.Errorf("failed to create poll metrics: %w", err) } - dg.Latency, err = metrics.NewLatency("txs", "transaction(s)", ctx.Log, "", ctx.Registerer) + dg.Latency, err = metrics.NewLatency("txs", "transaction(s)", ctx.Log, "", ctx.AvalancheRegisterer) if err != nil { return fmt.Errorf("failed to create latency metrics: %w", err) } - dg.whitelistTxLatency, err = metrics.NewLatency("whitelist_tx", "whitelist transaction(s)", ctx.Log, "", ctx.Registerer) + dg.whitelistTxLatency, err = metrics.NewLatency("whitelist_tx", "whitelist transaction(s)", ctx.Log, "", ctx.AvalancheRegisterer) if err != nil { return fmt.Errorf("failed to create whitelist tx metrics: %w", err) } @@ -129,7 +129,7 @@ func (dg *Directed) Initialize( Name: "virtuous_tx_processing", Help: "Number of currently processing virtuous transaction(s)", }) - err = ctx.Registerer.Register(dg.numVirtuousTxs) + err = ctx.AvalancheRegisterer.Register(dg.numVirtuousTxs) if err != nil { return fmt.Errorf("failed to create virtuous tx metrics: %w", err) } @@ -138,7 +138,7 @@ func (dg *Directed) Initialize( Name: "rogue_tx_processing", Help: "Number of currently processing rogue transaction(s)", }) - err = ctx.Registerer.Register(dg.numRogueTxs) + err = ctx.AvalancheRegisterer.Register(dg.numRogueTxs) if err != nil { return fmt.Errorf("failed to create rogue tx metrics: %w", err) } diff --git a/snow/context.go b/snow/context.go index e93f9a3f7016..5dcd820f2d61 100644 --- a/snow/context.go +++ b/snow/context.go @@ -63,7 +63,15 @@ type Registerer interface { type ConsensusContext struct { *Context + // Registers all common and snowman consensus metrics. Unlike the avalanche + // consensus engine metrics, we do not prefix the name with the engine name, + // as snowman is used for all chains by default. 
Registerer Registerer + // Only used to register Avalanche consensus metrics. Previously, all + // metrics were prefixed with "avalanche_{chainID}_". Now we add avalanche + // to the prefix, "avalanche_{chainID}_avalanche_", to differentiate + // consensus operations after the DAG linearization. + AvalancheRegisterer Registerer // DecisionAcceptor is the callback that will be fired whenever a VM is // notified that their object, either a block in snowman or a transaction @@ -100,9 +108,10 @@ func DefaultContextTest() *Context { func DefaultConsensusContextTest() *ConsensusContext { return &ConsensusContext{ - Context: DefaultContextTest(), - Registerer: prometheus.NewRegistry(), - DecisionAcceptor: noOpAcceptor{}, - ConsensusAcceptor: noOpAcceptor{}, + Context: DefaultContextTest(), + Registerer: prometheus.NewRegistry(), + AvalancheRegisterer: prometheus.NewRegistry(), + DecisionAcceptor: noOpAcceptor{}, + ConsensusAcceptor: noOpAcceptor{}, } } diff --git a/snow/engine/avalanche/bootstrap/bootstrapper.go b/snow/engine/avalanche/bootstrap/bootstrapper.go index efed8751290d..73bf1c589ea2 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -58,7 +58,7 @@ func New(ctx context.Context, config Config, onFinished func(ctx context.Context executedStateTransitions: math.MaxInt32, } - if err := b.metrics.Initialize("bs", config.Ctx.Registerer); err != nil { + if err := b.metrics.Initialize("bs", config.Ctx.AvalancheRegisterer); err != nil { return nil, err } diff --git a/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/snow/engine/avalanche/bootstrap/bootstrapper_test.go index e0ccefb5891e..28e1f4c7a82b 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -63,11 +63,11 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.Te t.Fatal(err) } - vtxBlocker, err := queue.NewWithMissing(prefixdb.New([]byte("vtx"), db), "vtx", ctx.Registerer) + vtxBlocker, err := queue.NewWithMissing(prefixdb.New([]byte("vtx"), db), "vtx", ctx.AvalancheRegisterer) if err != nil { t.Fatal(err) } - txBlocker, err := queue.New(prefixdb.New([]byte("tx"), db), "tx", ctx.Registerer) + txBlocker, err := queue.New(prefixdb.New([]byte("tx"), db), "tx", ctx.AvalancheRegisterer) if err != nil { t.Fatal(err) } diff --git a/snow/engine/avalanche/getter/getter.go b/snow/engine/avalanche/getter/getter.go index 6a4a5cd4ec09..abede8098f67 100644 --- a/snow/engine/avalanche/getter/getter.go +++ b/snow/engine/avalanche/getter/getter.go @@ -38,7 +38,7 @@ func New(storage vertex.Storage, commonCfg common.Config) (common.AllGetsServer, "bs", "get_ancestors_vtxs", "vertices fetched in a call to GetAncestors", - commonCfg.Ctx.Registerer, + commonCfg.Ctx.AvalancheRegisterer, ) return gh, err } diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index 1164a50c8886..ea1429d3d492 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -100,12 +100,12 @@ func newTransitive(config Config) (*Transitive, error) { polls: poll.NewSet(factory, config.Ctx.Log, "", - config.Ctx.Registerer, + config.Ctx.AvalancheRegisterer, ), uniformSampler: sampler.NewUniform(), } - return t, t.metrics.Initialize("", config.Ctx.Registerer) + return t, t.metrics.Initialize("", config.Ctx.AvalancheRegisterer) } func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxBytes []byte) error { diff --git 
a/snow/engine/avalanche/transitive_test.go b/snow/engine/avalanche/transitive_test.go index 3b14d064c851..e53aec68634f 100644 --- a/snow/engine/avalanche/transitive_test.go +++ b/snow/engine/avalanche/transitive_test.go @@ -3104,7 +3104,7 @@ func TestEngineReBootstrapFails(t *testing.T) { t.Fatal(err) } - bootCfg.Ctx.Registerer = prometheus.NewRegistry() + bootCfg.Ctx.AvalancheRegisterer = prometheus.NewRegistry() // re-register the Transitive bootstrapper2, err := bootstrap.New( diff --git a/snow/networking/sender/sender.go b/snow/networking/sender/sender.go index 3883405529dd..391eb68688a3 100644 --- a/snow/networking/sender/sender.go +++ b/snow/networking/sender/sender.go @@ -73,9 +73,20 @@ func New( Help: fmt.Sprintf("# of times a %s request was not sent because the node was benched", op), }, ) - if err := ctx.Registerer.Register(counter); err != nil { - return nil, fmt.Errorf("couldn't register metric for %s: %w", op, err) + + switch engineType { + case p2p.EngineType_ENGINE_TYPE_SNOWMAN: + if err := ctx.Registerer.Register(counter); err != nil { + return nil, fmt.Errorf("couldn't register metric for %s: %w", op, err) + } + case p2p.EngineType_ENGINE_TYPE_AVALANCHE: + if err := ctx.AvalancheRegisterer.Register(counter); err != nil { + return nil, fmt.Errorf("couldn't register metric for %s: %w", op, err) + } + default: + return nil, fmt.Errorf("unknown engine type %s", engineType) } + s.failedDueToBench[op] = counter } return s, nil diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 6440bc1370f1..037fc3eb874e 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -595,8 +595,9 @@ func TestSender_Bootstrap_Requests(t *testing.T) { ctx.SubnetID = subnetID ctx.NodeID = myNodeID snowCtx := &snow.ConsensusContext{ - Context: ctx, - Registerer: prometheus.NewRegistry(), + Context: ctx, + Registerer: prometheus.NewRegistry(), + AvalancheRegisterer: prometheus.NewRegistry(), } type test struct { @@ -882,8 +883,9 @@ func TestSender_Bootstrap_Responses(t *testing.T) { ctx.SubnetID = subnetID ctx.NodeID = myNodeID snowCtx := &snow.ConsensusContext{ - Context: ctx, - Registerer: prometheus.NewRegistry(), + Context: ctx, + Registerer: prometheus.NewRegistry(), + AvalancheRegisterer: prometheus.NewRegistry(), } type test struct { @@ -1031,7 +1033,11 @@ func TestSender_Bootstrap_Responses(t *testing.T) { timeoutManager = timeout.NewMockManager(ctrl) router = router.NewMockRouter(ctrl) ) + + // Instantiate new registerers to avoid duplicate metrics + // registration snowCtx.Registerer = prometheus.NewRegistry() + snowCtx.AvalancheRegisterer = prometheus.NewRegistry() sender, err := New( snowCtx, @@ -1091,8 +1097,9 @@ func TestSender_Single_Request(t *testing.T) { ctx.SubnetID = subnetID ctx.NodeID = myNodeID snowCtx := &snow.ConsensusContext{ - Context: ctx, - Registerer: prometheus.NewRegistry(), + Context: ctx, + Registerer: prometheus.NewRegistry(), + AvalancheRegisterer: prometheus.NewRegistry(), } type test struct { diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index 2623dc820fa8..48b7ce939767 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go @@ -26,9 +26,9 @@ import ( ) const ( - metricVtxProcessing = "avalanche_X_vtx_processing" - metricVtxAccepted = "avalanche_X_vtx_accepted_count" - metricVtxRejected = "avalanche_X_vtx_rejected_count" + metricVtxProcessing = "avalanche_X_avalanche_vtx_processing" + metricVtxAccepted = 
"avalanche_X_avalanche_vtx_accepted_count" + metricVtxRejected = "avalanche_X_avalanche_vtx_rejected_count" ) const totalRounds = 50 diff --git a/tests/e2e/x/whitelist-vtx/suites.go b/tests/e2e/x/whitelist-vtx/suites.go index 15583cf3df22..34fa32ce0e1b 100644 --- a/tests/e2e/x/whitelist-vtx/suites.go +++ b/tests/e2e/x/whitelist-vtx/suites.go @@ -24,13 +24,13 @@ import ( ) const ( - metricVtxIssueSuccess = "avalanche_X_whitelist_vtx_issue_success" - metricVtxIssueFailure = "avalanche_X_whitelist_vtx_issue_failure" - metricTxProcessing = "avalanche_X_whitelist_tx_processing" - metricTxAccepted = "avalanche_X_whitelist_tx_accepted_count" - metricTxRejected = "avalanche_X_whitelist_tx_rejected_count" - metricTxPollsAccepted = "avalanche_X_whitelist_tx_polls_accepted_count" - metricTxPollsRejected = "avalanche_X_whitelist_tx_polls_rejected_count" + metricVtxIssueSuccess = "avalanche_X_avalanche_whitelist_vtx_issue_success" + metricVtxIssueFailure = "avalanche_X_avalanche_whitelist_vtx_issue_failure" + metricTxProcessing = "avalanche_X_avalanche_whitelist_tx_processing" + metricTxAccepted = "avalanche_X_avalanche_whitelist_tx_accepted_count" + metricTxRejected = "avalanche_X_avalanche_whitelist_tx_rejected_count" + metricTxPollsAccepted = "avalanche_X_avalanche_whitelist_tx_polls_accepted_count" + metricTxPollsRejected = "avalanche_X_avalanche_whitelist_tx_polls_rejected_count" ) var _ = e2e.DescribeXChain("[WhitelistTx]", func() { From 88d444c8785a9a2b7aa82827833f0c9f13fe8d9a Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 22 Feb 2023 15:02:37 -0500 Subject: [PATCH 05/27] Remove parents map from merkledb trieview (#2601) Co-authored-by: David Boehm <91908103+dboehm-avalabs@users.noreply.github.com> --- x/merkledb/README.md | 1 - x/merkledb/trie_test.go | 107 ++++++++++- x/merkledb/trieview.go | 414 +++++++++++++++------------------------- 3 files changed, 256 insertions(+), 266 deletions(-) diff --git a/x/merkledb/README.md b/x/merkledb/README.md index e98a85fb924c..b764ff01ce96 100644 --- a/x/merkledb/README.md +++ b/x/merkledb/README.md @@ -9,7 +9,6 @@ - [ ] Consider allowing a child view to commit into a parent view without committing to the base DB. - [ ] Allow concurrent reads into the trieview. - [ ] Remove `baseValuesCache` and `baseNodesCache` from the trieview. - - [ ] Remove `parents` from the trieview. - [ ] Remove special casing around the root node from the physical structure of the hashed tree. 
- [ ] Remove the implied prefix from the `dbNode`'s `child` - [ ] Fix intermediate node eviction panic when encountering errors diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go index 4baebdff5ac1..6a46d3e2eeb9 100644 --- a/x/merkledb/trie_test.go +++ b/x/merkledb/trie_test.go @@ -22,11 +22,13 @@ func getNodeValue(t ReadOnlyTrie, key string) ([]byte, error) { if err := asTrieView.CalculateIDs(context.Background()); err != nil { return nil, err } - closestNode, exact, err := asTrieView.getClosestNode(context.Background(), newPath([]byte(key))) + path := newPath([]byte(key)) + nodePath, err := asTrieView.getPathTo(context.Background(), path) if err != nil { return nil, err } - if !exact || closestNode == nil { + closestNode := nodePath[len(nodePath)-1] + if closestNode.key.Compare(path) != 0 || closestNode == nil { return nil, database.ErrNotFound } @@ -37,11 +39,13 @@ func getNodeValue(t ReadOnlyTrie, key string) ([]byte, error) { if err != nil { return nil, err } - closestNode, exact, err := view.(*trieView).getClosestNode(context.Background(), newPath([]byte(key))) + path := newPath([]byte(key)) + nodePath, err := view.(*trieView).getPathTo(context.Background(), path) if err != nil { return nil, err } - if !exact || closestNode == nil { + closestNode := nodePath[len(nodePath)-1] + if closestNode.key.Compare(path) != 0 || closestNode == nil { return nil, database.ErrNotFound } @@ -50,6 +54,101 @@ func getNodeValue(t ReadOnlyTrie, key string) ([]byte, error) { return nil, nil } +func TestTrieViewGetPathTo(t *testing.T) { + require := require.New(t) + + db, err := newDatabase( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + ValueCacheSize: 1000, + HistoryLength: 1000, + NodeCacheSize: 1000, + }, + &mockMetrics{}, + ) + require.NoError(err) + + trieIntf, err := db.NewView(context.Background()) + require.NoError(err) + trie, ok := trieIntf.(*trieView) + require.True(ok) + + path, err := trie.getPathTo(context.Background(), newPath(nil)) + require.NoError(err) + + // Just the root + require.Len(path, 1) + require.Equal(trie.root, path[0]) + + // Insert a key + key1 := []byte{0} + err = trie.Insert(context.Background(), key1, []byte("value")) + require.NoError(err) + err = trie.CalculateIDs(context.Background()) + require.NoError(err) + + path, err = trie.getPathTo(context.Background(), newPath(key1)) + require.NoError(err) + + // Root and 1 value + require.Len(path, 2) + require.Equal(trie.root, path[0]) + require.Equal(newPath(key1), path[1].key) + + // Insert another key which is a child of the first + key2 := []byte{0, 1} + err = trie.Insert(context.Background(), key2, []byte("value")) + require.NoError(err) + err = trie.CalculateIDs(context.Background()) + require.NoError(err) + + path, err = trie.getPathTo(context.Background(), newPath(key2)) + require.NoError(err) + require.Len(path, 3) + require.Equal(trie.root, path[0]) + require.Equal(newPath(key1), path[1].key) + require.Equal(newPath(key2), path[2].key) + + // Insert a key which shares no prefix with the others + key3 := []byte{255} + err = trie.Insert(context.Background(), key3, []byte("value")) + require.NoError(err) + err = trie.CalculateIDs(context.Background()) + require.NoError(err) + + path, err = trie.getPathTo(context.Background(), newPath(key3)) + require.NoError(err) + require.Len(path, 2) + require.Equal(trie.root, path[0]) + require.Equal(newPath(key3), path[1].key) + + // Other key paths not affected + path, err = trie.getPathTo(context.Background(), newPath(key2)) + 
require.NoError(err) + require.Len(path, 3) + require.Equal(trie.root, path[0]) + require.Equal(newPath(key1), path[1].key) + require.Equal(newPath(key2), path[2].key) + + // Gets closest node when key doesn't exist + key4 := []byte{0, 1, 2} + path, err = trie.getPathTo(context.Background(), newPath(key4)) + require.NoError(err) + require.Len(path, 3) + require.Equal(trie.root, path[0]) + require.Equal(newPath(key1), path[1].key) + require.Equal(newPath(key2), path[2].key) + + // Gets just root when key doesn't exist and no key shares a prefix + key5 := []byte{128} + path, err = trie.getPathTo(context.Background(), newPath(key5)) + require.NoError(err) + require.Len(path, 1) + require.Equal(trie.root, path[0]) +} + func Test_Trie_Partial_Commit_Leaves_Valid_Tries(t *testing.T) { dbTrie, err := newDatabase( context.Background(), diff --git a/x/merkledb/trieview.go b/x/merkledb/trieview.go index f585758e5fff..291259dc6d69 100644 --- a/x/merkledb/trieview.go +++ b/x/merkledb/trieview.go @@ -20,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/buffer" "github.com/ava-labs/avalanchego/utils/set" ) @@ -43,6 +42,8 @@ var ( ErrStartAfterEnd = errors.New("start key > end key") _ TrieView = &trieView{} + + numCPU = runtime.NumCPU() ) // Editable view of a trie, collects changes on top of a base trie. @@ -51,6 +52,9 @@ type trieView struct { // Must be held when reading/writing fields. lock sync.Mutex + // Changes made to this view. + // May include nodes that haven't been updated + // but will when their ID is recalculated. changes *changeSummary // Key/value pairs we've already fetched from [baseTrie]. @@ -83,9 +87,6 @@ type trieView struct { // A nil value indicates that the node isn't in [baseTrie]. baseNodesCache map[path]*node - // Key --> Parent of the node with that key. - parents map[path]*node - // True if the IDs of nodes in this view need to be recalculated. needsRecalculation bool @@ -153,7 +154,6 @@ func newTrieView( estimatedSize: estimatedSize, baseNodesCache: make(map[path]*node, defaultPreallocationSize), baseValuesCache: make(map[path]Maybe[[]byte], defaultPreallocationSize), - parents: make(map[path]*node, 2*estimatedSize), unappliedValueChanges: make(map[path]Maybe[[]byte], estimatedSize), } var err error @@ -212,59 +212,75 @@ func (t *trieView) calculateIDs(ctx context.Context) error { return err } - _, topoSpan := t.db.tracer.Start(ctx, "MerkleDB.trieview.calculateIDs.topologicalSort") + _, helperSpan := t.db.tracer.Start(ctx, "MerkleDB.trieview.calculateIDsHelper") + defer helperSpan.End() - seen := set.NewSet[path](len(t.changes.nodes) * 2) - dependencyCounts := make(map[path]int, len(t.changes.nodes)) - readyNodes := make(map[path]*node, len(t.changes.nodes)) - - // determine all changed node's ancestors and gather dependency data - for key, nodeChange := range t.changes.nodes { - if seen.Contains(key) || nodeChange.after == nil { - continue - } - if _, ok := dependencyCounts[key]; !ok { - readyNodes[key] = nodeChange.after - } + // [eg] limits the number of goroutines we start. + var eg errgroup.Group + eg.SetLimit(numCPU) + if err := t.calculateIDsHelper(ctx, t.root, &eg); err != nil { + return err + } + if err := eg.Wait(); err != nil { + return err + } + t.needsRecalculation = false + return nil +} - currentNodeKey := key +// Calculates the ID of all descendants of [n] which need to be recalculated, +// and then calculates the ID of [n] itself. 
+func (t *trieView) calculateIDsHelper(ctx context.Context, n *node, eg *errgroup.Group) error { + var ( + // We use [wg] to wait until all descendants of [n] have been updated. + // Note we can't wait on [eg] because [eg] may have started goroutines + // that aren't calculating IDs for descendants of [n]. + wg sync.WaitGroup + updatedChildren = make(chan *node, len(n.children)) + ) - parent := t.parents[nodeChange.after.key] + for childIndex, child := range n.children { + childIndex, child := childIndex, child - // all ancestors of a modified node need to have their ID updated - // if the ancestors have already been seen or there is no parent, we can stop - for !seen.Contains(currentNodeKey) && parent != nil { - // mark the previous node as handled - seen.Add(currentNodeKey) + childPath := n.key + path(childIndex) + child.compressedPath + childNodeChange, ok := t.changes.nodes[childPath] + if !ok { + // This child wasn't changed. + continue + } - // move on to the parent of the previous node - currentNodeKey = parent.key + wg.Add(1) + updateChild := func() error { + defer wg.Done() - // this node depends on the hash of the previous node, so add one to the dependency count - dependencyCounts[currentNodeKey]++ + if err := t.calculateIDsHelper(ctx, childNodeChange.after, eg); err != nil { + return err + } - // this node has a dependency, so it cannot be ready - delete(readyNodes, currentNodeKey) + // Note that this will never block + updatedChildren <- childNodeChange.after + return nil + } - // move on to the next ancestor - parent = t.parents[parent.key] + // Try updating the child and its descendants in a goroutine. + if ok := eg.TryGo(updateChild); !ok { + // We're at the goroutine limit; do the work in this goroutine. + if err := updateChild(); err != nil { + return err + } } } - topoSpan.End() - // perform hashing in topological order - var err error - if seen.Len() >= minNodeCountForConcurrentHashing { - err = t.calculateIDsConcurrent(ctx, readyNodes, dependencyCounts) - } else { - err = t.calculateIDsSync(ctx, readyNodes, dependencyCounts) - } - if err != nil { - return err + // Wait until all descendants of [n] have been updated. + wg.Wait() + close(updatedChildren) + + for child := range updatedChildren { + n.addChild(child) } - t.needsRecalculation = false - return nil + // The IDs [n]'s descendants are up to date so we can calculate [n]'s ID. + return n.calculateID(t.db.metrics) } // Returns a proof that [bytesPath] is in or not in trie [t]. @@ -293,21 +309,21 @@ func (t *trieView) getProof(ctx context.Context, key []byte) (*Proof, error) { // Get the node at the given path, or the node closest to it. keyPath := newPath(key) - closestNode, exact, err := t.getClosestNode(ctx, keyPath) + + proofPath, err := t.getPathTo(ctx, keyPath) if err != nil { return nil, err } - proofPath := buffer.NewUnboundedDeque[ProofNode](initialProofPathSize) - currentNode := closestNode - for currentNode != nil { - proofPath.PushLeft(currentNode.asProofNode()) - currentNode = t.parents[currentNode.key] - } // From root --> node from left --> right. - proof.Path = proofPath.List() + proof.Path = make([]ProofNode, len(proofPath), len(proofPath)+1) + for i, node := range proofPath { + proof.Path[i] = node.asProofNode() + } + + closestNode := proofPath[len(proofPath)-1] - if exact { + if closestNode.key.Compare(keyPath) == 0 { // There is a node with the given [key]. 
return proof, nil } @@ -817,21 +833,19 @@ func (t *trieView) applyChangedValuesToTrie(ctx context.Context) error { // Merges together nodes in the inclusive descendants of [node] that // have no value and a single child into one node with a compressed // path until a node that doesn't meet those criteria is reached. +// [parent] is [node]'s parent. // Assumes at least one of the following is true: // * [node] has a value. // * [node] has children. // Assumes this view stack is locked. -func (t *trieView) compressNodePath(ctx context.Context, node *node) error { - parent := t.parents[node.key] - +func (t *trieView) compressNodePath(ctx context.Context, parent, node *node) error { // don't collapse into this node if it's the root, doesn't have 1 child, or has a value - if parent == nil || len(node.children) != 1 || node.hasValue() { + if len(node.children) != 1 || node.hasValue() { return nil } // delete all empty nodes with a single child under [node] for len(node.children) == 1 && !node.hasValue() { - delete(t.parents, node.key) if err := t.recordNodeDeleted(ctx, node); err != nil { return err } @@ -843,89 +857,84 @@ func (t *trieView) compressNodePath(ctx context.Context, node *node) error { node = nextNode } - // [node] is the first node with multiple children or with a value under [n]. - // combine it with [n]. - t.parents[node.key] = parent + // [node] is the first node with multiple children. + // combine it with the [node] passed in. parent.addChild(node) return t.recordNodeChange(ctx, parent) } -// Deletes each node in the inclusive ancestry of [node] that has no -// value and no children. +// Starting from the last node in [nodePath], traverses toward the root +// and deletes each node that has no value and no children. +// Stops when a node with a value or children is reached. +// Assumes [nodePath] is a path from the root to a node. // Assumes this view stack is locked. -func (t *trieView) deleteEmptyNodes(ctx context.Context, node *node) error { - for node != nil && len(node.children) == 0 && !node.hasValue() { +func (t *trieView) deleteEmptyNodes(ctx context.Context, nodePath []*node) error { + node := nodePath[len(nodePath)-1] + nextParentIndex := len(nodePath) - 2 + + for ; nextParentIndex >= 0 && len(node.children) == 0 && !node.hasValue(); nextParentIndex-- { if err := t.recordNodeDeleted(ctx, node); err != nil { return err } - parent := t.parents[node.key] + parent := nodePath[nextParentIndex] - if parent != nil { - delete(t.parents, node.key) - parent.removeChild(node) - if err := t.recordNodeChange(ctx, parent); err != nil { - return err - } + parent.removeChild(node) + if err := t.recordNodeChange(ctx, parent); err != nil { + return err } node = parent } - if node == nil { - // The last processed node was the root. - // No need to call [t.compressNodePath] because the - // root has no parent that it can be merged with. + if nextParentIndex < 0 { return nil } + parent := nodePath[nextParentIndex] - return t.compressNodePath(ctx, node) + return t.compressNodePath(ctx, parent, node) } -// Gets the node furthest along a path if any exist. -// Returns: -// 1. The node closest to matching the [fullPath]. -// 2. True if the node is an exact match with the [fullPath]. -// 3. Any error that occurred while following the path. -// Assumes this view stack is locked. 
-func (t *trieView) getClosestNode( - ctx context.Context, - fullPath path, -) (closestNode *node, exactMatch bool, err error) { - // all paths start at the root - currentNode := t.root - matchedPathIndex := 0 - var previousNode *node +// Returns the nodes along the path to [key]. +// The first node is the root, and the last node is either the node with the +// given [key], if it's in the trie, or the node with the largest prefix of +// the [key] if it isn't in the trie. +// Always returns at least the root node. +func (t *trieView) getPathTo(ctx context.Context, key path) ([]*node, error) { + var ( + // all paths start at the root + currentNode = t.root + matchedKeyIndex = 0 + nodes = []*node{t.root} + ) // while the entire path hasn't been matched - for matchedPathIndex < len(fullPath) { + for matchedKeyIndex < len(key) { // confirm that a child exists and grab its ID before attempting to load it - nextChildEntry, hasChild := currentNode.children[fullPath[matchedPathIndex]] + nextChildEntry, hasChild := currentNode.children[key[matchedKeyIndex]] // the nibble for the child entry has now been handled, so increment the matchedPathIndex - matchedPathIndex += 1 + matchedKeyIndex += 1 - if !hasChild || !fullPath[matchedPathIndex:].HasPrefix(nextChildEntry.compressedPath) { + if !hasChild || !key[matchedKeyIndex:].HasPrefix(nextChildEntry.compressedPath) { // there was no child along the path or the child that was there doesn't match the remaining path - return currentNode, false, nil + return nodes, nil } // the compressed path of the entry there matched the path, so increment the matched index - matchedPathIndex += len(nextChildEntry.compressedPath) - previousNode = currentNode + matchedKeyIndex += len(nextChildEntry.compressedPath) - // grab the child node - currentNode, err = t.getNodeWithID(ctx, nextChildEntry.id, fullPath[:matchedPathIndex]) + // grab the next node along the path + var err error + currentNode, err = t.getNodeWithID(ctx, nextChildEntry.id, key[:matchedKeyIndex]) if err != nil { - // trouble retrieving the next node - // return the last node that was able to be retrieved - return previousNode, false, err + return nil, err } - // record that the node just loaded has the previous node as its parent - t.parents[currentNode.key] = previousNode + + // add node to path + nodes = append(nodes, currentNode) } - // the entire path was matched entirely, so return the node and indicate it was an exact match - return currentNode, true, nil + return nodes, nil } func getLengthOfCommonPrefix(first, second path) int { @@ -975,145 +984,6 @@ func (t *trieView) getCachedValue(key path) ([]byte, bool, error) { return nil, false, nil } -// Hashes all nodes concurrently. -// Assumes this view stack is locked. -func (t *trieView) calculateIDsConcurrent( - ctx context.Context, - readyNodes map[path]*node, - dependencyCounts map[path]int, -) error { - ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.hashingConcurrent") - defer span.End() - - eg, ctx := errgroup.WithContext(ctx) - - numThreads := runtime.NumCPU() - readyNodesChan := make(chan *node, len(readyNodes)) // nodes that are ready to be hashed - updateParentChan := make(chan *node, len(readyNodes)) // parent nodes that need to be updated - - // This iteration is guaranteed not to block because [readyNodesChan] is the - // exact right size. - // - // Invariant: There must be at least one node ready to hash. 
- for _, n := range readyNodes { - readyNodesChan <- n - } - - // allHashed is used to wait for all hashing goroutines to finish - var allHashed sync.WaitGroup - - // Note: minNodeCountForConcurrentHashing is likely larger than the number - // of CPUs so this will not spawn unused goroutines. - // - // Invariant: numThreads > 0. - allHashed.Add(numThreads) - for i := 0; i < numThreads; i++ { - eg.Go(func() error { - defer allHashed.Done() - for currentNode := range readyNodesChan { - if err := currentNode.calculateID(t.db.metrics); err != nil { - return err - } - - // Now that the node has been hashed, notify its parent to - // reduce its dependency count. - updateParentChan <- currentNode - } - return nil - }) - } - - // Update the parents with the node's new hash and add the parent to - // [readyNodesChan] if there are no remaining unhashed children. - eg.Go(func() error { - for currentNode := range updateParentChan { - // Record the node's new hash. - if err := t.recordNodeChange(ctx, currentNode); err != nil { - return err - } - - // If the node has a parent, update its child ID - parent, ok := t.parents[currentNode.key] - if !ok { - continue - } - parent.addChild(currentNode) - - dependencyCounts[parent.key]-- - - // If the parent has more dependencies, then we need to wait until - // those children are hashed. - if dependencyCounts[parent.key] != 0 { - continue - } - - // All of the parent's children have updated their hashes, so the - // parent is now ready to be hashed. - // - // Invariant: A push into [readyNodesChan] can never block here - // because this is only executed after removing an element from - // [readyNodesChan] and there are no other sources of nodes being - // pushed into the channel at this point. - readyNodesChan <- parent - - delete(dependencyCounts, parent.key) - // If there are no more dependencies being tracked, then no more - // nodes will become ready. - if len(dependencyCounts) == 0 { - close(readyNodesChan) - } - } - return nil - }) - - allHashed.Wait() - close(updateParentChan) - return eg.Wait() -} - -// hash all changed nodes synchronously. -// Assumes this view stack is locked. -func (t *trieView) calculateIDsSync( - ctx context.Context, - readyNodes map[path]*node, - dependencyCounts map[path]int, -) error { - ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.hashingSync") - defer span.End() - - // Run through each updated node and force the ID to recalculate, then update the parent - for len(readyNodes) > 0 { - for key, currentNode := range readyNodes { - delete(readyNodes, key) - if err := currentNode.calculateID(t.db.metrics); err != nil { - return err - } - - // record the newly hashed node - if err := t.recordNodeChange(ctx, currentNode); err != nil { - return err - } - - // update the parent if it exists - parent, ok := t.parents[key] - if !ok { - continue - } - parent.addChild(currentNode) - - // one of this node's dependencies has been set so decrement the count - dependencyCounts[parent.key]-- - - // when there are no more dependencies, the node is now ready to be hashed - if dependencyCounts[parent.key] == 0 { - readyNodes[parent.key] = parent - delete(dependencyCounts, parent.key) - } - } - } - return nil -} - // Inserts a key/value pair into the trie. // Assumes this view stack is locked. 
func (t *trieView) insertIntoTrie( @@ -1121,16 +991,26 @@ func (t *trieView) insertIntoTrie( key path, value Maybe[[]byte], ) (*node, error) { - // find the node that most closely matches the keyPath - closestNode, exactMatch, err := t.getClosestNode(ctx, key) + // find the node that most closely matches [key] + pathToNode, err := t.getPathTo(ctx, key) if err != nil { return nil, err } + // We're inserting a node whose ancestry is [pathToNode] + // so we'll need to recalculate their IDs. + for _, node := range pathToNode { + if err := t.recordNodeChange(ctx, node); err != nil { + return nil, err + } + } + + closestNode := pathToNode[len(pathToNode)-1] + // a node with that exact path already exists so update its value - if exactMatch { + if closestNode.key.Compare(key) == 0 { closestNode.setValue(value) - return closestNode, t.recordNodeChange(ctx, closestNode) + return closestNode, nil } closestNodeKeyLength := len(closestNode.key) @@ -1148,7 +1028,6 @@ func (t *trieView) insertIntoTrie( key, ) newNode.setValue(value) - t.parents[newNode.key] = closestNode return newNode, t.recordNodeChange(ctx, newNode) } else if err != nil { return nil, err @@ -1168,7 +1047,6 @@ func (t *trieView) insertIntoTrie( return nil, err } nodeWithValue := branchNode - t.parents[branchNode.key] = closestNode if len(key)-len(branchNode.key) == 0 { // there was no residual path for the inserted key, so the value goes directly into the new branch node @@ -1179,7 +1057,6 @@ func (t *trieView) insertIntoTrie( branchNode, key, ) - t.parents[newNode.key] = branchNode newNode.setValue(value) if err := t.recordNodeChange(ctx, newNode); err != nil { return nil, err @@ -1201,7 +1078,6 @@ func (t *trieView) insertIntoTrie( existingChildKey[len(branchNode.key)+1:], existingChildEntry.id, ) - t.parents[existingChildKey] = branchNode return nodeWithValue, t.recordNodeChange(ctx, branchNode) } @@ -1222,7 +1098,7 @@ func (t *trieView) recordNodeChange(ctx context.Context, after *node) error { return t.recordKeyChange(ctx, after.key, after) } -// Records that the node associated with the given key has been deleted +// Records that the node associated with the given key has been deleted. // Assumes this view stack is locked. func (t *trieView) recordNodeDeleted(ctx context.Context, after *node) error { // don't delete the root. @@ -1232,7 +1108,7 @@ func (t *trieView) recordNodeDeleted(ctx context.Context, after *node) error { return t.recordKeyChange(ctx, after.key, nil) } -// Records that the node associated with the given key has been changed +// Records that the node associated with the given key has been changed. // Assumes this view stack is locked. func (t *trieView) recordKeyChange(ctx context.Context, key path, after *node) error { t.needsRecalculation = true @@ -1300,15 +1176,26 @@ func (t *trieView) recordValueChange(ctx context.Context, key path, value Maybe[ // Removes the provided [key] from the trie. // Assumes this view stack is locked. func (t *trieView) removeFromTrie(ctx context.Context, key path) error { - nodeToDelete, exactMatch, err := t.getClosestNode(ctx, key) + nodePath, err := t.getPathTo(ctx, key) if err != nil { return err } - if !exactMatch || !nodeToDelete.hasValue() { + + nodeToDelete := nodePath[len(nodePath)-1] + + if nodeToDelete.key.Compare(key) != 0 || !nodeToDelete.hasValue() { // the key wasn't in the trie or doesn't have a value so there's nothing to do return nil } + // A node with ancestry [nodePath] is being deleted, so we need to recalculate + // all of the nodes in this path. 
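	// (Aside, not part of the patch: the recalculation is needed because each
	// node's ID is a hash that covers its children's IDs, so removing or
	// changing any descendant invalidates every ID on the path back to the root.)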
+ for _, node := range nodePath { + if err := t.recordNodeChange(ctx, node); err != nil { + return err + } + } + nodeToDelete.setValue(Nothing[[]byte]()) if err := t.recordNodeChange(ctx, nodeToDelete); err != nil { return err @@ -1316,11 +1203,16 @@ func (t *trieView) removeFromTrie(ctx context.Context, key path) error { // if the removed node has no children, the node can be removed from the trie if len(nodeToDelete.children) == 0 { - return t.deleteEmptyNodes(ctx, nodeToDelete) + return t.deleteEmptyNodes(ctx, nodePath) + } + + if len(nodePath) == 1 { + return nil } + parent := nodePath[len(nodePath)-2] // merge this node and its descendants into a single node if possible - return t.compressNodePath(ctx, nodeToDelete) + return t.compressNodePath(ctx, parent, nodeToDelete) } // Retrieves the node with the given [key], which is a child of [parent], and From 73d6e8c6f1f4556301825f8672164ff5281303f3 Mon Sep 17 00:00:00 2001 From: David Boehm <91908103+dboehm-avalabs@users.noreply.github.com> Date: Wed, 22 Feb 2023 16:42:08 -0500 Subject: [PATCH 06/27] Remove base caches from the trie view (#2640) --- x/merkledb/README.md | 1 - x/merkledb/trieview.go | 72 +++++------------------------------------- 2 files changed, 8 insertions(+), 65 deletions(-) diff --git a/x/merkledb/README.md b/x/merkledb/README.md index b764ff01ce96..d99f5842e729 100644 --- a/x/merkledb/README.md +++ b/x/merkledb/README.md @@ -8,7 +8,6 @@ - [ ] Remove internal parent view commitments. - [ ] Consider allowing a child view to commit into a parent view without committing to the base DB. - [ ] Allow concurrent reads into the trieview. - - [ ] Remove `baseValuesCache` and `baseNodesCache` from the trieview. - [ ] Remove special casing around the root node from the physical structure of the hashed tree. - [ ] Remove the implied prefix from the `dbNode`'s `child` - [ ] Fix intermediate node eviction panic when encountering errors diff --git a/x/merkledb/trieview.go b/x/merkledb/trieview.go index 291259dc6d69..813f3dca75e4 100644 --- a/x/merkledb/trieview.go +++ b/x/merkledb/trieview.go @@ -57,10 +57,6 @@ type trieView struct { // but will when their ID is recalculated. changes *changeSummary - // Key/value pairs we've already fetched from [baseTrie]. - // A Nothing value indicates that the key has been removed. - baseValuesCache map[path]Maybe[[]byte] - // Key/value pairs that have been inserted/removed but not // yet reflected in the trie's structure. This allows us to // defer the cost of updating the trie until we calculate node IDs. @@ -83,10 +79,6 @@ type trieView struct { // The root of the trie represented by this view. root *node - // Nodes we've already fetched from [baseTrie]. - // A nil value indicates that the node isn't in [baseTrie]. - baseNodesCache map[path]*node - // True if the IDs of nodes in this view need to be recalculated. needsRecalculation bool @@ -152,8 +144,6 @@ func newTrieView( basedOnRoot: baseRoot, changes: changes, estimatedSize: estimatedSize, - baseNodesCache: make(map[path]*node, defaultPreallocationSize), - baseValuesCache: make(map[path]Maybe[[]byte], defaultPreallocationSize), unappliedValueChanges: make(map[path]Maybe[[]byte], estimatedSize), } var err error @@ -711,21 +701,20 @@ func (t *trieView) GetValue(ctx context.Context, key []byte) ([]byte, error) { // Assumes this view stack is locked. 
func (t *trieView) getValue(ctx context.Context, key path) ([]byte, error) { - value, hasLocal, err := t.getCachedValue(key) - if hasLocal { - return value, err + if change, ok := t.changes.values[key]; ok { + t.db.metrics.ViewValueCacheHit() + if change.after.IsNothing() { + return nil, database.ErrNotFound + } + return change.after.value, nil } + t.db.metrics.ViewValueCacheMiss() // if we don't have local copy of the key, then grab a copy from the base trie - value, err = t.baseTrie.getValue(ctx, key) + value, err := t.baseTrie.getValue(ctx, key) if err != nil { - if err == database.ErrNotFound { - // Cache the miss. - t.baseValuesCache[key] = Nothing[[]byte]() - } return nil, err } - t.baseValuesCache[key] = Some(value) return value, nil } @@ -958,32 +947,6 @@ func (t *trieView) getNode(ctx context.Context, key path) (*node, error) { return n.clone(), nil } -// Returns: -// 1. The value at [key] iff the following return value is true. -// 2. True if the value at [key] exists in the caches. -// If false, the [key] may be in the trie, just not in the caches. -// 3. database.ErrNotFound if the value isn't in the trie at all (not just the caches). -// -// Assumes this view stack is locked. -func (t *trieView) getCachedValue(key path) ([]byte, bool, error) { - if change, ok := t.changes.values[key]; ok { - t.db.metrics.ViewValueCacheHit() - if change.after.IsNothing() { - return nil, true, database.ErrNotFound - } - return change.after.value, true, nil - } - if maybeVal, ok := t.baseValuesCache[key]; ok { - t.db.metrics.ViewValueCacheHit() - if maybeVal.IsNothing() { - return nil, true, database.ErrNotFound - } - return maybeVal.value, true, nil - } - t.db.metrics.ViewValueCacheMiss() - return nil, false, nil -} - // Inserts a key/value pair into the trie. // Assumes this view stack is locked. func (t *trieView) insertIntoTrie( @@ -1118,8 +1081,6 @@ func (t *trieView) recordKeyChange(ctx context.Context, key path, after *node) e return nil } - delete(t.baseNodesCache, key) - before, err := t.baseTrie.getNode(ctx, key) if err != nil { if err != database.ErrNotFound { @@ -1152,8 +1113,6 @@ func (t *trieView) recordValueChange(ctx context.Context, key path, value Maybe[ return nil } - delete(t.baseValuesCache, key) - // grab the before value var beforeMaybe Maybe[[]byte] before, err := t.baseTrie.getValue(ctx, key) @@ -1243,29 +1202,14 @@ func (t *trieView) getNodeWithID(ctx context.Context, id ids.ID, key path) (*nod return nodeChange.after, nil } - // check for the key within the nodes we have already grabbed from the base trie - if node, haveLocal := t.baseNodesCache[key]; haveLocal { - t.db.metrics.ViewNodeCacheHit() - if node == nil { - return nil, database.ErrNotFound - } - return node, nil - } - t.db.metrics.ViewNodeCacheMiss() - // get the node from the base trie and store a localy copy baseTrieNode, err := t.baseTrie.getNode(ctx, key) if err != nil { - if err == database.ErrNotFound { - // Cache the miss - t.baseNodesCache[key] = nil - } return nil, err } // copy the node so any alterations to it don't affect the base trie node := baseTrieNode.clone() - t.baseNodesCache[key] = node // only need to initialize the id if it's from the base trie. // nodes in the current view change list have already been initialized. 
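A minimal sketch of the read path that remains after removing the per-view base caches, assuming a simplified map-backed store in place of the real trieView and baseTrie types: a view consults only its own change set and otherwise falls through to its parent, with nothing read from the base being cached in between.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// view overlays pending changes on a base store. A nil value in changes
// marks a key as deleted in this view.
type view struct {
	changes map[string][]byte
	base    map[string][]byte
}

// getValue checks the view's own changes first and only falls through to
// the base store on a miss; the result from the base is not cached here.
func (v *view) getValue(key string) ([]byte, error) {
	if val, ok := v.changes[key]; ok {
		if val == nil {
			return nil, errNotFound
		}
		return val, nil
	}
	if val, ok := v.base[key]; ok {
		return val, nil
	}
	return nil, errNotFound
}

func main() {
	v := &view{
		changes: map[string][]byte{"a": []byte("new"), "b": nil},
		base:    map[string][]byte{"b": []byte("old"), "c": []byte("base")},
	}
	for _, k := range []string{"a", "b", "c", "d"} {
		val, err := v.getValue(k)
		fmt.Println(k, string(val), err)
	}
}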
From 888bada2bc7308de36b3062eff451c2309f92766 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Thu, 23 Feb 2023 12:43:17 -0500 Subject: [PATCH 07/27] Merkledb remove panic during cache eviction (#2638) --- cache/lru_cache.go | 21 ---- cache/lru_cache_test.go | 37 ------- x/merkledb/cache.go | 85 ++++++++++++++++ x/merkledb/cache_test.go | 214 +++++++++++++++++++++++++++++++++++++++ x/merkledb/db.go | 93 +++++++++++------ 5 files changed, 360 insertions(+), 90 deletions(-) create mode 100644 x/merkledb/cache.go create mode 100644 x/merkledb/cache_test.go diff --git a/cache/lru_cache.go b/cache/lru_cache.go index bdb2014616bf..814e530c296c 100644 --- a/cache/lru_cache.go +++ b/cache/lru_cache.go @@ -27,9 +27,6 @@ type LRU[K comparable, V any] struct { entryMap map[K]*list.Element entryList *list.List Size int - // OnEviction is called with an internal lock held, and therefore should - // never call any methods on the cache internally. - OnEviction func(V) } func (c *LRU[K, V]) Put(key K, value V) { @@ -79,9 +76,6 @@ func (c *LRU[K, V]) resize() { val := e.Value.(*entry[K, V]) delete(c.entryMap, val.Key) - if c.OnEviction != nil { - c.OnEviction(val.Value) - } } } @@ -96,9 +90,6 @@ func (c *LRU[K, V]) put(key K, value V) { val := e.Value.(*entry[K, V]) delete(c.entryMap, val.Key) - if c.OnEviction != nil { - c.OnEviction(val.Value) - } val.Key = key val.Value = value } else { @@ -136,24 +127,12 @@ func (c *LRU[K, V]) evict(key K) { if e, ok := c.entryMap[key]; ok { c.entryList.Remove(e) delete(c.entryMap, key) - - if c.OnEviction != nil { - val := e.Value.(*entry[K, V]) - c.OnEviction(val.Value) - } } } func (c *LRU[K, V]) flush() { c.init() - if c.OnEviction != nil { - for _, v := range c.entryMap { - val := v.Value.(*entry[K, V]) - c.OnEviction(val.Value) - } - } - c.entryMap = make(map[K]*list.Element, minCacheSize) c.entryList = list.New() } diff --git a/cache/lru_cache_test.go b/cache/lru_cache_test.go index 39be52e2cb24..b7ca773d1beb 100644 --- a/cache/lru_cache_test.go +++ b/cache/lru_cache_test.go @@ -7,8 +7,6 @@ import ( "testing" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/stretchr/testify/require" ) func TestLRU(t *testing.T) { @@ -62,38 +60,3 @@ func TestLRUResize(t *testing.T) { t.Fatalf("Retrieved wrong value") } } - -func TestLRUOnEviction(t *testing.T) { - require := require.New(t) - - shouldEvict := set.NewSet[int](1) - cache := LRU[int, int]{ - Size: 2, - OnEviction: func(i int) { - require.Contains(shouldEvict, i) - shouldEvict.Remove(i) - }, - } - - cache.Put(11, 1) - cache.Put(22, 2) - - shouldEvict.Add(1) - cache.Put(33, 3) - require.Empty(shouldEvict) - - shouldEvict.Add(2) - cache.Size = 1 - cache.resize() - require.Empty(shouldEvict) - - shouldEvict.Add(3) - cache.Flush() - require.Empty(shouldEvict) - - cache.Put(44, 4) - - shouldEvict.Add(4) - cache.Evict(44) - require.Empty(shouldEvict) -} diff --git a/x/merkledb/cache.go b/x/merkledb/cache.go new file mode 100644 index 000000000000..57979af0fa63 --- /dev/null +++ b/x/merkledb/cache.go @@ -0,0 +1,85 @@ +// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "sync" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +// A cache that calls [onEviction] on the evicted element. 
+type onEvictCache[K comparable, V any] struct { + lock sync.Mutex + maxSize int + // LRU --> MRU from left to right. + lru linkedhashmap.LinkedHashmap[K, V] + cache cache.Cacher[K, V] + onEviction func(V) error +} + +func newOnEvictCache[K comparable, V any](maxSize int, onEviction func(V) error) onEvictCache[K, V] { + return onEvictCache[K, V]{ + maxSize: maxSize, + lru: linkedhashmap.New[K, V](), + cache: &cache.LRU[K, V]{Size: maxSize}, + onEviction: onEviction, + } +} + +// Get an element from this cache. +func (c *onEvictCache[K, V]) Get(key K) (V, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + val, ok := c.cache.Get(key) + if ok { + // This key was touched; move it to the MRU position. + c.lru.Put(key, val) + } + return val, ok +} + +// Put an element into this cache. If this causes an element +// to be evicted, calls [c.onEviction] on the evicted element +// and returns the error from [c.onEviction]. Otherwise returns nil. +func (c *onEvictCache[K, V]) Put(key K, value V) error { + c.lock.Lock() + defer c.lock.Unlock() + + c.cache.Put(key, value) + c.lru.Put(key, value) // Mark as MRU + + if c.lru.Len() > c.maxSize { + // Note that [c.cache] has already evicted the oldest + // element because its max size is [c.maxSize]. + oldestKey, oldsetVal, _ := c.lru.Oldest() + c.lru.Delete(oldestKey) + return c.onEviction(oldsetVal) + } + return nil +} + +// Removes all elements from the cache. +// Returns the last non-nil error during [c.onEviction], if any. +// If [c.onEviction] errors, it will still be called for any +// subsequent elements and the cache will still be emptied. +func (c *onEvictCache[K, V]) Flush() error { + c.lock.Lock() + defer func() { + c.cache.Flush() + c.lru = linkedhashmap.New[K, V]() + c.lock.Unlock() + }() + + var errs wrappers.Errs + iter := c.lru.NewIterator() + for iter.Next() { + val := iter.Value() + errs.Add(c.onEviction(val)) + } + return errs.Err +} diff --git a/x/merkledb/cache_test.go b/x/merkledb/cache_test.go new file mode 100644 index 000000000000..50691ee676eb --- /dev/null +++ b/x/merkledb/cache_test.go @@ -0,0 +1,214 @@ +// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +var errTest = errors.New("test error") + +func TestNewOnEvictCache(t *testing.T) { + require := require.New(t) + + called := false + onEviction := func(int) error { + called = true + return nil + } + maxSize := 10 + + cache := newOnEvictCache[int](maxSize, onEviction) + require.Equal(maxSize, cache.maxSize) + require.NotNil(cache.lru) + require.Equal(0, cache.lru.Len()) + require.NotNil(cache.cache) + // Can't test function equality directly so do this + // to make sure it was assigned correctly + err := cache.onEviction(0) + require.NoError(err) + require.True(called) +} + +// Test the functionality of the cache when the onEviction function +// never returns an error. +// Note this test assumes the internal cache is an LRU cache. 
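Before those tests, a minimal usage sketch of the onEvictCache defined above (illustrative only; the helper name is hypothetical and the snippet assumes it lives in the merkledb package alongside the cache):

func exampleOnEvictCacheUsage() error {
	// Hypothetical helper, not from the patch: walks the Put/Get/Flush flow
	// of onEvictCache with an eviction callback that records what it sees.
	evicted := []string{}
	cache := newOnEvictCache[int, string](2, func(v string) error {
		evicted = append(evicted, v)
		return nil
	})

	if err := cache.Put(1, "a"); err != nil {
		return err
	}
	if err := cache.Put(2, "b"); err != nil {
		return err
	}
	// Exceeding maxSize evicts the least recently used entry (1 -> "a") and
	// runs the callback on it before Put returns.
	if err := cache.Put(3, "c"); err != nil {
		return err
	}
	if v, ok := cache.Get(2); ok {
		_ = v // "b" is still cached; the Get also marks it most recently used
	}
	// Flush empties the cache, running the callback on every remaining entry.
	return cache.Flush()
}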
+func TestOnEvictCacheNoOnEvictionError(t *testing.T) { + require := require.New(t) + + evicted := []int{} + onEviction := func(n int) error { + evicted = append(evicted, n) + return nil + } + maxSize := 3 + + cache := newOnEvictCache[int](maxSize, onEviction) + + // Get non-existent key + _, ok := cache.Get(0) + require.False(ok) + + // Put key + err := cache.Put(0, 0) + require.NoError(err) + require.Equal(1, cache.lru.Len()) + + // Get key + val, ok := cache.Get(0) + require.True(ok) + require.Equal(0, val) + + // Get non-existent key + _, ok = cache.Get(1) + require.False(ok) + + // Fill the cache + for i := 1; i < maxSize; i++ { + err := cache.Put(i, i) + require.NoError(err) + require.Equal(i+1, cache.lru.Len()) + } + require.Len(evicted, 0) + + // Cache has [0,1,2] from LRU --> MRU + + // Put another key. This should evict the LRU key (0). + err = cache.Put(maxSize, maxSize) + require.NoError(err) + require.Equal(maxSize, cache.lru.Len()) + require.Len(evicted, 1) + require.Equal(0, evicted[0]) + + // Cache has [1,2,3] from LRU --> MRU + iter := cache.lru.NewIterator() + require.True(iter.Next()) + require.Equal(1, iter.Key()) + require.Equal(1, iter.Value()) + require.True(iter.Next()) + require.Equal(2, iter.Key()) + require.Equal(2, iter.Value()) + require.True(iter.Next()) + require.Equal(3, iter.Key()) + require.Equal(3, iter.Value()) + require.False(iter.Next()) + + // 0 should no longer be in the cache + _, ok = cache.Get(0) + require.False(ok) + + // Other keys should still be in the cache + for i := maxSize; i >= 1; i-- { + val, ok := cache.Get(i) + require.True(ok) + require.Equal(i, val) + } + + // Cache has [3,2,1] from LRU --> MRU + iter = cache.lru.NewIterator() + require.True(iter.Next()) + require.Equal(3, iter.Key()) + require.Equal(3, iter.Value()) + require.True(iter.Next()) + require.Equal(2, iter.Key()) + require.Equal(2, iter.Value()) + require.True(iter.Next()) + require.Equal(1, iter.Key()) + require.Equal(1, iter.Value()) + require.False(iter.Next()) + + // Put another key to evict the LRU key (3). + err = cache.Put(maxSize+1, maxSize+1) + require.NoError(err) + require.Equal(maxSize, cache.lru.Len()) + require.Len(evicted, 2) + require.Equal(3, evicted[1]) + + // Cache has [2,1,4] from LRU --> MRU + iter = cache.lru.NewIterator() + require.True(iter.Next()) + require.Equal(2, iter.Key()) + require.Equal(2, iter.Value()) + require.True(iter.Next()) + require.Equal(1, iter.Key()) + require.Equal(1, iter.Value()) + require.True(iter.Next()) + require.Equal(4, iter.Key()) + require.Equal(4, iter.Value()) + require.False(iter.Next()) + + // 3 should no longer be in the cache + _, ok = cache.Get(3) + require.False(ok) + + err = cache.Flush() + require.NoError(err) + + // Cache should be empty + require.Equal(0, cache.lru.Len()) + require.Len(evicted, 5) + require.Equal(evicted, []int{0, 3, 2, 1, 4}) + require.Equal(0, cache.lru.Len()) + require.Equal(maxSize, cache.maxSize) // Should be unchanged +} + +// Test the functionality of the cache when the onEviction function +// returns an error. +// Note this test assumes the internal cache is an LRU cache. 
+func TestOnEvictCacheOnEvictionError(t *testing.T) { + var ( + require = require.New(t) + evicted = []int{} + onEviction = func(n int) error { + // Evicting even keys errors + evicted = append(evicted, n) + if n%2 == 0 { + return errTest + } + return nil + } + maxSize = 2 + ) + + cache := newOnEvictCache[int](maxSize, onEviction) + + // Fill the cache + for i := 0; i < maxSize; i++ { + err := cache.Put(i, i) + require.NoError(err) + require.Equal(i+1, cache.lru.Len()) + } + + // Put another key. This should evict the LRU key (0) + // and return an error since 0 is even. + err := cache.Put(maxSize, maxSize) + require.ErrorIs(err, errTest) + + // Cache should still have correct state [1,2] + require.Equal(evicted, []int{0}) + require.Equal(maxSize, cache.lru.Len()) + _, ok := cache.Get(0) + require.False(ok) + _, ok = cache.Get(1) + require.True(ok) + _, ok = cache.Get(2) + require.True(ok) + + // Flush the cache. Should error on last element (2). + err = cache.Flush() + require.ErrorIs(err, errTest) + + // Should still be empty. + require.Equal(0, cache.lru.Len()) + require.Equal(evicted, []int{0, 1, 2}) + _, ok = cache.Get(0) + require.False(ok) + _, ok = cache.Get(1) + require.False(ok) + _, ok = cache.Get(2) + require.False(ok) +} diff --git a/x/merkledb/db.go b/x/merkledb/db.go index 2e2dc5ca407e..bd634ed5bfa8 100644 --- a/x/merkledb/db.go +++ b/x/merkledb/db.go @@ -25,6 +25,7 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" ) @@ -79,7 +80,8 @@ type Database struct { metadataDB database.Database // If a value is nil, the corresponding key isn't in the trie. - nodeCache cache.LRU[path, *node] + nodeCache onEvictCache[path, *node] + onEvictionErr utils.Atomic[error] valueCache cache.LRU[string, Maybe[[]byte]] @@ -115,10 +117,7 @@ func newDatabase( // Note: trieDB.OnEviction is responsible for writing intermediary nodes to // disk as they are evicted from the cache. - trieDB.nodeCache = cache.LRU[path, *node]{ - Size: config.NodeCacheSize, - OnEviction: trieDB.OnEviction, - } + trieDB.nodeCache = newOnEvictCache[path](config.NodeCacheSize, trieDB.onEviction) root, err := trieDB.initializeRootIfNeeded(ctx) if err != nil { @@ -179,7 +178,7 @@ func (db *Database) rebuild(ctx context.Context) error { } currentViewSize := 0 viewSizeLimit := math.Max( - db.nodeCache.Size/rebuildViewSizeFractionOfCacheSize, + db.nodeCache.maxSize/rebuildViewSizeFractionOfCacheSize, minRebuildViewSizePerCommit, ) for it.Next() { @@ -261,21 +260,38 @@ func (db *Database) Compact(start []byte, limit []byte) error { func (db *Database) Close() error { db.lock.Lock() + defer db.lock.Unlock() + + if db.closed { + return database.ErrClosed + } + + db.closed = true + defer func() { - _ = db.metadataDB.Close() // TODO add logger and log error - _ = db.nodeDB.Close() // TODO add logger and log error - db.lock.Unlock() + _ = db.metadataDB.Close() + _ = db.nodeDB.Close() }() - db.closed = true + if err := db.onEvictionErr.Get(); err != nil { + // If there was an error during cache eviction, + // [db.nodeCache] and [db.nodeDB] are in an inconsistent state. + // Do not write cached nodes to disk or mark clean shutdown. + return nil + } + + // Flush [nodeCache] to persist intermediary nodes to disk. + if err := db.nodeCache.Flush(); err != nil { + // There was an error during cache eviction. 
+ // Don't commit to disk. + return err + } - // flush [nodeCache] to persist intermediary nodes to disk. - db.nodeCache.Flush() if err := db.nodeDB.Commit(); err != nil { return err } - // after intermediary nodes are persisted, we can mark a clean shutdown. + // Successfully wrote intermediate nodes. return db.metadataDB.Put(cleanShutdownKey, hadCleanShutdown) } @@ -629,26 +645,34 @@ func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database } } -// OnEviction persists intermediary nodes to [nodeDB] as they are evicted from -// [nodeCache]. -// Note that this is called by [db.nodeCache] with that cache's lock held, so -// the movement of the node from [db.nodeCache] to [db.nodeDB] is atomic. -// That is, as soon as [db.nodeCache] reports that it no longer has an evicted -// node, the node is guaranteed to be in [db.nodeDB]. -func (db *Database) OnEviction(node *node) { +// If [node] is an intermediary node, puts it in [nodeDB]. +// Note this is called by [db.nodeCache] with its lock held, so +// the movement of [node] from [db.nodeCache] to [db.nodeDB] is atomic. +// As soon as [db.nodeCache] no longer has [node], [db.nodeDB] does. +// Non-nil error is fatal -- causes [db] to close. +func (db *Database) onEviction(node *node) error { if node == nil || node.hasValue() { // only persist intermediary nodes - return + return nil } + nodeBytes, err := node.marshal() if err != nil { - // TODO: Handle this error correctly - panic(err) + db.onEvictionErr.Set(err) + // Prevent reads/writes from/to [db.nodeDB] to avoid inconsistent state. + _ = db.nodeDB.Close() + // This is a fatal error. + go db.Close() + return err } - if err = db.nodeDB.Put(node.key.Bytes(), nodeBytes); err != nil { - // TODO: Handle this error correctly - panic(err) + + if err := db.nodeDB.Put(node.key.Bytes(), nodeBytes); err != nil { + db.onEvictionErr.Set(err) + _ = db.nodeDB.Close() + go db.Close() + return err } + return nil } // Inserts the key/value pair into the db. @@ -756,7 +780,9 @@ func (db *Database) commitChanges(ctx context.Context, changes *changeSummary) e db.root = rootChange.after for key, nodeChange := range changes.nodes { - db.putNodeInCache(key, nodeChange.after) + if err := db.putNodeInCache(key, nodeChange.after); err != nil { + return err + } } _, valuesSpan := db.tracer.Start(ctx, "MerkleDB.commitChanges.writeValues") @@ -873,7 +899,9 @@ func (db *Database) getNode(_ context.Context, key path) (*node, error) { if err != nil { if err == database.ErrNotFound { // Cache the miss. - db.putNodeInCache(key, nil) + if err := db.putNodeInCache(key, nil); err != nil { + return nil, err + } } return nil, err } @@ -883,8 +911,8 @@ func (db *Database) getNode(_ context.Context, key path) (*node, error) { return nil, err } - db.putNodeInCache(key, node) - return node.clone(), nil + err = db.putNodeInCache(key, node) + return node.clone(), err } // Assumes [db.lock] is read locked. @@ -1015,11 +1043,12 @@ func (db *Database) prepareRangeProofView(ctx context.Context, start []byte, pro // This is required because putting a node in [db.nodeCache] can cause an eviction, // which puts a node in [db.nodeDB], and we don't want to put anything in [db.nodeDB] // after [db] is closed. -func (db *Database) putNodeInCache(key path, n *node) { +// Non-nil error is fatal -- [db] will close. +func (db *Database) putNodeInCache(key path, n *node) error { // TODO Cache metrics // Note that this may cause a node to be evicted from the cache, // which will call [OnEviction]. 
- db.nodeCache.Put(key, n) + return db.nodeCache.Put(key, n) } func (db *Database) getNodeInCache(key path) (*node, bool) { From a7b20edb01f307ed208edcd1ea0c9a395a2174b9 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 23 Feb 2023 12:50:40 -0500 Subject: [PATCH 08/27] Migrate utxo interfaces into `avax` package (#2643) Co-authored-by: Chloe <99216251+coffeeavax@users.noreply.github.com> --- vms/avm/states/state.go | 6 +-- vms/components/avax/utxo_handler.go | 32 +++++++++++++ vms/components/avax/utxo_state.go | 8 ++++ vms/platformvm/state/state.go | 6 +-- vms/platformvm/state/utxos.go | 21 --------- .../txs/executor/proposal_tx_executor.go | 25 +++++------ .../txs/executor/standard_tx_executor.go | 45 +++++++++---------- vms/platformvm/utxo/handler.go | 30 +------------ vms/platformvm/utxo/mock_verifier.go | 3 +- 9 files changed, 83 insertions(+), 93 deletions(-) create mode 100644 vms/components/avax/utxo_handler.go delete mode 100644 vms/platformvm/state/utxos.go diff --git a/vms/avm/states/state.go b/vms/avm/states/state.go index 5cf5feedd887..ac616a04af22 100644 --- a/vms/avm/states/state.go +++ b/vms/avm/states/state.go @@ -45,9 +45,9 @@ var ( ) type Chain interface { - GetUTXO(utxoID ids.ID) (*avax.UTXO, error) - AddUTXO(utxo *avax.UTXO) - DeleteUTXO(utxoID ids.ID) + avax.UTXOGetter + avax.UTXOAdder + avax.UTXODeleter GetTx(txID ids.ID) (*txs.Tx, error) AddTx(tx *txs.Tx) diff --git a/vms/components/avax/utxo_handler.go b/vms/components/avax/utxo_handler.go new file mode 100644 index 000000000000..1f535163ee2a --- /dev/null +++ b/vms/components/avax/utxo_handler.go @@ -0,0 +1,32 @@ +// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avax + +import "github.com/ava-labs/avalanchego/ids" + +// Removes the UTXOs consumed by [ins] from the UTXO set +func Consume(utxoDB UTXODeleter, ins []*TransferableInput) { + for _, input := range ins { + utxoDB.DeleteUTXO(input.InputID()) + } +} + +// Adds the UTXOs created by [outs] to the UTXO set. +// [txID] is the ID of the tx that created [outs]. +func Produce( + utxoDB UTXOAdder, + txID ids.ID, + outs []*TransferableOutput, +) { + for index, out := range outs { + utxoDB.AddUTXO(&UTXO{ + UTXOID: UTXOID{ + TxID: txID, + OutputIndex: uint32(index), + }, + Asset: out.Asset, + Out: out.Output(), + }) + } +} diff --git a/vms/components/avax/utxo_state.go b/vms/components/avax/utxo_state.go index 560d02530b43..84d349b2b085 100644 --- a/vms/components/avax/utxo_state.go +++ b/vms/components/avax/utxo_state.go @@ -49,6 +49,14 @@ type UTXOGetter interface { GetUTXO(utxoID ids.ID) (*UTXO, error) } +type UTXOAdder interface { + AddUTXO(utxo *UTXO) +} + +type UTXODeleter interface { + DeleteUTXO(utxoID ids.ID) +} + // UTXOWriter is a thin wrapper around a database to provide storage and // deletion of UTXOs. type UTXOWriter interface { diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index ec478e73ed38..b2b8bd3d659b 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -84,9 +84,9 @@ var ( // execution. type Chain interface { Stakers - UTXOAdder - UTXOGetter - UTXODeleter + avax.UTXOAdder + avax.UTXOGetter + avax.UTXODeleter GetTimestamp() time.Time SetTimestamp(tm time.Time) diff --git a/vms/platformvm/state/utxos.go b/vms/platformvm/state/utxos.go deleted file mode 100644 index e9ccff30633f..000000000000 --- a/vms/platformvm/state/utxos.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/components/avax" -) - -type UTXOGetter interface { - GetUTXO(utxoID ids.ID) (*avax.UTXO, error) -} - -type UTXOAdder interface { - AddUTXO(utxo *avax.UTXO) -} - -type UTXODeleter interface { - DeleteUTXO(utxoID ids.ID) -} diff --git a/vms/platformvm/txs/executor/proposal_tx_executor.go b/vms/platformvm/txs/executor/proposal_tx_executor.go index c6f9717a45bc..6b579a4fa988 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/utxo" ) const ( @@ -123,9 +122,9 @@ func (e *ProposalTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { // Set up the state if this tx is committed // Consume the UTXOs - utxo.Consume(e.OnCommitState, tx.Ins) + avax.Consume(e.OnCommitState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnCommitState, txID, tx.Outs) + avax.Produce(e.OnCommitState, txID, tx.Outs) newStaker, err := state.NewPendingStaker(txID, tx) if err != nil { @@ -136,9 +135,9 @@ func (e *ProposalTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { // Set up the state if this tx is aborted // Consume the UTXOs - utxo.Consume(e.OnAbortState, tx.Ins) + avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnAbortState, txID, onAbortOuts) + avax.Produce(e.OnAbortState, txID, onAbortOuts) e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil @@ -171,9 +170,9 @@ func (e *ProposalTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) // Set up the state if this tx is committed // Consume the UTXOs - utxo.Consume(e.OnCommitState, tx.Ins) + avax.Consume(e.OnCommitState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnCommitState, txID, tx.Outs) + avax.Produce(e.OnCommitState, txID, tx.Outs) newStaker, err := state.NewPendingStaker(txID, tx) if err != nil { @@ -184,9 +183,9 @@ func (e *ProposalTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) // Set up the state if this tx is aborted // Consume the UTXOs - utxo.Consume(e.OnAbortState, tx.Ins) + avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnAbortState, txID, tx.Outs) + avax.Produce(e.OnAbortState, txID, tx.Outs) e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil @@ -220,9 +219,9 @@ func (e *ProposalTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { // Set up the state if this tx is committed // Consume the UTXOs - utxo.Consume(e.OnCommitState, tx.Ins) + avax.Consume(e.OnCommitState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnCommitState, txID, tx.Outs) + avax.Produce(e.OnCommitState, txID, tx.Outs) newStaker, err := state.NewPendingStaker(txID, tx) if err != nil { @@ -233,9 +232,9 @@ func (e *ProposalTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { // Set up the state if this tx is aborted // Consume the UTXOs - utxo.Consume(e.OnAbortState, tx.Ins) + avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnAbortState, txID, onAbortOuts) + avax.Produce(e.OnAbortState, txID, onAbortOuts) e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go 
b/vms/platformvm/txs/executor/standard_tx_executor.go index 064c23c13764..3f1ec92f8441 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/utxo" ) var ( @@ -75,9 +74,9 @@ func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { txID := e.Tx.ID() // Consume the UTXOS - utxo.Consume(e.State, tx.Ins) + avax.Consume(e.State, tx.Ins) // Produce the UTXOS - utxo.Produce(e.State, txID, tx.Outs) + avax.Produce(e.State, txID, tx.Outs) // Add the new chain to the database e.State.AddChain(e.Tx) @@ -114,9 +113,9 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { txID := e.Tx.ID() // Consume the UTXOS - utxo.Consume(e.State, tx.Ins) + avax.Consume(e.State, tx.Ins) // Produce the UTXOS - utxo.Produce(e.State, txID, tx.Outs) + avax.Produce(e.State, txID, tx.Outs) // Add the new subnet to the database e.State.AddSubnet(e.Tx) return nil @@ -183,9 +182,9 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { txID := e.Tx.ID() // Consume the UTXOS - utxo.Consume(e.State, tx.Ins) + avax.Consume(e.State, tx.Ins) // Produce the UTXOS - utxo.Produce(e.State, txID, tx.Outs) + avax.Produce(e.State, txID, tx.Outs) e.AtomicRequests = map[ids.ID]*atomic.Requests{ tx.SourceChain: { @@ -227,9 +226,9 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { txID := e.Tx.ID() // Consume the UTXOS - utxo.Consume(e.State, tx.Ins) + avax.Consume(e.State, tx.Ins) // Produce the UTXOS - utxo.Produce(e.State, txID, tx.Outs) + avax.Produce(e.State, txID, tx.Outs) elems := make([]*atomic.Element, len(tx.ExportedOutputs)) for i, out := range tx.ExportedOutputs { @@ -286,8 +285,8 @@ func (e *StandardTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { } e.State.PutPendingValidator(newStaker) - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } @@ -309,8 +308,8 @@ func (e *StandardTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) } e.State.PutPendingValidator(newStaker) - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } @@ -332,8 +331,8 @@ func (e *StandardTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { } e.State.PutPendingDelegator(newStaker) - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } @@ -363,8 +362,8 @@ func (e *StandardTxExecutor) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidat // Invariant: There are no permissioned subnet delegators to remove. 
txID := e.Tx.ID() - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } @@ -406,9 +405,9 @@ func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error txID := e.Tx.ID() // Consume the UTXOS - utxo.Consume(e.State, tx.Ins) + avax.Consume(e.State, tx.Ins) // Produce the UTXOS - utxo.Produce(e.State, txID, tx.Outs) + avax.Produce(e.State, txID, tx.Outs) // Transform the new subnet in the database e.State.AddSubnetTransformation(e.Tx) e.State.SetCurrentSupply(tx.Subnet, tx.InitialSupply) @@ -432,8 +431,8 @@ func (e *StandardTxExecutor) AddPermissionlessValidatorTx(tx *txs.AddPermissionl } e.State.PutPendingValidator(newStaker) - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } @@ -455,8 +454,8 @@ func (e *StandardTxExecutor) AddPermissionlessDelegatorTx(tx *txs.AddPermissionl } e.State.PutPendingDelegator(newStaker) - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } diff --git a/vms/platformvm/utxo/handler.go b/vms/platformvm/utxo/handler.go index 501bb23cfcfd..b11163601841 100644 --- a/vms/platformvm/utxo/handler.go +++ b/vms/platformvm/utxo/handler.go @@ -32,32 +32,6 @@ var ( errLockedFundsNotMarkedAsLocked = errors.New("locked funds not marked as locked") ) -// Removes the UTXOs consumed by [ins] from the UTXO set -func Consume(utxoDB state.UTXODeleter, ins []*avax.TransferableInput) { - for _, input := range ins { - utxoDB.DeleteUTXO(input.InputID()) - } -} - -// Adds the UTXOs created by [outs] to the UTXO set. -// [txID] is the ID of the tx that created [outs]. -func Produce( - utxoDB state.UTXOAdder, - txID ids.ID, - outs []*avax.TransferableOutput, -) { - for index, out := range outs { - utxoDB.AddUTXO(&avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: txID, - OutputIndex: uint32(index), - }, - Asset: out.Asset, - Out: out.Output(), - }) - } -} - // TODO: Stake and Authorize should be replaced by similar methods in the // P-chain wallet type Spender interface { @@ -114,7 +88,7 @@ type Verifier interface { // Note: [unlockedProduced] is modified by this method. VerifySpend( tx txs.UnsignedTx, - utxoDB state.UTXOGetter, + utxoDB avax.UTXOGetter, ins []*avax.TransferableInput, outs []*avax.TransferableOutput, creds []verify.Verifiable, @@ -453,7 +427,7 @@ func (h *handler) Authorize( func (h *handler) VerifySpend( tx txs.UnsignedTx, - utxoDB state.UTXOGetter, + utxoDB avax.UTXOGetter, ins []*avax.TransferableInput, outs []*avax.TransferableOutput, creds []verify.Verifiable, diff --git a/vms/platformvm/utxo/mock_verifier.go b/vms/platformvm/utxo/mock_verifier.go index d391e14d52b1..7893a7475e50 100644 --- a/vms/platformvm/utxo/mock_verifier.go +++ b/vms/platformvm/utxo/mock_verifier.go @@ -13,7 +13,6 @@ import ( ids "github.com/ava-labs/avalanchego/ids" avax "github.com/ava-labs/avalanchego/vms/components/avax" verify "github.com/ava-labs/avalanchego/vms/components/verify" - state "github.com/ava-labs/avalanchego/vms/platformvm/state" txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" gomock "github.com/golang/mock/gomock" ) @@ -42,7 +41,7 @@ func (m *MockVerifier) EXPECT() *MockVerifierMockRecorder { } // VerifySpend mocks base method. 
-func (m *MockVerifier) VerifySpend(arg0 txs.UnsignedTx, arg1 state.UTXOGetter, arg2 []*avax.TransferableInput, arg3 []*avax.TransferableOutput, arg4 []verify.Verifiable, arg5 map[ids.ID]uint64) error { +func (m *MockVerifier) VerifySpend(arg0 txs.UnsignedTx, arg1 avax.UTXOGetter, arg2 []*avax.TransferableInput, arg3 []*avax.TransferableOutput, arg4 []verify.Verifiable, arg5 map[ids.ID]uint64) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VerifySpend", arg0, arg1, arg2, arg3, arg4, arg5) ret0, _ := ret[0].(error) From 1f8a4941e7e4e6bce270f70c9964104616c773fd Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 23 Feb 2023 15:34:56 -0500 Subject: [PATCH 09/27] Pass VM configs rather than factories (#2645) Co-authored-by: Chloe <99216251+coffeeavax@users.noreply.github.com> --- node/node.go | 11 +- vms/avm/config/config.go | 13 ++ vms/avm/factory.go | 6 +- vms/avm/vm.go | 3 +- vms/avm/vm_test.go | 3 +- vms/platformvm/factory.go | 2 +- vms/platformvm/vm.go | 3 +- vms/platformvm/vm_regression_test.go | 18 ++- vms/platformvm/vm_test.go | 198 ++++++++++++--------------- 9 files changed, 127 insertions(+), 130 deletions(-) create mode 100644 vms/avm/config/config.go diff --git a/node/node.go b/node/node.go index 5508f1dc4474..bb433619e057 100644 --- a/node/node.go +++ b/node/node.go @@ -73,7 +73,6 @@ import ( "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/platformvm" - "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/registry" @@ -81,6 +80,8 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ipcsapi "github.com/ava-labs/avalanchego/api/ipcs" + avmconfig "github.com/ava-labs/avalanchego/vms/avm/config" + platformconfig "github.com/ava-labs/avalanchego/vms/platformvm/config" ) var ( @@ -763,7 +764,7 @@ func (n *Node) initVMs() error { errs := wrappers.Errs{} errs.Add( vmRegisterer.Register(context.TODO(), constants.PlatformVMID, &platformvm.Factory{ - Config: config.Config{ + Config: platformconfig.Config{ Chains: n.chainManager, Validators: vdrs, UptimeLockedCalculator: n.uptimeCalculator, @@ -794,8 +795,10 @@ func (n *Node) initVMs() error { }, }), vmRegisterer.Register(context.TODO(), constants.AVMID, &avm.Factory{ - TxFee: n.Config.TxFee, - CreateAssetTxFee: n.Config.CreateAssetTxFee, + Config: avmconfig.Config{ + TxFee: n.Config.TxFee, + CreateAssetTxFee: n.Config.CreateAssetTxFee, + }, }), vmRegisterer.Register(context.TODO(), constants.EVMID, &coreth.Factory{}), n.Config.VMManager.RegisterFactory(context.TODO(), secp256k1fx.ID, &secp256k1fx.Factory{}), diff --git a/vms/avm/config/config.go b/vms/avm/config/config.go new file mode 100644 index 000000000000..44b926efe605 --- /dev/null +++ b/vms/avm/config/config.go @@ -0,0 +1,13 @@ +// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package config + +// Struct collecting all the foundational parameters of the AVM +type Config struct { + // Fee that is burned by every non-asset creating transaction + TxFee uint64 + + // Fee that must be burned by every asset creating transaction + CreateAssetTxFee uint64 +} diff --git a/vms/avm/factory.go b/vms/avm/factory.go index 7a70613e43a5..77e8993a3cd7 100644 --- a/vms/avm/factory.go +++ b/vms/avm/factory.go @@ -6,15 +6,15 @@ package avm import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/avm/config" ) var _ vms.Factory = (*Factory)(nil) type Factory struct { - TxFee uint64 - CreateAssetTxFee uint64 + config.Config } func (f *Factory) New(*snow.Context) (interface{}, error) { - return &VM{Factory: *f}, nil + return &VM{Config: f.Config}, nil } diff --git a/vms/avm/vm.go b/vms/avm/vm.go index cfd9a9688f64..13ec7cd37976 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -39,6 +39,7 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/states" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -73,7 +74,7 @@ var ( type VM struct { common.AppHandler - Factory + config.Config metrics avax.AddressManager avax.AtomicUTXOManager diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index e498f35b3183..f70cac3e1650 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -36,6 +36,7 @@ import ( "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/states" "github.com/ava-labs/avalanchego/vms/avm/txs" @@ -303,7 +304,7 @@ func GenesisVMWithArgs(tb testing.TB, additionalFxs []*common.Fx, args *BuildGen ctx.Keystore = userKeystore.NewBlockchainKeyStore(ctx.ChainID) issuer := make(chan common.Message, 1) - vm := &VM{Factory: Factory{ + vm := &VM{Config: config.Config{ TxFee: testTxFee, CreateAssetTxFee: testTxFee, }} diff --git a/vms/platformvm/factory.go b/vms/platformvm/factory.go index 8993d6be3fbe..f561738826ea 100644 --- a/vms/platformvm/factory.go +++ b/vms/platformvm/factory.go @@ -18,5 +18,5 @@ type Factory struct { // New returns a new instance of the Platform Chain func (f *Factory) New(*snow.Context) (interface{}, error) { - return &VM{Factory: *f}, nil + return &VM{Config: f.Config}, nil } diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 016942f80a61..6a76600c6722 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -39,6 +39,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" @@ -71,7 +72,7 @@ var ( ) type VM struct { - Factory + config.Config blockbuilder.Builder metrics metrics.Metrics diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 567f569f5ddb..1a40ec711160 100644 --- a/vms/platformvm/vm_regression_test.go +++ 
b/vms/platformvm/vm_regression_test.go @@ -357,16 +357,14 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { vdrs := validators.NewManager() primaryVdrs := validators.NewSet() _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.TestManager, - Validators: vdrs, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, - }, + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + Validators: vdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, }} ctx := defaultContext() diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 8734958e68e0..13b6515dc819 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -319,26 +319,24 @@ func defaultVM() (*VM, database.Database, *mutableSharedMemory) { vdrs := validators.NewManager() primaryVdrs := validators.NewSet() _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.TestManager, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - StakingEnabled: true, - Validators: vdrs, - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - TransformSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - MinValidatorStake: defaultMinValidatorStake, - MaxValidatorStake: defaultMaxValidatorStake, - MinDelegatorStake: defaultMinDelegatorStake, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - ApricotPhase3Time: defaultValidateEndTime, - ApricotPhase5Time: defaultValidateEndTime, - BanffTime: banffForkTime, - }, + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + StakingEnabled: true, + Validators: vdrs, + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + TransformSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + MinValidatorStake: defaultMinValidatorStake, + MaxValidatorStake: defaultMaxValidatorStake, + MinDelegatorStake: defaultMinDelegatorStake, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + ApricotPhase3Time: defaultValidateEndTime, + ApricotPhase5Time: defaultValidateEndTime, + BanffTime: banffForkTime, }} baseDBManager := manager.NewMemDB(version.Semantic1_0_0) @@ -424,20 +422,18 @@ func GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan c vdrs := validators.NewManager() primaryVdrs := validators.NewSet() _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.TestManager, - Validators: vdrs, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - TxFee: defaultTxFee, - MinValidatorStake: defaultMinValidatorStake, - MaxValidatorStake: defaultMaxValidatorStake, - MinDelegatorStake: defaultMinDelegatorStake, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, - }, + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + 
Validators: vdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + TxFee: defaultTxFee, + MinValidatorStake: defaultMinValidatorStake, + MaxValidatorStake: defaultMaxValidatorStake, + MinDelegatorStake: defaultMinDelegatorStake, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, }} baseDBManager := manager.NewMemDB(version.Semantic1_0_0) @@ -1477,16 +1473,14 @@ func TestRestartFullyAccepted(t *testing.T) { firstVdrs := validators.NewManager() firstPrimaryVdrs := validators.NewSet() _ = firstVdrs.Add(constants.PrimaryNetworkID, firstPrimaryVdrs) - firstVM := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.TestManager, - Validators: firstVdrs, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, - }, + firstVM := &VM{Config: config.Config{ + Chains: chains.TestManager, + Validators: firstVdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, }} firstCtx := defaultContext() @@ -1568,16 +1562,14 @@ func TestRestartFullyAccepted(t *testing.T) { secondVdrs := validators.NewManager() secondPrimaryVdrs := validators.NewSet() _ = secondVdrs.Add(constants.PrimaryNetworkID, secondPrimaryVdrs) - secondVM := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.TestManager, - Validators: secondVdrs, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, - }, + secondVM := &VM{Config: config.Config{ + Chains: chains.TestManager, + Validators: secondVdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, }} secondCtx := defaultContext() @@ -1626,16 +1618,14 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { vdrs := validators.NewManager() primaryVdrs := validators.NewSet() _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.TestManager, - Validators: vdrs, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, - }, + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + Validators: vdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, }} initialClkTime := banffForkTime.Add(time.Second) @@ -1937,16 +1927,14 @@ func TestUnverifiedParent(t *testing.T) { vdrs := validators.NewManager() primaryVdrs := validators.NewSet() _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.TestManager, - Validators: vdrs, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: 
defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, - }, + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + Validators: vdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, }} initialClkTime := banffForkTime.Add(time.Second) @@ -2102,15 +2090,13 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { firstVdrs := validators.NewManager() firstPrimaryVdrs := validators.NewSet() _ = firstVdrs.Add(constants.PrimaryNetworkID, firstPrimaryVdrs) - firstVM := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.TestManager, - UptimePercentage: .2, - RewardConfig: defaultRewardConfig, - Validators: firstVdrs, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: banffForkTime, - }, + firstVM := &VM{Config: config.Config{ + Chains: chains.TestManager, + UptimePercentage: .2, + RewardConfig: defaultRewardConfig, + Validators: firstVdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + BanffTime: banffForkTime, }} firstCtx := defaultContext() @@ -2147,14 +2133,12 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { secondVdrs := validators.NewManager() secondPrimaryVdrs := validators.NewSet() _ = secondVdrs.Add(constants.PrimaryNetworkID, secondPrimaryVdrs) - secondVM := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.TestManager, - UptimePercentage: .21, - Validators: secondVdrs, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: banffForkTime, - }, + secondVM := &VM{Config: config.Config{ + Chains: chains.TestManager, + UptimePercentage: .21, + Validators: secondVdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + BanffTime: banffForkTime, }} secondCtx := defaultContext() @@ -2285,15 +2269,13 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { vdrs := validators.NewManager() primaryVdrs := validators.NewSet() _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.TestManager, - UptimePercentage: .2, - RewardConfig: defaultRewardConfig, - Validators: vdrs, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: banffForkTime, - }, + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + UptimePercentage: .2, + RewardConfig: defaultRewardConfig, + Validators: vdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + BanffTime: banffForkTime, }} ctx := defaultContext() @@ -2401,15 +2383,13 @@ func TestVM_GetValidatorSet(t *testing.T) { primaryVdrs := validators.NewSet() _ = vdrManager.Add(constants.PrimaryNetworkID, primaryVdrs) - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.TestManager, - UptimePercentage: .2, - RewardConfig: defaultRewardConfig, - Validators: vdrManager, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: mockable.MaxTime, - }, + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + UptimePercentage: .2, + RewardConfig: defaultRewardConfig, + Validators: vdrManager, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + BanffTime: mockable.MaxTime, }} ctx := defaultContext() From 7703ea0e40c0381b34251ddc3a2c09e5a46dd5e1 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 23 Feb 2023 15:36:35 -0500 Subject: [PATCH 10/27] Remove `platformvm.VM#txExecutorBackend` (#2648) Co-authored-by: Alberto Benegiamo --- 
vms/platformvm/vm.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 6a76600c6722..e622552b4ec5 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -103,9 +103,8 @@ type VM struct { // sliding window of blocks that were recently accepted recentlyAccepted window.Window[ids.ID] - txBuilder txbuilder.Builder - txExecutorBackend *txexecutor.Backend - manager blockexecutor.Manager + txBuilder txbuilder.Builder + manager blockexecutor.Manager } // Initialize this blockchain. @@ -182,7 +181,7 @@ func (vm *VM) Initialize( utxoHandler, ) - vm.txExecutorBackend = &txexecutor.Backend{ + txExecutorBackend := &txexecutor.Backend{ Config: &vm.Config, Ctx: vm.ctx, Clk: &vm.clock, @@ -204,13 +203,13 @@ func (vm *VM) Initialize( mempool, vm.metrics, vm.state, - vm.txExecutorBackend, + txExecutorBackend, vm.recentlyAccepted, ) vm.Builder = blockbuilder.New( mempool, vm.txBuilder, - vm.txExecutorBackend, + txExecutorBackend, vm.manager, toEngine, appSender, From e4e41f29d04c0701c2fd87dac289aedcf6865adc Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 24 Feb 2023 10:54:58 -0500 Subject: [PATCH 11/27] Simplify VM Factory interface (#2649) --- chains/manager.go | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- scripts/constants.sh | 2 +- vms/avm/factory.go | 4 ++-- vms/manager.go | 7 ++++--- vms/mock_manager.go | 4 ++-- vms/nftfx/factory.go | 4 ++-- vms/nftfx/factory_test.go | 4 +++- vms/platformvm/factory.go | 4 ++-- vms/propertyfx/factory.go | 4 ++-- vms/propertyfx/factory_test.go | 4 +++- vms/registry/vm_registerer.go | 4 ++-- vms/registry/vm_registerer_test.go | 28 ++++++++++++++-------------- vms/rpcchainvm/factory.go | 21 +++++---------------- vms/rpcchainvm/vm_client.go | 13 ++++--------- vms/secp256k1fx/factory.go | 4 ++-- vms/secp256k1fx/factory_test.go | 4 +++- 18 files changed, 56 insertions(+), 65 deletions(-) diff --git a/chains/manager.go b/chains/manager.go index b5263b5db985..3b088a5299ff 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -477,7 +477,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c } // Create the chain - vm, err := vmFactory.New(ctx.Context) + vm, err := vmFactory.New(chainLog) if err != nil { return nil, fmt.Errorf("error while creating vm: %w", err) } @@ -491,7 +491,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c return nil, fmt.Errorf("error while getting fxFactory: %w", err) } - fx, err := fxFactory.New(ctx.Context) + fx, err := fxFactory.New(chainLog) if err != nil { return nil, fmt.Errorf("error while creating fx: %w", err) } diff --git a/go.mod b/go.mod index 029f9d73f04d..538ebc7ba129 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/Microsoft/go-winio v0.5.2 github.com/NYTimes/gziphandler v1.1.1 github.com/ava-labs/avalanche-network-runner-sdk v0.3.0 - github.com/ava-labs/coreth v0.11.7-rc.3 + github.com/ava-labs/coreth v0.11.8-0.20230223235704-14175cdd347d github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 github.com/btcsuite/btcd/btcutil v1.1.3 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 diff --git a/go.sum b/go.sum index 19cd6c76f3a7..c9db5b8426ea 100644 --- a/go.sum +++ b/go.sum @@ -57,8 +57,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/ava-labs/avalanche-network-runner-sdk v0.3.0 
h1:TVi9JEdKNU/RevYZ9PyW4pULbEdS+KQDA9Ki2DUvuAs= github.com/ava-labs/avalanche-network-runner-sdk v0.3.0/go.mod h1:SgKJvtqvgo/Bl/c8fxEHCLaSxEbzimYfBopcfrajxQk= -github.com/ava-labs/coreth v0.11.7-rc.3 h1:+GaXmcqzBDd6jFJcPrAQ/RKEFJlqCVcdTF/Q5T6woy4= -github.com/ava-labs/coreth v0.11.7-rc.3/go.mod h1:uIKJtaUX5TI60IS+DpYT8SLXLM2JydgngMF+9q8YjXM= +github.com/ava-labs/coreth v0.11.8-0.20230223235704-14175cdd347d h1:i2pf4SXb1kmmAMgQOsoTHwy2rpOhU3dy1ND06nEdwdE= +github.com/ava-labs/coreth v0.11.8-0.20230223235704-14175cdd347d/go.mod h1:UiSBTrY+KwCiwHaIsGMZgoDtLLah8UXealH6LK0wEbc= github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 h1:EdxD90j5sClfL5Ngpz2TlnbnkNYdFPDXa0jDOjam65c= github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7/go.mod h1:XhiXSrh90sHUbkERzaxEftCmUz53eCijshDLZ4fByVM= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= diff --git a/scripts/constants.sh b/scripts/constants.sh index 4f64f0b90fa9..2fa3a74c1415 100755 --- a/scripts/constants.sh +++ b/scripts/constants.sh @@ -9,7 +9,7 @@ AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Direct avalanchego_path="$AVALANCHE_PATH/build/avalanchego" plugin_dir=${PLUGIN_DIR:-$HOME/.avalanchego/plugins} evm_path=${EVM_PATH:-$plugin_dir/evm} -coreth_version=${CORETH_VERSION:-'v0.11.7-rc.3'} +coreth_version=${CORETH_VERSION:-'v0.11.8-0.20230223235704-14175cdd347d'} # Set the PATHS GOPATH="$(go env GOPATH)" diff --git a/vms/avm/factory.go b/vms/avm/factory.go index 77e8993a3cd7..82eab91f241e 100644 --- a/vms/avm/factory.go +++ b/vms/avm/factory.go @@ -4,7 +4,7 @@ package avm import ( - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/avm/config" ) @@ -15,6 +15,6 @@ type Factory struct { config.Config } -func (f *Factory) New(*snow.Context) (interface{}, error) { +func (f *Factory) New(logging.Logger) (interface{}, error) { return &VM{Config: f.Config}, nil } diff --git a/vms/manager.go b/vms/manager.go index 72d0efca4b49..d8e5cca25c04 100644 --- a/vms/manager.go +++ b/vms/manager.go @@ -12,8 +12,8 @@ import ( "golang.org/x/exp/maps" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" ) var ( @@ -24,7 +24,7 @@ var ( // A Factory creates new instances of a VM type Factory interface { - New(*snow.Context) (interface{}, error) + New(logging.Logger) (interface{}, error) } // Manager tracks a collection of VM factories, their aliases, and their @@ -104,7 +104,8 @@ func (m *manager) RegisterFactory(ctx context.Context, vmID ids.ID, factory Fact m.factories[vmID] = factory - vm, err := factory.New(nil) + // TODO: Pass in a VM specific logger + vm, err := factory.New(logging.NoLog{}) if err != nil { return err } diff --git a/vms/mock_manager.go b/vms/mock_manager.go index b1d4e2227563..ff624944f522 100644 --- a/vms/mock_manager.go +++ b/vms/mock_manager.go @@ -12,7 +12,7 @@ import ( reflect "reflect" ids "github.com/ava-labs/avalanchego/ids" - snow "github.com/ava-labs/avalanchego/snow" + logging "github.com/ava-labs/avalanchego/utils/logging" gomock "github.com/golang/mock/gomock" ) @@ -40,7 +40,7 @@ func (m *MockFactory) EXPECT() *MockFactoryMockRecorder { } // New mocks base method. 
-func (m *MockFactory) New(arg0 *snow.Context) (interface{}, error) { +func (m *MockFactory) New(arg0 logging.Logger) (interface{}, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "New", arg0) ret0, _ := ret[0].(interface{}) diff --git a/vms/nftfx/factory.go b/vms/nftfx/factory.go index f615e8999d09..a23caa7159a8 100644 --- a/vms/nftfx/factory.go +++ b/vms/nftfx/factory.go @@ -5,7 +5,7 @@ package nftfx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" ) @@ -18,6 +18,6 @@ var ( type Factory struct{} -func (*Factory) New(*snow.Context) (interface{}, error) { +func (*Factory) New(logging.Logger) (interface{}, error) { return &Fx{}, nil } diff --git a/vms/nftfx/factory_test.go b/vms/nftfx/factory_test.go index 82dbafa76676..37ea2c8b9be4 100644 --- a/vms/nftfx/factory_test.go +++ b/vms/nftfx/factory_test.go @@ -5,11 +5,13 @@ package nftfx import ( "testing" + + "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { factory := Factory{} - if fx, err := factory.New(nil); err != nil { + if fx, err := factory.New(logging.NoLog{}); err != nil { t.Fatal(err) } else if fx == nil { t.Fatalf("Factory.New returned nil") diff --git a/vms/platformvm/factory.go b/vms/platformvm/factory.go index f561738826ea..64e8c64e7627 100644 --- a/vms/platformvm/factory.go +++ b/vms/platformvm/factory.go @@ -4,7 +4,7 @@ package platformvm import ( - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/platformvm/config" ) @@ -17,6 +17,6 @@ type Factory struct { } // New returns a new instance of the Platform Chain -func (f *Factory) New(*snow.Context) (interface{}, error) { +func (f *Factory) New(logging.Logger) (interface{}, error) { return &VM{Config: f.Config}, nil } diff --git a/vms/propertyfx/factory.go b/vms/propertyfx/factory.go index 350122a995ab..8151efa5bc8a 100644 --- a/vms/propertyfx/factory.go +++ b/vms/propertyfx/factory.go @@ -5,7 +5,7 @@ package propertyfx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" ) @@ -18,6 +18,6 @@ var ( type Factory struct{} -func (*Factory) New(*snow.Context) (interface{}, error) { +func (*Factory) New(logging.Logger) (interface{}, error) { return &Fx{}, nil } diff --git a/vms/propertyfx/factory_test.go b/vms/propertyfx/factory_test.go index 788e71240b4a..5aae5134f33c 100644 --- a/vms/propertyfx/factory_test.go +++ b/vms/propertyfx/factory_test.go @@ -5,11 +5,13 @@ package propertyfx import ( "testing" + + "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { factory := Factory{} - if fx, err := factory.New(nil); err != nil { + if fx, err := factory.New(logging.NoLog{}); err != nil { t.Fatal(err) } else if fx == nil { t.Fatalf("Factory.New returned nil") diff --git a/vms/registry/vm_registerer.go b/vms/registry/vm_registerer.go index a89773842e45..2897c20bd376 100644 --- a/vms/registry/vm_registerer.go +++ b/vms/registry/vm_registerer.go @@ -89,8 +89,8 @@ func (r *vmRegisterer) createStaticHandlers( vmID ids.ID, factory vms.Factory, ) (map[string]*common.HTTPHandler, error) { - // passing a nil ctx to the factory disables logging. 
- vm, err := factory.New(nil) + // TODO: Pass in a VM specific logger + vm, err := factory.New(logging.NoLog{}) if err != nil { return nil, err } diff --git a/vms/registry/vm_registerer_test.go b/vms/registry/vm_registerer_test.go index 637399f8583a..db8180bfcd8f 100644 --- a/vms/registry/vm_registerer_test.go +++ b/vms/registry/vm_registerer_test.go @@ -46,7 +46,7 @@ func TestRegisterBadVM(t *testing.T) { resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) // Since this factory produces a bad vm, we should get an error. - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) require.Error(t, resources.registerer.Register(context.Background(), id, vmFactory)) } @@ -60,7 +60,7 @@ func TestRegisterCreateHandlersAndShutdownFails(t *testing.T) { vm := mocks.NewMockChainVM(resources.ctrl) resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) // We fail to create handlers + fail to shutdown vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) vm.EXPECT().Shutdown(gomock.Any()).Return(errTest).Times(1) @@ -77,7 +77,7 @@ func TestRegisterCreateHandlersFails(t *testing.T) { vm := mocks.NewMockChainVM(resources.ctrl) resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) // We fail to create handlers + but succeed our shutdown vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) vm.EXPECT().Shutdown(gomock.Any()).Return(nil).Times(1) @@ -98,7 +98,7 @@ func TestRegisterAddRouteFails(t *testing.T) { } resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) // We fail to create an endpoint for the handler resources.mockServer.EXPECT(). @@ -127,7 +127,7 @@ func TestRegisterAliasLookupFails(t *testing.T) { } resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) // Registering the route fails resources.mockServer.EXPECT(). @@ -158,7 +158,7 @@ func TestRegisterAddAliasesFails(t *testing.T) { aliases := []string{"alias-1", "alias-2"} resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) resources.mockServer.EXPECT(). 
AddRoute( @@ -196,7 +196,7 @@ func TestRegisterHappyCase(t *testing.T) { aliases := []string{"alias-1", "alias-2"} resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) resources.mockServer.EXPECT(). AddRoute( @@ -243,7 +243,7 @@ func TestRegisterWithReadLockBadVM(t *testing.T) { resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) // Since this factory produces a bad vm, we should get an error. - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) require.Error(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory)) } @@ -257,7 +257,7 @@ func TestRegisterWithReadLockCreateHandlersAndShutdownFails(t *testing.T) { vm := mocks.NewMockChainVM(resources.ctrl) resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) // We fail to create handlers + fail to shutdown vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) vm.EXPECT().Shutdown(gomock.Any()).Return(errTest).Times(1) @@ -274,7 +274,7 @@ func TestRegisterWithReadLockCreateHandlersFails(t *testing.T) { vm := mocks.NewMockChainVM(resources.ctrl) resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) // We fail to create handlers + but succeed our shutdown vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) vm.EXPECT().Shutdown(gomock.Any()).Return(nil).Times(1) @@ -295,7 +295,7 @@ func TestRegisterWithReadLockAddRouteWithReadLockFails(t *testing.T) { } resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) // We fail to create an endpoint for the handler resources.mockServer.EXPECT(). @@ -324,7 +324,7 @@ func TestRegisterWithReadLockAliasLookupFails(t *testing.T) { } resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) // RegisterWithReadLocking the route fails resources.mockServer.EXPECT(). @@ -355,7 +355,7 @@ func TestRegisterWithReadLockAddAliasesFails(t *testing.T) { aliases := []string{"alias-1", "alias-2"} resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) resources.mockServer.EXPECT(). 
AddRouteWithReadLock( @@ -393,7 +393,7 @@ func TestRegisterWithReadLockHappyCase(t *testing.T) { aliases := []string{"alias-1", "alias-2"} resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) resources.mockServer.EXPECT(). AddRouteWithReadLock( diff --git a/vms/rpcchainvm/factory.go b/vms/rpcchainvm/factory.go index 199c08f86e0c..0c68d66f7309 100644 --- a/vms/rpcchainvm/factory.go +++ b/vms/rpcchainvm/factory.go @@ -6,9 +6,7 @@ package rpcchainvm import ( "context" "fmt" - "io" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/vms" @@ -35,21 +33,12 @@ func NewFactory(path string, processTracker resource.ProcessTracker, runtimeTrac } } -func (f *factory) New(ctx *snow.Context) (interface{}, error) { +func (f *factory) New(log logging.Logger) (interface{}, error) { config := &subprocess.Config{ + Stderr: log, + Stdout: log, HandshakeTimeout: runtime.DefaultHandshakeTimeout, - } - - // createStaticHandlers will send a nil ctx to disable logs - // TODO: create a separate log file and no-op ctx - if ctx != nil { - config.Stderr = ctx.Log - config.Stdout = ctx.Log - config.Log = ctx.Log - } else { - config.Stderr = io.Discard - config.Stdout = io.Discard - config.Log = logging.NoLog{} + Log: log, } listener, err := grpcutils.NewListener() @@ -73,7 +62,7 @@ func (f *factory) New(ctx *snow.Context) (interface{}, error) { } vm := NewClient(vmpb.NewVMClient(clientConn)) - vm.SetProcess(ctx, stopper, status.Pid, f.processTracker) + vm.SetProcess(stopper, status.Pid, f.processTracker) f.runtimeTracker.TrackRuntime(stopper) diff --git a/vms/rpcchainvm/vm_client.go b/vms/rpcchainvm/vm_client.go index 05d32d40d301..7110073b0143 100644 --- a/vms/rpcchainvm/vm_client.go +++ b/vms/rpcchainvm/vm_client.go @@ -105,8 +105,6 @@ type VMClient struct { conns []*grpc.ClientConn grpcServerMetrics *grpc_prometheus.ServerMetrics - - ctx *snow.Context } // NewClient returns a VM connected to a remote VM @@ -117,8 +115,7 @@ func NewClient(client vmpb.VMClient) *VMClient { } // SetProcess gives ownership of the server process to the client. 
-func (vm *VMClient) SetProcess(ctx *snow.Context, runtime runtime.Stopper, pid int, processTracker resource.ProcessTracker) { - vm.ctx = ctx +func (vm *VMClient) SetProcess(runtime runtime.Stopper, pid int, processTracker resource.ProcessTracker) { vm.runtime = runtime vm.processTracker = processTracker vm.pid = pid @@ -140,8 +137,6 @@ func (vm *VMClient) Initialize( return errUnsupportedFXs } - vm.ctx = chainCtx - // Register metrics registerer := prometheus.NewRegistry() multiGatherer := metrics.NewMultiGatherer() @@ -169,7 +164,7 @@ func (vm *VMClient) Initialize( serverAddr := serverListener.Addr().String() go grpcutils.Serve(serverListener, vm.newDBServer(semDB.Database)) - vm.ctx.Log.Info("grpc: serving database", + chainCtx.Log.Info("grpc: serving database", zap.String("version", dbVersion), zap.String("address", serverAddr), ) @@ -195,7 +190,7 @@ func (vm *VMClient) Initialize( serverAddr := serverListener.Addr().String() go grpcutils.Serve(serverListener, vm.newInitServer()) - vm.ctx.Log.Info("grpc: serving vm services", + chainCtx.Log.Info("grpc: serving vm services", zap.String("address", serverAddr), ) @@ -264,7 +259,7 @@ func (vm *VMClient) Initialize( } vm.State = chainState - return vm.ctx.Metrics.Register(multiGatherer) + return chainCtx.Metrics.Register(multiGatherer) } func (vm *VMClient) newDBServer(db database.Database) *grpc.Server { diff --git a/vms/secp256k1fx/factory.go b/vms/secp256k1fx/factory.go index c6ed549825b1..7f39e5473ac3 100644 --- a/vms/secp256k1fx/factory.go +++ b/vms/secp256k1fx/factory.go @@ -5,7 +5,7 @@ package secp256k1fx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" ) @@ -18,6 +18,6 @@ var ( type Factory struct{} -func (*Factory) New(*snow.Context) (interface{}, error) { +func (*Factory) New(logging.Logger) (interface{}, error) { return &Fx{}, nil } diff --git a/vms/secp256k1fx/factory_test.go b/vms/secp256k1fx/factory_test.go index 644fe21f29a1..3d8039c25091 100644 --- a/vms/secp256k1fx/factory_test.go +++ b/vms/secp256k1fx/factory_test.go @@ -7,12 +7,14 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { require := require.New(t) factory := Factory{} - fx, err := factory.New(nil) + fx, err := factory.New(logging.NoLog{}) require.NoError(err) require.NotNil(fx) } From e25eea01636416d35f53b7b25179c4593a97d717 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 24 Feb 2023 18:08:42 +0100 Subject: [PATCH 12/27] Move p-chain `Validator` and `SubnetValidator` to the `txs` package (#2636) --- tests/e2e/p/permissionless_subnets.go | 10 +++---- tests/e2e/p/workflow.go | 4 +-- vms/platformvm/api/static_service.go | 3 +-- vms/platformvm/blocks/builder/builder_test.go | 3 +-- .../blocks/executor/proposal_block_test.go | 5 ++-- vms/platformvm/blocks/proposal_block_test.go | 5 ++-- vms/platformvm/blocks/standard_block_test.go | 5 ++-- vms/platformvm/state/state_test.go | 3 +-- vms/platformvm/txs/add_delegator_test.go | 9 +++---- vms/platformvm/txs/add_delegator_tx.go | 20 +++----------- .../txs/add_permissionless_delegator_tx.go | 20 +++----------- .../add_permissionless_delegator_tx_test.go | 19 +++++++------ .../txs/add_permissionless_validator_tx.go | 20 +++----------- .../add_permissionless_validator_tx_test.go | 27 +++++++++---------- .../txs/add_subnet_validator_test.go | 19 +++++++------ 
vms/platformvm/txs/add_subnet_validator_tx.go | 24 +++-------------- vms/platformvm/txs/add_validator_test.go | 5 ++-- vms/platformvm/txs/add_validator_tx.go | 20 +++----------- vms/platformvm/txs/builder/builder.go | 9 +++---- .../txs/executor/staker_tx_verification.go | 4 +-- .../executor/staker_tx_verification_test.go | 3 +-- vms/platformvm/txs/mempool/mempool_test.go | 3 +-- vms/platformvm/txs/staker_tx.go | 22 +++++---------- .../{validator => txs}/subnet_validator.go | 2 +- .../subnet_validator_test.go | 2 +- vms/platformvm/txs/txheap/by_end_time_test.go | 7 +++-- .../txs/txheap/by_start_time_test.go | 7 +++-- .../{validator => txs}/validator.go | 7 +---- .../{validator => txs}/validator_test.go | 2 +- wallet/chain/p/builder.go | 25 +++++++++-------- wallet/chain/p/builder_with_options.go | 11 ++++---- wallet/chain/p/signer_visitor.go | 2 +- wallet/chain/p/wallet.go | 21 +++++++-------- wallet/chain/p/wallet_with_options.go | 11 ++++---- wallet/subnet/primary/example_test.go | 10 +++---- .../add-permissioned-subnet-validator/main.go | 6 ++--- .../examples/add-primary-validator/main.go | 4 +-- 37 files changed, 136 insertions(+), 243 deletions(-) rename vms/platformvm/{validator => txs}/subnet_validator.go (97%) rename vms/platformvm/{validator => txs}/subnet_validator_test.go (97%) rename vms/platformvm/{validator => txs}/validator.go (94%) rename vms/platformvm/{validator => txs}/validator_test.go (99%) diff --git a/tests/e2e/p/permissionless_subnets.go b/tests/e2e/p/permissionless_subnets.go index c928e8d7d829..934e3cef6e4e 100644 --- a/tests/e2e/p/permissionless_subnets.go +++ b/tests/e2e/p/permissionless_subnets.go @@ -26,7 +26,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/status" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" @@ -195,8 +195,8 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { ginkgo.By("add permissionless validator", func() { ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) addSubnetValidatorTxID, err := pWallet.IssueAddPermissionlessValidatorTx( - &validator.SubnetValidator{ - Validator: validator.Validator{ + &txs.SubnetValidator{ + Validator: txs.Validator{ NodeID: genesis.LocalConfig.InitialStakers[0].NodeID, Start: uint64(validatorStartTime.Unix()), End: uint64(validatorStartTime.Add(5 * time.Second).Unix()), @@ -224,8 +224,8 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { ginkgo.By("add permissionless delegator", func() { ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) addSubnetDelegatorTxID, err := pWallet.IssueAddPermissionlessDelegatorTx( - &validator.SubnetValidator{ - Validator: validator.Validator{ + &txs.SubnetValidator{ + Validator: txs.Validator{ NodeID: genesis.LocalConfig.InitialStakers[0].NodeID, Start: uint64(delegatorStartTime.Unix()), End: uint64(delegatorStartTime.Add(5 * time.Second).Unix()), diff --git a/tests/e2e/p/workflow.go b/tests/e2e/p/workflow.go index d2a8539e3f0c..e27f12a2e147 100644 --- a/tests/e2e/p/workflow.go +++ b/tests/e2e/p/workflow.go @@ -23,7 +23,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" 
"github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/status" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" @@ -97,7 +97,7 @@ var _ = e2e.DescribePChain("[Workflow]", func() { validatorStartTimeDiff := 30 * time.Second vdrStartTime := time.Now().Add(validatorStartTimeDiff) - vdr := &validator.Validator{ + vdr := &txs.Validator{ NodeID: ids.GenerateTestNodeID(), Start: uint64(vdrStartTime.Unix()), End: uint64(vdrStartTime.Add(72 * time.Hour).Unix()), diff --git a/vms/platformvm/api/static_service.go b/vms/platformvm/api/static_service.go index c9a7983d27d7..3ea637f47be5 100644 --- a/vms/platformvm/api/static_service.go +++ b/vms/platformvm/api/static_service.go @@ -20,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/txheap" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -307,7 +306,7 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl NetworkID: uint32(args.NetworkID), BlockchainID: ids.Empty, }}, - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: vdr.NodeID, Start: uint64(args.Time), End: uint64(vdr.EndTime), diff --git a/vms/platformvm/blocks/builder/builder_test.go b/vms/platformvm/blocks/builder/builder_test.go index 4abb06ffe718..3eba24a97af5 100644 --- a/vms/platformvm/blocks/builder/builder_test.go +++ b/vms/platformvm/blocks/builder/builder_test.go @@ -23,7 +23,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor" @@ -325,7 +324,7 @@ func TestBuildBlock(t *testing.T) { }}, Outs: []*avax.TransferableOutput{output}, }}, - Validator: validator.Validator{ + Validator: txs.Validator{ // Shouldn't be dropped Start: uint64(now.Add(2 * txexecutor.SyncBound).Unix()), }, diff --git a/vms/platformvm/blocks/executor/proposal_block_test.go b/vms/platformvm/blocks/executor/proposal_block_test.go index 35e633e869dc..a398f9c2f175 100644 --- a/vms/platformvm/blocks/executor/proposal_block_test.go +++ b/vms/platformvm/blocks/executor/proposal_block_test.go @@ -27,7 +27,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -66,7 +65,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { // create a proposal transaction to be included into proposal block utx := &txs.AddValidatorTx{ BaseTx: txs.BaseTx{}, - Validator: validator.Validator{End: uint64(chainTime.Unix())}, + Validator: txs.Validator{End: uint64(chainTime.Unix())}, StakeOuts: []*avax.TransferableOutput{ { Asset: avax.Asset{ @@ -192,7 +191,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { nextStakerTime := 
chainTime.Add(executor.SyncBound).Add(-1 * time.Second) unsignedNextStakerTx := &txs.AddValidatorTx{ BaseTx: txs.BaseTx{}, - Validator: validator.Validator{End: uint64(nextStakerTime.Unix())}, + Validator: txs.Validator{End: uint64(nextStakerTime.Unix())}, StakeOuts: []*avax.TransferableOutput{ { Asset: avax.Asset{ diff --git a/vms/platformvm/blocks/proposal_block_test.go b/vms/platformvm/blocks/proposal_block_test.go index 49b61b5b4676..3cc472657338 100644 --- a/vms/platformvm/blocks/proposal_block_test.go +++ b/vms/platformvm/blocks/proposal_block_test.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -33,7 +32,7 @@ func TestNewBanffProposalBlock(t *testing.T) { }, }, StakeOuts: []*avax.TransferableOutput{}, - Validator: validator.Validator{}, + Validator: txs.Validator{}, RewardsOwner: &secp256k1fx.OutputOwners{ Addrs: []ids.ShortID{}, }, @@ -75,7 +74,7 @@ func TestNewApricotProposalBlock(t *testing.T) { }, }, StakeOuts: []*avax.TransferableOutput{}, - Validator: validator.Validator{}, + Validator: txs.Validator{}, RewardsOwner: &secp256k1fx.OutputOwners{ Addrs: []ids.ShortID{}, }, diff --git a/vms/platformvm/blocks/standard_block_test.go b/vms/platformvm/blocks/standard_block_test.go index b661be035a33..e02b5905d879 100644 --- a/vms/platformvm/blocks/standard_block_test.go +++ b/vms/platformvm/blocks/standard_block_test.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -33,7 +32,7 @@ func TestNewBanffStandardBlock(t *testing.T) { }, }, StakeOuts: []*avax.TransferableOutput{}, - Validator: validator.Validator{}, + Validator: txs.Validator{}, RewardsOwner: &secp256k1fx.OutputOwners{ Addrs: []ids.ShortID{}, }, @@ -75,7 +74,7 @@ func TestNewApricotStandardBlock(t *testing.T) { }, }, StakeOuts: []*avax.TransferableOutput{}, - Validator: validator.Validator{}, + Validator: txs.Validator{}, RewardsOwner: &secp256k1fx.OutputOwners{ Addrs: []ids.ShortID{}, }, diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 43f463a7d593..b78f455dd360 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -28,7 +28,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -425,7 +424,7 @@ func newInitializedState(require *require.Assertions) (State, database.Database) s, db := newUninitializedState(require) initialValidator := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: initialNodeID, Start: uint64(initialTime.Unix()), End: uint64(initialValidatorEndTime.Unix()), diff --git a/vms/platformvm/txs/add_delegator_test.go b/vms/platformvm/txs/add_delegator_test.go index 6363e8a04fb5..3afa9a955632 100644 --- a/vms/platformvm/txs/add_delegator_test.go +++ b/vms/platformvm/txs/add_delegator_test.go @@ -15,7 +15,6 @@ import ( 
"github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -83,7 +82,7 @@ func TestAddDelegatorTxSyntacticVerify(t *testing.T) { Ins: inputs, Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, }}, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), @@ -117,11 +116,11 @@ func TestAddDelegatorTxSyntacticVerify(t *testing.T) { // Case: delegator weight is not equal to total stake weight addDelegatorTx.SyntacticallyVerified = false - addDelegatorTx.Validator.Wght = 2 * validatorWeight + addDelegatorTx.Wght = 2 * validatorWeight stx, err = NewSigned(addDelegatorTx, Codec, signers) require.NoError(err) require.ErrorIs(stx.SyntacticVerify(ctx), errDelegatorWeightMismatch) - addDelegatorTx.Validator.Wght = validatorWeight + addDelegatorTx.Wght = validatorWeight } func TestAddDelegatorTxSyntacticVerifyNotAVAX(t *testing.T) { @@ -181,7 +180,7 @@ func TestAddDelegatorTxSyntacticVerifyNotAVAX(t *testing.T) { Ins: inputs, Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, }}, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), diff --git a/vms/platformvm/txs/add_delegator_tx.go b/vms/platformvm/txs/add_delegator_tx.go index 562cd7b34742..f8fe8dcaca99 100644 --- a/vms/platformvm/txs/add_delegator_tx.go +++ b/vms/platformvm/txs/add_delegator_tx.go @@ -6,7 +6,6 @@ package txs import ( "errors" "fmt" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -16,7 +15,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -31,7 +29,7 @@ type AddDelegatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the delegatee - Validator validator.Validator `serialize:"true" json:"validator"` + Validator `serialize:"true" json:"validator"` // Where to send staked tokens when done validating StakeOuts []*avax.TransferableOutput `serialize:"true" json:"stake"` // Where to send staking rewards when done validating @@ -62,18 +60,6 @@ func (*AddDelegatorTx) PublicKey() (*bls.PublicKey, bool, error) { return nil, false, nil } -func (tx *AddDelegatorTx) StartTime() time.Time { - return tx.Validator.StartTime() -} - -func (tx *AddDelegatorTx) EndTime() time.Time { - return tx.Validator.EndTime() -} - -func (tx *AddDelegatorTx) Weight() uint64 { - return tx.Validator.Wght -} - func (*AddDelegatorTx) PendingPriority() Priority { return PrimaryNetworkDelegatorApricotPendingPriority } @@ -126,10 +112,10 @@ func (tx *AddDelegatorTx) SyntacticVerify(ctx *snow.Context) error { switch { case !avax.IsSortedTransferableOutputs(tx.StakeOuts, Codec): return errOutputsNotSorted - case totalStakeWeight != tx.Validator.Wght: + case totalStakeWeight != tx.Wght: return fmt.Errorf("%w, delegator weight %d total stake weight %d", errDelegatorWeightMismatch, - tx.Validator.Wght, + tx.Wght, totalStakeWeight, ) } diff --git a/vms/platformvm/txs/add_permissionless_delegator_tx.go b/vms/platformvm/txs/add_permissionless_delegator_tx.go index 
7d80013e220e..d456ef0870db 100644 --- a/vms/platformvm/txs/add_permissionless_delegator_tx.go +++ b/vms/platformvm/txs/add_permissionless_delegator_tx.go @@ -5,7 +5,6 @@ package txs import ( "fmt" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -15,7 +14,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -26,7 +24,7 @@ type AddPermissionlessDelegatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the validator - Validator validator.Validator `serialize:"true" json:"validator"` + Validator `serialize:"true" json:"validator"` // ID of the subnet this validator is validating Subnet ids.ID `serialize:"true" json:"subnetID"` // Where to send staked tokens when done validating @@ -59,18 +57,6 @@ func (*AddPermissionlessDelegatorTx) PublicKey() (*bls.PublicKey, bool, error) { return nil, false, nil } -func (tx *AddPermissionlessDelegatorTx) StartTime() time.Time { - return tx.Validator.StartTime() -} - -func (tx *AddPermissionlessDelegatorTx) EndTime() time.Time { - return tx.Validator.EndTime() -} - -func (tx *AddPermissionlessDelegatorTx) Weight() uint64 { - return tx.Validator.Wght -} - func (tx *AddPermissionlessDelegatorTx) PendingPriority() Priority { if tx.Subnet == constants.PrimaryNetworkID { return PrimaryNetworkDelegatorBanffPendingPriority @@ -136,10 +122,10 @@ func (tx *AddPermissionlessDelegatorTx) SyntacticVerify(ctx *snow.Context) error switch { case !avax.IsSortedTransferableOutputs(tx.StakeOuts, Codec): return errOutputsNotSorted - case totalStakeWeight != tx.Validator.Wght: + case totalStakeWeight != tx.Wght: return fmt.Errorf("%w, delegator weight %d total stake weight %d", errDelegatorWeightMismatch, - tx.Validator.Wght, + tx.Wght, totalStakeWeight, ) } diff --git a/vms/platformvm/txs/add_permissionless_delegator_tx_test.go b/vms/platformvm/txs/add_permissionless_delegator_tx_test.go index 052386efe11f..5e4e4b482082 100644 --- a/vms/platformvm/txs/add_permissionless_delegator_tx_test.go +++ b/vms/platformvm/txs/add_permissionless_delegator_tx_test.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -90,7 +89,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { rewardsOwner.EXPECT().Verify().Return(errCustom) return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 1, }, Subnet: ids.GenerateTestID(), @@ -119,7 +118,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { stakeOut.EXPECT().Verify().Return(errCustom) return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 1, }, Subnet: ids.GenerateTestID(), @@ -143,7 +142,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { rewardsOwner.EXPECT().Verify().Return(nil).AnyTimes() return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 1, }, Subnet: ids.GenerateTestID(), @@ -178,7 +177,7 @@ func 
TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 1, }, Subnet: ids.GenerateTestID(), @@ -213,7 +212,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 1, }, Subnet: ids.GenerateTestID(), @@ -248,7 +247,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 2, }, Subnet: ids.GenerateTestID(), @@ -283,7 +282,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 2, }, Subnet: constants.PrimaryNetworkID, @@ -326,7 +325,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { t.Run("invalid BaseTx", func(t *testing.T) { tx := &AddPermissionlessDelegatorTx{ BaseTx: invalidBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), }, StakeOuts: []*avax.TransferableOutput{ @@ -353,7 +352,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() tx := &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, diff --git a/vms/platformvm/txs/add_permissionless_validator_tx.go b/vms/platformvm/txs/add_permissionless_validator_tx.go index fb56c55b3745..8fbc613c684f 100644 --- a/vms/platformvm/txs/add_permissionless_validator_tx.go +++ b/vms/platformvm/txs/add_permissionless_validator_tx.go @@ -6,7 +6,6 @@ package txs import ( "errors" "fmt" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -18,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -37,7 +35,7 @@ type AddPermissionlessValidatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the validator - Validator validator.Validator `serialize:"true" json:"validator"` + Validator `serialize:"true" json:"validator"` // ID of the subnet this validator is validating Subnet ids.ID `serialize:"true" json:"subnetID"` // If the [Subnet] is the primary network, [Signer] is the BLS key for this @@ -88,18 +86,6 @@ func (tx *AddPermissionlessValidatorTx) PublicKey() (*bls.PublicKey, bool, error return key, key != nil, nil } -func (tx *AddPermissionlessValidatorTx) StartTime() time.Time { - return tx.Validator.StartTime() -} - -func (tx *AddPermissionlessValidatorTx) EndTime() time.Time { - return tx.Validator.EndTime() -} - -func (tx *AddPermissionlessValidatorTx) Weight() uint64 { - return tx.Validator.Wght -} - func (tx *AddPermissionlessValidatorTx) PendingPriority() Priority { if tx.Subnet == constants.PrimaryNetworkID { return PrimaryNetworkValidatorPendingPriority @@ -188,8 +174,8 @@ func (tx *AddPermissionlessValidatorTx) SyntacticVerify(ctx *snow.Context) 
error switch { case !avax.IsSortedTransferableOutputs(tx.StakeOuts, Codec): return errOutputsNotSorted - case totalStakeWeight != tx.Validator.Wght: - return fmt.Errorf("%w: weight %d != stake %d", errValidatorWeightMismatch, tx.Validator.Wght, totalStakeWeight) + case totalStakeWeight != tx.Wght: + return fmt.Errorf("%w: weight %d != stake %d", errValidatorWeightMismatch, tx.Wght, totalStakeWeight) } // cache that this is valid diff --git a/vms/platformvm/txs/add_permissionless_validator_tx_test.go b/vms/platformvm/txs/add_permissionless_validator_tx_test.go index 1506971e22c4..71fb77b584ed 100644 --- a/vms/platformvm/txs/add_permissionless_validator_tx_test.go +++ b/vms/platformvm/txs/add_permissionless_validator_tx_test.go @@ -19,7 +19,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -83,7 +82,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { txFunc: func(*gomock.Controller) *AddPermissionlessValidatorTx { return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.EmptyNodeID, }, } @@ -95,7 +94,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { txFunc: func(*gomock.Controller) *AddPermissionlessValidatorTx { return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), }, StakeOuts: nil, @@ -108,7 +107,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { txFunc: func(*gomock.Controller) *AddPermissionlessValidatorTx { return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), }, StakeOuts: []*avax.TransferableOutput{ @@ -133,7 +132,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { rewardsOwner.EXPECT().Verify().Return(errCustom) return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -163,7 +162,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { rewardsOwner.EXPECT().Verify().Return(nil).AnyTimes() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -196,7 +195,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { stakeOut.EXPECT().Verify().Return(errCustom) return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -224,7 +223,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { rewardsOwner.EXPECT().Verify().Return(nil).AnyTimes() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -263,7 +262,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -302,7 +301,7 @@ func 
TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -341,7 +340,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 2, }, @@ -380,7 +379,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 2, }, @@ -427,7 +426,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { t.Run("invalid BaseTx", func(t *testing.T) { tx := &AddPermissionlessValidatorTx{ BaseTx: invalidBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), }, StakeOuts: []*avax.TransferableOutput{ @@ -455,7 +454,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() tx := &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, diff --git a/vms/platformvm/txs/add_subnet_validator_test.go b/vms/platformvm/txs/add_subnet_validator_test.go index 3dc413ef9ed9..ce81aa12f9ad 100644 --- a/vms/platformvm/txs/add_subnet_validator_test.go +++ b/vms/platformvm/txs/add_subnet_validator_test.go @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -72,8 +71,8 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { Outs: outputs, Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, }}, - Validator: validator.SubnetValidator{ - Validator: validator.Validator{ + SubnetValidator: SubnetValidator{ + Validator: Validator{ NodeID: ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), @@ -100,21 +99,21 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { // Case: Missing Subnet ID addSubnetValidatorTx.SyntacticallyVerified = false - addSubnetValidatorTx.Validator.Subnet = ids.Empty + addSubnetValidatorTx.Subnet = ids.Empty stx, err = NewSigned(addSubnetValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) require.Error(err) - addSubnetValidatorTx.Validator.Subnet = subnetID + addSubnetValidatorTx.Subnet = subnetID // Case: No weight addSubnetValidatorTx.SyntacticallyVerified = false - addSubnetValidatorTx.Validator.Wght = 0 + addSubnetValidatorTx.Wght = 0 stx, err = NewSigned(addSubnetValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) require.Error(err) - addSubnetValidatorTx.Validator.Wght = validatorWeight + addSubnetValidatorTx.Wght = validatorWeight // Case: Subnet auth indices not unique addSubnetValidatorTx.SyntacticallyVerified = false @@ -129,7 +128,7 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { // Case: adding to Primary Network addSubnetValidatorTx.SyntacticallyVerified = false - addSubnetValidatorTx.Validator.Subnet = constants.PrimaryNetworkID + 
addSubnetValidatorTx.Subnet = constants.PrimaryNetworkID stx, err = NewSigned(addSubnetValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) @@ -183,8 +182,8 @@ func TestAddSubnetValidatorMarshal(t *testing.T) { Outs: outputs, Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, }}, - Validator: validator.SubnetValidator{ - Validator: validator.Validator{ + SubnetValidator: SubnetValidator{ + Validator: Validator{ NodeID: ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), diff --git a/vms/platformvm/txs/add_subnet_validator_tx.go b/vms/platformvm/txs/add_subnet_validator_tx.go index a3ff2652684b..cf677679cd57 100644 --- a/vms/platformvm/txs/add_subnet_validator_tx.go +++ b/vms/platformvm/txs/add_subnet_validator_tx.go @@ -5,14 +5,12 @@ package txs import ( "errors" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" ) var ( @@ -26,35 +24,19 @@ type AddSubnetValidatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // The validator - Validator validator.SubnetValidator `serialize:"true" json:"validator"` + SubnetValidator `serialize:"true" json:"validator"` // Auth that will be allowing this validator into the network SubnetAuth verify.Verifiable `serialize:"true" json:"subnetAuthorization"` } -func (tx *AddSubnetValidatorTx) SubnetID() ids.ID { - return tx.Validator.Subnet -} - func (tx *AddSubnetValidatorTx) NodeID() ids.NodeID { - return tx.Validator.NodeID + return tx.SubnetValidator.NodeID } func (*AddSubnetValidatorTx) PublicKey() (*bls.PublicKey, bool, error) { return nil, false, nil } -func (tx *AddSubnetValidatorTx) StartTime() time.Time { - return tx.Validator.StartTime() -} - -func (tx *AddSubnetValidatorTx) EndTime() time.Time { - return tx.Validator.EndTime() -} - -func (tx *AddSubnetValidatorTx) Weight() uint64 { - return tx.Validator.Wght -} - func (*AddSubnetValidatorTx) PendingPriority() Priority { return SubnetPermissionedValidatorPendingPriority } @@ -70,7 +52,7 @@ func (tx *AddSubnetValidatorTx) SyntacticVerify(ctx *snow.Context) error { return ErrNilTx case tx.SyntacticallyVerified: // already passed syntactic verification return nil - case tx.Validator.Subnet == constants.PrimaryNetworkID: + case tx.Subnet == constants.PrimaryNetworkID: return errAddPrimaryNetworkValidator } diff --git a/vms/platformvm/txs/add_validator_test.go b/vms/platformvm/txs/add_validator_test.go index f1fd25f73fbb..cba68f7946ee 100644 --- a/vms/platformvm/txs/add_validator_test.go +++ b/vms/platformvm/txs/add_validator_test.go @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -82,7 +81,7 @@ func TestAddValidatorTxSyntacticVerify(t *testing.T) { Ins: inputs, Outs: outputs, }}, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), @@ -199,7 +198,7 @@ func TestAddValidatorTxSyntacticVerifyNotAVAX(t *testing.T) { Ins: inputs, Outs: outputs, }}, - Validator: validator.Validator{ + Validator: Validator{ NodeID: 
ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), diff --git a/vms/platformvm/txs/add_validator_tx.go b/vms/platformvm/txs/add_validator_tx.go index 8b8200f8e0fd..1d57f9f8620a 100644 --- a/vms/platformvm/txs/add_validator_tx.go +++ b/vms/platformvm/txs/add_validator_tx.go @@ -5,7 +5,6 @@ package txs import ( "fmt" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -16,7 +15,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/reward" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -31,7 +29,7 @@ type AddValidatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the delegatee - Validator validator.Validator `serialize:"true" json:"validator"` + Validator `serialize:"true" json:"validator"` // Where to send staked tokens when done validating StakeOuts []*avax.TransferableOutput `serialize:"true" json:"stake"` // Where to send staking rewards when done validating @@ -66,18 +64,6 @@ func (*AddValidatorTx) PublicKey() (*bls.PublicKey, bool, error) { return nil, false, nil } -func (tx *AddValidatorTx) StartTime() time.Time { - return tx.Validator.StartTime() -} - -func (tx *AddValidatorTx) EndTime() time.Time { - return tx.Validator.EndTime() -} - -func (tx *AddValidatorTx) Weight() uint64 { - return tx.Validator.Wght -} - func (*AddValidatorTx) PendingPriority() Priority { return PrimaryNetworkValidatorPendingPriority } @@ -140,8 +126,8 @@ func (tx *AddValidatorTx) SyntacticVerify(ctx *snow.Context) error { switch { case !avax.IsSortedTransferableOutputs(tx.StakeOuts, Codec): return errOutputsNotSorted - case totalStakeWeight != tx.Validator.Wght: - return fmt.Errorf("%w: weight %d != stake %d", errValidatorWeightMismatch, tx.Validator.Wght, totalStakeWeight) + case totalStakeWeight != tx.Wght: + return fmt.Errorf("%w: weight %d != stake %d", errValidatorWeightMismatch, tx.Wght, totalStakeWeight) } // cache that this is valid diff --git a/vms/platformvm/txs/builder/builder.go b/vms/platformvm/txs/builder/builder.go index 14570ed7db82..f101e6071111 100644 --- a/vms/platformvm/txs/builder/builder.go +++ b/vms/platformvm/txs/builder/builder.go @@ -20,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -448,7 +447,7 @@ func (b *builder) NewAddValidatorTx( Ins: ins, Outs: unstakedOuts, }}, - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: nodeID, Start: startTime, End: endTime, @@ -490,7 +489,7 @@ func (b *builder) NewAddDelegatorTx( Ins: ins, Outs: unlockedOuts, }}, - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: nodeID, Start: startTime, End: endTime, @@ -538,8 +537,8 @@ func (b *builder) NewAddSubnetValidatorTx( Ins: ins, Outs: outs, }}, - Validator: validator.SubnetValidator{ - Validator: validator.Validator{ + SubnetValidator: txs.SubnetValidator{ + Validator: txs.Validator{ NodeID: nodeID, Start: startTime, End: endTime, diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index 2a9f12934cb8..28dc3da56284 100644 --- 
a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -179,7 +179,7 @@ func verifyAddSubnetValidatorTx( ) } - _, err := GetValidator(chainState, tx.Validator.Subnet, tx.Validator.NodeID) + _, err := GetValidator(chainState, tx.SubnetValidator.Subnet, tx.Validator.NodeID) if err == nil { return fmt.Errorf( "attempted to issue duplicate subnet validation for %s", @@ -209,7 +209,7 @@ func verifyAddSubnetValidatorTx( return errValidatorSubset } - baseTxCreds, err := verifyPoASubnetAuthorization(backend, chainState, sTx, tx.Validator.Subnet, tx.SubnetAuth) + baseTxCreds, err := verifyPoASubnetAuthorization(backend, chainState, sTx, tx.SubnetValidator.Subnet, tx.SubnetAuth) if err != nil { return err } diff --git a/vms/platformvm/txs/executor/staker_tx_verification_test.go b/vms/platformvm/txs/executor/staker_tx_verification_test.go index 350d0cea28bd..c596f513d3ae 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification_test.go +++ b/vms/platformvm/txs/executor/staker_tx_verification_test.go @@ -23,7 +23,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -63,7 +62,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { Ins: []*avax.TransferableInput{}, }, }, - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.GenerateTestNodeID(), Start: 1, End: 1 + uint64(unsignedTransformTx.MinStakeDuration), diff --git a/vms/platformvm/txs/mempool/mempool_test.go b/vms/platformvm/txs/mempool/mempool_test.go index a3bd2ac8ac05..9cf86a775bcb 100644 --- a/vms/platformvm/txs/mempool/mempool_test.go +++ b/vms/platformvm/txs/mempool/mempool_test.go @@ -18,7 +18,6 @@ import ( "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -226,7 +225,7 @@ func createTestProposalTxs(count int) ([]*txs.Tx, error) { for i := 0; i < count; i++ { utx := &txs.AddValidatorTx{ BaseTx: txs.BaseTx{}, - Validator: validator.Validator{ + Validator: txs.Validator{ Start: uint64(clk.Time().Add(time.Duration(count-i) * time.Second).Unix()), }, StakeOuts: nil, diff --git a/vms/platformvm/txs/staker_tx.go b/vms/platformvm/txs/staker_tx.go index af4e67548b77..ec37a10c0b0c 100644 --- a/vms/platformvm/txs/staker_tx.go +++ b/vms/platformvm/txs/staker_tx.go @@ -16,20 +16,6 @@ import ( // delegation. 
type ValidatorTx interface { UnsignedTx - Validator -} - -type DelegatorTx interface { - UnsignedTx - Delegator -} - -type StakerTx interface { - UnsignedTx - Staker -} - -type Validator interface { PermissionlessStaker ValidationRewardsOwner() fx.Owner @@ -37,12 +23,18 @@ type Validator interface { Shares() uint32 } -type Delegator interface { +type DelegatorTx interface { + UnsignedTx PermissionlessStaker RewardsOwner() fx.Owner } +type StakerTx interface { + UnsignedTx + Staker +} + type PermissionlessStaker interface { Staker diff --git a/vms/platformvm/validator/subnet_validator.go b/vms/platformvm/txs/subnet_validator.go similarity index 97% rename from vms/platformvm/validator/subnet_validator.go rename to vms/platformvm/txs/subnet_validator.go index 81ad9435484c..b1161662ee04 100644 --- a/vms/platformvm/validator/subnet_validator.go +++ b/vms/platformvm/txs/subnet_validator.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package validator +package txs import ( "github.com/ava-labs/avalanchego/ids" diff --git a/vms/platformvm/validator/subnet_validator_test.go b/vms/platformvm/txs/subnet_validator_test.go similarity index 97% rename from vms/platformvm/validator/subnet_validator_test.go rename to vms/platformvm/txs/subnet_validator_test.go index 995826372ff1..36a0abe22e6e 100644 --- a/vms/platformvm/validator/subnet_validator_test.go +++ b/vms/platformvm/txs/subnet_validator_test.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package validator +package txs import ( "testing" diff --git a/vms/platformvm/txs/txheap/by_end_time_test.go b/vms/platformvm/txs/txheap/by_end_time_test.go index 25f053003fa2..b9ff85e42b20 100644 --- a/vms/platformvm/txs/txheap/by_end_time_test.go +++ b/vms/platformvm/txs/txheap/by_end_time_test.go @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -23,7 +22,7 @@ func TestByStopTime(t *testing.T) { baseTime := time.Now() utx0 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.NodeID{0}, Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 1, @@ -35,7 +34,7 @@ func TestByStopTime(t *testing.T) { require.NoError(err) utx1 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.NodeID{1}, Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 2, @@ -47,7 +46,7 @@ func TestByStopTime(t *testing.T) { require.NoError(err) utx2 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.NodeID{1}, Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 3, diff --git a/vms/platformvm/txs/txheap/by_start_time_test.go b/vms/platformvm/txs/txheap/by_start_time_test.go index 1ef76707a54c..ab744d03a1d5 100644 --- a/vms/platformvm/txs/txheap/by_start_time_test.go +++ b/vms/platformvm/txs/txheap/by_start_time_test.go @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -23,7 +22,7 @@ func TestByStartTime(t *testing.T) { baseTime := time.Now() utx0 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + 
Validator: txs.Validator{ NodeID: ids.NodeID{0}, Start: uint64(baseTime.Unix()) + 1, End: uint64(baseTime.Unix()) + 1, @@ -35,7 +34,7 @@ func TestByStartTime(t *testing.T) { require.NoError(err) utx1 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.NodeID{1}, Start: uint64(baseTime.Unix()) + 2, End: uint64(baseTime.Unix()) + 2, @@ -47,7 +46,7 @@ func TestByStartTime(t *testing.T) { require.NoError(err) utx2 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.NodeID{1}, Start: uint64(baseTime.Unix()) + 3, End: uint64(baseTime.Unix()) + 3, diff --git a/vms/platformvm/validator/validator.go b/vms/platformvm/txs/validator.go similarity index 94% rename from vms/platformvm/validator/validator.go rename to vms/platformvm/txs/validator.go index c1894ef94bc5..a207c17cb957 100644 --- a/vms/platformvm/validator/validator.go +++ b/vms/platformvm/txs/validator.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package validator +package txs import ( "errors" @@ -30,11 +30,6 @@ type Validator struct { Wght uint64 `serialize:"true" json:"weight"` } -// ID returns the node ID of the validator -func (v *Validator) ID() ids.NodeID { - return v.NodeID -} - // StartTime is the time that this validator will enter the validator set func (v *Validator) StartTime() time.Time { return time.Unix(int64(v.Start), 0) diff --git a/vms/platformvm/validator/validator_test.go b/vms/platformvm/txs/validator_test.go similarity index 99% rename from vms/platformvm/validator/validator_test.go rename to vms/platformvm/txs/validator_test.go index fc977bafbe8e..eb0c4a2af9eb 100644 --- a/vms/platformvm/validator/validator_test.go +++ b/vms/platformvm/txs/validator_test.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package validator +package txs import ( "testing" diff --git a/wallet/chain/p/builder.go b/wallet/chain/p/builder.go index 2e5b89854a16..783d9434b17b 100644 --- a/wallet/chain/p/builder.go +++ b/wallet/chain/p/builder.go @@ -19,7 +19,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -73,7 +72,7 @@ type Builder interface { // will take from delegation rewards. If 1,000,000 is provided, 100% of // the delegation reward will be sent to the validator's [rewardsOwner]. NewAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -84,7 +83,7 @@ type Builder interface { // - [vdr] specifies all the details of the validation period such as the // startTime, endTime, sampling weight, nodeID, and subnetID. NewAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (*txs.AddSubnetValidatorTx, error) @@ -104,7 +103,7 @@ type Builder interface { // - [rewardsOwner] specifies the owner of all the rewards this delegator // may accrue at the end of its delegation period. 
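As a migration aid, a minimal sketch of how code that previously imported vms/platformvm/validator constructs these values after the move into the txs package; the concrete field values and the generated IDs are illustrative, only the type and field names come from the hunks above.

package main

import (
	"time"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/vms/platformvm/txs"
)

func main() {
	start := time.Now().Add(time.Minute)

	// validator.Validator is now txs.Validator; the field names are unchanged.
	vdr := txs.Validator{
		NodeID: ids.GenerateTestNodeID(),
		Start:  uint64(start.Unix()),
		End:    uint64(start.Add(24 * time.Hour).Unix()),
		Wght:   2_000,
	}

	// validator.SubnetValidator is now txs.SubnetValidator; it still embeds
	// Validator, which is why AddSubnetValidatorTx can refer to tx.Subnet and
	// tx.Wght directly in the hunks above.
	subnetVdr := txs.SubnetValidator{
		Validator: vdr,
		Subnet:    ids.GenerateTestID(), // placeholder subnet ID
	}
	_ = subnetVdr
}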
NewAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.AddDelegatorTx, error) @@ -221,7 +220,7 @@ type Builder interface { // will take from delegation rewards. If 1,000,000 is provided, 100% of // the delegation reward will be sent to the validator's [rewardsOwner]. NewAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -239,7 +238,7 @@ type Builder interface { // - [rewardsOwner] specifies the owner of all the rewards this delegator // earns during its delegation period. NewAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, @@ -325,7 +324,7 @@ func (b *builder) NewBaseTx( } func (b *builder) NewAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -360,7 +359,7 @@ func (b *builder) NewAddValidatorTx( } func (b *builder) NewAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (*txs.AddSubnetValidatorTx, error) { toBurn := map[ids.ID]uint64{ @@ -386,8 +385,8 @@ func (b *builder) NewAddSubnetValidatorTx( Outs: outputs, Memo: ops.Memo(), }}, - Validator: *vdr, - SubnetAuth: subnetAuth, + SubnetValidator: *vdr, + SubnetAuth: subnetAuth, }, nil } @@ -426,7 +425,7 @@ func (b *builder) NewRemoveSubnetValidatorTx( } func (b *builder) NewAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.AddDelegatorTx, error) { @@ -730,7 +729,7 @@ func (b *builder) NewTransformSubnetTx( } func (b *builder) NewAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -775,7 +774,7 @@ func (b *builder) NewAddPermissionlessValidatorTx( } func (b *builder) NewAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, diff --git a/wallet/chain/p/builder_with_options.go b/wallet/chain/p/builder_with_options.go index 4761b007a620..5d96fd80540a 100644 --- a/wallet/chain/p/builder_with_options.go +++ b/wallet/chain/p/builder_with_options.go @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -55,7 +54,7 @@ func (b *builderWithOptions) GetImportableBalance( } func (b *builderWithOptions) NewAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -69,7 +68,7 @@ func (b *builderWithOptions) NewAddValidatorTx( } func (b *builderWithOptions) NewAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (*txs.AddSubnetValidatorTx, error) { return b.Builder.NewAddSubnetValidatorTx( @@ -91,7 +90,7 @@ func (b *builderWithOptions) RemoveSubnetValidatorTx( } func (b 
*builderWithOptions) NewAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.AddDelegatorTx, error) { @@ -191,7 +190,7 @@ func (b *builderWithOptions) NewTransformSubnetTx( } func (b *builderWithOptions) NewAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -211,7 +210,7 @@ func (b *builderWithOptions) NewAddPermissionlessValidatorTx( } func (b *builderWithOptions) NewAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, diff --git a/wallet/chain/p/signer_visitor.go b/wallet/chain/p/signer_visitor.go index f8c27ed6807a..cd07a0544c4b 100644 --- a/wallet/chain/p/signer_visitor.go +++ b/wallet/chain/p/signer_visitor.go @@ -64,7 +64,7 @@ func (s *signerVisitor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error if err != nil { return err } - subnetAuthSigners, err := s.getSubnetSigners(tx.Validator.Subnet, tx.SubnetAuth) + subnetAuthSigners, err := s.getSubnetSigners(tx.SubnetValidator.Subnet, tx.SubnetAuth) if err != nil { return err } diff --git a/wallet/chain/p/wallet.go b/wallet/chain/p/wallet.go index 57ecaea28bc8..f87e6e974194 100644 --- a/wallet/chain/p/wallet.go +++ b/wallet/chain/p/wallet.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -55,7 +54,7 @@ type Wallet interface { // will take from delegation rewards. If 1,000,000 is provided, 100% of // the delegation reward will be sent to the validator's [rewardsOwner]. IssueAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -67,7 +66,7 @@ type Wallet interface { // - [vdr] specifies all the details of the validation period such as the // startTime, endTime, sampling weight, nodeID, and subnetID. IssueAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (ids.ID, error) @@ -89,7 +88,7 @@ type Wallet interface { // - [rewardsOwner] specifies the owner of all the rewards this delegator // may accrue at the end of its delegation period. IssueAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (ids.ID, error) @@ -208,7 +207,7 @@ type Wallet interface { // will take from delegation rewards. If 1,000,000 is provided, 100% of // the delegation reward will be sent to the validator's [rewardsOwner]. IssueAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -226,7 +225,7 @@ type Wallet interface { // - [rewardsOwner] specifies the owner of all the rewards this delegator // earns during its delegation period. 
IssueAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, @@ -286,7 +285,7 @@ func (w *wallet) IssueBaseTx( } func (w *wallet) IssueAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -299,7 +298,7 @@ func (w *wallet) IssueAddValidatorTx( } func (w *wallet) IssueAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (ids.ID, error) { utx, err := w.builder.NewAddSubnetValidatorTx(vdr, options...) @@ -322,7 +321,7 @@ func (w *wallet) IssueRemoveSubnetValidatorTx( } func (w *wallet) IssueAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (ids.ID, error) { @@ -424,7 +423,7 @@ func (w *wallet) IssueTransformSubnetTx( } func (w *wallet) IssueAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -448,7 +447,7 @@ func (w *wallet) IssueAddPermissionlessValidatorTx( } func (w *wallet) IssueAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, diff --git a/wallet/chain/p/wallet_with_options.go b/wallet/chain/p/wallet_with_options.go index 4352f5f50145..66760eb52d32 100644 --- a/wallet/chain/p/wallet_with_options.go +++ b/wallet/chain/p/wallet_with_options.go @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -50,7 +49,7 @@ func (w *walletWithOptions) IssueBaseTx( } func (w *walletWithOptions) IssueAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -64,7 +63,7 @@ func (w *walletWithOptions) IssueAddValidatorTx( } func (w *walletWithOptions) IssueAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (ids.ID, error) { return w.Wallet.IssueAddSubnetValidatorTx( @@ -86,7 +85,7 @@ func (w *walletWithOptions) IssueRemoveSubnetValidatorTx( } func (w *walletWithOptions) IssueAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (ids.ID, error) { @@ -186,7 +185,7 @@ func (w *walletWithOptions) IssueTransformSubnetTx( } func (w *walletWithOptions) IssueAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -206,7 +205,7 @@ func (w *walletWithOptions) IssueAddPermissionlessValidatorTx( } func (w *walletWithOptions) IssueAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, diff --git a/wallet/subnet/primary/example_test.go b/wallet/subnet/primary/example_test.go index 441a08412df4..6b3f5fefad5c 100644 --- 
a/wallet/subnet/primary/example_test.go +++ b/wallet/subnet/primary/example_test.go @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -133,8 +133,8 @@ func ExampleWallet() { addPermissionlessValidatorStartTime := time.Now() startTime := time.Now().Add(time.Minute) addSubnetValidatorTxID, err := pWallet.IssueAddPermissionlessValidatorTx( - &validator.SubnetValidator{ - Validator: validator.Validator{ + &txs.SubnetValidator{ + Validator: txs.Validator{ NodeID: genesis.LocalConfig.InitialStakers[0].NodeID, Start: uint64(startTime.Unix()), End: uint64(startTime.Add(5 * time.Second).Unix()), @@ -156,8 +156,8 @@ func ExampleWallet() { addPermissionlessDelegatorStartTime := time.Now() addSubnetDelegatorTxID, err := pWallet.IssueAddPermissionlessDelegatorTx( - &validator.SubnetValidator{ - Validator: validator.Validator{ + &txs.SubnetValidator{ + Validator: txs.Validator{ NodeID: genesis.LocalConfig.InitialStakers[0].NodeID, Start: uint64(startTime.Unix()), End: uint64(startTime.Add(5 * time.Second).Unix()), diff --git a/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go b/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go index 8d5e277fa665..4e5c4b57f96f 100644 --- a/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go +++ b/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" ) @@ -54,8 +54,8 @@ func main() { pWallet := wallet.P() addValidatorStartTime := time.Now() - addValidatorTxID, err := pWallet.IssueAddSubnetValidatorTx(&validator.SubnetValidator{ - Validator: validator.Validator{ + addValidatorTxID, err := pWallet.IssueAddSubnetValidatorTx(&txs.SubnetValidator{ + Validator: txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), End: uint64(startTime.Add(duration).Unix()), diff --git a/wallet/subnet/primary/examples/add-primary-validator/main.go b/wallet/subnet/primary/examples/add-primary-validator/main.go index 0a2dce7809a5..1cdc71c2f300 100644 --- a/wallet/subnet/primary/examples/add-primary-validator/main.go +++ b/wallet/subnet/primary/examples/add-primary-validator/main.go @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/reward" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" ) @@ -54,7 +54,7 @@ func main() { addValidatorStartTime := time.Now() addValidatorTxID, err := pWallet.IssueAddPermissionlessValidatorTx( - &validator.SubnetValidator{Validator: validator.Validator{ + &txs.SubnetValidator{Validator: txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), End: uint64(startTime.Add(duration).Unix()), From 
b16d82c86749982bbd916e4396d3ae31c9d7b484 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 24 Feb 2023 13:57:39 -0500 Subject: [PATCH 13/27] Refactor X-chain API tx creation (#2654) Co-authored-by: Chloe <99216251+coffeeavax@users.noreply.github.com> --- vms/avm/service.go | 26 ++- vms/avm/utxo/spender.go | 440 ++++++++++++++++++++++++++++++++++++++++ vms/avm/vm.go | 343 +------------------------------ 3 files changed, 457 insertions(+), 352 deletions(-) create mode 100644 vms/avm/utxo/spender.go diff --git a/vms/avm/service.go b/vms/avm/service.go index 57000c4c2de1..875fac5df4a2 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -39,20 +39,18 @@ const ( ) var ( - errUnknownAssetID = errors.New("unknown asset ID") - errTxNotCreateAsset = errors.New("transaction doesn't create an asset") - errNoMinters = errors.New("no minters provided") - errNoHoldersOrMinters = errors.New("no minters or initialHolders provided") - errZeroAmount = errors.New("amount must be positive") - errNoOutputs = errors.New("no outputs to send") - errSpendOverflow = errors.New("spent amount overflows uint64") - errInvalidMintAmount = errors.New("amount minted must be positive") - errAddressesCantMintAsset = errors.New("provided addresses don't have the authority to mint the provided asset") - errInvalidUTXO = errors.New("invalid utxo") - errNilTxID = errors.New("nil transaction ID") - errNoAddresses = errors.New("no addresses provided") - errNoKeys = errors.New("from addresses have no keys or funds") - errMissingPrivateKey = errors.New("argument 'privateKey' not given") + errUnknownAssetID = errors.New("unknown asset ID") + errTxNotCreateAsset = errors.New("transaction doesn't create an asset") + errNoMinters = errors.New("no minters provided") + errNoHoldersOrMinters = errors.New("no minters or initialHolders provided") + errZeroAmount = errors.New("amount must be positive") + errNoOutputs = errors.New("no outputs to send") + errInvalidMintAmount = errors.New("amount minted must be positive") + errInvalidUTXO = errors.New("invalid utxo") + errNilTxID = errors.New("nil transaction ID") + errNoAddresses = errors.New("no addresses provided") + errNoKeys = errors.New("from addresses have no keys or funds") + errMissingPrivateKey = errors.New("argument 'privateKey' not given") ) // Service defines the base service for the asset vm diff --git a/vms/avm/utxo/spender.go b/vms/avm/utxo/spender.go new file mode 100644 index 000000000000..18b4cf4647f3 --- /dev/null +++ b/vms/avm/utxo/spender.go @@ -0,0 +1,440 @@ +// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package utxo + +import ( + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/nftfx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var ( + errSpendOverflow = errors.New("spent amount overflows uint64") + errInsufficientFunds = errors.New("insufficient funds") + errAddressesCantMintAsset = errors.New("provided addresses don't have the authority to mint the provided asset") +) + +type Spender interface { + // Spend the provided amount while deducting the provided fee. 
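// For orientation, a hedged usage sketch of this interface; [parserCodec],
// [utxos], [kc], [avaxAssetID] and the amounts are placeholders, not values
// defined in this file:
//
//	spender := utxo.NewSpender(&clk, parserCodec)
//	amountsSpent, ins, signers, err := spender.Spend(utxos, kc,
//		map[ids.ID]uint64{avaxAssetID: amountToSend + txFee})
//
// NewSpender is declared further down in this file, and the VM wires it up in
// Initialize below with vm.Spender = utxo.NewSpender(&vm.clock, codec).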
+ // Arguments: + // - [utxos] contains assets ID and amount to be spend for each assestID + // - [kc] are the owners of the funds + // - [amounts] is the amount of funds that are available to be spent for each assetID + // Returns: + // - [amountsSpent] the amount of funds that are spent + // - [inputs] the inputs that should be consumed to fund the outputs + // - [signers] the proof of ownership of the funds being moved + Spend( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + amounts map[ids.ID]uint64, + ) ( + map[ids.ID]uint64, // amountsSpent + []*avax.TransferableInput, // inputs + [][]*secp256k1.PrivateKey, // signers + error, + ) + + SpendNFT( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + assetID ids.ID, + groupID uint32, + to ids.ShortID, + ) ( + []*txs.Operation, + [][]*secp256k1.PrivateKey, + error, + ) + + SpendAll( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + ) ( + map[ids.ID]uint64, + []*avax.TransferableInput, + [][]*secp256k1.PrivateKey, + error, + ) + + Mint( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + amounts map[ids.ID]uint64, + to ids.ShortID, + ) ( + []*txs.Operation, + [][]*secp256k1.PrivateKey, + error, + ) + + MintNFT( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + assetID ids.ID, + payload []byte, + to ids.ShortID, + ) ( + []*txs.Operation, + [][]*secp256k1.PrivateKey, + error, + ) +} + +func NewSpender( + clk *mockable.Clock, + codec codec.Manager, +) Spender { + return &spender{ + clock: clk, + codec: codec, + } +} + +type spender struct { + clock *mockable.Clock + codec codec.Manager +} + +func (s *spender) Spend( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + amounts map[ids.ID]uint64, +) ( + map[ids.ID]uint64, // amountsSpent + []*avax.TransferableInput, // inputs + [][]*secp256k1.PrivateKey, // signers + error, +) { + amountsSpent := make(map[ids.ID]uint64, len(amounts)) + time := s.clock.Unix() + + ins := []*avax.TransferableInput{} + keys := [][]*secp256k1.PrivateKey{} + for _, utxo := range utxos { + assetID := utxo.AssetID() + amount := amounts[assetID] + amountSpent := amountsSpent[assetID] + + if amountSpent >= amount { + // we already have enough inputs allocated to this asset + continue + } + + inputIntf, signers, err := kc.Spend(utxo.Out, time) + if err != nil { + // this utxo can't be spent with the current keys right now + continue + } + input, ok := inputIntf.(avax.TransferableIn) + if !ok { + // this input doesn't have an amount, so I don't care about it here + continue + } + newAmountSpent, err := math.Add64(amountSpent, input.Amount()) + if err != nil { + // there was an error calculating the consumed amount, just error + return nil, nil, nil, errSpendOverflow + } + amountsSpent[assetID] = newAmountSpent + + // add the new input to the array + ins = append(ins, &avax.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: avax.Asset{ID: assetID}, + In: input, + }) + // add the required keys to the array + keys = append(keys, signers) + } + + for asset, amount := range amounts { + if amountsSpent[asset] < amount { + return nil, nil, nil, fmt.Errorf("want to spend %d of asset %s but only have %d", + amount, + asset, + amountsSpent[asset], + ) + } + } + + avax.SortTransferableInputsWithSigners(ins, keys) + return amountsSpent, ins, keys, nil +} + +func (s *spender) SpendNFT( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + assetID ids.ID, + groupID uint32, + to ids.ShortID, +) ( + []*txs.Operation, + [][]*secp256k1.PrivateKey, + error, +) { + time := s.clock.Unix() + + ops := []*txs.Operation{} + keys := 
[][]*secp256k1.PrivateKey{} + + for _, utxo := range utxos { + // makes sure that the variable isn't overwritten with the next iteration + utxo := utxo + + if len(ops) > 0 { + // we have already been able to create the operation needed + break + } + + if utxo.AssetID() != assetID { + // wrong asset ID + continue + } + out, ok := utxo.Out.(*nftfx.TransferOutput) + if !ok { + // wrong output type + continue + } + if out.GroupID != groupID { + // wrong group id + continue + } + indices, signers, ok := kc.Match(&out.OutputOwners, time) + if !ok { + // unable to spend the output + continue + } + + // add the new operation to the array + ops = append(ops, &txs.Operation{ + Asset: utxo.Asset, + UTXOIDs: []*avax.UTXOID{&utxo.UTXOID}, + Op: &nftfx.TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: indices, + }, + Output: nftfx.TransferOutput{ + GroupID: out.GroupID, + Payload: out.Payload, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, + }, + }, + }, + }) + // add the required keys to the array + keys = append(keys, signers) + } + + if len(ops) == 0 { + return nil, nil, errInsufficientFunds + } + + txs.SortOperationsWithSigners(ops, keys, s.codec) + return ops, keys, nil +} + +func (s *spender) SpendAll( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, +) ( + map[ids.ID]uint64, + []*avax.TransferableInput, + [][]*secp256k1.PrivateKey, + error, +) { + amountsSpent := make(map[ids.ID]uint64) + time := s.clock.Unix() + + ins := []*avax.TransferableInput{} + keys := [][]*secp256k1.PrivateKey{} + for _, utxo := range utxos { + assetID := utxo.AssetID() + amountSpent := amountsSpent[assetID] + + inputIntf, signers, err := kc.Spend(utxo.Out, time) + if err != nil { + // this utxo can't be spent with the current keys right now + continue + } + input, ok := inputIntf.(avax.TransferableIn) + if !ok { + // this input doesn't have an amount, so I don't care about it here + continue + } + newAmountSpent, err := math.Add64(amountSpent, input.Amount()) + if err != nil { + // there was an error calculating the consumed amount, just error + return nil, nil, nil, errSpendOverflow + } + amountsSpent[assetID] = newAmountSpent + + // add the new input to the array + ins = append(ins, &avax.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: avax.Asset{ID: assetID}, + In: input, + }) + // add the required keys to the array + keys = append(keys, signers) + } + + avax.SortTransferableInputsWithSigners(ins, keys) + return amountsSpent, ins, keys, nil +} + +func (s *spender) Mint( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + amounts map[ids.ID]uint64, + to ids.ShortID, +) ( + []*txs.Operation, + [][]*secp256k1.PrivateKey, + error, +) { + time := s.clock.Unix() + + ops := []*txs.Operation{} + keys := [][]*secp256k1.PrivateKey{} + + for _, utxo := range utxos { + // makes sure that the variable isn't overwritten with the next iteration + utxo := utxo + + assetID := utxo.AssetID() + amount := amounts[assetID] + if amount == 0 { + continue + } + + out, ok := utxo.Out.(*secp256k1fx.MintOutput) + if !ok { + continue + } + + inIntf, signers, err := kc.Spend(out, time) + if err != nil { + continue + } + + in, ok := inIntf.(*secp256k1fx.Input) + if !ok { + continue + } + + // add the operation to the array + ops = append(ops, &txs.Operation{ + Asset: utxo.Asset, + UTXOIDs: []*avax.UTXOID{&utxo.UTXOID}, + Op: &secp256k1fx.MintOperation{ + MintInput: *in, + MintOutput: *out, + TransferOutput: secp256k1fx.TransferOutput{ + Amt: amount, + OutputOwners: secp256k1fx.OutputOwners{ + 
Threshold: 1, + Addrs: []ids.ShortID{to}, + }, + }, + }, + }) + // add the required keys to the array + keys = append(keys, signers) + + // remove the asset from the required amounts to mint + delete(amounts, assetID) + } + + for _, amount := range amounts { + if amount > 0 { + return nil, nil, errAddressesCantMintAsset + } + } + + txs.SortOperationsWithSigners(ops, keys, s.codec) + return ops, keys, nil +} + +func (s *spender) MintNFT( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + assetID ids.ID, + payload []byte, + to ids.ShortID, +) ( + []*txs.Operation, + [][]*secp256k1.PrivateKey, + error, +) { + time := s.clock.Unix() + + ops := []*txs.Operation{} + keys := [][]*secp256k1.PrivateKey{} + + for _, utxo := range utxos { + // makes sure that the variable isn't overwritten with the next iteration + utxo := utxo + + if len(ops) > 0 { + // we have already been able to create the operation needed + break + } + + if utxo.AssetID() != assetID { + // wrong asset id + continue + } + out, ok := utxo.Out.(*nftfx.MintOutput) + if !ok { + // wrong output type + continue + } + + indices, signers, ok := kc.Match(&out.OutputOwners, time) + if !ok { + // unable to spend the output + continue + } + + // add the operation to the array + ops = append(ops, &txs.Operation{ + Asset: avax.Asset{ID: assetID}, + UTXOIDs: []*avax.UTXOID{ + &utxo.UTXOID, + }, + Op: &nftfx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: indices, + }, + GroupID: out.GroupID, + Payload: payload, + Outputs: []*secp256k1fx.OutputOwners{{ + Threshold: 1, + Addrs: []ids.ShortID{to}, + }}, + }, + }) + // add the required keys to the array + keys = append(keys, signers) + } + + if len(ops) == 0 { + return nil, nil, errAddressesCantMintAsset + } + + txs.SortOperationsWithSigners(ops, keys, s.codec) + return ops, keys, nil +} diff --git a/vms/avm/vm.go b/vms/avm/vm.go index 13ec7cd37976..34325c9cc506 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -31,7 +31,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" @@ -42,14 +41,13 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/states" "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/utxo" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/index" "github.com/ava-labs/avalanchego/vms/components/keystore" "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - safemath "github.com/ava-labs/avalanchego/utils/math" extensions "github.com/ava-labs/avalanchego/vms/avm/fxs" ) @@ -65,7 +63,6 @@ var ( errUnknownFx = errors.New("unknown feature extension") errGenesisAssetMustHaveState = errors.New("genesis asset must have non-empty state") errBootstrapping = errors.New("chain is currently bootstrapping") - errInsufficientFunds = errors.New("insufficient funds") errUnimplemented = errors.New("unimplemented") _ vertex.DAGVM = (*VM)(nil) @@ -79,6 +76,7 @@ type VM struct { avax.AddressManager avax.AtomicUTXOManager ids.Aliaser + utxo.Spender // Contains information of where this VM is executing ctx *snow.Context @@ -212,7 
+210,9 @@ func (vm *VM) Initialize( return err } - vm.AtomicUTXOManager = avax.NewAtomicUTXOManager(ctx.SharedMemory, vm.parser.Codec()) + codec := vm.parser.Codec() + vm.AtomicUTXOManager = avax.NewAtomicUTXOManager(ctx.SharedMemory, codec) + vm.Spender = utxo.NewSpender(&vm.clock, codec) state, err := states.New(vm.db, vm.parser, registerer) if err != nil { @@ -728,339 +728,6 @@ func (vm *VM) LoadUser( return utxos, kc, user.Close() } -func (vm *VM) Spend( - utxos []*avax.UTXO, - kc *secp256k1fx.Keychain, - amounts map[ids.ID]uint64, -) ( - map[ids.ID]uint64, - []*avax.TransferableInput, - [][]*secp256k1.PrivateKey, - error, -) { - amountsSpent := make(map[ids.ID]uint64, len(amounts)) - time := vm.clock.Unix() - - ins := []*avax.TransferableInput{} - keys := [][]*secp256k1.PrivateKey{} - for _, utxo := range utxos { - assetID := utxo.AssetID() - amount := amounts[assetID] - amountSpent := amountsSpent[assetID] - - if amountSpent >= amount { - // we already have enough inputs allocated to this asset - continue - } - - inputIntf, signers, err := kc.Spend(utxo.Out, time) - if err != nil { - // this utxo can't be spent with the current keys right now - continue - } - input, ok := inputIntf.(avax.TransferableIn) - if !ok { - // this input doesn't have an amount, so I don't care about it here - continue - } - newAmountSpent, err := safemath.Add64(amountSpent, input.Amount()) - if err != nil { - // there was an error calculating the consumed amount, just error - return nil, nil, nil, errSpendOverflow - } - amountsSpent[assetID] = newAmountSpent - - // add the new input to the array - ins = append(ins, &avax.TransferableInput{ - UTXOID: utxo.UTXOID, - Asset: avax.Asset{ID: assetID}, - In: input, - }) - // add the required keys to the array - keys = append(keys, signers) - } - - for asset, amount := range amounts { - if amountsSpent[asset] < amount { - return nil, nil, nil, fmt.Errorf("want to spend %d of asset %s but only have %d", - amount, - asset, - amountsSpent[asset], - ) - } - } - - avax.SortTransferableInputsWithSigners(ins, keys) - return amountsSpent, ins, keys, nil -} - -func (vm *VM) SpendNFT( - utxos []*avax.UTXO, - kc *secp256k1fx.Keychain, - assetID ids.ID, - groupID uint32, - to ids.ShortID, -) ( - []*txs.Operation, - [][]*secp256k1.PrivateKey, - error, -) { - time := vm.clock.Unix() - - ops := []*txs.Operation{} - keys := [][]*secp256k1.PrivateKey{} - - for _, utxo := range utxos { - // makes sure that the variable isn't overwritten with the next iteration - utxo := utxo - - if len(ops) > 0 { - // we have already been able to create the operation needed - break - } - - if utxo.AssetID() != assetID { - // wrong asset ID - continue - } - out, ok := utxo.Out.(*nftfx.TransferOutput) - if !ok { - // wrong output type - continue - } - if out.GroupID != groupID { - // wrong group id - continue - } - indices, signers, ok := kc.Match(&out.OutputOwners, time) - if !ok { - // unable to spend the output - continue - } - - // add the new operation to the array - ops = append(ops, &txs.Operation{ - Asset: utxo.Asset, - UTXOIDs: []*avax.UTXOID{&utxo.UTXOID}, - Op: &nftfx.TransferOperation{ - Input: secp256k1fx.Input{ - SigIndices: indices, - }, - Output: nftfx.TransferOutput{ - GroupID: out.GroupID, - Payload: out.Payload, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{to}, - }, - }, - }, - }) - // add the required keys to the array - keys = append(keys, signers) - } - - if len(ops) == 0 { - return nil, nil, errInsufficientFunds - } - - 
txs.SortOperationsWithSigners(ops, keys, vm.parser.Codec()) - return ops, keys, nil -} - -func (vm *VM) SpendAll( - utxos []*avax.UTXO, - kc *secp256k1fx.Keychain, -) ( - map[ids.ID]uint64, - []*avax.TransferableInput, - [][]*secp256k1.PrivateKey, - error, -) { - amountsSpent := make(map[ids.ID]uint64) - time := vm.clock.Unix() - - ins := []*avax.TransferableInput{} - keys := [][]*secp256k1.PrivateKey{} - for _, utxo := range utxos { - assetID := utxo.AssetID() - amountSpent := amountsSpent[assetID] - - inputIntf, signers, err := kc.Spend(utxo.Out, time) - if err != nil { - // this utxo can't be spent with the current keys right now - continue - } - input, ok := inputIntf.(avax.TransferableIn) - if !ok { - // this input doesn't have an amount, so I don't care about it here - continue - } - newAmountSpent, err := safemath.Add64(amountSpent, input.Amount()) - if err != nil { - // there was an error calculating the consumed amount, just error - return nil, nil, nil, errSpendOverflow - } - amountsSpent[assetID] = newAmountSpent - - // add the new input to the array - ins = append(ins, &avax.TransferableInput{ - UTXOID: utxo.UTXOID, - Asset: avax.Asset{ID: assetID}, - In: input, - }) - // add the required keys to the array - keys = append(keys, signers) - } - - avax.SortTransferableInputsWithSigners(ins, keys) - return amountsSpent, ins, keys, nil -} - -func (vm *VM) Mint( - utxos []*avax.UTXO, - kc *secp256k1fx.Keychain, - amounts map[ids.ID]uint64, - to ids.ShortID, -) ( - []*txs.Operation, - [][]*secp256k1.PrivateKey, - error, -) { - time := vm.clock.Unix() - - ops := []*txs.Operation{} - keys := [][]*secp256k1.PrivateKey{} - - for _, utxo := range utxos { - // makes sure that the variable isn't overwritten with the next iteration - utxo := utxo - - assetID := utxo.AssetID() - amount := amounts[assetID] - if amount == 0 { - continue - } - - out, ok := utxo.Out.(*secp256k1fx.MintOutput) - if !ok { - continue - } - - inIntf, signers, err := kc.Spend(out, time) - if err != nil { - continue - } - - in, ok := inIntf.(*secp256k1fx.Input) - if !ok { - continue - } - - // add the operation to the array - ops = append(ops, &txs.Operation{ - Asset: utxo.Asset, - UTXOIDs: []*avax.UTXOID{&utxo.UTXOID}, - Op: &secp256k1fx.MintOperation{ - MintInput: *in, - MintOutput: *out, - TransferOutput: secp256k1fx.TransferOutput{ - Amt: amount, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{to}, - }, - }, - }, - }) - // add the required keys to the array - keys = append(keys, signers) - - // remove the asset from the required amounts to mint - delete(amounts, assetID) - } - - for _, amount := range amounts { - if amount > 0 { - return nil, nil, errAddressesCantMintAsset - } - } - - txs.SortOperationsWithSigners(ops, keys, vm.parser.Codec()) - return ops, keys, nil -} - -func (vm *VM) MintNFT( - utxos []*avax.UTXO, - kc *secp256k1fx.Keychain, - assetID ids.ID, - payload []byte, - to ids.ShortID, -) ( - []*txs.Operation, - [][]*secp256k1.PrivateKey, - error, -) { - time := vm.clock.Unix() - - ops := []*txs.Operation{} - keys := [][]*secp256k1.PrivateKey{} - - for _, utxo := range utxos { - // makes sure that the variable isn't overwritten with the next iteration - utxo := utxo - - if len(ops) > 0 { - // we have already been able to create the operation needed - break - } - - if utxo.AssetID() != assetID { - // wrong asset id - continue - } - out, ok := utxo.Out.(*nftfx.MintOutput) - if !ok { - // wrong output type - continue - } - - indices, signers, ok := kc.Match(&out.OutputOwners, 
time) - if !ok { - // unable to spend the output - continue - } - - // add the operation to the array - ops = append(ops, &txs.Operation{ - Asset: avax.Asset{ID: assetID}, - UTXOIDs: []*avax.UTXOID{ - &utxo.UTXOID, - }, - Op: &nftfx.MintOperation{ - MintInput: secp256k1fx.Input{ - SigIndices: indices, - }, - GroupID: out.GroupID, - Payload: payload, - Outputs: []*secp256k1fx.OutputOwners{{ - Threshold: 1, - Addrs: []ids.ShortID{to}, - }}, - }, - }) - // add the required keys to the array - keys = append(keys, signers) - } - - if len(ops) == 0 { - return nil, nil, errAddressesCantMintAsset - } - - txs.SortOperationsWithSigners(ops, keys, vm.parser.Codec()) - return ops, keys, nil -} - // selectChangeAddr returns the change address to be used for [kc] when [changeAddr] is given // as the optional change address argument func (vm *VM) selectChangeAddr(defaultAddr ids.ShortID, changeAddr string) (ids.ShortID, error) { From b12227cdf699c612e5235dffbe848f8cae430a3b Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 24 Feb 2023 15:21:12 -0500 Subject: [PATCH 14/27] Remove `timer.TimeoutManager` (#2653) --- utils/timer/timeout_manager.go | 142 ---------------------------- utils/timer/timeout_manager_test.go | 25 ----- 2 files changed, 167 deletions(-) delete mode 100644 utils/timer/timeout_manager.go delete mode 100644 utils/timer/timeout_manager_test.go diff --git a/utils/timer/timeout_manager.go b/utils/timer/timeout_manager.go deleted file mode 100644 index 5e6c5b40bd44..000000000000 --- a/utils/timer/timeout_manager.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package timer - -import ( - "container/list" - "sync" - "time" - - "github.com/ava-labs/avalanchego/ids" -) - -type timeout struct { - id ids.ID - handler func() - timer time.Time -} - -// TimeoutManager is a manager for timeouts. -type TimeoutManager struct { - lock sync.Mutex - duration time.Duration // Amount of time before a timeout - timeoutMap map[ids.ID]*list.Element - timeoutList *list.List - timer *Timer // Timer that will fire to clear the timeouts -} - -// Initialize is a constructor b/c Golang, in its wisdom, doesn't ... have them? -func (tm *TimeoutManager) Initialize(duration time.Duration) { - tm.duration = duration - tm.timeoutMap = make(map[ids.ID]*list.Element) - tm.timeoutList = list.New() - tm.timer = NewTimer(tm.Timeout) -} - -func (tm *TimeoutManager) Dispatch() { - tm.timer.Dispatch() -} - -// Stop executing timeouts -func (tm *TimeoutManager) Stop() { - tm.timer.Stop() -} - -// Put puts hash into the hash map -func (tm *TimeoutManager) Put(id ids.ID, handler func()) { - tm.lock.Lock() - defer tm.lock.Unlock() - - tm.put(id, handler) -} - -// Remove the item that no longer needs to be there. 
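This commit swaps hand-rolled map copying and clearing for golang.org/x/exp/maps helpers; a minimal sketch of their semantics, with made-up map contents:

package main

import (
	"fmt"

	"golang.org/x/exp/maps"
)

func main() {
	checks := map[string]string{"C": "chain C", "P": "chain P"}

	// maps.Clone is a shallow copy, equivalent to ranging over the source and
	// copying each key/value pair into a fresh map.
	snapshot := maps.Clone(checks)

	// Mutating the source afterwards does not affect the snapshot.
	checks["X"] = "chain X"
	fmt.Println(len(snapshot), len(checks)) // 2 3

	// maps.Clear deletes every entry in place, as topological.go does below.
	maps.Clear(checks)
	fmt.Println(len(checks)) // 0
}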
-func (tm *TimeoutManager) Remove(id ids.ID) { - tm.lock.Lock() - defer tm.lock.Unlock() - - tm.remove(id) -} - -// Timeout registers a timeout -func (tm *TimeoutManager) Timeout() { - tm.lock.Lock() - defer tm.lock.Unlock() - - tm.timeout() -} - -func (tm *TimeoutManager) timeout() { - timeBound := time.Now().Add(-tm.duration) - // removeExpiredHead returns false once there is nothing left to remove - for { - timeout := tm.removeExpiredHead(timeBound) - if timeout == nil { - break - } - - // Don't execute a callback with a lock held - tm.lock.Unlock() - timeout() - tm.lock.Lock() - } - tm.registerTimeout() -} - -func (tm *TimeoutManager) put(id ids.ID, handler func()) { - tm.remove(id) - - tm.timeoutMap[id] = tm.timeoutList.PushBack(timeout{ - id: id, - handler: handler, - timer: time.Now(), - }) - - if tm.timeoutList.Len() == 1 { - tm.registerTimeout() - } -} - -func (tm *TimeoutManager) remove(id ids.ID) { - e, exists := tm.timeoutMap[id] - if !exists { - return - } - delete(tm.timeoutMap, id) - tm.timeoutList.Remove(e) -} - -// Returns true if the head was removed, false otherwise -func (tm *TimeoutManager) removeExpiredHead(t time.Time) func() { - if tm.timeoutList.Len() == 0 { - return nil - } - - e := tm.timeoutList.Front() - head := e.Value.(timeout) - - headTime := head.timer - if headTime.Before(t) { - tm.remove(head.id) - return head.handler - } - return nil -} - -func (tm *TimeoutManager) registerTimeout() { - if tm.timeoutList.Len() == 0 { - // There are no pending timeouts - tm.timer.Cancel() - return - } - - e := tm.timeoutList.Front() - head := e.Value.(timeout) - - timeBound := time.Now().Add(-tm.duration) - headTime := head.timer - duration := headTime.Sub(timeBound) - - tm.timer.SetTimeoutIn(duration) -} diff --git a/utils/timer/timeout_manager_test.go b/utils/timer/timeout_manager_test.go deleted file mode 100644 index 779eaeb3633d..000000000000 --- a/utils/timer/timeout_manager_test.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package timer - -import ( - "sync" - "testing" - "time" - - "github.com/ava-labs/avalanchego/ids" -) - -func TestTimeoutManager(*testing.T) { - wg := sync.WaitGroup{} - wg.Add(2) - defer wg.Wait() - - tm := TimeoutManager{} - tm.Initialize(time.Millisecond) - go tm.Dispatch() - - tm.Put(ids.ID{}, wg.Done) - tm.Put(ids.ID{1}, wg.Done) -} From 9f42ae055791eb79ac9316624f6bf82085cb5fc3 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 24 Feb 2023 15:49:18 -0500 Subject: [PATCH 15/27] Use the `maps` package when possible (#2647) --- api/health/worker.go | 7 +++---- api/server/router.go | 12 +++++++----- snow/consensus/avalanche/topological.go | 6 +++--- utils/set/set.go | 2 ++ utils/sorting.go | 1 + vms/platformvm/service.go | 7 +++---- 6 files changed, 19 insertions(+), 16 deletions(-) diff --git a/api/health/worker.go b/api/health/worker.go index 4bcdb3a22acc..4ccebc353875 100644 --- a/api/health/worker.go +++ b/api/health/worker.go @@ -12,6 +12,8 @@ import ( "github.com/prometheus/client_golang/prometheus" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/utils" ) @@ -120,10 +122,7 @@ func (w *worker) runChecks(ctx context.Context) { // during this iteration. If [w.checks] is modified during this iteration of // [runChecks], then the added check will not be run until the next // iteration. 
- checks := make(map[string]Checker, len(w.checks)) - for name, checker := range w.checks { - checks[name] = checker - } + checks := maps.Clone(w.checks) w.checksLock.RUnlock() var wg sync.WaitGroup diff --git a/api/server/router.go b/api/server/router.go index 00d69c0d04f6..8bbc38cfc33f 100644 --- a/api/server/router.go +++ b/api/server/router.go @@ -10,6 +10,8 @@ import ( "sync" "github.com/gorilla/mux" + + "github.com/ava-labs/avalanchego/utils/set" ) var ( @@ -22,7 +24,7 @@ type router struct { router *mux.Router routeLock sync.Mutex - reservedRoutes map[string]bool // Reserves routes so that there can't be alias that conflict + reservedRoutes set.Set[string] // Reserves routes so that there can't be alias that conflict aliases map[string][]string // Maps a route to a set of reserved routes routes map[string]map[string]http.Handler // Maps routes to a handler } @@ -30,7 +32,7 @@ type router struct { func newRouter() *router { return &router{ router: mux.NewRouter(), - reservedRoutes: make(map[string]bool), + reservedRoutes: set.Set[string]{}, aliases: make(map[string][]string), routes: make(map[string]map[string]http.Handler), } @@ -68,7 +70,7 @@ func (r *router) AddRouter(base, endpoint string, handler http.Handler) error { } func (r *router) addRouter(base, endpoint string, handler http.Handler) error { - if r.reservedRoutes[base] { + if r.reservedRoutes.Contains(base) { return fmt.Errorf("couldn't route to %s as that route is either aliased or already maps to a handler", base) } @@ -113,13 +115,13 @@ func (r *router) AddAlias(base string, aliases ...string) error { defer r.routeLock.Unlock() for _, alias := range aliases { - if r.reservedRoutes[alias] { + if r.reservedRoutes.Contains(alias) { return fmt.Errorf("couldn't alias to %s as that route is either already aliased or already maps to a handler", alias) } } for _, alias := range aliases { - r.reservedRoutes[alias] = true + r.reservedRoutes.Add(alias) } r.aliases[base] = append(r.aliases[base], aliases...) diff --git a/snow/consensus/avalanche/topological.go b/snow/consensus/avalanche/topological.go index 84cd47c1266d..2dbcc8167a79 100644 --- a/snow/consensus/avalanche/topological.go +++ b/snow/consensus/avalanche/topological.go @@ -11,6 +11,8 @@ import ( "go.uber.org/zap" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" @@ -315,9 +317,7 @@ func (ta *Topological) HealthCheck(ctx context.Context) (interface{}, error) { // the non-transitively applied votes. Also returns the list of leaf nodes. func (ta *Topological) calculateInDegree(responses bag.UniqueBag[ids.ID]) error { // Clear the kahn node set - for k := range ta.kahnNodes { - delete(ta.kahnNodes, k) - } + maps.Clear(ta.kahnNodes) // Clear the leaf set ta.leaves.Clear() diff --git a/utils/set/set.go b/utils/set/set.go index 7712b7206a42..b946c761896b 100644 --- a/utils/set/set.go +++ b/utils/set/set.go @@ -134,6 +134,8 @@ func (s Set[T]) CappedList(size int) []T { // Equals returns true if the sets contain the same elements func (s Set[T]) Equals(other Set[T]) bool { + // Using maps.Equals makes the build not work for some reason so do this + // manually. if s.Len() != other.Len() { return false } diff --git a/utils/sorting.go b/utils/sorting.go index 115bb0a28863..156b81caa783 100644 --- a/utils/sorting.go +++ b/utils/sorting.go @@ -83,6 +83,7 @@ func IsSortedAndUniqueByHash[T ~[]byte](s []T) bool { // Returns true iff the elements in [s] are unique. 
func IsUnique[T comparable](elts []T) bool { + // Can't use set.Set because it'd be a circular import. asMap := make(map[T]struct{}, len(elts)) for _, elt := range elts { if _, ok := asMap[elt]; ok { diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index d1e6f03472f9..b2bd4f93f1bc 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -14,6 +14,8 @@ import ( "go.uber.org/zap" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" @@ -288,10 +290,7 @@ utxoFor: response.UTXOIDs = append(response.UTXOIDs, &utxo.UTXOID) } - balances := map[ids.ID]uint64{} - for assetID, amount := range lockedStakeables { - balances[assetID] = amount - } + balances := maps.Clone(lockedStakeables) for assetID, amount := range lockedNotStakeables { newBalance, err := math.Add64(balances[assetID], amount) if err != nil { From 75a38151cd832304c3eec4c002d59d644e597534 Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Fri, 24 Feb 2023 16:32:32 -0500 Subject: [PATCH 16/27] Simplfy subprocess `Stop` & remove useless test (#2652) --- utils/ulimit/ulimit_test.go | 18 ------------------ vms/rpcchainvm/runtime/subprocess/stopper.go | 19 +++++-------------- 2 files changed, 5 insertions(+), 32 deletions(-) delete mode 100644 utils/ulimit/ulimit_test.go diff --git a/utils/ulimit/ulimit_test.go b/utils/ulimit/ulimit_test.go deleted file mode 100644 index 0ffe3aa11934..000000000000 --- a/utils/ulimit/ulimit_test.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ulimit - -import ( - "testing" - - "github.com/ava-labs/avalanchego/utils/logging" -) - -// Test_SetDefault performs sanity checks for the os default. -func Test_SetDefault(t *testing.T) { - err := Set(DefaultFDLimit, logging.NoLog{}) - if err != nil { - t.Skipf("default fd-limit failed %v", err) - } -} diff --git a/vms/rpcchainvm/runtime/subprocess/stopper.go b/vms/rpcchainvm/runtime/subprocess/stopper.go index 616f9c473c83..5f432bd416fd 100644 --- a/vms/rpcchainvm/runtime/subprocess/stopper.go +++ b/vms/rpcchainvm/runtime/subprocess/stopper.go @@ -20,22 +20,13 @@ func NewStopper(logger logging.Logger, cmd *exec.Cmd) runtime.Stopper { } type stopper struct { - lock sync.Mutex - cmd *exec.Cmd - shutdown bool - + once sync.Once + cmd *exec.Cmd logger logging.Logger } func (s *stopper) Stop(ctx context.Context) { - s.lock.Lock() - defer s.lock.Unlock() - - // subsequent calls to this method are a no-op - if s.shutdown || s.cmd.Process == nil { - return - } - - s.shutdown = true - stop(ctx, s.logger, s.cmd) + s.once.Do(func() { + stop(ctx, s.logger, s.cmd) + }) } From 1212d4898688a3cb5039238ed393623c57e2d8dc Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Sun, 26 Feb 2023 11:25:43 -0500 Subject: [PATCH 17/27] Remove dangling buf.lock (#2659) --- buf.lock | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 buf.lock diff --git a/buf.lock b/buf.lock deleted file mode 100644 index c8628dea0a29..000000000000 --- a/buf.lock +++ /dev/null @@ -1,10 +0,0 @@ -# Generated by buf. DO NOT EDIT. 
-version: v1 -deps: - - remote: buf.build - owner: prometheus - repository: client-model - branch: main - commit: 1d56a02d481a412a83b3c4984eb90c2e - digest: b1-qpEBhqZ9HZsskMFK3hfVMAA2b-XZmKb2WkhyhFej7Gs= - create_time: 2022-01-12T14:51:04.903729Z From b4c89eb71ec03784d5bcdb2dcf40ad8472d80f6a Mon Sep 17 00:00:00 2001 From: David Boehm <91908103+dboehm-avalabs@users.noreply.github.com> Date: Mon, 27 Feb 2023 11:38:36 -0500 Subject: [PATCH 18/27] Add value digests and value verification in proofs (#2639) Co-authored-by: Dan Laine Co-authored-by: Darioush Jalali --- x/merkledb/codec.go | 10 +- x/merkledb/codec_test.go | 16 +- x/merkledb/node.go | 37 +- x/merkledb/proof.go | 435 ++++++++++++++------- x/merkledb/proof_test.go | 817 +++++++++++++++++---------------------- x/merkledb/trieview.go | 1 + 6 files changed, 694 insertions(+), 622 deletions(-) diff --git a/x/merkledb/codec.go b/x/merkledb/codec.go index 0191b4418ea7..2a5d601ed084 100644 --- a/x/merkledb/codec.go +++ b/x/merkledb/codec.go @@ -117,6 +117,9 @@ func (c *codecImpl) EncodeProof(version uint16, proof *Proof) ([]byte, error) { if err := c.encodeByteSlice(buf, proof.Key); err != nil { return nil, err } + if err := c.encodeMaybeByteSlice(buf, proof.Value); err != nil { + return nil, err + } return buf.Bytes(), nil } @@ -277,6 +280,9 @@ func (c *codecImpl) DecodeProof(b []byte, proof *Proof) (uint16, error) { if proof.Key, err = c.decodeByteSlice(src); err != nil { return 0, err } + if proof.Value, err = c.decodeMaybeByteSlice(src); err != nil { + return 0, err + } if src.Len() != 0 { return 0, errExtraSpace } @@ -690,7 +696,7 @@ func (c *codecImpl) decodeProofNode(src *bytes.Reader) (ProofNode, error) { if result.KeyPath, err = c.decodeSerializedPath(src); err != nil { return result, err } - if result.Value, err = c.decodeMaybeByteSlice(src); err != nil { + if result.ValueOrHash, err = c.decodeMaybeByteSlice(src); err != nil { return result, err } numChildren, err := c.decodeInt(src) @@ -731,7 +737,7 @@ func (c *codecImpl) encodeProofNode(pn ProofNode, dst io.Writer) error { if err := c.encodeSerializedPath(pn.KeyPath, dst); err != nil { return err } - if err := c.encodeMaybeByteSlice(dst, pn.Value); err != nil { + if err := c.encodeMaybeByteSlice(dst, pn.ValueOrHash); err != nil { return err } if err := c.encodeInt(dst, len(pn.Children)); err != nil { diff --git a/x/merkledb/codec_test.go b/x/merkledb/codec_test.go index 6982336880b3..bac2093f0ab8 100644 --- a/x/merkledb/codec_test.go +++ b/x/merkledb/codec_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" ) // TODO add more codec tests @@ -20,7 +21,7 @@ import ( func newRandomProofNode(r *rand.Rand) ProofNode { key := make([]byte, r.Intn(32)) // #nosec G404 _, _ = r.Read(key) // #nosec G404 - val := make([]byte, r.Intn(32)) // #nosec G404 + val := make([]byte, r.Intn(64)) // #nosec G404 _, _ = r.Read(val) // #nosec G404 children := map[byte]ids.ID{} @@ -31,8 +32,10 @@ func newRandomProofNode(r *rand.Rand) ProofNode { children[byte(j)] = childID } } - - if len(val) == 0 { + // use the hash instead when length is greater than the hash length + if len(val) >= HashLength { + val = hashing.ComputeHash256(val) + } else if len(val) == 0 { // We do this because when we encode a value of []byte{} we will later // decode it as nil. 
// Doing this prevents inconsistency when comparing the encoded and @@ -41,10 +44,11 @@ func newRandomProofNode(r *rand.Rand) ProofNode { // variable on the struct val = nil } + return ProofNode{ - KeyPath: newPath(key).Serialize(), - Value: Some(val), - Children: children, + KeyPath: newPath(key).Serialize(), + ValueOrHash: Some(val), + Children: children, } } diff --git a/x/merkledb/node.go b/x/merkledb/node.go index c4034255566f..b639362cf1ca 100644 --- a/x/merkledb/node.go +++ b/x/merkledb/node.go @@ -10,7 +10,10 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" ) -const NodeBranchFactor = 16 +const ( + NodeBranchFactor = 16 + HashLength = 32 +) // the values that go into the node's id type hashValues struct { @@ -33,9 +36,10 @@ type child struct { // node holds additional information on top of the dbNode that makes calulcations easier to do type node struct { dbNode - id ids.ID - key path - nodeBytes []byte + id ids.ID + key path + nodeBytes []byte + valueDigest Maybe[[]byte] } // Returns a new node with the given [key] and no value. @@ -59,11 +63,14 @@ func parseNode(key path, nodeBytes []byte) (*node, error) { if _, err := Codec.decodeDBNode(nodeBytes, &n); err != nil { return nil, err } - return &node{ + result := &node{ dbNode: n, key: key, nodeBytes: nodeBytes, - }, nil + } + + result.setValueDigest() + return result, nil } // Returns true iff this node has a value. @@ -100,9 +107,10 @@ func (n *node) calculateID(metrics merkleMetrics) error { hv := &hashValues{ Children: n.children, - Value: n.value, + Value: n.valueDigest, Key: n.key.Serialize(), } + bytes, err := Codec.encodeHashValues(Version, hv) if err != nil { return err @@ -117,6 +125,15 @@ func (n *node) calculateID(metrics merkleMetrics) error { func (n *node) setValue(val Maybe[[]byte]) { n.onNodeChanged() n.value = val + n.setValueDigest() +} + +func (n *node) setValueDigest() { + if n.value.IsNothing() || len(n.value.value) < HashLength { + n.valueDigest = n.value + } else { + n.valueDigest = Some(hashing.ComputeHash256(n.value.value)) + } } // Adds [child] as a child of [n]. @@ -164,9 +181,9 @@ func (n *node) clone() *node { // Returns the ProofNode representation of this node. 
func (n *node) asProofNode() ProofNode { pn := ProofNode{ - KeyPath: n.key.Serialize(), - Children: make(map[byte]ids.ID, len(n.children)), - Value: n.value, + KeyPath: n.key.Serialize(), + Children: make(map[byte]ids.ID, len(n.children)), + ValueOrHash: n.valueDigest, } for index, entry := range n.children { pn.Children[index] = entry.id diff --git a/x/merkledb/proof.go b/x/merkledb/proof.go index b47a207a5ba0..ff10cd98a1f2 100644 --- a/x/merkledb/proof.go +++ b/x/merkledb/proof.go @@ -9,33 +9,40 @@ import ( "errors" "fmt" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/hashing" ) const verificationCacheSize = 2_000 var ( - ErrInvalidProof = errors.New("proof obtained an invalid root ID") - ErrInvalidMaxLength = errors.New("expected max length to be > 0") - ErrNonIncreasingValues = errors.New("keys sent are not in increasing order") - ErrStateFromOutsideOfRange = errors.New("state key falls outside of the start->end range") - ErrNonIncreasingProofNodes = errors.New("each proof node key must be a strict prefix of the next") - ErrExtraProofNodes = errors.New("extra proof nodes in path") - ErrDataInMissingRootProof = errors.New("there should be no state or deleted keys in a change proof that had a missing root") - ErrNoMerkleProof = errors.New("empty key response must include merkle proof") - ErrShouldJustBeRoot = errors.New("end proof should only contain root") - ErrNoStartProof = errors.New("no start proof") - ErrNoEndProof = errors.New("no end proof") - ErrNoProof = errors.New("proof has no nodes") - ErrProofNodeNotForKey = errors.New("the provided node has a key that is not a prefix of the specified key") + ErrInvalidProof = errors.New("proof obtained an invalid root ID") + ErrInvalidMaxLength = errors.New("expected max length to be > 0") + ErrNonIncreasingValues = errors.New("keys sent are not in increasing order") + ErrStateFromOutsideOfRange = errors.New("state key falls outside of the start->end range") + ErrNonIncreasingProofNodes = errors.New("each proof node key must be a strict prefix of the next") + ErrExtraProofNodes = errors.New("extra proof nodes in path") + ErrDataInMissingRootProof = errors.New("there should be no state or deleted keys in a change proof that had a missing root") + ErrNoMerkleProof = errors.New("empty key response must include merkle proof") + ErrShouldJustBeRoot = errors.New("end proof should only contain root") + ErrNoStartProof = errors.New("no start proof") + ErrNoEndProof = errors.New("no end proof") + ErrNoProof = errors.New("proof has no nodes") + ErrProofNodeNotForKey = errors.New("the provided node has a key that is not a prefix of the specified key") + ErrProofValueDoesntMatch = errors.New("the provided value does not match the proof node for the provided key's value") + ErrProofNodeHasUnincludedValue = errors.New("the provided proof has a value for a key within the range that is not present in the provided key/values") ) type ProofNode struct { - KeyPath SerializedPath - Value Maybe[[]byte] - Children map[byte]ids.ID + KeyPath SerializedPath + // Nothing if this is an intermediate node. + // The value in this node if its length < [HashLen]. + // The hash of the value in this node otherwise. + ValueOrHash Maybe[[]byte] + Children map[byte]ids.ID } // An inclusion/exclustion proof of a key. 
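The value-digest rule above is the heart of this patch: a node's value is hashed into its ID (and exposed in ProofNode.ValueOrHash) verbatim when it is shorter than HashLength, and as its 32-byte digest otherwise, so proofs never need to carry large pre-images. The following standalone sketch illustrates that rule and the matching check; it is illustrative only, assumes the 32-byte hash length, and uses crypto/sha256 directly in place of avalanchego's hashing helper.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

const hashLength = 32 // mirrors merkledb's HashLength constant

// valueOrHash mirrors node.setValueDigest: values shorter than hashLength
// are kept as-is, longer values are replaced by their 32-byte digest.
func valueOrHash(value []byte) []byte {
	if len(value) < hashLength {
		return value
	}
	sum := sha256.Sum256(value)
	return sum[:]
}

// matches is the present-value case of valueOrHashMatches: a claimed value
// matches a proof node when it equals the node's ValueOrHash directly
// (short values) or when its digest does (long values).
func matches(claimed, nodeValueOrHash []byte) bool {
	return bytes.Equal(valueOrHash(claimed), nodeValueOrHash)
}

func main() {
	short := []byte("small")
	long := bytes.Repeat([]byte{7}, 64)

	fmt.Println(matches(short, valueOrHash(short))) // true
	fmt.Println(matches(long, valueOrHash(long)))   // true
	fmt.Println(matches(long, long))                // false: long values appear in proofs only as digests
}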
@@ -46,6 +53,10 @@ type Proof struct { Path []ProofNode // This is a proof that [key] exists/doesn't exist. Key []byte + + // Nothing if [Key] isn't in the trie. + // Otherwise the value corresponding to [Key]. + Value Maybe[[]byte] } // Returns nil if the trie given in [proof] has root [expectedRootID]. @@ -56,40 +67,44 @@ func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID) error { if len(proof.Path) == 0 { return ErrNoProof } - if err := verifyProofPath(proof.Path, proof.Key); err != nil { + if err := verifyProofPath(proof.Path, newPath(proof.Key)); err != nil { return err } - tracer, err := trace.New(trace.Config{Enabled: false}) - if err != nil { - return err + // Confirm that the last proof node's value matches the claimed proof value + lastNode := proof.Path[len(proof.Path)-1] + + // If the last proof node's key is [proof.Key] (i.e. this is an inclusion proof) + // then the value of the last proof node must match [proof.Value]. + // Note odd length keys can never match the [proof.Key] since it's bytes, + // and thus an even number of nibbles. + if !lastNode.KeyPath.hasOddLength() && + bytes.Equal(proof.Key, lastNode.KeyPath.Value) && + !valueOrHashMatches(proof.Value, lastNode.ValueOrHash) { + return ErrProofValueDoesntMatch } - db, err := newDatabase( - ctx, - memdb.New(), - Config{ - Tracer: tracer, - ValueCacheSize: verificationCacheSize, - NodeCacheSize: verificationCacheSize, - }, - &mockMetrics{}, - ) - if err != nil { - return err + + // If the last proof node has an odd length or a different key than [proof.Key] + // then this is an exclusion proof and should prove that [proof.Key] isn't in the trie.. + // Note odd length keys can never match the [proof.Key] since it's bytes, + // and thus an even number of nibbles. + if (lastNode.KeyPath.hasOddLength() || !bytes.Equal(proof.Key, lastNode.KeyPath.Value)) && + !proof.Value.IsNothing() { + return ErrProofValueDoesntMatch } - view, err := db.NewView(ctx) + view, err := getEmptyTrieView(ctx) if err != nil { return err } - // Don't need to lock [view] because nobody else has a reference to it. // Insert all of the proof nodes. - // [provenKey] is the key that we are proving exists, or the key - // that is where the key we are proving doesn't exist should be. - provenKey := proof.Path[len(proof.Path)-1].KeyPath.Value + // [provenPath] is the path that we are proving exists, or the path + // that is where the path we are proving doesn't exist should be. + provenPath := proof.Path[len(proof.Path)-1].KeyPath.deserialize() + // Don't bother locking [db] and [view] -- nobody else has a reference to them. - if err = addPathInfo(ctx, view, proof.Path, provenKey, provenKey); err != nil { + if err = addPathInfo(ctx, view, proof.Path, provenPath, provenPath); err != nil { return err } @@ -138,10 +153,10 @@ type RangeProof struct { // - One of the following holds: // - [end] and [proof.EndProof] are empty. // - [proof.StartProof], [start], [end], and [proof.KeyValues] are empty and -// [proof.EndProof] is just the root. +// [proof.EndProof] is just the root. // - [end] is non-empty and [proof.EndProof] is a valid proof of a key <= [end]. -// - [expectedRootID] is the root of the trie containing the given key-value pairs -// and start/end proofs. +// - [expectedRootID] is the root of the trie containing the given key-value pairs +// and start/end proofs. 
func (proof *RangeProof) Verify( ctx context.Context, start []byte, @@ -164,48 +179,43 @@ func (proof *RangeProof) Verify( return err } - // Make sure the start proof, if given, is well-formed. - if len(proof.StartProof) != 0 { - // Do this check before making a trie to fail faster in - // case of a bad proof. - if err := verifyProofPath(proof.StartProof, start); err != nil { - return err - } - } - - // Make sure the end proof, if given, is well-formed. + largestkey := end if len(proof.KeyValues) > 0 { // If [proof] has key-value pairs, we should insert children // greater than [end] to ancestors of the node containing [end] // so that we get the expected root ID. - end = proof.KeyValues[len(proof.KeyValues)-1].Key + largestkey = proof.KeyValues[len(proof.KeyValues)-1].Key } - if len(proof.EndProof) != 0 { - if err := verifyProofPath(proof.EndProof, end); err != nil { - return err - } + + // The key-value pairs (allegedly) proven by [proof]. + keyValues := make(map[path][]byte, len(proof.KeyValues)) + for _, keyValue := range proof.KeyValues { + keyValues[newPath(keyValue.Key)] = keyValue.Value } - tracer, err := trace.New(trace.Config{Enabled: false}) - if err != nil { + smallestPath := newPath(start) + largestPath := newPath(largestkey) + + // Ensure that the start proof is valid and contains values that + // match the key/values that were sent. + if err := verifyProofPath(proof.StartProof, smallestPath); err != nil { return err } - db, err := newDatabase( - ctx, - memdb.New(), - Config{ - Tracer: tracer, - ValueCacheSize: verificationCacheSize, - NodeCacheSize: verificationCacheSize, - }, - &mockMetrics{}, - ) - if err != nil { + if err := verifyAllRangeProofKeyValuesPresent(proof.StartProof, smallestPath, largestPath, keyValues); err != nil { return err } - // Don't need to lock [db] and [view] because nobody else has a reference to it. - view, err := db.newView(ctx) + // Ensure that the end proof is valid and contains values that + // match the key/values that were sent. + if err := verifyProofPath(proof.EndProof, largestPath); err != nil { + return err + } + if err := verifyAllRangeProofKeyValuesPresent(proof.EndProof, smallestPath, largestPath, keyValues); err != nil { + return err + } + + // Don't need to lock [view] because nobody else has a reference to it. + view, err := getEmptyTrieView(ctx) if err != nil { return err } @@ -219,21 +229,17 @@ func (proof *RangeProof) Verify( // For all the nodes along the edges of the proof, insert children < [start] and > [end] // into the trie so that we get the expected root ID (if this proof is valid). - if proof.StartProof != nil { - // By inserting all children < [start], we prove that there are no keys - // > [start] but less than the first key given. That is, the peer who - // gave us this proof is not omitting nodes. - if err := addPathInfo(ctx, view, proof.StartProof, start, end); err != nil { - return err - } + // By inserting all children < [start], we prove that there are no keys + // > [start] but less than the first key given. That is, the peer who + // gave us this proof is not omitting nodes. 
+ if err := addPathInfo(ctx, view, proof.StartProof, smallestPath, largestPath); err != nil { + return err } - if proof.EndProof != nil { - if err := addPathInfo(ctx, view, proof.EndProof, start, end); err != nil { - return err - } + if err := addPathInfo(ctx, view, proof.EndProof, smallestPath, largestPath); err != nil { + return err } - calculatedRoot, err := view.getMerkleRoot(ctx) + calculatedRoot, err := view.GetMerkleRoot(ctx) if err != nil { return err } @@ -243,6 +249,33 @@ func (proof *RangeProof) Verify( return nil } +// Verify that all non-intermediate nodes in [proof] which have keys +// in [[start], [end]] have the value given for that key in [keysValues]. +func verifyAllRangeProofKeyValuesPresent(proof []ProofNode, start, end path, keysValues map[path][]byte) error { + for i := 0; i < len(proof); i++ { + var ( + node = proof[i] + nodeKey = node.KeyPath + nodePath = nodeKey.deserialize() + ) + + // Skip odd length keys since they cannot have a value (enforced by [verifyProofPath]). + if !nodeKey.hasOddLength() && nodePath.Compare(start) >= 0 && nodePath.Compare(end) <= 0 { + value, ok := keysValues[nodePath] + if !ok && !node.ValueOrHash.IsNothing() { + // We didn't get a key-value pair for this key, but the proof node has a value. + return ErrProofNodeHasUnincludedValue + } + if ok && !valueOrHashMatches(Some(value), node.ValueOrHash) { + // We got a key-value pair for this key, but the value in the proof + // node doesn't match the value we got for this key. + return ErrProofValueDoesntMatch + } + } + } + return nil +} + type ChangeProof struct { // If false, the node that created this doesn't have // sufficient history to generate a change proof and @@ -272,15 +305,16 @@ type ChangeProof struct { } // Returns nil iff all of the following hold: -// - [start] <= [end]. -// - [proof] is non-empty iff [proof.HadRootsInHistory]. -// - All keys in [proof.KeyValues] and [proof.DeletedKeys] are in [start, end]. +// - [start] <= [end]. +// - [proof] is non-empty iff [proof.HadRootsInHistory]. +// - All keys in [proof.KeyValues] and [proof.DeletedKeys] are in [start, end]. // - If [start] is empty, all keys are considered > [start]. // - If [end] is empty, all keys are considered < [end]. -// - [proof.KeyValues] and [proof.DeletedKeys] are sorted in order of increasing key. -// - [proof.StartProof] and [proof.EndProof] are well-formed. -// - When the keys in [proof.KeyValues] are added to [db] and the keys in [proof.DeletedKeys] -// are removed from [db], the root ID of [db] is [expectedEndRootID]. +// - [proof.KeyValues] and [proof.DeletedKeys] are sorted in order of increasing key. +// - [proof.StartProof] and [proof.EndProof] are well-formed. +// - When the keys in [proof.KeyValues] are added to [db] and the keys in [proof.DeletedKeys] +// are removed from [db], the root ID of [db] is [expectedEndRootID]. +// // Assumes [db.lock] isn't held. func (proof *ChangeProof) Verify( ctx context.Context, @@ -317,6 +351,14 @@ func (proof *ChangeProof) Verify( return ErrNoStartProof } + keyValues := make(map[path]Maybe[[]byte], len(proof.KeyValues)) + for _, keyValue := range proof.KeyValues { + keyValues[newPath(keyValue.Key)] = Some(keyValue.Value) + } + for _, key := range proof.DeletedKeys { + keyValues[newPath(key)] = Nothing[[]byte]() + } + // Make sure the key-value pairs are sorted and in [start, end]. 
if err := verifyKeyValues(proof.KeyValues, start, end); err != nil { return err @@ -324,40 +366,62 @@ func (proof *ChangeProof) Verify( // Make sure the deleted keys are sorted and in [start, end]. for i := 0; i < len(proof.DeletedKeys); i++ { - if i < len(proof.DeletedKeys)-1 && bytes.Compare(proof.DeletedKeys[i], proof.DeletedKeys[i+1]) >= 0 { + deletedKey := proof.DeletedKeys[i] + if i < len(proof.DeletedKeys)-1 && bytes.Compare(deletedKey, proof.DeletedKeys[i+1]) >= 0 { return ErrNonIncreasingValues } - if (len(start) > 0 && bytes.Compare(proof.DeletedKeys[i], start) < 0) || - (len(end) > 0 && bytes.Compare(proof.DeletedKeys[i], end) > 0) { + if (len(start) > 0 && bytes.Compare(deletedKey, start) < 0) || + (len(end) > 0 && bytes.Compare(deletedKey, end) > 0) { return ErrStateFromOutsideOfRange } } - // Make sure the start proof, if given, is well-formed. - if proof.StartProof != nil { - if err := verifyProofPath(proof.StartProof, start); err != nil { - return err - } - } + largestKey := end // Find the greatest key in [proof.KeyValues] and [proof.DeletedKeys]. // Note that [proof.EndProof] is a proof for this key. - // [end] is also used when we add children of proof nodes to [trie] below. + // [largestKey] is also used when we add children of proof nodes to [trie] below. if len(proof.KeyValues) > 0 { - end = proof.KeyValues[len(proof.KeyValues)-1].Key + largestKey = proof.KeyValues[len(proof.KeyValues)-1].Key } if len(proof.DeletedKeys) > 0 { lastDeleted := proof.DeletedKeys[len(proof.DeletedKeys)-1] - if bytes.Compare(lastDeleted, end) > 0 { - end = lastDeleted + if bytes.Compare(lastDeleted, largestKey) > 0 { + largestKey = lastDeleted } } + smallestPath := newPath(start) + largestPath := newPath(largestKey) + + // Make sure the start proof, if given, is well-formed. + if err := verifyProofPath(proof.StartProof, smallestPath); err != nil { + return err + } + if err := verifyAllChangeProofKeyValuesPresent( + ctx, + db, + proof.StartProof, + smallestPath, + largestPath, + keyValues, + ); err != nil { + return err + } + // Make sure the end proof, if given, is well-formed. - if proof.EndProof != nil { - if err := verifyProofPath(proof.EndProof, end); err != nil { - return err - } + if err := verifyProofPath(proof.EndProof, largestPath); err != nil { + return err + } + if err := verifyAllChangeProofKeyValuesPresent( + ctx, + db, + proof.EndProof, + smallestPath, + largestPath, + keyValues, + ); err != nil { + return err } db.lock.RLock() @@ -368,7 +432,6 @@ func (proof *ChangeProof) Verify( if err != nil { return err } - // Don't bother locking [view] -- nobody else has access to it. // Insert the key-value pairs into the trie. for _, kv := range proof.KeyValues { @@ -384,17 +447,13 @@ func (proof *ChangeProof) Verify( } } - // For all the nodes along the edges of the proof, insert children < [start] and > [end] + // For all the nodes along the edges of the proof, insert children < [start] and > [largestKey] // into the trie so that we get the expected root ID (if this proof is valid). - if proof.StartProof != nil { - if err := addPathInfo(ctx, view, proof.StartProof, start, end); err != nil { - return err - } + if err := addPathInfo(ctx, view, proof.StartProof, smallestPath, largestPath); err != nil { + return err } - if proof.EndProof != nil { - if err := addPathInfo(ctx, view, proof.EndProof, start, end); err != nil { - return err - } + if err := addPathInfo(ctx, view, proof.EndProof, smallestPath, largestPath); err != nil { + return err } // Make sure we get the expected root. 
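Taken together, the checks above let a syncing node treat a change proof as trustworthy only if replaying its KeyValues and DeletedKeys on top of the local database reproduces the expected end root. The sketch below shows that flow from the caller's side; the function and its error handling are illustrative, the GetChangeProof and Verify calls follow the signatures exercised by the tests later in this patch, and committing the verified changes to the local database is deliberately left out.

// Sketch only: assumed to sit in (or alongside) package merkledb so that
// *Database and *ChangeProof are accessible.
package merkledb

import (
	"context"
	"errors"

	"github.com/ava-labs/avalanchego/ids"
)

// fetchAndVerifyChanges asks [remote] for up to 50 changed or deleted keys
// in [start, end] between [startRoot] and [endRoot], then verifies the
// proof against [local] and the expected end root before returning it.
func fetchAndVerifyChanges(
	ctx context.Context,
	remote *Database,
	local *Database,
	startRoot ids.ID,
	endRoot ids.ID,
	start []byte,
	end []byte,
) (*ChangeProof, error) {
	proof, err := remote.GetChangeProof(ctx, startRoot, endRoot, start, end, 50)
	if err != nil {
		return nil, err
	}
	if !proof.HadRootsInHistory {
		// The peer has pruned the requested history; the caller should fall
		// back to requesting a range proof at [endRoot] instead.
		return nil, errors.New("peer is missing history for the requested roots")
	}
	// Reject the proof unless applying its KeyValues and DeletedKeys on top
	// of [local] would produce [endRoot].
	if err := proof.Verify(ctx, local, start, end, endRoot); err != nil {
		return nil, err
	}
	return proof, nil
}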
@@ -409,6 +468,50 @@ func (proof *ChangeProof) Verify( return nil } +// Verifies that all values present in the [proof]: +// - Are nothing when deleted, not in the db, or the node has an odd path length. +// - if the node's path is within the key range, that has a value that matches the value passed in the change list or in the db +func verifyAllChangeProofKeyValuesPresent( + ctx context.Context, + db *Database, + proof []ProofNode, + start path, + end path, + keysValues map[path]Maybe[[]byte], +) error { + for i := 0; i < len(proof); i++ { + var ( + node = proof[i] + nodeKey = node.KeyPath + nodePath = nodeKey.deserialize() + ) + + // Check the value of any node with a key that is within the range. + // Skip odd length keys since they cannot have a value (enforced by [verifyProofPath]). + if !nodeKey.hasOddLength() && nodePath.Compare(start) >= 0 && nodePath.Compare(end) <= 0 { + value, ok := keysValues[nodePath] + if !ok { + // This value isn't in the list of key-value pairs we got. + dbValue, err := db.GetValue(ctx, nodeKey.Value) + if err != nil { + if err != database.ErrNotFound { + return err + } + // This key isn't in the database so proof node should have Nothing. + value = Nothing[[]byte]() + } else { + // This key is in the database so proof node should have matching value. + value = Some(dbValue) + } + } + if !valueOrHashMatches(value, node.ValueOrHash) { + return ErrProofValueDoesntMatch + } + } + } + return nil +} + func (proof *ChangeProof) Empty() bool { return len(proof.KeyValues) == 0 && len(proof.DeletedKeys) == 0 && len(proof.StartProof) == 0 && len(proof.EndProof) == 0 @@ -436,29 +539,70 @@ func verifyKeyValues(kvs []KeyValue, start, end []byte) error { } // Returns nil iff all the following hold: -// - Each key in [proof] is a strict prefix of the following key. -// - Each key in [proof] is a strict prefix of [keyBytes], except possibly the last. -// - If the last element in [proof] is [keyBytes], this is an inclusion proof. -// Otherwise, this is an exclusion proof and [keyBytes] must not be in [proof]. -func verifyProofPath(proof []ProofNode, keyBytes []byte) error { - key := newPath(keyBytes).Serialize() - +// - Any node with an odd nibble length, should not have a value associated with it +// since all keys with values are written in bytes, so have even nibble length. +// - Each key in [proof] is a strict prefix of the following key. +// - Each key in [proof] is a strict prefix of [keyBytes], except possibly the last. +// - If the last element in [proof] is [keyBytes], this is an inclusion proof. +// Otherwise, this is an exclusion proof and [keyBytes] must not be in [proof]. 
+func verifyProofPath(proof []ProofNode, keyPath path) error { + provenKey := keyPath.Serialize() + + // loop over all but the last node since it will not have the prefix in exclusion proofs for i := 0; i < len(proof)-1; i++ { nodeKey := proof[i].KeyPath - if !key.HasStrictPrefix(nodeKey) { + // intermediate nodes (nodes with odd nibble length) should never have a value associated with them + if nodeKey.hasOddLength() && !proof[i].ValueOrHash.IsNothing() { + return ErrOddLengthWithValue + } + + // each node should have a key that has the proven key as a prefix + if !provenKey.HasStrictPrefix(nodeKey) { return ErrProofNodeNotForKey } + // each node should have a key that is a prefix of the next node's key nextKey := proof[i+1].KeyPath if !nextKey.HasStrictPrefix(nodeKey) { return ErrNonIncreasingProofNodes } } + // check the last node for a value since the above loop doesn't check the last node + if len(proof) > 0 { + lastNode := proof[len(proof)-1] + if lastNode.KeyPath.hasOddLength() && !lastNode.ValueOrHash.IsNothing() { + return ErrOddLengthWithValue + } + } + return nil } +// Returns true if [value] and [valueDigest] match. +// [valueOrHash] should be the [ValueOrHash] field of a [ProofNode]. +func valueOrHashMatches(value Maybe[[]byte], valueOrHash Maybe[[]byte]) bool { + var ( + valueIsNothing = value.IsNothing() + digestIsNothing = valueOrHash.IsNothing() + ) + + switch { + case valueIsNothing != digestIsNothing: + // One is nothing and the other isn't -- no match. + return false + case valueIsNothing: + // Both are nothing -- match. + return true + case len(value.value) < HashLength: + return bytes.Equal(value.value, valueOrHash.value) + default: + valueHash := hashing.ComputeHash256(value.value) + return bytes.Equal(valueHash, valueOrHash.value) + } +} + // Adds each key/value pair in [proofPath] to [t]. // For each proof node, adds the children that are < [start] or > [end]. // If [start] is empty, no children are < [start]. @@ -468,30 +612,32 @@ func addPathInfo( ctx context.Context, t TrieView, proofPath []ProofNode, - start []byte, - end []byte, + startPath path, + endPath path, ) error { var ( - startPath = newPath(start) - hasLowerBound = len(start) > 0 - endPath = newPath(end) - hasUpperBound = len(end) > 0 + hasLowerBound = len(startPath) > 0 + hasUpperBound = len(endPath) > 0 ) for i := len(proofPath) - 1; i >= 0; i-- { proofNode := proofPath[i] keyPath := proofNode.KeyPath.deserialize() - if len(keyPath)&1 == 1 && !proofNode.Value.IsNothing() { + if len(keyPath)&1 == 1 && !proofNode.ValueOrHash.IsNothing() { // a value cannot have an odd number of nibbles in its key return ErrOddLengthWithValue } // load the node associated with the key or create a new one - n, err := t.insertIntoTrie(ctx, keyPath, proofNode.Value) + // pass nothing because we are going to overwrite the value digest below + n, err := t.insertIntoTrie(ctx, keyPath, Nothing[[]byte]()) if err != nil { return err } + // We overwrite the valueDigest to be the hash provided in the proof + // node because we may not know the pre-image of the valueDigest. + n.valueDigest = proofNode.ValueOrHash if !hasLowerBound && !hasUpperBound { // No children of proof nodes are outside the range. 
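The odd-length rule enforced above follows from how keys are expanded into nibbles: every stored key is a whole number of bytes and therefore an even number of nibbles, so a proof node with an odd-length key can only be an intermediate branch and must not carry a value. The sketch below illustrates that point, together with the strict-prefix ordering of a proof path, over plain byte slices; it deliberately avoids the package's path and SerializedPath types.

package main

import "fmt"

// toNibbles expands each byte of a key into two 4-bit nibbles, the key
// representation the proof-path checks operate on.
func toNibbles(key []byte) []byte {
	nibbles := make([]byte, 0, 2*len(key))
	for _, b := range key {
		nibbles = append(nibbles, b>>4, b&0x0f)
	}
	return nibbles
}

// hasStrictPrefix reports whether [prefix] is a proper (strictly shorter) prefix of [key].
func hasStrictPrefix(key, prefix []byte) bool {
	if len(prefix) >= len(key) {
		return false
	}
	for i, n := range prefix {
		if key[i] != n {
			return false
		}
	}
	return true
}

func main() {
	provenKey := toNibbles([]byte{0x12, 0x34}) // 4 nibbles: even, so it may hold a value
	path := [][]byte{
		{},                      // root
		toNibbles([]byte{0x12}), // 2 nibbles
		{0x1, 0x2, 0x3},         // 3 nibbles: odd, so this node must not carry a value
		provenKey,               // the proven key itself (inclusion proof)
	}
	// Like verifyProofPath, check every node except the last against the
	// next node's key and the proven key.
	for i := 0; i < len(path)-1; i++ {
		fmt.Printf("node %d: prefix of next=%v, prefix of proven key=%v, odd length=%v\n",
			i, hasStrictPrefix(path[i+1], path[i]), hasStrictPrefix(provenKey, path[i]), len(path[i])%2 == 1)
	}
}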
@@ -512,5 +658,28 @@ func addPathInfo( } } } + return nil } + +func getEmptyTrieView(ctx context.Context) (TrieView, error) { + tracer, err := trace.New(trace.Config{Enabled: false}) + if err != nil { + return nil, err + } + db, err := newDatabase( + ctx, + memdb.New(), + Config{ + Tracer: tracer, + ValueCacheSize: verificationCacheSize, + NodeCacheSize: verificationCacheSize, + }, + &mockMetrics{}, + ) + if err != nil { + return nil, err + } + + return db.NewView(ctx) +} diff --git a/x/merkledb/proof_test.go b/x/merkledb/proof_test.go index bdbf83e38b24..298b98c5378a 100644 --- a/x/merkledb/proof_test.go +++ b/x/merkledb/proof_test.go @@ -13,12 +13,11 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" ) -func Test_Proof_Marshal(t *testing.T) { - require := require.New(t) - - dbTrie, err := newDatabase( +func getBasicDB() (*Database, error) { + return newDatabase( context.Background(), memdb.New(), Config{ @@ -29,23 +28,26 @@ func Test_Proof_Marshal(t *testing.T) { }, &mockMetrics{}, ) - require.NoError(err) - require.NotNil(dbTrie) - trie, err := dbTrie.NewView(context.Background()) - require.NoError(err) +} - err = trie.Insert(context.Background(), []byte("key0"), []byte("value0")) - require.NoError(err) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(err) - err = trie.Insert(context.Background(), []byte("key2"), []byte("value2")) - require.NoError(err) - err = trie.Insert(context.Background(), []byte("key3"), []byte("value3")) - require.NoError(err) - err = trie.Insert(context.Background(), []byte("key4"), []byte("value4")) +func writeBasicBatch(t *testing.T, db *Database) { + batch := db.NewBatch() + require.NoError(t, batch.Put([]byte{0}, []byte{0})) + require.NoError(t, batch.Put([]byte{1}, []byte{1})) + require.NoError(t, batch.Put([]byte{2}, []byte{2})) + require.NoError(t, batch.Put([]byte{3}, []byte{3})) + require.NoError(t, batch.Put([]byte{4}, []byte{4})) + require.NoError(t, batch.Write()) +} + +func Test_Proof_Marshal(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() require.NoError(err) + require.NotNil(dbTrie) + writeBasicBatch(t, dbTrie) - proof, err := trie.GetProof(context.Background(), []byte("key1")) + proof, err := dbTrie.GetProof(context.Background(), []byte{1}) require.NoError(err) require.NotNil(proof) @@ -57,6 +59,7 @@ func Test_Proof_Marshal(t *testing.T) { require.NoError(err) verifyPath(t, proof.Path, parsedProof.Path) + require.Equal([]byte{1}, proof.Value.value) } func Test_Proof_Empty(t *testing.T) { @@ -66,37 +69,22 @@ func Test_Proof_Empty(t *testing.T) { } func Test_Proof_MissingValue(t *testing.T) { - trie, err := newDatabase( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - ValueCacheSize: 1000, - HistoryLength: 1000, - NodeCacheSize: 1000, - }, - &mockMetrics{}, - ) + trie, err := getBasicDB() require.NoError(t, err) require.NotNil(t, trie) - err = trie.Insert(context.Background(), []byte("key0"), []byte("value0")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key2"), []byte("value2")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key3"), []byte("value3")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key4"), []byte("value4")) - require.NoError(t, err) - err = 
trie.Insert(context.Background(), []byte("key5678"), []byte("value4")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte{1}, []byte{0})) + require.NoError(t, trie.Insert(context.Background(), []byte{1, 2}, []byte{0})) + require.NoError(t, trie.Insert(context.Background(), []byte{1, 2, 4}, []byte{0})) + require.NoError(t, trie.Insert(context.Background(), []byte{1, 3}, []byte{0})) - proof, err := trie.GetProof(context.Background(), []byte("key5")) + // get a proof for a value not in the db + proof, err := trie.GetProof(context.Background(), []byte{1, 2, 3}) require.NoError(t, err) require.NotNil(t, proof) + require.True(t, proof.Value.IsNothing()) + proofBytes, err := Codec.EncodeProof(Version, proof) require.NoError(t, err) @@ -108,32 +96,13 @@ func Test_Proof_MissingValue(t *testing.T) { } func Test_Proof_Marshal_Errors(t *testing.T) { - trie, err := newDatabase( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - ValueCacheSize: 1000, - HistoryLength: 1000, - NodeCacheSize: 1000, - }, - &mockMetrics{}, - ) + trie, err := getBasicDB() require.NoError(t, err) require.NotNil(t, trie) - err = trie.Insert(context.Background(), []byte("key0"), []byte("value0")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key2"), []byte("value2")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key3"), []byte("value3")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key4"), []byte("value4")) - require.NoError(t, err) + writeBasicBatch(t, trie) - proof, err := trie.GetProof(context.Background(), []byte("key1")) + proof, err := trie.GetProof(context.Background(), []byte{1}) require.NoError(t, err) require.NotNil(t, proof) @@ -146,11 +115,8 @@ func Test_Proof_Marshal_Errors(t *testing.T) { _, err = Codec.DecodeProof(broken, parsed) require.ErrorIs(t, err, io.ErrUnexpectedEOF) } - proofBytes[176] = 35 - parsed := &Proof{} - _, err = Codec.DecodeProof(proofBytes, parsed) - require.ErrorIs(t, err, errChildIndexTooLarge) + // add a child at an invalid index proof.Path[0].Children[255] = ids.Empty _, err = Codec.EncodeProof(Version, proof) require.ErrorIs(t, err, errChildIndexTooLarge) @@ -161,79 +127,189 @@ func verifyPath(t *testing.T, path1, path2 []ProofNode) { for i := range path1 { require.True(t, bytes.Equal(path1[i].KeyPath.Value, path2[i].KeyPath.Value)) require.Equal(t, path1[i].KeyPath.hasOddLength(), path2[i].KeyPath.hasOddLength()) - require.True(t, bytes.Equal(path1[i].Value.value, path2[i].Value.value)) + require.True(t, bytes.Equal(path1[i].ValueOrHash.value, path2[i].ValueOrHash.value)) for childIndex := range path1[i].Children { require.Equal(t, path1[i].Children[childIndex], path2[i].Children[childIndex]) } } } -func Test_RangeProof_Extra_State(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, +func Test_Proof_Verify_Bad_Data(t *testing.T) { + type test struct { + name string + malform func(proof *Proof) + expectedErr error + } + + tests := []test{ + { + name: "happyPath", + malform: func(proof *Proof) {}, + expectedErr: nil, }, - ) - require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key0"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), 
[]byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key4"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key6"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key8"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() + { + name: "odd length key path with value", + malform: func(proof *Proof) { + proof.Path[1].ValueOrHash = Some([]byte{1, 2}) + }, + expectedErr: ErrOddLengthWithValue, + }, + { + name: "last proof node has missing value", + malform: func(proof *Proof) { + proof.Path[len(proof.Path)-1].ValueOrHash = Nothing[[]byte]() + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "missing value on proof", + malform: func(proof *Proof) { + proof.Value = Nothing[[]byte]() + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "mismatched value on proof", + malform: func(proof *Proof) { + proof.Value = Some([]byte{10}) + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "value of exclusion proof", + malform: func(proof *Proof) { + // remove the value node to make it look like it is an exclusion proof + proof.Path = proof.Path[:len(proof.Path)-1] + }, + expectedErr: ErrProofValueDoesntMatch, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + + writeBasicBatch(t, db) + + proof, err := db.GetProof(context.Background(), []byte{2}) + require.NoError(t, err) + require.NotNil(t, proof) + + tt.malform(proof) + + err = proof.Verify(context.Background(), db.getMerkleRoot()) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} + +func Test_Proof_ValueOrHashMatches(t *testing.T) { + require.True(t, valueOrHashMatches(Some([]byte{0}), Some([]byte{0}))) + require.False(t, valueOrHashMatches(Nothing[[]byte](), Some(hashing.ComputeHash256([]byte{0})))) + require.True(t, valueOrHashMatches(Nothing[[]byte](), Nothing[[]byte]())) + + require.False(t, valueOrHashMatches(Some([]byte{0}), Nothing[[]byte]())) + require.False(t, valueOrHashMatches(Nothing[[]byte](), Some([]byte{0}))) + require.False(t, valueOrHashMatches(Nothing[[]byte](), Some(hashing.ComputeHash256([]byte{1})))) + require.False(t, valueOrHashMatches(Some(hashing.ComputeHash256([]byte{0})), Nothing[[]byte]())) +} + +func Test_RangeProof_Extra_Value(t *testing.T) { + db, err := getBasicDB() require.NoError(t, err) + writeBasicBatch(t, db) - val, err := db.Get([]byte("key2")) + val, err := db.Get([]byte{2}) require.NoError(t, err) - require.Equal(t, []byte("value1"), val) + require.Equal(t, []byte{2}, val) - proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key55"), 10) + proof, err := db.GetRangeProof(context.Background(), []byte{1}, []byte{5, 5}, 10) require.NoError(t, err) require.NotNil(t, proof) err = proof.Verify( context.Background(), - []byte("key1"), - []byte("key55"), + []byte{1}, + []byte{5, 5}, db.root.id, ) require.NoError(t, err) - badKeyValues := []KeyValue{proof.KeyValues[0], {Key: []byte("key3"), Value: []byte{}}, proof.KeyValues[1]} - proof.KeyValues = badKeyValues + proof.KeyValues = append(proof.KeyValues, KeyValue{Key: []byte{5}, Value: []byte{5}}) err = proof.Verify( context.Background(), - []byte("key1"), - []byte("key55"), + []byte{1}, + []byte{5, 5}, db.root.id, ) require.ErrorIs(t, err, ErrInvalidProof) } -func Test_RangeProof_MaxLength(t *testing.T) { - dbTrie, err := newDatabase( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - ValueCacheSize: 1000, - HistoryLength: 1000, - 
NodeCacheSize: 1000, +func Test_RangeProof_Verify_Bad_Data(t *testing.T) { + type test struct { + name string + malform func(proof *RangeProof) + expectedErr error + } + + tests := []test{ + { + name: "happyPath", + malform: func(proof *RangeProof) {}, + expectedErr: nil, }, - &mockMetrics{}, - ) + { + name: "StartProof: last proof node has missing value", + malform: func(proof *RangeProof) { + proof.StartProof[len(proof.StartProof)-1].ValueOrHash = Nothing[[]byte]() + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "EndProof: odd length key path with value", + malform: func(proof *RangeProof) { + proof.EndProof[1].ValueOrHash = Some([]byte{1, 2}) + }, + expectedErr: ErrOddLengthWithValue, + }, + { + name: "EndProof: last proof node has missing value", + malform: func(proof *RangeProof) { + proof.EndProof[len(proof.EndProof)-1].ValueOrHash = Nothing[[]byte]() + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "missing key/value", + malform: func(proof *RangeProof) { + proof.KeyValues = proof.KeyValues[1:] + }, + expectedErr: ErrProofNodeHasUnincludedValue, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + writeBasicBatch(t, db) + + proof, err := db.GetRangeProof(context.Background(), []byte{2}, []byte{3, 0}, 50) + require.NoError(t, err) + require.NotNil(t, proof) + + tt.malform(proof) + + err = proof.Verify(context.Background(), []byte{2}, []byte{3, 0}, db.getMerkleRoot()) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} + +func Test_RangeProof_MaxLength(t *testing.T) { + dbTrie, err := getBasicDB() require.NoError(t, err) require.NotNil(t, dbTrie) trie, err := dbTrie.NewView(context.Background()) @@ -247,17 +323,7 @@ func Test_RangeProof_MaxLength(t *testing.T) { } func Test_Proof(t *testing.T) { - dbTrie, err := newDatabase( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - ValueCacheSize: 1000, - HistoryLength: 1000, - NodeCacheSize: 1000, - }, - &mockMetrics{}, - ) + dbTrie, err := getBasicDB() require.NoError(t, err) require.NotNil(t, dbTrie) trie, err := dbTrie.NewView(context.Background()) @@ -283,17 +349,17 @@ func Test_Proof(t *testing.T) { require.Len(t, proof.Path, 3) require.Equal(t, newPath([]byte("key1")).Serialize(), proof.Path[2].KeyPath) - require.Equal(t, Some([]byte("value1")), proof.Path[2].Value) + require.Equal(t, Some([]byte("value1")), proof.Path[2].ValueOrHash) require.Equal(t, newPath([]byte{}).Serialize(), proof.Path[0].KeyPath) - require.True(t, proof.Path[0].Value.IsNothing()) + require.True(t, proof.Path[0].ValueOrHash.IsNothing()) expectedRootID, err := trie.GetMerkleRoot(context.Background()) require.NoError(t, err) err = proof.Verify(context.Background(), expectedRootID) require.NoError(t, err) - proof.Path[2].Value = Some([]byte("value2")) + proof.Path[0].ValueOrHash = Some([]byte("value2")) err = proof.Verify(context.Background(), expectedRootID) require.ErrorIs(t, err, ErrInvalidProof) @@ -473,101 +539,51 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { func Test_RangeProof(t *testing.T) { require := require.New(t) - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) - require.NoError(err) - batch := db.NewBatch() - err = batch.Put([]byte("key0"), []byte("value0")) - require.NoError(err) - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = 
batch.Put([]byte("key2"), []byte("value2")) - require.NoError(err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(err) - err = batch.Put([]byte("key4"), []byte("value4")) - require.NoError(err) - err = batch.Write() + db, err := getBasicDB() require.NoError(err) + writeBasicBatch(t, db) - proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key35"), 10) + proof, err := db.GetRangeProof(context.Background(), []byte{1}, []byte{3, 5}, 10) require.NoError(err) require.NotNil(proof) require.Len(proof.KeyValues, 3) - require.Equal([]byte("key1"), proof.KeyValues[0].Key) - require.Equal([]byte("key2"), proof.KeyValues[1].Key) - require.Equal([]byte("key3"), proof.KeyValues[2].Key) + require.Equal([]byte{1}, proof.KeyValues[0].Key) + require.Equal([]byte{2}, proof.KeyValues[1].Key) + require.Equal([]byte{3}, proof.KeyValues[2].Key) + + require.Equal([]byte{1}, proof.KeyValues[0].Value) + require.Equal([]byte{2}, proof.KeyValues[1].Value) + require.Equal([]byte{3}, proof.KeyValues[2].Value) - require.Equal([]byte("value1"), proof.KeyValues[0].Value) - require.Equal([]byte("value2"), proof.KeyValues[1].Value) - require.Equal([]byte("value3"), proof.KeyValues[2].Value) + require.Equal([]byte{}, proof.EndProof[0].KeyPath.Value) + require.Equal([]byte{0}, proof.EndProof[1].KeyPath.Value) + require.Equal([]byte{3}, proof.EndProof[2].KeyPath.Value) - require.Equal(newPath([]byte("key1")).Serialize(), proof.StartProof[0].KeyPath) - require.Equal(newPath([]byte("key3")).Serialize(), proof.EndProof[2].KeyPath) - require.Equal(SerializedPath{Value: []uint8{0x6b, 0x65, 0x79, 0x30}, NibbleLength: 7}, proof.EndProof[1].KeyPath) - require.Equal(newPath([]byte("")).Serialize(), proof.EndProof[0].KeyPath) + // only a single node here since others are duplicates in endproof + require.Equal([]byte{1}, proof.StartProof[0].KeyPath.Value) err = proof.Verify( context.Background(), - []byte("key1"), - []byte("key35"), + []byte{1}, + []byte{3, 5}, db.root.id, ) require.NoError(err) } func Test_RangeProof_BadBounds(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) - require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key20"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key21"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key22"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key23"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key24"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() + db, err := getBasicDB() require.NoError(t, err) // non-nil start/end - proof, err := db.GetRangeProof(context.Background(), []byte("key4"), []byte("key3"), 50) + proof, err := db.GetRangeProof(context.Background(), []byte{4}, []byte{3}, 50) require.ErrorIs(t, err, ErrStartAfterEnd) require.Nil(t, proof) } func Test_RangeProof_NilStart(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) + db, err := getBasicDB() require.NoError(t, err) batch := db.NewBatch() err = batch.Put([]byte("key1"), []byte("value1")) @@ -611,54 +627,32 @@ func Test_RangeProof_NilStart(t *testing.T) { } func Test_RangeProof_NilEnd(t *testing.T) { - db, err := New( - 
context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) + db, err := getBasicDB() require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key4"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() + writeBasicBatch(t, db) require.NoError(t, err) - val, err := db.Get([]byte("key1")) - require.NoError(t, err) - require.Equal(t, []byte("value1"), val) - - proof, err := db.GetRangeProof(context.Background(), []byte("key1"), nil, 2) + proof, err := db.GetRangeProof(context.Background(), []byte{1}, nil, 2) require.NoError(t, err) require.NotNil(t, proof) require.Len(t, proof.KeyValues, 2) - require.Equal(t, []byte("key1"), proof.KeyValues[0].Key) - require.Equal(t, []byte("key2"), proof.KeyValues[1].Key) + require.Equal(t, []byte{1}, proof.KeyValues[0].Key) + require.Equal(t, []byte{2}, proof.KeyValues[1].Key) - require.Equal(t, []byte("value1"), proof.KeyValues[0].Value) - require.Equal(t, []byte("value2"), proof.KeyValues[1].Value) + require.Equal(t, []byte{1}, proof.KeyValues[0].Value) + require.Equal(t, []byte{2}, proof.KeyValues[1].Value) - require.Equal(t, newPath([]byte("key1")).Serialize(), proof.StartProof[0].KeyPath) + require.Equal(t, []byte{1}, proof.StartProof[0].KeyPath.Value) - require.Equal(t, newPath([]byte("key2")).Serialize(), proof.EndProof[2].KeyPath) - require.Equal(t, SerializedPath{Value: []uint8{0x6b, 0x65, 0x79, 0x30}, NibbleLength: 7}, proof.EndProof[1].KeyPath) - require.Equal(t, newPath([]byte("")).Serialize(), proof.EndProof[0].KeyPath) + require.Equal(t, []byte{}, proof.EndProof[0].KeyPath.Value) + require.Equal(t, []byte{0}, proof.EndProof[1].KeyPath.Value) + require.Equal(t, []byte{2}, proof.EndProof[2].KeyPath.Value) err = proof.Verify( context.Background(), - []byte("key1"), + []byte{1}, nil, db.root.id, ) @@ -666,16 +660,7 @@ func Test_RangeProof_NilEnd(t *testing.T) { } func Test_RangeProof_EmptyValues(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) + db, err := getBasicDB() require.NoError(t, err) batch := db.NewBatch() err = batch.Put([]byte("key1"), nil) @@ -720,34 +705,13 @@ func Test_RangeProof_EmptyValues(t *testing.T) { } func Test_RangeProof_Marshal_Nil(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) - require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key0"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key4"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() + db, err := getBasicDB() require.NoError(t, err) + writeBasicBatch(t, db) - val, err := db.Get([]byte("key1")) + val, err := db.Get([]byte{1}) require.NoError(t, err) - require.Equal(t, []byte("value1"), val) + 
require.Equal(t, []byte{1}, val) proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key35"), 10) require.NoError(t, err) @@ -770,34 +734,14 @@ func Test_RangeProof_Marshal_Nil(t *testing.T) { } func Test_RangeProof_Marshal(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) - require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key0"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key4"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() + db, err := getBasicDB() require.NoError(t, err) - val, err := db.Get([]byte("key1")) + writeBasicBatch(t, db) + + val, err := db.Get([]byte{1}) require.NoError(t, err) - require.Equal(t, []byte("value1"), val) + require.Equal(t, []byte{1}, val) proof, err := db.GetRangeProof(context.Background(), nil, nil, 10) require.NoError(t, err) @@ -820,34 +764,9 @@ func Test_RangeProof_Marshal(t *testing.T) { } func Test_RangeProof_Marshal_Errors(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) + db, err := getBasicDB() require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key0"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key4"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) - - val, err := db.Get([]byte("key1")) - require.NoError(t, err) - require.Equal(t, []byte("value1"), val) + writeBasicBatch(t, db) proof, err := db.GetRangeProof(context.Background(), nil, nil, 10) require.NoError(t, err) @@ -865,16 +784,7 @@ func Test_RangeProof_Marshal_Errors(t *testing.T) { } func Test_ChangeProof_Marshal(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) + db, err := getBasicDB() require.NoError(t, err) batch := db.NewBatch() err = batch.Put([]byte("key0"), []byte("value0")) @@ -944,60 +854,27 @@ func Test_ChangeProof_Marshal(t *testing.T) { } func Test_ChangeProof_Marshal_Errors(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) - require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key0"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key4"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() + db, err := getBasicDB() require.NoError(t, err) 
+ writeBasicBatch(t, db) startRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(t, err) - batch = db.NewBatch() - err = batch.Put([]byte("key4"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key5"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key6"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key7"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key8"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + batch := db.NewBatch() + require.NoError(t, batch.Put([]byte{5}, []byte{5})) + require.NoError(t, batch.Put([]byte{6}, []byte{6})) + require.NoError(t, batch.Put([]byte{7}, []byte{7})) + require.NoError(t, batch.Put([]byte{8}, []byte{8})) + require.NoError(t, batch.Delete([]byte{0})) + require.NoError(t, batch.Write()) batch = db.NewBatch() - err = batch.Put([]byte("key9"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key10"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key11"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key12"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key13"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte{9}, []byte{9})) + require.NoError(t, batch.Put([]byte{10}, []byte{10})) + require.NoError(t, batch.Put([]byte{11}, []byte{11})) + require.NoError(t, batch.Put([]byte{12}, []byte{12})) + require.NoError(t, batch.Delete([]byte{1})) + require.NoError(t, batch.Write()) endroot, err := db.GetMerkleRoot(context.Background()) require.NoError(t, err) @@ -1005,6 +882,8 @@ func Test_ChangeProof_Marshal_Errors(t *testing.T) { require.NoError(t, err) require.NotNil(t, proof) require.True(t, proof.HadRootsInHistory) + require.Len(t, proof.KeyValues, 8) + require.Len(t, proof.DeletedKeys, 2) proofBytes, err := Codec.EncodeChangeProof(Version, proof) require.NoError(t, err) @@ -1017,48 +896,12 @@ func Test_ChangeProof_Marshal_Errors(t *testing.T) { } } -func Test_ChangeProof_Missing_History(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) - require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key0"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key4"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() +func Test_ChangeProof_Missing_History_For_EndRoot(t *testing.T) { + db, err := getBasicDB() require.NoError(t, err) startRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(t, err) - batch = db.NewBatch() - err = batch.Put([]byte("key4"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key5"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key6"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key7"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key8"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) - proof, err := 
db.GetChangeProof(context.Background(), startRoot, ids.Empty, nil, nil, 50) require.NoError(t, err) require.NotNil(t, proof) @@ -1068,47 +911,13 @@ func Test_ChangeProof_Missing_History(t *testing.T) { } func Test_ChangeProof_BadBounds(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) - require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key20"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key21"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key22"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key23"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key24"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() + db, err := getBasicDB() require.NoError(t, err) startRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(t, err) - batch = db.NewBatch() - err = batch.Put([]byte("key30"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key31"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key32"), []byte("value2")) - require.NoError(t, err) - err = batch.Delete([]byte("key21")) - require.NoError(t, err) - err = batch.Delete([]byte("key22")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, db.Insert(context.Background(), []byte{0}, []byte{0})) endRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(t, err) @@ -1120,16 +929,7 @@ func Test_ChangeProof_BadBounds(t *testing.T) { } func Test_ChangeProof_Verify(t *testing.T) { - db, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) + db, err := getBasicDB() require.NoError(t, err) batch := db.NewBatch() err = batch.Put([]byte("key20"), []byte("value0")) @@ -1148,16 +948,7 @@ func Test_ChangeProof_Verify(t *testing.T) { require.NoError(t, err) // create a second db that has "synced" to the start root - dbClone, err := New( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - ValueCacheSize: minCacheSize, - NodeCacheSize: minCacheSize, - }, - ) + dbClone, err := getBasicDB() require.NoError(t, err) batch = dbClone.NewBatch() err = batch.Put([]byte("key20"), []byte("value0")) @@ -1241,6 +1032,78 @@ func Test_ChangeProof_Verify(t *testing.T) { newRoot, err := dbClone.GetMerkleRoot(context.Background()) require.NoError(t, err) require.Equal(t, endRoot, newRoot) + + proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, []byte("key20"), []byte("key30"), 50) + require.NoError(t, err) + require.NotNil(t, proof) + + err = proof.Verify(context.Background(), dbClone, []byte("key20"), []byte("key30"), db.getMerkleRoot()) + require.NoError(t, err) +} + +func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { + type test struct { + name string + malform func(proof *ChangeProof) + expectedErr error + } + + tests := []test{ + { + name: "happyPath", + malform: func(proof *ChangeProof) {}, + expectedErr: nil, + }, + { + name: "odd length key path with value", + malform: func(proof *ChangeProof) { + proof.EndProof[1].ValueOrHash = Some([]byte{1, 2}) + }, + expectedErr: ErrOddLengthWithValue, + }, + { + name: "last proof node has missing value", + malform: func(proof 
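// Each case in this table starts from a change proof that verifies successfully,
// applies one targeted mutation via malform, and asserts the specific verification
// error; the happyPath entry pins down that the unmodified proof still passes Verify.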
*ChangeProof) { + proof.EndProof[len(proof.EndProof)-1].ValueOrHash = Nothing[[]byte]() + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "missing key/value", + malform: func(proof *ChangeProof) { + proof.KeyValues = proof.KeyValues[1:] + }, + expectedErr: ErrProofValueDoesntMatch, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + + startRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + writeBasicBatch(t, db) + + endRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + // create a second db that will be synced to the first db + dbClone, err := getBasicDB() + require.NoError(t, err) + + proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, []byte{2}, []byte{3, 0}, 50) + require.NoError(t, err) + require.NotNil(t, proof) + + tt.malform(proof) + + err = proof.Verify(context.Background(), dbClone, []byte{2}, []byte{3, 0}, db.getMerkleRoot()) + require.ErrorIs(t, err, tt.expectedErr) + }) + } } func Test_ChangeProof_Syntactic_Verify(t *testing.T) { @@ -1478,7 +1341,9 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.proof.Verify(context.Background(), nil, tt.start, tt.end, ids.Empty) + db, err := getBasicDB() + require.NoError(t, err) + err = tt.proof.Verify(context.Background(), db, tt.start, tt.end, ids.Empty) require.ErrorIs(t, err, tt.expectedErr) }) } @@ -1676,11 +1541,21 @@ func TestVerifyProofPath(t *testing.T) { proofKey: []byte{1, 2, 3}, expectedErr: ErrProofNodeNotForKey, }, + { + name: "oddLength key with value", + path: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1, 2}).Serialize()}, + {KeyPath: SerializedPath{Value: []byte{1, 2, 240}, NibbleLength: 5}, ValueOrHash: Some([]byte{1})}, + }, + proofKey: []byte{1, 2, 3}, + expectedErr: ErrOddLengthWithValue, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := verifyProofPath(tt.path, tt.proofKey) + err := verifyProofPath(tt.path, newPath(tt.proofKey)) require.ErrorIs(t, err, tt.expectedErr) }) } diff --git a/x/merkledb/trieview.go b/x/merkledb/trieview.go index 813f3dca75e4..3c2e16c15de8 100644 --- a/x/merkledb/trieview.go +++ b/x/merkledb/trieview.go @@ -315,6 +315,7 @@ func (t *trieView) getProof(ctx context.Context, key []byte) (*Proof, error) { if closestNode.key.Compare(keyPath) == 0 { // There is a node with the given [key]. + proof.Value = closestNode.value return proof, nil } From 75cb25e626504ed5d8764c57cd1c2284c55f3874 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Mon, 27 Feb 2023 18:05:52 -0800 Subject: [PATCH 19/27] Add `Signature.NumSigners` to Warp messages (#2665) --- vms/platformvm/warp/signature.go | 23 ++++++++++ vms/platformvm/warp/signature_test.go | 63 +++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) diff --git a/vms/platformvm/warp/signature.go b/vms/platformvm/warp/signature.go index 511c96df4e02..36699453a79c 100644 --- a/vms/platformvm/warp/signature.go +++ b/vms/platformvm/warp/signature.go @@ -24,6 +24,12 @@ var ( ) type Signature interface { + // NumSigners is the number of [bls.PublicKeys] that participated in the + // [Signature]. This is exposed because users of these signatures typically + // impose a verification fee that is a function of the number of + // signers. 
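	// For example, a consumer of this interface might charge a fee proportional
	// to the signer count (baseFee and feePerSigner are illustrative names, not
	// part of this change):
	//
	//	signers, err := sig.NumSigners()
	//	if err != nil {
	//		return err
	//	}
	//	fee := baseFee + uint64(signers)*feePerSigner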
+ NumSigners() (int, error) + // Verify that this signature was signed by at least [quorumNum]/[quorumDen] // of the validators of [msg.SourceChainID] at [pChainHeight]. // @@ -45,6 +51,19 @@ type BitSetSignature struct { Signature [bls.SignatureLen]byte `serialize:"true"` } +func (s *BitSetSignature) NumSigners() (int, error) { + // Parse signer bit vector + // + // We assert that the length of [signerIndices.Bytes()] is equal + // to [len(s.Signers)] to ensure that [s.Signers] does not have + // any unnecessary zero-padding to represent the [set.Bits]. + signerIndices := set.BitsFromBytes(s.Signers) + if len(signerIndices.Bytes()) != len(s.Signers) { + return 0, ErrInvalidBitSet + } + return signerIndices.HammingWeight(), nil +} + func (s *BitSetSignature) Verify( ctx context.Context, msg *UnsignedMessage, @@ -64,6 +83,10 @@ func (s *BitSetSignature) Verify( } // Parse signer bit vector + // + // We assert that the length of [signerIndices.Bytes()] is equal + // to [len(s.Signers)] to ensure that [s.Signers] does not have + // any unnecessary zero-padding to represent the [set.Bits]. signerIndices := set.BitsFromBytes(s.Signers) if len(signerIndices.Bytes()) != len(s.Signers) { return ErrInvalidBitSet diff --git a/vms/platformvm/warp/signature_test.go b/vms/platformvm/warp/signature_test.go index ae619eb8f18c..65e24e4bc30c 100644 --- a/vms/platformvm/warp/signature_test.go +++ b/vms/platformvm/warp/signature_test.go @@ -71,6 +71,69 @@ func init() { utils.Sort(testVdrs) } +func TestNumSigners(t *testing.T) { + tests := map[string]struct { + generateSignature func() *BitSetSignature + count int + err error + }{ + "empty signers": { + generateSignature: func() *BitSetSignature { + return &BitSetSignature{} + }, + }, + "invalid signers": { + generateSignature: func() *BitSetSignature { + return &BitSetSignature{ + Signers: make([]byte, 1), + } + }, + err: ErrInvalidBitSet, + }, + "no signers": { + generateSignature: func() *BitSetSignature { + signers := set.NewBits() + return &BitSetSignature{ + Signers: signers.Bytes(), + } + }, + }, + "1 signer": { + generateSignature: func() *BitSetSignature { + signers := set.NewBits() + signers.Add(2) + return &BitSetSignature{ + Signers: signers.Bytes(), + } + }, + count: 1, + }, + "multiple signers": { + generateSignature: func() *BitSetSignature { + signers := set.NewBits() + signers.Add(2) + signers.Add(11) + signers.Add(55) + signers.Add(93) + return &BitSetSignature{ + Signers: signers.Bytes(), + } + }, + count: 4, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) + sig := tt.generateSignature() + count, err := sig.NumSigners() + require.Equal(tt.count, count) + require.ErrorIs(err, tt.err) + }) + } +} + func TestSignatureVerification(t *testing.T) { vdrs := map[ids.NodeID]*validators.GetValidatorOutput{ testVdrs[0].nodeID: { From b39246aa69cf4a20cb2911e609cb3d1c845c3025 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Mon, 27 Feb 2023 18:28:09 -0800 Subject: [PATCH 20/27] Standardize `set.Bits` method naming with `set.Bits64` (#2667) --- utils/set/bits.go | 11 +++++++---- utils/set/bits_test.go | 14 +++++++------- vms/platformvm/warp/signature.go | 2 +- vms/platformvm/warp/validator.go | 4 ++-- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/utils/set/bits.go b/utils/set/bits.go index 56d2d09c2cfb..eea32a220cbd 100644 --- a/utils/set/bits.go +++ b/utils/set/bits.go @@ -67,13 +67,16 @@ func (b Bits) Contains(i int) bool { return b.bits.Bit(i) == 1 } -// Len returns the 
bit length of this bitset -func (b Bits) Len() int { +// BitLen returns the bit length of this bitset +func (b Bits) BitLen() int { return b.bits.BitLen() } -// HammingWeight returns the amount of 1's in the bitset -func (b Bits) HammingWeight() int { +// Len returns the amount of 1's in the bitset +// +// This is typically referred to as the "Hamming Weight" +// of a set of bits. +func (b Bits) Len() int { result := 0 for _, word := range b.bits.Bits() { result += bits.OnesCount(uint(word)) diff --git a/utils/set/bits_test.go b/utils/set/bits_test.go index aca6bd0136d2..efe400dccdbd 100644 --- a/utils/set/bits_test.go +++ b/utils/set/bits_test.go @@ -36,7 +36,7 @@ func Test_Bits_New(t *testing.T) { require.True(b.Contains(bit)) } - require.Equal(test.length, b.Len()) + require.Equal(test.length, b.BitLen()) }) } } @@ -124,7 +124,7 @@ func Test_Bits_AddRemove(t *testing.T) { require.True(b.Contains(element)) } - require.Equal(test.expectedLen, b.Len()) + require.Equal(test.expectedLen, b.BitLen()) }) } } @@ -197,7 +197,7 @@ func Test_Bits_Union(t *testing.T) { require.True(b.Contains(element)) } - require.Equal(test.expectedLen, b.Len()) + require.Equal(test.expectedLen, b.BitLen()) }) } } @@ -383,7 +383,7 @@ func Test_Bits_Clear(t *testing.T) { b.Clear() - require.Zero(b.Len()) + require.Zero(b.BitLen()) }) } } @@ -419,7 +419,7 @@ func Test_Bits_String(t *testing.T) { } } -func Test_Bits_HammingWeight(t *testing.T) { +func Test_Bits_Len(t *testing.T) { tests := []struct { name string bitset []int @@ -465,7 +465,7 @@ func Test_Bits_HammingWeight(t *testing.T) { b.Add(bit) } - require.Equal(test.expected, b.HammingWeight()) + require.Equal(test.expected, b.Len()) }) } } @@ -499,7 +499,7 @@ func Test_Bits_Bytes(t *testing.T) { bytes := b.Bytes() fromBytes := BitsFromBytes(bytes) - require.Equal(len(tt.elts), fromBytes.HammingWeight()) + require.Equal(len(tt.elts), fromBytes.Len()) for _, elt := range tt.elts { require.True(fromBytes.Contains(elt)) } diff --git a/vms/platformvm/warp/signature.go b/vms/platformvm/warp/signature.go index 36699453a79c..afb33ec1a6be 100644 --- a/vms/platformvm/warp/signature.go +++ b/vms/platformvm/warp/signature.go @@ -61,7 +61,7 @@ func (s *BitSetSignature) NumSigners() (int, error) { if len(signerIndices.Bytes()) != len(s.Signers) { return 0, ErrInvalidBitSet } - return signerIndices.HammingWeight(), nil + return signerIndices.Len(), nil } func (s *BitSetSignature) Verify( diff --git a/vms/platformvm/warp/validator.go b/vms/platformvm/warp/validator.go index dc7ec1697ed5..6f04872a2630 100644 --- a/vms/platformvm/warp/validator.go +++ b/vms/platformvm/warp/validator.go @@ -95,11 +95,11 @@ func FilterValidators( vdrs []*Validator, ) ([]*Validator, error) { // Verify that all alleged signers exist - if indices.Len() > len(vdrs) { + if indices.BitLen() > len(vdrs) { return nil, fmt.Errorf( "%w: %d >= %d", ErrUnknownValidator, - indices.Len()-1, // -1 to convert from length to index + indices.BitLen()-1, // -1 to convert from length to index len(vdrs), ) } From 590859d2f5f078ee00919e052cfe9f5ce62ae545 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 28 Feb 2023 00:40:54 -0500 Subject: [PATCH 21/27] Use AVM config in `tx.SyntacticVerify` (#2666) Co-authored-by: Chloe <99216251+coffeeavax@users.noreply.github.com> --- vms/avm/txs/base_tx.go | 6 ++-- vms/avm/txs/base_tx_test.go | 28 +++++++++------ vms/avm/txs/create_asset_tx.go | 17 +++++++-- vms/avm/txs/create_asset_tx_test.go | 56 ++++++++++++++--------------- vms/avm/txs/export_tx.go | 6 ++-- 
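// Recap of the renaming above: Bits.BitLen reports the length of the bit string
// (index of the highest set bit plus one), while Bits.Len now reports the number
// of set bits (the Hamming weight, previously HammingWeight). A small illustration:
//
//	b := set.NewBits()
//	b.Add(0)
//	b.Add(7)
//	// b.BitLen() == 8, b.Len() == 2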
vms/avm/txs/export_tx_test.go | 24 ++++++------- vms/avm/txs/import_tx.go | 6 ++-- vms/avm/txs/import_tx_test.go | 4 +-- vms/avm/txs/operation_tx.go | 6 ++-- vms/avm/txs/tx.go | 9 +++-- vms/avm/txs/tx_test.go | 10 +++--- vms/avm/unique_tx.go | 3 +- 12 files changed, 95 insertions(+), 80 deletions(-) diff --git a/vms/avm/txs/base_tx.go b/vms/avm/txs/base_tx.go index 2dc2d04e7987..4a68d8ee686d 100644 --- a/vms/avm/txs/base_tx.go +++ b/vms/avm/txs/base_tx.go @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -50,8 +51,7 @@ func (t *BaseTx) SyntacticVerify( ctx *snow.Context, c codec.Manager, txFeeAssetID ids.ID, - txFee uint64, - _ uint64, + config *config.Config, _ int, ) error { if t == nil { @@ -63,7 +63,7 @@ func (t *BaseTx) SyntacticVerify( } return avax.VerifyTx( - txFee, + config.TxFee, txFeeAssetID, [][]*avax.TransferableInput{t.Ins}, [][]*avax.TransferableOutput{t.Outs}, diff --git a/vms/avm/txs/base_tx_test.go b/vms/avm/txs/base_tx_test.go index 92cbf1111360..ad284bff9610 100644 --- a/vms/avm/txs/base_tx_test.go +++ b/vms/avm/txs/base_tx_test.go @@ -12,11 +12,17 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var TestConfig = config.Config{ + TxFee: 0, + CreateAssetTxFee: 0, +} + func TestBaseTxSerialization(t *testing.T) { expected := []byte{ // Codec version: @@ -272,7 +278,7 @@ func TestBaseTxSyntacticVerify(t *testing.T) { }}, }} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err != nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err != nil { t.Fatal(err) } } @@ -315,7 +321,7 @@ func TestBaseTxSyntacticVerifyMemoTooLarge(t *testing.T) { Memo: make([]byte, avax.MaxMemoSize+1), }} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatal("should have failed because memo is too large") } } @@ -325,7 +331,7 @@ func TestBaseTxSyntacticVerifyNil(t *testing.T) { c := setupCodec() tx := (*BaseTx)(nil) - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("Nil BaseTx should have erred") } } @@ -367,7 +373,7 @@ func TestBaseTxSyntacticVerifyWrongNetworkID(t *testing.T) { }}, }} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("Wrong networkID should have erred") } } @@ -409,7 +415,7 @@ func TestBaseTxSyntacticVerifyWrongChainID(t *testing.T) { }}, }} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("Wrong chain ID should have erred") } } @@ -442,7 +448,7 @@ func TestBaseTxSyntacticVerifyInvalidOutput(t *testing.T) { }}, }} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("Invalid output should 
have erred") } } @@ -498,7 +504,7 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { }, }} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("Unsorted outputs should have erred") } } @@ -523,7 +529,7 @@ func TestBaseTxSyntacticVerifyInvalidInput(t *testing.T) { Ins: []*avax.TransferableInput{nil}, }} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("Invalid input should have erred") } } @@ -585,7 +591,7 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { }, }} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("Input overflow should have erred") } } @@ -639,7 +645,7 @@ func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { }}, }} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("Output overflow should have erred") } } @@ -681,7 +687,7 @@ func TestBaseTxSyntacticVerifyInsufficientFunds(t *testing.T) { }}, }} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("Insufficient funds should have erred") } } diff --git a/vms/avm/txs/create_asset_tx.go b/vms/avm/txs/create_asset_tx.go index d3f3e0954e26..078d1f775309 100644 --- a/vms/avm/txs/create_asset_tx.go +++ b/vms/avm/txs/create_asset_tx.go @@ -13,6 +13,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/vms/avm/config" + "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -66,8 +68,7 @@ func (t *CreateAssetTx) SyntacticVerify( ctx *snow.Context, c codec.Manager, txFeeAssetID ids.ID, - _ uint64, - txFee uint64, + config *config.Config, numFxs int, ) error { switch { @@ -100,7 +101,17 @@ func (t *CreateAssetTx) SyntacticVerify( } } - if err := t.BaseTx.SyntacticVerify(ctx, c, txFeeAssetID, txFee, txFee, numFxs); err != nil { + if err := t.BaseTx.Verify(ctx); err != nil { + return err + } + + if err := avax.VerifyTx( + config.CreateAssetTxFee, + txFeeAssetID, + [][]*avax.TransferableInput{t.Ins}, + [][]*avax.TransferableOutput{t.Outs}, + c, + ); err != nil { return err } diff --git a/vms/avm/txs/create_asset_tx_test.go b/vms/avm/txs/create_asset_tx_test.go index 8f05145c2a3f..50260245b9ce 100644 --- a/vms/avm/txs/create_asset_tx_test.go +++ b/vms/avm/txs/create_asset_tx_test.go @@ -88,7 +88,7 @@ func validCreateAssetTx(t *testing.T) (*CreateAssetTx, codec.Manager, *snow.Cont tx.SetBytes(unsignedBytes) ctx := NewContext(t) - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err != nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 1); err != nil { t.Fatalf("Valid CreateAssetTx failed syntactic verification due to: %s", err) } return tx, c, ctx @@ -468,7 +468,7 @@ func TestCreateAssetTxSyntacticVerify(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err != nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err != nil { t.Fatal(err) } } @@ -479,7 +479,7 @@ func TestCreateAssetTxSyntacticVerifyNil(t *testing.T) { tx := 
(*CreateAssetTx)(nil) - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Nil CreateAssetTx should have erred") } } @@ -501,7 +501,7 @@ func TestCreateAssetTxSyntacticVerifyNameTooShort(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Too short name should have erred") } } @@ -525,7 +525,7 @@ func TestCreateAssetTxSyntacticVerifyNameTooLong(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Too long name should have erred") } } @@ -547,7 +547,7 @@ func TestCreateAssetTxSyntacticVerifySymbolTooShort(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Too short symbol should have erred") } } @@ -569,7 +569,7 @@ func TestCreateAssetTxSyntacticVerifySymbolTooLong(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Too long symbol should have erred") } } @@ -588,7 +588,7 @@ func TestCreateAssetTxSyntacticVerifyNoFxs(t *testing.T) { Denomination: 0, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("No Fxs should have erred") } } @@ -610,7 +610,7 @@ func TestCreateAssetTxSyntacticVerifyDenominationTooLong(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Too large denomination should have erred") } } @@ -632,7 +632,7 @@ func TestCreateAssetTxSyntacticVerifyNameWithWhitespace(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Whitespace at the end of the name should have erred") } } @@ -654,7 +654,7 @@ func TestCreateAssetTxSyntacticVerifyNameWithInvalidCharacter(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Name with an invalid character should have erred") } } @@ -676,7 +676,7 @@ func TestCreateAssetTxSyntacticVerifyNameWithUnicodeCharacter(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Name with an invalid character should have erred") } } @@ -698,7 +698,7 @@ func TestCreateAssetTxSyntacticVerifySymbolWithInvalidCharacter(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Symbol with an invalid character should have erred") } } @@ -720,7 +720,7 @@ func TestCreateAssetTxSyntacticVerifyInvalidBaseTx(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { 
t.Fatalf("Invalid BaseTx should have erred") } } @@ -742,7 +742,7 @@ func TestCreateAssetTxSyntacticVerifyInvalidInitialState(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Invalid InitialState should have erred") } } @@ -769,7 +769,7 @@ func TestCreateAssetTxSyntacticVerifyUnsortedInitialStates(t *testing.T) { }, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 2); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 2); err == nil { t.Fatalf("Unsorted InitialStates should have erred") } } @@ -787,17 +787,17 @@ func TestCreateAssetTxSyntacticVerifyName(t *testing.T) { // String of Length 129 should fail SyntacticVerify tx.Name = nameTooLong - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 1); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to name too long") } tx.Name = invalidWhitespaceStr - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 1); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid whitespace in name") } tx.Name = invalidASCIIStr - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 1); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid ASCII character in name") } } @@ -806,17 +806,17 @@ func TestCreateAssetTxSyntacticVerifySymbol(t *testing.T) { tx, c, ctx := validCreateAssetTx(t) tx.Symbol = symbolTooLong - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 1); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to symbol too long") } tx.Symbol = " F" - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 1); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid whitespace in symbol") } tx.Symbol = "É" - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 1); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid ASCII character in symbol") } } @@ -825,7 +825,7 @@ func TestCreateAssetTxSyntacticVerifyInvalidDenomination(t *testing.T) { tx, c, ctx := validCreateAssetTx(t) tx.Denomination = byte(33) - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 1); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to denomination too large") } } @@ -834,7 +834,7 @@ func TestCreateAssetTxSyntacticVerifyInitialStates(t *testing.T) { tx, c, ctx := validCreateAssetTx(t) tx.States = []*InitialState{} - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 1); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to no Initial States") } @@ -854,7 +854,7 @@ func TestCreateAssetTxSyntacticVerifyInitialStates(t *testing.T) { } // NumFxs is 1, so FxIndex 5 should cause an error - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 
0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 1); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid Fx") } @@ -904,7 +904,7 @@ func TestCreateAssetTxSyntacticVerifyInitialStates(t *testing.T) { uniqueStates[2], uniqueStates[0], } - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 3); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 3); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to non-sorted initial states") } @@ -912,7 +912,7 @@ func TestCreateAssetTxSyntacticVerifyInitialStates(t *testing.T) { uniqueStates[0], uniqueStates[0], } - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 3); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 3); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to non-unique initial states") } } @@ -921,7 +921,7 @@ func TestCreateAssetTxSyntacticVerifyBaseTx(t *testing.T) { tx, c, ctx := validCreateAssetTx(t) var baseTx BaseTx tx.BaseTx = baseTx - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 2); err == nil { + if err := tx.SyntacticVerify(ctx, c, assetID, &TestConfig, 2); err == nil { t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid BaseTx (nil)") } } diff --git a/vms/avm/txs/export_tx.go b/vms/avm/txs/export_tx.go index 1eb8cda063d8..da4fae7ae8a6 100644 --- a/vms/avm/txs/export_tx.go +++ b/vms/avm/txs/export_tx.go @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -42,8 +43,7 @@ func (t *ExportTx) SyntacticVerify( ctx *snow.Context, c codec.Manager, txFeeAssetID ids.ID, - txFee uint64, - _ uint64, + config *config.Config, _ int, ) error { switch { @@ -62,7 +62,7 @@ func (t *ExportTx) SyntacticVerify( } return avax.VerifyTx( - txFee, + config.TxFee, txFeeAssetID, [][]*avax.TransferableInput{t.Ins}, [][]*avax.TransferableOutput{ diff --git a/vms/avm/txs/export_tx_test.go b/vms/avm/txs/export_tx_test.go index 550d86d07936..ff7a9eff71c6 100644 --- a/vms/avm/txs/export_tx_test.go +++ b/vms/avm/txs/export_tx_test.go @@ -219,7 +219,7 @@ func TestExportTxSyntacticVerify(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err != nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err != nil { t.Fatal(err) } } @@ -230,7 +230,7 @@ func TestExportTxSyntacticVerifyNil(t *testing.T) { tx := (*ExportTx)(nil) - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to a nil ExportTx") } } @@ -275,7 +275,7 @@ func TestExportTxSyntacticVerifyWrongNetworkID(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to a wrong network ID") } } @@ -325,7 +325,7 @@ func TestExportTxSyntacticVerifyWrongBlockchainID(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to wrong blockchain ID") } } @@ -371,7 +371,7 @@ func 
TestExportTxSyntacticVerifyInvalidMemo(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to memo field being too long") } } @@ -426,7 +426,7 @@ func TestExportTxSyntacticVerifyInvalidBaseOutput(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to an invalid base output") } } @@ -493,7 +493,7 @@ func TestExportTxSyntacticVerifyUnsortedBaseOutputs(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to unsorted base outputs") } } @@ -538,7 +538,7 @@ func TestExportTxSyntacticVerifyInvalidOutput(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to invalid output") } } @@ -595,7 +595,7 @@ func TestExportTxSyntacticVerifyUnsortedOutputs(t *testing.T) { }, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to unsorted outputs") } } @@ -660,7 +660,7 @@ func TestExportTxSyntacticVerifyInvalidInput(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to invalid input") } } @@ -725,7 +725,7 @@ func TestExportTxSyntacticVerifyUnsortedInputs(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to unsorted inputs") } } @@ -770,7 +770,7 @@ func TestExportTxSyntacticVerifyInvalidFlowCheck(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to an invalid flow check") } } diff --git a/vms/avm/txs/import_tx.go b/vms/avm/txs/import_tx.go index dbfab3a479b0..88813b3f0a5b 100644 --- a/vms/avm/txs/import_tx.go +++ b/vms/avm/txs/import_tx.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -78,8 +79,7 @@ func (t *ImportTx) SyntacticVerify( ctx *snow.Context, c codec.Manager, txFeeAssetID ids.ID, - txFee uint64, - _ uint64, + config *config.Config, _ int, ) error { switch { @@ -96,7 +96,7 @@ func (t *ImportTx) SyntacticVerify( } return avax.VerifyTx( - txFee, + config.TxFee, txFeeAssetID, [][]*avax.TransferableInput{ t.Ins, diff --git a/vms/avm/txs/import_tx_test.go b/vms/avm/txs/import_tx_test.go index 44b7a7df4ea6..606024885bf5 100644 --- a/vms/avm/txs/import_tx_test.go +++ b/vms/avm/txs/import_tx_test.go @@ -219,7 +219,7 @@ func TestImportTxSyntacticVerify(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err != nil { + if 
err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err != nil { t.Fatal(err) } } @@ -265,7 +265,7 @@ func TestImportTxSyntacticVerifyInvalidMemo(t *testing.T) { }}, } - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 0); err == nil { t.Fatalf("should have erred due to memo field being too long") } } diff --git a/vms/avm/txs/operation_tx.go b/vms/avm/txs/operation_tx.go index f6bbb88df96d..61d3d01258ff 100644 --- a/vms/avm/txs/operation_tx.go +++ b/vms/avm/txs/operation_tx.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -91,8 +92,7 @@ func (t *OperationTx) SyntacticVerify( ctx *snow.Context, c codec.Manager, txFeeAssetID ids.ID, - txFee uint64, - _ uint64, + config *config.Config, numFxs int, ) error { switch { @@ -102,7 +102,7 @@ func (t *OperationTx) SyntacticVerify( return errNoOperations } - if err := t.BaseTx.SyntacticVerify(ctx, c, txFeeAssetID, txFee, txFee, numFxs); err != nil { + if err := t.BaseTx.SyntacticVerify(ctx, c, txFeeAssetID, config, numFxs); err != nil { return err } diff --git a/vms/avm/txs/tx.go b/vms/avm/txs/tx.go index 9547e6482211..10048edcd0cb 100644 --- a/vms/avm/txs/tx.go +++ b/vms/avm/txs/tx.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/nftfx" @@ -41,8 +42,7 @@ type UnsignedTx interface { ctx *snow.Context, c codec.Manager, txFeeAssetID ids.ID, - txFee uint64, - creationTxFee uint64, + config *config.Config, numFxs int, ) error // Visit calls [visitor] with this transaction's concrete type @@ -108,15 +108,14 @@ func (t *Tx) SyntacticVerify( ctx *snow.Context, c codec.Manager, txFeeAssetID ids.ID, - txFee uint64, - creationTxFee uint64, + config *config.Config, numFxs int, ) error { if t == nil || t.Unsigned == nil { return errNilTx } - if err := t.Unsigned.SyntacticVerify(ctx, c, txFeeAssetID, txFee, creationTxFee, numFxs); err != nil { + if err := t.Unsigned.SyntacticVerify(ctx, c, txFeeAssetID, config, numFxs); err != nil { return err } diff --git a/vms/avm/txs/tx_test.go b/vms/avm/txs/tx_test.go index 91bf297ef70c..467678ded279 100644 --- a/vms/avm/txs/tx_test.go +++ b/vms/avm/txs/tx_test.go @@ -73,7 +73,7 @@ func TestTxNil(t *testing.T) { } tx := (*Tx)(nil) - if err := tx.SyntacticVerify(ctx, m, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, m, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Should have erred due to nil tx") } } @@ -82,7 +82,7 @@ func TestTxEmpty(t *testing.T) { ctx := NewContext(t) c := setupCodec() tx := &Tx{} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Should have erred due to nil tx") } } @@ -115,7 +115,7 @@ func TestTxInvalidCredential(t *testing.T) { } tx.SetBytes(nil, nil) - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { 
t.Fatalf("Tx should have failed due to an invalid credential") } } @@ -168,7 +168,7 @@ func TestTxInvalidUnsignedTx(t *testing.T) { } tx.SetBytes(nil, nil) - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Tx should have failed due to an invalid unsigned tx") } } @@ -212,7 +212,7 @@ func TestTxInvalidNumberOfCredentials(t *testing.T) { } tx.SetBytes(nil, nil) - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { + if err := tx.SyntacticVerify(ctx, c, ids.Empty, &TestConfig, 1); err == nil { t.Fatalf("Tx should have failed due to an invalid number of credentials") } } diff --git a/vms/avm/unique_tx.go b/vms/avm/unique_tx.go index cc44b7ddcf04..399d4230479c 100644 --- a/vms/avm/unique_tx.go +++ b/vms/avm/unique_tx.go @@ -347,8 +347,7 @@ func (tx *UniqueTx) SyntacticVerify() error { tx.vm.ctx, tx.vm.parser.Codec(), tx.vm.feeAssetID, - tx.vm.TxFee, - tx.vm.CreateAssetTxFee, + &tx.vm.Config, len(tx.vm.fxs), ) return tx.validity From bf41c1d8b6c17e308c59cf89e154b5748259cc8c Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 28 Feb 2023 01:05:36 -0500 Subject: [PATCH 22/27] Add BLS public key to `snow.Context` (#2661) --- chains/manager.go | 1 + proto/pb/vm/vm.pb.go | 962 ++++++++++++++++++------------------ proto/vm/vm.proto | 21 +- snow/context.go | 8 + vms/rpcchainvm/vm_client.go | 2 + vms/rpcchainvm/vm_server.go | 6 + 6 files changed, 516 insertions(+), 484 deletions(-) diff --git a/chains/manager.go b/chains/manager.go index 3b088a5299ff..d2a61700894d 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -448,6 +448,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c SubnetID: chainParams.SubnetID, ChainID: chainParams.ID, NodeID: m.NodeID, + PublicKey: bls.PublicFromSecretKey(m.StakingBLSKey), XChainID: m.XChainID, CChainID: m.CChainID, diff --git a/proto/pb/vm/vm.pb.go b/proto/pb/vm/vm.pb.go index c4cefd3fc778..bf9ae4d0630f 100644 --- a/proto/pb/vm/vm.pb.go +++ b/proto/pb/vm/vm.pb.go @@ -243,22 +243,25 @@ type InitializeRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - NetworkId uint32 `protobuf:"varint,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` - SubnetId []byte `protobuf:"bytes,2,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` - ChainId []byte `protobuf:"bytes,3,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - NodeId []byte `protobuf:"bytes,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` - XChainId []byte `protobuf:"bytes,5,opt,name=x_chain_id,json=xChainId,proto3" json:"x_chain_id,omitempty"` - CChainId []byte `protobuf:"bytes,6,opt,name=c_chain_id,json=cChainId,proto3" json:"c_chain_id,omitempty"` - AvaxAssetId []byte `protobuf:"bytes,7,opt,name=avax_asset_id,json=avaxAssetId,proto3" json:"avax_asset_id,omitempty"` - ChainDataDir string `protobuf:"bytes,8,opt,name=chain_data_dir,json=chainDataDir,proto3" json:"chain_data_dir,omitempty"` - GenesisBytes []byte `protobuf:"bytes,9,opt,name=genesis_bytes,json=genesisBytes,proto3" json:"genesis_bytes,omitempty"` - UpgradeBytes []byte `protobuf:"bytes,10,opt,name=upgrade_bytes,json=upgradeBytes,proto3" json:"upgrade_bytes,omitempty"` - ConfigBytes []byte `protobuf:"bytes,11,opt,name=config_bytes,json=configBytes,proto3" json:"config_bytes,omitempty"` - DbServers []*VersionedDBServer `protobuf:"bytes,12,rep,name=db_servers,json=dbServers,proto3" 
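// The snow/context.go change listed in the diffstat above adds a PublicKey field to
// snow.Context, and chains/manager.go (shown earlier in this patch) populates it with
// bls.PublicFromSecretKey(m.StakingBLSKey). A VM can then, for instance, confirm that
// the key it signs warp messages with matches the node's registered BLS key
// (illustrative use, not part of this patch):
//
//	pk := bls.PublicFromSecretKey(stakingKey)
//	// pk corresponds to ctx.PublicKey, so peers can verify this node's
//	// warp signatures against the validator set's registered keys.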
json:"db_servers,omitempty"` + NetworkId uint32 `protobuf:"varint,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + SubnetId []byte `protobuf:"bytes,2,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + ChainId []byte `protobuf:"bytes,3,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + NodeId []byte `protobuf:"bytes,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // public_key is the BLS public key that would correspond with any signatures + // produced by the warp messaging signer + PublicKey []byte `protobuf:"bytes,5,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + XChainId []byte `protobuf:"bytes,6,opt,name=x_chain_id,json=xChainId,proto3" json:"x_chain_id,omitempty"` + CChainId []byte `protobuf:"bytes,7,opt,name=c_chain_id,json=cChainId,proto3" json:"c_chain_id,omitempty"` + AvaxAssetId []byte `protobuf:"bytes,8,opt,name=avax_asset_id,json=avaxAssetId,proto3" json:"avax_asset_id,omitempty"` + ChainDataDir string `protobuf:"bytes,9,opt,name=chain_data_dir,json=chainDataDir,proto3" json:"chain_data_dir,omitempty"` + GenesisBytes []byte `protobuf:"bytes,10,opt,name=genesis_bytes,json=genesisBytes,proto3" json:"genesis_bytes,omitempty"` + UpgradeBytes []byte `protobuf:"bytes,11,opt,name=upgrade_bytes,json=upgradeBytes,proto3" json:"upgrade_bytes,omitempty"` + ConfigBytes []byte `protobuf:"bytes,12,opt,name=config_bytes,json=configBytes,proto3" json:"config_bytes,omitempty"` + DbServers []*VersionedDBServer `protobuf:"bytes,13,rep,name=db_servers,json=dbServers,proto3" json:"db_servers,omitempty"` // server_addr is the address of the gRPC server which serves // the messenger, keystore, shared memory, blockchain alias, // subnet alias, and appSender services - ServerAddr string `protobuf:"bytes,13,opt,name=server_addr,json=serverAddr,proto3" json:"server_addr,omitempty"` + ServerAddr string `protobuf:"bytes,14,opt,name=server_addr,json=serverAddr,proto3" json:"server_addr,omitempty"` } func (x *InitializeRequest) Reset() { @@ -321,6 +324,13 @@ func (x *InitializeRequest) GetNodeId() []byte { return nil } +func (x *InitializeRequest) GetPublicKey() []byte { + if x != nil { + return x.PublicKey + } + return nil +} + func (x *InitializeRequest) GetXChainId() []byte { if x != nil { return x.XChainId @@ -3048,7 +3058,7 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x03, 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, + 0x6f, 0x74, 0x6f, 0x22, 0xec, 0x03, 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, @@ -3056,484 +3066,486 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x6e, 
0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x0a, 0x78, 0x5f, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x78, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x0a, 0x63, 0x5f, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x63, 0x43, 0x68, - 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x61, 0x76, 0x61, 0x78, 0x5f, 0x61, 0x73, - 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x76, - 0x61, 0x78, 0x41, 0x73, 0x73, 0x65, 0x74, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x44, 0x69, 0x72, 0x12, - 0x23, 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x75, 0x70, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x0a, - 0x64, 0x62, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, - 0x42, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x09, 0x64, 0x62, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, - 0x64, 0x64, 0x72, 0x22, 0xdd, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, - 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, - 0x73, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, - 0x65, 0x64, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x22, 0x4e, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, - 0x44, 0x42, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 
0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, - 0x64, 0x64, 0x72, 0x22, 0x32, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, - 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, - 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, - 0x65, 0x70, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, - 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x41, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x27, 0x0a, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0b, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x08, - 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x22, 0x47, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x61, 0x6e, 0x64, - 0x6c, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x76, 0x6d, 0x2e, - 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, - 0x73, 0x22, 0x65, 0x0a, 0x07, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6c, 0x6f, 0x63, 0x6b, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x22, 0x51, 0x0a, 0x11, 
0x42, 0x75, 0x69, 0x6c, - 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, - 0x0e, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x48, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x5f, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xd9, 0x01, 0x0a, 0x12, - 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, - 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, 0x74, 0x68, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x29, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x0a, 0x78, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x78, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x0a, 0x63, 0x5f, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x63, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x61, 0x76, 0x61, 0x78, 0x5f, 0x61, 0x73, 0x73, + 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x76, 0x61, + 0x78, 0x41, 0x73, 0x73, 0x65, 0x74, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 
0x52, 0x0c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x44, 0x69, 0x72, 0x12, 0x23, + 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x75, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x0a, 0x64, + 0x62, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, 0x42, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x09, 0x64, 0x62, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, + 0x64, 0x72, 0x22, 0xdd, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x64, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, + 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x22, 0x4e, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, + 0x42, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, + 0x64, 0x72, 0x22, 0x32, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, + 0x61, 0x73, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x65, 0x64, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x22, 0x41, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, + 0x0a, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0b, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x08, 0x68, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x22, 0x47, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x76, 0x6d, 0x2e, 0x48, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, + 0x22, 0x65, 0x0a, 0x07, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6c, 0x6f, 0x63, 0x6b, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x22, 0x51, 0x0a, 0x11, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x0e, + 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x48, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xd9, 0x01, 0x0a, 0x12, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 
0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, + 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, 0x74, 0x68, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x29, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x57, 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x21, 0x0a, 0x0f, 0x47, + 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x88, + 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, 
0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x57, 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x21, 0x0a, 0x0f, - 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, - 0x88, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, - 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1b, 0x0a, - 0x03, 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, - 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x26, 0x0a, 0x14, 0x53, 0x65, - 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1b, 0x0a, 0x03, + 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, + 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x26, 0x0a, 0x14, 0x53, 0x65, 0x74, + 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, + 0x64, 0x22, 0x68, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, + 0x0e, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x48, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x5f, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x4f, 0x0a, 0x13, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x24, 0x0a, 0x12, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, - 0x69, 0x64, 0x22, 0x68, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x29, - 0x0a, 0x0e, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x5f, - 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x4f, 0x0a, 0x13, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x24, 0x0a, - 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x02, 0x69, 0x64, 0x22, 0x24, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2a, 0x0a, 0x0e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x2b, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0x99, 0x01, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x69, 0x64, 0x22, 0x24, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2a, 0x0a, 0x0e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x22, 0x2b, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x99, 0x01, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, 0x64, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4d, 0x0a, + 0x13, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, - 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, - 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4d, - 0x0a, 0x13, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, - 0x65, 0x64, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, - 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x22, 0x64, 0x0a, - 0x0e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, - 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x39, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, - 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, - 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0xa5, - 0x01, 0x0a, 0x17, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x59, 0x0a, 0x1d, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x22, 0x70, 0x0a, 0x18, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, - 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x45, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, - 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 
0x01, 0x28, - 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2e, 0x0a, 0x13, 0x44, 0x69, - 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x47, - 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4e, 0x75, 0x6d, 0x12, - 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x22, 0x35, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6b, 0x73, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x62, 0x6c, - 0x6b, 0x73, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, - 0x19, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, - 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, - 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, - 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, - 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, - 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x33, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x50, 0x0a, - 0x1a, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x62, - 0x6c, 0x6b, 
0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, - 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, - 0x5d, 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x61, 0x6d, 0x69, - 0x6c, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, - 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x0e, - 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x22, 0x51, - 0x0a, 0x18, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, - 0x72, 0x22, 0x7f, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, - 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, - 0x72, 0x72, 0x22, 0x78, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, - 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, - 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x18, - 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x60, - 0x0a, 0x19, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, - 0x22, 0x30, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x22, 0x5c, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, - 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x22, 0x64, 0x0a, 0x0e, + 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x17, + 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x39, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, + 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, + 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0xa5, 0x01, + 0x0a, 0x17, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x59, 0x0a, 0x1d, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, + 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, + 0x22, 0x70, 0x0a, 0x18, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x45, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2e, 0x0a, 0x13, 0x44, 0x69, 0x73, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x47, 0x65, + 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4e, 0x75, 0x6d, 0x12, 0x26, + 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x73, 0x52, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, + 0x35, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6b, 0x73, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x62, 0x6c, 0x6b, + 0x73, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x19, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 
0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x6d, + 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, + 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x33, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x50, 0x0a, 0x1a, + 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, + 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, + 0x64, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, + 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x5d, + 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x4b, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x0e, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x22, 0x51, 0x0a, + 0x18, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, - 0x22, 0x31, 0x0a, 0x19, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, - 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, - 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x23, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, - 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, - 0x72, 0x6f, 
0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x51, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, - 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, - 0x4b, 0x49, 0x50, 0x50, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x45, - 0x5f, 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, - 0x45, 0x5f, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, 0x03, 0x2a, 0x65, 0x0a, 0x05, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, - 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x49, - 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, - 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, - 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x5f, 0x4f, 0x50, - 0x10, 0x03, 0x2a, 0x61, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, - 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, - 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x02, - 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x50, - 0x54, 0x45, 0x44, 0x10, 0x03, 0x2a, 0xb6, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, - 0x15, 0x0a, 0x11, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, - 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x4f, - 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x12, 0x26, 0x0a, - 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, - 0x44, 0x45, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, - 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x48, - 0x45, 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x5f, 0x49, 0x4e, 0x43, 0x4f, - 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x04, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x52, 0x52, 0x4f, - 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4e, 0x4f, 0x54, - 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x05, 0x32, 0xa4, - 0x12, 0x0a, 0x02, 0x56, 0x4d, 0x12, 0x3b, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, - 0x69, 0x7a, 0x65, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, - 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, 0x2e, - 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, - 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 
0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x53, 0x68, 0x75, - 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, + 0x22, 0x7f, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, + 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, + 0x72, 0x22, 0x78, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, + 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, + 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x18, 0x50, + 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x60, 0x0a, + 0x19, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, + 0x30, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x22, 0x5c, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, + 0x31, 0x0a, 0x19, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x37, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x23, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x51, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x14, 0x0a, 0x10, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x4b, + 0x49, 0x50, 0x50, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x45, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, + 0x5f, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, 0x03, 0x2a, 0x65, 0x0a, 0x05, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, + 0x41, 0x54, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x49, 0x4e, + 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x4f, + 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x10, + 0x03, 0x2a, 0x61, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x52, + 0x4f, 0x43, 0x45, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, + 0x41, 0x54, 0x55, 0x53, 0x5f, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, + 0x45, 0x44, 0x10, 0x03, 0x2a, 0xb6, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x15, + 0x0a, 0x11, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, + 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x44, + 0x45, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 
0x4e, 0x54, + 0x45, 0x44, 0x10, 0x03, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x48, 0x45, + 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x5f, 0x49, 0x4e, 0x43, 0x4f, 0x4d, + 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x04, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, + 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x05, 0x32, 0xa4, 0x12, + 0x0a, 0x02, 0x56, 0x4d, 0x12, 0x3b, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x49, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x35, 0x0a, 0x08, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x2e, + 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, + 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1a, + 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x14, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x20, 0x2e, 0x76, 0x6d, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, + 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x09, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x44, 0x69, 0x73, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0a, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x76, 0x6d, 
0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, 0x2e, + 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x13, + 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x53, 0x65, 0x74, + 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x6d, 0x2e, + 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, + 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x14, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x20, 0x2e, 0x76, 0x6d, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, - 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, - 0x09, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x14, 0x2e, 0x76, 0x6d, 0x2e, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x11, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x10, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 
0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x44, 0x69, - 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0a, 0x42, 0x75, 0x69, - 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, - 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, - 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, - 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x53, 0x65, - 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x6d, - 0x2e, 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, - 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, + 0x12, 0x10, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, + 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x47, 0x61, + 0x74, 0x68, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 
0x74, 0x79, 0x1a, 0x12, 0x2e, 0x76, + 0x6d, 0x2e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x4b, 0x0a, 0x14, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, + 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x57, 0x0a, + 0x1a, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x2e, 0x76, 0x6d, + 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x11, 0x2e, 0x76, 0x6d, 0x2e, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x10, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, - 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, - 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0b, 0x41, 0x70, 0x70, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, - 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, - 0x70, 0x12, 0x10, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, - 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x47, - 0x61, 0x74, 0x68, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, - 0x76, 0x6d, 0x2e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x43, - 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 
0x12, 0x57, - 0x0a, 0x1a, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x2e, 0x76, - 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x1a, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x15, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, + 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, + 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, + 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1c, 0x2e, + 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, + 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x11, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x15, 0x43, 0x72, 0x6f, 0x73, 0x73, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, - 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x18, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1c, - 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, - 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x11, 0x56, - 0x65, 0x72, 
0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, - 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1d, 0x2e, - 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x10, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, - 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, - 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x76, - 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x50, - 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, - 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x47, - 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 
0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, - 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, - 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, + 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, + 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x10, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, + 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x76, 0x6d, + 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, + 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0f, 0x47, 
0x65, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, + 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x12, 0x53, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, - 0x62, 0x2f, 0x76, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x53, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, + 0x2f, 0x76, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto/vm/vm.proto b/proto/vm/vm.proto index 6c9d527285c2..179db3f48ab8 100644 --- a/proto/vm/vm.proto +++ b/proto/vm/vm.proto @@ -118,18 +118,21 @@ message InitializeRequest { bytes subnet_id = 2; bytes chain_id = 3; bytes 
node_id = 4; - bytes x_chain_id = 5; - bytes c_chain_id = 6; - bytes avax_asset_id = 7; - string chain_data_dir = 8; - bytes genesis_bytes = 9; - bytes upgrade_bytes = 10; - bytes config_bytes = 11; - repeated VersionedDBServer db_servers = 12; + // public_key is the BLS public key that would correspond with any signatures + // produced by the warp messaging signer + bytes public_key = 5; + bytes x_chain_id = 6; + bytes c_chain_id = 7; + bytes avax_asset_id = 8; + string chain_data_dir = 9; + bytes genesis_bytes = 10; + bytes upgrade_bytes = 11; + bytes config_bytes = 12; + repeated VersionedDBServer db_servers = 13; // server_addr is the address of the gRPC server which serves // the messenger, keystore, shared memory, blockchain alias, // subnet alias, and appSender services - string server_addr = 13; + string server_addr = 14; } message InitializeResponse { diff --git a/snow/context.go b/snow/context.go index 5dcd820f2d61..6e6350ec1c8e 100644 --- a/snow/context.go +++ b/snow/context.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) @@ -34,6 +35,7 @@ type Context struct { SubnetID ids.ID ChainID ids.ID NodeID ids.NodeID + PublicKey *bls.PublicKey XChainID ids.ID CChainID ids.ID @@ -94,11 +96,17 @@ type ConsensusContext struct { } func DefaultContextTest() *Context { + sk, err := bls.NewSecretKey() + if err != nil { + panic(err) + } + pk := bls.PublicFromSecretKey(sk) return &Context{ NetworkID: 0, SubnetID: ids.Empty, ChainID: ids.Empty, NodeID: ids.EmptyNodeID, + PublicKey: pk, Log: logging.NoLog{}, BCLookup: ids.NewAliaser(), Metrics: metrics.NewOptionalGatherer(), diff --git a/vms/rpcchainvm/vm_client.go b/vms/rpcchainvm/vm_client.go index 7110073b0143..cdc9f66edb47 100644 --- a/vms/rpcchainvm/vm_client.go +++ b/vms/rpcchainvm/vm_client.go @@ -39,6 +39,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common/appsender" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators/gvalidators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" @@ -199,6 +200,7 @@ func (vm *VMClient) Initialize( SubnetId: chainCtx.SubnetID[:], ChainId: chainCtx.ChainID[:], NodeId: chainCtx.NodeID.Bytes(), + PublicKey: bls.PublicKeyToBytes(chainCtx.PublicKey), XChainId: chainCtx.XChainID[:], CChainId: chainCtx.CChainID[:], AvaxAssetId: chainCtx.AVAXAssetID[:], diff --git a/vms/rpcchainvm/vm_server.go b/vms/rpcchainvm/vm_server.go index 58f0f9a0a654..d06b2a052266 100644 --- a/vms/rpcchainvm/vm_server.go +++ b/vms/rpcchainvm/vm_server.go @@ -32,6 +32,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common/appsender" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators/gvalidators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" @@ -108,6 +109,10 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) if err != nil { return nil, err } + publicKey, err := bls.PublicKeyFromBytes(req.PublicKey) + if err != nil { + return 
nil, err + } xChainID, err := ids.ToID(req.XChainId) if err != nil { return nil, err @@ -222,6 +227,7 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) SubnetID: subnetID, ChainID: chainID, NodeID: nodeID, + PublicKey: publicKey, XChainID: xChainID, CChainID: cChainID, From 379ac8110bf9f6ca8f8d33bb89d761b2cd8b6434 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 28 Feb 2023 01:26:12 -0500 Subject: [PATCH 23/27] Update to go1.20.1 (#2671) --- .../workflows/build-and-test-mac-windows.yml | 2 +- .github/workflows/build-and-test.yml | 2 +- .github/workflows/build-linux-binaries.yml | 4 +- .github/workflows/build-macos-release.yml | 2 +- .github/workflows/build-public-api.yml | 2 +- .../workflows/build-ubuntu-amd64-release.yml | 4 +- .../workflows/build-ubuntu-arm64-release.yml | 4 +- .github/workflows/build-win-release.yml | 2 +- .github/workflows/build.yml | 2 +- .github/workflows/static-analysis.yaml | 2 +- .github/workflows/test.e2e.yml | 2 +- .github/workflows/test.upgrade.yml | 2 +- .golangci.yml | 35 +++++---- Dockerfile | 4 +- README.md | 2 +- chains/atomic/memory.go | 7 +- chains/manager.go | 3 +- codec/reflectcodec/type_codec.go | 25 ++++--- database/benchmark_database.go | 5 -- genesis/genesis.go | 3 + go.mod | 2 +- indexer/index.go | 7 +- network/network.go | 12 ++-- network/peer/gossip_tracker.go | 1 + .../inbound_msg_buffer_throttler.go | 3 +- network/throttling/inbound_msg_throttler.go | 25 ++++--- scripts/build_avalanche.sh | 2 +- scripts/constants.sh | 3 + scripts/lint.sh | 2 +- scripts/local.Dockerfile | 2 +- scripts/tests.e2e.sh | 3 + snow/consensus/snowball/tree.go | 71 ++++++++++--------- snow/engine/avalanche/getter/getter.go | 9 +-- snow/engine/avalanche/transitive.go | 2 +- snow/engine/snowman/block/batched_vm.go | 9 +-- snow/engine/snowman/transitive.go | 2 +- .../networking/sender/mock_external_sender.go | 2 +- tests/colors.go | 10 +-- tests/http.go | 10 ++- utils/dynamicip/ifconfig_resolver.go | 8 ++- utils/hashing/consistent/ring.go | 56 +++++++-------- utils/wrappers/closers.go | 2 +- utils/zero.go | 2 +- vms/avm/index_test.go | 14 ++-- vms/avm/service.go | 6 +- vms/avm/vm_test.go | 5 +- vms/components/avax/utxo_id.go | 4 +- vms/manager.go | 15 ++-- vms/platformvm/state/staker.go | 1 + vms/platformvm/state/state.go | 2 +- .../txs/executor/staker_tx_verification.go | 14 ++-- .../txs/executor/subnet_tx_verification.go | 4 +- vms/platformvm/utxo/handler.go | 2 +- vms/platformvm/warp/signature.go | 2 +- vms/platformvm/warp/validator.go | 4 +- vms/proposervm/block/codec.go | 2 +- vms/proposervm/state_summary.go | 15 ++-- vms/proposervm/vm_byzantine_test.go | 54 +++++++------- vms/proposervm/vm_test.go | 20 +++--- vms/rpcchainvm/runtime/subprocess/runtime.go | 4 +- wallet/chain/p/builder_with_options.go | 2 +- wallet/chain/x/builder_with_options.go | 8 +-- wallet/subnet/primary/api.go | 4 +- x/merkledb/proof.go | 30 ++++---- x/sync/client.go | 6 +- x/sync/syncmanager.go | 6 +- 66 files changed, 313 insertions(+), 270 deletions(-) diff --git a/.github/workflows/build-and-test-mac-windows.yml b/.github/workflows/build-and-test-mac-windows.yml index b9ea9edaed58..c71dbf32489b 100644 --- a/.github/workflows/build-and-test-mac-windows.yml +++ b/.github/workflows/build-and-test-mac-windows.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - name: build_test shell: bash diff --git a/.github/workflows/build-and-test.yml 
b/.github/workflows/build-and-test.yml index 427a0ff075e4..8e0db4a3eade 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.19.3' + go-version: '1.20' check-latest: true - name: build_test shell: bash diff --git a/.github/workflows/build-linux-binaries.yml b/.github/workflows/build-linux-binaries.yml index 96b9ee0db65b..679425e49699 100644 --- a/.github/workflows/build-linux-binaries.yml +++ b/.github/workflows/build-linux-binaries.yml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - run: go version @@ -54,7 +54,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - run: go version diff --git a/.github/workflows/build-macos-release.yml b/.github/workflows/build-macos-release.yml index 9cacaefa0ef4..8c7ba7de87b0 100644 --- a/.github/workflows/build-macos-release.yml +++ b/.github/workflows/build-macos-release.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - run: go version diff --git a/.github/workflows/build-public-api.yml b/.github/workflows/build-public-api.yml index 0f9aba888105..8a7fba22b6a8 100644 --- a/.github/workflows/build-public-api.yml +++ b/.github/workflows/build-public-api.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - run: go version diff --git a/.github/workflows/build-ubuntu-amd64-release.yml b/.github/workflows/build-ubuntu-amd64-release.yml index 492cc617ec99..ec38fb275d18 100644 --- a/.github/workflows/build-ubuntu-amd64-release.yml +++ b/.github/workflows/build-ubuntu-amd64-release.yml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - run: go version @@ -54,7 +54,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - run: go version diff --git a/.github/workflows/build-ubuntu-arm64-release.yml b/.github/workflows/build-ubuntu-arm64-release.yml index 208aae29ed9f..8ea87edb8093 100644 --- a/.github/workflows/build-ubuntu-arm64-release.yml +++ b/.github/workflows/build-ubuntu-arm64-release.yml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - run: go version @@ -54,7 +54,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - run: go version diff --git a/.github/workflows/build-win-release.yml b/.github/workflows/build-win-release.yml index c9afd638bff1..77cd7a64535f 100644 --- a/.github/workflows/build-win-release.yml +++ b/.github/workflows/build-win-release.yml @@ -21,7 +21,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - run: go version diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index bfbb6ea4c259..c44ea987d7eb 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,7 +19,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.19' 
+ go-version: '1.20' check-latest: true - name: Run GoReleaser uses: goreleaser/goreleaser-action@v2 diff --git a/.github/workflows/static-analysis.yaml b/.github/workflows/static-analysis.yaml index a3fbc1cc850b..7056e6765b08 100644 --- a/.github/workflows/static-analysis.yaml +++ b/.github/workflows/static-analysis.yaml @@ -17,7 +17,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.18' + go-version: '1.20' check-latest: true - name: Run static analysis tests shell: bash diff --git a/.github/workflows/test.e2e.yml b/.github/workflows/test.e2e.yml index 1a8647ecb1f5..911c79ae507d 100644 --- a/.github/workflows/test.e2e.yml +++ b/.github/workflows/test.e2e.yml @@ -18,7 +18,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - name: Build the avalanchego binary shell: bash diff --git a/.github/workflows/test.upgrade.yml b/.github/workflows/test.upgrade.yml index 5374b7bedef1..c7bc66b63747 100644 --- a/.github/workflows/test.upgrade.yml +++ b/.github/workflows/test.upgrade.yml @@ -18,7 +18,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.19' + go-version: '1.20' check-latest: true - name: Build the avalanchego binary shell: bash diff --git a/.golangci.yml b/.golangci.yml index a1cc0b831201..ea7a6691310f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -16,6 +16,7 @@ linters: disable-all: true enable: - asciicheck + - bodyclose - depguard - errcheck - errorlint @@ -25,31 +26,27 @@ linters: - gofmt - gofumpt - goimports - - revive + - goprintffuncname - gosec - gosimple - govet - ineffassign - misspell - nakedret + - noctx - nolintlint - prealloc + - revive + - staticcheck - stylecheck + - typecheck - unconvert - unparam - unused - - unconvert - whitespace - - staticcheck - # - bodyclose - # - structcheck - # - lll - # - gomnd - # - goprintffuncname - # - interfacer - # - typecheck # - goerr113 - # - noctx + # - gomnd + # - lll linters-settings: errorlint: @@ -83,13 +80,13 @@ linters-settings: - name: unhandled-error disabled: false arguments: - - "fmt.Fprint" - - "fmt.Fprintf" - - "fmt.Print" - - "fmt.Printf" - - "fmt.Println" - - "rand.Read" - - "sb.WriteString" + - "fmt\\.Fprint" + - "fmt\\.Fprintf" + - "fmt\\.Print" + - "fmt\\.Printf" + - "fmt\\.Println" + - "math/rand\\.Read" + - "strings\\.Builder\\.WriteString" # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter - name: unused-parameter disabled: false @@ -100,7 +97,7 @@ linters-settings: - name: useless-break disabled: false staticcheck: - go: "1.18" + go: "1.20" # https://staticcheck.io/docs/options#checks checks: - "all" diff --git a/Dockerfile b/Dockerfile index 0c51e03c71b4..3c3f0c23ee18 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,8 +5,8 @@ # README.md # go.mod # ============= Compilation Stage ================ -FROM golang:1.18.5-buster AS builder -RUN apt-get update && apt-get install -y --no-install-recommends bash=5.0-4 git=1:2.20.1-2+deb10u3 make=4.2.1-1.2 gcc=4:8.3.0-1 musl-dev=1.1.21-2 ca-certificates=20200601~deb10u2 linux-headers-amd64 +FROM golang:1.20.1-buster AS builder +RUN apt-get update && apt-get install -y --no-install-recommends bash=5.0-4 make=4.2.1-1.2 gcc=4:8.3.0-1 musl-dev=1.1.21-2 ca-certificates=20200601~deb10u2 linux-headers-amd64 WORKDIR /build # Copy and download avalanche dependencies using go mod diff --git a/README.md b/README.md index d2dcf82b343b..7f977cdf61ad 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ The minimum 
recommended hardware specification for nodes connected to Mainnet is If you plan to build AvalancheGo from source, you will also need the following software: -- [Go](https://golang.org/doc/install) version >= 1.18.1 +- [Go](https://golang.org/doc/install) version >= 1.20.1 - [gcc](https://gcc.gnu.org/) - g++ diff --git a/chains/atomic/memory.go b/chains/atomic/memory.go index b9f1571bd1f0..22404da0643b 100644 --- a/chains/atomic/memory.go +++ b/chains/atomic/memory.go @@ -49,7 +49,7 @@ func (m *Memory) NewSharedMemory(chainID ids.ID) SharedMemory { // database // // Invariant: ReleaseSharedDatabase must be called after to free the database -// associated with [sharedID] +// associated with [sharedID] func (m *Memory) GetSharedDatabase(db database.Database, sharedID ids.ID) database.Database { lock := m.makeLock(sharedID) lock.Lock() @@ -59,9 +59,8 @@ func (m *Memory) GetSharedDatabase(db database.Database, sharedID ids.ID) databa // ReleaseSharedDatabase unlocks the provided DB // // Note: ReleaseSharedDatabase must be called only after a corresponding call to -// GetSharedDatabase. -// If ReleaseSharedDatabase is called without a corresponding one-to-one -// call with GetSharedDatabase, it will panic. +// GetSharedDatabase. If ReleaseSharedDatabase is called without a corresponding +// one-to-one call with GetSharedDatabase, it will panic. func (m *Memory) ReleaseSharedDatabase(sharedID ids.ID) { lock := m.releaseLock(sharedID) lock.Unlock() diff --git a/chains/manager.go b/chains/manager.go index d2a61700894d..b1c0007a49a5 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -299,8 +299,9 @@ func (m *manager) QueueChainCreation(chainParams ChainParameters) { } // createChain creates and starts the chain +// // Note: it is expected for the subnet to already have the chain registered as -// bootstrapping before this function is called +// bootstrapping before this function is called func (m *manager) createChain(chainParams ChainParameters) { m.Log.Info("creating chain", zap.Stringer("subnetID", chainParams.SubnetID), diff --git a/codec/reflectcodec/type_codec.go b/codec/reflectcodec/type_codec.go index 544f5010751c..e5cf0ef24149 100644 --- a/codec/reflectcodec/type_codec.go +++ b/codec/reflectcodec/type_codec.go @@ -52,16 +52,21 @@ type TypeCodec interface { // implementation for interface encoding. // // A few notes: -// 1) We use "marshal" and "serialize" interchangeably, and "unmarshal" and "deserialize" interchangeably -// 2) To include a field of a struct in the serialized form, add the tag `{tagName}:"true"` to it. `{tagName}` defaults to `serialize`. -// 3) These typed members of a struct may be serialized: -// bool, string, uint[8,16,32,64], int[8,16,32,64], -// structs, slices, arrays, interface. -// structs, slices and arrays can only be serialized if their constituent values can be. -// 4) To marshal an interface, you must pass a pointer to the value -// 5) To unmarshal an interface, you must call codec.RegisterType([instance of the type that fulfills the interface]). -// 6) Serialized fields must be exported -// 7) nil slices are marshaled as empty slices +// +// 1. We use "marshal" and "serialize" interchangeably, and "unmarshal" and +// "deserialize" interchangeably +// 2. To include a field of a struct in the serialized form, add the tag +// `{tagName}:"true"` to it. `{tagName}` defaults to `serialize`. +// 3. These typed members of a struct may be serialized: +// bool, string, uint[8,16,32,64], int[8,16,32,64], +// structs, slices, arrays, interface. 
+// structs, slices and arrays can only be serialized if their constituent +// values can be. +// 4. To marshal an interface, you must pass a pointer to the value +// 5. To unmarshal an interface, you must call +// codec.RegisterType([instance of the type that fulfills the interface]). +// 6. Serialized fields must be exported +// 7. nil slices are marshaled as empty slices type genericCodec struct { typer TypeCodec maxSliceLen uint32 diff --git a/database/benchmark_database.go b/database/benchmark_database.go index 487451686ba9..d99f734c026a 100644 --- a/database/benchmark_database.go +++ b/database/benchmark_database.go @@ -99,7 +99,6 @@ func BenchmarkPut(b *testing.B, db Database, name string, keys, values [][]byte) } // BenchmarkDelete measures the time it takes to delete a (k, v) from a database. -//nolint:interfacer // This function takes in a database to be the expected type. func BenchmarkDelete(b *testing.B, db Database, name string, keys, values [][]byte) { count := len(keys) if count == 0 { @@ -127,7 +126,6 @@ func BenchmarkDelete(b *testing.B, db Database, name string, keys, values [][]by } // BenchmarkBatchPut measures the time it takes to batch put. -//nolint:interfacer // This function takes in a database to be the expected type. func BenchmarkBatchPut(b *testing.B, db Database, name string, keys, values [][]byte) { count := len(keys) if count == 0 { @@ -145,7 +143,6 @@ func BenchmarkBatchPut(b *testing.B, db Database, name string, keys, values [][] } // BenchmarkBatchDelete measures the time it takes to batch delete. -//nolint:interfacer // This function takes in a database to be the expected type. func BenchmarkBatchDelete(b *testing.B, db Database, name string, keys, values [][]byte) { count := len(keys) if count == 0 { @@ -163,7 +160,6 @@ func BenchmarkBatchDelete(b *testing.B, db Database, name string, keys, values [ } // BenchmarkBatchWrite measures the time it takes to batch write. -//nolint:interfacer // This function takes in a database to be the expected type. func BenchmarkBatchWrite(b *testing.B, db Database, name string, keys, values [][]byte) { count := len(keys) if count == 0 { @@ -237,7 +233,6 @@ func BenchmarkParallelPut(b *testing.B, db Database, name string, keys, values [ } // BenchmarkParallelDelete measures the time it takes to delete a (k, v) from the db. -//nolint:interfacer // This function takes in a database to be the expected type. func BenchmarkParallelDelete(b *testing.B, db Database, name string, keys, values [][]byte) { count := len(keys) if count == 0 { diff --git a/genesis/genesis.go b/genesis/genesis.go index 89a743105635..30f80dde0a50 100644 --- a/genesis/genesis.go +++ b/genesis/genesis.go @@ -187,6 +187,7 @@ func validateConfig(networkID uint32, config *Config, stakingCfg *StakingConfig) // loads the network genesis data from the config at [filepath]. // // FromFile returns: +// // 1. The byte representation of the genesis state of the platform chain // (ie the genesis state of the network) // 2. The asset ID of AVAX @@ -228,6 +229,7 @@ func FromFile(networkID uint32, filepath string, stakingCfg *StakingConfig) ([]b // loads the network genesis data from [genesisContent]. // // FromFlag returns: +// // 1. The byte representation of the genesis state of the platform chain // (ie the genesis state of the network) // 2. The asset ID of AVAX @@ -254,6 +256,7 @@ func FromFlag(networkID uint32, genesisContent string, stakingCfg *StakingConfig } // FromConfig returns: +// // 1. 
The byte representation of the genesis state of the platform chain // (ie the genesis state of the network) // 2. The asset ID of AVAX diff --git a/go.mod b/go.mod index 538ebc7ba129..1ef6fe622174 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ module github.com/ava-labs/avalanchego // Dockerfile // README.md // go.mod (here, only major.minor can be specified) -go 1.18 +go 1.20 require ( github.com/Microsoft/go-winio v0.5.2 diff --git a/indexer/index.go b/indexer/index.go index b997886e8cb9..739705ac2c42 100644 --- a/indexer/index.go +++ b/indexer/index.go @@ -293,9 +293,10 @@ func (i *index) GetLastAccepted() (Container, error) { // Assumes i.lock is held // Returns: -// 1) The index of the most recently accepted transaction, -// or 0 if no transactions have been accepted -// 2) Whether at least 1 transaction has been accepted +// +// 1. The index of the most recently accepted transaction, or 0 if no +// transactions have been accepted +// 2. Whether at least 1 transaction has been accepted func (i *index) lastAcceptedIndex() (uint64, bool) { return i.nextAcceptedIndex - 1, i.nextAcceptedIndex != 0 } diff --git a/network/network.go b/network/network.go index edd29728e503..ea502c239af1 100644 --- a/network/network.go +++ b/network/network.go @@ -831,12 +831,12 @@ func (n *network) ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) { // getPeers returns a slice of connected peers from a set of [nodeIDs]. // -// - [nodeIDs] the IDs of the peers that should be returned if they are -// connected. -// - [subnetID] the subnetID whose membership should be considered if -// [validatorOnly] is set to true. -// - [validatorOnly] is the flag to drop any nodes from [nodeIDs] that are not -// validators in [subnetID]. +// - [nodeIDs] the IDs of the peers that should be returned if they are +// connected. +// - [subnetID] the subnetID whose membership should be considered if +// [validatorOnly] is set to true. +// - [validatorOnly] is the flag to drop any nodes from [nodeIDs] that are not +// validators in [subnetID]. func (n *network) getPeers( nodeIDs set.Set[ids.NodeID], subnetID ids.ID, diff --git a/network/peer/gossip_tracker.go b/network/peer/gossip_tracker.go index cc27907774eb..7819c1fc130b 100644 --- a/network/peer/gossip_tracker.go +++ b/network/peer/gossip_tracker.go @@ -260,6 +260,7 @@ func (g *gossipTracker) ResetValidator(validatorID ids.NodeID) bool { } // AddKnown invariants: +// // 1. [peerID] SHOULD only be a nodeID that has been tracked with // StartTrackingPeer(). func (g *gossipTracker) AddKnown( diff --git a/network/throttling/inbound_msg_buffer_throttler.go b/network/throttling/inbound_msg_buffer_throttler.go index fd8b8f89304e..5242ef4766df 100644 --- a/network/throttling/inbound_msg_buffer_throttler.go +++ b/network/throttling/inbound_msg_buffer_throttler.go @@ -57,8 +57,9 @@ type inboundMsgBufferThrottler struct { // buffer so that we can read a message from [nodeID]. // The returned release function must be called (!) when done processing the message // (or when we give up trying to read the message.) +// // invariant: There should be a maximum of 1 blocking call to Acquire for a -// given nodeID. Callers must enforce this invariant. +// given nodeID. Callers must enforce this invariant. 
func (t *inboundMsgBufferThrottler) Acquire(ctx context.Context, nodeID ids.NodeID) ReleaseFunc { startTime := time.Now() defer func() { diff --git a/network/throttling/inbound_msg_throttler.go b/network/throttling/inbound_msg_throttler.go index fcf28f3abd9f..70a3e3d81fd6 100644 --- a/network/throttling/inbound_msg_throttler.go +++ b/network/throttling/inbound_msg_throttler.go @@ -119,20 +119,23 @@ func NewInboundMsgThrottler( } // A sybil-safe inbound message throttler. -// Rate-limits reading of inbound messages to prevent peers from -// consuming excess resources. +// Rate-limits reading of inbound messages to prevent peers from consuming +// excess resources. // The three resources considered are: -// 1. An inbound message buffer, where each message that we're currently -// processing takes up 1 unit of space on the buffer. -// 2. An inbound message byte buffer, where a message of length n -// that we're currently processing takes up n units of space on the buffer. -// 3. Bandwidth. The bandwidth rate-limiting is implemented using a token bucket, -// where each token is 1 byte. See BandwidthThrottler. +// +// 1. An inbound message buffer, where each message that we're currently +// processing takes up 1 unit of space on the buffer. +// 2. An inbound message byte buffer, where a message of length n +// that we're currently processing takes up n units of space on the buffer. +// 3. Bandwidth. The bandwidth rate-limiting is implemented using a token +// bucket, where each token is 1 byte. See BandwidthThrottler. +// // A call to Acquire([msgSize], [nodeID]) blocks until we've secured -// enough of both these resources to read a message of size [msgSize] from [nodeID]. +// enough of both these resources to read a message of size [msgSize] from +// [nodeID]. type inboundMsgThrottler struct { - // Rate-limits based on number of messages from a given - // node that we're currently processing. + // Rate-limits based on number of messages from a given node that we're + // currently processing. bufferThrottler *inboundMsgBufferThrottler // Rate-limits based on recent bandwidth usage bandwidthThrottler bandwidthThrottler diff --git a/scripts/build_avalanche.sh b/scripts/build_avalanche.sh index 3f18dacabe69..cdf6fdb5af4b 100755 --- a/scripts/build_avalanche.sh +++ b/scripts/build_avalanche.sh @@ -30,7 +30,7 @@ done # Dockerfile # README.md # go.mod -go_version_minimum="1.18.1" +go_version_minimum="1.20.1" go_version() { go version | sed -nE -e 's/[^0-9.]+([0-9.]+).+/\1/p' diff --git a/scripts/constants.sh b/scripts/constants.sh index 2fa3a74c1415..deb8be2a245b 100755 --- a/scripts/constants.sh +++ b/scripts/constants.sh @@ -40,3 +40,6 @@ fi # We use "export" here instead of just setting a bash variable because we need # to pass this flag to all child processes spawned by the shell. export CGO_CFLAGS="-O -D__BLST_PORTABLE__" +# While CGO_ENABLED doesn't need to be explicitly set, it produces a much more +# clear error due to the default value change in go1.20. 
+export CGO_ENABLED=1 diff --git a/scripts/lint.sh b/scripts/lint.sh index 973d441c89e7..2159fa961e0e 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -24,7 +24,7 @@ fi TESTS=${TESTS:-"golangci_lint license_header"} function test_golangci_lint { - go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.49.0 + go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 golangci-lint run --config .golangci.yml } diff --git a/scripts/local.Dockerfile b/scripts/local.Dockerfile index 7fefd9053408..e3db6b239c76 100644 --- a/scripts/local.Dockerfile +++ b/scripts/local.Dockerfile @@ -9,7 +9,7 @@ # Dockerfile # README.md # go.mod -FROM golang:1.18.5-buster +FROM golang:1.20.1-buster RUN mkdir -p /go/src/github.com/ava-labs diff --git a/scripts/tests.e2e.sh b/scripts/tests.e2e.sh index 8188dcc4e2b0..d5933ad2fd3c 100755 --- a/scripts/tests.e2e.sh +++ b/scripts/tests.e2e.sh @@ -24,6 +24,9 @@ fi # We use "export" here instead of just setting a bash variable because we need # to pass this flag to all child processes spawned by the shell. export CGO_CFLAGS="-O -D__BLST_PORTABLE__" +# While CGO_ENABLED doesn't need to be explicitly set, it produces a much more +# clear error due to the default value change in go1.20. +export CGO_ENABLED=1 ENABLE_WHITELIST_VTX_TESTS=${ENABLE_WHITELIST_VTX_TESTS:-false} # ref. https://onsi.github.io/ginkgo/#spec-labels diff --git a/snow/consensus/snowball/tree.go b/snow/consensus/snowball/tree.go index c4a580bb5cdf..2ec00b82327b 100644 --- a/snow/consensus/snowball/tree.go +++ b/snow/consensus/snowball/tree.go @@ -179,15 +179,18 @@ func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } +//nolint:gofmt,gofmpt,gofumpt,goimports // this comment is formatted as intended +// // This is by far the most complicated function in this algorithm. // The intuition is that this instance represents a series of consecutive unary // snowball instances, and this function's purpose is convert one of these unary // snowball instances into a binary snowball instance. // There are 5 possible cases. -// 1. None of these instances should be split, we should attempt to split a -// child // -// For example, attempting to insert the value "00001" in this node: +// 1. None of these instances should be split, we should attempt to split a +// child +// +// For example, attempting to insert the value "00001" in this node: // // +-------------------+ <-- This node will not be split // | | @@ -197,7 +200,7 @@ func (u *unaryNode) DecidedPrefix() int { // ^ // | // -// Results in: +// Results in: // // +-------------------+ // | | @@ -207,13 +210,14 @@ func (u *unaryNode) DecidedPrefix() int { // ^ // | // -// 2. This instance represents a series of only one unary instance and it must -// be split -// This will return a binary choice, with one child the same as my child, -// and another (possibly nil child) representing a new chain to the end of -// the hash +// 2. This instance represents a series of only one unary instance and it must +// be split. 
+// +// This will return a binary choice, with one child the same as my child, +// and another (possibly nil child) representing a new chain to the end of +// the hash // -// For example, attempting to insert the value "1" in this tree: +// For example, attempting to insert the value "1" in this tree: // // +-------------------+ // | | @@ -221,7 +225,7 @@ func (u *unaryNode) DecidedPrefix() int { // | | // +-------------------+ // -// Results in: +// Results in: // // +-------------------+ // | | | @@ -229,12 +233,13 @@ func (u *unaryNode) DecidedPrefix() int { // | | | // +-------------------+ // -// 3. This instance must be split on the first bit -// This will return a binary choice, with one child equal to this instance -// with decidedPrefix increased by one, and another representing a new -// chain to the end of the hash +// 3. This instance must be split on the first bit // -// For example, attempting to insert the value "10" in this tree: +// This will return a binary choice, with one child equal to this instance +// with decidedPrefix increased by one, and another representing a new +// chain to the end of the hash +// +// For example, attempting to insert the value "10" in this tree: // // +-------------------+ // | | @@ -242,7 +247,7 @@ func (u *unaryNode) DecidedPrefix() int { // | | // +-------------------+ // -// Results in: +// Results in: // // +-------------------+ // | | | @@ -257,13 +262,14 @@ func (u *unaryNode) DecidedPrefix() int { // | | | | // +-------------------+ +-------------------+ // -// 4. This instance must be split on the last bit -// This will modify this unary choice. The commonPrefix is decreased by -// one. The child is set to a binary instance that has a child equal to -// the current child and another child equal to a new unary instance to -// the end of the hash +// 4. This instance must be split on the last bit +// +// This will modify this unary choice. The commonPrefix is decreased by +// one. The child is set to a binary instance that has a child equal to +// the current child and another child equal to a new unary instance to +// the end of the hash // -// For example, attempting to insert the value "01" in this tree: +// For example, attempting to insert the value "01" in this tree: // // +-------------------+ // | | @@ -271,7 +277,7 @@ func (u *unaryNode) DecidedPrefix() int { // | | // +-------------------+ // -// Results in: +// Results in: // // +-------------------+ // | | @@ -286,14 +292,15 @@ func (u *unaryNode) DecidedPrefix() int { // | | | // +-------------------+ // -// 5. This instance must be split on an interior bit -// This will modify this unary choice. The commonPrefix is set to the -// interior bit. The child is set to a binary instance that has a child -// equal to this unary choice with the decidedPrefix equal to the interior -// bit and another child equal to a new unary instance to the end of the -// hash +// 5. This instance must be split on an interior bit +// +// This will modify this unary choice. The commonPrefix is set to the +// interior bit. 
The child is set to a binary instance that has a child +// equal to this unary choice with the decidedPrefix equal to the interior +// bit and another child equal to a new unary instance to the end of the +// hash // -// For example, attempting to insert the value "010" in this tree: +// For example, attempting to insert the value "010" in this tree: // // +-------------------+ // | | @@ -301,7 +308,7 @@ func (u *unaryNode) DecidedPrefix() int { // | | // +-------------------+ // -// Results in: +// Results in: // // +-------------------+ // | | diff --git a/snow/engine/avalanche/getter/getter.go b/snow/engine/avalanche/getter/getter.go index abede8098f67..986980cd1619 100644 --- a/snow/engine/avalanche/getter/getter.go +++ b/snow/engine/avalanche/getter/getter.go @@ -115,12 +115,13 @@ func (gh *getter) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID vtxBytes := vtx.Bytes() // Ensure response size isn't too large. Include wrappers.IntLen because the size of the message // is included with each container, and the size is repr. by an int. - if newLen := wrappers.IntLen + ancestorsBytesLen + len(vtxBytes); newLen < constants.MaxContainersLen { - ancestorsBytes = append(ancestorsBytes, vtxBytes) - ancestorsBytesLen = newLen - } else { // reached maximum response size + newLen := wrappers.IntLen + ancestorsBytesLen + len(vtxBytes) + if newLen > constants.MaxContainersLen { + // reached maximum response size break } + ancestorsBytes = append(ancestorsBytes, vtxBytes) + ancestorsBytesLen = newLen parents, err := vtx.Parents() if err != nil { return err diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index ea1429d3d492..c59aa9058d9d 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -365,7 +365,7 @@ func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { if vmErr == nil { return intf, consensusErr } - return intf, fmt.Errorf("vm: %w ; consensus: %s", vmErr, consensusErr) + return intf, fmt.Errorf("vm: %w ; consensus: %w", vmErr, consensusErr) } func (t *Transitive) GetVM() common.VM { diff --git a/snow/engine/snowman/block/batched_vm.go b/snow/engine/snowman/block/batched_vm.go index 32b9ef90ba53..db6470a19611 100644 --- a/snow/engine/snowman/block/batched_vm.go +++ b/snow/engine/snowman/block/batched_vm.go @@ -82,12 +82,13 @@ func GetAncestors( // Ensure response size isn't too large. Include wrappers.IntLen because // the size of the message is included with each container, and the size // is repr. by an int. 
- if newLen := ancestorsBytesLen + len(blkBytes) + wrappers.IntLen; newLen <= maxBlocksSize { - ancestorsBytes = append(ancestorsBytes, blkBytes) - ancestorsBytesLen = newLen - } else { // reached maximum response size + newLen := ancestorsBytesLen + len(blkBytes) + wrappers.IntLen + if newLen > maxBlocksSize { + // reached maximum response size break } + ancestorsBytes = append(ancestorsBytes, blkBytes) + ancestorsBytesLen = newLen } return ancestorsBytes, nil diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index fb9b6174d94f..2a6c227bf591 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -435,7 +435,7 @@ func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { if vmErr == nil { return intf, consensusErr } - return intf, fmt.Errorf("vm: %w ; consensus: %s", vmErr, consensusErr) + return intf, fmt.Errorf("vm: %w ; consensus: %w", vmErr, consensusErr) } func (t *Transitive) GetVM() common.VM { diff --git a/snow/networking/sender/mock_external_sender.go b/snow/networking/sender/mock_external_sender.go index 6261ba4774c1..3af7da8ceb29 100644 --- a/snow/networking/sender/mock_external_sender.go +++ b/snow/networking/sender/mock_external_sender.go @@ -12,9 +12,9 @@ import ( ids "github.com/ava-labs/avalanchego/ids" message "github.com/ava-labs/avalanchego/message" + subnets "github.com/ava-labs/avalanchego/subnets" set "github.com/ava-labs/avalanchego/utils/set" gomock "github.com/golang/mock/gomock" - subnets "github.com/ava-labs/avalanchego/subnets" ) // MockExternalSender is a mock of ExternalSender interface. diff --git a/tests/colors.go b/tests/colors.go index 8e0fc5d640d1..083bdf00fa81 100644 --- a/tests/colors.go +++ b/tests/colors.go @@ -11,13 +11,13 @@ import ( // Outputs to stdout. // -// e.g., -// Out("{{green}}{{bold}}hi there %q{{/}}", "aa") -// Out("{{magenta}}{{bold}}hi therea{{/}} {{cyan}}{{underline}}b{{/}}") +// Examples: // -// ref. -// https://github.com/onsi/ginkgo/blob/v2.0.0/formatter/formatter.go#L52-L73 +// - Out("{{green}}{{bold}}hi there %q{{/}}", "aa") +// - Out("{{magenta}}{{bold}}hi therea{{/}} {{cyan}}{{underline}}b{{/}}") // +// See https://github.com/onsi/ginkgo/blob/v2.0.0/formatter/formatter.go#L52-L73 +// for an exhaustive list of color options. func Outf(format string, args ...interface{}) { s := formatter.F(format, args...) 
fmt.Fprint(formatter.ColorableStdOut, s) diff --git a/tests/http.go b/tests/http.go index 36ee5bf40c38..569c6605d5d9 100644 --- a/tests/http.go +++ b/tests/http.go @@ -5,6 +5,7 @@ package tests import ( "bufio" + "context" "fmt" "io" "net/http" @@ -47,10 +48,16 @@ func GetMetricsValue(url string, metrics ...string) (map[string]float64, error) } func getHTTPLines(url string) ([]string, error) { - resp, err := http.Get(url) + req, err := http.NewRequestWithContext(context.TODO(), "GET", url, nil) if err != nil { return nil, err } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + rd := bufio.NewReader(resp.Body) lines := []string{} for { @@ -59,6 +66,7 @@ func getHTTPLines(url string) ([]string, error) { if err == io.EOF { break } + _ = resp.Body.Close() return nil, err } lines = append(lines, strings.TrimSpace(line)) diff --git a/utils/dynamicip/ifconfig_resolver.go b/utils/dynamicip/ifconfig_resolver.go index ec176be00ee1..a9e39d36ca3d 100644 --- a/utils/dynamicip/ifconfig_resolver.go +++ b/utils/dynamicip/ifconfig_resolver.go @@ -4,6 +4,7 @@ package dynamicip import ( + "context" "fmt" "io" "net" @@ -19,7 +20,12 @@ type ifConfigResolver struct { } func (r *ifConfigResolver) Resolve() (net.IP, error) { - resp, err := http.Get(r.url) + req, err := http.NewRequestWithContext(context.TODO(), "GET", r.url, nil) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) if err != nil { return nil, err } diff --git a/utils/hashing/consistent/ring.go b/utils/hashing/consistent/ring.go index 4fae79dc12cc..170e90579c1d 100644 --- a/utils/hashing/consistent/ring.go +++ b/utils/hashing/consistent/ring.go @@ -45,66 +45,66 @@ var ( // // As an example, assume we have a ring that supports hashes from 1-12. // -// 12 -// 11 1 +// 12 +// 11 1 // -// 10 2 +// 10 2 // -// 9 3 +// 9 3 // -// 8 4 +// 8 4 // -// 7 5 -// 6 +// 7 5 +// 6 // // Add node 1 (n1). Let h(n1) = 12. // First, we compute the hash the node, and insert it into its corresponding // location on the ring. // -// 12 (n1) -// 11 1 +// 12 (n1) +// 11 1 // -// 10 2 +// 10 2 // -// 9 3 +// 9 3 // -// 8 4 +// 8 4 // -// 7 5 -// 6 +// 7 5 +// 6 // // Now, to see which node a key (k1) should map to, we hash the key and search // for its closest clockwise neighbor. // Let h(k1) = 3. Here, we see that since n1 is the closest neighbor, as there // are no other nodes in the ring. // -// 12 (n1) -// 11 1 +// 12 (n1) +// 11 1 // -// 10 2 +// 10 2 // -// 9 3 (k1) +// 9 3 (k1) // -// 8 4 +// 8 4 // -// 7 5 -// 6 +// 7 5 +// 6 // // Now, let's insert another node (n2), such that h(n2) = 6. // Here we observe that k1 has shuffled to n2, as n2 is the closest clockwise // neighbor to k1. // -// 12 (n1) -// 11 1 +// 12 (n1) +// 11 1 // -// 10 2 +// 10 2 // -// 9 3 (k1) +// 9 3 (k1) // -// 8 4 +// 8 4 // -// 7 5 -// 6 (n2) +// 7 5 +// 6 (n2) // // Other optimizations can be made to help reduce blast radius of failures and // the variance in keys (hot shards). One such optimization is introducing diff --git a/utils/wrappers/closers.go b/utils/wrappers/closers.go index 834c64e2ecab..b2a46e9a16d8 100644 --- a/utils/wrappers/closers.go +++ b/utils/wrappers/closers.go @@ -24,7 +24,7 @@ func (c *Closer) Add(closer io.Closer) { } // Close closes each of the closers add to [c] and returns the first error -// that occurs or nil if no error occurs. +// that occurs or nil if no error occurs. 
func (c *Closer) Close() error { c.lock.Lock() closers := c.closers diff --git a/utils/zero.go b/utils/zero.go index 9812068139e2..6b2563e376c8 100644 --- a/utils/zero.go +++ b/utils/zero.go @@ -5,5 +5,5 @@ package utils // Returns a new instance of a T. func Zero[T any]() T { - return *new(T) //nolint:gocritic + return *new(T) } diff --git a/vms/avm/index_test.go b/vms/avm/index_test.go index 08ee1c9a461e..cb492acf72d7 100644 --- a/vms/avm/index_test.go +++ b/vms/avm/index_test.go @@ -584,12 +584,14 @@ func assertIndexedTX(t *testing.T, db database.Database, index uint64, sourceAdd } } -// Sets up test tx IDs in DB in the following structure for the indexer to pick them up: -// [address] prefix DB -// [assetID] prefix DB -// - "idx": 2 -// - 0: txID1 -// - 1: txID1 +// Sets up test tx IDs in DB in the following structure for the indexer to pick +// them up: +// +// [address] prefix DB +// [assetID] prefix DB +// - "idx": 2 +// - 0: txID1 +// - 1: txID1 func setupTestTxsInDB(t *testing.T, db *versiondb.Database, address ids.ShortID, assetID ids.ID, txCount int) []ids.ID { var testTxs []ids.ID for i := 0; i < txCount; i++ { diff --git a/vms/avm/service.go b/vms/avm/service.go index 875fac5df4a2..099b8cfecbbf 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -440,8 +440,10 @@ type GetAllBalancesReply struct { } // GetAllBalances returns a map where: -// Key: ID of an asset such that [args.Address] has a non-zero balance of the asset -// Value: The balance of the asset held by the address +// +// Key: ID of an asset such that [args.Address] has a non-zero balance of the asset +// Value: The balance of the asset held by the address +// // If ![args.IncludePartial], returns only unlocked balance/UTXOs with a 1-out-of-1 multisig. // Otherwise, returned balance/UTXOs includes assets held only partially by the // address, and includes balances with locktime in the future. diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index f70cac3e1650..a40b3a122648 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -121,8 +121,9 @@ func NewContext(tb testing.TB) *snow.Context { } // Returns: -// 1) tx in genesis that creates asset -// 2) the index of the output +// +// 1. tx in genesis that creates asset +// 2. the index of the output func GetCreateTxFromGenesisTest(tb testing.TB, genesisBytes []byte, assetName string) *txs.Tx { parser, err := txs.NewParser([]fxs.Fx{ &secp256k1fx.Fx{}, diff --git a/vms/components/avax/utxo_id.go b/vms/components/avax/utxo_id.go index 94c462b5bfa0..b6bd01dffde3 100644 --- a/vms/components/avax/utxo_id.go +++ b/vms/components/avax/utxo_id.go @@ -68,12 +68,12 @@ func UTXOIDFromString(s string) (*UTXOID, error) { txID, err := ids.FromString(ss[0]) if err != nil { - return nil, fmt.Errorf("%w: %s", errFailedDecodingUTXOIDTxID, err) + return nil, fmt.Errorf("%w: %w", errFailedDecodingUTXOIDTxID, err) } idx, err := strconv.ParseUint(ss[1], 10, 32) if err != nil { - return nil, fmt.Errorf("%w: %s", errFailedDecodingUTXOIDIndex, err) + return nil, fmt.Errorf("%w: %w", errFailedDecodingUTXOIDIndex, err) } return &UTXOID{ diff --git a/vms/manager.go b/vms/manager.go index d8e5cca25c04..1c536e82fc93 100644 --- a/vms/manager.go +++ b/vms/manager.go @@ -30,13 +30,14 @@ type Factory interface { // Manager tracks a collection of VM factories, their aliases, and their // versions. // It has the following functionality: -// 1) Register a VM factory. 
To register a VM is to associate its ID with a -// VMFactory which, when New() is called upon it, creates a new instance of -// that VM. -// 2) Get a VM factory. Given the ID of a VM that has been registered, return -// the factory that the ID is associated with. -// 3) Manage the aliases of VMs -// 3) Manage the versions of VMs +// +// 1. Register a VM factory. To register a VM is to associate its ID with a +// VMFactory which, when New() is called upon it, creates a new instance of +// that VM. +// 2. Get a VM factory. Given the ID of a VM that has been registered, return +// the factory that the ID is associated with. +// 3. Manage the aliases of VMs +// 4. Manage the versions of VMs type Manager interface { ids.Aliaser diff --git a/vms/platformvm/state/staker.go b/vms/platformvm/state/staker.go index 32e91c6d8aa5..597fa0e5bd12 100644 --- a/vms/platformvm/state/staker.go +++ b/vms/platformvm/state/staker.go @@ -59,6 +59,7 @@ type Staker struct { } // A *Staker is considered to be less than another *Staker when: +// // 1. If its NextTime is before the other's. // 2. If the NextTimes are the same, the *Staker with the lesser priority is the // lesser one. diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index b2b8bd3d659b..a98ed657afa6 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1333,7 +1333,7 @@ func (s *state) loadPendingValidators() error { } // Invariant: initValidatorSets requires loadCurrentValidators to have already -// been called. +// been called. func (s *state) initValidatorSets() error { primaryValidators, ok := s.cfg.Validators.Get(constants.PrimaryNetworkID) if !ok { diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index 28dc3da56284..7c8484c47dae 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -126,7 +126,7 @@ func verifyAddValidatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkValidatorFee, }, ); err != nil { - return nil, fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return nil, fmt.Errorf("%w: %w", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. This is done last @@ -225,7 +225,7 @@ func verifyAddSubnetValidatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddSubnetValidatorFee, }, ); err != nil { - return fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return fmt.Errorf("%w: %w", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. This is done last @@ -266,7 +266,7 @@ func removeSubnetValidatorValidation( if err != nil { // It isn't a current or pending validator. return nil, false, fmt.Errorf( - "%s %w of %s: %s", + "%s %w of %s: %w", tx.NodeID, errNotValidator, tx.Subnet, @@ -300,7 +300,7 @@ func removeSubnetValidatorValidation( backend.Ctx.AVAXAssetID: backend.Config.TxFee, }, ); err != nil { - return nil, false, fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return nil, false, fmt.Errorf("%w: %w", errFlowCheckFailed, err) } return vdr, isCurrentValidator, nil @@ -401,7 +401,7 @@ func verifyAddDelegatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkDelegatorFee, }, ); err != nil { - return nil, fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return nil, fmt.Errorf("%w: %w", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. 
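The %s-to-%w rewrites in these verification paths rely on fmt.Errorf wrapping more than one operand, a Go 1.20 feature that lets errors.Is match both the sentinel and the underlying cause (the last patch in this series later reverts these sites when the toolchain is pinned back to 1.19). A small, self-contained illustration, assuming a Go 1.20 toolchain:

    package main

    import (
        "errors"
        "fmt"
    )

    var errFlowCheckFailed = errors.New("flow check failed")

    func main() {
        cause := errors.New("insufficient funds")

        // Go 1.20+: both operands of %w are wrapped.
        err := fmt.Errorf("%w: %w", errFlowCheckFailed, cause)
        fmt.Println(errors.Is(err, errFlowCheckFailed)) // true
        fmt.Println(errors.Is(err, cause))              // true

        // Pre-1.20 style: only the first operand is wrapped.
        old := fmt.Errorf("%w: %s", errFlowCheckFailed, cause)
        fmt.Println(errors.Is(old, errFlowCheckFailed)) // true
        fmt.Println(errors.Is(old, cause))              // false
    }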
This is done last @@ -536,7 +536,7 @@ func verifyAddPermissionlessValidatorTx( backend.Ctx.AVAXAssetID: txFee, }, ); err != nil { - return fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return fmt.Errorf("%w: %w", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. This is done last @@ -717,7 +717,7 @@ func verifyAddPermissionlessDelegatorTx( backend.Ctx.AVAXAssetID: txFee, }, ); err != nil { - return fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return fmt.Errorf("%w: %w", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. This is done last diff --git a/vms/platformvm/txs/executor/subnet_tx_verification.go b/vms/platformvm/txs/executor/subnet_tx_verification.go index 276481be1e40..7a2d4a5718d0 100644 --- a/vms/platformvm/txs/executor/subnet_tx_verification.go +++ b/vms/platformvm/txs/executor/subnet_tx_verification.go @@ -70,7 +70,7 @@ func verifySubnetAuthorization( subnetIntf, _, err := chainState.GetTx(subnetID) if err != nil { return nil, fmt.Errorf( - "%w %q: %s", + "%w %q: %w", errCantFindSubnet, subnetID, err, @@ -83,7 +83,7 @@ func verifySubnetAuthorization( } if err := backend.Fx.VerifyPermission(sTx.Unsigned, subnetAuth, subnetCred, subnet.Owner); err != nil { - return nil, fmt.Errorf("%w: %s", errUnauthorizedSubnetModification, err) + return nil, fmt.Errorf("%w: %w", errUnauthorizedSubnetModification, err) } return sTx.Creds[:baseTxCredsLen], nil diff --git a/vms/platformvm/utxo/handler.go b/vms/platformvm/utxo/handler.go index b11163601841..1d1b843b571a 100644 --- a/vms/platformvm/utxo/handler.go +++ b/vms/platformvm/utxo/handler.go @@ -33,7 +33,7 @@ var ( ) // TODO: Stake and Authorize should be replaced by similar methods in the -// P-chain wallet +// P-chain wallet type Spender interface { // Spend the provided amount while deducting the provided fee. // Arguments: diff --git a/vms/platformvm/warp/signature.go b/vms/platformvm/warp/signature.go index afb33ec1a6be..52c58f8f8a54 100644 --- a/vms/platformvm/warp/signature.go +++ b/vms/platformvm/warp/signature.go @@ -115,7 +115,7 @@ func (s *BitSetSignature) Verify( // Parse the aggregate signature aggSig, err := bls.SignatureFromBytes(s.Signature[:]) if err != nil { - return fmt.Errorf("%w: %s", ErrParseSignature, err) + return fmt.Errorf("%w: %w", ErrParseSignature, err) } // Create the aggregate public key diff --git a/vms/platformvm/warp/validator.go b/vms/platformvm/warp/validator.go index 6f04872a2630..56e7213dbcd8 100644 --- a/vms/platformvm/warp/validator.go +++ b/vms/platformvm/warp/validator.go @@ -59,7 +59,7 @@ func GetCanonicalValidatorSet( for _, vdr := range vdrSet { totalWeight, err = math.Add64(totalWeight, vdr.Weight) if err != nil { - return nil, 0, fmt.Errorf("%w: %s", ErrWeightOverflow, err) + return nil, 0, fmt.Errorf("%w: %w", ErrWeightOverflow, err) } if vdr.PublicKey == nil { @@ -124,7 +124,7 @@ func SumWeight(vdrs []*Validator) (uint64, error) { for _, vdr := range vdrs { weight, err = math.Add64(weight, vdr.Weight) if err != nil { - return 0, fmt.Errorf("%w: %s", ErrWeightOverflow, err) + return 0, fmt.Errorf("%w: %w", ErrWeightOverflow, err) } } return weight, nil diff --git a/vms/proposervm/block/codec.go b/vms/proposervm/block/codec.go index 042174d98f52..ad7640d2acb9 100644 --- a/vms/proposervm/block/codec.go +++ b/vms/proposervm/block/codec.go @@ -17,7 +17,7 @@ const codecVersion = 0 // See: [constants.DefaultMaxMessageSize] // // Invariant: This codec must never be used to unmarshal a slice unless it is a -// `[]byte`. 
Otherwise a malicious payload could cause an OOM. +// `[]byte`. Otherwise a malicious payload could cause an OOM. var c codec.Manager func init() { diff --git a/vms/proposervm/state_summary.go b/vms/proposervm/state_summary.go index 977b1c776422..2113621a2f98 100644 --- a/vms/proposervm/state_summary.go +++ b/vms/proposervm/state_summary.go @@ -13,15 +13,16 @@ import ( var _ block.StateSummary = (*stateSummary)(nil) // stateSummary implements block.StateSummary by layering three objects: -// 1. [statelessSummary] carries all summary marshallable content along with -// data immediately retrievable from it. -// 2. [innerSummary] reports the height of the summary as well as notifying the -// inner vm of the summary's acceptance. -// 3. [block] is used to update the proposervm's last accepted block upon -// Accept. +// +// 1. [statelessSummary] carries all summary marshallable content along with +// data immediately retrievable from it. +// 2. [innerSummary] reports the height of the summary as well as notifying the +// inner vm of the summary's acceptance. +// 3. [block] is used to update the proposervm's last accepted block upon +// Accept. // // Note: summary.StatelessSummary contains the data to build both [innerSummary] -// and [block]. +// and [block]. type stateSummary struct { summary.StateSummary diff --git a/vms/proposervm/vm_byzantine_test.go b/vms/proposervm/vm_byzantine_test.go index 0d9da030235d..a25cb9017bf0 100644 --- a/vms/proposervm/vm_byzantine_test.go +++ b/vms/proposervm/vm_byzantine_test.go @@ -24,11 +24,11 @@ import ( // parent block (X) is issued into a PostForkBlock (A) will be marked as invalid // correctly. // -// G -// / | -// A - X -// | -// Y +// G +// / | +// A - X +// | +// Y func TestInvalidByzantineProposerParent(t *testing.T) { forkTime := time.Unix(0, 0) // enable ProBlks coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) @@ -97,11 +97,11 @@ func TestInvalidByzantineProposerParent(t *testing.T) { // the parent block (X) is issued into a PostForkBlock (A) will be marked as // invalid correctly. // -// G -// / | -// A - X -// / \ -// Y Z +// G +// / | +// A - X +// / \ +// Y Z func TestInvalidByzantineProposerOracleParent(t *testing.T) { coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) proVM.Set(coreGenBlk.Timestamp()) @@ -218,11 +218,11 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { // parent block (X) is issued into a PostForkBlock (A) will be marked as invalid // correctly. // -// G -// / | -// A - X -// / | -// B - Y +// G +// / | +// A - X +// / | +// B - Y func TestInvalidByzantineProposerPreForkParent(t *testing.T) { forkTime := time.Unix(0, 0) // enable ProBlks coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) @@ -324,11 +324,11 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { // contains core block (Y) whose parent (G) doesn't match (B)'s parent (A)'s // inner block (X) will be marked as invalid correctly. // -// G -// / | \ -// A - X | -// | / -// B - Y +// G +// / | \ +// A - X | +// | / +// B - Y func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) proVM.Set(coreGenBlk.Timestamp()) @@ -422,13 +422,13 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { } } -// ,--G ----. -// / \ \ -// A(X) B(Y) C(Z) -// | \_ /_____/ -// |\ / | -// | \/ | -// O2 O1 O3 +// ,--G ----. 
+// / \ \ +// A(X) B(Y) C(Z) +// | \_ /_____/ +// |\ / | +// | \/ | +// O2 O1 O3 // // O1.parent = B (non-Oracle), O1.inner = first option of X (invalid) // O2.parent = A (original), O2.inner = first option of X (valid) diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index fe0baccb598b..0993bf83775e 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -1554,11 +1554,11 @@ func TestBuildBlockDuringWindow(t *testing.T) { // Ensure that Accepting a PostForkBlock (A) containing core block (X) causes // core block (Y) and (Z) to also be rejected. // -// G -// / \ -// A(X) B(Y) -// | -// C(Z) +// G +// / \ +// A(X) B(Y) +// | +// C(Z) func TestTwoForks_OneIsAccepted(t *testing.T) { forkTime := time.Unix(0, 0) coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) @@ -1765,11 +1765,11 @@ func TestTooFarAdvanced(t *testing.T) { // Ensure that Accepting a PostForkOption (B) causes both the other option and // the core block in the other option to be rejected. // -// G -// | -// A(X) -// /====\ -// B(...) C(...) +// G +// | +// A(X) +// /====\ +// B(...) C(...) // // B(...) is B(X.opts[0]) // B(...) is C(X.opts[1]) diff --git a/vms/rpcchainvm/runtime/subprocess/runtime.go b/vms/rpcchainvm/runtime/subprocess/runtime.go index d606426d10d3..209bbab779ab 100644 --- a/vms/rpcchainvm/runtime/subprocess/runtime.go +++ b/vms/rpcchainvm/runtime/subprocess/runtime.go @@ -132,12 +132,12 @@ func Bootstrap( case <-intitializer.initialized: case <-timeout.C: stopper.Stop(ctx) - return nil, nil, fmt.Errorf("%w: %v", runtime.ErrHandshakeFailed, runtime.ErrProcessNotFound) + return nil, nil, fmt.Errorf("%w: %w", runtime.ErrHandshakeFailed, runtime.ErrProcessNotFound) } if intitializer.err != nil { stopper.Stop(ctx) - return nil, nil, fmt.Errorf("%w: %v", runtime.ErrHandshakeFailed, err) + return nil, nil, fmt.Errorf("%w: %w", runtime.ErrHandshakeFailed, err) } log.Info("plugin handshake succeeded", diff --git a/wallet/chain/p/builder_with_options.go b/wallet/chain/p/builder_with_options.go index 5d96fd80540a..38b0db4cf740 100644 --- a/wallet/chain/p/builder_with_options.go +++ b/wallet/chain/p/builder_with_options.go @@ -25,7 +25,7 @@ type builderWithOptions struct { // given options by default. // // - [builder] is the builder that will be called to perform the underlying -// opterations. +// operations. // - [options] will be provided to the builder in addition to the options // provided in the method calls. func NewBuilderWithOptions(builder Builder, options ...common.Option) Builder { diff --git a/wallet/chain/x/builder_with_options.go b/wallet/chain/x/builder_with_options.go index 10a65c03a0e4..87aeade37d56 100644 --- a/wallet/chain/x/builder_with_options.go +++ b/wallet/chain/x/builder_with_options.go @@ -22,10 +22,10 @@ type builderWithOptions struct { // NewBuilderWithOptions returns a new transaction builder that will use the // given options by default. // -// - [builder] is the builder that will be called to perform the underlying -// opterations. -// - [options] will be provided to the builder in addition to the options -// provided in the method calls. +// - [builder] is the builder that will be called to perform the underlying +// operations. +// - [options] will be provided to the builder in addition to the options +// provided in the method calls. 
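The builder_with_options files in both wallet chains implement the same small decorator, beyond the "opterations" typo fix shown here: a wrapper that satisfies the Builder interface and folds a default set of options into every call. A reduced sketch of that pattern; Option, Builder and the merge order are stand-ins, since the real wallet interfaces are much larger:

    package main

    import "fmt"

    // Option is a stand-in for the wallet's common.Option type.
    type Option string

    // Builder is a stand-in for the wallet's transaction builder interface,
    // reduced to a single method for the sketch.
    type Builder interface {
        NewBaseTx(amount uint64, options ...Option) (string, error)
    }

    // builderWithOptions forwards every call to the wrapped Builder, merging a
    // fixed set of default options with whatever the caller passed. The merge
    // order here is illustrative.
    type builderWithOptions struct {
        builder Builder
        options []Option
    }

    func NewBuilderWithOptions(builder Builder, options ...Option) Builder {
        return &builderWithOptions{builder: builder, options: options}
    }

    func (b *builderWithOptions) NewBaseTx(amount uint64, options ...Option) (string, error) {
        merged := make([]Option, 0, len(options)+len(b.options))
        merged = append(merged, options...)
        merged = append(merged, b.options...)
        return b.builder.NewBaseTx(amount, merged...)
    }

    type stubBuilder struct{}

    func (stubBuilder) NewBaseTx(amount uint64, options ...Option) (string, error) {
        return fmt.Sprintf("tx(amount=%d, options=%v)", amount, options), nil
    }

    func main() {
        b := NewBuilderWithOptions(stubBuilder{}, "memo=default")
        tx, _ := b.NewBaseTx(100, "locktime=0")
        fmt.Println(tx) // tx(amount=100, options=[locktime=0 memo=default])
    }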
func NewBuilderWithOptions(builder Builder, options ...common.Option) Builder { return &builderWithOptions{ Builder: builder, diff --git a/wallet/subnet/primary/api.go b/wallet/subnet/primary/api.go index e4f11bbdcc60..cd51e2e19455 100644 --- a/wallet/subnet/primary/api.go +++ b/wallet/subnet/primary/api.go @@ -28,8 +28,8 @@ const ( fetchLimit = 1024 ) -// TODO: refactor UTXOClient definition to allow the client implementations to -// perform their own assertions. +// TODO: Refactor UTXOClient definition to allow the client implementations to +// perform their own assertions. var ( _ UTXOClient = platformvm.Client(nil) _ UTXOClient = avm.Client(nil) diff --git a/x/merkledb/proof.go b/x/merkledb/proof.go index ff10cd98a1f2..81847a67725b 100644 --- a/x/merkledb/proof.go +++ b/x/merkledb/proof.go @@ -146,17 +146,17 @@ type RangeProof struct { // - [start] <= [end]. // - [proof] is non-empty. // - All keys in [proof.KeyValues] are in the range [start, end]. -// - If [start] is empty, all keys are considered > [start]. -// - If [end] is empty, all keys are considered < [end]. +// If [start] is empty, all keys are considered > [start]. +// If [end] is empty, all keys are considered < [end]. // - [proof.KeyValues] is sorted by increasing key. // - [proof.StartProof] and [proof.EndProof] are well-formed. // - One of the following holds: -// - [end] and [proof.EndProof] are empty. -// - [proof.StartProof], [start], [end], and [proof.KeyValues] are empty and +// [end] and [proof.EndProof] are empty. +// [proof.StartProof], [start], [end], and [proof.KeyValues] are empty and // [proof.EndProof] is just the root. -// - [end] is non-empty and [proof.EndProof] is a valid proof of a key <= [end]. -// - [expectedRootID] is the root of the trie containing the given key-value pairs -// and start/end proofs. +// [end] is non-empty and [proof.EndProof] is a valid proof of a key <= [end]. +// - [expectedRootID] is the root of the trie containing the given key-value +// pairs and start/end proofs. func (proof *RangeProof) Verify( ctx context.Context, start []byte, @@ -308,8 +308,8 @@ type ChangeProof struct { // - [start] <= [end]. // - [proof] is non-empty iff [proof.HadRootsInHistory]. // - All keys in [proof.KeyValues] and [proof.DeletedKeys] are in [start, end]. -// - If [start] is empty, all keys are considered > [start]. -// - If [end] is empty, all keys are considered < [end]. +// If [start] is empty, all keys are considered > [start]. +// If [end] is empty, all keys are considered < [end]. // - [proof.KeyValues] and [proof.DeletedKeys] are sorted in order of increasing key. // - [proof.StartProof] and [proof.EndProof] are well-formed. // - When the keys in [proof.KeyValues] are added to [db] and the keys in [proof.DeletedKeys] @@ -539,12 +539,12 @@ func verifyKeyValues(kvs []KeyValue, start, end []byte) error { } // Returns nil iff all the following hold: -// - Any node with an odd nibble length, should not have a value associated with it -// since all keys with values are written in bytes, so have even nibble length. -// - Each key in [proof] is a strict prefix of the following key. -// - Each key in [proof] is a strict prefix of [keyBytes], except possibly the last. -// - If the last element in [proof] is [keyBytes], this is an inclusion proof. -// Otherwise, this is an exclusion proof and [keyBytes] must not be in [proof]. +// - Any node with an odd nibble length, should not have a value associated with it +// since all keys with values are written in bytes, so have even nibble length. 
+// - Each key in [proof] is a strict prefix of the following key. +// - Each key in [proof] is a strict prefix of [keyBytes], except possibly the last. +// - If the last element in [proof] is [keyBytes], this is an inclusion proof. +// Otherwise, this is an exclusion proof and [keyBytes] must not be in [proof]. func verifyProofPath(proof []ProofNode, keyPath path) error { provenKey := keyPath.Serialize() diff --git a/x/sync/client.go b/x/sync/client.go index 3adba39f2375..84f4d9a5b1ec 100644 --- a/x/sync/client.go +++ b/x/sync/client.go @@ -88,7 +88,7 @@ func (c *client) GetChangeProof(ctx context.Context, req *ChangeProofRequest, db } if err := changeProof.Verify(ctx, db, req.Start, req.End, req.EndingRoot); err != nil { - return nil, fmt.Errorf("%s due to %w", errInvalidRangeProof, err) + return nil, fmt.Errorf("%w due to %w", errInvalidRangeProof, err) } return changeProof, nil } @@ -116,7 +116,7 @@ func (c *client) GetRangeProof(ctx context.Context, req *RangeProofRequest) (*me req.End, req.Root, ); err != nil { - return nil, fmt.Errorf("%s due to %w", errInvalidRangeProof, err) + return nil, fmt.Errorf("%w due to %w", errInvalidRangeProof, err) } return rangeProof, nil } @@ -143,7 +143,7 @@ func getAndParse[T any](ctx context.Context, client *client, request Request, pa // If the context has finished, return the context error early. if err := ctx.Err(); err != nil { if lastErr != nil { - return nil, fmt.Errorf("request failed after %d attempts with last error %w and ctx error %s", attempt, lastErr, err) + return nil, fmt.Errorf("request failed after %d attempts with last error %w and ctx error %w", attempt, lastErr, err) } return nil, err } diff --git a/x/sync/syncmanager.go b/x/sync/syncmanager.go index d5c633d9b82e..de48dc52cb08 100644 --- a/x/sync/syncmanager.go +++ b/x/sync/syncmanager.go @@ -660,12 +660,12 @@ func midPoint(start, end []byte) []byte { total -= 256 index := i - 1 for index >= 0 { - if midpoint[index] == 255 { - midpoint[index] = 0 - } else { + if midpoint[index] != 255 { midpoint[index]++ break } + + midpoint[index] = 0 index-- } } From 1a2dca18d22a7a78e421e95dbdbe038287ee8361 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 28 Feb 2023 02:37:14 -0500 Subject: [PATCH 24/27] Set linger on p2p connections (#2646) Co-authored-by: Patrick O'Grady --- network/network.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/network/network.go b/network/network.go index ea502c239af1..a4d0de311854 100644 --- a/network/network.go +++ b/network/network.go @@ -49,6 +49,11 @@ const ( TimeSinceLastMsgReceivedKey = "timeSinceLastMsgReceived" TimeSinceLastMsgSentKey = "timeSinceLastMsgSent" SendFailRateKey = "sendFailRate" + + // lingerTimeout is the amount of time (in seconds) we allow for the + // remaining data in a connection to be flushed before forcibly closing the + // connection (TCP RST). + lingerTimeout = 15 ) var ( @@ -731,6 +736,7 @@ func (n *network) Dispatch() error { n.metrics.acceptFailed.Inc() continue } + n.setLinger(conn) // Note: listener.Accept is rate limited outside of this package, so a // peer can not just arbitrarily spin up goroutines here. 
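The midPoint change in x/sync/syncmanager.go above restructures its carry loop so the common case (a byte below 255) increments and breaks immediately, and only a wrapped byte resets to zero and keeps carrying left. That loop is ordinary big-endian carry propagation; a standalone sketch with propagateCarry as a hypothetical name:

    package main

    import "fmt"

    // propagateCarry adds one to the byte at position i of a big-endian value,
    // carrying left for as long as the incremented byte wraps past 255.
    func propagateCarry(b []byte, i int) {
        for index := i; index >= 0; index-- {
            if b[index] != 255 {
                b[index]++
                break
            }
            // 255 wraps to 0 and the carry continues leftward.
            b[index] = 0
        }
    }

    func main() {
        v := []byte{0x12, 0xff, 0xff}
        propagateCarry(v, 2)
        fmt.Printf("%x\n", v) // 130000
    }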
@@ -1124,6 +1130,7 @@ func (n *network) dial(ctx context.Context, nodeID ids.NodeID, ip *trackedIP) { ) continue } + n.setLinger(conn) n.peerConfig.Log.Verbo("starting to upgrade connection", zap.String("direction", "outbound"), @@ -1144,6 +1151,24 @@ func (n *network) dial(ctx context.Context, nodeID ids.NodeID, ip *trackedIP) { }() } +// setLinger sets the linger on [conn], if it is a [*net.TCPConn], to +// [lingerTimeout]. +func (n *network) setLinger(conn net.Conn) { + tcpConn, ok := conn.(*net.TCPConn) + if !ok { + return + } + + // If a connection is closed, we allow a grace period for the unsent + // data in the connection to be flushed before forcibly closing the + // connection (TCP RST). + if err := tcpConn.SetLinger(lingerTimeout); err != nil { + n.peerConfig.Log.Warn("failed to set no linger", + zap.Error(err), + ) + } +} + // upgrade the provided connection, which may be an inbound connection or an // outbound connection, with the provided [upgrader]. // From 490f27cc56ab9451ee6a682143e09af8a3833909 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 28 Feb 2023 15:56:47 -0500 Subject: [PATCH 25/27] [merkledb] remove `cache.Cache` from `onEvictCache` (#2674) --- x/merkledb/cache.go | 7 +------ x/merkledb/cache_test.go | 1 - 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/x/merkledb/cache.go b/x/merkledb/cache.go index 57979af0fa63..e86f127a7c44 100644 --- a/x/merkledb/cache.go +++ b/x/merkledb/cache.go @@ -6,7 +6,6 @@ package merkledb import ( "sync" - "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -17,7 +16,6 @@ type onEvictCache[K comparable, V any] struct { maxSize int // LRU --> MRU from left to right. lru linkedhashmap.LinkedHashmap[K, V] - cache cache.Cacher[K, V] onEviction func(V) error } @@ -25,7 +23,6 @@ func newOnEvictCache[K comparable, V any](maxSize int, onEviction func(V) error) return onEvictCache[K, V]{ maxSize: maxSize, lru: linkedhashmap.New[K, V](), - cache: &cache.LRU[K, V]{Size: maxSize}, onEviction: onEviction, } } @@ -35,7 +32,7 @@ func (c *onEvictCache[K, V]) Get(key K) (V, bool) { c.lock.Lock() defer c.lock.Unlock() - val, ok := c.cache.Get(key) + val, ok := c.lru.Get(key) if ok { // This key was touched; move it to the MRU position. 
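This merkledb patch removes the redundant cache.Cacher so a single LRU structure tracks recency: a Get moves the hit to the MRU position, and a Put past maxSize evicts the LRU entry and hands its value to onEviction. A self-contained analogue built on the standard library's container/list and a map rather than the repo's linkedhashmap; the names mirror the patch but the implementation is only a sketch:

    package main

    import (
        "container/list"
        "fmt"
    )

    // onEvictCache is a minimal LRU that calls onEviction for entries pushed out
    // once the cache exceeds maxSize. A single ordered structure is the source of
    // truth for both lookup and recency.
    type onEvictCache[K comparable, V any] struct {
        maxSize    int
        order      *list.List          // front = LRU, back = MRU
        entries    map[K]*list.Element // key -> element in order
        onEviction func(V) error
    }

    type entry[K comparable, V any] struct {
        key K
        val V
    }

    func newOnEvictCache[K comparable, V any](maxSize int, onEviction func(V) error) *onEvictCache[K, V] {
        return &onEvictCache[K, V]{
            maxSize:    maxSize,
            order:      list.New(),
            entries:    make(map[K]*list.Element),
            onEviction: onEviction,
        }
    }

    func (c *onEvictCache[K, V]) Get(key K) (V, bool) {
        if elt, ok := c.entries[key]; ok {
            c.order.MoveToBack(elt) // touched: now MRU
            return elt.Value.(*entry[K, V]).val, true
        }
        var zero V
        return zero, false
    }

    func (c *onEvictCache[K, V]) Put(key K, val V) error {
        if elt, ok := c.entries[key]; ok {
            elt.Value.(*entry[K, V]).val = val
            c.order.MoveToBack(elt)
            return nil
        }
        c.entries[key] = c.order.PushBack(&entry[K, V]{key: key, val: val})
        if c.order.Len() <= c.maxSize {
            return nil
        }
        // Evict the LRU entry and notify the caller.
        oldest := c.order.Front()
        c.order.Remove(oldest)
        e := oldest.Value.(*entry[K, V])
        delete(c.entries, e.key)
        return c.onEviction(e.val)
    }

    func main() {
        cache := newOnEvictCache[string, int](2, func(v int) error {
            fmt.Println("evicted", v)
            return nil
        })
        _ = cache.Put("a", 1)
        _ = cache.Put("b", 2)
        _, _ = cache.Get("a") // "a" becomes MRU
        _ = cache.Put("c", 3) // evicts "b", prints "evicted 2"
        fmt.Println(cache.Get("a"))
    }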
c.lru.Put(key, val) @@ -50,7 +47,6 @@ func (c *onEvictCache[K, V]) Put(key K, value V) error { c.lock.Lock() defer c.lock.Unlock() - c.cache.Put(key, value) c.lru.Put(key, value) // Mark as MRU if c.lru.Len() > c.maxSize { @@ -70,7 +66,6 @@ func (c *onEvictCache[K, V]) Put(key K, value V) error { func (c *onEvictCache[K, V]) Flush() error { c.lock.Lock() defer func() { - c.cache.Flush() c.lru = linkedhashmap.New[K, V]() c.lock.Unlock() }() diff --git a/x/merkledb/cache_test.go b/x/merkledb/cache_test.go index 50691ee676eb..b529801d0517 100644 --- a/x/merkledb/cache_test.go +++ b/x/merkledb/cache_test.go @@ -26,7 +26,6 @@ func TestNewOnEvictCache(t *testing.T) { require.Equal(maxSize, cache.maxSize) require.NotNil(cache.lru) require.Equal(0, cache.lru.Len()) - require.NotNil(cache.cache) // Can't test function equality directly so do this // to make sure it was assigned correctly err := cache.onEviction(0) From 51eae95428df2b5dce5ef2a4714f381e2af9afa6 Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Wed, 1 Mar 2023 15:13:55 -0500 Subject: [PATCH 26/27] Ignore sigint signals to ensure proper shutdown flow (#2682) --- .../runtime/subprocess/linux_stopper.go | 4 +-- vms/rpcchainvm/vm.go | 31 +++++++++++++------ 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/vms/rpcchainvm/runtime/subprocess/linux_stopper.go b/vms/rpcchainvm/runtime/subprocess/linux_stopper.go index 3963ed7f6aeb..7ba34a49b0a2 100644 --- a/vms/rpcchainvm/runtime/subprocess/linux_stopper.go +++ b/vms/rpcchainvm/runtime/subprocess/linux_stopper.go @@ -23,7 +23,7 @@ import ( func NewCmd(path string, args ...string) *exec.Cmd { cmd := exec.Command(path, args...) - cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGINT} + cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGTERM} return cmd } @@ -32,7 +32,7 @@ func stop(ctx context.Context, log logging.Logger, cmd *exec.Cmd) { go func() { // attempt graceful shutdown errs := wrappers.Errs{} - err := cmd.Process.Signal(syscall.SIGINT) + err := cmd.Process.Signal(syscall.SIGTERM) errs.Add(err) _, err = cmd.Process.Wait() errs.Add(err) diff --git a/vms/rpcchainvm/vm.go b/vms/rpcchainvm/vm.go index 6be646f3f44e..40ab86f1b0f2 100644 --- a/vms/rpcchainvm/vm.go +++ b/vms/rpcchainvm/vm.go @@ -33,20 +33,31 @@ const defaultRuntimeDialTimeout = 5 * time.Second // // Serve starts the RPC Chain VM server and performs a handshake with the VM runtime service. func Serve(ctx context.Context, vm block.ChainVM, opts ...grpcutils.ServerOption) error { - shutdownHandler := make(chan os.Signal, 2) - signal.Notify(shutdownHandler, os.Interrupt, syscall.SIGTERM) + signals := make(chan os.Signal, 2) + signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) server := newVMServer(vm, opts...) 
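Patch 26 reworks the plugin's Serve entrypoint so SIGINT is ignored and graceful shutdown only begins on SIGTERM or context cancellation, matching the runtime stopper's switch from SIGINT to SIGTERM. A condensed, standalone sketch of that wait loop; waitForShutdown is a hypothetical wrapper, and the real Serve defers server.GracefulStop() around this logic:

    package main

    import (
        "context"
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    // waitForShutdown blocks until the context is cancelled or a SIGTERM arrives.
    // SIGINT is deliberately swallowed so an interactive ^C against the process
    // group does not kill the plugin before its parent asks it to stop.
    func waitForShutdown(ctx context.Context) {
        signals := make(chan os.Signal, 2)
        signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
        defer signal.Stop(signals)

        for {
            select {
            case s := <-signals:
                switch s {
                case syscall.SIGINT:
                    fmt.Println("ignoring signal: SIGINT")
                case syscall.SIGTERM:
                    fmt.Println("received shutdown signal: SIGTERM")
                    return
                }
            case <-ctx.Done():
                fmt.Println("context has been cancelled")
                return
            }
        }
    }

    func main() {
        waitForShutdown(context.Background())
        // Graceful teardown (e.g. server.GracefulStop()) would run here.
    }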
- go func(ctx context.Context) { - select { - case <-shutdownHandler: - fmt.Println("runtime engine: received shutdown signal") - case <-ctx.Done(): - fmt.Println("runtime engine: context has been cancelled") + defer func() { + server.GracefulStop() + fmt.Println("vm server: graceful termination success") + }() + + for { + select { + case s := <-signals: + switch s { + case syscall.SIGINT: + fmt.Println("runtime engine: ignoring signal: SIGINT") + case syscall.SIGTERM: + fmt.Println("runtime engine: received shutdown signal: SIGTERM") + return + } + case <-ctx.Done(): + fmt.Println("runtime engine: context has been cancelled") + return + } } - server.GracefulStop() - fmt.Println("vm server: graceful termination success") }(ctx) // address of Runtime server from ENV From 98c4466b0dba3d404a62e76a1e1623ec0621ba24 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 1 Mar 2023 19:09:45 -0500 Subject: [PATCH 27/27] Update versions for v1.9.10 (#2673) Co-authored-by: Patrick O'Grady --- .../workflows/build-and-test-mac-windows.yml | 2 +- .github/workflows/build-and-test.yml | 2 +- .github/workflows/build-linux-binaries.yml | 4 +- .github/workflows/build-macos-release.yml | 2 +- .github/workflows/build-public-api.yml | 2 +- .../workflows/build-ubuntu-amd64-release.yml | 4 +- .../workflows/build-ubuntu-arm64-release.yml | 4 +- .github/workflows/build-win-release.yml | 2 +- .github/workflows/build.yml | 2 +- ...atic-analysis.yaml => static-analysis.yml} | 2 +- .github/workflows/test.e2e.yml | 2 +- .github/workflows/test.upgrade.yml | 2 +- .golangci.yml | 4 +- Dockerfile | 2 +- README.md | 2 +- RELEASES.md | 37 +++++++++++++++++++ go.mod | 5 ++- go.sum | 6 ++- proto/Dockerfile.buf | 2 +- proto/README.md | 2 +- scripts/build_avalanche.sh | 2 +- scripts/build_coreth.sh | 2 +- scripts/constants.sh | 2 +- scripts/local.Dockerfile | 2 +- snow/engine/avalanche/transitive.go | 2 +- snow/engine/snowman/transitive.go | 2 +- version/compatibility.json | 3 ++ version/constants.go | 4 +- vms/components/avax/utxo_id.go | 4 +- .../txs/executor/staker_tx_verification.go | 14 +++---- .../txs/executor/subnet_tx_verification.go | 4 +- vms/platformvm/warp/signature.go | 2 +- vms/platformvm/warp/validator.go | 4 +- vms/rpcchainvm/runtime/subprocess/runtime.go | 4 +- x/sync/client.go | 6 +-- 35 files changed, 95 insertions(+), 52 deletions(-) rename .github/workflows/{static-analysis.yaml => static-analysis.yml} (94%) diff --git a/.github/workflows/build-and-test-mac-windows.yml b/.github/workflows/build-and-test-mac-windows.yml index c71dbf32489b..b9ea9edaed58 100644 --- a/.github/workflows/build-and-test-mac-windows.yml +++ b/.github/workflows/build-and-test-mac-windows.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - name: build_test shell: bash diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 8e0db4a3eade..b8042d2a2dac 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - name: build_test shell: bash diff --git a/.github/workflows/build-linux-binaries.yml b/.github/workflows/build-linux-binaries.yml index 679425e49699..96b9ee0db65b 100644 --- a/.github/workflows/build-linux-binaries.yml +++ b/.github/workflows/build-linux-binaries.yml @@ -14,7 +14,7 @@ jobs: - uses: 
actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - run: go version @@ -54,7 +54,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - run: go version diff --git a/.github/workflows/build-macos-release.yml b/.github/workflows/build-macos-release.yml index 8c7ba7de87b0..9cacaefa0ef4 100644 --- a/.github/workflows/build-macos-release.yml +++ b/.github/workflows/build-macos-release.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - run: go version diff --git a/.github/workflows/build-public-api.yml b/.github/workflows/build-public-api.yml index 8a7fba22b6a8..0f9aba888105 100644 --- a/.github/workflows/build-public-api.yml +++ b/.github/workflows/build-public-api.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - run: go version diff --git a/.github/workflows/build-ubuntu-amd64-release.yml b/.github/workflows/build-ubuntu-amd64-release.yml index ec38fb275d18..492cc617ec99 100644 --- a/.github/workflows/build-ubuntu-amd64-release.yml +++ b/.github/workflows/build-ubuntu-amd64-release.yml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - run: go version @@ -54,7 +54,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - run: go version diff --git a/.github/workflows/build-ubuntu-arm64-release.yml b/.github/workflows/build-ubuntu-arm64-release.yml index 8ea87edb8093..208aae29ed9f 100644 --- a/.github/workflows/build-ubuntu-arm64-release.yml +++ b/.github/workflows/build-ubuntu-arm64-release.yml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - run: go version @@ -54,7 +54,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - run: go version diff --git a/.github/workflows/build-win-release.yml b/.github/workflows/build-win-release.yml index 77cd7a64535f..c9afd638bff1 100644 --- a/.github/workflows/build-win-release.yml +++ b/.github/workflows/build-win-release.yml @@ -21,7 +21,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - run: go version diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c44ea987d7eb..bfbb6ea4c259 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,7 +19,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - name: Run GoReleaser uses: goreleaser/goreleaser-action@v2 diff --git a/.github/workflows/static-analysis.yaml b/.github/workflows/static-analysis.yml similarity index 94% rename from .github/workflows/static-analysis.yaml rename to .github/workflows/static-analysis.yml index 7056e6765b08..298a33fbeea7 100644 --- a/.github/workflows/static-analysis.yaml +++ b/.github/workflows/static-analysis.yml @@ -17,7 +17,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - name: Run static analysis tests 
shell: bash diff --git a/.github/workflows/test.e2e.yml b/.github/workflows/test.e2e.yml index 911c79ae507d..1a8647ecb1f5 100644 --- a/.github/workflows/test.e2e.yml +++ b/.github/workflows/test.e2e.yml @@ -18,7 +18,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - name: Build the avalanchego binary shell: bash diff --git a/.github/workflows/test.upgrade.yml b/.github/workflows/test.upgrade.yml index c7bc66b63747..5374b7bedef1 100644 --- a/.github/workflows/test.upgrade.yml +++ b/.github/workflows/test.upgrade.yml @@ -18,7 +18,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.20' + go-version: '1.19' check-latest: true - name: Build the avalanchego binary shell: bash diff --git a/.golangci.yml b/.golangci.yml index ea7a6691310f..78f38391262a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -19,7 +19,6 @@ linters: - bodyclose - depguard - errcheck - - errorlint - exportloopref - goconst - gocritic @@ -44,6 +43,7 @@ linters: - unparam - unused - whitespace + # - errorlint (TODO: re-enable in go1.20 migration) # - goerr113 # - gomnd # - lll @@ -97,7 +97,7 @@ linters-settings: - name: useless-break disabled: false staticcheck: - go: "1.20" + go: "1.19" # https://staticcheck.io/docs/options#checks checks: - "all" diff --git a/Dockerfile b/Dockerfile index 3c3f0c23ee18..20c06bf458de 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ # README.md # go.mod # ============= Compilation Stage ================ -FROM golang:1.20.1-buster AS builder +FROM golang:1.19.6-buster AS builder RUN apt-get update && apt-get install -y --no-install-recommends bash=5.0-4 make=4.2.1-1.2 gcc=4:8.3.0-1 musl-dev=1.1.21-2 ca-certificates=20200601~deb10u2 linux-headers-amd64 WORKDIR /build diff --git a/README.md b/README.md index 7f977cdf61ad..2a7255e2a816 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ The minimum recommended hardware specification for nodes connected to Mainnet is If you plan to build AvalancheGo from source, you will also need the following software: -- [Go](https://golang.org/doc/install) version >= 1.20.1 +- [Go](https://golang.org/doc/install) version >= 1.19.6 - [gcc](https://gcc.gnu.org/) - g++ diff --git a/RELEASES.md b/RELEASES.md index 340088a6bf9a..1902cc7977d4 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,42 @@ # Release Notes +## [v1.9.10](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.10) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `24`. 
+ +### MerkleDB + +- Removed parent tracking from `merkledb.trieView` +- Removed `base` caches from `merkledb.trieView` +- Fixed error handling during `merkledb` intermediate node eviction +- Replaced values larger than `32` bytes with a hash in the `merkledb` hash representation + +### AVM + +- Refactored `avm` API tx creation into a standalone `Spender` implementation +- Migrated UTXO interfaces from the `platformvm` into the `components` for use in the `avm` +- Refactored `avm` `tx.SyntacticVerify` to expect the config rather than the fee fields + +### Miscellaneous + +- Updated the minimum golang version to `v1.19.6` +- Fixed `rpcchainvm` signal handling to only shutdown upon receipt of `SIGTERM` +- Added `warp.Signature#NumSigners` for better cost tracking support +- Added `snow.Context#PublicKey` to provide access to the local node's BLS public key inside the VM execution environment +- Renamed Avalanche consensus metric prefix to `avalanche_{chainID}_avalanche` +- Specified an explicit TCP `Linger` timeout of `15` seconds +- Updated the `secp256k1` library to `v4.1.0` + +### Cleanup + +- Removed support for the `--whitelisted-subnets` flag +- Removed unnecessary abstractions from the `app` package +- Removed `Factory` embedding from `platformvm.VM` and `avm.VM` +- Removed `validator` package from the `platformvm` +- Removed `timer.TimeoutManager` +- Replaced `snow.Context` in `Factory.New` with `logging.Logger` +- Renamed `set.Bits#Len` to `BitLen` and `set.Bits#HammingWeight` to `Len` to align with `set.Bits64` + ## [v1.9.9](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.9) This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `23`. 
diff --git a/go.mod b/go.mod index 1ef6fe622174..b78e75ba02a9 100644 --- a/go.mod +++ b/go.mod @@ -6,13 +6,13 @@ module github.com/ava-labs/avalanchego // Dockerfile // README.md // go.mod (here, only major.minor can be specified) -go 1.20 +go 1.19 require ( github.com/Microsoft/go-winio v0.5.2 github.com/NYTimes/gziphandler v1.1.1 github.com/ava-labs/avalanche-network-runner-sdk v0.3.0 - github.com/ava-labs/coreth v0.11.8-0.20230223235704-14175cdd347d + github.com/ava-labs/coreth v0.11.8-rc.3 github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 github.com/btcsuite/btcd/btcutil v1.1.3 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 @@ -89,6 +89,7 @@ require ( github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/hcl v1.0.0 // indirect + github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect github.com/holiman/uint256 v1.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mattn/go-colorable v0.1.12 // indirect diff --git a/go.sum b/go.sum index c9db5b8426ea..6496d0a11926 100644 --- a/go.sum +++ b/go.sum @@ -57,8 +57,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/ava-labs/avalanche-network-runner-sdk v0.3.0 h1:TVi9JEdKNU/RevYZ9PyW4pULbEdS+KQDA9Ki2DUvuAs= github.com/ava-labs/avalanche-network-runner-sdk v0.3.0/go.mod h1:SgKJvtqvgo/Bl/c8fxEHCLaSxEbzimYfBopcfrajxQk= -github.com/ava-labs/coreth v0.11.8-0.20230223235704-14175cdd347d h1:i2pf4SXb1kmmAMgQOsoTHwy2rpOhU3dy1ND06nEdwdE= -github.com/ava-labs/coreth v0.11.8-0.20230223235704-14175cdd347d/go.mod h1:UiSBTrY+KwCiwHaIsGMZgoDtLLah8UXealH6LK0wEbc= +github.com/ava-labs/coreth v0.11.8-rc.3 h1:pS+OTFPc9edcFuCJIQGn5TdyAZncT9Hhs9jCcmm7+PM= +github.com/ava-labs/coreth v0.11.8-rc.3/go.mod h1:pc44yvJD4jTPIwkPI64pUXyJDvQ/UAqkbmhXOx78PXA= github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 h1:EdxD90j5sClfL5Ngpz2TlnbnkNYdFPDXa0jDOjam65c= github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7/go.mod h1:XhiXSrh90sHUbkERzaxEftCmUz53eCijshDLZ4fByVM= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= @@ -269,6 +269,8 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= diff --git a/proto/Dockerfile.buf b/proto/Dockerfile.buf index 20795e66a791..3903baf2d675 100644 --- a/proto/Dockerfile.buf +++ b/proto/Dockerfile.buf @@ -6,7 +6,7 @@ RUN apt-get update && apt -y install bash curl unzip git WORKDIR /opt RUN \ - curl -L https://golang.org/dl/go1.18.5.linux-amd64.tar.gz > golang.tar.gz && \ + curl 
-L https://golang.org/dl/go1.19.6.linux-amd64.tar.gz > golang.tar.gz && \ mkdir golang && \ tar -zxvf golang.tar.gz -C golang/ diff --git a/proto/README.md b/proto/README.md index dbc8ebb15169..4baa2d531fa8 100644 --- a/proto/README.md +++ b/proto/README.md @@ -1,6 +1,6 @@ # Avalanche gRPC -Now Serving: **Protocol Version 23** +Now Serving: **Protocol Version 24** Protobuf files are hosted at [https://buf.build/ava-labs/avalanche](https://buf.build/ava-labs/avalanche) and can be used as dependencies in other projects. diff --git a/scripts/build_avalanche.sh b/scripts/build_avalanche.sh index cdf6fdb5af4b..855e6f360f69 100755 --- a/scripts/build_avalanche.sh +++ b/scripts/build_avalanche.sh @@ -30,7 +30,7 @@ done # Dockerfile # README.md # go.mod -go_version_minimum="1.20.1" +go_version_minimum="1.19.6" go_version() { go version | sed -nE -e 's/[^0-9.]+([0-9.]+).+/\1/p' diff --git a/scripts/build_coreth.sh b/scripts/build_coreth.sh index bd3fe41c0be4..d22b7e4e8887 100755 --- a/scripts/build_coreth.sh +++ b/scripts/build_coreth.sh @@ -55,7 +55,7 @@ go build $build_args -ldflags "-X github.com/ava-labs/coreth/plugin/evm.Version= cd "$AVALANCHE_PATH" # Building coreth + using go get can mess with the go.mod file. -go mod tidy -compat=1.18 +go mod tidy -compat=1.19 # Exit build successfully if the Coreth EVM binary is created successfully if [[ -f "$evm_path" ]]; then diff --git a/scripts/constants.sh b/scripts/constants.sh index deb8be2a245b..51d318dd2b90 100755 --- a/scripts/constants.sh +++ b/scripts/constants.sh @@ -9,7 +9,7 @@ AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Direct avalanchego_path="$AVALANCHE_PATH/build/avalanchego" plugin_dir=${PLUGIN_DIR:-$HOME/.avalanchego/plugins} evm_path=${EVM_PATH:-$plugin_dir/evm} -coreth_version=${CORETH_VERSION:-'v0.11.8-0.20230223235704-14175cdd347d'} +coreth_version=${CORETH_VERSION:-'v0.11.8-rc.3'} # Set the PATHS GOPATH="$(go env GOPATH)" diff --git a/scripts/local.Dockerfile b/scripts/local.Dockerfile index e3db6b239c76..0603b1a4716b 100644 --- a/scripts/local.Dockerfile +++ b/scripts/local.Dockerfile @@ -9,7 +9,7 @@ # Dockerfile # README.md # go.mod -FROM golang:1.20.1-buster +FROM golang:1.19.6-buster RUN mkdir -p /go/src/github.com/ava-labs diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index c59aa9058d9d..28dc207bbd85 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -365,7 +365,7 @@ func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { if vmErr == nil { return intf, consensusErr } - return intf, fmt.Errorf("vm: %w ; consensus: %w", vmErr, consensusErr) + return intf, fmt.Errorf("vm: %w ; consensus: %v", vmErr, consensusErr) } func (t *Transitive) GetVM() common.VM { diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 2a6c227bf591..fece094ecab7 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -435,7 +435,7 @@ func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { if vmErr == nil { return intf, consensusErr } - return intf, fmt.Errorf("vm: %w ; consensus: %w", vmErr, consensusErr) + return intf, fmt.Errorf("vm: %w ; consensus: %v", vmErr, consensusErr) } func (t *Transitive) GetVM() common.VM { diff --git a/version/compatibility.json b/version/compatibility.json index e4d9aae0811f..9024236ac044 100644 --- a/version/compatibility.json +++ b/version/compatibility.json @@ -1,4 +1,7 @@ { + "24": [ + "v1.9.10" + ], 
"23": [ "v1.9.9" ], diff --git a/version/constants.go b/version/constants.go index f2ae98c8bbcf..b2378b3270ba 100644 --- a/version/constants.go +++ b/version/constants.go @@ -14,14 +14,14 @@ import ( // RPCChainVMProtocol should be bumped anytime changes are made which require // the plugin vm to upgrade to latest avalanchego release to be compatible. -const RPCChainVMProtocol uint = 23 +const RPCChainVMProtocol uint = 24 // These are globals that describe network upgrades and node versions var ( Current = &Semantic{ Major: 1, Minor: 9, - Patch: 9, + Patch: 10, } CurrentApp = &Application{ Major: Current.Major, diff --git a/vms/components/avax/utxo_id.go b/vms/components/avax/utxo_id.go index b6bd01dffde3..d8566ba42d5f 100644 --- a/vms/components/avax/utxo_id.go +++ b/vms/components/avax/utxo_id.go @@ -68,12 +68,12 @@ func UTXOIDFromString(s string) (*UTXOID, error) { txID, err := ids.FromString(ss[0]) if err != nil { - return nil, fmt.Errorf("%w: %w", errFailedDecodingUTXOIDTxID, err) + return nil, fmt.Errorf("%w: %v", errFailedDecodingUTXOIDTxID, err) } idx, err := strconv.ParseUint(ss[1], 10, 32) if err != nil { - return nil, fmt.Errorf("%w: %w", errFailedDecodingUTXOIDIndex, err) + return nil, fmt.Errorf("%w: %v", errFailedDecodingUTXOIDIndex, err) } return &UTXOID{ diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index 7c8484c47dae..0a7080a3410a 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -126,7 +126,7 @@ func verifyAddValidatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkValidatorFee, }, ); err != nil { - return nil, fmt.Errorf("%w: %w", errFlowCheckFailed, err) + return nil, fmt.Errorf("%w: %v", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. This is done last @@ -225,7 +225,7 @@ func verifyAddSubnetValidatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddSubnetValidatorFee, }, ); err != nil { - return fmt.Errorf("%w: %w", errFlowCheckFailed, err) + return fmt.Errorf("%w: %v", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. This is done last @@ -266,7 +266,7 @@ func removeSubnetValidatorValidation( if err != nil { // It isn't a current or pending validator. return nil, false, fmt.Errorf( - "%s %w of %s: %w", + "%s %w of %s: %v", tx.NodeID, errNotValidator, tx.Subnet, @@ -300,7 +300,7 @@ func removeSubnetValidatorValidation( backend.Ctx.AVAXAssetID: backend.Config.TxFee, }, ); err != nil { - return nil, false, fmt.Errorf("%w: %w", errFlowCheckFailed, err) + return nil, false, fmt.Errorf("%w: %v", errFlowCheckFailed, err) } return vdr, isCurrentValidator, nil @@ -401,7 +401,7 @@ func verifyAddDelegatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkDelegatorFee, }, ); err != nil { - return nil, fmt.Errorf("%w: %w", errFlowCheckFailed, err) + return nil, fmt.Errorf("%w: %v", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. This is done last @@ -536,7 +536,7 @@ func verifyAddPermissionlessValidatorTx( backend.Ctx.AVAXAssetID: txFee, }, ); err != nil { - return fmt.Errorf("%w: %w", errFlowCheckFailed, err) + return fmt.Errorf("%w: %v", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. 
This is done last @@ -717,7 +717,7 @@ func verifyAddPermissionlessDelegatorTx( backend.Ctx.AVAXAssetID: txFee, }, ); err != nil { - return fmt.Errorf("%w: %w", errFlowCheckFailed, err) + return fmt.Errorf("%w: %v", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. This is done last diff --git a/vms/platformvm/txs/executor/subnet_tx_verification.go b/vms/platformvm/txs/executor/subnet_tx_verification.go index 7a2d4a5718d0..5beb7f05dfeb 100644 --- a/vms/platformvm/txs/executor/subnet_tx_verification.go +++ b/vms/platformvm/txs/executor/subnet_tx_verification.go @@ -70,7 +70,7 @@ func verifySubnetAuthorization( subnetIntf, _, err := chainState.GetTx(subnetID) if err != nil { return nil, fmt.Errorf( - "%w %q: %w", + "%w %q: %v", errCantFindSubnet, subnetID, err, @@ -83,7 +83,7 @@ func verifySubnetAuthorization( } if err := backend.Fx.VerifyPermission(sTx.Unsigned, subnetAuth, subnetCred, subnet.Owner); err != nil { - return nil, fmt.Errorf("%w: %w", errUnauthorizedSubnetModification, err) + return nil, fmt.Errorf("%w: %v", errUnauthorizedSubnetModification, err) } return sTx.Creds[:baseTxCredsLen], nil diff --git a/vms/platformvm/warp/signature.go b/vms/platformvm/warp/signature.go index 52c58f8f8a54..429a24af7a85 100644 --- a/vms/platformvm/warp/signature.go +++ b/vms/platformvm/warp/signature.go @@ -115,7 +115,7 @@ func (s *BitSetSignature) Verify( // Parse the aggregate signature aggSig, err := bls.SignatureFromBytes(s.Signature[:]) if err != nil { - return fmt.Errorf("%w: %w", ErrParseSignature, err) + return fmt.Errorf("%w: %v", ErrParseSignature, err) } // Create the aggregate public key diff --git a/vms/platformvm/warp/validator.go b/vms/platformvm/warp/validator.go index 56e7213dbcd8..aaeab532700d 100644 --- a/vms/platformvm/warp/validator.go +++ b/vms/platformvm/warp/validator.go @@ -59,7 +59,7 @@ func GetCanonicalValidatorSet( for _, vdr := range vdrSet { totalWeight, err = math.Add64(totalWeight, vdr.Weight) if err != nil { - return nil, 0, fmt.Errorf("%w: %w", ErrWeightOverflow, err) + return nil, 0, fmt.Errorf("%w: %v", ErrWeightOverflow, err) } if vdr.PublicKey == nil { @@ -124,7 +124,7 @@ func SumWeight(vdrs []*Validator) (uint64, error) { for _, vdr := range vdrs { weight, err = math.Add64(weight, vdr.Weight) if err != nil { - return 0, fmt.Errorf("%w: %w", ErrWeightOverflow, err) + return 0, fmt.Errorf("%w: %v", ErrWeightOverflow, err) } } return weight, nil diff --git a/vms/rpcchainvm/runtime/subprocess/runtime.go b/vms/rpcchainvm/runtime/subprocess/runtime.go index 209bbab779ab..d606426d10d3 100644 --- a/vms/rpcchainvm/runtime/subprocess/runtime.go +++ b/vms/rpcchainvm/runtime/subprocess/runtime.go @@ -132,12 +132,12 @@ func Bootstrap( case <-intitializer.initialized: case <-timeout.C: stopper.Stop(ctx) - return nil, nil, fmt.Errorf("%w: %w", runtime.ErrHandshakeFailed, runtime.ErrProcessNotFound) + return nil, nil, fmt.Errorf("%w: %v", runtime.ErrHandshakeFailed, runtime.ErrProcessNotFound) } if intitializer.err != nil { stopper.Stop(ctx) - return nil, nil, fmt.Errorf("%w: %w", runtime.ErrHandshakeFailed, err) + return nil, nil, fmt.Errorf("%w: %v", runtime.ErrHandshakeFailed, err) } log.Info("plugin handshake succeeded", diff --git a/x/sync/client.go b/x/sync/client.go index 84f4d9a5b1ec..3adba39f2375 100644 --- a/x/sync/client.go +++ b/x/sync/client.go @@ -88,7 +88,7 @@ func (c *client) GetChangeProof(ctx context.Context, req *ChangeProofRequest, db } if err := changeProof.Verify(ctx, db, req.Start, req.End, req.EndingRoot); err != nil { 
- return nil, fmt.Errorf("%w due to %w", errInvalidRangeProof, err) + return nil, fmt.Errorf("%s due to %w", errInvalidRangeProof, err) } return changeProof, nil } @@ -116,7 +116,7 @@ func (c *client) GetRangeProof(ctx context.Context, req *RangeProofRequest) (*me req.End, req.Root, ); err != nil { - return nil, fmt.Errorf("%w due to %w", errInvalidRangeProof, err) + return nil, fmt.Errorf("%s due to %w", errInvalidRangeProof, err) } return rangeProof, nil } @@ -143,7 +143,7 @@ func getAndParse[T any](ctx context.Context, client *client, request Request, pa // If the context has finished, return the context error early. if err := ctx.Err(); err != nil { if lastErr != nil { - return nil, fmt.Errorf("request failed after %d attempts with last error %w and ctx error %w", attempt, lastErr, err) + return nil, fmt.Errorf("request failed after %d attempts with last error %w and ctx error %s", attempt, lastErr, err) } return nil, err }
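The closing patch pins the toolchain back to Go 1.19 (and re-disables errorlint until the 1.20 migration), so every fmt.Errorf call that had gained a second %w earlier in the series is rewritten to wrap a single error again. The observable difference is which errors remain matchable with errors.Is; a small sketch of the two revert styles used here:

    package main

    import (
        "errors"
        "fmt"
    )

    var (
        errFlowCheckFailed   = errors.New("flow check failed")
        errInvalidRangeProof = errors.New("invalid range proof")
    )

    func main() {
        cause := errors.New("insufficient funds")

        // Most call sites keep the sentinel wrapped and flatten the cause with %v.
        e1 := fmt.Errorf("%w: %v", errFlowCheckFailed, cause)
        fmt.Println(errors.Is(e1, errFlowCheckFailed), errors.Is(e1, cause)) // true false

        // The x/sync client keeps the underlying error wrapped instead.
        e2 := fmt.Errorf("%s due to %w", errInvalidRangeProof, cause)
        fmt.Println(errors.Is(e2, errInvalidRangeProof), errors.Is(e2, cause)) // false true
    }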