diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 270b5d4ad9b0..cd3271dc1734 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -6,17 +6,8 @@ GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) DOCKER_REPO="avaplatform" -echo "$DOCKER_PASS" | docker login --username "$DOCKER_USERNAME" --password-stdin - -TESTING_CONTROLLER_IMAGE="$DOCKER_REPO/avalanche-e2e-tests_controller:everest-latest" -BYZANTINE_IMAGE="$DOCKER_REPO/gecko-byzantine:everest-latest" - -docker pull "$TESTING_CONTROLLER_IMAGE" -docker pull "${BYZANTINE_IMAGE}" - - E2E_TESTING_REMOTE="https://github.com/ava-labs/avalanche-testing.git" -E2E_TAG="v0.8.2-dev" +E2E_TAG="v0.8.4-dev" mkdir -p "$E2E_TEST_HOME" git clone "$E2E_TESTING_REMOTE" "$E2E_TEST_HOME" @@ -26,4 +17,19 @@ git checkout "tags/$E2E_TAG" -b "$E2E_TAG" go mod edit -replace github.com/ava-labs/gecko="$GECKO_HOME" bash "./scripts/rebuild_initializer_binary.sh" -./build/avalanche-e2e-tests --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${TESTING_CONTROLLER_IMAGE}" --byzantine-image-name="${BYZANTINE_IMAGE}" + + +TESTING_CONTROLLER_IMAGE="$DOCKER_REPO/avalanche-testing_controller:everest-dev" +BYZANTINE_IMAGE="$DOCKER_REPO/gecko-byzantine:everest-dev" + +docker pull "$TESTING_CONTROLLER_IMAGE" + +# If Docker Credentials are not available skip the Byzantine Tests +if [[ ${#DOCKER_USERNAME} == 0 ]]; then + echo "Skipping Byzantine Tests because Docker Credentials were not present." + ./build/avalanche-testing --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${TESTING_CONTROLLER_IMAGE}" +else + echo "$DOCKER_PASS" | docker login --username "$DOCKER_USERNAME" --password-stdin + docker pull "${BYZANTINE_IMAGE}" + ./build/avalanche-testing --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${TESTING_CONTROLLER_IMAGE}" --byzantine-image-name="${BYZANTINE_IMAGE}" +fi diff --git a/.travis.yml b/.travis.yml index 89e4a11eb88d..9fd78bcdf5a7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,10 +17,7 @@ env: - E2E_TEST_HOME=$GOPATH/src/github.com/ava-labs/avalanche-testing/ - COMMIT=${TRAVIS_COMMIT::8} - DOCKERHUB_REPO=avaplatform/gecko - - secure: "L/A9+re0NEKP6EV6H9RcTGiDhX3WMvsiWrkRKDYKqnviqbjY30RK6EM4vvjrM4Lrw2QwsO3YKgnku3+zioE/TxEZFkpkbjNUXru0nYBrWAg1TKVsDXnYaIZkHUejfryST3E8N7F4Hx6zCtGEO0sEdUeKuT+MNUIuHezHooTgGzDjMogm70EWMFjQHc7VucTJu7dWU1RBPjovWQ0q9qflrtCpbrvXFIiihQQ1PQha1Q2C4wLakKuLbhhSafue90Mnyss0blaPHy/tyewcASJu4vsGTKRBn0DzttlkNTwuD6+nKrbmJY0ohunnkVFzYjrZAw1gyN+DCDb/lPbz4ZDItKPwrIUPEtL5xuUOrxUZPUh+0io3Q2d6rjaqkdGjd1KQXzbnW1mn0BxX3d3b2UpIqhBn9umYYjHBKnMuoRiTK33b7U9+LF3K84+tEvVDCPeHs/mw6Inp5jGRSravnM6yPQ6feGzogs4+3EMzZXxnkngKFKCsnd67Oe9xfV9amOU2aQAx4jaAwlPjEpBEkUa8YKx3lPznvmUk1QsNCUbLjdSl5JBaXojLJoiuPbj29hp4S5AXXgn+3Hvwk3ndcFCxi6/l1W9mjYSOtFqg3EAUdF4EgnA/ykQg9ZokkoKY0+qgOzG2bKOAYuCDWeGr7P1apToh00ccsQXL81nVPiq7uDw=" - - secure: 
"zfTm7tJBYiPYrli76d4Ep6Lc2TJQ8Xv//+7OoqTA/aIf6YJDHe05f2GFTWAHG2iOIix/yjwHYwnhyIW66eWPb+Ujejnmh4eXlYZFufX9J5jUpDpbFu/+ybOLgE1Tmr0je0ycneSMe/NAaS74nWU1wnP34/cEE4sYL7TJyhwbeEtgz3cbSWwkpdvHFbXCjSOA196jdIYYUwsnqU9yycAG+2WUSk3DHHzzdtMrh/UOH2r1VFyp5US0zmbW90WkWX+o3TIlzZJgTUGQRNnWKq95Mrh1EQotxgL6CJ8NkfY4bVAGAhusPjdjscJsHxfY93WRMH64TzPYYp0zdibatH0ztyhnZPXVKqv+AIIVTEW+xWv5V18kTQAd1uBW103NFacbgXhIGWtbFcN9g1+ws29HROMclYs7ci6+72Qnq0eL55huqSyFx6+InhYwn+LfJmaBcGW4wx1umdp505M0obZ4ghlyn6b0pDYmqsu1XyBC3mjUTFbwlQmWE2Fize4L5o+DdH4ZDc9japF9ntxIMvO+b3nOicr7tplY2AGp61bB89o3dUAFlN5mDaEJotiAuFk5mo244rY1FjSzyGiKkA3M9TkTIbgcbN098hOJoMCYybH7yqiPwNnZiFvUuYjHuC5D1kIYBWuqqO0iVcbIZn0rV2jyzbVFlhFVk2clTZGhkrY=" before_install: - - pip install --user awscli # need awscli to access byz-gecko images - if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/before_install_linux.sh; fi install: diff --git a/api/info/service.go b/api/info/service.go index aa594d47662a..5b16274ab3eb 100644 --- a/api/info/service.go +++ b/api/info/service.go @@ -18,8 +18,6 @@ import ( "github.com/ava-labs/gecko/utils/json" "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/version" - - cjson "github.com/ava-labs/gecko/utils/json" ) // Info is the API service for unprivileged info on a node @@ -36,7 +34,7 @@ type Info struct { // NewService returns a new admin API service func NewService(log logging.Logger, version version.Version, nodeID ids.ShortID, networkID uint32, chainManager chains.Manager, peers network.Network, txFee uint64) (*common.HTTPHandler, error) { newServer := rpc.NewServer() - codec := cjson.NewCodec() + codec := json.NewCodec() newServer.RegisterCodec(codec, "application/json") newServer.RegisterCodec(codec, "application/json;charset=UTF-8") if err := newServer.RegisterService(&Info{ @@ -81,14 +79,14 @@ func (service *Info) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDRep // GetNetworkIDReply are the results from calling GetNetworkID type GetNetworkIDReply struct { - NetworkID cjson.Uint32 `json:"networkID"` + NetworkID json.Uint32 `json:"networkID"` } // GetNetworkID returns the network ID this node is running on func (service *Info) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error { service.log.Info("Info: GetNetworkID called") - reply.NetworkID = cjson.Uint32(service.networkID) + reply.NetworkID = json.Uint32(service.networkID) return nil } diff --git a/api/keystore/service.go b/api/keystore/service.go index 500cf31e040e..a5fce7295688 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -57,7 +57,7 @@ const ( var ( errEmptyUsername = errors.New("username can't be the empty string") errUserPassMaxLength = fmt.Errorf("CreateUser call rejected due to username or password exceeding maximum length of %d chars", maxUserPassLen) - errWeakPassword = errors.New("Failed to create user as the given password is too weak") + errWeakPassword = errors.New("failed to create user as the given password is too weak") ) // KeyValuePair ... 
diff --git a/chains/awaiter.go b/chains/awaiter.go index 5b9cfd6de922..bf9473a77582 100644 --- a/chains/awaiter.go +++ b/chains/awaiter.go @@ -28,11 +28,11 @@ func NewAwaiter(vdrs validators.Set, reqWeight uint64, connected func()) network } func (a *awaitConnected) Connected(vdrID ids.ShortID) bool { - vdr, ok := a.vdrs.Get(vdrID) + weight, ok := a.vdrs.GetWeight(vdrID) if !ok { return false } - weight, err := math.Add64(vdr.Weight(), a.weight) + weight, err := math.Add64(weight, a.weight) a.weight = weight // If the error is non-nil, then an overflow error has occurred such that // the required weight was surpassed. As per network.Handler interface, @@ -46,14 +46,14 @@ func (a *awaitConnected) Connected(vdrID ids.ShortID) bool { } func (a *awaitConnected) Disconnected(vdrID ids.ShortID) bool { - if vdr, ok := a.vdrs.Get(vdrID); ok { + if weight, ok := a.vdrs.GetWeight(vdrID); ok { // TODO: Account for weight changes in a more robust manner. // Sub64 should rarely error since only validators that have added their // weight can become disconnected. Because it is possible that there are // changes to the validators set, we utilize that Sub64 returns 0 on // error. - a.weight, _ = math.Sub64(a.weight, vdr.Weight()) + a.weight, _ = math.Sub64(a.weight, weight) } return false } diff --git a/chains/awaiter_test.go b/chains/awaiter_test.go index fedec4a5c049..6120cfd4b008 100644 --- a/chains/awaiter_test.go +++ b/chains/awaiter_test.go @@ -18,9 +18,9 @@ func TestAwaiter(t *testing.T) { vdrID3 := ids.NewShortID([20]byte{3}) s := validators.NewSet() - s.Add(validators.NewValidator(vdrID0, 1)) - s.Add(validators.NewValidator(vdrID1, 1)) - s.Add(validators.NewValidator(vdrID3, 1)) + s.AddWeight(vdrID0, 1) + s.AddWeight(vdrID1, 1) + s.AddWeight(vdrID3, 1) called := make(chan struct{}, 1) aw := NewAwaiter(s, 3, func() { diff --git a/chains/manager.go b/chains/manager.go index 34270fbe8af2..2991935ff379 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -115,6 +115,7 @@ type manager struct { stakingEnabled bool // True iff the network has staking enabled stakerMsgPortion, stakerCPUPortion float64 + maxNonStakerPendingMsgs uint32 log logging.Logger logFactory logging.Factory vmManager vms.Manager // Manage mappings from vm ID --> vm @@ -152,6 +153,7 @@ type manager struct { // TODO: Make this function take less arguments func New( stakingEnabled bool, + maxNonStakerPendingMsgs uint, stakerMsgPortion, stakerCPUPortion float64, log logging.Logger, @@ -186,29 +188,30 @@ func New( rtr.Initialize(log, &timeoutManager, gossipFrequency, shutdownTimeout) m := &manager{ - stakingEnabled: stakingEnabled, - stakerMsgPortion: stakerMsgPortion, - stakerCPUPortion: stakerCPUPortion, - log: log, - logFactory: logFactory, - vmManager: vmManager, - decisionEvents: decisionEvents, - consensusEvents: consensusEvents, - db: db, - chainRouter: rtr, - net: net, - timeoutManager: &timeoutManager, - consensusParams: consensusParams, - validators: validators, - nodeID: nodeID, - networkID: networkID, - server: server, - keystore: keystore, - atomicMemory: atomicMemory, - avaxAssetID: avaxAssetID, - xChainID: xChainID, - criticalChains: criticalChains, - chains: make(map[[32]byte]*router.Handler), + stakingEnabled: stakingEnabled, + maxNonStakerPendingMsgs: uint32(maxNonStakerPendingMsgs), + stakerMsgPortion: stakerMsgPortion, + stakerCPUPortion: stakerCPUPortion, + log: log, + logFactory: logFactory, + vmManager: vmManager, + decisionEvents: decisionEvents, + consensusEvents: consensusEvents, + db: db, + chainRouter: 
rtr, + net: net, + timeoutManager: &timeoutManager, + consensusParams: consensusParams, + validators: validators, + nodeID: nodeID, + networkID: networkID, + server: server, + keystore: keystore, + atomicMemory: atomicMemory, + avaxAssetID: avaxAssetID, + xChainID: xChainID, + criticalChains: criticalChains, + chains: make(map[[32]byte]*router.Handler), } m.Initialize() return m, nil @@ -238,7 +241,8 @@ func (m *manager) ForceCreateChain(chainParams ChainParameters) { // Assert that there isn't already a chain with an alias in [chain].Aliases // (Recall that the string repr. of a chain's ID is also an alias for a chain) if alias, isRepeat := m.isChainWithAlias(chainParams.ID.String()); isRepeat { - m.log.Error("there is already a chain with alias '%s'. Chain not created.", alias) + m.log.Debug("there is already a chain with alias '%s'. Chain not created.", + alias) return } @@ -342,9 +346,9 @@ func (m *manager) buildChain(chainParams ChainParameters) (*chain, error) { var validators validators.Set // Validators validating this blockchain var ok bool if m.stakingEnabled { - validators, ok = m.validators.GetValidatorSet(chainParams.SubnetID) + validators, ok = m.validators.GetValidators(chainParams.SubnetID) } else { // Staking is disabled. Every peer validates every subnet. - validators, ok = m.validators.GetValidatorSet(constants.DefaultSubnetID) + validators, ok = m.validators.GetValidators(constants.PrimaryNetworkID) } if !ok { return nil, fmt.Errorf("couldn't get validator set of subnet with ID %s. The subnet may not exist", chainParams.SubnetID) @@ -510,6 +514,7 @@ func (m *manager) createAvalancheChain( validators, msgChan, defaultChannelSize, + m.maxNonStakerPendingMsgs, m.stakerMsgPortion, m.stakerCPUPortion, fmt.Sprintf("%s_handler", consensusParams.Namespace), @@ -588,6 +593,7 @@ func (m *manager) createSnowmanChain( validators, msgChan, defaultChannelSize, + m.maxNonStakerPendingMsgs, m.stakerMsgPortion, m.stakerCPUPortion, fmt.Sprintf("%s_handler", consensusParams.Namespace), diff --git a/genesis/genesis.go b/genesis/genesis.go index 02e69b98f250..2be3c527b9ca 100644 --- a/genesis/genesis.go +++ b/genesis/genesis.go @@ -119,7 +119,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { weight := json.Uint64(20 * units.KiloAvax) destAddr := config.FundedAddresses[i%len(config.FundedAddresses)] platformvmArgs.Validators = append(platformvmArgs.Validators, - platformvm.FormattedAPIDefaultSubnetValidator{ + platformvm.FormattedAPIPrimaryValidator{ FormattedAPIValidator: platformvm.FormattedAPIValidator{ StartTime: json.Uint64(genesisTime.Unix()), EndTime: json.Uint64(endStakingTime.Unix()), @@ -135,7 +135,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { platformvmArgs.Chains = []platformvm.APIChain{ { GenesisData: avmReply.Bytes, - SubnetID: constants.DefaultSubnetID, + SubnetID: constants.PrimaryNetworkID, VMID: avm.ID, FxIDs: []ids.ID{ secp256k1fx.ID, @@ -146,7 +146,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { }, { GenesisData: formatting.CB58{Bytes: config.EVMBytes}, - SubnetID: constants.DefaultSubnetID, + SubnetID: constants.PrimaryNetworkID, VMID: EVMID, Name: "C-Chain", }, diff --git a/genesis/network_id.go b/genesis/network_id.go index ce00cbabbc31..146c39d1396c 100644 --- a/genesis/network_id.go +++ b/genesis/network_id.go @@ -30,18 +30,18 @@ func NetworkID(networkName string) (uint32, error) { if id, err := strconv.ParseUint(networkName, 10, 0); err == nil { if id > math.MaxUint32 { - return 0, fmt.Errorf("NetworkID %s not in [0, 
2^32)", networkName) + return 0, fmt.Errorf("networkID %s not in [0, 2^32)", networkName) } return uint32(id), nil } if constants.ValidNetworkName.MatchString(networkName) { if id, err := strconv.Atoi(networkName[8:]); err == nil { if id > math.MaxUint32 { - return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) + return 0, fmt.Errorf("networkID %s not in [0, 2^32)", networkName) } return uint32(id), nil } } - return 0, fmt.Errorf("Failed to parse %s as a network name", networkName) + return 0, fmt.Errorf("failed to parse %s as a network name", networkName) } diff --git a/go.mod b/go.mod index 4a219ebca215..260e3480d0f4 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/AppsFlyer/go-sundheit v0.2.0 github.com/allegro/bigcache v1.2.1 // indirect github.com/aristanetworks/goarista v0.0.0-20200812190859-4cb0e71f3c0e // indirect - github.com/ava-labs/coreth v0.2.12-rc.1 // indirect + github.com/ava-labs/coreth v0.2.13 // indirect github.com/ava-labs/go-ethereum v1.9.3 github.com/btcsuite/btcutil v1.0.2 github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200627015759-01fd2de07837 diff --git a/go.sum b/go.sum index 7a57321b2601..ad6caf93f241 100644 --- a/go.sum +++ b/go.sum @@ -21,8 +21,8 @@ github.com/aristanetworks/goarista v0.0.0-20200609010056-95bcf8053598/go.mod h1: github.com/aristanetworks/goarista v0.0.0-20200812190859-4cb0e71f3c0e h1:tkEt0le4Lv5+VmcxZPIVSrP8LVPLhndIm/BOP7iPh/w= github.com/aristanetworks/goarista v0.0.0-20200812190859-4cb0e71f3c0e/go.mod h1:QZe5Yh80Hp1b6JxQdpfSEEe8X7hTyTEZSosSrFf/oJE= github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc= -github.com/ava-labs/coreth v0.2.12-rc.1 h1:BUUu+89KwsAFZpdcim8PGkc11P54BQhvRaB9XzX63F8= -github.com/ava-labs/coreth v0.2.12-rc.1/go.mod h1:ZwQ7rzHvQLorZsMoUm2FDWmLwOvDDoNzB+EEp2NhWyI= +github.com/ava-labs/coreth v0.2.13 h1:MaTf6Mbhfh2Ou5MpYmMqohCOtg3ZQgbDXTeEzcCiMb8= +github.com/ava-labs/coreth v0.2.13/go.mod h1:ZwQ7rzHvQLorZsMoUm2FDWmLwOvDDoNzB+EEp2NhWyI= github.com/ava-labs/gecko v0.6.1-rc.1/go.mod h1:TT6uA1BETZpVMR0xiFtE8I5Mv4DULlS+lAL3xuYKnpA= github.com/ava-labs/go-ethereum v1.9.3 h1:GmnMZ/dlvVAPFmWBzEpRJX49pUAymPfoASLNRJqR0AY= github.com/ava-labs/go-ethereum v1.9.3/go.mod h1:a+agc6fXfZFsPZCylA3ry4Y8CLCqLKg3Rc23NXZ9aw8= diff --git a/ids/bag.go b/ids/bag.go index e6c3f653854d..5d37de188252 100644 --- a/ids/bag.go +++ b/ids/bag.go @@ -90,7 +90,7 @@ func (b *Bag) Len() int { return b.size } // List returns a list of all ids that have been added. 
func (b *Bag) List() []ID { - idList := make([]ID, len(b.counts), len(b.counts)) + idList := make([]ID, len(b.counts)) i := 0 for id := range b.counts { idList[i] = NewID(id) diff --git a/ids/short_set.go b/ids/short_set.go index 9bcd37d2275b..df011bacb69d 100644 --- a/ids/short_set.go +++ b/ids/short_set.go @@ -80,7 +80,7 @@ func (ids ShortSet) CappedList(size int) []ShortID { // List converts this set into a list func (ids ShortSet) List() []ShortID { - idList := make([]ShortID, len(ids), len(ids)) + idList := make([]ShortID, len(ids)) i := 0 for id := range ids { idList[i] = NewShortID(id) diff --git a/ipcs/eventsocket.go b/ipcs/eventsocket.go index 2b0476bf5b38..80a7d85779fa 100644 --- a/ipcs/eventsocket.go +++ b/ipcs/eventsocket.go @@ -15,11 +15,6 @@ import ( "github.com/ava-labs/gecko/utils/wrappers" ) -type chainEventDipatcher struct { - chainID ids.ID - events *triggers.EventDispatcher -} - // EventSockets is a set of named eventSockets type EventSockets struct { consensusSocket *eventSocket diff --git a/main/params.go b/main/params.go index 8e3bdd3982ed..599da77ed323 100644 --- a/main/params.go +++ b/main/params.go @@ -32,7 +32,7 @@ import ( ) const ( - dbVersion = "v0.6.1" + dbVersion = "v0.7.0" ) // Results of parsing the CLI @@ -166,8 +166,8 @@ func init() { // AVAX fees: fs.Uint64Var(&Config.TxFee, "tx-fee", units.MilliAvax, "Transaction fee, in nAVAX") - // Minimum stake, in nAVAX, required to validate the Default Subnet - fs.Uint64Var(&Config.MinStake, "min-stake", 5*units.MilliAvax, "Minimum stake, in nAVAX, required to validate the Default Subnet") + // Minimum stake, in nAVAX, required to validate the primary network + fs.Uint64Var(&Config.MinStake, "min-stake", 5*units.MilliAvax, "Minimum stake, in nAVAX, required to validate the primary network") // Assertions: fs.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution") @@ -202,6 +202,7 @@ func init() { fs.Uint64Var(&Config.DisabledStakingWeight, "staking-disabled-weight", 1, "Weight to provide to each peer when staking is disabled") // Throttling: + fs.UintVar(&Config.MaxNonStakerPendingMsgs, "max-non-staker-pending-msgs", 3, "Maximum number of messages a non-staker is allowed to have pending.") fs.Float64Var(&Config.StakerMsgPortion, "staker-msg-reserved", 0.2, "Reserve a portion of the chain message queue's space for stakers.") fs.Float64Var(&Config.StakerCPUPortion, "staker-cpu-reserved", 0.2, "Reserve a portion of the chain's CPU time for stakers.") @@ -298,7 +299,7 @@ func init() { } if ip == nil { - errs.Add(fmt.Errorf("Invalid IP Address %s", *consensusIP)) + errs.Add(fmt.Errorf("invalid IP Address %s", *consensusIP)) return } @@ -362,7 +363,7 @@ func init() { } } if len(Config.BootstrapPeers) != i { - errs.Add(fmt.Errorf("More bootstrap IPs, %d, provided than bootstrap IDs, %d", len(Config.BootstrapPeers), i)) + errs.Add(fmt.Errorf("more bootstrap IPs, %d, provided than bootstrap IDs, %d", len(Config.BootstrapPeers), i)) return } } else { diff --git a/nat/nat.go b/nat/nat.go index 079cb357efe3..cc6244cbeda5 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -121,7 +121,7 @@ func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, } updateTimer.Reset(mapUpdateTimeout) - case _, _ = <-dev.closer: + case <-dev.closer: return } } diff --git a/network/codec.go b/network/codec.go index 0d04f92b44b6..6e409209ba29 100644 --- a/network/codec.go +++ b/network/codec.go @@ -12,7 +12,6 @@ import ( ) var ( - errBadLength = errors.New("stream has unexpected length") 
errMissingField = errors.New("message missing field") errBadOp = errors.New("input field has invalid operation") ) diff --git a/network/network_test.go b/network/network_test.go index c2304028636c..5840ee4a2399 100644 --- a/network/network_test.go +++ b/network/network_test.go @@ -39,7 +39,7 @@ func (l *testListener) Accept() (net.Conn, error) { select { case c := <-l.inbound: return c, nil - case _, _ = <-l.closed: + case <-l.closed: return nil, errClosed } } @@ -102,7 +102,7 @@ func (c *testConn) Read(b []byte) (int, error) { return 0, errClosed } c.partialRead = read - case _, _ = <-c.closed: + case <-c.closed: return 0, errClosed } } @@ -122,7 +122,7 @@ func (c *testConn) Write(b []byte) (int, error) { select { case c.pendingWrites <- newB: - case _, _ = <-c.closed: + case <-c.closed: return 0, errClosed } diff --git a/network/peer.go b/network/peer.go index 9ac59a6f4a79..9e880814329d 100644 --- a/network/peer.go +++ b/network/peer.go @@ -4,7 +4,6 @@ package network import ( - "bytes" "math" "net" "sync" @@ -488,7 +487,7 @@ func (p *peer) version(msg Msg) { if err == nil { // If we have no clue what the peer's IP is, we can't perform any // verification - if bytes.Equal(peerIP.IP, localPeerIP.IP) { + if peerIP.IP.Equal(localPeerIP.IP) { // if the IPs match, add this ip:port pair to be tracked p.net.stateLock.Lock() p.ip = peerIP diff --git a/node/config.go b/node/config.go index 0bd595c13b7d..5e6ad9f8dcec 100644 --- a/node/config.go +++ b/node/config.go @@ -23,7 +23,7 @@ type Config struct { // Transaction fee configuration TxFee uint64 - // Minimum stake, in nAVAX, required to validate the Default Subnet + // Minimum stake, in nAVAX, required to validate the primary network MinStake uint64 // Assertions configuration @@ -36,15 +36,16 @@ type Config struct { DB database.Database // Staking configuration - StakingIP utils.IPDesc - StakingLocalPort uint16 - EnableP2PTLS bool - EnableStaking bool - StakingKeyFile string - StakingCertFile string - DisabledStakingWeight uint64 - StakerMsgPortion float64 - StakerCPUPortion float64 + StakingIP utils.IPDesc + StakingLocalPort uint16 + EnableP2PTLS bool + EnableStaking bool + StakingKeyFile string + StakingCertFile string + DisabledStakingWeight uint64 + MaxNonStakerPendingMsgs uint + StakerMsgPortion float64 + StakerCPUPortion float64 // Bootstrapping configuration BootstrapPeers []*Peer diff --git a/node/node.go b/node/node.go index 213367cbf875..d758df5c6282 100644 --- a/node/node.go +++ b/node/node.go @@ -63,7 +63,7 @@ var ( genesisHashKey = []byte("genesisID") // Version is the version of this code - Version = version.NewDefaultVersion("avalanche", 0, 6, 5) + Version = version.NewDefaultVersion("avalanche", 0, 7, 0) versionParser = version.NewDefaultParser() ) @@ -140,9 +140,9 @@ func (n *Node) initNetworking() error { tlsConfig := &tls.Config{ Certificates: []tls.Certificate{cert}, ClientAuth: tls.RequireAnyClientCert, - // We do not use TLS's CA functionality, we just require an - // authenticated channel. Therefore, we can safely skip verification - // here. + // We do not use TLS's CA functionality to authenticate a hostname. + // We only require an authenticated channel based on the peer's + // public key. Therefore, we can safely skip CA verification. 
// // TODO: Security audit required InsecureSkipVerify: true, @@ -155,10 +155,12 @@ func (n *Node) initNetworking() error { clientUpgrader = network.NewIPUpgrader() } - // Initialize validator manager and default subnet's validator set - defaultSubnetValidators := validators.NewSet() + // Initialize validator manager and primary network's validator set + primaryNetworkValidators := validators.NewSet() n.vdrs = validators.NewManager() - n.vdrs.PutValidatorSet(constants.DefaultSubnetID, defaultSubnetValidators) + if err := n.vdrs.Set(constants.PrimaryNetworkID, primaryNetworkValidators); err != nil { + return err + } n.Net = network.NewDefaultNetwork( n.Config.ConsensusParams.Metrics, @@ -172,14 +174,14 @@ func (n *Node) initNetworking() error { dialer, serverUpgrader, clientUpgrader, - defaultSubnetValidators, + primaryNetworkValidators, n.beacons, n.Config.ConsensusRouter, ) if !n.Config.EnableStaking { n.Net.RegisterHandler(&insecureValidatorManager{ - vdrs: defaultSubnetValidators, + vdrs: primaryNetworkValidators, weight: n.Config.DisabledStakingWeight, }) } @@ -198,14 +200,14 @@ type insecureValidatorManager struct { } func (i *insecureValidatorManager) Connected(vdrID ids.ShortID) bool { - _ = i.vdrs.Add(validators.NewValidator(vdrID, i.weight)) + _ = i.vdrs.AddWeight(vdrID, i.weight) return false } func (i *insecureValidatorManager) Disconnected(vdrID ids.ShortID) bool { // Shouldn't error unless the set previously had an error, which should // never happen as described above - _ = i.vdrs.Remove(vdrID) + _ = i.vdrs.RemoveWeight(vdrID, i.weight) return false } @@ -313,7 +315,7 @@ func (n *Node) initNodeID() error { func (n *Node) initBeacons() error { n.beacons = validators.NewSet() for _, peer := range n.Config.BootstrapPeers { - if err := n.beacons.Add(validators.NewValidator(peer.ID, 1)); err != nil { + if err := n.beacons.AddWeight(peer.ID, 1); err != nil { return err } } @@ -356,13 +358,14 @@ func (n *Node) initChains(genesisBytes []byte, avaxAssetID ids.ID) error { // Create the Platform Chain n.chainManager.ForceCreateChain(chains.ChainParameters{ ID: constants.PlatformChainID, - SubnetID: constants.DefaultSubnetID, + SubnetID: constants.PrimaryNetworkID, GenesisData: genesisBytes, // Specifies other chains to create VMAlias: platformvm.ID.String(), CustomBeacons: n.beacons, }) bootstrapWeight := n.beacons.Weight() + reqWeight := (3*bootstrapWeight + 3) / 4 if reqWeight == 0 { @@ -411,6 +414,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { n.chainManager, err = chains.New( n.Config.EnableStaking, + n.Config.MaxNonStakerPendingMsgs, n.Config.StakerMsgPortion, n.Config.StakerCPUPortion, n.Log, @@ -442,12 +446,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { // Instead of updating node's validator manager, platform chain makes changes // to its own local validator manager (which isn't used for sampling) if !n.Config.EnableStaking { - defaultSubnetValidators := validators.NewSet() - if err := defaultSubnetValidators.Add(validators.NewValidator(n.ID, 1)); err != nil { - return fmt.Errorf("couldn't add validator to Default Subnet: %w", err) - } vdrs = validators.NewManager() - vdrs.PutValidatorSet(constants.DefaultSubnetID, defaultSubnetValidators) } errs := wrappers.Errs{} @@ -706,8 +705,11 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg if err := n.initHealthAPI(); err != nil { // Start the Health API return fmt.Errorf("couldn't initialize health API: %w", err) } + if err := n.initIPCs(); err != nil { // Start the 
IPCs + return fmt.Errorf("couldn't initialize IPCs: %w", err) + } if err := n.initIPCAPI(); err != nil { // Start the IPC API - return fmt.Errorf("couldn't initialize ipc API: %w", err) + return fmt.Errorf("couldn't initialize the IPC API: %w", err) } if err := n.initAliases(); err != nil { // Set up aliases return fmt.Errorf("couldn't initialize aliases: %w", err) @@ -715,9 +717,6 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg if err := n.initChains(genesisBytes, avaxAssetID); err != nil { // Start the Platform chain return fmt.Errorf("couldn't initialize chains: %w", err) } - if err := n.initIPCs(); err != nil { // Start the IPCs - return fmt.Errorf("couldn't initialize IPCs: %w", err) - } return nil } diff --git a/scripts/build_coreth.sh b/scripts/build_coreth.sh index dedb497c8336..538db1fe6e34 100755 --- a/scripts/build_coreth.sh +++ b/scripts/build_coreth.sh @@ -12,7 +12,7 @@ BUILD_DIR="$GECKO_PATH/build" # Where binaries go PLUGIN_DIR="$BUILD_DIR/plugins" # Where plugin binaries (namely coreth) go BINARY_PATH="$PLUGIN_DIR/evm" -CORETH_VER="0.2.12-rc.1" # Should match coreth version in go.mod +CORETH_VER="0.2.13" # Should match coreth version in go.mod CORETH_PATH="$GOPATH/pkg/mod/github.com/ava-labs/coreth@v$CORETH_VER" if [[ $# -eq 2 ]]; then diff --git a/snow/choices/test_decidable.go b/snow/choices/test_decidable.go index 8d90c7b77395..1643e619bff9 100644 --- a/snow/choices/test_decidable.go +++ b/snow/choices/test_decidable.go @@ -23,7 +23,7 @@ func (d *TestDecidable) ID() ids.ID { return d.IDV } func (d *TestDecidable) Accept() error { switch d.StatusV { case Unknown, Rejected: - return fmt.Errorf("Invalid state transaition from %s to %s", + return fmt.Errorf("invalid state transaition from %s to %s", d.StatusV, Accepted) default: d.StatusV = Accepted @@ -35,7 +35,7 @@ func (d *TestDecidable) Accept() error { func (d *TestDecidable) Reject() error { switch d.StatusV { case Unknown, Accepted: - return fmt.Errorf("Invalid state transaition from %s to %s", + return fmt.Errorf("invalid state transaition from %s to %s", d.StatusV, Rejected) default: d.StatusV = Rejected diff --git a/snow/consensus/avalanche/consensus.go b/snow/consensus/avalanche/consensus.go index a4ee7c52003b..154ddf5a9653 100644 --- a/snow/consensus/avalanche/consensus.go +++ b/snow/consensus/avalanche/consensus.go @@ -23,7 +23,7 @@ type Consensus interface { // called, the status maps should be immediately updated accordingly. // Assumes each element in the accepted frontier will return accepted from // the join status map. 
- Initialize(*snow.Context, Parameters, []Vertex) + Initialize(*snow.Context, Parameters, []Vertex) error // Returns the parameters that describe this avalanche instance Parameters() Parameters diff --git a/snow/consensus/avalanche/consensus_test.go b/snow/consensus/avalanche/consensus_test.go index 9cbddbe8f4ac..f3851d43dace 100644 --- a/snow/consensus/avalanche/consensus_test.go +++ b/snow/consensus/avalanche/consensus_test.go @@ -19,8 +19,7 @@ import ( ) var ( - Genesis = ids.GenerateTestID() - Tests = []func(*testing.T, Factory){ + Tests = []func(*testing.T, Factory){ MetricsTest, ParamsTest, AddTest, @@ -122,18 +121,21 @@ func ParamsTest(t *testing.T, factory Factory) { ctx := snow.DefaultContextTest() params := Parameters{ Parameters: snowball.Parameters{ - Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID.String()), - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID.String()), + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, } - avl.Initialize(ctx, params, nil) + if err := avl.Initialize(ctx, params, nil); err != nil { + t.Fatal(err) + } if p := avl.Parameters(); p.K != params.K { t.Fatalf("Wrong K parameter") @@ -153,11 +155,12 @@ func AddTest(t *testing.T, factory Factory) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -174,7 +177,9 @@ func AddTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID()} - avl.Initialize(snow.DefaultContextTest(), params, vts) + if err := avl.Initialize(snow.DefaultContextTest(), params, vts); err != nil { + t.Fatal(err) + } if !avl.Finalized() { t.Fatalf("An empty avalanche instance is not finalized") @@ -248,11 +253,12 @@ func VertexIssuedTest(t *testing.T, factory Factory) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -269,7 +275,9 @@ func VertexIssuedTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID()} - avl.Initialize(snow.DefaultContextTest(), params, vts) + if err := avl.Initialize(snow.DefaultContextTest(), params, vts); err != nil { + t.Fatal(err) + } if !avl.VertexIssued(vts[0]) { t.Fatalf("Genesis Vertex not reported as issued") @@ -305,11 +313,12 @@ func TxIssuedTest(t *testing.T, factory Factory) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -334,7 +343,9 @@ func TxIssuedTest(t *testing.T, factory Factory) { }} tx1.InputIDsV.Add(utxos[0]) - avl.Initialize(snow.DefaultContextTest(), params, vts) + if err := avl.Initialize(snow.DefaultContextTest(), params, vts); err != nil { + t.Fatal(err) + } if !avl.TxIssued(tx0) { t.Fatalf("Genesis Tx not reported as issued") @@ -675,11 +686,12 @@ func IgnoreInvalidVotingTest(t *testing.T, factory Factory) { params := Parameters{ 
Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -697,7 +709,9 @@ func IgnoreInvalidVotingTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID()} - avl.Initialize(snow.DefaultContextTest(), params, vts) + if err := avl.Initialize(snow.DefaultContextTest(), params, vts); err != nil { + t.Fatal(err) + } tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), diff --git a/snow/consensus/avalanche/metrics.go b/snow/consensus/avalanche/metrics.go index 5caaaf767fc6..b599bdb48d39 100644 --- a/snow/consensus/avalanche/metrics.go +++ b/snow/consensus/avalanche/metrics.go @@ -26,35 +26,32 @@ type metrics struct { func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) error { m.processing = make(map[[32]byte]time.Time) - m.numProcessing = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "vtx_processing", - Help: "Number of currently processing vertices", - }) - m.latAccepted = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "vtx_accepted", - Help: "Latency of accepting from the time the vertex was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) - m.latRejected = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "vtx_rejected", - Help: "Latency of rejecting from the time the vertex was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) + m.numProcessing = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "vtx_processing", + Help: "Number of currently processing vertices", + }) + m.latAccepted = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "vtx_accepted", + Help: "Latency of accepting from the time the vertex was issued in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) + m.latRejected = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "vtx_rejected", + Help: "Latency of rejecting from the time the vertex was issued in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) if err := registerer.Register(m.numProcessing); err != nil { - return fmt.Errorf("Failed to register vtx_processing statistics due to %w", err) + return fmt.Errorf("failed to register vtx_processing statistics due to %w", err) } if err := registerer.Register(m.latAccepted); err != nil { - return fmt.Errorf("Failed to register vtx_accepted statistics due to %w", err) + return fmt.Errorf("failed to register vtx_accepted statistics due to %w", err) } if err := registerer.Register(m.latRejected); err != nil { - return fmt.Errorf("Failed to register vtx_rejected statistics due to %w", err) + return fmt.Errorf("failed to register vtx_rejected statistics due to %w", err) } return nil } diff --git a/snow/consensus/avalanche/poll/set.go b/snow/consensus/avalanche/poll/set.go index 19012bdc7e34..21df5a28e7bb 100644 --- a/snow/consensus/avalanche/poll/set.go +++ b/snow/consensus/avalanche/poll/set.go @@ -112,7 +112,7 @@ func (s *set) Vote( s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) delete(s.polls, requestID) // remove the poll from the current set - s.durPolls.Observe(float64(time.Now().Sub(poll.start).Milliseconds())) + 
s.durPolls.Observe(float64(time.Since(poll.start).Milliseconds())) s.numPolls.Dec() // decrease the metrics return poll.Result(), true } diff --git a/snow/consensus/avalanche/topological.go b/snow/consensus/avalanche/topological.go index a0447c03ff5a..eca9162702d3 100644 --- a/snow/consensus/avalanche/topological.go +++ b/snow/consensus/avalanche/topological.go @@ -59,26 +59,34 @@ type kahnNode struct { } // Initialize implements the Avalanche interface -func (ta *Topological) Initialize(ctx *snow.Context, params Parameters, frontier []Vertex) { - ctx.Log.AssertDeferredNoError(params.Valid) +func (ta *Topological) Initialize( + ctx *snow.Context, + params Parameters, + frontier []Vertex, +) error { + if err := params.Valid(); err != nil { + return err + } ta.ctx = ctx ta.params = params if err := ta.metrics.Initialize(ctx.Log, params.Namespace, params.Metrics); err != nil { - ta.ctx.Log.Error("%s", err) + return err } ta.nodes = make(map[[32]byte]Vertex, minMapSize) ta.cg = &snowstorm.Directed{} - ta.cg.Initialize(ctx, params.Parameters) + if err := ta.cg.Initialize(ctx, params.Parameters); err != nil { + return err + } ta.frontier = make(map[[32]byte]Vertex, minMapSize) for _, vtx := range frontier { ta.frontier[vtx.ID().Key()] = vtx } - ctx.Log.AssertNoError(ta.updateFrontiers()) + return ta.updateFrontiers() } // Parameters implements the Avalanche interface diff --git a/snow/consensus/snowball/consensus_test.go b/snow/consensus/snowball/consensus_test.go index 304bc19d7edf..334999d99594 100644 --- a/snow/consensus/snowball/consensus_test.go +++ b/snow/consensus/snowball/consensus_test.go @@ -11,12 +11,6 @@ import ( "github.com/ava-labs/gecko/ids" ) -// ByzantineFactory implements Factory by returning a byzantine struct -type ByzantineFactory struct{} - -// New implements Factory -func (ByzantineFactory) New() Consensus { return &Byzantine{} } - // Byzantine is a naive implementation of a multi-choice snowball instance type Byzantine struct { // params contains all the configurations of a snowball instance diff --git a/snow/consensus/snowman/metrics.go b/snow/consensus/snowman/metrics.go index 81f7989ea47f..d78d9d7af7dd 100644 --- a/snow/consensus/snowman/metrics.go +++ b/snow/consensus/snowman/metrics.go @@ -26,35 +26,32 @@ type metrics struct { func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) error { m.processing = make(map[[32]byte]time.Time) - m.numProcessing = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "processing", - Help: "Number of currently processing blocks", - }) - m.latAccepted = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "accepted", - Help: "Latency of accepting from the time the block was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) - m.latRejected = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "rejected", - Help: "Latency of rejecting from the time the block was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) + m.numProcessing = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "processing", + Help: "Number of currently processing blocks", + }) + m.latAccepted = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "accepted", + Help: "Latency of accepting from the time the block was issued in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) + m.latRejected = 
prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "rejected", + Help: "Latency of rejecting from the time the block was issued in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) if err := registerer.Register(m.numProcessing); err != nil { - return fmt.Errorf("Failed to register processing statistics due to %w", err) + return fmt.Errorf("failed to register processing statistics due to %w", err) } if err := registerer.Register(m.latAccepted); err != nil { - return fmt.Errorf("Failed to register accepted statistics due to %w", err) + return fmt.Errorf("failed to register accepted statistics due to %w", err) } if err := registerer.Register(m.latRejected); err != nil { - return fmt.Errorf("Failed to register rejected statistics due to %w", err) + return fmt.Errorf("failed to register rejected statistics due to %w", err) } return nil } diff --git a/snow/consensus/snowman/poll/set.go b/snow/consensus/snowman/poll/set.go index 31dc89e65d1a..f419f68f90a4 100644 --- a/snow/consensus/snowman/poll/set.go +++ b/snow/consensus/snowman/poll/set.go @@ -112,7 +112,7 @@ func (s *set) Vote( s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) delete(s.polls, requestID) // remove the poll from the current set - s.durPolls.Observe(float64(time.Now().Sub(poll.start).Milliseconds())) + s.durPolls.Observe(float64(time.Since(poll.start).Milliseconds())) s.numPolls.Dec() // decrease the metrics return poll.Result(), true } @@ -140,7 +140,7 @@ func (s *set) Drop(requestID uint32, vdr ids.ShortID) (ids.Bag, bool) { s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) delete(s.polls, requestID) // remove the poll from the current set - s.durPolls.Observe(float64(time.Now().Sub(poll.start).Milliseconds())) + s.durPolls.Observe(float64(time.Since(poll.start).Milliseconds())) s.numPolls.Dec() // decrease the metrics return poll.Result(), true } diff --git a/snow/consensus/snowstorm/benchmark_test.go b/snow/consensus/snowstorm/benchmark_test.go index 914167dd14c9..58e25ca0ba50 100644 --- a/snow/consensus/snowstorm/benchmark_test.go +++ b/snow/consensus/snowstorm/benchmark_test.go @@ -9,12 +9,12 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/gecko/snow/consensus/snowball" + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) func Simulate( numColors, colorsPerConsumer, maxInputConflicts, numNodes int, - params snowball.Parameters, + params sbcon.Parameters, seed int64, fact Factory, ) { @@ -53,7 +53,7 @@ func BenchmarkVirtuousDirected(b *testing.B) { /*colorsPerConsumer=*/ 1, /*maxInputConflicts=*/ 1, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -73,7 +73,7 @@ func BenchmarkVirtuousInput(b *testing.B) { /*colorsPerConsumer=*/ 1, /*maxInputConflicts=*/ 1, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -99,7 +99,7 @@ func BenchmarkRogueDirected(b *testing.B) { /*colorsPerConsumer=*/ 1, /*maxInputConflicts=*/ 3, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -119,7 +119,7 @@ func BenchmarkRogueInput(b *testing.B) { /*colorsPerConsumer=*/ 1, /*maxInputConflicts=*/ 3, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -145,7 +145,7 @@ func 
BenchmarkMultiDirected(b *testing.B) { /*colorsPerConsumer=*/ 10, /*maxInputConflicts=*/ 1, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -165,7 +165,7 @@ func BenchmarkMultiInput(b *testing.B) { /*colorsPerConsumer=*/ 10, /*maxInputConflicts=*/ 1, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -191,7 +191,7 @@ func BenchmarkMultiRogueDirected(b *testing.B) { /*colorsPerConsumer=*/ 10, /*maxInputConflicts=*/ 3, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -211,7 +211,7 @@ func BenchmarkMultiRogueInput(b *testing.B) { /*colorsPerConsumer=*/ 10, /*maxInputConflicts=*/ 3, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, diff --git a/snow/consensus/snowstorm/common.go b/snow/consensus/snowstorm/common.go index a64685dc2217..b49b34f02006 100644 --- a/snow/consensus/snowstorm/common.go +++ b/snow/consensus/snowstorm/common.go @@ -4,11 +4,19 @@ package snowstorm import ( + "bytes" + "fmt" + "sort" + "strings" + "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/snow/events" + "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/wrappers" + + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) type common struct { @@ -19,7 +27,7 @@ type common struct { ctx *snow.Context // params describes how this instance was parameterized - params snowball.Parameters + params sbcon.Parameters // each element of preferences is the ID of a transaction that is preferred preferences ids.Set @@ -44,19 +52,18 @@ type common struct { } // Initialize implements the ConflictGraph interface -func (c *common) Initialize(ctx *snow.Context, params snowball.Parameters) { - ctx.Log.AssertDeferredNoError(params.Valid) - +func (c *common) Initialize(ctx *snow.Context, params sbcon.Parameters) error { c.ctx = ctx c.params = params if err := c.metrics.Initialize(params.Namespace, params.Metrics); err != nil { - ctx.Log.Error("failed to initialize metrics: %s", err) + return fmt.Errorf("failed to initialize metrics: %s", err) } + return params.Valid() } // Parameters implements the Snowstorm interface -func (c *common) Parameters() snowball.Parameters { return c.params } +func (c *common) Parameters() sbcon.Parameters { return c.params } // Virtuous implements the ConflictGraph interface func (c *common) Virtuous() ids.Set { return c.virtuous } @@ -79,3 +86,247 @@ func (c *common) Finalized() bool { numPreferences) return numPreferences == 0 } + +// shouldVote returns if the provided tx should be voted on to determine if it +// can be accepted. If the tx can be vacuously accepted, the tx will be accepted +// and will therefore not be valid to be voted on. +func (c *common) shouldVote(con Consensus, tx Tx) (bool, error) { + if con.Issued(tx) { + // If the tx was previously inserted, it shouldn't be re-inserted. + return false, nil + } + + txID := tx.ID() + bytes := tx.Bytes() + + // Notify the IPC socket that this tx has been issued. + c.ctx.DecisionDispatcher.Issue(c.ctx.ChainID, txID, bytes) + + // Notify the metrics that this transaction is being issued. 
+ c.metrics.Issued(txID) + + // If this tx has inputs, it needs to be voted on before being accepted. + if inputs := tx.InputIDs(); inputs.Len() != 0 { + return true, nil + } + + // Since this tx doesn't have any inputs, it's impossible for there to be + // any conflicting transactions. Therefore, this transaction is treated as + // vacuously accepted and doesn't need to be voted on. + + // Accept is called before notifying the IPC so that acceptances that + // cause fatal errors aren't sent to an IPC peer. + if err := tx.Accept(); err != nil { + return false, err + } + + // Notify the IPC socket that this tx has been accepted. + c.ctx.DecisionDispatcher.Accept(c.ctx.ChainID, txID, bytes) + + // Notify the metrics that this transaction was just accepted. + c.metrics.Accepted(txID) + return false, nil +} + +// accept the provided tx. +func (c *common) acceptTx(tx Tx) error { + // Accept is called before notifying the IPC so that acceptances that cause + // fatal errors aren't sent to an IPC peer. + if err := tx.Accept(); err != nil { + return err + } + + txID := tx.ID() + + // Notify the IPC socket that this tx has been accepted. + c.ctx.DecisionDispatcher.Accept(c.ctx.ChainID, txID, tx.Bytes()) + + // Update the metrics to account for this transaction's acceptance + c.metrics.Accepted(txID) + + // If there is a tx that was accepted pending on this tx, the ancestor + // should be notified that it doesn't need to block on this tx anymore. + c.pendingAccept.Fulfill(txID) + // If there is a tx that was issued pending on this tx, the ancestor tx + // doesn't need to be rejected because of this tx. + c.pendingReject.Abandon(txID) + return nil +} + +// reject the provided tx. +func (c *common) rejectTx(tx Tx) error { + // Reject is called before notifying the IPC so that rejections that + // cause fatal errors aren't sent to an IPC peer. + if err := tx.Reject(); err != nil { + return err + } + + txID := tx.ID() + + // Notify the IPC that the tx was rejected + c.ctx.DecisionDispatcher.Reject(c.ctx.ChainID, txID, tx.Bytes()) + + // Update the metrics to account for this transaction's rejection + c.metrics.Rejected(txID) + + // If there is a tx that was accepted pending on this tx, the ancestor + // tx can't be accepted. + c.pendingAccept.Abandon(txID) + // If there is a tx that was issued pending on this tx, the ancestor tx + // must be rejected. + c.pendingReject.Fulfill(txID) + return nil +} + +// registerAcceptor attempts to accept this tx once all its dependencies are +// accepted. If all the dependencies are already accepted, this function will +// immediately accept the tx. +func (c *common) registerAcceptor(con Consensus, tx Tx) { + txID := tx.ID() + + toAccept := &acceptor{ + g: con, + errs: &c.errs, + txID: txID, + } + + for _, dependency := range tx.Dependencies() { + if dependency.Status() != choices.Accepted { + // If the dependency isn't accepted, then it must be processing. + // This tx should be accepted after this tx is accepted. Note that + // the dependencies can't already be rejected, because it is assumed + // that this tx is currently considered valid. + toAccept.deps.Add(dependency.ID()) + } + } + + // This tx is no longer being voted on, so we remove it from the voting set. + // This ensures that virtuous txs built on top of rogue txs don't force the + // node to treat the rogue tx as virtuous. + c.virtuousVoting.Remove(txID) + c.pendingAccept.Register(toAccept) +} + +// registerRejector rejects this tx if any of its dependencies are rejected. 
+func (c *common) registerRejector(con Consensus, tx Tx) { + // If a tx that this tx depends on is rejected, this tx should also be + // rejected. + toReject := &rejector{ + g: con, + errs: &c.errs, + txID: tx.ID(), + } + + // Register all of this tx's dependencies as possibilities to reject this tx. + for _, dependency := range tx.Dependencies() { + if dependency.Status() != choices.Accepted { + // If the dependency isn't accepted, then it must be processing. So, + // this tx should be rejected if any of these processing txs are + // rejected. Note that the dependencies can't already be rejected, + // because it is assumed that this tx is currently considered valid. + toReject.deps.Add(dependency.ID()) + } + } + + // Register these dependencies + c.pendingReject.Register(toReject) +} + +// acceptor implements Blockable +type acceptor struct { + g Consensus + errs *wrappers.Errs + deps ids.Set + rejected bool + txID ids.ID +} + +func (a *acceptor) Dependencies() ids.Set { return a.deps } + +func (a *acceptor) Fulfill(id ids.ID) { + a.deps.Remove(id) + a.Update() +} + +func (a *acceptor) Abandon(id ids.ID) { a.rejected = true } + +func (a *acceptor) Update() { + // If I was rejected or I am still waiting on dependencies to finish or an + // error has occurred, I shouldn't do anything. + if a.rejected || a.deps.Len() != 0 || a.errs.Errored() { + return + } + a.errs.Add(a.g.accept(a.txID)) +} + +// rejector implements Blockable +type rejector struct { + g Consensus + errs *wrappers.Errs + deps ids.Set + rejected bool // true if the tx has been rejected + txID ids.ID +} + +func (r *rejector) Dependencies() ids.Set { return r.deps } + +func (r *rejector) Fulfill(ids.ID) { + if r.rejected || r.errs.Errored() { + return + } + r.rejected = true + r.errs.Add(r.g.reject(r.txID)) +} + +func (*rejector) Abandon(ids.ID) {} +func (*rejector) Update() {} + +type snowballNode struct { + txID ids.ID + numSuccessfulPolls int + confidence int +} + +func (sb *snowballNode) String() string { + return fmt.Sprintf( + "SB(NumSuccessfulPolls = %d, Confidence = %d)", + sb.numSuccessfulPolls, + sb.confidence) +} + +type sortSnowballNodeData []*snowballNode + +func (sb sortSnowballNodeData) Less(i, j int) bool { + return bytes.Compare(sb[i].txID.Bytes(), sb[j].txID.Bytes()) == -1 +} +func (sb sortSnowballNodeData) Len() int { return len(sb) } +func (sb sortSnowballNodeData) Swap(i, j int) { sb[j], sb[i] = sb[i], sb[j] } + +func sortSnowballNodes(nodes []*snowballNode) { + sort.Sort(sortSnowballNodeData(nodes)) +} + +// ConsensusString converts a list of snowball nodes into a human-readable +// string.
+func ConsensusString(name string, nodes []*snowballNode) string { + // Sort the nodes so that the string representation is canonical + sortSnowballNodes(nodes) + + sb := strings.Builder{} + sb.WriteString(name) + sb.WriteString("(") + + format := fmt.Sprintf( + "\n Choice[%s] = ID: %%50s SB(NumSuccessfulPolls = %%d, Confidence = %%d)", + formatting.IntFormat(len(nodes)-1)) + for i, txNode := range nodes { + sb.WriteString(fmt.Sprintf(format, + i, txNode.txID, txNode.numSuccessfulPolls, txNode.confidence)) + } + + if len(nodes) > 0 { + sb.WriteString("\n") + } + sb.WriteString(")") + return sb.String() +} diff --git a/snow/consensus/snowstorm/consensus.go b/snow/consensus/snowstorm/consensus.go index 7f05c34bf845..9cb746fd65ec 100644 --- a/snow/consensus/snowstorm/consensus.go +++ b/snow/consensus/snowstorm/consensus.go @@ -8,7 +8,8 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/snow/consensus/snowball" + + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) // Consensus is a snowball instance deciding between an unbounded number of @@ -18,10 +19,10 @@ type Consensus interface { fmt.Stringer // Takes in the context, alpha, betaVirtuous, and betaRogue - Initialize(*snow.Context, snowball.Parameters) + Initialize(*snow.Context, sbcon.Parameters) error // Returns the parameters that describe this snowstorm instance - Parameters() snowball.Parameters + Parameters() sbcon.Parameters // Returns true if transaction is virtuous. // That is, no transaction has been added that conflicts with @@ -58,4 +59,10 @@ type Consensus interface { // possible that after returning finalized, a new decision may be added such // that this instance is no longer finalized. Finalized() bool + + // Accept the provided tx and remove it from the graph + accept(txID ids.ID) error + + // Reject all the provided txs and remove them from the graph + reject(txIDs ...ids.ID) error } diff --git a/snow/consensus/snowstorm/consensus_test.go b/snow/consensus/snowstorm/consensus_test.go index f24dece42d7d..287b27a6699c 100644 --- a/snow/consensus/snowstorm/consensus_test.go +++ b/snow/consensus/snowstorm/consensus_test.go @@ -9,10 +9,13 @@ import ( "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/snow/consensus/snowball" + + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) var ( @@ -28,6 +31,7 @@ var ( IsVirtuousTest, QuiesceTest, AcceptingDependencyTest, + AcceptingSlowDependencyTest, RejectingDependencyTest, VacuouslyAcceptedTest, ConflictsTest, @@ -36,6 +40,7 @@ var ( ErrorOnAcceptedTest, ErrorOnRejectingLowerConfidenceConflictTest, ErrorOnRejectingHigherConfidenceConflictTest, + UTXOCleanupTest, } Red, Green, Blue, Alpha *TestTx @@ -87,9 +92,13 @@ func MetricsTest(t *testing.T, factory Factory) { Setup() { - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_processing", @@ -98,9 +107,13 @@ func MetricsTest(t *testing.T, factory Factory) { graph.Initialize(snow.DefaultContextTest(), params) } { - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2,
BetaVirtuous: 1, BetaRogue: 2, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_accepted", @@ -109,9 +122,13 @@ func MetricsTest(t *testing.T, factory Factory) { graph.Initialize(snow.DefaultContextTest(), params) } { - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_rejected", @@ -126,9 +143,13 @@ func ParamsTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -148,9 +169,13 @@ func IssuedTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -174,9 +199,13 @@ func LeftoverInputTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -195,8 +224,10 @@ func LeftoverInputTest(t *testing.T, factory Factory) { r := ids.Bag{} r.SetThreshold(2) r.AddCount(Red.ID(), 2) - if _, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(r); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if !graph.Finalized() { @@ -213,9 +244,13 @@ func LowerConfidenceTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -236,8 +271,10 @@ func LowerConfidenceTest(t *testing.T, factory Factory) { r := ids.Bag{} r.SetThreshold(2) r.AddCount(Red.ID(), 2) - if _, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(r); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 1 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Blue.ID()) { @@ -252,9 +289,13 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 
@@ -252,9 +289,13 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) {
 	graph := factory.New()
 
-	params := snowball.Parameters{
-		Metrics: prometheus.NewRegistry(),
-		K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1,
+	params := sbcon.Parameters{
+		Metrics:           prometheus.NewRegistry(),
+		K:                 2,
+		Alpha:             2,
+		BetaVirtuous:      1,
+		BetaRogue:         1,
+		ConcurrentRepolls: 1,
 	}
 
 	graph.Initialize(snow.DefaultContextTest(), params)
@@ -279,8 +320,10 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) {
 	r := ids.Bag{}
 	r.SetThreshold(2)
 	r.AddCount(Red.ID(), 2)
-	if _, err := graph.RecordPoll(r); err != nil {
+	if updated, err := graph.RecordPoll(r); err != nil {
 		t.Fatal(err)
+	} else if !updated {
+		t.Fatalf("Should have updated the frontiers")
 	} else if prefs := graph.Preferences(); prefs.Len() != 1 {
 		t.Fatalf("Wrong number of preferences.")
 	} else if !prefs.Contains(Alpha.ID()) {
@@ -295,9 +338,13 @@ func IndependentTest(t *testing.T, factory Factory) {
 	graph := factory.New()
 
-	params := snowball.Parameters{
-		Metrics: prometheus.NewRegistry(),
-		K: 2, Alpha: 2, BetaVirtuous: 2, BetaRogue: 2,
+	params := sbcon.Parameters{
+		Metrics:           prometheus.NewRegistry(),
+		K:                 2,
+		Alpha:             2,
+		BetaVirtuous:      2,
+		BetaRogue:         2,
+		ConcurrentRepolls: 1,
 	}
 
 	graph.Initialize(snow.DefaultContextTest(), params)
@@ -319,8 +366,10 @@ func IndependentTest(t *testing.T, factory Factory) {
 	ra.SetThreshold(2)
 	ra.AddCount(Red.ID(), 2)
 	ra.AddCount(Alpha.ID(), 2)
-	if _, err := graph.RecordPoll(ra); err != nil {
+	if updated, err := graph.RecordPoll(ra); err != nil {
 		t.Fatal(err)
+	} else if updated {
+		t.Fatalf("Shouldn't have updated the frontiers")
 	} else if prefs := graph.Preferences(); prefs.Len() != 2 {
 		t.Fatalf("Wrong number of preferences.")
 	} else if !prefs.Contains(Red.ID()) {
@@ -329,8 +378,10 @@ func IndependentTest(t *testing.T, factory Factory) {
 		t.Fatalf("Wrong preference. Expected %s", Alpha.ID())
 	} else if graph.Finalized() {
 		t.Fatalf("Finalized too early")
-	} else if _, err := graph.RecordPoll(ra); err != nil {
+	} else if updated, err := graph.RecordPoll(ra); err != nil {
 		t.Fatal(err)
+	} else if !updated {
+		t.Fatalf("Should have updated the frontiers")
 	} else if prefs := graph.Preferences(); prefs.Len() != 0 {
 		t.Fatalf("Wrong number of preferences.")
 	} else if !graph.Finalized() {
@@ -343,9 +394,13 @@ func VirtuousTest(t *testing.T, factory Factory) {
 	graph := factory.New()
 
-	params := snowball.Parameters{
-		Metrics: prometheus.NewRegistry(),
-		K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1,
+	params := sbcon.Parameters{
+		Metrics:           prometheus.NewRegistry(),
+		K:                 2,
+		Alpha:             2,
+		BetaVirtuous:      1,
+		BetaRogue:         1,
+		ConcurrentRepolls: 1,
 	}
 
 	graph.Initialize(snow.DefaultContextTest(), params)
@@ -381,9 +436,13 @@ func IsVirtuousTest(t *testing.T, factory Factory) {
 	graph := factory.New()
 
-	params := snowball.Parameters{
-		Metrics: prometheus.NewRegistry(),
-		K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1,
+	params := sbcon.Parameters{
+		Metrics:           prometheus.NewRegistry(),
+		K:                 2,
+		Alpha:             2,
+		BetaVirtuous:      1,
+		BetaRogue:         1,
+		ConcurrentRepolls: 1,
 	}
 
 	graph.Initialize(snow.DefaultContextTest(), params)
@@ -421,9 +480,13 @@ func QuiesceTest(t *testing.T, factory Factory) {
 	graph := factory.New()
 
-	params := snowball.Parameters{
-		Metrics: prometheus.NewRegistry(),
-		K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1,
+	params := sbcon.Parameters{
+		Metrics:           prometheus.NewRegistry(),
+		K:                 2,
+		Alpha:             2,
+		BetaVirtuous:      1,
+		BetaRogue:         1,
+		ConcurrentRepolls: 1,
 	}
 
 	graph.Initialize(snow.DefaultContextTest(), params)
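A minimal standalone sketch of the caller contract these assertions pin down: RecordPoll now reports, alongside any error, whether preferences changed or a tx was accepted, which is the signal that the Avalanche frontiers must be recomputed. The types below are illustrative stand-ins, not the package's API.

package main

import "fmt"

// graphLike mimics the shape of the new RecordPoll contract.
type graphLike interface {
	RecordPoll(votes []string) (bool, error)
}

type fakeGraph struct{ polls int }

func (g *fakeGraph) RecordPoll(votes []string) (bool, error) {
	g.polls++
	// Pretend only the first poll moves the preferences.
	return g.polls == 1, nil
}

func main() {
	var g graphLike = &fakeGraph{}
	for i := 0; i < 2; i++ {
		updated, err := g.RecordPoll([]string{"tx1"})
		if err != nil {
			panic(err)
		}
		if updated {
			fmt.Println("preferences or acceptances changed; recompute frontiers")
		} else {
			fmt.Println("no change; cached frontiers stay valid")
		}
	}
}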
@@ -454,9 +517,134 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) {
 	}
 	purple.InputIDsV.Add(ids.Empty.Prefix(8))
 
-	params := snowball.Parameters{
-		Metrics: prometheus.NewRegistry(),
-		K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2,
+	params := sbcon.Parameters{
+		Metrics:           prometheus.NewRegistry(),
+		K:                 1,
+		Alpha:             1,
+		BetaVirtuous:      1,
+		BetaRogue:         2,
+		ConcurrentRepolls: 1,
+	}
+	graph.Initialize(snow.DefaultContextTest(), params)
+
+	if err := graph.Add(Red); err != nil {
+		t.Fatal(err)
+	} else if err := graph.Add(Green); err != nil {
+		t.Fatal(err)
+	} else if err := graph.Add(purple); err != nil {
+		t.Fatal(err)
+	} else if prefs := graph.Preferences(); prefs.Len() != 2 {
+		t.Fatalf("Wrong number of preferences.")
+	} else if !prefs.Contains(Red.ID()) {
+		t.Fatalf("Wrong preference. Expected %s", Red.ID())
+	} else if !prefs.Contains(purple.ID()) {
+		t.Fatalf("Wrong preference. Expected %s", purple.ID())
+	} else if Red.Status() != choices.Processing {
+		t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing)
+	} else if Green.Status() != choices.Processing {
+		t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing)
+	} else if purple.Status() != choices.Processing {
+		t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing)
+	}
+
+	g := ids.Bag{}
+	g.Add(Green.ID())
+	if updated, err := graph.RecordPoll(g); err != nil {
+		t.Fatal(err)
+	} else if !updated {
+		t.Fatalf("Should have updated the frontiers")
+	} else if prefs := graph.Preferences(); prefs.Len() != 2 {
+		t.Fatalf("Wrong number of preferences.")
+	} else if !prefs.Contains(Green.ID()) {
+		t.Fatalf("Wrong preference. Expected %s", Green.ID())
+	} else if !prefs.Contains(purple.ID()) {
+		t.Fatalf("Wrong preference. Expected %s", purple.ID())
+	} else if Red.Status() != choices.Processing {
+		t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing)
+	} else if Green.Status() != choices.Processing {
+		t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing)
+	} else if purple.Status() != choices.Processing {
+		t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing)
+	}
+
+	rp := ids.Bag{}
+	rp.Add(Red.ID(), purple.ID())
+	if updated, err := graph.RecordPoll(rp); err != nil {
+		t.Fatal(err)
+	} else if updated {
+		t.Fatalf("Shouldn't have updated the frontiers")
+	} else if prefs := graph.Preferences(); prefs.Len() != 2 {
+		t.Fatalf("Wrong number of preferences.")
+	} else if !prefs.Contains(Green.ID()) {
+		t.Fatalf("Wrong preference. Expected %s", Green.ID())
+	} else if !prefs.Contains(purple.ID()) {
+		t.Fatalf("Wrong preference. Expected %s", purple.ID())
+	} else if Red.Status() != choices.Processing {
+		t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing)
+	} else if Green.Status() != choices.Processing {
+		t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing)
+	} else if purple.Status() != choices.Processing {
+		t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing)
+	}
+
+	r := ids.Bag{}
+	r.Add(Red.ID())
+	if updated, err := graph.RecordPoll(r); err != nil {
+		t.Fatal(err)
+	} else if !updated {
+		t.Fatalf("Should have updated the frontiers")
+	} else if prefs := graph.Preferences(); prefs.Len() != 0 {
+		t.Fatalf("Wrong number of preferences.")
+	} else if Red.Status() != choices.Accepted {
+		t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Accepted)
+	} else if Green.Status() != choices.Rejected {
+		t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Rejected)
+	} else if purple.Status() != choices.Accepted {
%s should be %s", purple.ID(), choices.Accepted) + } +} + +type singleAcceptTx struct { + Tx + + t *testing.T + accepted bool +} + +func (tx *singleAcceptTx) Accept() error { + if tx.accepted { + tx.t.Fatalf("accept called multiple times") + } + tx.accepted = true + return tx.Tx.Accept() +} + +func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + rawPurple := &TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.Empty.Prefix(7), + StatusV: choices.Processing, + }, + DependenciesV: []Tx{Red}, + } + rawPurple.InputIDsV.Add(ids.Empty.Prefix(8)) + + purple := &singleAcceptTx{ + Tx: rawPurple, + t: t, + } + + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -482,8 +670,30 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { g := ids.Bag{} g.Add(Green.ID()) - if _, err := graph.RecordPoll(g); err != nil { + if updated, err := graph.RecordPoll(g); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") + } else if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Green.ID()) { + t.Fatalf("Wrong preference. Expected %s", Green.ID()) + } else if !prefs.Contains(purple.ID()) { + t.Fatalf("Wrong preference. Expected %s", purple.ID()) + } else if Red.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) + } else if Green.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) + } else if purple.Status() != choices.Processing { + t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Processing) + } + + p := ids.Bag{} + p.Add(purple.ID()) + if updated, err := graph.RecordPoll(p); err != nil { + t.Fatal(err) + } else if updated { + t.Fatalf("Shouldn't have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { @@ -500,8 +710,10 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { rp := ids.Bag{} rp.Add(Red.ID(), purple.ID()) - if _, err := graph.RecordPoll(rp); err != nil { + if updated, err := graph.RecordPoll(rp); err != nil { t.Fatal(err) + } else if updated { + t.Fatalf("Shouldn't have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { @@ -518,8 +730,10 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { r := ids.Bag{} r.Add(Red.ID()) - if _, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(r); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if Red.Status() != choices.Accepted { @@ -545,9 +759,13 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { } purple.InputIDsV.Add(ids.Empty.Prefix(8)) - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -577,8 +795,10 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { gp := ids.Bag{} gp.Add(Green.ID(), purple.ID()) - if _, err := graph.RecordPoll(gp); err != nil { + if updated, err := graph.RecordPoll(gp); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { @@ -593,8 +813,10 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. %s should be %s", Blue.ID(), choices.Processing) } else if purple.Status() != choices.Processing { t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Processing) - } else if _, err := graph.RecordPoll(gp); err != nil { + } else if updated, err := graph.RecordPoll(gp); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if Red.Status() != choices.Rejected { @@ -618,9 +840,13 @@ func VacuouslyAcceptedTest(t *testing.T, factory Factory) { StatusV: choices.Processing, }} - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -638,9 +864,13 @@ func ConflictsTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -688,9 +918,13 @@ func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -729,8 +963,10 @@ func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) { votes := ids.Bag{} votes.Add(rogue1.ID()) votes.Add(virtuous.ID()) - if _, err := graph.RecordPoll(votes); err != nil { + if updated, err := graph.RecordPoll(votes); err != nil { t.Fatal(err) + } else if updated { + t.Fatalf("Shouldn't have updated the frontiers") } else if status := rogue1.Status(); status != choices.Processing { t.Fatalf("Rogue Tx is %s expected %s", status, choices.Processing) } else if status := rogue2.Status(); status != choices.Processing { @@ -753,9 +989,13 @@ func ErrorOnVacuouslyAcceptedTest(t *testing.T, factory Factory) { StatusV: choices.Processing, }} - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -776,9 +1016,13 @@ func ErrorOnAcceptedTest(t *testing.T, factory Factory) { }} purple.InputIDsV.Add(ids.Empty.Prefix(4)) - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -813,9 +1057,13 @@ func ErrorOnRejectingLowerConfidenceConflictTest(t *testing.T, factory Factory) }} pink.InputIDsV.Add(X) - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 1, + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), 
@@ -852,9 +1100,13 @@ func ErrorOnRejectingHigherConfidenceConflictTest(t *testing.T, factory Factory)
 	}}
 	pink.InputIDsV.Add(X)
 
-	params := snowball.Parameters{
-		Metrics: prometheus.NewRegistry(),
-		K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 1,
+	params := sbcon.Parameters{
+		Metrics:           prometheus.NewRegistry(),
+		K:                 1,
+		Alpha:             1,
+		BetaVirtuous:      1,
+		BetaRogue:         1,
+		ConcurrentRepolls: 1,
 	}
 	graph.Initialize(snow.DefaultContextTest(), params)
@@ -871,14 +1123,65 @@ func ErrorOnRejectingHigherConfidenceConflictTest(t *testing.T, factory Factory)
 	}
 }
 
+func UTXOCleanupTest(t *testing.T, factory Factory) {
+	Setup()
+
+	graph := factory.New()
+
+	params := sbcon.Parameters{
+		Metrics:           prometheus.NewRegistry(),
+		K:                 1,
+		Alpha:             1,
+		BetaVirtuous:      1,
+		BetaRogue:         2,
+		ConcurrentRepolls: 1,
+	}
+	err := graph.Initialize(snow.DefaultContextTest(), params)
+	assert.NoError(t, err)
+
+	err = graph.Add(Red)
+	assert.NoError(t, err)
+
+	err = graph.Add(Green)
+	assert.NoError(t, err)
+
+	redVotes := ids.Bag{}
+	redVotes.Add(Red.ID())
+	changed, err := graph.RecordPoll(redVotes)
+	assert.NoError(t, err)
+	assert.False(t, changed, "shouldn't have accepted the red tx")
+
+	changed, err = graph.RecordPoll(redVotes)
+	assert.NoError(t, err)
+	assert.True(t, changed, "should have accepted the red tx")
+
+	assert.Equal(t, choices.Accepted, Red.Status())
+	assert.Equal(t, choices.Rejected, Green.Status())
+
+	err = graph.Add(Blue)
+	assert.NoError(t, err)
+
+	blueVotes := ids.Bag{}
+	blueVotes.Add(Blue.ID())
+	changed, err = graph.RecordPoll(blueVotes)
+	assert.NoError(t, err)
+	assert.True(t, changed, "should have accepted the blue tx")
+
+	assert.Equal(t, choices.Accepted, Blue.Status())
+}
+
 func StringTest(t *testing.T, factory Factory, prefix string) {
 	Setup()
 
 	graph := factory.New()
 
-	params := snowball.Parameters{
-		Metrics: prometheus.NewRegistry(),
-		K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2,
+	params := sbcon.Parameters{
+		Metrics:           prometheus.NewRegistry(),
+		K:                 2,
+		Alpha:             2,
+		BetaVirtuous:      1,
+		BetaRogue:         2,
+		ConcurrentRepolls: 1,
 	}
 	graph.Initialize(snow.DefaultContextTest(), params)
@@ -902,18 +1205,20 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
 	rb.SetThreshold(2)
 	rb.AddCount(Red.ID(), 2)
 	rb.AddCount(Blue.ID(), 2)
-	if _, err := graph.RecordPoll(rb); err != nil {
+	if changed, err := graph.RecordPoll(rb); err != nil {
 		t.Fatal(err)
+	} else if !changed {
+		t.Fatalf("Should have caused the frontiers to recalculate")
 	} else if err := graph.Add(Blue); err != nil {
 		t.Fatal(err)
 	}
 
 	{
 		expected := prefix + "(\n" +
-			"    Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 1 Bias: 1\n" +
-			"    Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 0 Bias: 0\n" +
-			"    Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 0 Bias: 0\n" +
-			"    Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 1 Bias: 1\n" +
+			"    Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 1)\n" +
+			"    Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 0, Confidence = 0)\n" +
+			"    Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 0, Confidence = 0)\n" +
+			"    Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w SB(NumSuccessfulPolls = 1, Confidence = 1)\n" +
 			")"
 		if str := graph.String(); str != expected {
 			t.Fatalf("Expected %s, got %s", expected, str)
@@ -934,16 +1239,18 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
 	ga.SetThreshold(2)
 	ga.AddCount(Green.ID(), 2)
 	ga.AddCount(Alpha.ID(), 2)
-	if _, err := graph.RecordPoll(ga); err != nil {
+	if changed, err := graph.RecordPoll(ga); err != nil {
 		t.Fatal(err)
+	} else if changed {
+		t.Fatalf("Shouldn't have caused the frontiers to recalculate")
 	}
 
 	{
 		expected := prefix + "(\n" +
-			"    Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 0 Bias: 1\n" +
-			"    Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 1 Bias: 1\n" +
-			"    Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 1 Bias: 1\n" +
-			"    Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 0 Bias: 1\n" +
+			"    Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 0)\n" +
+			"    Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 1, Confidence = 1)\n" +
+			"    Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 1, Confidence = 1)\n" +
+			"    Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w SB(NumSuccessfulPolls = 1, Confidence = 0)\n" +
 			")"
 		if str := graph.String(); str != expected {
 			t.Fatalf("Expected %s, got %s", expected, str)
@@ -961,16 +1268,18 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
 	}
 
 	empty := ids.Bag{}
-	if _, err := graph.RecordPoll(empty); err != nil {
+	if changed, err := graph.RecordPoll(empty); err != nil {
 		t.Fatal(err)
+	} else if changed {
+		t.Fatalf("Shouldn't have caused the frontiers to recalculate")
 	}
 
 	{
 		expected := prefix + "(\n" +
-			"    Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 0 Bias: 1\n" +
-			"    Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 0 Bias: 1\n" +
-			"    Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 0 Bias: 1\n" +
-			"    Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 0 Bias: 1\n" +
+			"    Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 0)\n" +
+			"    Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 1, Confidence = 0)\n" +
+			"    Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 1, Confidence = 0)\n" +
+			"    Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w SB(NumSuccessfulPolls = 1, Confidence = 0)\n" +
 			")"
 		if str := graph.String(); str != expected {
 			t.Fatalf("Expected %s, got %s", expected, str)
Expected %s", Blue.ID()) } else if graph.Finalized() { t.Fatalf("Finalized too early") - } else if _, err := graph.RecordPoll(ga); err != nil { + } else if changed, err := graph.RecordPoll(ga); err != nil { t.Fatal(err) + } else if !changed { + t.Fatalf("Should have caused the frontiers to recalculate") } { expected := prefix + "(\n" + - " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 0 Bias: 1\n" + - " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 1 Bias: 2\n" + - " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 1 Bias: 2\n" + - " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 0 Bias: 1\n" + + " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + + " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 2, Confidence = 1)\n" + + " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 2, Confidence = 1)\n" + + " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + ")" if str := graph.String(); str != expected { t.Fatalf("Expected %s, got %s", expected, str) @@ -1009,8 +1320,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Wrong preference. Expected %s", Alpha.ID()) } else if graph.Finalized() { t.Fatalf("Finalized too early") - } else if _, err := graph.RecordPoll(ga); err != nil { + } else if changed, err := graph.RecordPoll(ga); err != nil { t.Fatal(err) + } else if !changed { + t.Fatalf("Should have caused the frontiers to recalculate") } { @@ -1032,8 +1345,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("%s should have been rejected", Red.ID()) } else if Blue.Status() != choices.Rejected { t.Fatalf("%s should have been rejected", Blue.ID()) - } else if _, err := graph.RecordPoll(rb); err != nil { + } else if changed, err := graph.RecordPoll(rb); err != nil { t.Fatal(err) + } else if changed { + t.Fatalf("Shouldn't have caused the frontiers to recalculate") } { diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go index e33640f71462..6fd5406f26c7 100644 --- a/snow/consensus/snowstorm/directed.go +++ b/snow/consensus/snowstorm/directed.go @@ -4,15 +4,11 @@ package snowstorm import ( - "bytes" - "fmt" - "sort" - "strings" - "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/snow/consensus/snowball" - "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/snow/choices" + + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) // DirectedFactory implements Factory by returning a directed struct @@ -36,184 +32,214 @@ type Directed struct { } type directedTx struct { - bias, confidence, lastVote int - rogue bool + snowball + + // pendingAccept identifies if this transaction has been marked as accepted + // once its transitive dependencies have also been accepted + pendingAccept bool - pendingAccept, accepted bool - ins, outs ids.Set + // ins is the set of txIDs that this tx conflicts with that are less + // preferred than this tx + ins ids.Set + // outs is the set of txIDs that this tx conflicts with that are more + // preferred than this tx + outs ids.Set + + // tx is the actual transaction this node represents tx Tx } // Initialize implements the Consensus interface -func (dg *Directed) Initialize(ctx 
 
 // Initialize implements the Consensus interface
-func (dg *Directed) Initialize(ctx *snow.Context, params snowball.Parameters) {
-	dg.common.Initialize(ctx, params)
-
-	dg.utxos = make(map[[32]byte]ids.Set)
+func (dg *Directed) Initialize(
+	ctx *snow.Context,
+	params sbcon.Parameters,
+) error {
 	dg.txs = make(map[[32]byte]*directedTx)
+	dg.utxos = make(map[[32]byte]ids.Set)
+
+	return dg.common.Initialize(ctx, params)
 }
 
 // IsVirtuous implements the Consensus interface
 func (dg *Directed) IsVirtuous(tx Tx) bool {
-	id := tx.ID()
-	if node, exists := dg.txs[id.Key()]; exists {
+	txID := tx.ID()
+	// If the tx is currently processing, we should just return whether it was
+	// registered as rogue or not.
+	if node, exists := dg.txs[txID.Key()]; exists {
 		return !node.rogue
 	}
-	for _, input := range tx.InputIDs().List() {
-		if _, exists := dg.utxos[input.Key()]; exists {
+
+	// The tx isn't processing, so we need to check to see if it conflicts with
+	// any of the other txs that are currently processing.
+	for _, utxoID := range tx.InputIDs().List() {
+		if _, exists := dg.utxos[utxoID.Key()]; exists {
+			// A currently processing tx names the same input as the provided
+			// tx, so the provided tx would be rogue.
 			return false
 		}
	}
+
+	// This tx is virtuous as far as this consensus instance knows.
 	return true
 }
 
 // Conflicts implements the Consensus interface
 func (dg *Directed) Conflicts(tx Tx) ids.Set {
-	id := tx.ID()
 	conflicts := ids.Set{}
-
-	if node, exists := dg.txs[id.Key()]; exists {
+	if node, exists := dg.txs[tx.ID().Key()]; exists {
+		// If the tx is currently processing, the conflicting txs are just the
+		// union of the inbound conflicts and the outbound conflicts.
 		conflicts.Union(node.ins)
 		conflicts.Union(node.outs)
 	} else {
+		// If the tx isn't currently processing, the conflicting txs are the
+		// union of all the txs that spend an input that this tx spends.
 		for _, input := range tx.InputIDs().List() {
 			if spends, exists := dg.utxos[input.Key()]; exists {
 				conflicts.Union(spends)
 			}
 		}
-		conflicts.Remove(id)
 	}
-
 	return conflicts
 }
 
 // Add implements the Consensus interface
 func (dg *Directed) Add(tx Tx) error {
-	if dg.Issued(tx) {
-		return nil // Already inserted
+	if shouldVote, err := dg.shouldVote(dg, tx); !shouldVote || err != nil {
+		return err
 	}
 
 	txID := tx.ID()
-	bytes := tx.Bytes()
-
-	dg.ctx.DecisionDispatcher.Issue(dg.ctx.ChainID, txID, bytes)
-	inputs := tx.InputIDs()
-	// If there are no inputs, Tx is vacuously accepted
-	if inputs.Len() == 0 {
-		if err := tx.Accept(); err != nil {
-			return err
-		}
-		dg.ctx.DecisionDispatcher.Accept(dg.ctx.ChainID, txID, bytes)
-		dg.metrics.Issued(txID)
-		dg.metrics.Accepted(txID)
-		return nil
-	}
-
 	txNode := &directedTx{tx: tx}
-	// For each UTXO input to Tx:
-	// * Get all transactions that consume that UTXO
-	// * Add edges from Tx to those transactions in the conflict graph
-	// * Mark those transactions as rogue
-	for _, inputID := range inputs.List() {
+
+	// For each UTXO consumed by the tx:
+	// * Add edges between this tx and txs that consume this UTXO
+	// * Mark this tx as attempting to consume this UTXO
+	for _, inputID := range tx.InputIDs().List() {
 		inputKey := inputID.Key()
-		spends := dg.utxos[inputKey] // Transactions spending this UTXO
-		// Add edges to conflict graph
-		txNode.outs.Union(spends)
+		// Get the set of txs that are currently processing that also consume
+		// this UTXO
+		spenders := dg.utxos[inputKey]
+
+		// Add all the txs that spend this UTXO to this tx's conflicts. These
+		// conflicting txs must be preferred over this tx. We know this because
+		// this tx currently has a bias of 0 and the tie goes to the tx whose
+		// bias was updated first.
+		txNode.outs.Union(spenders)
 
-		// Mark transactions conflicting with Tx as rogue
-		for _, conflictID := range spends.List() {
+		// Update txs conflicting with tx to account for its issuance
+		for _, conflictID := range spenders.List() {
 			conflictKey := conflictID.Key()
+
+			// Get the node that contains this conflicting tx
 			conflict := dg.txs[conflictKey]
 
+			// This conflicting tx can't be virtuous anymore. So, we attempt to
+			// remove it from all of the virtuous sets.
 			dg.virtuous.Remove(conflictID)
 			dg.virtuousVoting.Remove(conflictID)
 
+			// This tx should be set to rogue if it wasn't rogue before.
 			conflict.rogue = true
-			conflict.ins.Add(txID)
-			dg.txs[conflictKey] = conflict
+
+			// This conflicting tx is preferred over the tx being inserted, as
+			// described above. So we add the conflict to the inbound set.
+			conflict.ins.Add(txID)
 		}
-		// Add Tx to list of transactions consuming UTXO whose ID is id
-		spends.Add(txID)
-		dg.utxos[inputKey] = spends
+
+		// Add this tx to list of txs consuming the current UTXO
+		spenders.Add(txID)
+
+		// Because this isn't a pointer, we should re-map the set.
+		dg.utxos[inputKey] = spenders
 	}
-	txNode.rogue = txNode.outs.Len() != 0 // Mark this transaction as rogue if it has conflicts
 
-	// Add the node representing Tx to the node set
-	dg.txs[txID.Key()] = txNode
+	// Mark this transaction as rogue if it had any conflicts registered above
+	txNode.rogue = txNode.outs.Len() != 0
+
 	if !txNode.rogue {
-		// I'm not rogue
+		// If this tx is currently virtuous, add it to the virtuous sets
 		dg.virtuous.Add(txID)
 		dg.virtuousVoting.Add(txID)
-		// If I'm not rogue, I must be preferred
+
+		// If a tx is virtuous, it must be preferred.
 		dg.preferences.Add(txID)
 	}
-	dg.metrics.Issued(txID)
 
-	// Tx can be accepted only if the transactions it depends on are also accepted
-	// If any transactions that Tx depends on are rejected, reject Tx
-	toReject := &directedRejector{
-		dg:     dg,
-		txNode: txNode,
-	}
-	for _, dependency := range tx.Dependencies() {
-		if !dependency.Status().Decided() {
-			toReject.deps.Add(dependency.ID())
-		}
-	}
-	dg.pendingReject.Register(toReject)
-	return dg.errs.Err
+	// Add this tx to the set of currently processing txs
+	dg.txs[txID.Key()] = txNode
+
+	// If a tx that this tx depends on is rejected, this tx should also be
+	// rejected.
+	dg.registerRejector(dg, tx)
+	return nil
 }
 
 // Issued implements the Consensus interface
 func (dg *Directed) Issued(tx Tx) bool {
+	// If the tx is either Accepted or Rejected, then it must have been issued
+	// previously.
 	if tx.Status().Decided() {
 		return true
 	}
+
+	// If the tx is currently processing, then it must have been issued.
 	_, ok := dg.txs[tx.ID().Key()]
 	return ok
 }
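The RecordPoll hunk that follows leans on RecordSuccessfulPoll and a monotonically increasing currentVote to reset confidence lazily instead of touching every node on every poll. The embedded snowball type is defined elsewhere in this changeset; the self-contained sketch below captures the behavior its use implies, with illustrative names only.

package main

import "fmt"

type node struct {
	numSuccessfulPolls int
	confidence         int
	lastVote           int
}

// recordSuccessfulPoll sketches snowball's lazy reset: if a poll number was
// skipped since the node's last successful vote, its consecutive-success
// counter (confidence) restarts before being incremented.
func (n *node) recordSuccessfulPoll(currentVote int) {
	if n.lastVote+1 != currentVote {
		n.confidence = 0
	}
	n.lastVote = currentVote
	n.numSuccessfulPolls++
	n.confidence++
}

func main() {
	n := &node{}
	n.recordSuccessfulPoll(1) // voted in poll 1
	n.recordSuccessfulPoll(2) // voted in poll 2: confidence builds to 2
	n.recordSuccessfulPoll(4) // poll 3 missed: confidence restarts at 1
	fmt.Println(n.numSuccessfulPolls, n.confidence) // 3 1
}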
 
 // RecordPoll implements the Consensus interface
 func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) {
+	// Increase the vote ID. This is only updated here and is used to reset the
+	// confidence values of transactions lazily.
 	dg.currentVote++
+
+	// This flag tracks if the Avalanche instance needs to recompute its
+	// frontiers. Frontiers only need to be recalculated if preferences change
+	// or if a tx was accepted.
 	changed := false
 
+	// We only want to iterate over txs that received alpha votes
 	votes.SetThreshold(dg.params.Alpha)
-	threshold := votes.Threshold() // Each element is ID of transaction preferred by >= Alpha poll respondents
-	for _, toInc := range threshold.List() {
-		incKey := toInc.Key()
-		txNode, exist := dg.txs[incKey]
+	// Get the set of IDs that meet this alpha threshold
+	metThreshold := votes.Threshold()
+	for _, txID := range metThreshold.List() {
+		// Get the node this tx represents
+		txNode, exist := dg.txs[txID.Key()]
 		if !exist {
-			// Votes for decided consumers are ignored
+			// This tx may have already been accepted because of tx
+			// dependencies. If this is the case, we can just drop the vote.
 			continue
 		}
 
-		if txNode.lastVote+1 != dg.currentVote {
-			txNode.confidence = 0
-		}
-		txNode.lastVote = dg.currentVote
-
-		dg.ctx.Log.Verbo("Increasing (bias, confidence) of %s from (%d, %d) to (%d, %d)",
-			toInc, txNode.bias, txNode.confidence, txNode.bias+1, txNode.confidence+1)
+		txNode.RecordSuccessfulPoll(dg.currentVote)
 
-		txNode.bias++
-		txNode.confidence++
+		dg.ctx.Log.Verbo("Updated TxID=%s to have consensus state=%s",
+			txID, &txNode.snowball)
 
+		// If the tx should be accepted, then we should defer its acceptance
+		// until its dependencies are decided. If this tx was already marked to
+		// be accepted, we shouldn't register it again.
 		if !txNode.pendingAccept &&
-			((!txNode.rogue && txNode.confidence >= dg.params.BetaVirtuous) ||
-				txNode.confidence >= dg.params.BetaRogue) {
-			dg.deferAcceptance(txNode)
+			txNode.Finalized(dg.params.BetaVirtuous, dg.params.BetaRogue) {
+			// Mark that this tx is pending acceptance so acceptance is only
+			// registered once.
+			txNode.pendingAccept = true
+
+			dg.registerAcceptor(dg, txNode.tx)
 			if dg.errs.Errored() {
 				return changed, dg.errs.Err
 			}
 		}
-		if !txNode.accepted {
+
+		if txNode.tx.Status() != choices.Accepted {
+			// If this tx wasn't accepted, then this instance is only changed if
+			// preferences changed.
 			changed = dg.redirectEdges(txNode) || changed
 		} else {
+			// By accepting a tx, the state of this instance has changed.
 			changed = true
 		}
 	}
@@ -221,80 +247,97 @@ func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) {
 }
 
 func (dg *Directed) String() string {
-	nodes := make([]*directedTx, 0, len(dg.txs))
-	for _, tx := range dg.txs {
-		nodes = append(nodes, tx)
+	nodes := make([]*snowballNode, 0, len(dg.txs))
+	for _, txNode := range dg.txs {
+		nodes = append(nodes, &snowballNode{
+			txID:               txNode.tx.ID(),
+			numSuccessfulPolls: txNode.numSuccessfulPolls,
+			confidence:         txNode.Confidence(dg.currentVote),
+		})
 	}
-	sortTxNodes(nodes)
-
-	sb := strings.Builder{}
+	return ConsensusString("DG", nodes)
+}
 
-	sb.WriteString("DG(")
+// accept the named txID and remove it from the graph
+func (dg *Directed) accept(txID ids.ID) error {
+	txKey := txID.Key()
+	txNode := dg.txs[txKey]
+	// We are accepting the tx, so we should remove the node from the graph.
+	delete(dg.txs, txKey)
+
+	// This tx is consuming all the UTXOs from its inputs, so we can prune them
+	// all from memory
+	for _, inputID := range txNode.tx.InputIDs().List() {
+		delete(dg.utxos, inputID.Key())
+	}
 
-	format := fmt.Sprintf(
-		"\n    Choice[%s] = ID: %%50s Confidence: %s Bias: %%d",
-		formatting.IntFormat(len(dg.txs)-1),
-		formatting.IntFormat(dg.params.BetaRogue-1))
+	// This tx is now accepted, so it shouldn't be part of the virtuous set or
+	// the preferred set. Its status as Accepted implies these descriptions.
+	dg.virtuous.Remove(txID)
+	dg.preferences.Remove(txID)
 
-	for i, txNode := range nodes {
-		confidence := txNode.confidence
-		if txNode.lastVote != dg.currentVote {
-			confidence = 0
-		}
-		sb.WriteString(fmt.Sprintf(format,
-			i, txNode.tx.ID(), confidence, txNode.bias))
+	// Reject all the txs that conflicted with this tx.
+	if err := dg.reject(txNode.ins.List()...); err != nil {
+		return err
 	}
-
-	if len(nodes) > 0 {
-		sb.WriteString("\n")
+	// While it is typically true that a tx that is being accepted is preferred,
+	// it is possible for this to not be the case. So this is handled for
+	// completeness.
+	if err := dg.reject(txNode.outs.List()...); err != nil {
+		return err
 	}
-	sb.WriteString(")")
-
-	return sb.String()
+	return dg.acceptTx(txNode.tx)
 }
 
-func (dg *Directed) deferAcceptance(txNode *directedTx) {
-	txNode.pendingAccept = true
-
-	toAccept := &directedAccepter{
-		dg:     dg,
-		txNode: txNode,
-	}
-	for _, dependency := range txNode.tx.Dependencies() {
-		if !dependency.Status().Decided() {
-			toAccept.deps.Add(dependency.ID())
+// reject all the named txIDs and remove them from the graph
+func (dg *Directed) reject(conflictIDs ...ids.ID) error {
+	for _, conflictID := range conflictIDs {
+		conflictKey := conflictID.Key()
+		conflict := dg.txs[conflictKey]
+
+		// This tx is no longer an option for consuming the UTXOs from its
+		// inputs, so we should remove their reference to this tx.
+		for _, inputID := range conflict.tx.InputIDs().List() {
+			inputKey := inputID.Key()
+			txIDs, exists := dg.utxos[inputKey]
+			if !exists {
+				// This UTXO may no longer exist because it was removed due to
+				// the acceptance of a tx. If that is the case, there is nothing
+				// left to remove from memory.
+				continue
+			}
+			txIDs.Remove(conflictID)
+			if txIDs.Len() == 0 {
+				// If this tx was the last tx consuming this UTXO, we should
+				// prune the UTXO from memory entirely.
+				delete(dg.utxos, inputKey)
+			} else {
+				// If this UTXO still has txs consuming it, then we should make
+				// sure this update is written back to the UTXOs map.
+				dg.utxos[inputKey] = txIDs
+			}
 		}
-	}
-	dg.virtuousVoting.Remove(txNode.tx.ID())
-	dg.pendingAccept.Register(toAccept)
-}
-
-func (dg *Directed) reject(ids ...ids.ID) error {
-	for _, conflict := range ids {
-		conflictKey := conflict.Key()
-		conf := dg.txs[conflictKey]
+
+		// We are rejecting the tx, so we should remove it from the graph
 		delete(dg.txs, conflictKey)
 
-		dg.preferences.Remove(conflict)
+		// While it's statistically unlikely that something being rejected is
+		// preferred, it is handled for completeness.
+		dg.preferences.Remove(conflictID)
 
 		// remove the edge between this node and all its neighbors
-		dg.removeConflict(conflict, conf.ins.List()...)
-		dg.removeConflict(conflict, conf.outs.List()...)
+		dg.removeConflict(conflictID, conflict.ins.List()...)
+		dg.removeConflict(conflictID, conflict.outs.List()...)
-		// Mark it as rejected
-		if err := conf.tx.Reject(); err != nil {
+		if err := dg.rejectTx(conflict.tx); err != nil {
 			return err
 		}
-		dg.ctx.DecisionDispatcher.Reject(dg.ctx.ChainID, conf.tx.ID(), conf.tx.Bytes())
-		dg.metrics.Rejected(conflict)
-
-		dg.pendingAccept.Abandon(conflict)
-		dg.pendingReject.Fulfill(conflict)
 	}
 	return nil
 }
 
+// redirectEdges attempts to turn outbound edges into inbound edges if the
+// preferences have changed
 func (dg *Directed) redirectEdges(tx *directedTx) bool {
 	changed := false
 	for _, conflictID := range tx.outs.List() {
@@ -303,137 +346,51 @@ func (dg *Directed) redirectEdges(tx *directedTx) bool {
 	return changed
 }
 
-// Set the confidence of all conflicts to 0
-// Change the direction of edges if needed
+// Change the direction of this edge if needed. Returns true if the direction
+// was switched.
 func (dg *Directed) redirectEdge(txNode *directedTx, conflictID ids.ID) bool {
-	nodeID := txNode.tx.ID()
 	conflict := dg.txs[conflictID.Key()]
-	if txNode.bias <= conflict.bias {
+	if txNode.numSuccessfulPolls <= conflict.numSuccessfulPolls {
 		return false
 	}
 
-	// TODO: why is this confidence reset here? It should already be reset
-	// implicitly by the lack of a timestamp increase.
-	conflict.confidence = 0
+	// Because this tx has a higher preference than the conflicting tx, we must
+	// ensure that the edge is directed towards this tx.
+	nodeID := txNode.tx.ID()
 
-	// Change the edge direction
+	// Change the edge direction according to the conflict tx
 	conflict.ins.Remove(nodeID)
 	conflict.outs.Add(nodeID)
-	dg.preferences.Remove(conflictID) // This consumer now has an out edge
+	dg.preferences.Remove(conflictID) // This conflict has an outbound edge
 
+	// Change the edge direction according to this tx
 	txNode.ins.Add(conflictID)
 	txNode.outs.Remove(conflictID)
 	if txNode.outs.Len() == 0 {
-		// If I don't have out edges, I'm preferred
+		// If this tx doesn't have any outbound edges, it's preferred
 		dg.preferences.Add(nodeID)
 	}
 	return true
 }
 
-func (dg *Directed) removeConflict(id ids.ID, ids ...ids.ID) {
-	for _, neighborID := range ids {
+func (dg *Directed) removeConflict(txID ids.ID, neighborIDs ...ids.ID) {
+	for _, neighborID := range neighborIDs {
 		neighborKey := neighborID.Key()
-		// If the neighbor doesn't exist, they may have already been rejected
-		if neighbor, exists := dg.txs[neighborKey]; exists {
-			neighbor.ins.Remove(id)
-			neighbor.outs.Remove(id)
-
-			if neighbor.outs.Len() == 0 {
-				// Make sure to mark the neighbor as preferred if needed
-				dg.preferences.Add(neighborID)
-			}
-
-			dg.txs[neighborKey] = neighbor
+		neighbor, exists := dg.txs[neighborKey]
+		if !exists {
+			// If the neighbor doesn't exist, they may have already been
+			// rejected, so this mapping can be skipped.
+			continue
 		}
-	}
-}
 
-type directedAccepter struct {
-	dg       *Directed
-	deps     ids.Set
-	rejected bool
-	txNode   *directedTx
-}
+		// Remove any edge to this tx.
+		neighbor.ins.Remove(txID)
+		neighbor.outs.Remove(txID)
 
-func (a *directedAccepter) Dependencies() ids.Set { return a.deps }
-
-func (a *directedAccepter) Fulfill(id ids.ID) {
-	a.deps.Remove(id)
-	a.Update()
-}
-
-func (a *directedAccepter) Abandon(id ids.ID) { a.rejected = true }
-
-func (a *directedAccepter) Update() {
-	// If I was rejected or I am still waiting on dependencies to finish do
-	// nothing.
-	if a.rejected || a.deps.Len() != 0 || a.dg.errs.Errored() {
-		return
-	}
-
-	id := a.txNode.tx.ID()
-	delete(a.dg.txs, id.Key())
-
-	for _, inputID := range a.txNode.tx.InputIDs().List() {
-		delete(a.dg.utxos, inputID.Key())
-	}
-	a.dg.virtuous.Remove(id)
-	a.dg.preferences.Remove(id)
-
-	// Reject the conflicts
-	if err := a.dg.reject(a.txNode.ins.List()...); err != nil {
-		a.dg.errs.Add(err)
-		return
-	}
-	// Should normally be empty
-	if err := a.dg.reject(a.txNode.outs.List()...); err != nil {
-		a.dg.errs.Add(err)
-		return
-	}
-
-	// Mark it as accepted
-	if err := a.txNode.tx.Accept(); err != nil {
-		a.dg.errs.Add(err)
-		return
-	}
-	a.txNode.accepted = true
-	a.dg.ctx.DecisionDispatcher.Accept(a.dg.ctx.ChainID, id, a.txNode.tx.Bytes())
-	a.dg.metrics.Accepted(id)
-
-	a.dg.pendingAccept.Fulfill(id)
-	a.dg.pendingReject.Abandon(id)
-}
-
-// directedRejector implements Blockable
-type directedRejector struct {
-	dg       *Directed
-	deps     ids.Set
-	rejected bool // true if the transaction has been rejected
-	txNode   *directedTx
-}
-
-func (r *directedRejector) Dependencies() ids.Set { return r.deps }
-
-func (r *directedRejector) Fulfill(id ids.ID) {
-	if r.rejected || r.dg.errs.Errored() {
-		return
+		if neighbor.outs.Len() == 0 {
+			// If this tx should now be preferred, make sure its status is
+			// updated.
+			dg.preferences.Add(neighborID)
+		}
 	}
-	r.rejected = true
-	r.dg.errs.Add(r.dg.reject(r.txNode.tx.ID()))
 }
-
-func (*directedRejector) Abandon(id ids.ID) {}
-
-func (*directedRejector) Update() {}
-
-type sortTxNodeData []*directedTx
-
-func (tnd sortTxNodeData) Less(i, j int) bool {
-	return bytes.Compare(
-		tnd[i].tx.ID().Bytes(),
-		tnd[j].tx.ID().Bytes()) == -1
-}
-func (tnd sortTxNodeData) Len() int  { return len(tnd) }
-func (tnd sortTxNodeData) Swap(i, j int) { tnd[j], tnd[i] = tnd[i], tnd[j] }
-
-func sortTxNodes(nodes []*directedTx) { sort.Sort(sortTxNodeData(nodes)) }
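A standalone sketch of the edge flip performed by redirectEdge above: once a tx's numSuccessfulPolls exceeds a conflicting tx's, the edge between them is redirected toward the new winner, and preference follows from having no outbound edges. Toy types only, not the package's API.

package main

import "fmt"

type node struct {
	id                 string
	numSuccessfulPolls int
	ins, outs          map[string]bool
}

// redirect flips the conflict edge toward the tx with more successful polls.
func redirect(winner, loser *node) {
	if winner.numSuccessfulPolls <= loser.numSuccessfulPolls {
		return // the edge already points the right way
	}
	delete(loser.ins, winner.id)
	loser.outs[winner.id] = true
	winner.ins[loser.id] = true
	delete(winner.outs, loser.id)
}

func main() {
	a := &node{id: "a", numSuccessfulPolls: 1, ins: map[string]bool{}, outs: map[string]bool{"b": true}}
	b := &node{id: "b", ins: map[string]bool{"a": true}, outs: map[string]bool{}}
	redirect(a, b)
	fmt.Println(len(a.outs) == 0, len(b.outs) == 0) // true false: a is now preferred
}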
diff --git a/snow/consensus/snowstorm/equality_test.go b/snow/consensus/snowstorm/equality_test.go
index 8298c48329e1..383504bbdc78 100644
--- a/snow/consensus/snowstorm/equality_test.go
+++ b/snow/consensus/snowstorm/equality_test.go
@@ -9,7 +9,7 @@ import (
 
 	"github.com/prometheus/client_golang/prometheus"
 
-	"github.com/ava-labs/gecko/snow/consensus/snowball"
+	sbcon "github.com/ava-labs/gecko/snow/consensus/snowball"
 )
 
 func TestConflictGraphEquality(t *testing.T) {
@@ -19,7 +19,7 @@ func TestConflictGraphEquality(t *testing.T) {
 	colorsPerConsumer := 2
 	maxInputConflicts := 2
 	numNodes := 100
 
-	params := snowball.Parameters{
+	params := sbcon.Parameters{
 		Metrics: prometheus.NewRegistry(),
 		K:       20,
 		Alpha:   11,
diff --git a/snow/consensus/snowstorm/input.go b/snow/consensus/snowstorm/input.go
index 0c191618a19f..3ea7133c9769 100644
--- a/snow/consensus/snowstorm/input.go
+++ b/snow/consensus/snowstorm/input.go
@@ -4,15 +4,13 @@
 package snowstorm
 
 import (
-	"bytes"
-	"fmt"
-	"sort"
-	"strings"
+	"math"
 
 	"github.com/ava-labs/gecko/ids"
 	"github.com/ava-labs/gecko/snow"
-	"github.com/ava-labs/gecko/snow/consensus/snowball"
-	"github.com/ava-labs/gecko/utils/formatting"
+	"github.com/ava-labs/gecko/snow/choices"
+
+	sbcon "github.com/ava-labs/gecko/snow/consensus/snowball"
 )
 
 // InputFactory implements Factory by returning an input struct
@@ -28,485 +26,441 @@ type Input struct {
 	// Key: Transaction ID
 	// Value: Node that represents this transaction in the conflict graph
-	txs map[[32]byte]inputTx
+	txs map[[32]byte]*inputTx
 
 	// Key: UTXO ID
 	// Value: Node that represents the status of the transactions consuming this
 	// input
-	utxos map[[32]byte]inputUtxo
+	utxos map[[32]byte]inputUTXO
 }
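The utxos map above is the whole conflict index for this implementation: each consumed UTXO maps to the set of txs attempting to spend it, and that spender set is exactly the conflict set for the UTXO. A self-contained sketch of that bookkeeping, with string keys standing in for [32]byte and hypothetical names:

package main

import "fmt"

func main() {
	utxos := map[string]map[string]bool{} // utxoID -> set of spender txIDs

	// addTx registers a tx against each input it consumes and returns the
	// txs it conflicts with (the existing spenders of those inputs).
	addTx := func(txID string, inputs ...string) (conflicts []string) {
		for _, in := range inputs {
			spenders, ok := utxos[in]
			if !ok {
				spenders = map[string]bool{}
				utxos[in] = spenders
			}
			for other := range spenders {
				conflicts = append(conflicts, other)
			}
			spenders[txID] = true
		}
		return conflicts
	}

	fmt.Println(addTx("red", "utxo1"))   // []
	fmt.Println(addTx("green", "utxo1")) // [red]: both txs are now rogue
}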
 
 type inputTx struct {
-	bias int
-	tx   Tx
-
+	// pendingAccept identifies if this transaction has been marked as accepted
+	// once its transitive dependencies have also been accepted
+	pendingAccept bool
+
+	// numSuccessfulPolls is the number of times this tx was the successful
+	// result of a network poll
+	numSuccessfulPolls int
+
+	// lastVote is the last poll number that this tx was included in a
+	// successful network poll. This timestamp is needed to ensure correctness
+	// in the case that a tx was rejected when it was preferred in a conflict
+	// set and there was a tie for the second highest numSuccessfulPolls.
 	lastVote int
+
+	// tx is the actual transaction this node represents
+	tx Tx
 }
 
-type inputUtxo struct {
-	bias, confidence, lastVote int
-	rogue                      bool
+type inputUTXO struct {
+	snowball
 
+	// preference is the txID which snowball says this UTXO should prefer
 	preference ids.ID
-	color      ids.ID
-	conflicts  ids.Set
+
+	// color is the txID which snowflake says this UTXO should prefer
+	color ids.ID
+
+	// spenders is the set of txIDs that are currently attempting to spend this
+	// UTXO
+	spenders ids.Set
 }
 
 // Initialize implements the ConflictGraph interface
-func (ig *Input) Initialize(ctx *snow.Context, params snowball.Parameters) {
-	ig.common.Initialize(ctx, params)
+func (ig *Input) Initialize(ctx *snow.Context, params sbcon.Parameters) error {
+	ig.txs = make(map[[32]byte]*inputTx)
+	ig.utxos = make(map[[32]byte]inputUTXO)
 
-	ig.txs = make(map[[32]byte]inputTx)
-	ig.utxos = make(map[[32]byte]inputUtxo)
+	return ig.common.Initialize(ctx, params)
 }
 
 // IsVirtuous implements the ConflictGraph interface
 func (ig *Input) IsVirtuous(tx Tx) bool {
-	id := tx.ID()
-	for _, consumption := range tx.InputIDs().List() {
-		input := ig.utxos[consumption.Key()]
-		if input.rogue ||
-			(input.conflicts.Len() > 0 && !input.conflicts.Contains(id)) {
+	txID := tx.ID()
+	for _, utxoID := range tx.InputIDs().List() {
+		utxo, exists := ig.utxos[utxoID.Key()]
+
+		// If the UTXO wasn't currently processing, then this tx won't conflict
+		// due to this UTXO.
+		if !exists {
+			continue
+		}
+
+		// If this UTXO is rogue, then this tx will have at least one conflict.
+		if utxo.rogue {
+			return false
+		}
+
+		// This UTXO is currently virtuous, so it must be spent by only one tx.
+		// If that tx is different from this tx, then these txs would conflict.
+		if !utxo.spenders.Contains(txID) {
 			return false
 		}
	}
+
+	// None of the UTXOs consumed by this tx imply that this tx would be rogue,
+	// so it is virtuous as far as this consensus instance knows.
 	return true
 }
 
+// Conflicts implements the ConflictGraph interface
+func (ig *Input) Conflicts(tx Tx) ids.Set {
+	conflicts := ids.Set{}
+
+	// The conflicting txs are the union of all the txs that spend an input that
+	// this tx spends.
+	for _, utxoID := range tx.InputIDs().List() {
+		if utxo, exists := ig.utxos[utxoID.Key()]; exists {
+			conflicts.Union(utxo.spenders)
+		}
+	}
+
+	// A tx can't conflict with itself, so we should make sure to remove the
+	// provided tx from the conflict set. This is needed in case this tx is
+	// currently processing.
+	conflicts.Remove(tx.ID())
+	return conflicts
+}
+
 // Add implements the ConflictGraph interface
 func (ig *Input) Add(tx Tx) error {
-	if ig.Issued(tx) {
-		return nil // Already inserted
+	if shouldVote, err := ig.shouldVote(ig, tx); !shouldVote || err != nil {
+		return err
 	}
 
 	txID := tx.ID()
-	bytes := tx.Bytes()
-
-	ig.ctx.DecisionDispatcher.Issue(ig.ctx.ChainID, txID, bytes)
-	inputs := tx.InputIDs()
-	// If there are no inputs, they are vacuously accepted
-	if inputs.Len() == 0 {
-		if err := tx.Accept(); err != nil {
-			return err
-		}
-		ig.ctx.DecisionDispatcher.Accept(ig.ctx.ChainID, txID, bytes)
-		ig.metrics.Issued(txID)
-		ig.metrics.Accepted(txID)
-		return nil
-	}
+	txNode := &inputTx{tx: tx}
 
-	cn := inputTx{tx: tx}
+	// This tx should be added to the virtuous sets and preferred sets if this
+	// tx is virtuous in all of the UTXOs it is trying to consume.
 	virtuous := true
-	// If there are inputs, they must be voted on
-	for _, consumption := range inputs.List() {
-		consumptionKey := consumption.Key()
-		input, exists := ig.utxos[consumptionKey]
-		input.rogue = exists // If the input exists for a conflict
+
+	// For each UTXO consumed by the tx:
+	// * Mark this tx as attempting to consume this UTXO
+	// * Mark the UTXO as being rogue if applicable
+	for _, inputID := range tx.InputIDs().List() {
+		inputKey := inputID.Key()
+		utxo, exists := ig.utxos[inputKey]
 		if exists {
-			for _, conflictID := range input.conflicts.List() {
+			// If the utxo was already being consumed by another tx, this utxo
+			// is now rogue.
+			utxo.rogue = true
+			// Since this utxo is rogue, this tx is rogue as well.
+			virtuous = false
+			// If this utxo was previously virtuous, then there may be txs that
+			// were considered virtuous that are now known to be rogue. If
+			// that's the case we should remove those txs from the virtuous
+			// sets.
+			for _, conflictID := range utxo.spenders.List() {
 				ig.virtuous.Remove(conflictID)
 				ig.virtuousVoting.Remove(conflictID)
 			}
 		} else {
-			input.preference = txID // If there isn't a conflict, I'm preferred
+			// If there isn't a conflict for this UTXO, I'm the preferred
+			// spender.
+			utxo.preference = txID
 		}
-		input.conflicts.Add(txID)
-		ig.utxos[consumptionKey] = input
 
-		virtuous = virtuous && !exists
+		// This UTXO needs to track that it is being spent by this tx.
+		utxo.spenders.Add(txID)
+
+		// The utxo isn't a pointer, so it needs to be written back.
+		ig.utxos[inputKey] = utxo
 	}
 
-	// Add the node to the set
-	ig.txs[txID.Key()] = cn
 	if virtuous {
-		// If I'm preferred in all my conflict sets, I'm preferred.
-		// Because the preference graph is a DAG, there will always be at least
-		// one preferred consumer, if there is a consumer
-		ig.preferences.Add(txID)
+		// If this tx is currently virtuous, add it to the virtuous sets
 		ig.virtuous.Add(txID)
 		ig.virtuousVoting.Add(txID)
-	}
 
-	ig.metrics.Issued(txID)
-
-	toReject := &inputRejector{
-		ig: ig,
-		tn: cn,
+		// If a tx is virtuous, it must be preferred.
+		ig.preferences.Add(txID)
 	}
 
-	for _, dependency := range tx.Dependencies() {
-		if !dependency.Status().Decided() {
-			toReject.deps.Add(dependency.ID())
-		}
-	}
-	ig.pendingReject.Register(toReject)
-	return ig.errs.Err
+	// Add this tx to the set of currently processing txs
+	ig.txs[txID.Key()] = txNode
+
+	// If a tx that this tx depends on is rejected, this tx should also be
+	// rejected.
+	ig.registerRejector(ig, tx)
+	return nil
 }
 
 // Issued implements the ConflictGraph interface
 func (ig *Input) Issued(tx Tx) bool {
+	// If the tx is either Accepted or Rejected, then it must have been issued
+	// previously.
 	if tx.Status().Decided() {
 		return true
 	}
+
+	// If the tx is currently processing, then it must have been issued.
 	_, ok := ig.txs[tx.ID().Key()]
 	return ok
 }
 
-// Conflicts implements the ConflictGraph interface
-func (ig *Input) Conflicts(tx Tx) ids.Set {
-	id := tx.ID()
-	conflicts := ids.Set{}
-
-	for _, input := range tx.InputIDs().List() {
-		inputNode := ig.utxos[input.Key()]
-		conflicts.Union(inputNode.conflicts)
-	}
-
-	conflicts.Remove(id)
-	return conflicts
-}
-
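The RecordPoll rewrite that follows aggregates per-UTXO state into per-tx state: a tx's confidence is the minimum confidence across the conflict sets it spends, it stays preferred only if it wins every one of them, and it is rogue if any of them is. A standalone rendering of that aggregation with toy types, not this package's API:

package main

import (
	"fmt"
	"math"
)

type utxoState struct {
	confidence int
	preference string // txID snowball currently prefers for this UTXO
	rogue      bool
}

// aggregate mirrors the per-input loop in the RecordPoll hunk below.
func aggregate(txID string, spent []utxoState) (confidence int, preferred, rogue bool) {
	confidence = math.MaxInt32
	preferred = true
	for _, u := range spent {
		if u.confidence < confidence {
			confidence = u.confidence // minimum over all inputs
		}
		preferred = preferred && u.preference == txID
		rogue = rogue || u.rogue
	}
	return confidence, preferred, rogue
}

func main() {
	c, p, r := aggregate("tx1", []utxoState{
		{confidence: 3, preference: "tx1", rogue: true},
		{confidence: 1, preference: "tx1", rogue: false},
	})
	fmt.Println(c, p, r) // 1 true true
}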
 // RecordPoll implements the ConflictGraph interface
 func (ig *Input) RecordPoll(votes ids.Bag) (bool, error) {
+	// Increase the vote ID. This is only updated here and is used to reset the
+	// confidence values of transactions lazily.
 	ig.currentVote++
+
+	// This flag tracks if the Avalanche instance needs to recompute its
+	// frontiers. Frontiers only need to be recalculated if preferences change
+	// or if a tx was accepted.
 	changed := false
 
+	// We only want to iterate over txs that received alpha votes
 	votes.SetThreshold(ig.params.Alpha)
-	threshold := votes.Threshold()
-	for _, toInc := range threshold.List() {
-		incKey := toInc.Key()
-		tx, exist := ig.txs[incKey]
+	// Get the set of IDs that meet this alpha threshold
+	metThreshold := votes.Threshold()
+	for _, txID := range metThreshold.List() {
+		txKey := txID.Key()
+
+		// Get the node this tx represents
+		txNode, exist := ig.txs[txKey]
 		if !exist {
-			// Votes for decided consumptions are ignored
+			// This tx may have already been accepted because of tx
+			// dependencies. If this is the case, we can just drop the vote.
 			continue
 		}
 
-		tx.bias++
-
-		// The timestamp is needed to ensure correctness in the case that a
-		// consumer was rejected from a conflict set, when it was preferred in
-		// this conflict set, when there is a tie for the second highest
-		// confidence.
-		tx.lastVote = ig.currentVote
+		txNode.numSuccessfulPolls++
+		txNode.lastVote = ig.currentVote
 
+		// This tx is preferred if it is preferred in all of its conflict sets
 		preferred := true
+		// This tx is rogue if any of its conflict sets are rogue
 		rogue := false
-		confidence := ig.params.BetaRogue
-
-		consumptions := tx.tx.InputIDs().List()
-		for _, inputID := range consumptions {
+		// The confidence of the tx is the minimum confidence of all the input's
+		// conflict sets
+		confidence := math.MaxInt32
+		for _, inputID := range txNode.tx.InputIDs().List() {
 			inputKey := inputID.Key()
-			input := ig.utxos[inputKey]
-
-			// If I did not receive a vote in the last vote, reset my confidence to 0
-			if input.lastVote+1 != ig.currentVote {
-				input.confidence = 0
+			utxo := ig.utxos[inputKey]
+
+			// If this tx wasn't voted for during the last poll, the confidence
+			// should have been reset during the last poll. So, we reset it now.
+			// Additionally, if a different tx was voted for in the last poll,
+			// the confidence should also be reset.
+			if utxo.lastVote+1 != ig.currentVote || !txID.Equals(utxo.color) {
+				utxo.confidence = 0
 			}
-			input.lastVote = ig.currentVote
-
-			// check the snowflake preference
-			if !toInc.Equals(input.color) {
-				input.confidence = 0
-			}
-			// update the snowball preference
-			if tx.bias > input.bias {
-				// if the previous preference lost it's preference in this
-				// input, it can't be preferred in all the inputs
-				if ig.preferences.Contains(input.preference) {
-					ig.preferences.Remove(input.preference)
-					changed = true
+			utxo.lastVote = ig.currentVote
+
+			// Update the Snowflake counter and preference.
+			utxo.color = txID
+			utxo.confidence++
+
+			// Update the Snowball preference.
+			if txNode.numSuccessfulPolls > utxo.numSuccessfulPolls {
+				// If this node didn't previously prefer this tx, then we need to
+				// update the preferences.
+				if !txID.Equals(utxo.preference) {
+					// If the previous preference lost its preference in this
+					// input, it can't be preferred in all the inputs.
+					if ig.preferences.Contains(utxo.preference) {
+						ig.preferences.Remove(utxo.preference)
+						// Because there was a change in preferences, Avalanche
+						// will need to recompute its frontiers.
+						changed = true
+					}
+					utxo.preference = txID
 				}
-
-				input.bias = tx.bias
-				input.preference = toInc
+				utxo.numSuccessfulPolls = txNode.numSuccessfulPolls
+			} else {
+				// This isn't the preferred choice in this conflict set so this
+				// tx isn't preferred.
+				preferred = false
 			}
-			// update snowflake vars
-			input.color = toInc
-			input.confidence++
-
-			ig.utxos[inputKey] = input
 
+			// If this utxo is rogue, the transaction must have at least one
+			// conflict.
 			rogue = rogue || utxo.rogue
 
-			// track cumulative statistics
-			preferred = preferred && toInc.Equals(input.preference)
-			rogue = rogue || input.rogue
-			if confidence > input.confidence {
-				confidence = input.confidence
+			// The confidence of this tx is the minimum confidence of its
+			// inputs.
+			if confidence > utxo.confidence {
+				confidence = utxo.confidence
 			}
+
+			// The input isn't a pointer, so it must be written back.
+			ig.utxos[inputKey] = utxo
 		}
 
-		// If the node wasn't accepted, but was preferred, make sure it is
-		// marked as preferred
-		if preferred && !ig.preferences.Contains(toInc) {
-			ig.preferences.Add(toInc)
+		// If this tx is preferred and it isn't already marked as such, mark the
+		// tx as preferred and for Avalanche to recompute the frontiers.
+		if preferred && !ig.preferences.Contains(txID) {
+			ig.preferences.Add(txID)
 			changed = true
 		}
 
-		if (!rogue && confidence >= ig.params.BetaVirtuous) ||
-			confidence >= ig.params.BetaRogue {
-			ig.deferAcceptance(tx)
+		// If the tx should be accepted, then we should defer its acceptance
+		// until its dependencies are decided. If this tx was already marked to
+		// be accepted, we shouldn't register it again.
+		if !txNode.pendingAccept &&
+			((!rogue && confidence >= ig.params.BetaVirtuous) ||
+				confidence >= ig.params.BetaRogue) {
+			// Mark that this tx is pending acceptance so acceptance is only
+			// registered once.
+			txNode.pendingAccept = true
+
+			ig.registerAcceptor(ig, txNode.tx)
 			if ig.errs.Errored() {
 				return changed, ig.errs.Err
 			}
-			changed = true
-			continue
 		}
-		ig.txs[incKey] = tx
-	}
-	return changed, ig.errs.Err
-}
-
-func (ig *Input) deferAcceptance(tn inputTx) {
-	toAccept := &inputAccepter{
-		ig: ig,
-		tn: tn,
-	}
-
-	for _, dependency := range tn.tx.Dependencies() {
-		if !dependency.Status().Decided() {
-			toAccept.deps.Add(dependency.ID())
-		}
-	}
-
-	ig.virtuousVoting.Remove(tn.tx.ID())
-	ig.pendingAccept.Register(toAccept)
-}
-
-// reject all the ids and remove them from their conflict sets
-func (ig *Input) reject(ids ...ids.ID) error {
-	for _, conflict := range ids {
-		conflictKey := conflict.Key()
-		cn := ig.txs[conflictKey]
-		delete(ig.txs, conflictKey)
-		ig.preferences.Remove(conflict) // A rejected value isn't preferred
-
-		// Remove from all conflict sets
-		ig.removeConflict(conflict, cn.tx.InputIDs().List()...)
- - // Mark it as rejected - if err := cn.tx.Reject(); err != nil { - return err - } - ig.ctx.DecisionDispatcher.Reject(ig.ctx.ChainID, cn.tx.ID(), cn.tx.Bytes()) - ig.metrics.Rejected(conflict) - ig.pendingAccept.Abandon(conflict) - ig.pendingReject.Fulfill(conflict) - } - return nil -} - -// Remove id from all of its conflict sets -func (ig *Input) removeConflict(id ids.ID, inputIDs ...ids.ID) { - for _, inputID := range inputIDs { - inputKey := inputID.Key() - // if the input doesn't exists, it was already decided - if input, exists := ig.utxos[inputKey]; exists { - input.conflicts.Remove(id) - - // If there is nothing attempting to consume the input, remove it - // from memory - if input.conflicts.Len() == 0 { - delete(ig.utxos, inputKey) - continue - } - - // If I was previously preferred, I must find who should now be - // preferred. This shouldn't normally happen, therefore it is okay - // to be fairly slow here - if input.preference.Equals(id) { - newPreference := ids.ID{} - newBias := -1 - newBiasTime := 0 - - // Find the highest bias conflict - for _, spend := range input.conflicts.List() { - tx := ig.txs[spend.Key()] - if tx.bias > newBias || - (tx.bias == newBias && - newBiasTime < tx.lastVote) { - newPreference = spend - newBias = tx.bias - newBiasTime = tx.lastVote - } - } - - // Set the preferences to the highest bias - input.preference = newPreference - input.bias = newBias - - ig.utxos[inputKey] = input - - // We need to check if this node is now preferred - preferenceNode, exist := ig.txs[newPreference.Key()] - if exist { - isPreferred := true - inputIDs := preferenceNode.tx.InputIDs().List() - for _, inputID := range inputIDs { - inputKey := inputID.Key() - input := ig.utxos[inputKey] - - if !newPreference.Equals(input.preference) { - // If this preference isn't the preferred color, it - // isn't preferred. Input might not exist, in which - // case this still isn't the preferred color - isPreferred = false - break - } - } - if isPreferred { - // If I'm preferred in all my conflict sets, I'm - // preferred - ig.preferences.Add(newPreference) - } - } - } else { - // If i'm rejecting the non-preference, do nothing - ig.utxos[inputKey] = input - } + if txNode.tx.Status() == choices.Accepted { + // By accepting a tx, the state of this instance has changed. + changed = true } } + return changed, ig.errs.Err } func (ig *Input) String() string { - nodes := []tempNode{} + nodes := make([]*snowballNode, 0, len(ig.txs)) for _, tx := range ig.txs { - id := tx.tx.ID() + txID := tx.tx.ID() confidence := ig.params.BetaRogue for _, inputID := range tx.tx.InputIDs().List() { input := ig.utxos[inputID.Key()] - if input.lastVote != ig.currentVote { + if input.lastVote != ig.currentVote || !txID.Equals(input.color) { confidence = 0 break } - if input.confidence < confidence { confidence = input.confidence } - if !id.Equals(input.color) { - confidence = 0 - break - } } - nodes = append(nodes, tempNode{ - id: id, - bias: tx.bias, - confidence: confidence, + nodes = append(nodes, &snowballNode{ + txID: txID, + numSuccessfulPolls: tx.numSuccessfulPolls, + confidence: confidence, }) } - sortTempNodes(nodes) - - sb := strings.Builder{} + return ConsensusString("IG", nodes) +} - sb.WriteString("IG(") +// accept the named txID and remove it from the graph +func (ig *Input) accept(txID ids.ID) error { + txKey := txID.Key() + txNode := ig.txs[txKey] + // We are accepting the tx, so we should remove the node from the graph. 
+ delete(ig.txs, txID.Key()) - format := fmt.Sprintf( - "\n Choice[%s] = ID: %%50s Confidence: %s Bias: %%d", - formatting.IntFormat(len(nodes)-1), - formatting.IntFormat(ig.params.BetaRogue-1)) + // Get the conflicts of this tx so that we can reject them + conflicts := ig.Conflicts(txNode.tx) - for i, cn := range nodes { - sb.WriteString(fmt.Sprintf(format, i, cn.id, cn.confidence, cn.bias)) + // This tx is consuming all the UTXOs from its inputs, so we can prune them + // all from memory + for _, inputID := range txNode.tx.InputIDs().List() { + delete(ig.utxos, inputID.Key()) } - if len(nodes) > 0 { - sb.WriteString("\n") - } - sb.WriteString(")") + // This tx is now accepted, so it shouldn't be part of the virtuous set or + // the preferred set. Its status as Accepted implies these descriptions. + ig.virtuous.Remove(txID) + ig.preferences.Remove(txID) - return sb.String() + // Reject all the txs that conflicted with this tx. + if err := ig.reject(conflicts.List()...); err != nil { + return err + } + return ig.acceptTx(txNode.tx) } -type inputAccepter struct { - ig *Input - deps ids.Set - rejected bool - tn inputTx -} +// reject all the named txIDs and remove them from their conflict sets +func (ig *Input) reject(conflictIDs ...ids.ID) error { + for _, conflictID := range conflictIDs { + conflictKey := conflictID.Key() + conflict := ig.txs[conflictKey] -func (a *inputAccepter) Dependencies() ids.Set { return a.deps } + // We are rejecting the tx, so we should remove it from the graph + delete(ig.txs, conflictKey) -func (a *inputAccepter) Fulfill(id ids.ID) { - a.deps.Remove(id) - a.Update() -} + // While it's statistically unlikely that something being rejected is + // preferred, it is handled for completeness. + ig.preferences.Remove(conflictID) -func (a *inputAccepter) Abandon(id ids.ID) { a.rejected = true } + // Remove this tx from all the conflict sets it's currently in + ig.removeConflict(conflictID, conflict.tx.InputIDs().List()...) -func (a *inputAccepter) Update() { - if a.rejected || a.deps.Len() != 0 || a.ig.errs.Errored() { - return + if err := ig.rejectTx(conflict.tx); err != nil { + return err + } } + return nil +} - id := a.tn.tx.ID() - delete(a.ig.txs, id.Key()) +// Remove id from all of its conflict sets +func (ig *Input) removeConflict(txID ids.ID, inputIDs ...ids.ID) { + for _, inputID := range inputIDs { + inputKey := inputID.Key() + utxo, exists := ig.utxos[inputKey] + if !exists { + // If the utxo doesn't exist, it was already consumed, so there is + // no mapping left to update. + continue + } - // Remove Tx from all of its conflicts - inputIDs := a.tn.tx.InputIDs() - a.ig.removeConflict(id, inputIDs.List()...) + // This tx is no longer attempting to spend this utxo. + utxo.spenders.Remove(txID) - a.ig.virtuous.Remove(id) - a.ig.preferences.Remove(id) + // If there is nothing attempting to consume the utxo anymore, remove it + // from memory. + if utxo.spenders.Len() == 0 { + delete(ig.utxos, inputKey) + continue + } - // Reject the conflicts - conflicts := ids.Set{} - for inputKey, exists := range inputIDs { - if exists { - inputNode := a.ig.utxos[inputKey] - conflicts.Union(inputNode.conflicts) + // If I'm rejecting the non-preference, there is nothing else to update.
+ if !utxo.preference.Equals(txID) { + ig.utxos[inputKey] = utxo + continue } - } - if err := a.ig.reject(conflicts.List()...); err != nil { - a.ig.errs.Add(err) - return - } - // Mark it as accepted - if err := a.tn.tx.Accept(); err != nil { - a.ig.errs.Add(err) - return - } - a.ig.ctx.DecisionDispatcher.Accept(a.ig.ctx.ChainID, id, a.tn.tx.Bytes()) - a.ig.metrics.Accepted(id) + // If I was previously preferred, I must find who should now be + // preferred. + preference := ids.ID{} + numSuccessfulPolls := -1 + lastVote := 0 + + // Find the new Snowball preference + for _, spender := range utxo.spenders.List() { + txNode := ig.txs[spender.Key()] + if txNode.numSuccessfulPolls > numSuccessfulPolls || + (txNode.numSuccessfulPolls == numSuccessfulPolls && + lastVote < txNode.lastVote) { + preference = spender + numSuccessfulPolls = txNode.numSuccessfulPolls + lastVote = txNode.lastVote + } + } - a.ig.pendingAccept.Fulfill(id) - a.ig.pendingReject.Abandon(id) -} + // Update the preferences + utxo.preference = preference + utxo.numSuccessfulPolls = numSuccessfulPolls -// inputRejector implements Blockable -type inputRejector struct { - ig *Input - deps ids.Set - rejected bool // true if the transaction represented by fn has been rejected - tn inputTx -} + ig.utxos[inputKey] = utxo -func (r *inputRejector) Dependencies() ids.Set { return r.deps } + // We need to check if this tx is now preferred + txNode := ig.txs[preference.Key()] + isPreferred := true + for _, inputID := range txNode.tx.InputIDs().List() { + inputKey := inputID.Key() + input := ig.utxos[inputKey] -func (r *inputRejector) Fulfill(id ids.ID) { - if r.rejected || r.ig.errs.Errored() { - return + if !preference.Equals(input.preference) { + // If this preference isn't the preferred color, the tx isn't + // preferred. Also note that the input might not exist, in which + // case this tx is going to be rejected in a later iteration. + isPreferred = false + break + } + } + if isPreferred { + // If I'm preferred in all my conflict sets, I'm preferred. 
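The re-selection loop above is worth a worked example. The sketch below, again with illustrative stand-in types rather than the PR's code, shows the tie-breaking rule: among the remaining spenders of a UTXO, the one with the most successful polls wins, and the more recent lastVote breaks ties.

    package main

    import "fmt"

    // spender is an illustrative stand-in for the txNode entries consulted by
    // removeConflict above; only the fields used for tie-breaking are kept.
    type spender struct {
        id                 string
        numSuccessfulPolls int
        lastVote           int
    }

    // newPreference mirrors the selection loop above: pick the remaining
    // spender with the most successful polls, breaking ties in favor of the
    // one that was voted for more recently.
    func newPreference(spenders []spender) string {
        best, bestPolls, bestVote := "", -1, 0
        for _, s := range spenders {
            if s.numSuccessfulPolls > bestPolls ||
                (s.numSuccessfulPolls == bestPolls && bestVote < s.lastVote) {
                best, bestPolls, bestVote = s.id, s.numSuccessfulPolls, s.lastVote
            }
        }
        return best
    }

    func main() {
        remaining := []spender{
            {id: "tx2", numSuccessfulPolls: 2, lastVote: 4},
            {id: "tx3", numSuccessfulPolls: 2, lastVote: 7},
        }
        fmt.Println(newPreference(remaining)) // tx3 wins the tie on lastVote
    }

This path only runs when the previously preferred spender of an input is rejected, so the linear scan over the remaining spenders is an acceptable cost.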
+ ig.preferences.Add(preference) + } } - r.rejected = true - r.ig.errs.Add(r.ig.reject(r.tn.tx.ID())) } - -func (*inputRejector) Abandon(id ids.ID) {} - -func (*inputRejector) Update() {} - -type tempNode struct { - id ids.ID - bias, confidence int -} - -type sortTempNodeData []tempNode - -func (tnd sortTempNodeData) Less(i, j int) bool { - return bytes.Compare(tnd[i].id.Bytes(), tnd[j].id.Bytes()) == -1 -} -func (tnd sortTempNodeData) Len() int { return len(tnd) } -func (tnd sortTempNodeData) Swap(i, j int) { tnd[j], tnd[i] = tnd[i], tnd[j] } - -func sortTempNodes(nodes []tempNode) { sort.Sort(sortTempNodeData(nodes)) } diff --git a/snow/consensus/snowstorm/metrics.go b/snow/consensus/snowstorm/metrics.go index 22add0082443..7d2e1b7d3ff1 100644 --- a/snow/consensus/snowstorm/metrics.go +++ b/snow/consensus/snowstorm/metrics.go @@ -4,65 +4,81 @@ package snowstorm import ( - "fmt" "time" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/utils/wrappers" ) type metrics struct { - numProcessing prometheus.Gauge - latAccepted, latRejected prometheus.Histogram + // numProcessing keeps track of the number of transactions currently + // processing in a snowstorm instance + numProcessing prometheus.Gauge - clock timer.Clock + // accepted tracks the number of milliseconds that a transaction was + // processing before being accepted + accepted prometheus.Histogram + + // rejected tracks the number of milliseconds that a transaction was + // processing before being rejected + rejected prometheus.Histogram + + // clock gives access to the current wall clock time + clock timer.Clock + + // processing keeps track of the time that each transaction was issued into + // the snowstorm instance. 
This is used to calculate the amount of time it + // took to accept or reject the transaction processing map[[32]byte]time.Time } // Initialize implements the Engine interface -func (m *metrics) Initialize(namespace string, registerer prometheus.Registerer) error { +func (m *metrics) Initialize( + namespace string, + registerer prometheus.Registerer, +) error { m.processing = make(map[[32]byte]time.Time) - m.numProcessing = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "tx_processing", - Help: "Number of processing transactions", - }) - m.latAccepted = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "tx_accepted", - Help: "Latency of accepting from the time the transaction was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) - m.latRejected = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "tx_rejected", - Help: "Latency of rejecting from the time the transaction was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) - - if err := registerer.Register(m.numProcessing); err != nil { - return fmt.Errorf("Failed to register tx_processing statistics due to %s", err) - } - if err := registerer.Register(m.latAccepted); err != nil { - return fmt.Errorf("Failed to register tx_accepted statistics due to %s", err) - } - if err := registerer.Register(m.latRejected); err != nil { - return fmt.Errorf("Failed to register tx_rejected statistics due to %s", err) - } - return nil + m.numProcessing = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "tx_processing", + Help: "Number of processing transactions", + }) + m.accepted = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "tx_accepted", + Help: "Time spent processing before being accepted in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) + m.rejected = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "tx_rejected", + Help: "Time spent processing before being rejected in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) + + errs := wrappers.Errs{} + errs.Add( + registerer.Register(m.numProcessing), + registerer.Register(m.accepted), + registerer.Register(m.rejected), + ) + return errs.Err } +// Issued marks that a transaction with the provided ID was added to the +// snowstorm consensus instance. It is assumed that either Accept or Reject will +// be called with this same ID in the future. func (m *metrics) Issued(id ids.ID) { m.processing[id.Key()] = m.clock.Time() m.numProcessing.Inc() } +// Accepted marks that a transaction with the provided ID was accepted. It is +// assumed that Issued was previously called with this ID. func (m *metrics) Accepted(id ids.ID) { key := id.Key() start := m.processing[key] @@ -70,10 +86,12 @@ func (m *metrics) Accepted(id ids.ID) { delete(m.processing, key) - m.latAccepted.Observe(float64(end.Sub(start).Milliseconds())) + m.accepted.Observe(float64(end.Sub(start).Milliseconds())) m.numProcessing.Dec() } +// Rejected marks that a transaction with the provided ID was rejected. It is +// assumed that Issued was previously called with this ID.
func (m *metrics) Rejected(id ids.ID) { key := id.Key() start := m.processing[key] @@ -81,6 +99,6 @@ func (m *metrics) Rejected(id ids.ID) { delete(m.processing, key) - m.latRejected.Observe(float64(end.Sub(start).Milliseconds())) + m.rejected.Observe(float64(end.Sub(start).Milliseconds())) m.numProcessing.Dec() } diff --git a/snow/consensus/snowstorm/network_test.go b/snow/consensus/snowstorm/network_test.go index f60cbe34f9e4..03b5b4446a94 100644 --- a/snow/consensus/snowstorm/network_test.go +++ b/snow/consensus/snowstorm/network_test.go @@ -9,12 +9,13 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/snow/consensus/snowball" "github.com/ava-labs/gecko/utils/sampler" + + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) type Network struct { - params snowball.Parameters + params sbcon.Parameters consumers []*TestTx nodeTxs []map[[32]byte]*TestTx nodes, running []Consensus @@ -31,7 +32,12 @@ func (n *Network) shuffleConsumers() { n.consumers = consumers } -func (n *Network) Initialize(params snowball.Parameters, numColors, colorsPerConsumer, maxInputConflicts int) { +func (n *Network) Initialize( + params sbcon.Parameters, + numColors, + colorsPerConsumer, + maxInputConflicts int, +) { n.params = params idCount := uint64(0) diff --git a/snow/consensus/snowstorm/snowball.go b/snow/consensus/snowstorm/snowball.go new file mode 100644 index 000000000000..178e1184a849 --- /dev/null +++ b/snow/consensus/snowstorm/snowball.go @@ -0,0 +1,51 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowstorm + +type snowball struct { + // numSuccessfulPolls is the number of times this choice was the successful + // result of a network poll + numSuccessfulPolls int + + // confidence is the number of consecutive times this choice was the + // successful result of a network poll as of [lastVote] + confidence int + + // lastVote is the last poll number in which this choice was included in a + // successful network poll + lastVote int + + // rogue identifies if there is a known conflict with this choice + rogue bool +} + +func (sb *snowball) Confidence(currentVote int) int { + if sb.lastVote != currentVote { + return 0 + } + return sb.confidence +} + +func (sb *snowball) RecordSuccessfulPoll(currentVote int) { + // If this choice wasn't voted for during the last poll, the confidence + // should have been reset during the last poll. So, we reset it now. + if sb.lastVote+1 != currentVote { + sb.confidence = 0 + } + + // This choice was voted for in this poll. Mark it as such. + sb.lastVote = currentVote + + // An affirmative vote increases both the snowball and snowflake counters. + sb.numSuccessfulPolls++ + sb.confidence++ +} + +func (sb *snowball) Finalized(betaVirtuous, betaRogue int) bool { + // This choice is finalized if the snowflake counter is at least + // [betaRogue]. If there are no known conflicts with this operation, it can + // be accepted with a snowflake counter of at least [betaVirtuous]. + return (!sb.rogue && sb.confidence >= betaVirtuous) || + sb.confidence >= betaRogue +} diff --git a/snow/consensus/snowstorm/test_tx_test.go b/snow/consensus/snowstorm/test_tx_test.go deleted file mode 100644 index 7f34a97c3db8..000000000000 --- a/snow/consensus/snowstorm/test_tx_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms.
- -package snowstorm - -import ( - "testing" -) - -func TestTxVerify(t *testing.T) { - Setup() - - if err := Red.Verify(); err != nil { - t.Fatal(err) - } -} - -func TestTxBytes(t *testing.T) { - Setup() - - if Red.Bytes() == nil { - t.Fatalf("Expected non-nil bytes") - } -} diff --git a/snow/engine/avalanche/bootstrap/bootstrapper.go b/snow/engine/avalanche/bootstrap/bootstrapper.go index ac3ef342c0ee..9974e231122f 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -136,9 +136,9 @@ func (b *Bootstrapper) fetch(vtxIDs ...ids.ID) error { continue } - validators, err := b.Validators.Sample(1) // validator to send request to + validators, err := b.Beacons.Sample(1) // validator to send request to if err != nil { - return fmt.Errorf("Dropping request for %s as there are no validators", vtxID) + return fmt.Errorf("dropping request for %s as there are no validators", vtxID) } validatorID := validators[0].ID() b.RequestID++ diff --git a/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/snow/engine/avalanche/bootstrap/bootstrapper_test.go index b4f995410d56..25b939fa3915 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -48,9 +48,8 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *vertex.T sender.CantGetAcceptedFrontier = false - peer := validators.GenerateRandomValidator(1) - peerID := peer.ID() - peers.Add(peer) + peer := ids.GenerateTestShortID() + peers.AddWeight(peer, 1) vtxBlocker, _ := queue.New(prefixdb.New([]byte("vtx"), db)) txBlocker, _ := queue.New(prefixdb.New([]byte("tx"), db)) @@ -68,7 +67,7 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *vertex.T TxBlocked: txBlocker, Manager: manager, VM: vm, - }, peerID, sender, manager, vm + }, peer, sender, manager, vm } // Three vertices in the accepted frontier. None have parents. 
No need to fetch anything diff --git a/snow/engine/avalanche/state/unique_vertex_test.go b/snow/engine/avalanche/state/unique_vertex_test.go index 7cc309c84526..de31d56023ae 100644 --- a/snow/engine/avalanche/state/unique_vertex_test.go +++ b/snow/engine/avalanche/state/unique_vertex_test.go @@ -80,7 +80,7 @@ func TestUniqueVertexCacheHit(t *testing.T) { serializer: s, } if err := uVtx.setVertex(vtx); err != nil { - t.Fatalf("Failed to set vertex due to: %w", err) + t.Fatalf("Failed to set vertex due to: %s", err) } newUVtx := &uniqueVertex{ diff --git a/snow/engine/avalanche/state/vertex_test.go b/snow/engine/avalanche/state/vertex_test.go index 58c4c7666c63..0bdd9aa244f7 100644 --- a/snow/engine/avalanche/state/vertex_test.go +++ b/snow/engine/avalanche/state/vertex_test.go @@ -31,7 +31,7 @@ func TestVertexVerify(t *testing.T) { } if err := validVertex.Verify(); err != nil { - t.Fatalf("Valid vertex failed verification due to: %w", err) + t.Fatalf("Valid vertex failed verification due to: %s", err) } nonUniqueParentsVtx := &innerVertex{ diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index 7a8d39cd1af1..b1d8692be660 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -95,10 +95,9 @@ func (t *Transitive) finishBootstrapping() error { t.Ctx.Log.Error("vertex %s failed to be loaded from the frontier with %s", vtxID, err) } } - t.Consensus.Initialize(t.Ctx, t.Params, frontier) t.Ctx.Log.Info("bootstrapping finished with %d vertices in the accepted frontier", len(frontier)) - return nil + return t.Consensus.Initialize(t.Ctx, t.Params, frontier) } // Gossip implements the Engine interface diff --git a/snow/engine/avalanche/transitive_test.go b/snow/engine/avalanche/transitive_test.go index 24ffc448e551..82dee760ecca 100644 --- a/snow/engine/avalanche/transitive_test.go +++ b/snow/engine/avalanche/transitive_test.go @@ -24,8 +24,6 @@ var ( errUnknownVertex = errors.New("unknown vertex") errFailedParsing = errors.New("failed parsing") errMissing = errors.New("missing") - - Genesis = ids.GenerateTestID() ) func TestEngineShutdown(t *testing.T) { @@ -49,12 +47,11 @@ func TestEngineShutdown(t *testing.T) { func TestEngineAdd(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -101,7 +98,7 @@ func TestEngineAdd(t *testing.T) { t.Fatalf("Asked multiple times") } *asked = true - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for vertex") } if !vtx.ParentsV[0].ID().Equals(vtxID) { @@ -116,7 +113,7 @@ func TestEngineAdd(t *testing.T) { return vtx, nil } - te.Put(vdr.ID(), 0, vtx.ID(), vtx.Bytes()) + te.Put(vdr, 0, vtx.ID(), vtx.Bytes()) manager.ParseVertexF = nil @@ -130,7 +127,7 @@ func TestEngineAdd(t *testing.T) { manager.ParseVertexF = func(b []byte) (avalanche.Vertex, error) { return nil, errFailedParsing } - te.Put(vdr.ID(), *reqID, vtx.ParentsV[0].ID(), nil) + te.Put(vdr, *reqID, vtx.ParentsV[0].ID(), nil) manager.ParseVertexF = nil @@ -142,12 +139,11 @@ func TestEngineAdd(t *testing.T) { func TestEngineQuery(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = 
t @@ -226,7 +222,7 @@ func TestEngineQuery(t *testing.T) { t.Fatalf("Asked multiple times") } *asked = true - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for vertex") } if !vtx0.ID().Equals(vtxID) { @@ -234,7 +230,7 @@ func TestEngineQuery(t *testing.T) { } } - te.PullQuery(vdr.ID(), 0, vtx0.ID()) + te.PullQuery(vdr, 0, vtx0.ID()) if !*vertexed { t.Fatalf("Didn't request vertex") } @@ -251,7 +247,7 @@ func TestEngineQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -277,7 +273,7 @@ func TestEngineQuery(t *testing.T) { } return vtx0, nil } - te.Put(vdr.ID(), 0, vtx0.ID(), vtx0.Bytes()) + te.Put(vdr, 0, vtx0.ID(), vtx0.Bytes()) manager.ParseVertexF = nil if !*queried { @@ -319,7 +315,7 @@ func TestEngineQuery(t *testing.T) { t.Fatalf("Asked multiple times") } *asked = true - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for vertex") } if !vtx1.ID().Equals(vtxID) { @@ -329,7 +325,7 @@ func TestEngineQuery(t *testing.T) { s := ids.Set{} s.Add(vtx1.ID()) - te.Chits(vdr.ID(), *queryRequestID, s) + te.Chits(vdr, *queryRequestID, s) *queried = false sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { @@ -339,7 +335,7 @@ func TestEngineQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -370,7 +366,7 @@ func TestEngineQuery(t *testing.T) { return vtx1, nil } - te.Put(vdr.ID(), 0, vtx1.ID(), vtx1.Bytes()) + te.Put(vdr, 0, vtx1.ID(), vtx1.Bytes()) manager.ParseVertexF = nil if vtx0.Status() != choices.Accepted { @@ -382,7 +378,7 @@ func TestEngineQuery(t *testing.T) { _ = te.polls.String() // Shouldn't panic - te.QueryFailed(vdr.ID(), *queryRequestID) + te.QueryFailed(vdr, *queryRequestID) if len(te.vtxBlocked) != 0 { t.Fatalf("Should have finished blocking") } @@ -404,16 +400,16 @@ func TestEngineMultipleQuery(t *testing.T) { BatchSize: 1, } - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) - vdr2 := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr0) - vals.Add(vdr1) - vals.Add(vdr2) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + vdr2 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) + vals.AddWeight(vdr2, 1) sender := &common.SenderTest{} sender.T = t @@ -479,7 +475,7 @@ func TestEngineMultipleQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -523,7 +519,7 @@ func TestEngineMultipleQuery(t *testing.T) { t.Fatalf("Asked multiple times") } *asked = true - if !vdr0.ID().Equals(inVdr) { + if !vdr0.Equals(inVdr) { t.Fatalf("Asking wrong validator for vertex") } if !vtx1.ID().Equals(vtxID) { @@ -538,14 +534,14 @@ func TestEngineMultipleQuery(t *testing.T) { s2 := ids.Set{} s2.Add(vtx0.ID()) - te.Chits(vdr0.ID(), *queryRequestID, s0) - te.QueryFailed(vdr1.ID(), *queryRequestID) - te.Chits(vdr2.ID(), *queryRequestID, s2) + te.Chits(vdr0, *queryRequestID, s0) + te.QueryFailed(vdr1, *queryRequestID) + 
te.Chits(vdr2, *queryRequestID, s2) // Should be dropped because the query was marked as failed - te.Chits(vdr1.ID(), *queryRequestID, s0) + te.Chits(vdr1, *queryRequestID, s0) - te.GetFailed(vdr0.ID(), *reqID) + te.GetFailed(vdr0, *reqID) if vtx0.Status() != choices.Accepted { t.Fatalf("Should have executed vertex") @@ -558,12 +554,11 @@ func TestEngineMultipleQuery(t *testing.T) { func TestEngineBlockedIssue(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -629,12 +624,11 @@ func TestEngineBlockedIssue(t *testing.T) { func TestEngineAbandonResponse(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -685,8 +679,8 @@ func TestEngineAbandonResponse(t *testing.T) { *reqID = requestID } - te.PullQuery(vdr.ID(), 0, vtx.ID()) - te.GetFailed(vdr.ID(), *reqID) + te.PullQuery(vdr, 0, vtx.ID()) + te.GetFailed(vdr, *reqID) if len(te.vtxBlocked) != 0 { t.Fatalf("Should have removed blocking event") @@ -696,12 +690,11 @@ func TestEngineAbandonResponse(t *testing.T) { func TestEngineScheduleRepoll(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -766,7 +759,7 @@ func TestEngineScheduleRepoll(t *testing.T) { } } - te.QueryFailed(vdr.ID(), *requestID) + te.QueryFailed(vdr, *requestID) if !*repolled { t.Fatalf("Should have issued a noop") @@ -785,12 +778,11 @@ func TestEngineRejectDoubleSpendTx(t *testing.T) { sender.Default(true) sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -884,12 +876,11 @@ func TestEngineRejectDoubleSpendIssuedTx(t *testing.T) { sender.Default(true) sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -987,12 +978,11 @@ func TestEngineIssueRepoll(t *testing.T) { sender.Default(true) sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -1027,7 +1017,7 @@ func TestEngineIssueRepoll(t *testing.T) { sender.PullQueryF = func(vdrs ids.ShortSet, _ uint32, vtxID ids.ID) { vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !vdrs.Equals(vdrSet) { t.Fatalf("Wrong query recipients") } @@ -1052,12 +1042,11 @@ func TestEngineReissue(t *testing.T) { sender.Default(true) sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := 
validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -1188,7 +1177,7 @@ func TestEngineReissue(t *testing.T) { } return vtx, nil } - te.Put(vdr.ID(), 0, vtx.ID(), vtx.Bytes()) + te.Put(vdr, 0, vtx.ID(), vtx.Bytes()) manager.ParseVertexF = nil vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx3} } @@ -1196,7 +1185,7 @@ func TestEngineReissue(t *testing.T) { s := ids.Set{} s.Add(vtx.ID()) - te.Chits(vdr.ID(), *queryRequestID, s) + te.Chits(vdr, *queryRequestID, s) if len(lastVtx.TxsV) != 1 || !lastVtx.TxsV[0].ID().Equals(tx0.ID()) { t.Fatalf("Should have re-issued the tx") @@ -1216,12 +1205,11 @@ func TestEngineLargeIssue(t *testing.T) { sender.Default(true) sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -1436,11 +1424,12 @@ func TestEngineInsufficientValidators(t *testing.T) { func TestEnginePushGossip(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1508,7 +1497,7 @@ func TestEnginePushGossip(t *testing.T) { sender.CantPushQuery = false sender.CantChits = false - te.PushQuery(vdr.ID(), 0, vtx.ID(), vtx.Bytes()) + te.PushQuery(vdr, 0, vtx.ID(), vtx.Bytes()) if *requested { t.Fatalf("Shouldn't have requested the vertex") @@ -1518,11 +1507,12 @@ func TestEnginePushGossip(t *testing.T) { func TestEngineSingleQuery(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1584,11 +1574,12 @@ func TestEngineSingleQuery(t *testing.T) { func TestEngineParentBlockingInsert(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1679,11 +1670,12 @@ func TestEngineParentBlockingInsert(t *testing.T) { func TestEngineBlockingChitRequest(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1773,7 +1765,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { panic("Should have errored") } - te.PushQuery(vdr.ID(), 0, blockingVtx.ID(), blockingVtx.Bytes()) + te.PushQuery(vdr, 0, blockingVtx.ID(), blockingVtx.Bytes()) if len(te.vtxBlocked) != 3 { t.Fatalf("Both inserts and the query should be blocking") @@ -1793,11 +1785,12 @@ func TestEngineBlockingChitRequest(t *testing.T) { func TestEngineBlockingChitResponse(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() 
+ vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1874,7 +1867,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -1896,7 +1889,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { voteSet := ids.Set{} voteSet.Add(blockingVtx.ID()) - te.Chits(vdr.ID(), *queryRequestID, voteSet) + te.Chits(vdr, *queryRequestID, voteSet) if len(te.vtxBlocked) != 2 { t.Fatalf("The insert should be blocking, as well as the chit response") @@ -1917,11 +1910,12 @@ func TestEngineBlockingChitResponse(t *testing.T) { func TestEngineMissingTx(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1998,7 +1992,7 @@ func TestEngineMissingTx(t *testing.T) { sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -2020,7 +2014,7 @@ func TestEngineMissingTx(t *testing.T) { voteSet := ids.Set{} voteSet.Add(blockingVtx.ID()) - te.Chits(vdr.ID(), *queryRequestID, voteSet) + te.Chits(vdr, *queryRequestID, voteSet) if len(te.vtxBlocked) != 2 { t.Fatalf("The insert should be blocking, as well as the chit response") @@ -2041,12 +2035,11 @@ func TestEngineMissingTx(t *testing.T) { func TestEngineIssueBlockingTx(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -2099,13 +2092,11 @@ func TestEngineIssueBlockingTx(t *testing.T) { func TestEngineReissueAbortedVertex(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vdrID := vdr.ID() - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -2193,12 +2184,12 @@ func TestEngineReissueAbortedVertex(t *testing.T) { panic("Unknown bytes provided") } - te.PushQuery(vdrID, 0, vtxID1, vtx1.Bytes()) + te.PushQuery(vdr, 0, vtxID1, vtx1.Bytes()) sender.GetF = nil manager.ParseVertexF = nil - te.GetFailed(vdrID, *requestID) + te.GetFailed(vdr, *requestID) requested := new(bool) sender.GetF = func(_ ids.ShortID, _ uint32, vtxID ids.ID) { @@ -2215,7 +2206,7 @@ func TestEngineReissueAbortedVertex(t *testing.T) { panic("Unknown bytes provided") } - te.PullQuery(vdrID, 0, vtxID1) + te.PullQuery(vdr, 0, vtxID1) if !*requested { t.Fatalf("Should have requested the missing vertex") @@ -2225,14 +2216,12 @@ func TestEngineReissueAbortedVertex(t *testing.T) { func TestEngineBootstrappingIntoConsensus(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vdrID := vdr.ID() - vals := validators.NewSet() config.Validators = vals config.Beacons = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 
1) sender := &common.SenderTest{} sender.T = t @@ -2313,8 +2302,8 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } - if !vdrs.Contains(vdrID) { - t.Fatalf("Should have requested from %s", vdrID) + if !vdrs.Contains(vdr) { + t.Fatalf("Should have requested from %s", vdr) } *requested = true *requestID = reqID @@ -2338,8 +2327,8 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } - if !vdrs.Contains(vdrID) { - t.Fatalf("Should have requested from %s", vdrID) + if !vdrs.Contains(vdr) { + t.Fatalf("Should have requested from %s", vdr) } if !acceptedFrontier.Equals(proposedAccepted) { t.Fatalf("Wrong proposedAccepted vertices.\nExpected: %s\nGot: %s", acceptedFrontier, proposedAccepted) @@ -2348,7 +2337,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { *requestID = reqID } - te.AcceptedFrontier(vdrID, *requestID, acceptedFrontier) + te.AcceptedFrontier(vdr, *requestID, acceptedFrontier) if !*requested { t.Fatalf("Should have requested from the validators during AcceptedFrontier") @@ -2364,7 +2353,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { } sender.GetAncestorsF = func(inVdr ids.ShortID, reqID uint32, vtxID ids.ID) { - if !vdrID.Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for vertex") } if !vtx0.ID().Equals(vtxID) { @@ -2373,7 +2362,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { *requestID = reqID } - te.Accepted(vdrID, *requestID, acceptedFrontier) + te.Accepted(vdr, *requestID, acceptedFrontier) manager.GetVertexF = nil sender.GetF = nil @@ -2406,7 +2395,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { panic("Unknown bytes provided") } - te.MultiPut(vdrID, *requestID, [][]byte{vtxBytes0}) + te.MultiPut(vdr, *requestID, [][]byte{vtxBytes0}) vm.ParseTxF = nil manager.ParseVertexF = nil @@ -2429,7 +2418,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { panic("Unknown bytes provided") } sender.ChitsF = func(inVdr ids.ShortID, _ uint32, chits ids.Set) { - if !inVdr.Equals(vdrID) { + if !inVdr.Equals(vdr) { t.Fatalf("Sent to the wrong validator") } @@ -2444,8 +2433,8 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } - if !vdrs.Contains(vdrID) { - t.Fatalf("Should have requested from %s", vdrID) + if !vdrs.Contains(vdr) { + t.Fatalf("Should have requested from %s", vdr) } if !vtxID1.Equals(vtxID) { @@ -2464,7 +2453,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { panic("Unknown bytes provided") } - te.PushQuery(vdrID, 0, vtxID1, vtxBytes1) + te.PushQuery(vdr, 0, vtxID1, vtxBytes1) manager.ParseVertexF = nil sender.ChitsF = nil @@ -2475,12 +2464,11 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -2561,7 +2549,7 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { votes := ids.Set{} votes.Add(vtx1.ID()) - te.Chits(vdr.ID(), *reqID, votes) + te.Chits(vdr, *reqID, votes) if status := vtx0.Status(); status != choices.Accepted { t.Fatalf("should have accepted the vertex due to 
transitive voting") @@ -2571,12 +2559,11 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { func TestEnginePartiallyValidVertex(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -2701,14 +2688,14 @@ func TestEngineGossip(t *testing.T) { func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - secondVdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) - vals.Add(secondVdr) + vdr := ids.GenerateTestShortID() + secondVdr := ids.GenerateTestShortID() + + vals.AddWeight(vdr, 1) + vals.AddWeight(secondVdr, 1) sender := &common.SenderTest{} sender.T = t @@ -2791,7 +2778,7 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { reqID := new(uint32) sender.GetF = func(reqVdr ids.ShortID, requestID uint32, vtxID ids.ID) { *reqID = requestID - if !reqVdr.Equals(vdr.ID()) { + if !reqVdr.Equals(vdr) { t.Fatalf("Wrong validator requested") } if !vtxID.Equals(vtx0.ID()) { @@ -2799,9 +2786,9 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, vtx1.ID(), vtx1.Bytes()) + te.PushQuery(vdr, 0, vtx1.ID(), vtx1.Bytes()) - te.Put(secondVdr.ID(), *reqID, vtx0.ID(), []byte{3}) + te.Put(secondVdr, *reqID, vtx0.ID(), []byte{3}) *parsed = false manager.ParseVertexF = func(b []byte) (avalanche.Vertex, error) { @@ -2829,7 +2816,7 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { vtx0.StatusV = choices.Processing - te.Put(vdr.ID(), *reqID, vtx0.ID(), vtx0.Bytes()) + te.Put(vdr, *reqID, vtx0.ID(), vtx0.Bytes()) prefs := te.Consensus.Preferences() if !prefs.Contains(vtx1.ID()) { @@ -2840,12 +2827,11 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { func TestEnginePushQueryRequestIDConflict(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -2931,7 +2917,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { reqID := new(uint32) sender.GetF = func(reqVdr ids.ShortID, requestID uint32, vtxID ids.ID) { *reqID = requestID - if !reqVdr.Equals(vdr.ID()) { + if !reqVdr.Equals(vdr) { t.Fatalf("Wrong validator requested") } if !vtxID.Equals(vtx0.ID()) { @@ -2939,12 +2925,12 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, vtx1.ID(), vtx1.Bytes()) + te.PushQuery(vdr, 0, vtx1.ID(), vtx1.Bytes()) sender.GetF = nil sender.CantGet = false - te.PushQuery(vdr.ID(), *reqID, randomVtxID, []byte{3}) + te.PushQuery(vdr, *reqID, randomVtxID, []byte{3}) *parsed = false manager.ParseVertexF = func(b []byte) (avalanche.Vertex, error) { @@ -2972,7 +2958,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { vtx0.StatusV = choices.Processing - te.Put(vdr.ID(), *reqID, vtx0.ID(), vtx0.Bytes()) + te.Put(vdr, *reqID, vtx0.ID(), vtx0.Bytes()) prefs := te.Consensus.Preferences() if !prefs.Contains(vtx1.ID()) { @@ -2984,13 +2970,13 @@ func TestEngineAggressivePolling(t *testing.T) { config := DefaultConfig() config.Params.ConcurrentRepolls = 3 - - vdr := 
validators.GenerateRandomValidator(1) + config.Params.BetaRogue = 3 vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -3034,8 +3020,12 @@ func TestEngineAggressivePolling(t *testing.T) { } te := &Transitive{} - te.Initialize(config) - te.finishBootstrapping() + if err := te.Initialize(config); err != nil { + t.Fatal(err) + } + if err := te.finishBootstrapping(); err != nil { + t.Fatal(err) + } te.Ctx.Bootstrapped() parsed := new(bool) @@ -3066,13 +3056,13 @@ func TestEngineAggressivePolling(t *testing.T) { numPullQueries := new(int) sender.PullQueryF = func(ids.ShortSet, uint32, ids.ID) { *numPullQueries++ } - te.Put(vdr.ID(), 0, vtx.ID(), vtx.Bytes()) + te.Put(vdr, 0, vtx.ID(), vtx.Bytes()) if *numPushQueries != 1 { t.Fatalf("should have issued one push query") } if *numPullQueries != 2 { - t.Fatalf("should have issued one pull query") + t.Fatalf("should have issued two pull queries") } } @@ -3089,12 +3079,11 @@ func TestEngineDuplicatedIssuance(t *testing.T) { sender.Default(true) sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -3187,13 +3176,15 @@ func TestEngineDoubleChit(t *testing.T) { config.Params.Alpha = 2 config.Params.K = 2 - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr0) - vals.Add(vdr1) config.Validators = vals + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -3280,19 +3271,19 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr0.ID(), *reqID, votes) + te.Chits(vdr0, *reqID, votes) if status := tx.Status(); status != choices.Processing { t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr0.ID(), *reqID, votes) + te.Chits(vdr0, *reqID, votes) if status := tx.Status(); status != choices.Processing { t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr1.ID(), *reqID, votes) + te.Chits(vdr1, *reqID, votes) if status := tx.Status(); status != choices.Accepted { t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Accepted) diff --git a/snow/engine/common/bootstrapper.go b/snow/engine/common/bootstrapper.go index f1ce4eff52f2..a15e49639aa5 100644 --- a/snow/engine/common/bootstrapper.go +++ b/snow/engine/common/bootstrapper.go @@ -134,8 +134,8 @@ func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, conta b.pendingAccepted.Remove(validatorID) weight := uint64(0) - if vdr, ok := b.Beacons.Get(validatorID); ok { - weight = vdr.Weight() + if w, ok := b.Beacons.GetWeight(validatorID); ok { + weight = w } for _, containerID := range containerIDs.List() { diff --git a/snow/engine/common/queue/test_job.go b/snow/engine/common/queue/test_job.go index 3c50f49becc8..8cb8c9cf4bf0 100644 --- a/snow/engine/common/queue/test_job.go +++ b/snow/engine/common/queue/test_job.go @@ -62,7 +62,7 @@ func (j *TestJob) Execute() error { } else if j.CantExecute && j.T != nil { j.T.Fatalf("Unexpectedly called Execute") } - 
return errors.New("Unexpectedly called Execute") + return errors.New("unexpectedly called Execute") } // Bytes ... diff --git a/snow/engine/common/test_bootstrapable.go b/snow/engine/common/test_bootstrapable.go index b9eac91c81d1..a25a05076f61 100644 --- a/snow/engine/common/test_bootstrapable.go +++ b/snow/engine/common/test_bootstrapable.go @@ -60,7 +60,7 @@ func (b *BootstrapableTest) ForceAccepted(containerIDs ids.Set) error { if b.T != nil { b.T.Fatalf("Unexpectedly called ForceAccepted") } - return errors.New("Unexpectedly called ForceAccepted") + return errors.New("unexpectedly called ForceAccepted") } return nil } diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go index dafbf1ab3691..9bc1d3687102 100644 --- a/snow/engine/common/test_engine.go +++ b/snow/engine/common/test_engine.go @@ -113,7 +113,7 @@ func (e *EngineTest) Startup() error { if e.T != nil { e.T.Fatalf("Unexpectedly called Startup") } - return errors.New("Unexpectedly called Startup") + return errors.New("unexpectedly called Startup") } // Gossip ... @@ -127,7 +127,7 @@ func (e *EngineTest) Gossip() error { if e.T != nil { e.T.Fatalf("Unexpectedly called Gossip") } - return errors.New("Unexpectedly called Gossip") + return errors.New("unexpectedly called Gossip") } // Shutdown ... @@ -141,7 +141,7 @@ func (e *EngineTest) Shutdown() error { if e.T != nil { e.T.Fatalf("Unexpectedly called Shutdown") } - return errors.New("Unexpectedly called Shutdown") + return errors.New("unexpectedly called Shutdown") } // Notify ... @@ -155,7 +155,7 @@ func (e *EngineTest) Notify(msg Message) error { if e.T != nil { e.T.Fatalf("Unexpectedly called Notify") } - return errors.New("Unexpectedly called Notify") + return errors.New("unexpectedly called Notify") } // GetAcceptedFrontier ... @@ -169,7 +169,7 @@ func (e *EngineTest) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint if e.T != nil { e.T.Fatalf("Unexpectedly called GetAcceptedFrontier") } - return errors.New("Unexpectedly called GetAcceptedFrontier") + return errors.New("unexpectedly called GetAcceptedFrontier") } // GetAcceptedFrontierFailed ... @@ -183,7 +183,7 @@ func (e *EngineTest) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestI if e.T != nil { e.T.Fatalf("Unexpectedly called GetAcceptedFrontierFailed") } - return errors.New("Unexpectedly called GetAcceptedFrontierFailed") + return errors.New("unexpectedly called GetAcceptedFrontierFailed") } // AcceptedFrontier ... @@ -197,7 +197,7 @@ func (e *EngineTest) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, if e.T != nil { e.T.Fatalf("Unexpectedly called AcceptedFrontierF") } - return errors.New("Unexpectedly called AcceptedFrontierF") + return errors.New("unexpectedly called AcceptedFrontierF") } // GetAccepted ... @@ -211,7 +211,7 @@ func (e *EngineTest) GetAccepted(validatorID ids.ShortID, requestID uint32, cont if e.T != nil { e.T.Fatalf("Unexpectedly called GetAccepted") } - return errors.New("Unexpectedly called GetAccepted") + return errors.New("unexpectedly called GetAccepted") } // GetAcceptedFailed ... @@ -225,7 +225,7 @@ func (e *EngineTest) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32 if e.T != nil { e.T.Fatalf("Unexpectedly called GetAcceptedFailed") } - return errors.New("Unexpectedly called GetAcceptedFailed") + return errors.New("unexpectedly called GetAcceptedFailed") } // Accepted ... 
@@ -239,7 +239,7 @@ func (e *EngineTest) Accepted(validatorID ids.ShortID, requestID uint32, contain if e.T != nil { e.T.Fatalf("Unexpectedly called Accepted") } - return errors.New("Unexpectedly called Accepted") + return errors.New("unexpectedly called Accepted") } // Get ... @@ -253,7 +253,7 @@ func (e *EngineTest) Get(validatorID ids.ShortID, requestID uint32, containerID if e.T != nil { e.T.Fatalf("Unexpectedly called Get") } - return errors.New("Unexpectedly called Get") + return errors.New("unexpectedly called Get") } // GetAncestors ... @@ -267,7 +267,7 @@ func (e *EngineTest) GetAncestors(validatorID ids.ShortID, requestID uint32, con if e.T != nil { e.T.Fatalf("Unexpectedly called GetAncestors") } - return errors.New("Unexpectedly called GetAncestors") + return errors.New("unexpectedly called GetAncestors") } @@ -282,7 +282,7 @@ func (e *EngineTest) GetFailed(validatorID ids.ShortID, requestID uint32) error if e.T != nil { e.T.Fatalf("Unexpectedly called GetFailed") } - return errors.New("Unexpectedly called GetFailed") + return errors.New("unexpectedly called GetFailed") } // GetAncestorsFailed ... @@ -296,7 +296,7 @@ func (e *EngineTest) GetAncestorsFailed(validatorID ids.ShortID, requestID uint3 if e.T != nil { e.T.Fatalf("Unexpectedly called GetAncestorsFailed") } - return errors.New("Unexpectedly called GetAncestorsFailed") + return errors.New("unexpectedly called GetAncestorsFailed") } // Put ... @@ -310,7 +310,7 @@ func (e *EngineTest) Put(validatorID ids.ShortID, requestID uint32, containerID if e.T != nil { e.T.Fatalf("Unexpectedly called Put") } - return errors.New("Unexpectedly called Put") + return errors.New("unexpectedly called Put") } // MultiPut ... @@ -324,7 +324,7 @@ func (e *EngineTest) MultiPut(validatorID ids.ShortID, requestID uint32, contain if e.T != nil { e.T.Fatalf("Unexpectedly called MultiPut") } - return errors.New("Unexpectedly called MultiPut") + return errors.New("unexpectedly called MultiPut") } // PushQuery ... @@ -338,7 +338,7 @@ func (e *EngineTest) PushQuery(validatorID ids.ShortID, requestID uint32, contai if e.T != nil { e.T.Fatalf("Unexpectedly called PushQuery") } - return errors.New("Unexpectedly called PushQuery") + return errors.New("unexpectedly called PushQuery") } // PullQuery ... @@ -352,7 +352,7 @@ func (e *EngineTest) PullQuery(validatorID ids.ShortID, requestID uint32, contai if e.T != nil { e.T.Fatalf("Unexpectedly called PullQuery") } - return errors.New("Unexpectedly called PullQuery") + return errors.New("unexpectedly called PullQuery") } // QueryFailed ... @@ -366,7 +366,7 @@ func (e *EngineTest) QueryFailed(validatorID ids.ShortID, requestID uint32) erro if e.T != nil { e.T.Fatalf("Unexpectedly called QueryFailed") } - return errors.New("Unexpectedly called QueryFailed") + return errors.New("unexpectedly called QueryFailed") } // Chits ... @@ -380,7 +380,7 @@ func (e *EngineTest) Chits(validatorID ids.ShortID, requestID uint32, containerI if e.T != nil { e.T.Fatalf("Unexpectedly called Chits") } - return errors.New("Unexpectedly called Chits") + return errors.New("unexpectedly called Chits") } // IsBootstrapped ... 
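The sweep from "Unexpectedly called ..." to "unexpectedly called ..." across these test doubles follows the usual Go convention, flagged by linters such as staticcheck (check ST1005), that error strings start lowercase and omit trailing punctuation because they are typically embedded in larger messages. A minimal sketch of the motivation, with an illustrative error value:

    package main

    import (
        "errors"
        "fmt"
    )

    var errUnexpectedCall = errors.New("unexpectedly called Shutdown")

    func main() {
        // Error strings routinely end up mid-sentence in a wrapping message,
        // which is why they start lowercase and carry no trailing period.
        err := fmt.Errorf("test harness failure: %w", errUnexpectedCall)
        fmt.Println(err) // test harness failure: unexpectedly called Shutdown

        // Wrapping with %w (unlike %s) keeps the original error inspectable.
        fmt.Println(errors.Is(err, errUnexpectedCall)) // true
    }

The same reasoning explains the %w-to-%s fixes in the vertex tests earlier in this diff: the %w verb is only meaningful to fmt.Errorf, so t.Fatalf should format errors with %s (or %v) instead.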
diff --git a/snow/engine/common/test_vm.go b/snow/engine/common/test_vm.go index c693a45a76c9..bb5c8338df41 100644 --- a/snow/engine/common/test_vm.go +++ b/snow/engine/common/test_vm.go @@ -56,7 +56,7 @@ func (vm *TestVM) Bootstrapping() error { if vm.T != nil { vm.T.Fatalf("Unexpectedly called Bootstrapping") } - return errors.New("Unexpectedly called Bootstrapping") + return errors.New("unexpectedly called Bootstrapping") } return nil } @@ -69,7 +69,7 @@ func (vm *TestVM) Bootstrapped() error { if vm.T != nil { vm.T.Fatalf("Unexpectedly called Bootstrapped") } - return errors.New("Unexpectedly called Bootstrapped") + return errors.New("unexpectedly called Bootstrapped") } return nil } @@ -82,7 +82,7 @@ func (vm *TestVM) Shutdown() error { if vm.T != nil { vm.T.Fatalf("Unexpectedly called Shutdown") } - return errors.New("Unexpectedly called Shutdown") + return errors.New("unexpectedly called Shutdown") } return nil } diff --git a/snow/engine/snowman/bootstrap/block_job.go b/snow/engine/snowman/bootstrap/block_job.go index 3e463cb472ca..0f99072b6d92 100644 --- a/snow/engine/snowman/bootstrap/block_job.go +++ b/snow/engine/snowman/bootstrap/block_job.go @@ -66,7 +66,7 @@ func (b *blockJob) Execute() error { return fmt.Errorf("attempting to execute block with status %s", status) case choices.Processing: if err := b.blk.Verify(); err != nil { - b.log.Debug("block %s failed verification during bootstrapping due to %s", + return fmt.Errorf("block %s failed verification during bootstrapping due to: %w", b.blk.ID(), err) } diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go index 400520b32556..629b386a1813 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/snow/engine/snowman/bootstrap/bootstrapper.go @@ -131,9 +131,9 @@ func (b *Bootstrapper) fetch(blkID ids.ID) error { return nil } - validators, err := b.Validators.Sample(1) // validator to send request to + validators, err := b.Beacons.Sample(1) // validator to send request to if err != nil { - return fmt.Errorf("Dropping request for %s as there are no validators", blkID) + return fmt.Errorf("dropping request for %s as there are no validators", blkID) } validatorID := validators[0].ID() b.RequestID++ diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index 8e10b7208b07..1bdcef3183b6 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -42,9 +42,8 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *block.Te sender.CantGetAcceptedFrontier = false - peer := validators.GenerateRandomValidator(1) - peerID := peer.ID() - peers.Add(peer) + peer := ids.GenerateTestShortID() + peers.AddWeight(peer, 1) blocker, _ := queue.New(db) @@ -59,7 +58,7 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *block.Te Config: commonConfig, Blocked: blocker, VM: vm, - }, peerID, sender, vm + }, peer, sender, vm } // Single node in the accepted frontier; no need to fetch parent diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 23bdd6d6ecce..7c7ef8902c9f 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -26,15 +26,14 @@ var ( Genesis = ids.GenerateTestID() ) -func setup(t *testing.T) (validators.Validator, validators.Set, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { +func setup(t *testing.T)
(ids.ShortID, validators.Set, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -118,7 +117,7 @@ func TestEngineAdd(t *testing.T) { t.Fatalf("Asked multiple times") } *asked = true - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blkID.Equals(blk.Parent().ID()) { @@ -133,7 +132,7 @@ func TestEngineAdd(t *testing.T) { return blk, nil } - te.Put(vdr.ID(), 0, blk.ID(), blk.Bytes()) + te.Put(vdr, 0, blk.ID(), blk.Bytes()) vm.ParseBlockF = nil @@ -147,7 +146,7 @@ func TestEngineAdd(t *testing.T) { vm.ParseBlockF = func(b []byte) (snowman.Block, error) { return nil, errUnknownBytes } - te.Put(vdr.ID(), *reqID, blk.Parent().ID(), nil) + te.Put(vdr, *reqID, blk.Parent().ID(), nil) vm.ParseBlockF = nil @@ -189,7 +188,7 @@ func TestEngineQuery(t *testing.T) { } *asked = true *getRequestID = requestID - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blk.ID().Equals(blkID) { @@ -197,7 +196,7 @@ func TestEngineQuery(t *testing.T) { } } - te.PullQuery(vdr.ID(), 15, blk.ID()) + te.PullQuery(vdr, 15, blk.ID()) if !*blocked { t.Fatalf("Didn't request block") } @@ -214,7 +213,7 @@ func TestEngineQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -246,7 +245,7 @@ func TestEngineQuery(t *testing.T) { } return blk, nil } - te.Put(vdr.ID(), *getRequestID, blk.ID(), blk.Bytes()) + te.Put(vdr, *getRequestID, blk.ID(), blk.Bytes()) vm.ParseBlockF = nil if !*queried { @@ -284,7 +283,7 @@ func TestEngineQuery(t *testing.T) { } *asked = true *getRequestID = requestID - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blk1.ID().Equals(blkID) { @@ -293,7 +292,7 @@ func TestEngineQuery(t *testing.T) { } blkSet := ids.Set{} blkSet.Add(blk1.ID()) - te.Chits(vdr.ID(), *queryRequestID, blkSet) + te.Chits(vdr, *queryRequestID, blkSet) *queried = false sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { @@ -303,7 +302,7 @@ func TestEngineQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -330,7 +329,7 @@ func TestEngineQuery(t *testing.T) { return blk1, nil } - te.Put(vdr.ID(), *getRequestID, blk1.ID(), blk1.Bytes()) + te.Put(vdr, *getRequestID, blk1.ID(), blk1.Bytes()) vm.ParseBlockF = nil if blk1.Status() != choices.Accepted { @@ -342,7 +341,7 @@ func TestEngineQuery(t *testing.T) { _ = te.polls.String() // Shouldn't panic - te.QueryFailed(vdr.ID(), *queryRequestID) + te.QueryFailed(vdr, *queryRequestID) if len(te.blocked) != 0 { t.Fatalf("Should have finished blocking") } @@ -360,16 +359,16 @@ func TestEngineMultipleQuery(t *testing.T) { ConcurrentRepolls: 1, } - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) - vdr2 := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr0) - vals.Add(vdr1) - vals.Add(vdr2) + vdr0 := 
ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + vdr2 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) + vals.AddWeight(vdr2, 1) sender := &common.SenderTest{} sender.T = t @@ -428,7 +427,7 @@ func TestEngineMultipleQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -470,7 +469,7 @@ func TestEngineMultipleQuery(t *testing.T) { } *asked = true *getRequestID = requestID - if !vdr0.ID().Equals(inVdr) { + if !vdr0.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blk1.ID().Equals(blkID) { @@ -479,8 +478,8 @@ func TestEngineMultipleQuery(t *testing.T) { } blkSet := ids.Set{} blkSet.Add(blk1.ID()) - te.Chits(vdr0.ID(), *queryRequestID, blkSet) - te.Chits(vdr1.ID(), *queryRequestID, blkSet) + te.Chits(vdr0, *queryRequestID, blkSet) + te.Chits(vdr1, *queryRequestID, blkSet) vm.ParseBlockF = func(b []byte) (snowman.Block, error) { vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { @@ -506,7 +505,7 @@ func TestEngineMultipleQuery(t *testing.T) { *queried = true *secondQueryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -514,12 +513,12 @@ func TestEngineMultipleQuery(t *testing.T) { t.Fatalf("Asking for wrong block") } } - te.Put(vdr0.ID(), *getRequestID, blk1.ID(), blk1.Bytes()) + te.Put(vdr0, *getRequestID, blk1.ID(), blk1.Bytes()) // Should be dropped because the query was already filled blkSet = ids.Set{} blkSet.Add(blk0.ID()) - te.Chits(vdr2.ID(), *queryRequestID, blkSet) + te.Chits(vdr2, *queryRequestID, blkSet) if blk1.Status() != choices.Accepted { t.Fatalf("Should have executed block") @@ -579,7 +578,7 @@ func TestEngineAbandonResponse(t *testing.T) { } te.issue(blk) - te.QueryFailed(vdr.ID(), 1) + te.QueryFailed(vdr, 1) if len(te.blocked) != 0 { t.Fatalf("Should have removed blocking event") @@ -601,7 +600,7 @@ func TestEngineFetchBlock(t *testing.T) { added := new(bool) sender.PutF = func(inVdr ids.ShortID, requestID uint32, blkID ids.ID, blk []byte) { - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Wrong validator") } if requestID != 123 { @@ -613,7 +612,7 @@ func TestEngineFetchBlock(t *testing.T) { *added = true } - te.Get(vdr.ID(), 123, gBlk.ID()) + te.Get(vdr, 123, gBlk.ID()) if !*added { t.Fatalf("Should have sent block to peer") @@ -656,7 +655,7 @@ func TestEnginePushQuery(t *testing.T) { t.Fatalf("Sent chit multiple times") } *chitted = true - if !inVdr.Equals(vdr.ID()) { + if !inVdr.Equals(vdr) { t.Fatalf("Asking wrong validator for preference") } if requestID != 20 { @@ -678,7 +677,7 @@ func TestEnginePushQuery(t *testing.T) { } *queried = true vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -687,7 +686,7 @@ func TestEnginePushQuery(t *testing.T) { } } - te.PushQuery(vdr.ID(), 20, blk.ID(), blk.Bytes()) + te.PushQuery(vdr, 20, blk.ID(), blk.Bytes()) if !*chitted { t.Fatalf("Should have sent a chit to the peer") @@ -719,7 +718,7 @@ func TestEngineBuildBlock(t *testing.T) { } *queried = true vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ 
-745,7 +744,7 @@ func TestEngineRepoll(t *testing.T) { } *queried = true vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -770,16 +769,16 @@ func TestVoteCanceling(t *testing.T) { ConcurrentRepolls: 1, } - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) - vdr2 := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr0) - vals.Add(vdr1) - vals.Add(vdr2) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + vdr2 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) + vals.AddWeight(vdr2, 1) sender := &common.SenderTest{} sender.T = t @@ -838,7 +837,7 @@ func TestVoteCanceling(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -853,7 +852,7 @@ func TestVoteCanceling(t *testing.T) { t.Fatalf("Shouldn't have finished blocking issue") } - te.QueryFailed(vdr0.ID(), *queryRequestID) + te.QueryFailed(vdr0, *queryRequestID) if te.polls.Len() != 1 { t.Fatalf("Shouldn't have finished blocking issue") @@ -863,7 +862,7 @@ func TestVoteCanceling(t *testing.T) { sender.PullQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID) { *repolled = true } - te.QueryFailed(vdr1.ID(), *queryRequestID) + te.QueryFailed(vdr1, *queryRequestID) if !*repolled { t.Fatalf("Should have finished blocking issue and repolled the network") @@ -958,13 +957,13 @@ func TestEngineAbandonQuery(t *testing.T) { *reqID = requestID } - te.PullQuery(vdr.ID(), 0, blkID) + te.PullQuery(vdr, 0, blkID) if len(te.blocked) != 1 { t.Fatalf("Should have blocked on request") } - te.GetFailed(vdr.ID(), *reqID) + te.GetFailed(vdr, *reqID) if len(te.blocked) != 0 { t.Fatalf("Should have removed request") @@ -1008,13 +1007,13 @@ func TestEngineAbandonChit(t *testing.T) { fakeBlkIDSet := ids.Set{} fakeBlkIDSet.Add(fakeBlkID) - te.Chits(vdr.ID(), 0, fakeBlkIDSet) + te.Chits(vdr, 0, fakeBlkIDSet) if len(te.blocked) != 1 { t.Fatalf("Should have blocked on request") } - te.GetFailed(vdr.ID(), *reqID) + te.GetFailed(vdr, *reqID) if len(te.blocked) != 0 { t.Fatalf("Should have removed request") @@ -1075,7 +1074,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, blockingBlk.ID(), blockingBlk.Bytes()) + te.PushQuery(vdr, 0, blockingBlk.ID(), blockingBlk.Bytes()) if len(te.blocked) != 3 { t.Fatalf("Both inserts should be blocking in addition to the chit request") @@ -1131,7 +1130,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -1153,7 +1152,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { } blockingBlkIDSet := ids.Set{} blockingBlkIDSet.Add(blockingBlk.ID()) - te.Chits(vdr.ID(), *queryRequestID, blockingBlkIDSet) + te.Chits(vdr, *queryRequestID, blockingBlkIDSet) if len(te.blocked) != 2 { t.Fatalf("The insert and the chit should be blocking") @@ -1188,12 +1187,12 @@ func TestEngineRetryFetch(t *testing.T) { *reqID = requestID } - te.PullQuery(vdr.ID(), 0, missingBlk.ID()) + te.PullQuery(vdr, 
0, missingBlk.ID()) vm.CantGetBlock = true sender.GetF = nil - te.GetFailed(vdr.ID(), *reqID) + te.GetFailed(vdr, *reqID) vm.CantGetBlock = false @@ -1202,7 +1201,7 @@ func TestEngineRetryFetch(t *testing.T) { *called = true } - te.PullQuery(vdr.ID(), 0, missingBlk.ID()) + te.PullQuery(vdr, 0, missingBlk.ID()) vm.CantGetBlock = true sender.GetF = nil @@ -1263,7 +1262,7 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { votes := ids.Set{} votes.Add(invalidBlkID) - te.Chits(vdr.ID(), *reqID, votes) + te.Chits(vdr, *reqID, votes) vm.GetBlockF = nil @@ -1308,8 +1307,8 @@ func TestEngineGossip(t *testing.T) { func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { vdr, vdrs, sender, vm, te, gBlk := setup(t) - secondVdr := validators.GenerateRandomValidator(1) - vdrs.Add(secondVdr) + secondVdr := ids.GenerateTestShortID() + vdrs.AddWeight(secondVdr, 1) sender.Default(true) @@ -1357,7 +1356,7 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { reqID := new(uint32) sender.GetF = func(reqVdr ids.ShortID, requestID uint32, blkID ids.ID) { *reqID = requestID - if !reqVdr.Equals(vdr.ID()) { + if !reqVdr.Equals(vdr) { t.Fatalf("Wrong validator requested") } if !blkID.Equals(missingBlk.ID()) { @@ -1365,9 +1364,9 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes()) + te.PushQuery(vdr, 0, pendingBlk.ID(), pendingBlk.Bytes()) - te.Put(secondVdr.ID(), *reqID, missingBlk.ID(), []byte{3}) + te.Put(secondVdr, *reqID, missingBlk.ID(), []byte{3}) *parsed = false vm.ParseBlockF = func(b []byte) (snowman.Block, error) { @@ -1394,7 +1393,7 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { missingBlk.StatusV = choices.Processing - te.Put(vdr.ID(), *reqID, missingBlk.ID(), missingBlk.Bytes()) + te.Put(vdr, *reqID, missingBlk.ID(), missingBlk.Bytes()) pref := te.Consensus.Preference() if !pref.Equals(pendingBlk.ID()) { @@ -1453,7 +1452,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { reqID := new(uint32) sender.GetF = func(reqVdr ids.ShortID, requestID uint32, blkID ids.ID) { *reqID = requestID - if !reqVdr.Equals(vdr.ID()) { + if !reqVdr.Equals(vdr) { t.Fatalf("Wrong validator requested") } if !blkID.Equals(missingBlk.ID()) { @@ -1461,12 +1460,12 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes()) + te.PushQuery(vdr, 0, pendingBlk.ID(), pendingBlk.Bytes()) sender.GetF = nil sender.CantGet = false - te.PushQuery(vdr.ID(), *reqID, randomBlkID, []byte{3}) + te.PushQuery(vdr, *reqID, randomBlkID, []byte{3}) *parsed = false vm.ParseBlockF = func(b []byte) (snowman.Block, error) { @@ -1491,7 +1490,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { sender.CantPushQuery = false sender.CantChits = false - te.Put(vdr.ID(), *reqID, missingBlk.ID(), missingBlk.Bytes()) + te.Put(vdr, *reqID, missingBlk.ID(), missingBlk.Bytes()) pref := te.Consensus.Preference() if !pref.Equals(pendingBlk.ID()) { @@ -1504,12 +1503,11 @@ func TestEngineAggressivePolling(t *testing.T) { config.Params.ConcurrentRepolls = 2 - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -1590,7 +1588,7 @@ func TestEngineAggressivePolling(t *testing.T) { numPulled := new(int) sender.PullQueryF = func(_ ids.ShortSet, _ uint32, _ ids.ID) { 
*numPulled++ } - te.Put(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes()) + te.Put(vdr, 0, pendingBlk.ID(), pendingBlk.Bytes()) if *numPushed != 1 { t.Fatalf("Should have initially sent a push query") @@ -1612,14 +1610,14 @@ func TestEngineDoubleChit(t *testing.T) { BetaRogue: 2, } - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr0) - vals.Add(vdr1) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) sender := &common.SenderTest{} sender.T = t @@ -1678,7 +1676,7 @@ func TestEngineDoubleChit(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID()) + vdrSet.Add(vdr0, vdr1) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -1707,19 +1705,19 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr0.ID(), *queryRequestID, blkSet) + te.Chits(vdr0, *queryRequestID, blkSet) if status := blk.Status(); status != choices.Processing { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr0.ID(), *queryRequestID, blkSet) + te.Chits(vdr0, *queryRequestID, blkSet) if status := blk.Status(); status != choices.Processing { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr1.ID(), *queryRequestID, blkSet) + te.Chits(vdr1, *queryRequestID, blkSet) if status := blk.Status(); status != choices.Accepted { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Accepted) diff --git a/snow/networking/awaiting_connections.go b/snow/networking/awaiting_connections.go deleted file mode 100644 index 5887cea6a8fa..000000000000 --- a/snow/networking/awaiting_connections.go +++ /dev/null @@ -1,46 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package networking - -import ( - stdmath "math" - - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/snow/validators" - "github.com/ava-labs/gecko/utils/math" -) - -// AwaitingConnections ... -type AwaitingConnections struct { - Requested validators.Set - WeightRequired uint64 - Finish func() - - weight uint64 -} - -// Add ... -func (aw *AwaitingConnections) Add(conn ids.ShortID) { - vdr, ok := aw.Requested.Get(conn) - if !ok { - return - } - weight, err := math.Add64(vdr.Weight(), aw.weight) - if err != nil { - weight = stdmath.MaxUint64 - } - aw.weight = weight -} - -// Remove ... -func (aw *AwaitingConnections) Remove(conn ids.ShortID) { - vdr, ok := aw.Requested.Get(conn) - if !ok { - return - } - aw.weight -= vdr.Weight() -} - -// Ready ... 
-func (aw *AwaitingConnections) Ready() bool { return aw.weight >= aw.WeightRequired } diff --git a/snow/networking/router/chain_router.go b/snow/networking/router/chain_router.go index cd28209c225e..d581a65c030f 100644 --- a/snow/networking/router/chain_router.go +++ b/snow/networking/router/chain_router.go @@ -89,7 +89,7 @@ func (sr *ChainRouter) RemoveChain(chainID ids.ID) { ticker := time.NewTicker(sr.closeTimeout) select { - case _, _ = <-chain.closed: + case <-chain.closed: case <-ticker.C: chain.Context().Log.Warn("timed out while shutting down") } @@ -355,7 +355,7 @@ func (sr *ChainRouter) Shutdown() { timedout := false for _, chain := range prevChains { select { - case _, _ = <-chain.closed: + case <-chain.closed: case <-ticker.C: timedout = true } diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 471f280a525d..ede7004d4b41 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/networking/throttler" "github.com/ava-labs/gecko/snow/networking/timeout" "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/logging" @@ -39,8 +40,9 @@ func TestShutdown(t *testing.T) { validators.NewSet(), nil, 1, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -53,9 +55,9 @@ func TestShutdown(t *testing.T) { ticker := time.NewTicker(20 * time.Millisecond) select { - case _, _ = <-ticker.C: + case <-ticker.C: t.Fatalf("Handler shutdown was not called or timed out after 20ms during chainRouter shutdown") - case _, _ = <-shutdownCalled: + case <-shutdownCalled: } select { @@ -97,8 +99,9 @@ func TestShutdownTimesOut(t *testing.T) { validators.NewSet(), nil, 1, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -118,8 +121,8 @@ func TestShutdownTimesOut(t *testing.T) { }() select { - case _, _ = <-engineFinished: + case <-engineFinished: t.Fatalf("Shutdown should have finished in one millisecond before timing out instead of waiting for engine to finish shutting down.") - case _, _ = <-shutdownFinished: + case <-shutdownFinished: } } diff --git a/snow/networking/router/handler.go b/snow/networking/router/handler.go index 2ccc397f1354..751cd6445d15 100644 --- a/snow/networking/router/handler.go +++ b/snow/networking/router/handler.go @@ -17,10 +17,6 @@ import ( "github.com/ava-labs/gecko/utils/timer" ) -const ( - DefaultStakerPortion float64 = 0.2 -) - // Requirement: A set of nodes spamming messages (potentially costly) shouldn't // impact other node's queries. 
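The handler changes that follow thread a new maxNonStakerPendingMsgs parameter through Handler.Initialize and relocate the staker-portion default to the throttler package. As a hedged sketch of the resulting call site, mirroring the test updates below (the test engine and empty validator set are placeholders, and error handling is elided):

package main

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/ava-labs/gecko/snow/engine/common"
	"github.com/ava-labs/gecko/snow/networking/router"
	"github.com/ava-labs/gecko/snow/networking/throttler"
	"github.com/ava-labs/gecko/snow/validators"
)

func main() {
	engine := common.EngineTest{} // placeholder engine, as used by the tests
	handler := &router.Handler{}
	handler.Initialize(
		&engine,
		validators.NewSet(), // validator set
		nil,                 // incoming message channel
		16,                  // bufferSize
		throttler.DefaultMaxNonStakerPendingMsgs, // new parameter introduced here
		throttler.DefaultStakerPortion,           // stakerMsgPortion
		throttler.DefaultStakerPortion,           // stakerCPUPortion
		"",                                       // metrics namespace
		prometheus.NewRegistry(),
	)
}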
@@ -117,6 +113,7 @@ func (h *Handler) Initialize( validators validators.Set, msgChan <-chan common.Message, bufferSize int, + maxNonStakerPendingMsgs uint32, stakerMsgPortion, stakerCPUPortion float64, namespace string, @@ -156,6 +153,7 @@ func (h *Handler) Initialize( consumptionRanges, consumptionAllotments, bufferSize, + maxNonStakerPendingMsgs, cpuInterval, stakerMsgPortion, stakerCPUPortion, @@ -235,7 +233,12 @@ func (h *Handler) dispatchMsg(msg message) { h.ctx.Lock.Lock() defer h.ctx.Lock.Unlock() - h.ctx.Log.Debug("Forwarding message to consensus: %s", msg) + if msg.IsPeriodic() { + h.ctx.Log.Verbo("Forwarding message to consensus: %s", msg) + } else { + h.ctx.Log.Debug("Forwarding message to consensus: %s", msg) + } + var ( err error ) @@ -470,7 +473,7 @@ func (h *Handler) shutdownDispatch() { go h.toClose() } h.closing = true - h.shutdown.Observe(float64(time.Now().Sub(startTime))) + h.shutdown.Observe(float64(time.Since(startTime))) close(h.closed) } diff --git a/snow/networking/router/handler_test.go b/snow/networking/router/handler_test.go index 5965071b743b..3981b63c4885 100644 --- a/snow/networking/router/handler_test.go +++ b/snow/networking/router/handler_test.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/networking/throttler" ) func TestHandlerDropsTimedOutMessages(t *testing.T) { @@ -34,15 +35,16 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { handler := &Handler{} vdrs := validators.NewSet() - vdr0 := validators.GenerateRandomValidator(1) - vdrs.Add(vdr0) + vdr0 := ids.GenerateTestShortID() + vdrs.AddWeight(vdr0, 1) handler.Initialize( &engine, vdrs, nil, 16, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -58,9 +60,9 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { ticker := time.NewTicker(50 * time.Millisecond) defer ticker.Stop() select { - case _, _ = <-ticker.C: + case <-ticker.C: t.Fatalf("Calling engine function timed out") - case _, _ = <-called: + case <-called: } } @@ -83,8 +85,9 @@ func TestHandlerDoesntDrop(t *testing.T) { validators, nil, 16, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -95,9 +98,9 @@ func TestHandlerDoesntDrop(t *testing.T) { ticker := time.NewTicker(20 * time.Millisecond) defer ticker.Stop() select { - case _, _ = <-ticker.C: + case <-ticker.C: t.Fatalf("Calling engine function timed out") - case _, _ = <-called: + case <-called: } } @@ -118,8 +121,9 @@ func TestHandlerClosesOnError(t *testing.T) { validators.NewSet(), nil, 16, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -134,8 +138,8 @@ func TestHandlerClosesOnError(t *testing.T) { ticker := time.NewTicker(20 * time.Millisecond) select { - case _, _ = <-ticker.C: + case <-ticker.C: t.Fatalf("Handler shutdown timed out before calling toClose") - case _, _ = <-closed: + case <-closed: } } diff --git a/snow/networking/router/message.go b/snow/networking/router/message.go index d88b71d74fc0..89d0742b1f85 100644 --- a/snow/networking/router/message.go +++ b/snow/networking/router/message.go @@ -10,6 +10,7 @@ 
import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/constants" ) type msgType int @@ -49,6 +50,11 @@ type message struct { deadline time.Time // Time this message must be responded to } +func (m message) IsPeriodic() bool { + return m.requestID == constants.GossipMsgRequestID || + m.messageType == gossipMsg +} + func (m message) String() string { sb := strings.Builder{} sb.WriteString(fmt.Sprintf("\n messageType: %s", m.messageType)) diff --git a/snow/networking/router/service_queue.go b/snow/networking/router/service_queue.go index e487fb87bcb9..708f87c23d5f 100644 --- a/snow/networking/router/service_queue.go +++ b/snow/networking/router/service_queue.go @@ -16,7 +16,7 @@ import ( ) var ( - errNoMessages = errors.New("No messages remaining on queue") + errNoMessages = errors.New("no messages remaining on queue") ) type messageQueue interface { @@ -31,8 +31,9 @@ type messageQueue interface { type multiLevelQueue struct { lock sync.Mutex - validators validators.Set - throttler throttler.Throttler + validators validators.Set + cpuTracker throttler.CPUTracker + msgThrottler throttler.CountingThrottler // Tracks total CPU consumption intervalConsumption, tierConsumption, cpuInterval time.Duration @@ -60,13 +61,15 @@ func newMultiLevelQueue( consumptionRanges []float64, consumptionAllotments []time.Duration, bufferSize int, + maxNonStakerPendingMsgs uint32, cpuInterval time.Duration, msgPortion, cpuPortion float64, ) (messageQueue, chan struct{}) { semaChan := make(chan struct{}, bufferSize) singleLevelSize := bufferSize / len(consumptionRanges) - throttler := throttler.NewEWMAThrottler(vdrs, uint32(bufferSize), msgPortion, cpuPortion, cpuInterval, log) + cpuTracker := throttler.NewEWMATracker(vdrs, cpuPortion, cpuInterval, log) + msgThrottler := throttler.NewMessageThrottler(vdrs, uint32(bufferSize), maxNonStakerPendingMsgs, msgPortion, log) queues := make([]singleLevelQueue, len(consumptionRanges)) for index := 0; index < len(queues); index++ { gauge, histogram, err := metrics.registerTierStatistics(index) @@ -84,7 +87,8 @@ func newMultiLevelQueue( return &multiLevelQueue{ validators: vdrs, - throttler: throttler, + cpuTracker: cpuTracker, + msgThrottler: msgThrottler, queues: queues, cpuRanges: consumptionRanges, cpuAllotments: consumptionAllotments, @@ -116,7 +120,7 @@ func (ml *multiLevelQueue) PushMessage(msg message) bool { return false } ml.pendingMessages++ - ml.throttler.AddMessage(msg.validatorID) + ml.msgThrottler.Add(msg.validatorID) select { case ml.semaChan <- struct{}{}: default: @@ -134,7 +138,7 @@ func (ml *multiLevelQueue) PopMessage() (message, error) { msg, err := ml.popMessage() if err == nil { ml.pendingMessages-- - ml.throttler.RemoveMessage(msg.validatorID) + ml.msgThrottler.Remove(msg.validatorID) ml.metrics.pending.Dec() } return msg, err @@ -145,7 +149,7 @@ func (ml *multiLevelQueue) UtilizeCPU(vdr ids.ShortID, duration time.Duration) { ml.lock.Lock() defer ml.lock.Unlock() - ml.throttler.UtilizeCPU(vdr, duration) + ml.cpuTracker.UtilizeCPU(vdr, duration) ml.intervalConsumption += duration ml.tierConsumption += duration if ml.tierConsumption > ml.cpuAllotments[ml.currentTier] { @@ -160,7 +164,8 @@ func (ml *multiLevelQueue) EndInterval() { ml.lock.Lock() defer ml.lock.Unlock() - ml.throttler.EndInterval() + ml.cpuTracker.EndInterval() + ml.msgThrottler.EndInterval() ml.metrics.cpu.Observe(float64(ml.intervalConsumption.Milliseconds())) ml.intervalConsumption = 0 } @@ -189,7 +194,7 @@ func (ml 
*multiLevelQueue) popMessage() (message, error) { ml.queues[ml.currentTier].waitingTime.Observe(float64(time.Since(msg.received))) // Check where messages from this validator currently belong - cpu, _ := ml.throttler.GetUtilization(msg.validatorID) + cpu := ml.cpuTracker.GetUtilization(msg.validatorID) correctIndex := ml.getPriorityIndex(cpu) // If the message is at least the priority of the current tier @@ -227,12 +232,12 @@ func (ml *multiLevelQueue) pushMessage(msg message) bool { ml.log.Warn("Dropping message due to invalid validatorID") return false } - cpu, throttle := ml.throttler.GetUtilization(validatorID) + throttle := ml.msgThrottler.Throttle(validatorID) if throttle { ml.metrics.throttled.Inc() return false } - + cpu := ml.cpuTracker.GetUtilization(validatorID) queueIndex := ml.getPriorityIndex(cpu) return ml.waterfallMessage(msg, queueIndex) diff --git a/snow/networking/router/service_queue_test.go b/snow/networking/router/service_queue_test.go index b545fa0783c3..bad122e57579 100644 --- a/snow/networking/router/service_queue_test.go +++ b/snow/networking/router/service_queue_test.go @@ -10,6 +10,7 @@ import ( "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/gecko/snow/networking/throttler" "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/logging" ) @@ -42,9 +43,10 @@ func setupMultiLevelQueue(t *testing.T, bufferSize int) (messageQueue, chan stru consumptionRanges, consumptionAllotments, bufferSize, + throttler.DefaultMaxNonStakerPendingMsgs, time.Second, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, ) return queue, semaChan, vdrs @@ -86,7 +88,7 @@ func TestMultiLevelQueueSendsMessages(t *testing.T) { // Ensure that the 6th message was never added to the queue select { - case _ = <-semaChan: + case <-semaChan: t.Fatal("Semaphore channel should have been empty after reading all messages from the queue") default: } @@ -169,9 +171,10 @@ func TestMultiLevelQueuePrioritizes(t *testing.T) { consumptionRanges, consumptionAllotments, bufferSize, + throttler.DefaultMaxNonStakerPendingMsgs, time.Second, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, ) // Utilize CPU such that the next message from validator2 will be placed on a lower @@ -263,9 +266,10 @@ func TestMultiLevelQueuePushesDownOldMessages(t *testing.T) { consumptionRanges, consumptionAllotments, bufferSize, + throttler.DefaultMaxNonStakerPendingMsgs, time.Second, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, ) queue.PushMessage(message{ diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 5d63b5a48d28..26a2b91a37c7 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/snow/networking/router" + "github.com/ava-labs/gecko/snow/networking/throttler" "github.com/ava-labs/gecko/snow/networking/timeout" "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/logging" @@ -67,8 +68,9 @@ func TestTimeout(t *testing.T) { validators.NewSet(), nil, 1, - router.DefaultStakerPortion, - router.DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", 
prometheus.NewRegistry(), ) @@ -123,8 +125,9 @@ func TestReliableMessages(t *testing.T) { validators.NewSet(), nil, 1, - router.DefaultStakerPortion, - router.DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -150,7 +153,7 @@ func TestReliableMessages(t *testing.T) { }() for _, await := range awaiting { - _, _ = <-await + <-await } } @@ -189,8 +192,9 @@ func TestReliableMessagesToMyself(t *testing.T) { validators.NewSet(), nil, 1, - router.DefaultStakerPortion, - router.DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -216,6 +220,6 @@ func TestReliableMessagesToMyself(t *testing.T) { }() for _, await := range awaiting { - _, _ = <-await + <-await } } diff --git a/snow/networking/throttler/ewma.go b/snow/networking/throttler/ewma.go index a8068c553cf0..87fab7298d33 100644 --- a/snow/networking/throttler/ewma.go +++ b/snow/networking/throttler/ewma.go @@ -15,52 +15,39 @@ import ( ) const ( - defaultDecayFactor float64 = 2 - defaultIntervalsUntilPruning uint32 = 60 - defaultMinimumCPUAllotment = time.Nanosecond + defaultDecayFactor float64 = 2 + defaultMinimumCPUAllotment = time.Nanosecond ) -type ewmaThrottler struct { +type ewmaCPUTracker struct { lock sync.Mutex log logging.Logger // Track peers - spenders map[[20]byte]*spender + cpuSpenders map[[20]byte]*cpuSpender cumulativeEWMA time.Duration vdrs validators.Set // Track CPU utilization - decayFactor float64 - stakerCPU, nonReservedCPU time.Duration - - // Track pending messages - reservedStakerMessages uint32 - pendingNonReservedMsgs, nonReservedMsgs uint32 - maxNonStakerPendingMsgs uint32 - - // Statistics adjusted at every interval - currentPeriod uint32 + decayFactor float64 // Factor used to discount the EWMA at every period + stakerCPU time.Duration // Amount of CPU time reserved for stakers + nonReservedCPU time.Duration // Amount of CPU time that is not reserved for stakers } -// NewEWMAThrottler returns a Throttler that uses exponentially weighted moving +// NewEWMATracker returns a CPUTracker that uses exponentially weighted moving // average to estimate CPU utilization. // -// [maxMessages] is the maximum number of messages allotted to this chain -// [stakerMsgPortion] is the portion of messages to reserve exclusively for stakers -// [stakerCPUPortion] is the portion of CPU utilization to reserve for stakers -// both staker portions should be in the range (0, 1] +// [stakerCPUPortion] is the portion of CPU utilization to reserve for stakers (range (0, 1]) // [period] is the interval of time to use for the calculation of EWMA // -// Note: ewmaThrottler uses the period as the total amount of time per interval, +// Note: ewmaCPUTracker uses the period as the total amount of time per interval, // which is not the limit since it tracks consumption using EWMA. -func NewEWMAThrottler( +func NewEWMATracker( vdrs validators.Set, - maxMessages uint32, - stakerMsgPortion, stakerCPUPortion float64, period time.Duration, log logging.Logger, -) Throttler { +) CPUTracker { // Amount of CPU time reserved for processing messages from stakers stakerCPU := time.Duration(float64(period) * stakerCPUPortion) if stakerCPU < defaultMinimumCPUAllotment { @@ -75,67 +62,30 @@ func NewEWMAThrottler( nonReservedCPU = defaultMinimumCPUAllotment } - // Number of messages reserved for Stakers vs. 
Non-Stakers - reservedStakerMessages := uint32(stakerMsgPortion * float64(maxMessages)) - nonReservedMsgs := maxMessages - reservedStakerMessages - - throttler := &ewmaThrottler{ - spenders: make(map[[20]byte]*spender), - vdrs: vdrs, - log: log, + throttler := &ewmaCPUTracker{ + cpuSpenders: make(map[[20]byte]*cpuSpender), + vdrs: vdrs, + log: log, decayFactor: defaultDecayFactor, stakerCPU: stakerCPU, nonReservedCPU: nonReservedCPU, - - reservedStakerMessages: reservedStakerMessages, - nonReservedMsgs: nonReservedMsgs, } - // Add validators to spenders, so that they will be calculated correctly in + // Add validators to cpuSpenders, so that they will be calculated correctly in // EndInterval for _, vdr := range vdrs.List() { - throttler.spenders[vdr.ID().Key()] = &spender{} + throttler.cpuSpenders[vdr.ID().Key()] = &cpuSpender{} } // Call EndInterval to calculate initial period statistics and initial - // spender values for validators + // cpuSpender values for validators throttler.EndInterval() return throttler } -func (et *ewmaThrottler) AddMessage(validatorID ids.ShortID) { - et.lock.Lock() - defer et.lock.Unlock() - - sp := et.getSpender(validatorID) - sp.pendingMessages++ - - // If the spender has exceeded its message allotment, then the additional - // message is taken from the pool - if sp.pendingMessages > sp.msgAllotment { - sp.pendingPoolMessages++ - et.pendingNonReservedMsgs++ - } -} - -func (et *ewmaThrottler) RemoveMessage(validatorID ids.ShortID) { - et.lock.Lock() - defer et.lock.Unlock() - - sp := et.getSpender(validatorID) - sp.pendingMessages-- - - // If the spender has pending messages taken from the pool, - // they are the first messages to be removed. - if sp.pendingPoolMessages > 0 { - sp.pendingPoolMessages-- - et.pendingNonReservedMsgs-- - } -} - -func (et *ewmaThrottler) UtilizeCPU( +func (et *ewmaCPUTracker) UtilizeCPU( validatorID ids.ShortID, consumption time.Duration, ) { @@ -144,140 +94,86 @@ func (et *ewmaThrottler) UtilizeCPU( sp := et.getSpender(validatorID) sp.cpuEWMA += consumption - sp.lastSpend = et.currentPeriod et.cumulativeEWMA += consumption } -// Returns CPU GetUtilization metric as percentage of expected utilization and -// boolean specifying whether or not the validator has exceeded its message -// allotment. -func (et *ewmaThrottler) GetUtilization( - validatorID ids.ShortID, -) (float64, bool) { +// GetUtilization returns a percentage of expected CPU utilization of the peer +// corresponding to [validatorID] +func (et *ewmaCPUTracker) GetUtilization(validatorID ids.ShortID) float64 { et.lock.Lock() defer et.lock.Unlock() sharedUtilization := float64(et.cumulativeEWMA) / float64(et.nonReservedCPU) sp := et.getSpender(validatorID) if !sp.staking { - exceedsMessageAllotment := et.pendingNonReservedMsgs > et.nonReservedMsgs || // the shared message pool has been taken - sp.pendingMessages > sp.maxMessages // exceeds its own individual message cap - - if exceedsMessageAllotment { - et.log.Verbo("Throttling non-staker %s: %s. Pending pool messages: %d/%d.", - validatorID, - sp, - et.pendingNonReservedMsgs, - et.nonReservedMsgs) - } - return sharedUtilization, exceedsMessageAllotment - } - - // Staker should only be throttled if it has exceeded its message allotment - // and there are either no messages left in the shared pool or it has - // exceeded its own maximum message allocation. 
- exceedsMessageAllotment := sp.pendingMessages > sp.msgAllotment && // exceeds its own individual message allotment - (et.pendingNonReservedMsgs > et.nonReservedMsgs || // no unreserved messages - sp.pendingMessages > sp.maxMessages) // exceeds its own individual message cap - - if exceedsMessageAllotment { - et.log.Debug("Throttling staker %s: %s. Pending pool messages: %d/%d.", - validatorID, - sp, - et.pendingNonReservedMsgs, - et.nonReservedMsgs) + return sharedUtilization } - return math.Min(float64(sp.cpuEWMA)/float64(sp.expectedCPU), sharedUtilization), exceedsMessageAllotment + return math.Min(float64(sp.cpuEWMA)/float64(sp.expectedCPU), sharedUtilization) } -func (et *ewmaThrottler) EndInterval() { +// EndInterval registers the end of a given CPU interval by discounting +// all cpuSpenders' cpuEWMA and removing outstanding spenders that have sufficiently +// low cpuEWMA stats +func (et *ewmaCPUTracker) EndInterval() { et.lock.Lock() defer et.lock.Unlock() - et.currentPeriod++ - et.cumulativeEWMA = time.Duration(float64(et.cumulativeEWMA) / et.decayFactor) stakingWeight := et.vdrs.Weight() - numPeers := et.vdrs.Len() + 1 - et.maxNonStakerPendingMsgs = et.nonReservedMsgs / uint32(numPeers) - for key, spender := range et.spenders { - spender.cpuEWMA = time.Duration(float64(spender.cpuEWMA) / et.decayFactor) - if vdr, exists := et.vdrs.Get(ids.NewShortID(key)); exists { - stakerPortion := float64(vdr.Weight()) / float64(stakingWeight) + removed := 0 + for key, cpuSpender := range et.cpuSpenders { + cpuSpender.cpuEWMA = time.Duration(float64(cpuSpender.cpuEWMA) / et.decayFactor) + if weight, ok := et.vdrs.GetWeight(ids.NewShortID(key)); ok { + stakerPortion := float64(weight) / float64(stakingWeight) // Calculate staker allotment here - spender.staking = true - spender.msgAllotment = uint32(float64(et.reservedStakerMessages) * stakerPortion) - spender.maxMessages = uint32(float64(et.reservedStakerMessages)*stakerPortion) + et.maxNonStakerPendingMsgs - spender.expectedCPU = time.Duration(float64(et.stakerCPU)*stakerPortion) + defaultMinimumCPUAllotment + cpuSpender.staking = true + cpuSpender.expectedCPU = time.Duration(float64(et.stakerCPU)*stakerPortion) + defaultMinimumCPUAllotment continue } - if spender.lastSpend+defaultIntervalsUntilPruning < et.currentPeriod && spender.pendingMessages == 0 { - et.log.Debug("Removing validator from throttler after not hearing from it for %d periods", - et.currentPeriod-spender.lastSpend) - delete(et.spenders, key) + if cpuSpender.cpuEWMA == 0 { + removed++ + delete(et.cpuSpenders, key) } - // If the validator is not a staker and was not deleted, set its spender + // If the validator is not a staker and was not deleted, set its cpuSpender // attributes - spender.staking = false - spender.msgAllotment = 0 - spender.maxMessages = et.maxNonStakerPendingMsgs - spender.expectedCPU = defaultMinimumCPUAllotment + cpuSpender.staking = false + cpuSpender.expectedCPU = defaultMinimumCPUAllotment } + et.log.Verbo("Removed %d validators from CPU Tracker.", removed) } -// getSpender returns the [spender] corresponding to [validatorID] -func (et *ewmaThrottler) getSpender(validatorID ids.ShortID) *spender { +// getSpender returns the [cpuSpender] corresponding to [validatorID] +func (et *ewmaCPUTracker) getSpender(validatorID ids.ShortID) *cpuSpender { validatorKey := validatorID.Key() - if sp, exists := et.spenders[validatorKey]; exists { + if sp, exists := et.cpuSpenders[validatorKey]; exists { return sp } - // If this validator did not exist in spenders, 
create it and return - sp := &spender{ - maxMessages: et.maxNonStakerPendingMsgs, + // If this validator did not exist in cpuSpenders, create it and return + sp := &cpuSpender{ expectedCPU: defaultMinimumCPUAllotment, } - et.spenders[validatorKey] = sp + et.cpuSpenders[validatorKey] = sp return sp } -type spender struct { - // Last period that this spender utilized the CPU - lastSpend uint32 - - // Number of pending messages this spender has taken from the pool - pendingPoolMessages uint32 - - // Number of messages this spender currently has pending - pendingMessages uint32 - - // Number of messages allocated to this spender as a staker - msgAllotment uint32 - - // Max number of messages this spender can use even if the shared pool is - // non-empty - maxMessages uint32 - - // EWMA of this spender's CPU utilization +type cpuSpender struct { + // EWMA of this cpuSpender's CPU utilization cpuEWMA time.Duration // The expected CPU utilization of this peer expectedCPU time.Duration - // Flag to indicate if this spender is a staker + // Flag to indicate if this cpuSpender is a staker staking bool } -func (sp *spender) String() string { - return fmt.Sprintf("Spender(Messages: (%d+%d)/(%d+%d), CPU: %s/%s)", - sp.pendingPoolMessages, - sp.pendingMessages-sp.pendingPoolMessages, - sp.msgAllotment, - sp.maxMessages-sp.msgAllotment, +func (sp *cpuSpender) String() string { + return fmt.Sprintf("CPUTracker(CPU: %s/%s)", sp.cpuEWMA, sp.expectedCPU, ) diff --git a/snow/networking/throttler/message_throttler.go b/snow/networking/throttler/message_throttler.go new file mode 100644 index 000000000000..7d47dcf72b72 --- /dev/null +++ b/snow/networking/throttler/message_throttler.go @@ -0,0 +1,221 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package throttler + +import ( + "fmt" + "sync" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/logging" +) + +const ( + defaultIntervalsUntilPruning uint32 = 60 +) + +type messageThrottler struct { + lock sync.Mutex + log logging.Logger + + // Track peers + msgSpenders map[[20]byte]*msgSpender + vdrs validators.Set + + // Track pending messages + reservedStakerMessages uint32 // Number of messages reserved for stakers + nonReservedMsgs uint32 // Number of non-reserved messages left to a shared message pool + pendingNonReservedMsgs uint32 // Number of pending messages taken from the shared message pool + + // Cap on number of pending messages allowed to a non-staker + maxNonStakerPendingMsgs uint32 + + // Statistics adjusted at every interval + currentPeriod uint32 +} + +// NewMessageThrottler returns a MessageThrottler that throttles peers +// when they have too many pending messages outstanding. +// +// [maxMessages] is the maximum number of messages allotted to this chain +// [stakerMsgPortion] is the portion of messages to reserve exclusively for stakers +// should be in the range (0, 1] +func NewMessageThrottler( + vdrs validators.Set, + maxMessages, + maxNonStakerPendingMsgs uint32, + stakerMsgPortion float64, + log logging.Logger, +) CountingThrottler { + // Number of messages reserved for Stakers vs. 
Non-Stakers + reservedStakerMessages := uint32(stakerMsgPortion * float64(maxMessages)) + nonReservedMsgs := maxMessages - reservedStakerMessages + + throttler := &messageThrottler{ + msgSpenders: make(map[[20]byte]*msgSpender), + vdrs: vdrs, + log: log, + + reservedStakerMessages: reservedStakerMessages, + nonReservedMsgs: nonReservedMsgs, + maxNonStakerPendingMsgs: maxNonStakerPendingMsgs, + } + + // Add validators to msgSpenders, so that they will be calculated correctly in + // EndInterval + for _, vdr := range vdrs.List() { + throttler.msgSpenders[vdr.ID().Key()] = &msgSpender{} + } + + // Call EndInterval to calculate initial period statistics and initial + // msgSpender values for validators + throttler.EndInterval() + return throttler +} + +func (et *messageThrottler) Add(validatorID ids.ShortID) { + et.lock.Lock() + defer et.lock.Unlock() + + sp := et.getSpender(validatorID) + sp.pendingMessages++ + sp.lastSpend = et.currentPeriod + + // If the msgSpender has exceeded its message allotment, then the additional + // message is taken from the pool + if sp.pendingMessages > sp.msgAllotment { + sp.pendingPoolMessages++ + et.pendingNonReservedMsgs++ + } +} + +func (et *messageThrottler) Remove(validatorID ids.ShortID) { + et.lock.Lock() + defer et.lock.Unlock() + + sp := et.getSpender(validatorID) + sp.pendingMessages-- + + // If the msgSpender has pending messages taken from the pool, + // they are the first messages to be removed. + if sp.pendingPoolMessages > 0 { + sp.pendingPoolMessages-- + et.pendingNonReservedMsgs-- + } +} + +// Throttle returns true if messages from [validatorID] should be throttled due +// to having too many pending messages +func (et *messageThrottler) Throttle( + validatorID ids.ShortID, +) bool { + et.lock.Lock() + defer et.lock.Unlock() + + sp := et.getSpender(validatorID) + if !sp.staking { + exceedsMessageAllotment := et.pendingNonReservedMsgs > et.nonReservedMsgs || // the shared message pool has been taken + (sp.pendingMessages > sp.maxMessages) // Spender has exceeded its individual cap + + if exceedsMessageAllotment { + et.log.Verbo("Throttling non-staker %s: %s. Pending pool messages: %d/%d.", + validatorID, + sp, + et.pendingNonReservedMsgs, + et.nonReservedMsgs) + } + return exceedsMessageAllotment + } + + exceedsMessageAllotment := sp.pendingMessages > sp.msgAllotment && // Throttle if the staker has exceeded its allotment + (et.pendingNonReservedMsgs > et.nonReservedMsgs || // And either the shared message pool is empty + sp.pendingMessages > sp.maxMessages) // or this staker has exceeded its individual cap + + if exceedsMessageAllotment { + et.log.Debug("Throttling staker %s: %s. 
Pending pool messages: %d/%d.", + validatorID, + sp, + et.pendingNonReservedMsgs, + et.nonReservedMsgs) + } + return exceedsMessageAllotment +} + +func (et *messageThrottler) EndInterval() { + et.lock.Lock() + defer et.lock.Unlock() + + et.currentPeriod++ + stakingWeight := et.vdrs.Weight() + + for key, msgSpender := range et.msgSpenders { + if weight, exists := et.vdrs.GetWeight(ids.NewShortID(key)); exists { + stakerPortion := float64(weight) / float64(stakingWeight) + + // Calculate staker allotment here + msgSpender.staking = true + msgSpender.msgAllotment = uint32(float64(et.reservedStakerMessages) * stakerPortion) + msgSpender.maxMessages = msgSpender.msgAllotment + et.maxNonStakerPendingMsgs + continue + } + + if msgSpender.lastSpend+defaultIntervalsUntilPruning < et.currentPeriod && msgSpender.pendingMessages == 0 { + et.log.Debug("Removing validator from throttler after not hearing from it for %d periods", + et.currentPeriod-msgSpender.lastSpend) + delete(et.msgSpenders, key) + } + + // If the validator is not a staker and was not deleted, set its msgSpender + // attributes + msgSpender.staking = false + msgSpender.msgAllotment = 0 + msgSpender.maxMessages = et.maxNonStakerPendingMsgs + } +} + +// getSpender returns the [msgSpender] corresponding to [validatorID] +func (et *messageThrottler) getSpender(validatorID ids.ShortID) *msgSpender { + validatorKey := validatorID.Key() + if sp, exists := et.msgSpenders[validatorKey]; exists { + return sp + } + + // If this validator did not exist in msgSpenders, create it and return + sp := &msgSpender{ + maxMessages: et.maxNonStakerPendingMsgs, + } + et.msgSpenders[validatorKey] = sp + return sp +} + +type msgSpender struct { + // Last period that this msgSpender utilized the CPU + lastSpend uint32 + + // Number of pending messages this msgSpender has taken from the pool + pendingPoolMessages uint32 + + // Number of messages this msgSpender currently has pending + pendingMessages uint32 + + // Number of messages allocated to this msgSpender as a staker + msgAllotment uint32 + + // Max number of messages this msgSpender can use even if the shared pool is + // non-empty + maxMessages uint32 + + // Flag to indicate if this msgSpender is a staker + staking bool +} + +func (sp *msgSpender) String() string { + return fmt.Sprintf("MsgSpender(Messages: (%d+%d)/(%d+%d))", + sp.pendingPoolMessages, + sp.pendingMessages-sp.pendingPoolMessages, + sp.msgAllotment, + sp.maxMessages-sp.msgAllotment, + ) +} diff --git a/snow/networking/throttler/no.go b/snow/networking/throttler/no.go index 02f6dc18ae79..f734336375b9 100644 --- a/snow/networking/throttler/no.go +++ b/snow/networking/throttler/no.go @@ -9,17 +9,27 @@ import ( "github.com/ava-labs/gecko/ids" ) -type noThrottler struct{} +type noCountThrottler struct{} -func (noThrottler) AddMessage(ids.ShortID) {} +func (noCountThrottler) Add(ids.ShortID) {} -func (noThrottler) RemoveMessage(ids.ShortID) {} +func (noCountThrottler) Remove(ids.ShortID) {} -func (noThrottler) UtilizeCPU(ids.ShortID, time.Duration) {} +func (noCountThrottler) Throttle(ids.ShortID) bool { return false } -func (noThrottler) GetUtilization(ids.ShortID) (float64, bool) { return 0, false } +func (noCountThrottler) EndInterval() {} -func (noThrottler) EndInterval() {} +// NewNoCountThrottler returns a CountingThrottler that will never throttle +func NewNoCountThrottler() CountingThrottler { return noCountThrottler{} } -// NewNoThrottler returns a throttler that will never throttle -func NewNoThrottler() Throttler { return 
noThrottler{} } +type noCPUTracker struct{} + +func (noCPUTracker) UtilizeCPU(ids.ShortID, time.Duration) {} + +func (noCPUTracker) GetUtilization(ids.ShortID) float64 { return 0 } + +func (noCPUTracker) EndInterval() {} + +// NewNoCPUTracker returns a CPUTracker that does not track CPU usage and +// always returns 0 for the utilization value +func NewNoCPUTracker() CPUTracker { return noCPUTracker{} } diff --git a/snow/networking/throttler/throttler.go b/snow/networking/throttler/throttler.go index 3c0ef9679bab..39d4bcd4c0bb 100644 --- a/snow/networking/throttler/throttler.go +++ b/snow/networking/throttler/throttler.go @@ -9,6 +9,16 @@ import ( "github.com/ava-labs/gecko/ids" ) +const ( + // DefaultMaxNonStakerPendingMsgs rate limits the number of queued messages + // from non-stakers. + DefaultMaxNonStakerPendingMsgs uint32 = 3 + + // DefaultStakerPortion describes the percentage of resources that are + // reserved for stakers. + DefaultStakerPortion float64 = 0.2 +) + // Throttler provides an interface to register consumption // of resources and prioritize messages from nodes that have // used less CPU time. @@ -19,3 +29,19 @@ type Throttler interface { GetUtilization(ids.ShortID) (float64, bool) // Returns the CPU based priority and whether or not the peer has too many pending messages EndInterval() // Notify throttler that the current period has ended } + +// CPUTracker tracks the consumption of CPU time +type CPUTracker interface { + UtilizeCPU(ids.ShortID, time.Duration) + GetUtilization(ids.ShortID) float64 + EndInterval() +} + +// CountingThrottler tracks the usage of a discrete resource (ex. pending messages) by a peer +// and determines whether or not a peer should be throttled. +type CountingThrottler interface { + Add(ids.ShortID) + Remove(ids.ShortID) + Throttle(ids.ShortID) bool + EndInterval() +} diff --git a/snow/networking/throttler/throttler_test.go b/snow/networking/throttler/throttler_test.go index 835385e87dc2..9a77b4b6392e 100644 --- a/snow/networking/throttler/throttler_test.go +++ b/snow/networking/throttler/throttler_test.go @@ -12,150 +12,226 @@ import ( "github.com/ava-labs/gecko/utils/logging" ) -func TestEWMAThrottler(t *testing.T) { +func TestEWMATrackerPrioritizes(t *testing.T) { vdrs := validators.NewSet() - validator0 := validators.GenerateRandomValidator(1) - validator1 := validators.GenerateRandomValidator(1) - vdrs.Add(validator0) - vdrs.Add(validator1) - maxMessages := uint32(16) - msgPortion := 0.25 + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + nonStaker := ids.GenerateTestShortID() + + vdrs.AddWeight(vdr0, 1) + vdrs.AddWeight(vdr1, 1) + cpuPortion := 0.25 period := time.Second - throttler := NewEWMAThrottler(vdrs, maxMessages, msgPortion, cpuPortion, period, logging.NoLog{}) + throttler := NewEWMATracker(vdrs, cpuPortion, period, logging.NoLog{}) - throttler.UtilizeCPU(validator0.ID(), 25*time.Millisecond) - throttler.UtilizeCPU(validator1.ID(), 5*time.Second) + throttler.UtilizeCPU(vdr0, 25*time.Millisecond) + throttler.UtilizeCPU(vdr1, 5*time.Second) - cpu0, throttle0 := throttler.GetUtilization(validator0.ID()) - cpu1, throttle1 := throttler.GetUtilization(validator1.ID()) + cpu0 := throttler.GetUtilization(vdr0) + cpu1 := throttler.GetUtilization(vdr1) + cpuNonStaker := throttler.GetUtilization(nonStaker) - if throttle0 { - t.Fatalf("Should not throttle validator0 with no pending messages") - } - if throttle1 { - t.Fatalf("Should not throttle validator1 with no pending messages") + if cpu1 <= cpu0 { + t.Fatalf("CPU 
utilization for vdr1: %f should be greater than that of vdr0: %f", cpu1, cpu0) } - if cpu1 <= cpu0 { - t.Fatalf("CPU utilization for validator1: %f should be greater than that of validator0: %f", cpu1, cpu0) + if cpuNonStaker < cpu1 { + t.Fatalf("CPU Utilization for non-staker: %f should be greater than or equal to the CPU Utilization for the highest spending staker: %f", cpuNonStaker, cpu1) } +} + +func TestEWMATrackerPrunesSpenders(t *testing.T) { + vdrs := validators.NewSet() + + staker0 := ids.GenerateTestShortID() + staker1 := ids.GenerateTestShortID() + nonStaker0 := ids.GenerateTestShortID() + nonStaker1 := ids.GenerateTestShortID() + + vdrs.AddWeight(staker0, 1) + vdrs.AddWeight(staker1, 1) + + cpuPortion := 0.25 + period := time.Second + throttler := NewEWMATracker(vdrs, cpuPortion, period, logging.NoLog{}) - // Test that throttler prevents unknown validators from taking up half the message queue - for i := uint32(0); i < maxMessages; i++ { - throttler.AddMessage(ids.NewShortID([20]byte{byte(i)})) + throttler.UtilizeCPU(staker0, 1.0) + throttler.UtilizeCPU(nonStaker0, 1.0) + + // 3 Cases: + // Stakers should not be pruned + // Non-stakers with non-zero cpuEWMA should not be pruned + // Non-stakers with cpuEWMA of 0 should be pruned + + // After 64 intervals nonStaker0 should be removed because its cpuEWMA statistic should reach 0 + // while nonStaker1 utilizes the CPU in every interval, so it should not be removed. + for i := 0; i < 64; i++ { + throttler.UtilizeCPU(nonStaker1, 1.0) + throttler.EndInterval() } - _, throttle := throttler.GetUtilization(ids.NewShortID([20]byte{'s', 'y', 'b', 'i', 'l'})) - if !throttle { - t.Fatal("Throttler should have started throttling messages from unknown peers") + // Ensure that the validators and the non-staker heard from every interval were not pruned + ewmat := throttler.(*ewmaCPUTracker) + if _, ok := ewmat.cpuSpenders[staker0.Key()]; !ok { + t.Fatal("Staker was pruned from the set of spenders") + } + if _, ok := ewmat.cpuSpenders[staker1.Key()]; !ok { + t.Fatal("Staker was pruned from the set of spenders") + } + if _, ok := ewmat.cpuSpenders[nonStaker0.Key()]; ok { + t.Fatal("Non-staker, not heard from in 64 periods, should have been pruned from the set of spenders") + } + if _, ok := ewmat.cpuSpenders[nonStaker1.Key()]; ok { + t.Fatal("Non-staker heard from in every period, was pruned from the set of spenders") } } -func TestThrottlerPrunesSpenders(t *testing.T) { +func TestMessageThrottlerPrunesSpenders(t *testing.T) { vdrs := validators.NewSet() - staker0 := validators.GenerateRandomValidator(1) - staker1 := validators.GenerateRandomValidator(1) - nonStaker0 := ids.NewShortID([20]byte{1}) - nonStaker1 := ids.NewShortID([20]byte{2}) - nonStaker2 := ids.NewShortID([20]byte{3}) - vdrs.Add(staker0) - vdrs.Add(staker1) + staker0 := ids.GenerateTestShortID() + staker1 := ids.GenerateTestShortID() + nonStaker0 := ids.GenerateTestShortID() + nonStaker1 := ids.GenerateTestShortID() + nonStaker2 := ids.GenerateTestShortID() + + vdrs.AddWeight(staker0, 1) + vdrs.AddWeight(staker1, 1) maxMessages := uint32(1024) - cpuPortion := 0.25 msgPortion := 0.25 - period := time.Second - throttler := NewEWMAThrottler(vdrs, maxMessages, msgPortion, cpuPortion, period, logging.NoLog{}) - throttler.AddMessage(nonStaker2) // nonStaker2 should not be removed with a pending message - throttler.UtilizeCPU(nonStaker0, 1.0) - throttler.UtilizeCPU(nonStaker1, 1.0) - intervalsUntilPruning := int(defaultIntervalsUntilPruning) - // Let two intervals pass with no activity 
to ensure that nonStaker1 can be pruned + throttler := NewMessageThrottler(vdrs, maxMessages, DefaultMaxNonStakerPendingMsgs, msgPortion, logging.NoLog{}) + + // 4 Cases: + // Stakers should not be pruned + // Non-stakers with pending messages should not be pruned + // Non-stakers heard from recently should not be pruned + // Non-stakers not heard from in [defaultIntervalsUntilPruning] should be pruned + + // Add pending messages for nonStaker1 and nonStaker2 + throttler.Add(nonStaker2) // Will not be removed, so it should not be pruned + throttler.Add(nonStaker1) + throttler.EndInterval() + throttler.Remove(nonStaker1) // The pending message was removed, so nonStaker1 should be pruned throttler.EndInterval() - throttler.UtilizeCPU(nonStaker0, 1.0) + intervalsUntilPruning := int(defaultIntervalsUntilPruning) // Let the required number of intervals elapse to allow nonStaker1 to be pruned for i := 0; i < intervalsUntilPruning; i++ { + throttler.Add(nonStaker0) // nonStaker0 is heard from in every interval, so it should not be pruned throttler.EndInterval() + throttler.Remove(nonStaker0) } - // Ensure that the validators and the non-staker heard from in the past [intervalsUntilPruning] were not pruned - ewmat := throttler.(*ewmaThrottler) - if _, ok := ewmat.spenders[staker0.ID().Key()]; !ok { + msgThrottler := throttler.(*messageThrottler) + if _, ok := msgThrottler.msgSpenders[staker0.Key()]; !ok { t.Fatal("Staker was pruned from the set of spenders") } - if _, ok := ewmat.spenders[staker1.ID().Key()]; !ok { + if _, ok := msgThrottler.msgSpenders[staker1.Key()]; !ok { t.Fatal("Staker was pruned from the set of spenders") } - if _, ok := ewmat.spenders[nonStaker0.Key()]; !ok { - t.Fatal("Non-staker heard from recently was pruned from the set of spenders") + if _, ok := msgThrottler.msgSpenders[nonStaker0.Key()]; !ok { + t.Fatal("Non-staker heard from within [intervalsUntilPruning] was removed from the set of spenders") } - if _, ok := ewmat.spenders[nonStaker1.Key()]; ok { - t.Fatal("Non-staker not heard from in a long time was not pruned from the set of spenders") + if _, ok := msgThrottler.msgSpenders[nonStaker1.Key()]; ok { + t.Fatal("Non-staker not heard from within [intervalsUntilPruning] was not removed from the set of spenders") } - if _, ok := ewmat.spenders[nonStaker2.Key()]; !ok { + if _, ok := msgThrottler.msgSpenders[nonStaker2.Key()]; !ok { t.Fatal("Non-staker with a pending message was pruned from the set of spenders") } } -func TestThrottleStaker(t *testing.T) { +func TestMessageThrottling(t *testing.T) { vdrs := validators.NewSet() - staker0 := validators.GenerateRandomValidator(1) - staker1 := validators.GenerateRandomValidator(1) - nonStaker0 := ids.NewShortID([20]byte{1}) - vdrs.Add(staker0) - vdrs.Add(staker1) + staker0 := ids.GenerateTestShortID() + staker1 := ids.GenerateTestShortID() + nonStaker0 := ids.GenerateTestShortID() + nonStaker1 := ids.GenerateTestShortID() + + vdrs.AddWeight(staker0, 1) + vdrs.AddWeight(staker1, 1) - maxMessages := uint32(16) + maxMessages := uint32(8) msgPortion := 0.25 - cpuPortion := 0.25 - period := time.Second - throttler := NewEWMAThrottler(vdrs, maxMessages, msgPortion, cpuPortion, period, logging.NoLog{}) + throttler := NewMessageThrottler(vdrs, maxMessages, DefaultMaxNonStakerPendingMsgs, msgPortion, logging.NoLog{}) - // Message Allotment: 0.5 * 0.25 * 15 = 2 - // Message Pool: 12 messages - // Validator should be throttled iff it has exceeded its message allotment and the shared - // message pool is empty + // Message Allotment: 
0.5 * 0.25 * 8 = 1
+ // Message Pool: 8 * 0.75 = 6 messages
+ // Max Messages: 1 + DefaultMaxNonStakerPendingMsgs
+ // Validator should be throttled if it has exceeded its max messages
+ // or it has exceeded its message allotment and the shared message pool is empty.

- // staker0 consumes its own allotment plus 10 messages from the shared pool
- for i := 0; i < 12; i++ {
- throttler.AddMessage(staker0.ID())
- }
+ // staker0 consumes its entire message allotment

- for i := 0; i < 3; i++ {
- throttler.AddMessage(staker1.ID())
- if _, throttle := throttler.GetUtilization(staker1.ID()); throttle {
+ // Ensure that it is allowed to consume its entire max messages before being throttled
+ for i := 0; i < int(DefaultMaxNonStakerPendingMsgs)+1; i++ {
+ throttler.Add(staker0)
+ if throttler.Throttle(staker0) {
 t.Fatal("Should not throttle message from staker until it has exceeded its own allotment")
 }
 }

- // Consume the last message and one extra message from the shared pool
- throttler.AddMessage(nonStaker0)
- throttler.AddMessage(nonStaker0)
- throttler.AddMessage(nonStaker0)
+ // Ensure staker is throttled after exceeding its own max messages cap
+ throttler.Add(staker0)
+ if !throttler.Throttle(staker0) {
+ t.Fatal("Should have throttled message after exceeding message cap")
+ }
+
+ // Remove messages so that staker0 is left with only its normal message allotment pending
+ for i := 0; i < int(DefaultMaxNonStakerPendingMsgs)+1; i++ {
+ throttler.Remove(staker0)
+ }
+
+ // Consume the entire message pool among two non-stakers
+ for i := 0; i < int(DefaultMaxNonStakerPendingMsgs); i++ {
+ throttler.Add(nonStaker0)
+ throttler.Add(nonStaker1)
+
+ // Neither should be throttled because each stays within its own message cap
+ // while the shared pool is drained.
+ if throttler.Throttle(nonStaker0) {
+ t.Fatalf("Should not have throttled message from nonStaker0 after %d messages", i)
+ }
+ if throttler.Throttle(nonStaker1) {
+ t.Fatalf("Should not have throttled message from nonStaker1 after %d messages", i)
+ }
+ }
+
+ // An additional message from staker0 should now cause it to be throttled since the message pool
+ // has been emptied. 
+ if throttler.Throttle(staker0) { + t.Fatal("Should not have throttled message from staker until it had exceeded its message allotment.") + } + throttler.Add(staker0) + if !throttler.Throttle(staker0) { + t.Fatal("Should have throttled message from staker0 after it exceeded its message allotment because the message pool was empty.") + } + + if !throttler.Throttle(nonStaker0) { + t.Fatal("Should have throttled message from nonStaker0 after the message pool was emptied") + } - if _, throttle := throttler.GetUtilization(staker1.ID()); !throttle { - t.Fatal("Should have throttled message from staker after it exceeded its own allotment and the shared pool was empty") + if !throttler.Throttle(nonStaker1) { + t.Fatal("Should have throttled message from nonStaker1 after the message pool was emptied") } } func TestCalculatesEWMA(t *testing.T) { vdrs := validators.NewSet() - validator0 := validators.GenerateRandomValidator(1) - validator1 := validators.GenerateRandomValidator(1) - vdrs.Add(validator0) - vdrs.Add(validator1) - maxMessages := uint32(16) - msgPortion := 0.25 + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + + vdrs.AddWeight(vdr0, 1) + vdrs.AddWeight(vdr1, 1) + stakerPortion := 0.25 period := time.Second - throttler := NewEWMAThrottler(vdrs, maxMessages, msgPortion, stakerPortion, period, logging.NoLog{}) + throttler := NewEWMATracker(vdrs, stakerPortion, period, logging.NoLog{}) // Spend X CPU time in consecutive intervals and ensure that the throttler correctly calculates EWMA spends := []time.Duration{ @@ -172,12 +248,12 @@ func TestCalculatesEWMA(t *testing.T) { ewma += spend ewma = time.Duration(float64(ewma) / decayFactor) - throttler.UtilizeCPU(validator0.ID(), spend) + throttler.UtilizeCPU(vdr0, spend) throttler.EndInterval() } - ewmat := throttler.(*ewmaThrottler) - sp := ewmat.getSpender(validator0.ID()) + ewmat := throttler.(*ewmaCPUTracker) + sp := ewmat.getSpender(vdr0) if sp.cpuEWMA != ewma { t.Fatalf("EWMA Throttler calculated EWMA incorrectly, expected: %s, but calculated: %s", ewma, sp.cpuEWMA) } diff --git a/snow/validators/manager.go b/snow/validators/manager.go index 1a85b46c7a3a..da83e21e0a6a 100644 --- a/snow/validators/manager.go +++ b/snow/validators/manager.go @@ -11,52 +11,79 @@ import ( // Manager holds the validator set of each subnet type Manager interface { - // PutValidatorSet puts associaties the given subnet ID with the given validator set - PutValidatorSet(ids.ID, Set) + // Set a subnet's validator set + Set(ids.ID, Set) error - // RemoveValidatorSet removes the specified validator set - RemoveValidatorSet(ids.ID) + // AddWeight adds weight to a given validator on the given subnet + AddWeight(ids.ID, ids.ShortID, uint64) error - // GetGroup returns: - // 1) the validator set of the subnet with the specified ID - // 2) false if there is no subnet with the specified ID - GetValidatorSet(ids.ID) (Set, bool) + // RemoveWeight removes weight from a given validator on a given subnet + RemoveWeight(ids.ID, ids.ShortID, uint64) error + + // GetValidators returns the validator set for the given subnet + // Returns false if the subnet doesn't exist + GetValidators(ids.ID) (Set, bool) } // NewManager returns a new, empty manager func NewManager() Manager { return &manager{ - validatorSets: make(map[[32]byte]Set), + subnetToVdrs: make(map[[32]byte]Set), } } // manager implements Manager type manager struct { - lock sync.Mutex - validatorSets map[[32]byte]Set + lock sync.Mutex + // Key: Subnet ID + // Value: The validators that validate the 
subnet
+ subnetToVdrs map[[32]byte]Set
+}
+
+func (m *manager) Set(subnetID ids.ID, newSet Set) error {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ subnetKey := subnetID.Key()
+
+ oldSet, exists := m.subnetToVdrs[subnetKey]
+ if !exists {
+ m.subnetToVdrs[subnetKey] = newSet
+ return nil
+ }
+ return oldSet.Set(newSet.List())
}

-// PutValidatorSet implements the Manager interface.
-func (m *manager) PutValidatorSet(subnetID ids.ID, set Set) {
+// AddWeight implements the Manager interface.
+func (m *manager) AddWeight(subnetID ids.ID, vdrID ids.ShortID, weight uint64) error {
 m.lock.Lock()
 defer m.lock.Unlock()

+ subnetIDKey := subnetID.Key()
- m.validatorSets[subnetID.Key()] = set
+ vdrs, ok := m.subnetToVdrs[subnetIDKey]
+ if !ok {
+ vdrs = NewSet()
+ m.subnetToVdrs[subnetIDKey] = vdrs
+ }
+ return vdrs.AddWeight(vdrID, weight)
}

-// RemoveValidatorSet implements the Manager interface.
-func (m *manager) RemoveValidatorSet(subnetID ids.ID) {
+// RemoveWeight implements the Manager interface.
+func (m *manager) RemoveWeight(subnetID ids.ID, vdrID ids.ShortID, weight uint64) error {
 m.lock.Lock()
 defer m.lock.Unlock()

- delete(m.validatorSets, subnetID.Key())
+ if vdrs, ok := m.subnetToVdrs[subnetID.Key()]; ok {
+ return vdrs.RemoveWeight(vdrID, weight)
+ }
+ return nil
}

-// GetValidatorSet implements the Manager interface.
-func (m *manager) GetValidatorSet(subnetID ids.ID) (Set, bool) {
+// GetValidators implements the Manager interface.
+func (m *manager) GetValidators(subnetID ids.ID) (Set, bool) {
 m.lock.Lock()
 defer m.lock.Unlock()

- set, exists := m.validatorSets[subnetID.Key()]
- return set, exists
+ vdrs, ok := m.subnetToVdrs[subnetID.Key()]
+ return vdrs, ok
}
diff --git a/snow/validators/set.go b/snow/validators/set.go
index ef967ae55e26..b5986e571a7a 100644
--- a/snow/validators/set.go
+++ b/snow/validators/set.go
@@ -10,9 +10,8 @@ import (
 "github.com/ava-labs/gecko/ids"
 "github.com/ava-labs/gecko/utils/formatting"
- "github.com/ava-labs/gecko/utils/sampler"
-
 safemath "github.com/ava-labs/gecko/utils/math"
+ "github.com/ava-labs/gecko/utils/sampler"
)

const (
@@ -36,14 +35,14 @@ type Set interface {
 // validators to the set.
 Set([]Validator) error

- // Add the provided validator to the set.
- Add(Validator) error
+ // AddWeight to a staker.
+ AddWeight(ids.ShortID, uint64) error

- // Get the validator from the set.
- Get(ids.ShortID) (Validator, bool)
+ // GetWeight of a staker.
+ GetWeight(ids.ShortID) (uint64, bool)

- // Remove the validator with the specified ID.
- Remove(ids.ShortID) error
+ // RemoveWeight from a staker.
+ RemoveWeight(ids.ShortID, uint64) error

 // Contains returns true if there is a validator with the specified ID
 // currently in the set. 
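[Reviewer note] To make the reworked API above concrete, here is a minimal sketch of how a caller drives the new weight-based Manager and Set. It uses only identifiers introduced in this diff; the subnet ID and the weights are illustrative, not taken from the codebase:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/validators"
)

func main() {
	subnetID := ids.Empty // illustrative subnet ID
	vdrID := ids.GenerateTestShortID()

	// Callers no longer build Validator values and Add/Remove them; they
	// adjust weights directly, and the manager lazily creates the subnet's set.
	m := validators.NewManager()
	if err := m.AddWeight(subnetID, vdrID, 100); err != nil {
		panic(err)
	}
	if err := m.RemoveWeight(subnetID, vdrID, 40); err != nil {
		panic(err)
	}

	// GetValidators replaces GetValidatorSet.
	if vdrs, ok := m.GetValidators(subnetID); ok {
		w, _ := vdrs.GetWeight(vdrID)
		// Prints "60 60"; removing the remaining 60 would drop the validator
		// from the set entirely, since weight-0 validators are never sampled.
		fmt.Println(w, vdrs.Weight())
	}
}
```

The design point is that membership is now derived from weight: adding weight creates the validator entry on demand, and removing a validator's last unit of weight removes it from sampling altogether.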
@@ -85,7 +84,7 @@ func NewBestSet(expectedSampleSize int) Set {
 type set struct {
 lock sync.Mutex
 vdrMap map[[20]byte]int
- vdrSlice []Validator
+ vdrSlice []*validator
 vdrWeights []uint64
 sampler sampler.WeightedWithoutReplacement
 totalWeight uint64
@@ -108,7 +107,7 @@ func (s *set) set(vdrs []Validator) error {
 if newCap < lenVdrs {
 newCap = lenVdrs
 }
- s.vdrSlice = make([]Validator, 0, newCap)
+ s.vdrSlice = make([]*validator, 0, newCap)
 s.vdrWeights = make([]uint64, 0, newCap)
 } else {
 s.vdrSlice = s.vdrSlice[:0]
@@ -122,7 +121,6 @@ func (s *set) set(vdrs []Validator) error {
 if s.contains(vdrID) {
 continue
 }
-
 w := vdr.Weight()
 if w == 0 {
 continue // This validator would never be sampled anyway
@@ -130,7 +128,10 @@ func (s *set) set(vdrs []Validator) error {
 i := len(s.vdrSlice)
 s.vdrMap[vdrID.Key()] = i
- s.vdrSlice = append(s.vdrSlice, vdr)
+ s.vdrSlice = append(s.vdrSlice, &validator{
+ id: vdr.ID(),
+ weight: vdr.Weight(),
+ })
 s.vdrWeights = append(s.vdrWeights, w)
 newTotalWeight, err := safemath.Add64(s.totalWeight, w)
 if err != nil {
@@ -142,35 +143,91 @@ func (s *set) set(vdrs []Validator) error {
 }

-// Add implements the Set interface.
-func (s *set) Add(vdr Validator) error {
+// AddWeight implements the Set interface.
+func (s *set) AddWeight(vdrID ids.ShortID, weight uint64) error {
 s.lock.Lock()
 defer s.lock.Unlock()

- return s.add(vdr)
+ return s.addWeight(vdrID, weight)
}

-func (s *set) add(vdr Validator) error {
- vdrID := vdr.ID()
- if s.contains(vdrID) {
- if err := s.remove(vdrID); err != nil {
- return err
- }
- }
-
- w := vdr.Weight()
- if w == 0 {
+func (s *set) addWeight(vdrID ids.ShortID, weight uint64) error {
+ if weight == 0 {
 return nil // This validator would never be sampled anyway
 }

- i := len(s.vdrSlice)
- s.vdrMap[vdrID.Key()] = i
- s.vdrSlice = append(s.vdrSlice, vdr)
- s.vdrWeights = append(s.vdrWeights, w)
- newTotalWeight, err := safemath.Add64(s.totalWeight, w)
+ newTotalWeight, err := safemath.Add64(s.totalWeight, weight)
 if err != nil {
 return err
 }
 s.totalWeight = newTotalWeight
+
+ vdrIDKey := vdrID.Key()
+
+ var vdr *validator
+ i, ok := s.vdrMap[vdrIDKey]
+ if !ok {
+ vdr = &validator{
+ id: vdrID,
+ }
+ i = len(s.vdrSlice)
+ s.vdrSlice = append(s.vdrSlice, vdr)
+ s.vdrWeights = append(s.vdrWeights, 0)
+ s.vdrMap[vdrIDKey] = i
+ } else {
+ vdr = s.vdrSlice[i]
+ }
+
+ s.vdrWeights[i] += weight
+ vdr.addWeight(weight)
+ return s.sampler.Initialize(s.vdrWeights)
+}
+
+// GetWeight implements the Set interface.
+func (s *set) GetWeight(vdrID ids.ShortID) (uint64, bool) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ return s.getWeight(vdrID)
+}
+
+func (s *set) getWeight(vdrID ids.ShortID) (uint64, bool) {
+ if index, ok := s.vdrMap[vdrID.Key()]; ok {
+ return s.vdrWeights[index], true
+ }
+ return 0, false
+}
+
+// RemoveWeight implements the Set interface. 
+func (s *set) RemoveWeight(vdrID ids.ShortID, weight uint64) error { + s.lock.Lock() + defer s.lock.Unlock() + + return s.removeWeight(vdrID, weight) +} + +func (s *set) removeWeight(vdrID ids.ShortID, weight uint64) error { + if weight == 0 { + return nil + } + + i, ok := s.vdrMap[vdrID.Key()] + if !ok { + return nil + } + + // Validator exists + vdr := s.vdrSlice[i] + + weight = safemath.Min64(s.vdrWeights[i], weight) + s.vdrWeights[i] -= weight + s.totalWeight -= weight + vdr.removeWeight(weight) + + if vdr.Weight() == 0 { + if err := s.remove(vdrID); err != nil { + return err + } + } return s.sampler.Initialize(s.vdrWeights) } @@ -190,14 +247,6 @@ func (s *set) get(vdrID ids.ShortID) (Validator, bool) { return s.vdrSlice[index], true } -// Remove implements the Set interface. -func (s *set) Remove(vdrID ids.ShortID) error { - s.lock.Lock() - defer s.lock.Unlock() - - return s.remove(vdrID) -} - func (s *set) remove(vdrID ids.ShortID) error { // Get the element to remove iKey := vdrID.Key() @@ -263,7 +312,9 @@ func (s *set) List() []Validator { func (s *set) list() []Validator { list := make([]Validator, len(s.vdrSlice)) - copy(list, s.vdrSlice) + for i, vdr := range s.vdrSlice { + list[i] = vdr + } return list } @@ -295,17 +346,6 @@ func (s *set) Weight() uint64 { return s.totalWeight } -func (s *set) calculateWeight() (uint64, error) { - weight := uint64(0) - for _, vdr := range s.vdrSlice { - weight, err := safemath.Add64(weight, vdr.Weight()) - if err != nil { - return weight, err - } - } - return weight, nil -} - func (s *set) String() string { s.lock.Lock() defer s.lock.Unlock() diff --git a/snow/validators/set_test.go b/snow/validators/set_test.go index 05447515a5b5..170e5c966b14 100644 --- a/snow/validators/set_test.go +++ b/snow/validators/set_test.go @@ -38,101 +38,92 @@ func TestSetSet(t *testing.T) { } func TestSamplerSample(t *testing.T) { - vdr0 := GenerateRandomValidator(1) - vdr1 := GenerateRandomValidator(math.MaxInt64 - 1) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() s := NewSet() - err := s.Add(vdr0) + err := s.AddWeight(vdr0, 1) assert.NoError(t, err) sampled, err := s.Sample(1) assert.NoError(t, err) assert.Len(t, sampled, 1, "should have only sampled one validator") - assert.Equal(t, vdr0.ID(), sampled[0].ID(), "should have sampled vdr0") + assert.Equal(t, vdr0, sampled[0].ID(), "should have sampled vdr0") _, err = s.Sample(2) assert.Error(t, err, "should have errored during sampling") - err = s.Add(vdr1) + err = s.AddWeight(vdr1, math.MaxInt64-1) assert.NoError(t, err) sampled, err = s.Sample(1) assert.NoError(t, err) assert.Len(t, sampled, 1, "should have only sampled one validator") - assert.Equal(t, vdr1.ID(), sampled[0].ID(), "should have sampled vdr1") + assert.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") sampled, err = s.Sample(2) assert.NoError(t, err) assert.Len(t, sampled, 2, "should have sampled two validators") - assert.Equal(t, vdr1.ID(), sampled[0].ID(), "should have sampled vdr1") - assert.Equal(t, vdr1.ID(), sampled[1].ID(), "should have sampled vdr1") + assert.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") + assert.Equal(t, vdr1, sampled[1].ID(), "should have sampled vdr1") sampled, err = s.Sample(3) assert.NoError(t, err) assert.Len(t, sampled, 3, "should have sampled three validators") - assert.Equal(t, vdr1.ID(), sampled[0].ID(), "should have sampled vdr1") - assert.Equal(t, vdr1.ID(), sampled[1].ID(), "should have sampled vdr1") - assert.Equal(t, vdr1.ID(), sampled[2].ID(), "should have 
sampled vdr1") + assert.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") + assert.Equal(t, vdr1, sampled[1].ID(), "should have sampled vdr1") + assert.Equal(t, vdr1, sampled[2].ID(), "should have sampled vdr1") } func TestSamplerDuplicate(t *testing.T) { - vdr0 := GenerateRandomValidator(1) - vdr1_0 := GenerateRandomValidator(math.MaxInt64 - 1) - vdr1_1 := NewValidator(vdr1_0.ID(), 0) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() s := NewSet() - err := s.Add(vdr0) - assert.NoError(t, err) - - err = s.Add(vdr1_0) + err := s.AddWeight(vdr0, 1) assert.NoError(t, err) - sampled, err := s.Sample(1) + err = s.AddWeight(vdr1, 1) assert.NoError(t, err) - assert.Len(t, sampled, 1, "should have only sampled one validator") - assert.Equal(t, vdr1_0.ID(), sampled[0].ID(), "should have sampled vdr1") - err = s.Add(vdr1_1) + err = s.AddWeight(vdr1, math.MaxInt64-2) assert.NoError(t, err) - sampled, err = s.Sample(1) + sampled, err := s.Sample(1) assert.NoError(t, err) assert.Len(t, sampled, 1, "should have only sampled one validator") - assert.Equal(t, vdr0.ID(), sampled[0].ID(), "should have sampled vdr0") + assert.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") } func TestSamplerContains(t *testing.T) { - vdr := GenerateRandomValidator(1) + vdr := ids.GenerateTestShortID() s := NewSet() - err := s.Add(vdr) + err := s.AddWeight(vdr, 1) assert.NoError(t, err) - contains := s.Contains(vdr.ID()) + contains := s.Contains(vdr) assert.True(t, contains, "should have contained validator") - err = s.Remove(vdr.ID()) + err = s.RemoveWeight(vdr, 1) assert.NoError(t, err) - contains = s.Contains(vdr.ID()) + contains = s.Contains(vdr) assert.False(t, contains, "shouldn't have contained validator") } func TestSamplerString(t *testing.T) { - vdr0 := NewValidator(ids.ShortEmpty, 1) - vdr1 := NewValidator( - ids.NewShortID([20]byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - }), - math.MaxInt64-1, - ) + vdr0 := ids.ShortEmpty + vdr1 := ids.NewShortID([20]byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + }) s := NewSet() - err := s.Add(vdr0) + err := s.AddWeight(vdr0, 1) assert.NoError(t, err) - err = s.Add(vdr1) + err = s.AddWeight(vdr1, math.MaxInt64-1) assert.NoError(t, err) expected := "Validator Set: (Size = 2)\n" + @@ -143,16 +134,16 @@ func TestSamplerString(t *testing.T) { } func TestSetWeight(t *testing.T) { + vdr0 := ids.NewShortID([20]byte{1}) weight0 := uint64(93) - vdr0 := NewValidator(ids.NewShortID([20]byte{1}), weight0) + vdr1 := ids.NewShortID([20]byte{2}) weight1 := uint64(123) - vdr1 := NewValidator(ids.NewShortID([20]byte{2}), weight1) s := NewSet() - err := s.Add(vdr0) + err := s.AddWeight(vdr0, weight0) assert.NoError(t, err) - err = s.Add(vdr1) + err = s.AddWeight(vdr1, weight1) assert.NoError(t, err) setWeight := s.Weight() diff --git a/snow/validators/validator.go b/snow/validators/validator.go index bbdda0ad646a..b4ce1f2b4406 100644 --- a/snow/validators/validator.go +++ b/snow/validators/validator.go @@ -4,16 +4,46 @@ package validators import ( + "math" + "github.com/ava-labs/gecko/ids" + safemath "github.com/ava-labs/gecko/utils/math" ) // Validator is the minimal description of someone that can be sampled. 
type Validator interface { - // ID returns the unique id of this validator + // ID returns the node ID of this validator ID() ids.ShortID - // Weight that can be used for weighted sampling. - // If this validator is validating the default subnet, returns the amount of - // AVAX staked + // Returns this validator's weight Weight() uint64 } + +type validator struct { + id ids.ShortID + weight uint64 +} + +func (v *validator) ID() ids.ShortID { + return v.id +} + +func (v *validator) addWeight(weight uint64) { + newTotalWeight, err := safemath.Add64(weight, v.weight) + if err != nil { + newTotalWeight = math.MaxUint64 + } + v.weight = newTotalWeight +} + +func (v *validator) removeWeight(weight uint64) { + newTotalWeight, err := safemath.Sub64(v.weight, weight) + if err != nil { + newTotalWeight = 0 + } + v.weight = newTotalWeight +} + +func (v *validator) Weight() uint64 { + return v.weight +} diff --git a/utils/codec/codec_test.go b/utils/codec/codec_test.go index 8d4f059ba9f6..e48ed3411a6c 100644 --- a/utils/codec/codec_test.go +++ b/utils/codec/codec_test.go @@ -148,7 +148,7 @@ func TestSlice(t *testing.T) { // Test marshalling/unmarshalling largest possible slice func TestMaxSizeSlice(t *testing.T) { - mySlice := make([]string, math.MaxUint16, math.MaxUint16) + mySlice := make([]string, math.MaxUint16) mySlice[0] = "first!" mySlice[math.MaxUint16-1] = "last!" codec := NewDefault() diff --git a/utils/constants/constants.go b/utils/constants/constants.go index 51c1f2a0373e..203b406e1cf1 100644 --- a/utils/constants/constants.go +++ b/utils/constants/constants.go @@ -21,8 +21,8 @@ const ( // Variables to be exported var ( - DefaultSubnetID = ids.Empty - PlatformChainID = ids.Empty + PrimaryNetworkID = ids.Empty + PlatformChainID = ids.Empty MainnetID uint32 = 1 CascadeID uint32 = 2 diff --git a/utils/math/safe_math_test.go b/utils/math/safe_math_test.go index 47f65f994395..e5653ea7bf5f 100644 --- a/utils/math/safe_math_test.go +++ b/utils/math/safe_math_test.go @@ -57,17 +57,17 @@ func TestAdd64(t *testing.T) { t.Fatalf("Expected %d, got %d", uint64(1<<63), sum) } - sum, err = Add64(1, maxUint64) + _, err = Add64(1, maxUint64) if err == nil { t.Fatalf("Add64 succeeded unexpectedly") } - sum, err = Add64(maxUint64, 1) + _, err = Add64(maxUint64, 1) if err == nil { t.Fatalf("Add64 succeeded unexpectedly") } - sum, err = Add64(maxUint64, maxUint64) + _, err = Add64(maxUint64, maxUint64) if err == nil { t.Fatalf("Add64 succeeded unexpectedly") } diff --git a/vms/avm/create_asset_tx_test.go b/vms/avm/create_asset_tx_test.go index 18a77bfb6d85..b939080f10fc 100644 --- a/vms/avm/create_asset_tx_test.go +++ b/vms/avm/create_asset_tx_test.go @@ -21,7 +21,7 @@ var ( illegalNameCharacter = "h8*32" invalidASCIIStr = "ÉÎ" invalidWhitespaceStr = " HAT" - denominationTooLarge = maxDenomination + 1 + denominationTooLarge = byte(maxDenomination + 1) ) func validCreateAssetTx(t *testing.T) (*CreateAssetTx, codec.Codec, *snow.Context) { @@ -461,7 +461,7 @@ func TestCreateAssetTxSyntacticVerifyDenominationTooLong(t *testing.T) { }}, Name: "BRADY", Symbol: "TOM", - Denomination: 33, + Denomination: denominationTooLarge, States: []*InitialState{{ FxID: 0, }}, @@ -519,6 +519,29 @@ func TestCreateAssetTxSyntacticVerifyNameWithInvalidCharacter(t *testing.T) { } } +func TestCreateAssetTxSyntacticVerifyNameWithUnicodeCharacter(t *testing.T) { + ctx := NewContext(t) + c := setupCodec() + + tx := &CreateAssetTx{ + BaseTx: BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + }}, + Name: 
invalidASCIIStr,
+ Symbol: "TOM",
+ Denomination: 0,
+ States: []*InitialState{{
+ FxID: 0,
+ }},
+ }
+ tx.Initialize(nil, nil)
+
+ if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 1); err == nil {
+ t.Fatalf("Name with an invalid Unicode character should have errored")
+ }
+}
+
 func TestCreateAssetTxSyntacticVerifySymbolWithInvalidCharacter(t *testing.T) {
 ctx := NewContext(t)
 c := setupCodec()
diff --git a/vms/avm/initial_state.go b/vms/avm/initial_state.go
index b8fb1a9ce5d8..40a2e71e0fb7 100644
--- a/vms/avm/initial_state.go
+++ b/vms/avm/initial_state.go
@@ -52,35 +52,6 @@ func (is *InitialState) Verify(c codec.Codec, numFxs int) error {
 // Sort ...
 func (is *InitialState) Sort(c codec.Codec) { sortState(is.Outs, c) }

-type innerSortVerifiables struct {
- vers []verify.Verifiable
- codec codec.Codec
-}
-
-func (vers *innerSortVerifiables) Less(i, j int) bool {
- iVer := vers.vers[i]
- jVer := vers.vers[j]
-
- iBytes, err := vers.codec.Marshal(&iVer)
- if err != nil {
- return false
- }
- jBytes, err := vers.codec.Marshal(&jVer)
- if err != nil {
- return false
- }
- return bytes.Compare(iBytes, jBytes) == -1
-}
-func (vers *innerSortVerifiables) Len() int { return len(vers.vers) }
-func (vers *innerSortVerifiables) Swap(i, j int) { v := vers.vers; v[j], v[i] = v[i], v[j] }
-
-func sortVerifiables(vers []verify.Verifiable, c codec.Codec) {
- sort.Sort(&innerSortVerifiables{vers: vers, codec: c})
-}
-func isSortedVerifiables(vers []verify.Verifiable, c codec.Codec) bool {
- return sort.IsSorted(&innerSortVerifiables{vers: vers, codec: c})
-}
-
 type innerSortState struct {
 vers []verify.State
 codec codec.Codec
diff --git a/vms/avm/service.go b/vms/avm/service.go
index a0cf8c672051..005b28ccda81 100644
--- a/vms/avm/service.go
+++ b/vms/avm/service.go
@@ -116,7 +116,7 @@ func (service *Service) GetTx(r *http.Request, args *api.JsonTxID, reply *Format
 // Marks a starting or stopping point when fetching UTXOs. Used for pagination.
 type Index struct {
 Address string `json:"address"` // The address as a string
- Utxo string `json:"utxo"` // The UTXO ID as a string
+ UTXO string `json:"utxo"` // The UTXO ID as a string
 }

 // GetUTXOsArgs are arguments for passing into GetUTXOs.
@@ -125,7 +125,7 @@ type Index struct {
 // If [limit] == 0 or > [maxUTXOsToFetch], fetches up to [maxUTXOsToFetch].
 // [StartIndex] defines where to start fetching UTXOs (for pagination.)
 // UTXOs fetched are from addresses equal to or greater than [StartIndex.Address]
-// For address [StartIndex.Address], only UTXOs with IDs greater than [StartIndex.Utxo] will be returned.
+// For address [StartIndex.Address], only UTXOs with IDs greater than [StartIndex.UTXO] will be returned.
// If [StartIndex] is omitted, gets all UTXOs.
// If GetUTXOs is called multiple times, with or without [StartIndex], it is not guaranteed
// that returned UTXOs are unique. That is, the same UTXO may appear in the response of multiple calls. 
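[Reviewer note] The pagination contract documented above (resume from EndIndex, no uniqueness guarantee across calls) is easy to get wrong from prose alone, so here is a hedged client-side sketch. The Index JSON fields mirror this diff, but the fetchPage helper and the Addresses/Limit/UTXOs field names are placeholders for whatever RPC plumbing a caller uses; they are not part of this change:

```go
package avmpaging

// Index mirrors the JSON fields defined in the diff ("address", "utxo").
type Index struct {
	Address string `json:"address"`
	UTXO    string `json:"utxo"`
}

// GetUTXOsArgs and GetUTXOsReply are assumed shapes for illustration only.
type GetUTXOsArgs struct {
	Addresses  []string
	Limit      uint32
	StartIndex Index
}

type GetUTXOsReply struct {
	UTXOs    []string
	EndIndex Index
}

// fetchPage stands in for a JSON-RPC call to avm.getUTXOs.
var fetchPage func(GetUTXOsArgs) (GetUTXOsReply, error)

// fetchAllUTXOs pages through getUTXOs by feeding each reply's EndIndex back
// in as the next StartIndex, deduplicating because the API explicitly does
// not guarantee unique UTXOs across calls.
func fetchAllUTXOs(addrs []string, limit uint32) ([]string, error) {
	seen := make(map[string]bool)
	var all []string
	var start Index // zero value: start from the beginning
	for {
		reply, err := fetchPage(GetUTXOsArgs{Addresses: addrs, Limit: limit, StartIndex: start})
		if err != nil {
			return nil, err
		}
		for _, u := range reply.UTXOs {
			if !seen[u] {
				seen[u] = true
				all = append(all, u)
			}
		}
		if uint32(len(reply.UTXOs)) < limit {
			return all, nil // short page: nothing left to fetch
		}
		start = reply.EndIndex // resume after the last UTXO returned
	}
}
```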
@@ -176,7 +176,7 @@ func (service *Service) GetUTXOs(r *http.Request, args *GetUTXOsArgs, reply *Get startAddr := ids.ShortEmpty startUTXO := ids.Empty - if args.StartIndex.Address != "" || args.StartIndex.Utxo != "" { + if args.StartIndex.Address != "" || args.StartIndex.UTXO != "" { addrChainID, addr, err := service.vm.ParseAddress(args.StartIndex.Address) if err != nil { return fmt.Errorf("couldn't parse start index address: %w", err) @@ -185,7 +185,7 @@ func (service *Service) GetUTXOs(r *http.Request, args *GetUTXOsArgs, reply *Get return fmt.Errorf("addresses from multiple chains provided: %q and %q", chainID, addrChainID) } - utxo, err := ids.FromString(args.StartIndex.Utxo) + utxo, err := ids.FromString(args.StartIndex.UTXO) if err != nil { return fmt.Errorf("couldn't parse start index utxo: %w", err) } @@ -235,7 +235,7 @@ func (service *Service) GetUTXOs(r *http.Request, args *GetUTXOsArgs, reply *Get } reply.EndIndex.Address = endAddress - reply.EndIndex.Utxo = endUTXOID.String() + reply.EndIndex.UTXO = endUTXOID.String() return nil } @@ -372,8 +372,8 @@ func (service *Service) GetAllBalances(r *http.Request, args *api.JsonAddress, r return fmt.Errorf("couldn't get address's UTXOs: %s", err) } - assetIDs := ids.Set{} // IDs of assets the address has a non-zero balance of - balances := make(map[[32]byte]uint64, 0) // key: ID (as bytes). value: balance of that asset + assetIDs := ids.Set{} // IDs of assets the address has a non-zero balance of + balances := make(map[[32]byte]uint64) // key: ID (as bytes). value: balance of that asset for _, utxo := range utxos { transferable, ok := utxo.Out.(avax.TransferableOut) if !ok { @@ -886,17 +886,30 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *a // SendArgs are arguments for passing into Send requests type SendArgs struct { + // Username and password of user sending the funds api.UserPass - Amount json.Uint64 `json:"amount"` - AssetID string `json:"assetID"` - To string `json:"to"` + + // The amount of funds to send + Amount json.Uint64 `json:"amount"` + + // ID of the asset being sent + AssetID string `json:"assetID"` + + // Address of the recipient + To string `json:"to"` + + // Memo field + Memo string `json:"memo"` } // Send returns the ID of the newly created transaction func (service *Service) Send(r *http.Request, args *SendArgs, reply *api.JsonTxID) error { service.vm.ctx.Log.Info("AVM: Send called with username: %s", args.Username) - if args.Amount == 0 { + memoBytes := []byte(args.Memo) + if l := len(memoBytes); l > avax.MaxMemoSize { + return fmt.Errorf("max memo length is %d but provided memo field is length %d", avax.MaxMemoSize, l) + } else if args.Amount == 0 { return errInvalidAmount } @@ -983,6 +996,7 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *api.JsonTxI BlockchainID: service.vm.ctx.ChainID, Outs: outs, Ins: ins, + Memo: memoBytes, }}} if err := tx.SignSECP256K1Fx(service.vm.codec, keys); err != nil { return err @@ -1315,12 +1329,12 @@ func (service *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, reply return err } - atomicUtxos, _, _, err := service.vm.GetAtomicUTXOs(chainID, kc.Addrs, ids.ShortEmpty, ids.Empty, -1) + atomicUTXOs, _, _, err := service.vm.GetAtomicUTXOs(chainID, kc.Addrs, ids.ShortEmpty, ids.Empty, -1) if err != nil { return fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) } - amountsSpent, importInputs, importKeys, err := service.vm.SpendAll(atomicUtxos, kc) + amountsSpent, importInputs, importKeys, err := 
service.vm.SpendAll(atomicUTXOs, kc)
 if err != nil {
 return err
 }
diff --git a/vms/avm/state_test.go b/vms/avm/state_test.go
index cd9e3d306370..07c6b3eb2909 100644
--- a/vms/avm/state_test.go
+++ b/vms/avm/state_test.go
@@ -17,7 +17,7 @@ import (
 // Test function IDs when argument start is empty
 func TestStateIDsNoStart(t *testing.T) {
- _, _, vm , _ := GenesisVM(t)
+ _, _, vm, _ := GenesisVM(t)
 ctx := vm.ctx
 defer func() {
 vm.Shutdown()
@@ -56,6 +56,9 @@ func TestStateIDsNoStart(t *testing.T) {
 }

 result, err = state.IDs(ids.Empty.Bytes(), []byte{}, math.MaxInt32)
+ if err != nil {
+ t.Fatal(err)
+ }
 if len(result) != len(expected) {
 t.Fatalf("Returned the wrong number of ids")
 }
@@ -149,7 +152,7 @@ func TestStateIDsNoStart(t *testing.T) {
 }

 func TestStateIDsWithStart(t *testing.T) {
- _, _, vm , _ := GenesisVM(t)
+ _, _, vm, _ := GenesisVM(t)
 ctx := vm.ctx
 defer func() {
 vm.Shutdown()
@@ -195,7 +198,7 @@ func TestStateIDsWithStart(t *testing.T) {
 }

 func TestStateStatuses(t *testing.T) {
- _, _, vm , _ := GenesisVM(t)
+ _, _, vm, _ := GenesisVM(t)
 ctx := vm.ctx
 defer func() {
 vm.Shutdown()
@@ -242,7 +245,7 @@ func TestStateStatuses(t *testing.T) {
 }

 func TestStateUTXOs(t *testing.T) {
- _, _, vm , _ := GenesisVM(t)
+ _, _, vm, _ := GenesisVM(t)
 ctx := vm.ctx
 defer func() {
 vm.Shutdown()
@@ -318,7 +321,7 @@ func TestStateUTXOs(t *testing.T) {
 }

 func TestStateTXs(t *testing.T) {
- _, _, vm , _ := GenesisVM(t)
+ _, _, vm, _ := GenesisVM(t)
 ctx := vm.ctx
 defer func() {
 vm.Shutdown()
diff --git a/vms/avm/unique_tx.go b/vms/avm/unique_tx.go
index c47ca442c83d..1fe1daaf9ced 100644
--- a/vms/avm/unique_tx.go
+++ b/vms/avm/unique_tx.go
@@ -269,8 +269,7 @@ func (tx *UniqueTx) Bytes() []byte {
 return tx.Tx.Bytes()
 }

-// Verify the validity of this transaction
-func (tx *UniqueTx) Verify() error {
+func (tx *UniqueTx) verifyWithoutCacheWrites() error {
 switch status := tx.Status(); status {
 case choices.Unknown:
 return errUnknownTx
@@ -283,6 +282,17 @@ func (tx *UniqueTx) Verify() error {
 }
 }

+// Verify the validity of this transaction
+func (tx *UniqueTx) Verify() error {
+ if err := tx.verifyWithoutCacheWrites(); err != nil {
+ return err
+ }
+
+ tx.verifiedState = true
+ tx.vm.pubsub.Publish("verified", tx.ID())
+ return nil
+}
+
 // SyntacticVerify verifies that this transaction is well formed
 func (tx *UniqueTx) SyntacticVerify() error {
 tx.refresh()
@@ -310,11 +320,5 @@ func (tx *UniqueTx) SemanticVerify() error {
 return tx.validity
 }

- if err := tx.Tx.SemanticVerify(tx.vm, tx.UnsignedTx); err != nil {
- return err
- }
-
- tx.verifiedState = true
- tx.vm.pubsub.Publish("verified", tx.ID())
- return nil
+ return tx.Tx.SemanticVerify(tx.vm, tx.UnsignedTx)
 }
diff --git a/vms/avm/vm.go b/vms/avm/vm.go
index f4fd392b079d..e4b8f03451ea 100644
--- a/vms/avm/vm.go
+++ b/vms/avm/vm.go
@@ -49,7 +49,6 @@ var (
 errIncompatibleFx = errors.New("incompatible feature extension")
 errUnknownFx = errors.New("unknown feature extension")
 errGenesisAssetMustHaveState = errors.New("genesis asset must have non-empty state")
- errInvalidAddress = errors.New("invalid address")
 errWrongBlockchainID = errors.New("wrong blockchain ID")
 errBootstrapping = errors.New("chain is currently bootstrapping")
 errInsufficientFunds = errors.New("insufficient funds")
@@ -307,7 +306,7 @@ func (vm *VM) GetTx(txID ids.ID) (snowstorm.Tx, error) {
 }
 // Verify must be called in the case that the tx was flushed from the unique
 // cache. 
- return tx, tx.Verify() + return tx, tx.verifyWithoutCacheWrites() } /* @@ -328,7 +327,7 @@ func (vm *VM) IssueTx(b []byte) (ids.ID, error) { if err != nil { return ids.ID{}, err } - if err := tx.Verify(); err != nil { + if err := tx.verifyWithoutCacheWrites(); err != nil { return ids.ID{}, err } vm.issueTx(tx) @@ -394,7 +393,7 @@ func (vm *VM) GetAtomicUTXOs( // Returns at most [limit] UTXOs. // If [limit] <= 0 or [limit] > maxUTXOsToFetch, it is set to [maxUTXOsToFetch]. // Only returns UTXOs associated with addresses >= [startAddr]. -// For address [startAddr], only returns UTXOs whose IDs are greater than [startUtxoID]. +// For address [startAddr], only returns UTXOs whose IDs are greater than [startUTXOID]. // Returns: // * The fetched of UTXOs // * The address associated with the last UTXO fetched @@ -609,7 +608,7 @@ func (vm *VM) getUTXO(utxoID *avax.UTXOID) (*avax.UTXO, error) { txID: inputTx, } - if err := parent.Verify(); err != nil { + if err := parent.verifyWithoutCacheWrites(); err != nil { return nil, errMissingUTXO } else if status := parent.Status(); status.Decided() { return nil, errMissingUTXO diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index 37ab6b4b7701..7c7ff45e7001 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -96,23 +96,23 @@ func GetFirstTxFromGenesisTest(genesisBytes []byte, t *testing.T) *Tx { t.Fatal(err) } - for _, genesisTx := range genesis.Txs { - if len(genesisTx.Outs) != 0 { - t.Fatal("genesis tx can't have non-new assets") - } + if len(genesis.Txs) == 0 { + t.Fatal("genesis tx didn't have any txs") + } - tx := Tx{ - UnsignedTx: &genesisTx.CreateAssetTx, - } - if err := tx.SignSECP256K1Fx(c, nil); err != nil { - t.Fatal(err) - } + genesisTx := genesis.Txs[0] + if len(genesisTx.Outs) != 0 { + t.Fatal("genesis tx can't have non-new assets") + } - return &tx + tx := Tx{ + UnsignedTx: &genesisTx.CreateAssetTx, + } + if err := tx.SignSECP256K1Fx(c, nil); err != nil { + t.Fatal(err) } - t.Fatal("genesis tx didn't have any txs") - return nil + return &tx } func BuildGenesisTest(t *testing.T) []byte { @@ -516,12 +516,8 @@ func TestFxInitializationFailure(t *testing.T) { } } -type testTxBytes struct{ unsignedBytes []byte } - -func (tx *testTxBytes) UnsignedBytes() []byte { return tx.unsignedBytes } - func TestIssueTx(t *testing.T) { - genesisBytes, issuer, vm , _ := GenesisVM(t) + genesisBytes, issuer, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -551,7 +547,7 @@ func TestIssueTx(t *testing.T) { } func TestGenesisGetUTXOs(t *testing.T) { - _, _, vm , _ := GenesisVM(t) + _, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -575,7 +571,7 @@ func TestGenesisGetUTXOs(t *testing.T) { // Test issuing a transaction that consumes a currently pending UTXO. The // transaction should be issued successfully. 
func TestIssueDependentTx(t *testing.T) { - genesisBytes, issuer, vm , _ := GenesisVM(t) + genesisBytes, issuer, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -968,7 +964,7 @@ func TestIssueProperty(t *testing.T) { } func TestVMFormat(t *testing.T) { - _, _, vm , _ := GenesisVM(t) + _, _, vm, _ := GenesisVM(t) defer func() { vm.Shutdown() vm.ctx.Lock.Unlock() @@ -994,7 +990,7 @@ func TestVMFormat(t *testing.T) { } func TestTxCached(t *testing.T) { - genesisBytes, _, vm , _ := GenesisVM(t) + genesisBytes, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -1022,7 +1018,7 @@ func TestTxCached(t *testing.T) { } func TestTxNotCached(t *testing.T) { - genesisBytes, _, vm , _ := GenesisVM(t) + genesisBytes, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -1050,3 +1046,343 @@ func TestTxNotCached(t *testing.T) { assert.NoError(t, err) assert.True(t, *called, "should have called the DB") } + +func TestTxVerifyAfterIssueTx(t *testing.T) { + genesisBytes, issuer, vm, _ := GenesisVM(t) + ctx := vm.ctx + defer func() { + vm.Shutdown() + ctx.Lock.Unlock() + }() + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + key := keys[0] + firstTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := firstTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + secondTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := secondTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + parsedSecondTx, err := vm.ParseTx(secondTx.Bytes()) + if err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Verify(); err != nil { + t.Fatal(err) + } + if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Accept(); err != nil { + t.Fatal(err) + } + ctx.Lock.Unlock() + + msg := <-issuer + if msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + ctx.Lock.Lock() + + txs := vm.PendingTxs() + if len(txs) != 1 { + t.Fatalf("Should have returned %d tx(s)", 1) + } + parsedFirstTx := txs[0] + + if err := parsedFirstTx.Verify(); err == nil { + t.Fatalf("Should have errored due to a missing UTXO") + } +} + +func TestTxVerifyAfterGetTx(t *testing.T) { + genesisBytes, _, vm, _ := GenesisVM(t) + ctx := vm.ctx + defer func() { + vm.Shutdown() + ctx.Lock.Unlock() + }() 
+ + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + key := keys[0] + firstTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := firstTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + secondTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := secondTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + parsedSecondTx, err := vm.ParseTx(secondTx.Bytes()) + if err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Verify(); err != nil { + t.Fatal(err) + } + if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { + t.Fatal(err) + } + parsedFirstTx, err := vm.GetTx(firstTx.ID()) + if err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Accept(); err != nil { + t.Fatal(err) + } + if err := parsedFirstTx.Verify(); err == nil { + t.Fatalf("Should have errored due to a missing UTXO") + } +} + +func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { + genesisBytes, _, vm, _ := GenesisVM(t) + ctx := vm.ctx + defer func() { + vm.Shutdown() + ctx.Lock.Unlock() + }() + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + key := keys[0] + firstTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := firstTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + firstTxDescendant := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: firstTx.ID(), + OutputIndex: 0, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: 
genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := firstTxDescendant.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + secondTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := secondTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + parsedSecondTx, err := vm.ParseTx(secondTx.Bytes()) + if err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Verify(); err != nil { + t.Fatal(err) + } + if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { + t.Fatal(err) + } + if _, err := vm.IssueTx(firstTxDescendant.Bytes()); err != nil { + t.Fatal(err) + } + parsedFirstTx, err := vm.GetTx(firstTx.ID()) + if err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Accept(); err != nil { + t.Fatal(err) + } + if err := parsedFirstTx.Verify(); err == nil { + t.Fatalf("Should have errored due to a missing UTXO") + } +} diff --git a/vms/components/core/snowman_vm.go b/vms/components/core/snowman_vm.go index cbc058c0bb8b..13d1ff5950ba 100644 --- a/vms/components/core/snowman_vm.go +++ b/vms/components/core/snowman_vm.go @@ -20,8 +20,7 @@ import ( ) var ( - errUnmarshalBlockUndefined = errors.New("vm's UnmarshalBlock member is undefined") - errBadData = errors.New("got unexpected value from database") + errBadData = errors.New("got unexpected value from database") ) // If the status of this ID is not choices.Accepted, diff --git a/vms/nftfx/fx.go b/vms/nftfx/fx.go index 1440ff038b66..7edd7426f600 100644 --- a/vms/nftfx/fx.go +++ b/vms/nftfx/fx.go @@ -14,15 +14,10 @@ var ( errWrongUTXOType = errors.New("wrong utxo type") errWrongOperationType = errors.New("wrong operation type") errWrongCredentialType = errors.New("wrong credential type") - - errNoUTXOs = errors.New("an operation must consume at least one UTXO") - errWrongNumberOfUTXOs = errors.New("wrong number of UTXOs for the operation") - errWrongNumberOfCreds = errors.New("wrong number of credentials for the operation") - - errWrongUniqueID = errors.New("wrong unique ID provided") - errWrongBytes = errors.New("wrong bytes provided") - - errCantTransfer = errors.New("cant transfer with this fx") + errWrongNumberOfUTXOs = errors.New("wrong number of UTXOs for the operation") + errWrongUniqueID = errors.New("wrong unique ID provided") + errWrongBytes = errors.New("wrong bytes provided") + errCantTransfer = errors.New("cant transfer with this fx") ) // Fx ... 
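[Reviewer note] The unique_tx.go change earlier in this diff splits verification so that internal callers (GetTx, IssueTx, getUTXO) can re-check validity without marking the tx verified or publishing to pubsub; only the exported Verify writes that state. The following is a toy, self-contained Go sketch of that pattern, not the actual UniqueTx implementation:

```go
package main

import (
	"errors"
	"fmt"
)

// uniqueTx is a stand-in for UniqueTx: the pure status check carries no side
// effects, so internal callers can re-verify freely (e.g. after a cache
// flush), while the exported Verify is the only path that records the result
// and publishes the "verified" event.
type uniqueTx struct {
	valid         bool
	verifiedState bool
	publish       func(event string) // stand-in for vm.pubsub.Publish
}

// verifyWithoutCacheWrites performs the status check only.
func (tx *uniqueTx) verifyWithoutCacheWrites() error {
	if !tx.valid {
		return errors.New("unknown or rejected tx")
	}
	return nil
}

// Verify wraps the pure check and, on success, records verification and
// publishes the event exactly once on the externally visible path.
func (tx *uniqueTx) Verify() error {
	if err := tx.verifyWithoutCacheWrites(); err != nil {
		return err
	}
	tx.verifiedState = true
	tx.publish("verified")
	return nil
}

func main() {
	tx := &uniqueTx{valid: true, publish: func(e string) { fmt.Println("event:", e) }}
	_ = tx.verifyWithoutCacheWrites() // no event, no state change
	_ = tx.Verify()                   // prints "event: verified" once
}
```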
diff --git a/vms/platformvm/add_default_subnet_delegator_tx.go b/vms/platformvm/add_delegator_tx.go similarity index 70% rename from vms/platformvm/add_default_subnet_delegator_tx.go rename to vms/platformvm/add_delegator_tx.go index 1f9817738d99..1c1f08a16add 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx.go +++ b/vms/platformvm/add_delegator_tx.go @@ -23,15 +23,16 @@ import ( ) var ( - errInvalidState = errors.New("generated output isn't valid state") - errInvalidAmount = errors.New("invalid amount") + errDelegatorSubset = errors.New("delegator's time range must be a subset of the validator's time range") + errInvalidState = errors.New("generated output isn't valid state") + errInvalidAmount = errors.New("invalid amount") - _ UnsignedProposalTx = &UnsignedAddDefaultSubnetDelegatorTx{} - _ TimedTx = &UnsignedAddDefaultSubnetDelegatorTx{} + _ UnsignedProposalTx = &UnsignedAddDelegatorTx{} + _ TimedTx = &UnsignedAddDelegatorTx{} ) -// UnsignedAddDefaultSubnetDelegatorTx is an unsigned addDefaultSubnetDelegatorTx -type UnsignedAddDefaultSubnetDelegatorTx struct { +// UnsignedAddDelegatorTx is an unsigned addDelegatorTx +type UnsignedAddDelegatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the delegatee @@ -43,17 +44,17 @@ type UnsignedAddDefaultSubnetDelegatorTx struct { } // StartTime of this validator -func (tx *UnsignedAddDefaultSubnetDelegatorTx) StartTime() time.Time { +func (tx *UnsignedAddDelegatorTx) StartTime() time.Time { return tx.Validator.StartTime() } // EndTime of this validator -func (tx *UnsignedAddDefaultSubnetDelegatorTx) EndTime() time.Time { +func (tx *UnsignedAddDelegatorTx) EndTime() time.Time { return tx.Validator.EndTime() } // Verify return nil iff [tx] is valid -func (tx *UnsignedAddDefaultSubnetDelegatorTx) Verify( +func (tx *UnsignedAddDelegatorTx) Verify( ctx *snow.Context, c codec.Codec, feeAmount uint64, @@ -102,7 +103,7 @@ func (tx *UnsignedAddDefaultSubnetDelegatorTx) Verify( } // SemanticVerify this transaction is valid. -func (tx *UnsignedAddDefaultSubnetDelegatorTx) SemanticVerify( +func (tx *UnsignedAddDelegatorTx) SemanticVerify( vm *VM, db database.Database, stx *Tx, @@ -127,33 +128,24 @@ func (tx *UnsignedAddDefaultSubnetDelegatorTx) SemanticVerify( validatorStartTime)} } - // Ensure that the period this delegator is running is a subset of the time - // the validator is running. First, see if the validator is currently - // running. - currentValidators, err := vm.getCurrentValidators(db, constants.DefaultSubnetID) + // Ensure that the period this delegator delegates is a subset of the time + // the validator validates. 
+ vdr, isValidator, err := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID)
 if err != nil {
- return nil, nil, nil, nil, permError{fmt.Errorf("couldn't get current validators of default subnet: %w", err)}
+ return nil, nil, nil, nil, tempError{err}
 }
- pendingValidators, err := vm.getPendingValidators(db, constants.DefaultSubnetID)
- if err != nil {
- return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get pending validators of default subnet: %w", err)}
+ if isValidator && !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) {
+ return nil, nil, nil, nil, permError{errDelegatorSubset}
 }
-
- if validator, err := currentValidators.getDefaultSubnetStaker(tx.Validator.NodeID); err == nil {
- unsignedValidator := validator.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
- if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) {
- return nil, nil, nil, nil, permError{errDSValidatorSubset}
- }
- } else {
- // They aren't currently validating, so check to see if they will
- // validate in the future.
- validator, err := pendingValidators.getDefaultSubnetStaker(tx.Validator.NodeID)
+ if !isValidator {
+ // Ensure that the period this delegator delegates is a subset of the
+ // time the validator will validate.
+ vdr, willBeValidator, err := vm.willBeValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID)
 if err != nil {
- return nil, nil, nil, nil, permError{errDSValidatorSubset}
+ return nil, nil, nil, nil, tempError{err}
 }
- unsignedValidator := validator.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
- if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) {
- return nil, nil, nil, nil, permError{errDSValidatorSubset}
+ if !willBeValidator || !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) {
+ return nil, nil, nil, nil, permError{errDelegatorSubset}
 }
 }
@@ -179,10 +171,8 @@ func (tx *UnsignedAddDefaultSubnetDelegatorTx) SemanticVerify(
 return nil, nil, nil, nil, tempError{err}
 }

- // Add the delegator to the pending validators heap
- pendingValidators.Add(stx)
 // If this proposal is committed, update the pending validator set to include the delegator
- if err := vm.putPendingValidators(onCommitDB, pendingValidators, constants.DefaultSubnetID); err != nil {
+ if err := vm.enqueueStaker(onCommitDB, constants.PrimaryNetworkID, stx); err != nil {
 return nil, nil, nil, nil, tempError{err}
 }
@@ -202,12 +192,12 @@
 // InitiallyPrefersCommit returns true if the proposed validator's start time is
 // after the current wall clock time.
-func (tx *UnsignedAddDefaultSubnetDelegatorTx) InitiallyPrefersCommit(vm *VM) bool {
+func (tx *UnsignedAddDelegatorTx) InitiallyPrefersCommit(vm *VM) bool {
 return tx.StartTime().After(vm.clock.Time())
 }

 // Creates a new transaction
-func (vm *VM) newAddDefaultSubnetDelegatorTx(
+func (vm *VM) newAddDelegatorTx(
 stakeAmt, // Amount the delegator stakes
 startTime, // Unix time they start delegating
 endTime uint64, // Unix time they stop delegating
@@ -220,7 +210,7 @@
 return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err)
 }
 // Create the tx
- utx := &UnsignedAddDefaultSubnetDelegatorTx{
+ utx := &UnsignedAddDelegatorTx{
 BaseTx: BaseTx{BaseTx: avax.BaseTx{
 NetworkID: vm.Ctx.NetworkID,
 BlockchainID: vm.Ctx.ChainID,
diff --git a/vms/platformvm/add_default_subnet_delegator_tx_test.go b/vms/platformvm/add_delegator_tx_test.go
similarity index 64% 
rename from vms/platformvm/add_default_subnet_delegator_tx_test.go rename to vms/platformvm/add_delegator_tx_test.go index 49b5e6c53390..3ffd3e7b93fc 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx_test.go +++ b/vms/platformvm/add_delegator_tx_test.go @@ -15,8 +15,8 @@ import ( "github.com/ava-labs/gecko/utils/crypto" ) -func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { - vm , _ := defaultVM() +func TestAddDelegatorTxSyntacticVerify(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -27,13 +27,13 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { rewardAddress := nodeID // Case : tx is nil - var unsignedTx *UnsignedAddDefaultSubnetDelegatorTx + var unsignedTx *UnsignedAddDelegatorTx if err := unsignedTx.Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because tx is nil") } // Case: Wrong network ID - tx, err := vm.newAddDefaultSubnetDelegatorTx( + tx, err := vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -44,15 +44,15 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).NetworkID++ + tx.UnsignedTx.(*UnsignedAddDelegatorTx).NetworkID++ // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because the wrong network ID was used") } // Case: Missing Node ID - tx, err = vm.newAddDefaultSubnetDelegatorTx( + tx, err = vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -63,15 +63,15 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Validator.NodeID = ids.ShortID{} + tx.UnsignedTx.(*UnsignedAddDelegatorTx).Validator.NodeID = ids.ShortID{} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because NodeID is nil") } // Case: Not enough weight - tx, err = vm.newAddDefaultSubnetDelegatorTx( + tx, err = vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -82,15 +82,15 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Validator.Wght = vm.minStake - 1 + tx.UnsignedTx.(*UnsignedAddDelegatorTx).Validator.Wght = vm.minStake - 1 // This tx 
was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because of not enough weight") } // Case: Validation length is too short - tx, err = vm.newAddDefaultSubnetDelegatorTx( + tx, err = vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix()), @@ -101,15 +101,15 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Validator.End-- // 1 shorter than minimum stake time + tx.UnsignedTx.(*UnsignedAddDelegatorTx).Validator.End-- // 1 shorter than minimum stake time // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).syntacticallyVerified = false - if err = tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddDelegatorTx).syntacticallyVerified = false + if err = tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too short") } // Case: Validation length is too long - if tx, err = vm.newAddDefaultSubnetDelegatorTx( + if tx, err = vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix()), @@ -119,15 +119,15 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { ); err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Validator.End++ // 1 longer than maximum stake time + tx.UnsignedTx.(*UnsignedAddDelegatorTx).Validator.End++ // 1 longer than maximum stake time // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too long") } // Case: Valid - if tx, err = vm.newAddDefaultSubnetDelegatorTx( + if tx, err = vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -136,13 +136,13 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, ); err != nil { t.Fatal(err) - } else if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil { + } else if err := tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, 
vm.Ctx.AVAXAssetID, vm.minStake); err != nil { t.Fatal(err) } } -func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { - vm , _ := defaultVM() +func TestAddDelegatorTxSemanticVerify(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -164,9 +164,9 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { newValidatorID := newValidatorKey.PublicKey().Address() newValidatorStartTime := uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()) newValidatorEndTime := uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()) - // [addValidator] adds a new validator to the default subnet's pending validator set + // [addValidator] adds a new validator to the primary network's pending validator set addValidator := func(db database.Database) { - if tx, err := vm.newAddDefaultSubnetValidatorTx( + if tx, err := vm.newAddValidatorTx( vm.minStake, // stake amount newValidatorStartTime, // start time newValidatorEndTime, // end time @@ -176,14 +176,7 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, // key ); err != nil { t.Fatal(err) - } else if err := vm.putPendingValidators( - db, - &EventHeap{ - SortByStartTime: true, - Txs: []*Tx{tx}, - }, - constants.DefaultSubnetID, - ); err != nil { + } else if err := vm.addStaker(db, constants.PrimaryNetworkID, tx); err != nil { t.Fatal(err) } } @@ -210,7 +203,7 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, nil, true, - "validator stops validating default subnet earlier than non-default subnet", + "validator stops validating primary network earlier than subnet", }, { vm.minStake, @@ -221,7 +214,7 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, nil, true, - "end time is after the default subnets end time", + "end time is after the primary network end time", }, { vm.minStake, @@ -232,34 +225,34 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, nil, true, - "validator not in the current or pending validator sets of the default subnet", + "validator not in the current or pending validator sets of the subnet", }, { vm.minStake, - newValidatorStartTime - 1, // start validating non-default subnet before default subnet + newValidatorStartTime - 1, // start validating subnet before primary network newValidatorEndTime, newValidatorID, rewardAddress, []*crypto.PrivateKeySECP256K1R{keys[0]}, addValidator, true, - "validator starts validating non-default subnet before default subnet", + "validator starts validating subnet before primary network", }, { vm.minStake, newValidatorStartTime, - newValidatorEndTime + 1, // stop validating non-default subnet after stopping validating default subnet + newValidatorEndTime + 1, // stop validating subnet after stopping validating primary network newValidatorID, rewardAddress, []*crypto.PrivateKeySECP256K1R{keys[0]}, addValidator, true, - "validator stops validating default subnet before non-default subnet", + "validator stops validating primary network before subnet", }, { vm.minStake, - newValidatorStartTime, // same start time as for default subnet - newValidatorEndTime, // same end time as for default subnet + newValidatorStartTime, // same start time as for primary network + newValidatorEndTime, // same end time as for primary network newValidatorID, rewardAddress, []*crypto.PrivateKeySECP256K1R{keys[0]}, @@ -302,25 
+295,27 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { } for _, tt := range tests { - vdb.Abort() - tx, err := vm.newAddDefaultSubnetDelegatorTx( - tt.stakeAmount, - tt.startTime, - tt.endTime, - tt.nodeID, - tt.rewardAddress, - tt.feeKeys, - ) - if err != nil { - t.Fatalf("couldn't build tx in test '%s': %s", tt.description, err) - } - if tt.setup != nil { - tt.setup(vdb) - } - if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vdb, tx); err != nil && !tt.shouldErr { - t.Fatalf("test '%s' shouldn't have errored but got %s", tt.description, err) - } else if err == nil && tt.shouldErr { - t.Fatalf("expected test '%s' to error but got none", tt.description) - } + t.Run(tt.description, func(t *testing.T) { + vdb.Abort() + tx, err := vm.newAddDelegatorTx( + tt.stakeAmount, + tt.startTime, + tt.endTime, + tt.nodeID, + tt.rewardAddress, + tt.feeKeys, + ) + if err != nil { + t.Fatalf("couldn't build tx: %s", err) + } + if tt.setup != nil { + tt.setup(vdb) + } + if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vdb, tx); err != nil && !tt.shouldErr { + t.Fatalf("shouldn't have errored but got %s", err) + } else if err == nil && tt.shouldErr { + t.Fatalf("expected test to error but got none") + } + }) } } diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx.go b/vms/platformvm/add_subnet_validator_tx.go similarity index 55% rename from vms/platformvm/add_nondefault_subnet_validator_tx.go rename to vms/platformvm/add_subnet_validator_tx.go index 0f8aefc95647..2a995038b7b1 100644 --- a/vms/platformvm/add_nondefault_subnet_validator_tx.go +++ b/vms/platformvm/add_subnet_validator_tx.go @@ -20,16 +20,14 @@ import ( ) var ( - errSigsNotUniqueOrNotSorted = errors.New("control signatures not unique or not sorted") - errWrongNumberOfSignatures = errors.New("wrong number of signatures") - errDSValidatorSubset = errors.New("all subnets must be a subset of the default subnet") + errDSValidatorSubset = errors.New("all subnets must be a subset of the primary network") - _ UnsignedProposalTx = &UnsignedAddNonDefaultSubnetValidatorTx{} - _ TimedTx = &UnsignedAddNonDefaultSubnetValidatorTx{} + _ UnsignedProposalTx = &UnsignedAddSubnetValidatorTx{} + _ TimedTx = &UnsignedAddSubnetValidatorTx{} ) -// UnsignedAddNonDefaultSubnetValidatorTx is an unsigned addNonDefaultSubnetValidatorTx -type UnsignedAddNonDefaultSubnetValidatorTx struct { +// UnsignedAddSubnetValidatorTx is an unsigned addSubnetValidatorTx +type UnsignedAddSubnetValidatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // The validator @@ -39,17 +37,17 @@ type UnsignedAddNonDefaultSubnetValidatorTx struct { } // StartTime of this validator -func (tx *UnsignedAddNonDefaultSubnetValidatorTx) StartTime() time.Time { +func (tx *UnsignedAddSubnetValidatorTx) StartTime() time.Time { return tx.Validator.StartTime() } // EndTime of this validator -func (tx *UnsignedAddNonDefaultSubnetValidatorTx) EndTime() time.Time { +func (tx *UnsignedAddSubnetValidatorTx) EndTime() time.Time { return tx.Validator.EndTime() } // Verify return nil iff [tx] is valid -func (tx *UnsignedAddNonDefaultSubnetValidatorTx) Verify( +func (tx *UnsignedAddSubnetValidatorTx) Verify( ctx *snow.Context, c codec.Codec, feeAmount uint64, @@ -75,7 +73,7 @@ func (tx *UnsignedAddNonDefaultSubnetValidatorTx) Verify( } // SemanticVerify this transaction is valid. 
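The delegator test loop above now wraps each table-driven case in t.Run, so every case runs as a named subtest: a failure reports the case description automatically, and a single case can be targeted with go test -run. A minimal, self-contained sketch of the pattern; the verifyCase type and runCase helper are hypothetical stand-ins for the real setup, not code from this repository:

package example

import (
	"errors"
	"testing"
)

type verifyCase struct {
	description string
	shouldErr   bool
}

// runCase is a hypothetical stand-in for building a tx and running
// SemanticVerify against it.
func runCase(c verifyCase) error {
	if c.shouldErr {
		return errors.New(c.description)
	}
	return nil
}

func TestSemanticVerifyCases(t *testing.T) {
	tests := []verifyCase{
		{description: "end time is after the primary network end time", shouldErr: true},
		{description: "valid", shouldErr: false},
	}
	for _, tt := range tests {
		// Each iteration becomes a named subtest; `go test -run
		// 'TestSemanticVerifyCases/valid'` runs just one case.
		t.Run(tt.description, func(t *testing.T) {
			if err := runCase(tt); err != nil && !tt.shouldErr {
				t.Fatalf("shouldn't have errored but got %s", err)
			} else if err == nil && tt.shouldErr {
				t.Fatal("expected error but got none")
			}
		})
	}
}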
-func (tx *UnsignedAddNonDefaultSubnetValidatorTx) SemanticVerify(
+func (tx *UnsignedAddSubnetValidatorTx) SemanticVerify(
 	vm *VM,
 	db database.Database,
 	stx *Tx,
@@ -104,76 +102,53 @@ func (tx *UnsignedAddNonDefaultSubnetValidatorTx) SemanticVerify(
 	}
 
 	// Ensure that the period this validator validates the specified subnet is a
-	// subnet of the time they validate the default subnet. First, see if
-	// they're currently validating the default subnet.
-	currentDSValidators, err := vm.getCurrentValidators(db, constants.DefaultSubnetID)
+	// subset of the time they validate the primary network.
+	vdr, isValidator, err := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID)
 	if err != nil {
-		return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get current validators of default subnet: %v", err)}
+		return nil, nil, nil, nil, tempError{err}
 	}
-	if dsValidator, err := currentDSValidators.getDefaultSubnetStaker(tx.Validator.NodeID); err == nil {
-		unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
-		if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) {
-			return nil, nil, nil, nil,
-				permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating default subnet [%v, %v]",
-					tx.StartTime(), tx.EndTime(),
-					unsignedValidator.StartTime(), unsignedValidator.EndTime())}
-		}
-	} else {
-		// They aren't currently validating the default subnet. See if they will
-		// validate the default subnet in the future.
-		pendingDSValidators, err := vm.getPendingValidators(db, constants.DefaultSubnetID)
-		if err != nil {
-			return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get pending validators of default subnet: %v", err)}
-		}
-		dsValidator, err := pendingDSValidators.getDefaultSubnetStaker(tx.Validator.NodeID)
+	if isValidator && !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) {
+		return nil, nil, nil, nil, permError{errDSValidatorSubset}
+	}
+	if !isValidator {
+		// Ensure that the period this validator validates the specified subnet
+		// is a subset of the time they will validate the primary network.
+		vdr, willBeValidator, err := vm.willBeValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID)
 		if err != nil {
-			return nil, nil, nil, nil,
-				permError{fmt.Errorf("validator would not be validating default subnet while validating non-default subnet")}
+			return nil, nil, nil, nil, tempError{err}
 		}
-		unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
-		if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) {
-			return nil, nil, nil, nil,
-				permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating default subnet [%v, %v]",
-					tx.StartTime(), tx.EndTime(),
-					unsignedValidator.StartTime(), unsignedValidator.EndTime())}
+		if !willBeValidator || !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) {
+			return nil, nil, nil, nil, permError{errDSValidatorSubset}
 		}
 	}
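The check above reduces the old heap walks over getCurrentValidators/getPendingValidators to two direct lookups plus a containment test: a node may validate a subnet only during a period bounded by its primary network validation. A rough standalone sketch of the BoundedBy containment test this code relies on; the validator type and method body here are illustrative assumptions, not the platformvm's actual definitions:

package main

import (
	"fmt"
	"time"
)

// validator is a minimal stand-in for the staker returned by isValidator /
// willBeValidator; only the fields needed for the containment test appear.
type validator struct {
	start, end time.Time
}

// BoundedBy reports whether the validation period [v.start, v.end] lies
// inside [start, end] -- the subset test SemanticVerify applies.
func (v validator) BoundedBy(start, end time.Time) bool {
	return !v.start.Before(start) && !v.end.After(end)
}

func main() {
	primaryStart := time.Now()
	primaryEnd := primaryStart.Add(365 * 24 * time.Hour)

	subnetVdr := validator{primaryStart.Add(time.Hour), primaryEnd.Add(-time.Hour)}
	fmt.Println(subnetVdr.BoundedBy(primaryStart, primaryEnd)) // true: proper subset is fine

	tooLong := validator{primaryStart, primaryEnd.Add(time.Second)}
	fmt.Println(tooLong.BoundedBy(primaryStart, primaryEnd)) // false: outlives the primary period
}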
-	// Ensure the proposed validator is not already a validator of the specified subnet
-	currentValidators, err := vm.getCurrentValidators(db, tx.Validator.Subnet)
+	// Ensure the proposed validator is not already a validator of the specified subnet
+	_, isValidator, err = vm.isValidator(db, tx.Validator.Subnet, tx.Validator.NodeID)
 	if err != nil {
-		return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get current validators of subnet %s: %v",
-			tx.Validator.Subnet, err)}
+		return nil, nil, nil, nil, tempError{err}
 	}
-	for _, currentVdr := range vm.getValidators(currentValidators) {
-		if currentVdr.ID().Equals(tx.Validator.NodeID) {
-			return nil, nil, nil, nil, permError{fmt.Errorf("validator with ID %s already in the current validator set for subnet with ID %s",
-				tx.Validator.NodeID,
-				tx.Validator.Subnet)}
-		}
+	if isValidator {
+		return nil, nil, nil, nil, permError{fmt.Errorf("node is already validating the specified subnet")}
 	}
 
-	// Ensure the proposed validator is not already slated to validate for the specified subnet
-	pendingValidators, err := vm.getPendingValidators(db, tx.Validator.Subnet)
+	// Ensure the proposed validator is not already slated to validate the specified subnet
+	_, willBeValidator, err := vm.willBeValidator(db, tx.Validator.Subnet, tx.Validator.NodeID)
 	if err != nil {
-		return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get pending validators of subnet %s: %v",
-			tx.Validator.Subnet, err)}
+		return nil, nil, nil, nil, tempError{err}
 	}
-	for _, pendingVdr := range vm.getValidators(pendingValidators) {
-		if pendingVdr.ID().Equals(tx.Validator.NodeID) {
-			return nil, nil, nil, nil, permError{fmt.Errorf("validator with ID %s already in the pending validator set for subnet with ID %s",
-				tx.Validator.NodeID,
-				tx.Validator.Subnet)}
-		}
+	if willBeValidator {
+		return nil, nil, nil, nil, permError{fmt.Errorf("node is already slated to validate the specified subnet")}
 	}
 
 	baseTxCredsLen := len(stx.Creds) - 1
 	baseTxCreds := stx.Creds[:baseTxCredsLen]
 	subnetCred := stx.Creds[baseTxCredsLen]
 
-	subnet, txErr := vm.getSubnet(db, tx.Validator.Subnet)
+	subnet, timedErr := vm.getSubnet(db, tx.Validator.Subnet)
 	if err != nil {
-		return nil, nil, nil, nil, txErr
+		return nil, nil, nil, nil, timedErr
 	}
 	unsignedSubnet := subnet.UnsignedTx.(*UnsignedCreateSubnetTx)
 	if err := vm.fx.VerifyPermission(tx, tx.SubnetAuth, subnetCred, unsignedSubnet.Owner); err != nil {
@@ -198,9 +173,7 @@ func (tx *UnsignedAddNonDefaultSubnetValidatorTx) SemanticVerify(
 		return nil, nil, nil, nil, tempError{err}
 	}
 	// Add the validator to the set of pending validators
-	pendingValidators.Add(stx)
-	// If this proposal is committed, update the pending validator set to include the delegator
-	if err := vm.putPendingValidators(onCommitDB, pendingValidators, tx.Validator.Subnet); err != nil {
+	if err := vm.enqueueStaker(onCommitDB, tx.Validator.Subnet, stx); err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
@@ -219,12 +192,12 @@
 // InitiallyPrefersCommit returns true if the proposed validators start time is
 // after the current wall clock time,
-func (tx *UnsignedAddNonDefaultSubnetValidatorTx) InitiallyPrefersCommit(vm *VM) bool {
+func (tx *UnsignedAddSubnetValidatorTx) InitiallyPrefersCommit(vm *VM) bool {
 	return tx.StartTime().After(vm.clock.Time())
 }
 
 // Create a new transaction
-func (vm *VM) newAddNonDefaultSubnetValidatorTx(
+func (vm *VM) newAddSubnetValidatorTx(
 	weight, // Sampling weight of the new validator
 	startTime, // Unix time they start delegating
 	endTime uint64, // Unix time they top delegating
@@ -244,7 +217,7 @@ func (vm *VM) newAddNonDefaultSubnetValidatorTx(
 	signers = append(signers, subnetSigners)
 
 	// Create the tx
-	utx := 
&UnsignedAddNonDefaultSubnetValidatorTx{ + utx := &UnsignedAddSubnetValidatorTx{ BaseTx: BaseTx{BaseTx: avax.BaseTx{ NetworkID: vm.Ctx.NetworkID, BlockchainID: vm.Ctx.ChainID, diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go b/vms/platformvm/add_subnet_validator_tx_test.go similarity index 65% rename from vms/platformvm/add_nondefault_subnet_validator_tx_test.go rename to vms/platformvm/add_subnet_validator_tx_test.go index cd6a78112246..39c3f1084075 100644 --- a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go +++ b/vms/platformvm/add_subnet_validator_tx_test.go @@ -15,8 +15,8 @@ import ( "github.com/ava-labs/gecko/vms/secp256k1fx" ) -func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { - vm , _ := defaultVM() +func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -26,13 +26,13 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { nodeID := keys[0].PublicKey().Address() // Case: tx is nil - var unsignedTx *UnsignedAddNonDefaultSubnetValidatorTx + var unsignedTx *UnsignedAddSubnetValidatorTx if err := unsignedTx.Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because tx is nil") } // Case: Wrong network ID - tx, err := vm.newAddNonDefaultSubnetValidatorTx( + tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -43,15 +43,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).NetworkID++ + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).NetworkID++ // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because the wrong network ID was used") } // Case: Missing Node ID - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -62,15 +62,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.NodeID = ids.ShortID{ID: nil} + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.NodeID = ids.ShortID{ID: nil} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because NodeID is empty") } // Case: Missing Subnet ID - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, 
err = vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -81,15 +81,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.Subnet = ids.ID{ID: nil} + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.Subnet = ids.ID{ID: nil} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because Subnet ID is nil") } // Case: No weight - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( 1, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -100,15 +100,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.Wght = 0 + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.Wght = 0 // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because of no weight") } // Case: Subnet auth indices not unique - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix())-1, @@ -119,16 +119,16 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[0] = - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[1] + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[0] = + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[1] // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err = tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err = tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because sig indices weren't unique") } // Case: Validation length is too short - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( defaultWeight, 
uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix()), @@ -139,15 +139,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.End-- // 1 less than min duration + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.End-- // 1 less than min duration // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because validation length too short") } // Case: Validation length is too long - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix()), @@ -158,15 +158,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.End++ // 1 more than max duration + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.End++ // 1 more than max duration // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because validation length too long") } // Case: Valid - if tx, err = vm.newAddNonDefaultSubnetValidatorTx( + if tx, err = vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -175,13 +175,13 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ); err != nil { t.Fatal(err) - } else if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err != nil { + } else if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err != nil { t.Fatal(err) } } -func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { - vm , _ := defaultVM() +func TestAddSubnetValidatorTxSemanticVerify(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -190,10 +190,10 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { nodeID := keys[0].PublicKey().Address() - // Case: Proposed validator currently validating default subnet - // but stops validating non-default subnet after stops validating default subnet + // Case: Proposed validator currently validating primary network + // but stops validating subnet after stops validating primary network 
// (note that keys[0] is a genesis validator) - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix())+1, @@ -203,14 +203,14 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { ); err != nil { t.Fatal(err) } else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { - t.Fatal("should have failed because validator stops validating default subnet earlier than non-default subnet") + t.Fatal("should have failed because validator stops validating primary network earlier than subnet") } - // Case: Proposed validator currently validating default subnet - // and proposed non-default subnet validation period is subset of - // default subnet validation period + // Case: Proposed validator currently validating primary network + // and proposed subnet validation period is subset of + // primary network validation period // (note that keys[0] is a genesis validator) - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()+1), uint64(defaultValidateEndTime.Unix()), @@ -223,18 +223,18 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { t.Fatal(err) } - // Add a validator to pending validator set of default subnet + // Add a validator to pending validator set of primary network key, err := vm.factory.NewPrivateKey() if err != nil { t.Fatal(err) } pendingDSValidatorID := key.PublicKey().Address() - // starts validating default subnet 10 seconds after genesis + // starts validating primary network 10 seconds after genesis DSStartTime := defaultGenesisTime.Add(10 * time.Second) DSEndTime := DSStartTime.Add(5 * MinimumStakingDuration) - addDSTx, err := vm.newAddDefaultSubnetValidatorTx( + addDSTx, err := vm.newAddValidatorTx( vm.minStake, // stake amount uint64(DSStartTime.Unix()), // start time uint64(DSEndTime.Unix()), // end time @@ -248,9 +248,9 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } // Case: Proposed validator isn't in pending or current validator sets - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix()), // start validating non-default subnet before default subnet + uint64(DSStartTime.Unix()), // start validating subnet before primary network uint64(DSEndTime.Unix()), pendingDSValidatorID, testSubnet1.ID(), @@ -258,26 +258,19 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { ); err != nil { t.Fatal(err) } else if _, _, _, _, err = tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { - t.Fatal("should have failed because validator not in the current or pending validator sets of the default subnet") + t.Fatal("should have failed because validator not in the current or pending validator sets of the primary network") } - if err := vm.putPendingValidators( - vm.DB, - &EventHeap{ - SortByStartTime: true, - Txs: []*Tx{addDSTx}, - }, - constants.DefaultSubnetID, - ); err != nil { + if err := vm.addStaker(vm.DB, constants.PrimaryNetworkID, addDSTx); err != nil { t.Fatal(err) } - // Node with ID key.PublicKey().Address() now a pending validator for default subnet + // Node with ID key.PublicKey().Address() now a pending validator for primary network - // Case: Proposed validator is pending validator of default subnet - // but 
starts validating non-default subnet before default subnet
-	if tx, err := vm.newAddNonDefaultSubnetValidatorTx(
+	// Case: Proposed validator is pending validator of primary network
+	// but starts validating subnet before primary network
+	if tx, err := vm.newAddSubnetValidatorTx(
 		defaultWeight,
-		uint64(DSStartTime.Unix())-1, // start validating non-default subnet before default subnet
+		uint64(DSStartTime.Unix())-1, // start validating subnet before primary network
 		uint64(DSEndTime.Unix()),
 		pendingDSValidatorID,
 		testSubnet1.ID(),
@@ -285,32 +278,32 @@
 	); err != nil {
 		t.Fatal(err)
 	} else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil {
-		t.Fatal("should have failed because validator starts validating non-default " +
-			"subnet before starting to validate default subnet")
+		t.Fatal("should have failed because validator starts validating subnet " +
+			"before starting to validate primary network")
 	}
 
-	// Case: Proposed validator is pending validator of default subnet
-	// but stops validating non-default subnet after default subnet
-	if tx, err := vm.newAddNonDefaultSubnetValidatorTx(
+	// Case: Proposed validator is pending validator of primary network
+	// but stops validating subnet after primary network
+	if tx, err := vm.newAddSubnetValidatorTx(
 		defaultWeight,
 		uint64(DSStartTime.Unix()),
-		uint64(DSEndTime.Unix())+1, // stop validating non-default subnet after stopping validating default subnet
+		uint64(DSEndTime.Unix())+1, // stop validating subnet after stopping validating primary network
 		pendingDSValidatorID,
 		testSubnet1.ID(),
 		[]*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]},
 	); err != nil {
 		t.Fatal(err)
 	} else if _, _, _, _, err = tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil {
-		t.Fatal("should have failed because validator stops validating non-default " +
-			"subnet after stops validating default subnet")
+		t.Fatal("should have failed because validator stops validating subnet " +
+			"after it stops validating primary network")
 	}
 
-	// Case: Proposed validator is pending validator of default subnet
-	// and period validating non-default subnet is subset of time validating default subnet
-	if tx, err := vm.newAddNonDefaultSubnetValidatorTx(
+	// Case: Proposed validator is pending validator of primary network
+	// and period validating subnet is subset of time validating primary network
+	if tx, err := vm.newAddSubnetValidatorTx(
 		defaultWeight,
-		uint64(DSStartTime.Unix()), // same start time as for default subnet
-		uint64(DSEndTime.Unix()), // same end time as for default subnet
+		uint64(DSStartTime.Unix()), // same start time as for primary network
+		uint64(DSEndTime.Unix()), // same end time as for primary network
 		pendingDSValidatorID,
 		testSubnet1.ID(),
 		[]*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]},
@@ -327,7 +320,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if tx, err := vm.newAddNonDefaultSubnetValidatorTx(
+	if tx, err := vm.newAddSubnetValidatorTx(
 		defaultWeight, // weight
 		uint64(newTimestamp.Unix()), // start time
 		uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time
@@ -345,9 +338,9 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// Case: Proposed validator already validating the non-default subnet
-	// First, add validator as 
validator of non-default subnet - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + // Case: Proposed validator already validating the subnet + // First, add validator as validator of subnet + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, // weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time @@ -356,18 +349,12 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ); err != nil { t.Fatal(err) - } else if err := vm.putCurrentValidators(vm.DB, - &EventHeap{ - SortByStartTime: false, - Txs: []*Tx{tx}, - }, - testSubnet1.ID(), - ); err != nil { + } else if err := vm.addStaker(vm.DB, testSubnet1.ID(), tx); err != nil { t.Fatal(err) } // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, // weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time @@ -378,17 +365,12 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { t.Fatal(err) } else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { t.Fatal("should have failed verification because validator already validating the specified subnet") - } else if err := vm.putCurrentValidators(vm.DB, // reset validator heap - &EventHeap{ - SortByStartTime: false, - }, - testSubnet1.ID(), - ); err != nil { + } else if err := vm.removeStaker(vm.DB, testSubnet1.ID(), tx); err != nil { t.Fatal(err) } // Case: Too many signatures - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, // weight uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time @@ -402,7 +384,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } // Case: Too few signatures - tx, err := vm.newAddNonDefaultSubnetValidatorTx( + tx, err := vm.newAddSubnetValidatorTx( defaultWeight, // weight uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time @@ -414,16 +396,16 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { t.Fatal(err) } // Remove a signature - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices = - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[1:] + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices = + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[1:] // This tx was syntactically verified when it was created...pretend it wan't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false if _, _, _, _, err = tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { t.Fatal("should have failed verification because not enough control sigs") } // Case: Control Signature from invalid key (keys[3] is not a control key) - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( defaultWeight, // weight uint64(defaultGenesisTime.Unix()), // start time 
uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time @@ -446,7 +428,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { // Case: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, // weight uint64(defaultGenesisTime.Unix())+1, // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time @@ -455,13 +437,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ); err != nil { t.Fatal(err) - } else if err = vm.putPendingValidators(vm.DB, // Node ID nodeIDKey.PublicKey().Address() now pending - &EventHeap{ // validator for subnet testSubnet1.ID - SortByStartTime: true, - Txs: []*Tx{tx}, - }, - testSubnet1.ID(), - ); err != nil { + } else if err = vm.addStaker(vm.DB, testSubnet1.ID(), tx); err != nil { t.Fatal(err) } else if _, _, _, _, err = tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { t.Fatal("should have failed verification because validator already in pending validator set of the specified subnet") @@ -469,8 +445,8 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } // Test that marshalling/unmarshalling works -func TestAddNonDefaultSubnetValidatorMarshal(t *testing.T) { - vm , _ := defaultVM() +func TestAddSubnetValidatorMarshal(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -480,7 +456,7 @@ func TestAddNonDefaultSubnetValidatorMarshal(t *testing.T) { var unmarshaledTx Tx // valid tx - tx, err := vm.newAddNonDefaultSubnetValidatorTx( + tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -496,11 +472,11 @@ func TestAddNonDefaultSubnetValidatorMarshal(t *testing.T) { t.Fatal(err) } else if err := unmarshaledTx.Sign(vm.codec, nil); err != nil { t.Fatal(err) - } else if err := unmarshaledTx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err != nil { + } else if err := unmarshaledTx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err != nil { t.Fatal(err) } - if tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Memo == nil { // reflect.DeepEqual considers []byte{} and nil to be different so change nil to []byte{} - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Memo = []byte{} + if tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Memo == nil { // reflect.DeepEqual considers []byte{} and nil to be different so change nil to []byte{} + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Memo = []byte{} } if !reflect.DeepEqual(*tx, unmarshaledTx) { t.Fatal("should be equal") diff --git a/vms/platformvm/add_default_subnet_validator_tx.go b/vms/platformvm/add_validator_tx.go similarity index 75% rename from vms/platformvm/add_default_subnet_validator_tx.go rename to vms/platformvm/add_validator_tx.go index b9c09c388ecb..4631123c7126 100644 --- a/vms/platformvm/add_default_subnet_validator_tx.go +++ b/vms/platformvm/add_validator_tx.go @@ -24,18 +24,17 @@ import ( var ( errNilTx = errors.New("tx is nil") - errWrongNetworkID = errors.New("tx was issued with a different network ID") errWeightTooSmall = errors.New("weight of this validator is too low") 
 	errStakeTooShort = errors.New("staking period is too short")
 	errStakeTooLong  = errors.New("staking period is too long")
 	errTooManyShares = fmt.Errorf("a staker can only require at most %d shares from delegators", NumberOfShares)
 
-	_ UnsignedProposalTx = &UnsignedAddDefaultSubnetValidatorTx{}
-	_ TimedTx            = &UnsignedAddDefaultSubnetValidatorTx{}
+	_ UnsignedProposalTx = &UnsignedAddValidatorTx{}
+	_ TimedTx            = &UnsignedAddValidatorTx{}
 )
 
-// UnsignedAddDefaultSubnetValidatorTx is an unsigned addDefaultSubnetValidatorTx
-type UnsignedAddDefaultSubnetValidatorTx struct {
+// UnsignedAddValidatorTx is an unsigned addValidatorTx
+type UnsignedAddValidatorTx struct {
 	// Metadata, inputs and outputs
 	BaseTx `serialize:"true"`
 	// Describes the delegatee
@@ -50,17 +49,17 @@
 }
 
 // StartTime of this validator
-func (tx *UnsignedAddDefaultSubnetValidatorTx) StartTime() time.Time {
+func (tx *UnsignedAddValidatorTx) StartTime() time.Time {
 	return tx.Validator.StartTime()
 }
 
 // EndTime of this validator
-func (tx *UnsignedAddDefaultSubnetValidatorTx) EndTime() time.Time {
+func (tx *UnsignedAddValidatorTx) EndTime() time.Time {
 	return tx.Validator.EndTime()
 }
 
 // Verify return nil iff [tx] is valid
-func (tx *UnsignedAddDefaultSubnetValidatorTx) Verify(
+func (tx *UnsignedAddValidatorTx) Verify(
 	ctx *snow.Context,
 	c codec.Codec,
 	feeAmount uint64,
@@ -110,7 +109,7 @@ func (tx *UnsignedAddDefaultSubnetValidatorTx) Verify(
 }
 
 // SemanticVerify this transaction is valid.
-func (tx *UnsignedAddDefaultSubnetValidatorTx) SemanticVerify(
+func (tx *UnsignedAddValidatorTx) SemanticVerify(
 	vm *VM,
 	db database.Database,
 	stx *Tx,
@@ -135,28 +134,24 @@ func (tx *UnsignedAddDefaultSubnetValidatorTx) SemanticVerify(
 			startTime)}
 	}
 
-	// Ensure the proposed validator is not already a validator of the specified subnet
-	currentValidators, err := vm.getCurrentValidators(db, constants.DefaultSubnetID)
+	_, isValidator, err := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID)
 	if err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
-	for _, currentVdr := range vm.getValidators(currentValidators) {
-		if currentVdr.ID().Equals(tx.Validator.NodeID) {
-			return nil, nil, nil, nil, permError{fmt.Errorf("validator %s already is already a Default Subnet validator",
-				tx.Validator.NodeID)}
-		}
+	if isValidator {
+		return nil, nil, nil, nil, permError{fmt.Errorf("validator %s is already a primary network validator",
+			tx.Validator.NodeID)}
 	}
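Both failure paths above are wrapped: permError for proposals that can never become valid, and tempError for failures that may succeed on retry, typically database errors. A simplified sketch of that classification pattern follows; the types here are hypothetical stand-ins, since the real wrappers are unexported platformvm types:

package main

import (
	"errors"
	"fmt"
)

// permError marks a failure that can never succeed (e.g. the node is
// already a validator); tempError marks one that might succeed on retry
// (e.g. a transient database error). Simplified stand-ins for the
// wrappers used in the diff, not the real platformvm types.
type permError struct{ error }
type tempError struct{ error }

// classify shows how a caller can branch on the failure class.
func classify(err error) string {
	switch err.(type) {
	case permError:
		return "reject: transaction is permanently invalid"
	case tempError:
		return "retry: failure may be transient"
	default:
		return "unknown error class"
	}
}

func main() {
	fmt.Println(classify(permError{errors.New("already a primary network validator")}))
	fmt.Println(classify(tempError{errors.New("database unavailable")}))
}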
-	// Ensure the proposed validator is not already slated to validate for the specified subnet
-	pendingValidators, err := vm.getPendingValidators(db, constants.DefaultSubnetID)
+	// Ensure the proposed validator is not already slated to validate the primary network
+	_, willBeValidator, err := vm.willBeValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID)
 	if err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
-	for _, pendingVdr := range vm.getValidators(pendingValidators) {
-		if pendingVdr.ID().Equals(tx.Validator.NodeID) {
-			return nil, nil, nil, nil, tempError{fmt.Errorf("validator %s is already a pending Default Subnet validator",
-				tx.Validator.NodeID)}
-		}
+	if willBeValidator {
+		return nil, nil, nil, nil, permError{fmt.Errorf("validator %s is already a pending primary network validator",
+			tx.Validator.NodeID)}
 	}
 
 	outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.Stake))
@@ -182,9 +177,7 @@ func (tx *UnsignedAddDefaultSubnetValidatorTx) SemanticVerify(
 	}
 	// Add validator to set of pending validators
-	pendingValidators.Add(stx)
-	// If this proposal is committed, update the pending validator set to include the validator
-	if err := vm.putPendingValidators(onCommitDB, pendingValidators, constants.DefaultSubnetID); err != nil {
+	if err := vm.enqueueStaker(onCommitDB, constants.PrimaryNetworkID, stx); err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
@@ -194,7 +187,7 @@
 		return nil, nil, nil, nil, tempError{err}
 	}
 	// Produce the UTXOS
-	if err := vm.produceOutputs(onAbortDB, txID, tx.Outs); err != nil {
+	if err := vm.produceOutputs(onAbortDB, txID, outs); err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
@@ -203,12 +196,12 @@
 // InitiallyPrefersCommit returns true if the proposed validators start time is
 // after the current wall clock time,
-func (tx *UnsignedAddDefaultSubnetValidatorTx) InitiallyPrefersCommit(vm *VM) bool {
+func (tx *UnsignedAddValidatorTx) InitiallyPrefersCommit(vm *VM) bool {
 	return tx.StartTime().After(vm.clock.Time())
 }
 
-// NewAddDefaultSubnetValidatorTx returns a new NewAddDefaultSubnetValidatorTx
-func (vm *VM) newAddDefaultSubnetValidatorTx(
+// newAddValidatorTx returns a new addValidatorTx
+func (vm *VM) newAddValidatorTx(
 	stakeAmt, // Amount the delegator stakes
 	startTime, // Unix time they start delegating
 	endTime uint64, // Unix time they stop delegating
@@ -222,7 +215,7 @@ func (vm *VM) newAddDefaultSubnetValidatorTx(
 		return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err)
 	}
 	// Create the tx
-	utx := &UnsignedAddDefaultSubnetValidatorTx{
+	utx := &UnsignedAddValidatorTx{
 		BaseTx: BaseTx{BaseTx: avax.BaseTx{
 			NetworkID:    vm.Ctx.NetworkID,
 			BlockchainID: vm.Ctx.ChainID,
diff --git a/vms/platformvm/add_default_subnet_validator_tx_test.go b/vms/platformvm/add_validator_tx_test.go
similarity index 64%
rename from vms/platformvm/add_default_subnet_validator_tx_test.go
rename to vms/platformvm/add_validator_tx_test.go
index 4a7449503db7..64c795ce5875 100644
--- a/vms/platformvm/add_default_subnet_validator_tx_test.go
+++ b/vms/platformvm/add_validator_tx_test.go
@@ -16,8 +16,8 @@ import (
 	"github.com/ava-labs/gecko/vms/secp256k1fx"
 )
 
-func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) {
-	vm , _ := defaultVM()
+func TestAddValidatorTxSyntacticVerify(t *testing.T) {
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -31,13 +31,13 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) {
 	nodeID := key.PublicKey().Address()
 
 	// Case: tx is nil
-	var unsignedTx *UnsignedAddDefaultSubnetValidatorTx
+	var unsignedTx *UnsignedAddValidatorTx
 	if err := unsignedTx.Verify(vm.Ctx, vm.codec, vm.txFee, 
vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because tx is nil") } // Case 3: Wrong Network ID - tx, err := vm.newAddDefaultSubnetValidatorTx( + tx, err := vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -49,15 +49,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).NetworkID++ + tx.UnsignedTx.(*UnsignedAddValidatorTx).NetworkID++ // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because the wrong network ID was used") } // Case: Node ID is nil - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -69,15 +69,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.NodeID = ids.ShortID{ID: nil} + tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.NodeID = ids.ShortID{ID: nil} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because node ID is nil") } // Case: Stake owner has no addresses - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -89,7 +89,7 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Stake = []*avax.TransferableOutput{{ + tx.UnsignedTx.(*UnsignedAddValidatorTx).Stake = []*avax.TransferableOutput{{ Asset: avax.Asset{ID: avaxAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: vm.minStake, @@ -101,13 +101,13 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { }, }} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because stake owner has no addresses") } // Case: Rewards owner 
has no addresses - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -119,19 +119,19 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).RewardsOwner = &secp256k1fx.OutputOwners{ + tx.UnsignedTx.(*UnsignedAddValidatorTx).RewardsOwner = &secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, Addrs: nil, } // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because rewards owner has no addresses") } // Case: Stake amount too small - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -143,15 +143,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.Wght-- // 1 less than minimum amount + tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.Wght-- // 1 less than minimum amount // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because stake amount too small") } // Case: Too many shares - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -163,15 +163,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Shares++ // 1 more than max amount + tx.UnsignedTx.(*UnsignedAddValidatorTx).Shares++ // 1 more than max amount // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because of too many shares") } // Case: Validation length is too short - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), 
uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix()), @@ -183,15 +183,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.End-- // 1 less than min duration + tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.End-- // 1 less than min duration // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too short") } // Case: Validation length is negative - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix()), @@ -203,15 +203,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.End = tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.Start - 1 + tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.End = tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.Start - 1 // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too short") } // Case: Validation length is too long - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix()), @@ -223,15 +223,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.End++ // 1 more than maximum duration + tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.End++ // 1 more than maximum duration // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too long") } // Case: Valid - if tx, err := vm.newAddDefaultSubnetValidatorTx( + if tx, err := vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), 
 		uint64(defaultValidateEndTime.Unix()),
@@ -241,14 +241,14 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) {
 		[]*crypto.PrivateKeySECP256K1R{keys[0]},
 	); err != nil {
 		t.Fatal(err)
-	} else if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil {
+	} else if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil {
 		t.Fatal(err)
 	}
 }

-// Test AddDefaultSubnetValidatorTx.SemanticVerify
-func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
-	vm , _ := defaultVM()
+// Test AddValidatorTx.SemanticVerify
+func TestAddValidatorTxSemanticVerify(t *testing.T) {
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -263,7 +263,7 @@ func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
 	nodeID := key.PublicKey().Address()

 	// Case: Validator's start time too early
-	if tx, err := vm.newAddDefaultSubnetValidatorTx(
+	if tx, err := vm.newAddValidatorTx(
 		vm.minStake,
 		uint64(defaultValidateStartTime.Unix())-1,
 		uint64(defaultValidateEndTime.Unix()),
@@ -278,8 +278,8 @@ func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
 	}
 	vDB.Abort()

-	// Case: Validator already validating default subnet
-	if tx, err := vm.newAddDefaultSubnetValidatorTx(
+	// Case: Validator already validating primary network
+	if tx, err := vm.newAddValidatorTx(
 		vm.minStake,
 		uint64(defaultValidateStartTime.Unix()),
 		uint64(defaultValidateEndTime.Unix()),
@@ -294,13 +294,13 @@ func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
 	}
 	vDB.Abort()

-	// Case: Validator in pending validator set of default subnet
+	// Case: Validator in pending validator set of primary network
 	key2, err := vm.factory.NewPrivateKey()
 	if err != nil {
 		t.Fatal(err)
 	}
 	startTime := defaultGenesisTime.Add(1 * time.Second)
-	tx, err := vm.newAddDefaultSubnetValidatorTx(
+	tx, err := vm.newAddValidatorTx(
 		vm.minStake,                                          // stake amount
 		uint64(startTime.Unix()),                             // start time
 		uint64(startTime.Add(MinimumStakingDuration).Unix()), // end time
@@ -311,13 +311,7 @@ func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
 	)
 	if err != nil {
 		t.Fatal(err)
-	} else if err := vm.putPendingValidators(vDB, // Put validator in pending validator set
-		&EventHeap{
-			SortByStartTime: true,
-			Txs:             []*Tx{tx},
-		},
-		constants.DefaultSubnetID,
-	); err != nil {
+	} else if err := vm.addStaker(vDB, constants.PrimaryNetworkID, tx); err != nil {
 		t.Fatal(err)
 	} else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vDB, tx); err == nil {
 		t.Fatal("should have failed because validator in pending validator set")
@@ -325,7 +319,7 @@ func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
 	vDB.Abort()

 	// Case: Validator doesn't have enough tokens to cover stake amount
-	if _, err := vm.newAddDefaultSubnetValidatorTx( // create the tx
+	if _, err := vm.newAddValidatorTx( // create the tx
 		vm.minStake,
 		uint64(defaultValidateStartTime.Unix()),
 		uint64(defaultValidateEndTime.Unix()),
diff --git a/vms/platformvm/advance_time_tx.go b/vms/platformvm/advance_time_tx.go
index 4ffdc8a2dcba..885d1063cc50 100644
--- a/vms/platformvm/advance_time_tx.go
+++ b/vms/platformvm/advance_time_tx.go
@@ -4,14 +4,11 @@
 package platformvm

 import (
-	"bytes"
 	"fmt"
 	"time"

 	"github.com/ava-labs/gecko/database"
 	"github.com/ava-labs/gecko/database/versiondb"
-	"github.com/ava-labs/gecko/ids"
-	"github.com/ava-labs/gecko/utils/constants"
 	"github.com/ava-labs/gecko/vms/components/avax"
 )
@@ -66,95 +63,30 @@ func (tx *UnsignedAdvanceTimeTx) SemanticVerify(
 			tx.Timestamp(), currentTimestamp)}
 	}

-	// Only allow timestamp to move forward as far as the next validator's end time
-	if nextValidatorEndTime := vm.nextValidatorChangeTime(db, false); tx.Time > uint64(nextValidatorEndTime.Unix()) {
-		return nil, nil, nil, nil, permError{fmt.Errorf("proposed timestamp (%s) later than next validator end time (%s)",
-			tx.Timestamp(), nextValidatorEndTime)}
-	}
-
-	// Only allow timestamp to move forward as far as the next pending validator's start time
-	if nextValidatorStartTime := vm.nextValidatorChangeTime(db, true); tx.Time > uint64(nextValidatorStartTime.Unix()) {
-		return nil, nil, nil, nil, permError{fmt.Errorf("proposed timestamp (%s) later than next validator start time (%s)",
-			tx.Timestamp(), nextValidatorStartTime)}
+	// Only allow timestamp to move forward as far as the next staker set change time
+	nextStakerChangeTime, err := vm.nextStakerChangeTime(db)
+	if err != nil {
+		return nil, nil, nil, nil, tempError{err}
+	} else if tx.Time > uint64(nextStakerChangeTime.Unix()) {
+		return nil, nil, nil, nil, permError{fmt.Errorf("proposed timestamp (%s) later than next staker change time (%s)",
+			tx.Timestamp(), nextStakerChangeTime)}
 	}

-	// Calculate what the validator sets will be given new timestamp
-	// Move validators from pending to current if their start time is <= new timestamp.
-	// Remove validators from current if their end time <= proposed timestamp
-
 	// Specify what the state of the chain will be if this proposal is committed
 	onCommitDB := versiondb.New(db)
 	if err := vm.putTimestamp(onCommitDB, tx.Timestamp()); err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
-
-	current, pending, _, _, err := vm.calculateValidators(db, tx.Timestamp(), constants.DefaultSubnetID)
-	if err != nil {
-		return nil, nil, nil, nil, tempError{err}
-	} else if err := vm.putCurrentValidators(onCommitDB, current, constants.DefaultSubnetID); err != nil {
-		return nil, nil, nil, nil, tempError{err}
-	} else if err := vm.putPendingValidators(onCommitDB, pending, constants.DefaultSubnetID); err != nil {
-		return nil, nil, nil, nil, tempError{err}
-	}
-
-	// For each Subnet, calculate what current and pending validator sets should be
-	// given new timestamp
-
-	// Key: Subnet ID
-	// Value: IDs of validators that will have started validating this Subnet when
-	// timestamp is advanced to tx.Timestamp()
-	startedValidating := make(map[[32]byte]ids.ShortSet, 0)
-	subnets, err := vm.getSubnets(db)
-	if err != nil {
+	if err := vm.updateValidators(onCommitDB); err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
-	for _, subnet := range subnets {
-		subnetID := subnet.ID()
-		if current, pending, started, _, err := vm.calculateValidators(db, tx.Timestamp(), subnetID); err != nil {
-			return nil, nil, nil, nil, tempError{err}
-		} else if err := vm.putCurrentValidators(onCommitDB, current, subnetID); err != nil {
-			return nil, nil, nil, nil, tempError{err}
-		} else if err := vm.putPendingValidators(onCommitDB, pending, subnetID); err != nil {
-			return nil, nil, nil, nil, tempError{err}
-		} else {
-			startedValidating[subnet.ID().Key()] = started
-		}
-	}

 	// If this block is committed, update the validator sets
 	// onAbortDB or onCommitDB should commit (flush to vm.DB) before this is called
 	onCommitFunc := func() error {
+
 		// For each Subnet, update the node's validator manager to reflect current Subnet membership
-		subnets, err := vm.getSubnets(vm.DB)
-		if err != nil {
-			return err
-		}
-		for _, subnet := range subnets {
-			if err := vm.updateValidators(subnet.ID()); err != nil {
-				return err
-			}
-		}
-		if err := vm.updateValidators(constants.DefaultSubnetID); err != nil {
-			return err
-		}
-
-		// If this node started validating a Subnet, create the blockchains that the Subnet validates
-		chains, err := vm.getChains(vm.DB) // all blockchains
-		if err != nil {
-			return err
-		}
-		for subnetID, validatorIDs := range startedValidating {
-			if !validatorIDs.Contains(vm.Ctx.NodeID) {
-				continue
-			}
-			for _, chain := range chains {
-				unsignedChain := chain.UnsignedTx.(*UnsignedCreateChainTx)
-				if bytes.Equal(subnetID[:], unsignedChain.SubnetID.Bytes()) {
-					vm.createChain(chain)
-				}
-			}
-		}
-		return nil
+		return vm.updateVdrMgr(false)
 	}

 	// State doesn't change if this proposal is aborted
diff --git a/vms/platformvm/advance_time_tx_test.go b/vms/platformvm/advance_time_tx_test.go
index 056378829478..aed48a128c4b 100644
--- a/vms/platformvm/advance_time_tx_test.go
+++ b/vms/platformvm/advance_time_tx_test.go
@@ -38,7 +38,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) {
 	pendingValidatorEndTime := pendingValidatorStartTime.Add(MinimumStakingDuration)
 	nodeIDKey, _ := vm.factory.NewPrivateKey()
 	nodeID := nodeIDKey.PublicKey().Address()
-	addPendingValidatorTx, err := vm.newAddDefaultSubnetValidatorTx(
+	addPendingValidatorTx, err := vm.newAddValidatorTx(
 		vm.minStake,
 		uint64(pendingValidatorStartTime.Unix()),
 		uint64(pendingValidatorEndTime.Unix()),
@@ -51,15 +51,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) {
 		t.Fatal(err)
 	}

-	err = vm.putPendingValidators(
-		vm.DB,
-		&EventHeap{
-			SortByStartTime: true,
-			Txs:             []*Tx{addPendingValidatorTx},
-		},
-		constants.DefaultSubnetID,
-	)
-	if err != nil {
+	if err := vm.enqueueStaker(vm.DB, constants.PrimaryNetworkID, addPendingValidatorTx); err != nil {
 		t.Fatal(err)
 	}

@@ -106,7 +98,7 @@ func TestAdvanceTimeTxUpdateValidators(t *testing.T) {
 	pendingValidatorEndTime := pendingValidatorStartTime.Add(MinimumStakingDuration)
 	nodeIDKey, _ := vm.factory.NewPrivateKey()
 	nodeID := nodeIDKey.PublicKey().Address()
-	addPendingValidatorTx, err := vm.newAddDefaultSubnetValidatorTx(
+	addPendingValidatorTx, err := vm.newAddValidatorTx(
 		vm.minStake,
 		uint64(pendingValidatorStartTime.Unix()),
 		uint64(pendingValidatorEndTime.Unix()),
@@ -119,14 +111,7 @@ func TestAdvanceTimeTxUpdateValidators(t *testing.T) {
 		t.Fatal(err)
 	}

-	if err := vm.putPendingValidators(
-		vm.DB,
-		&EventHeap{
-			SortByStartTime: true,
-			Txs:             []*Tx{addPendingValidatorTx},
-		},
-		constants.DefaultSubnetID,
-	); err != nil {
+	if err := vm.enqueueStaker(vm.DB, constants.PrimaryNetworkID, addPendingValidatorTx); err != nil {
 		t.Fatal(err)
 	}

@@ -139,28 +124,28 @@ func TestAdvanceTimeTxUpdateValidators(t *testing.T) {
 		t.Fatal(err)
 	}

-	if onCommitCurrentEvents, err := vm.getCurrentValidators(onCommit, constants.DefaultSubnetID); err != nil {
+	if validatorTx, isValidator, err := vm.isValidator(onCommit, constants.PrimaryNetworkID, nodeID); err != nil {
 		t.Fatal(err)
-	} else if onCommitCurrentEvents.Len() != len(keys)+1 { // Each key in [keys] is a validator to start with...then we added a validator
+	} else if !isValidator {
 		t.Fatalf("Should have added the validator to the validator set")
-	}
-
-	if onCommitPendingEvents, err := vm.getPendingValidators(onCommit, constants.DefaultSubnetID); err != nil {
+	} else if !validatorTx.ID().Equals(addPendingValidatorTx.ID()) {
+		t.Fatalf("Added the wrong tx to the validator set")
+	} else if _, willBeValidator, err := vm.willBeValidator(onCommit, constants.PrimaryNetworkID, nodeID); err != nil {
 		t.Fatal(err)
-	} else if onCommitPendingEvents.Len() != 0 {
+	} else if willBeValidator {
 		t.Fatalf("Should have removed the validator from the pending validator set")
 	}

-	if onAbortCurrentEvents, err := vm.getCurrentValidators(onAbort, constants.DefaultSubnetID); err != nil {
+	if _, isValidator, err := vm.isValidator(onAbort, constants.PrimaryNetworkID, nodeID); err != nil {
 		t.Fatal(err)
-	} else if onAbortCurrentEvents.Len() != len(keys) {
+	} else if isValidator {
 		t.Fatalf("Shouldn't have added the validator to the validator set")
-	}
-
-	if onAbortPendingEvents, err := vm.getPendingValidators(onAbort, constants.DefaultSubnetID); err != nil {
+	} else if validatorTx, willBeValidator, err := vm.willBeValidator(onAbort, constants.PrimaryNetworkID, nodeID); err != nil {
 		t.Fatal(err)
-	} else if onAbortPendingEvents.Len() != 1 {
+	} else if !willBeValidator {
 		t.Fatalf("Shouldn't have removed the validator from the pending validator set")
+	} else if !validatorTx.ID().Equals(addPendingValidatorTx.ID()) {
+		t.Fatalf("Added the wrong tx to the pending validator set")
 	}
 }
diff --git a/vms/platformvm/base_tx_test.go b/vms/platformvm/base_tx_test.go
index 53a76539487c..d740bb24c871 100644
--- a/vms/platformvm/base_tx_test.go
+++ b/vms/platformvm/base_tx_test.go
@@ -10,7 +10,7 @@ import (
 )

 func TestBaseTxMarshalJSON(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
diff --git a/vms/platformvm/common_blocks.go b/vms/platformvm/common_blocks.go
index b62f92d57552..a54695c5988e 100644
--- a/vms/platformvm/common_blocks.go
+++ b/vms/platformvm/common_blocks.go
@@ -206,6 +206,9 @@ func (cdb *CommonDecisionBlock) onAccept() database.Database {
 	if cdb.Status().Decided() {
 		return cdb.vm.DB
 	}
+	if cdb.onAcceptDB == nil {
+		panic("onAcceptDB unexpectedly nil for an undecided block")
+	}
 	return cdb.onAcceptDB
 }
diff --git a/vms/platformvm/create_chain_tx.go b/vms/platformvm/create_chain_tx.go
index 8759f56ba0d1..4a3af086f857 100644
--- a/vms/platformvm/create_chain_tx.go
+++ b/vms/platformvm/create_chain_tx.go
@@ -19,12 +19,11 @@ import (
 )

 var (
-	errInvalidVMID                   = errors.New("invalid VM ID")
-	errFxIDsNotSortedAndUnique       = errors.New("feature extensions IDs must be sorted and unique")
-	errControlSigsNotSortedAndUnique = errors.New("control signatures must be sorted and unique")
-	errNameTooLong                   = errors.New("name too long")
-	errGenesisTooLong                = errors.New("genesis too long")
-	errIllegalNameCharacter          = errors.New("illegal name character")
+	errInvalidVMID             = errors.New("invalid VM ID")
+	errFxIDsNotSortedAndUnique = errors.New("feature extensions IDs must be sorted and unique")
+	errNameTooLong             = errors.New("name too long")
+	errGenesisTooLong          = errors.New("genesis too long")
+	errIllegalNameCharacter    = errors.New("illegal name character")

 	_ UnsignedDecisionTx = &UnsignedCreateChainTx{}
 )
@@ -66,7 +65,7 @@ func (tx *UnsignedCreateChainTx) Verify(
 		return nil
 	case tx.SubnetID.IsZero():
 		return errNoSubnetID
-	case tx.SubnetID.Equals(constants.DefaultSubnetID):
+	case tx.SubnetID.Equals(constants.PrimaryNetworkID):
 		return errDSCantValidate
 	case len(tx.ChainName) > maxNameLen:
 		return errNameTooLong
diff --git a/vms/platformvm/create_chain_tx_test.go b/vms/platformvm/create_chain_tx_test.go
index 388e29498881..a95057beefa9 100644
--- a/vms/platformvm/create_chain_tx_test.go
+++ b/vms/platformvm/create_chain_tx_test.go
@@ -14,7 +14,7 @@ import (
 )

 func TestUnsignedCreateChainTxVerify(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -43,7 +43,7 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) {
 			fxIDs:     nil,
 			chainName: "yeet",
 			keys:      []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]},
-			setup:     func(tx *UnsignedCreateChainTx) *UnsignedCreateChainTx { tx = nil; return tx },
+			setup:     func(*UnsignedCreateChainTx) *UnsignedCreateChainTx { return nil },
 		},
 		{
 			description: "vm ID is empty",
@@ -146,7 +146,7 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) {

 // Ensure SemanticVerify fails when there are not enough control sigs
 func TestCreateChainTxInsufficientControlSigs(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -174,7 +174,7 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) {

 // Ensure SemanticVerify fails when an incorrect control signature is given
 func TestCreateChainTxWrongControlSig(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -214,7 +214,7 @@ func TestCreateChainTxWrongControlSig(t *testing.T) {
 // Ensure SemanticVerify fails when the Subnet the blockchain specifies as
 // its validator set doesn't exist
 func TestCreateChainTxNoSuchSubnet(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -239,7 +239,7 @@ func TestCreateChainTxNoSuchSubnet(t *testing.T) {
 }

 func TestCreateChainTxAlreadyExists(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -272,7 +272,7 @@ func TestCreateChainTxAlreadyExists(t *testing.T) {

 // Ensure valid tx passes semanticVerify
 func TestCreateChainTxValid(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
diff --git a/vms/platformvm/create_subnet_tx.go b/vms/platformvm/create_subnet_tx.go
index de85351ffd10..63ab11d7554c 100644
--- a/vms/platformvm/create_subnet_tx.go
+++ b/vms/platformvm/create_subnet_tx.go
@@ -93,11 +93,9 @@ func (tx *UnsignedCreateSubnetTx) SemanticVerify(
 	if err := vm.produceOutputs(db, txID, tx.Outs); err != nil {
 		return nil, tempError{err}
 	}
-
 	// Register new subnet in validator manager
 	onAccept := func() error {
-		vm.validators.PutValidatorSet(tx.ID(), validators.NewSet())
-		return nil
+		return vm.vdrMgr.Set(tx.ID(), validators.NewSet())
 	}
 	return onAccept, nil
 }
diff --git a/vms/platformvm/event_heap.go b/vms/platformvm/event_heap.go
index 6fc30d4f7a01..01edffe0b78b 100644
--- a/vms/platformvm/event_heap.go
+++ b/vms/platformvm/event_heap.go
@@ -17,6 +17,7 @@ type TimedTx interface {
 	ID() ids.ID
 	StartTime() time.Time
 	EndTime() time.Time
+	Bytes() []byte
 }

 // EventHeap is a collection of timedTxs where elements are ordered by either
@@ -48,8 +49,8 @@ func (h *EventHeap) Less(i, j int) bool {
 	case iTime.Unix() < jTime.Unix():
 		return true
 	case iTime == jTime:
-		_, iOk := iTx.(*UnsignedAddDefaultSubnetValidatorTx)
-		_, jOk := jTx.(*UnsignedAddDefaultSubnetValidatorTx)
+		_, iOk := iTx.(*UnsignedAddValidatorTx)
+		_, jOk := jTx.(*UnsignedAddValidatorTx)

 		if iOk != jOk {
 			return iOk == h.SortByStartTime
@@ -95,10 +96,10 @@ func (h *EventHeap) Bytes() ([]byte, error) {
 	return Codec.Marshal(h)
 }

-// getDefaultSubnetStaker ...
-func (h *EventHeap) getDefaultSubnetStaker(id ids.ShortID) (*Tx, error) {
+// getPrimaryStaker ...
+func (h *EventHeap) getPrimaryStaker(id ids.ShortID) (*Tx, error) {
 	for _, txIntf := range h.Txs {
-		tx, ok := txIntf.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
+		tx, ok := txIntf.UnsignedTx.(*UnsignedAddValidatorTx)
 		if !ok {
 			continue
 		}
@@ -106,5 +107,5 @@ func (h *EventHeap) getDefaultSubnetStaker(id ids.ShortID) (*Tx, error) {
 			return txIntf, nil
 		}
 	}
-	return nil, errors.New("couldn't find validator in the default subnet")
+	return nil, errors.New("couldn't find validator in the primary network")
 }
diff --git a/vms/platformvm/event_heap_test.go b/vms/platformvm/event_heap_test.go
index 99310c78b84f..6e11bf4ea78c 100644
--- a/vms/platformvm/event_heap_test.go
+++ b/vms/platformvm/event_heap_test.go
@@ -11,7 +11,7 @@ import (
 )

 func TestTxHeapStart(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -20,7 +20,7 @@ func TestTxHeapStart(t *testing.T) {

 	txHeap := EventHeap{SortByStartTime: true}

-	validator0, err := vm.newAddDefaultSubnetValidatorTx(
+	validator0, err := vm.newAddValidatorTx(
 		vm.minStake, // stake amount
 		uint64(defaultGenesisTime.Unix()+1), // startTime
 		uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime
@@ -32,9 +32,9 @@ func TestTxHeapStart(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	vdr0Tx := validator0.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
+	vdr0Tx := validator0.UnsignedTx.(*UnsignedAddValidatorTx)

-	validator1, err := vm.newAddDefaultSubnetValidatorTx(
+	validator1, err := vm.newAddValidatorTx(
 		vm.minStake, // stake amount
 		uint64(defaultGenesisTime.Unix()+2), // startTime
 		uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+2), // endTime
@@ -46,9 +46,9 @@ func TestTxHeapStart(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	vdr1Tx := validator1.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
+	vdr1Tx := validator1.UnsignedTx.(*UnsignedAddValidatorTx)

-	validator2, err := vm.newAddDefaultSubnetValidatorTx(
+	validator2, err := vm.newAddValidatorTx(
 		vm.minStake, // stake amount
 		uint64(defaultGenesisTime.Unix()+3), // startTime
 		uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+3), // endTime
@@ -60,7 +60,7 @@ func TestTxHeapStart(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	vdr2Tx := validator2.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
+	vdr2Tx := validator2.UnsignedTx.(*UnsignedAddValidatorTx)

 	txHeap.Add(validator2)
 	if timestamp := txHeap.Timestamp(); !timestamp.Equal(vdr2Tx.StartTime()) {
@@ -81,7 +81,7 @@ func TestTxHeapStart(t *testing.T) {
 }

 func TestTxHeapStop(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -90,7 +90,7 @@ func TestTxHeapStop(t *testing.T) {

 	txHeap := EventHeap{}

-	validator0, err := vm.newAddDefaultSubnetValidatorTx(
+	validator0, err := vm.newAddValidatorTx(
 		vm.minStake, // stake amount
 		uint64(defaultGenesisTime.Unix()+1), // startTime
 		uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime
@@ -102,9 +102,9 @@ func TestTxHeapStop(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	vdr0Tx := validator0.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
+	vdr0Tx := validator0.UnsignedTx.(*UnsignedAddValidatorTx)

-	validator1, err := vm.newAddDefaultSubnetValidatorTx(
+	validator1, err := vm.newAddValidatorTx(
 		vm.minStake, // stake amount
 		uint64(defaultGenesisTime.Unix()+1), // startTime
 		uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+2), // endTime
@@ -116,9 +116,9 @@ func TestTxHeapStop(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	vdr1Tx := validator1.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
+	vdr1Tx := validator1.UnsignedTx.(*UnsignedAddValidatorTx)

-	validator2, err := vm.newAddDefaultSubnetValidatorTx(
+	validator2, err := vm.newAddValidatorTx(
 		vm.minStake, // stake amount
 		uint64(defaultGenesisTime.Unix()+1), // startTime
 		uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+3), // endTime
@@ -130,7 +130,7 @@ func TestTxHeapStop(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	vdr2Tx := validator2.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
+	vdr2Tx := validator2.UnsignedTx.(*UnsignedAddValidatorTx)

 	txHeap.Add(validator2)
 	if timestamp := txHeap.Timestamp(); !timestamp.Equal(vdr2Tx.EndTime()) {
@@ -151,7 +151,7 @@ func TestTxHeapStop(t *testing.T) {
 }

 func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -160,7 +160,7 @@ func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) {

 	txHeap := EventHeap{SortByStartTime: true}

-	validator, err := vm.newAddDefaultSubnetValidatorTx(
+	validator, err := vm.newAddValidatorTx(
 		vm.minStake, // stake amount
 		uint64(defaultGenesisTime.Unix()+1), // startTime
 		uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime
@@ -173,7 +173,7 @@ func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) {
 		t.Fatal(err)
 	}

-	delegator, err := vm.newAddDefaultSubnetDelegatorTx(
+	delegator, err := vm.newAddDelegatorTx(
 		vm.minStake, // stake amount
 		uint64(defaultGenesisTime.Unix()+1), // startTime
 		uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime
@@ -194,7 +194,7 @@ func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) {
 }

 func TestTxHeapStopValidatorVsDelegatorOrdering(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -203,7 +203,7 @@ func TestTxHeapStopValidatorVsDelegatorOrdering(t *testing.T) {

 	txHeap := EventHeap{}

-	validator, err := vm.newAddDefaultSubnetValidatorTx(
+	validator, err := vm.newAddValidatorTx(
 		vm.minStake, // stake amount
 		uint64(defaultGenesisTime.Unix()+1), // startTime
 		uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime
@@ -216,7 +216,7 @@ func TestTxHeapStopValidatorVsDelegatorOrdering(t *testing.T) {
 		t.Fatal(err)
 	}

-	delegator, err := vm.newAddDefaultSubnetDelegatorTx(
+	delegator, err := vm.newAddDelegatorTx(
 		vm.minStake, // stake amount
 		uint64(defaultGenesisTime.Unix()+1), // startTime
 		uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime
diff --git a/vms/platformvm/factory.go b/vms/platformvm/factory.go
index 9ddd69b63655..48bc080a0539 100644
--- a/vms/platformvm/factory.go
+++ b/vms/platformvm/factory.go
@@ -28,7 +28,7 @@ type Factory struct {
 func (f *Factory) New(*snow.Context) (interface{}, error) {
 	return &VM{
 		chainManager:   f.ChainManager,
-		validators:     f.Validators,
+		vdrMgr:         f.Validators,
 		stakingEnabled: f.StakingEnabled,
 		txFee:          f.Fee,
 		minStake:       f.MinStake,
diff --git a/vms/platformvm/import_tx.go b/vms/platformvm/import_tx.go
index 3a872bfb55c3..cf979c4e3a0a 100644
--- a/vms/platformvm/import_tx.go
+++ b/vms/platformvm/import_tx.go
@@ -18,13 +18,10 @@ import (
 )

 var (
-	errAssetIDMismatch            = errors.New("asset IDs in the input don't match the utxo")
-	errWrongNumberOfCredentials   = errors.New("should have the same number of credentials as inputs")
-	errNoInputs                   = errors.New("tx has no inputs")
-	errNoImportInputs             = errors.New("tx has no imported inputs")
-	errInputsNotSortedUnique      = errors.New("inputs not sorted and unique")
-	errPublicKeySignatureMismatch = errors.New("signature doesn't match public key")
-	errUnknownAsset               = errors.New("unknown asset ID")
+	errAssetIDMismatch          = errors.New("asset IDs in the input don't match the utxo")
+	errWrongNumberOfCredentials = errors.New("should have the same number of credentials as inputs")
+	errNoImportInputs           = errors.New("tx has no imported inputs")
+	errInputsNotSortedUnique    = errors.New("inputs not sorted and unique")

 	_ UnsignedAtomicTx = &UnsignedImportTx{}
 )
diff --git a/vms/platformvm/import_tx_test.go b/vms/platformvm/import_tx_test.go
index b7b03aa393bb..c21ec6f49846 100644
--- a/vms/platformvm/import_tx_test.go
+++ b/vms/platformvm/import_tx_test.go
@@ -25,7 +25,6 @@ func TestNewImportTx(t *testing.T) {
 	type test struct {
 		description   string
 		sharedMemory  atomic.SharedMemory
-		feeKeys       []*crypto.PrivateKeySECP256K1R
 		recipientKeys []*crypto.PrivateKeySECP256K1R
 		shouldErr     bool
 	}
diff --git a/vms/platformvm/reward_validator_tx.go b/vms/platformvm/reward_validator_tx.go
index 96f4fa02297d..d2659766e6c3 100644
--- a/vms/platformvm/reward_validator_tx.go
+++ b/vms/platformvm/reward_validator_tx.go
@@ -18,8 +18,7 @@ import (
 )

 var (
-	errShouldBeDSValidator = errors.New("expected validator to be in the default subnet")
-	errOverflowReward      = errors.New("overflow while calculating validator reward")
+	errShouldBeDSValidator = errors.New("expected validator to be in the primary network")
 	errWrongTxType         = errors.New("wrong transaction type")

 	_ UnsignedProposalTx = &UnsignedRewardValidatorTx{}
@@ -68,19 +67,14 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(
 		return nil, nil, nil, nil, permError{errWrongNumberOfCredentials}
 	}

-	defaultSubnetVdrHeap, err := vm.getCurrentValidators(db, constants.DefaultSubnetID)
+	stakerTx, err := vm.nextStakerStop(db, constants.PrimaryNetworkID)
 	if err != nil {
-		return nil, nil, nil, nil, tempError{err}
-	} else if defaultSubnetVdrHeap.Len() == 0 { // there is no validator to remove
-		return nil, nil, nil, nil, permError{errEmptyValidatingSet}
+		return nil, nil, nil, nil, permError{err}
 	}
-
-	vdrTx := defaultSubnetVdrHeap.Remove()
-	txID := vdrTx.ID()
-	if !txID.Equals(tx.TxID) {
+	if stakerID := stakerTx.ID(); !stakerID.Equals(tx.TxID) {
 		return nil, nil, nil, nil, permError{fmt.Errorf("attempting to remove TxID: %s. Should be removing %s",
 			tx.TxID,
-			txID)}
+			stakerID)}
 	}

 	// Verify that the chain's timestamp is the validator's end time
@@ -89,11 +83,11 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(
 		return nil, nil, nil, nil, tempError{err}
 	}

-	unsignedVdrTx, ok := vdrTx.UnsignedTx.(TimedTx)
+	staker, ok := stakerTx.UnsignedTx.(TimedTx)
 	if !ok {
 		return nil, nil, nil, nil, permError{errWrongTxType}
 	}
-	if endTime := unsignedVdrTx.EndTime(); !endTime.Equal(currentTime) {
+	if endTime := staker.EndTime(); !endTime.Equal(currentTime) {
 		return nil, nil, nil, nil, permError{fmt.Errorf("attempting to remove TxID: %s before their end time %s",
 			tx.TxID,
 			endTime)}
@@ -101,24 +95,24 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(
 	// If this tx's proposal is committed, remove the validator from the validator set
 	onCommitDB := versiondb.New(db)
-	if err := vm.putCurrentValidators(onCommitDB, defaultSubnetVdrHeap, constants.DefaultSubnetID); err != nil {
+	if err := vm.removeStaker(onCommitDB, constants.PrimaryNetworkID, stakerTx); err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}

 	// If this tx's proposal is aborted, remove the validator from the validator set
 	onAbortDB := versiondb.New(db)
-	if err := vm.putCurrentValidators(onAbortDB, defaultSubnetVdrHeap, constants.DefaultSubnetID); err != nil {
+	if err := vm.removeStaker(onAbortDB, constants.PrimaryNetworkID, stakerTx); err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}

-	switch uVdrTx := vdrTx.UnsignedTx.(type) {
-	case *UnsignedAddDefaultSubnetValidatorTx:
+	switch uStakerTx := stakerTx.UnsignedTx.(type) {
+	case *UnsignedAddValidatorTx:
 		// Refund the stake here
-		for i, out := range uVdrTx.Stake {
+		for i, out := range uStakerTx.Stake {
 			utxo := &avax.UTXO{
 				UTXOID: avax.UTXOID{
-					TxID:        txID,
-					OutputIndex: uint32(len(uVdrTx.Outs) + i),
+					TxID:        tx.TxID,
+					OutputIndex: uint32(len(uStakerTx.Outs) + i),
 				},
 				Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID},
 				Out:   out.Output(),
@@ -133,8 +127,8 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(
 		}

 		// Provide the reward here
-		if reward := reward(uVdrTx.Validator.Duration(), uVdrTx.Validator.Wght, InflationRate); reward > 0 {
-			outIntf, err := vm.fx.CreateOutput(reward, uVdrTx.RewardsOwner)
+		if reward := reward(uStakerTx.Validator.Duration(), uStakerTx.Validator.Wght, InflationRate); reward > 0 {
+			outIntf, err := vm.fx.CreateOutput(reward, uStakerTx.RewardsOwner)
 			if err != nil {
 				return nil, nil, nil, nil, permError{err}
 			}
@@ -144,8 +138,8 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(
 			}
 			if err := vm.putUTXO(onCommitDB, &avax.UTXO{
 				UTXOID: avax.UTXOID{
-					TxID:        txID,
-					OutputIndex: uint32(len(uVdrTx.Outs) + len(uVdrTx.Stake)),
+					TxID:        tx.TxID,
+					OutputIndex: uint32(len(uStakerTx.Outs) + len(uStakerTx.Stake)),
 				},
 				Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID},
 				Out:   out,
@@ -153,20 +147,28 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(
 				return nil, nil, nil, nil, tempError{err}
 			}
 		}
-	case *UnsignedAddDefaultSubnetDelegatorTx:
+	case *UnsignedAddDelegatorTx:
 		// We're removing a delegator
-		parentTx, err := defaultSubnetVdrHeap.getDefaultSubnetStaker(uVdrTx.Validator.NodeID)
+		vdrTx, ok, err := vm.isValidator(db, constants.PrimaryNetworkID, uStakerTx.Validator.NodeID)
 		if err != nil {
-			return nil, nil, nil, nil, permError{err}
+			return nil, nil, nil, nil, tempError{err}
+		}
+		if !ok {
+			return nil, nil, nil, nil, permError{
+				fmt.Errorf("couldn't find validator %s", uStakerTx.Validator.NodeID)}
+		}
+		vdr, ok := vdrTx.(*UnsignedAddValidatorTx)
+		if !ok {
+			return nil, nil, nil, nil, permError{
+				fmt.Errorf("expected vdr to be *UnsignedAddValidatorTx but is %T", vdrTx)}
 		}
-		unsignedParentTx := parentTx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)

 		// Refund the stake here
-		for i, out := range uVdrTx.Stake {
+		for i, out := range uStakerTx.Stake {
 			utxo := &avax.UTXO{
 				UTXOID: avax.UTXOID{
-					TxID:        txID,
-					OutputIndex: uint32(len(uVdrTx.Outs) + i),
+					TxID:        tx.TxID,
+					OutputIndex: uint32(len(uStakerTx.Outs) + i),
 				},
 				Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID},
 				Out:   out.Output(),
@@ -181,11 +183,11 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(
 		}

 		// If reward given, it will be this amount
-		reward := reward(uVdrTx.Validator.Duration(), uVdrTx.Validator.Wght, InflationRate)
+		reward := reward(uStakerTx.Validator.Duration(), uStakerTx.Validator.Wght, InflationRate)
 		// Calculate split of reward between delegator/delegatee
 		// The delegator gives stake to the validatee
-		delegatorShares := NumberOfShares - uint64(unsignedParentTx.Shares) // parentTx.Shares <= NumberOfShares so no underflow
-		delegatorReward := delegatorShares * (reward / NumberOfShares)      // delegatorShares <= NumberOfShares so no overflow
+		delegatorShares := NumberOfShares - uint64(vdr.Shares)         // vdr.Shares <= NumberOfShares so no underflow
+		delegatorReward := delegatorShares * (reward / NumberOfShares) // delegatorShares <= NumberOfShares so no overflow
 		// Delay rounding as long as possible for small numbers
 		if optimisticReward, err := safemath.Mul64(delegatorShares, reward); err == nil {
 			delegatorReward = optimisticReward / NumberOfShares
@@ -196,7 +198,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(

 		// Reward the delegator here
 		if delegatorReward > 0 {
-			outIntf, err := vm.fx.CreateOutput(delegatorReward, uVdrTx.RewardsOwner)
+			outIntf, err := vm.fx.CreateOutput(delegatorReward, uStakerTx.RewardsOwner)
 			if err != nil {
 				return nil, nil, nil, nil, permError{err}
 			}
@@ -206,8 +208,8 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(
 			}
 			if err := vm.putUTXO(onCommitDB, &avax.UTXO{
 				UTXOID: avax.UTXOID{
-					TxID:        txID,
-					OutputIndex: uint32(len(uVdrTx.Outs) + len(uVdrTx.Stake)),
+					TxID:        tx.TxID,
+					OutputIndex: uint32(len(uStakerTx.Outs) + len(uStakerTx.Stake)),
 				},
 				Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID},
 				Out:   out,
@@ -220,7 +222,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(

 		// Reward the delegatee here
 		if delegateeReward > 0 {
-			outIntf, err := vm.fx.CreateOutput(delegateeReward, unsignedParentTx.RewardsOwner)
+			outIntf, err := vm.fx.CreateOutput(delegateeReward, vdr.RewardsOwner)
 			if err != nil {
 				return nil, nil, nil, nil, permError{err}
 			}
@@ -230,8 +232,8 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(
 			}
 			if err := vm.putUTXO(onCommitDB, &avax.UTXO{
 				UTXOID: avax.UTXOID{
-					TxID:        txID,
-					OutputIndex: uint32(len(uVdrTx.Outs) + len(uVdrTx.Stake) + offset),
+					TxID:        tx.TxID,
+					OutputIndex: uint32(len(uStakerTx.Outs) + len(uStakerTx.Stake) + offset),
 				},
 				Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID},
 				Out:   out,
@@ -247,7 +249,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify(
 	// validator set to remove the staker. onAbortDB or onCommitDB should commit
 	// (flush to vm.DB) before this is called
 	updateValidators := func() error {
-		return vm.updateValidators(constants.DefaultSubnetID)
+		return vm.updateVdrMgr(false)
 	}

 	return onCommitDB, onAbortDB, updateValidators, updateValidators, nil
diff --git a/vms/platformvm/reward_validator_tx_test.go b/vms/platformvm/reward_validator_tx_test.go
index 890dc38f3274..64cf28ee887f 100644
--- a/vms/platformvm/reward_validator_tx_test.go
+++ b/vms/platformvm/reward_validator_tx_test.go
@@ -16,40 +16,40 @@ import (
 )

 func TestUnsignedRewardValidatorTxSemanticVerify(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
 		vm.Ctx.Lock.Unlock()
 	}()

-	currentValidators, err := vm.getCurrentValidators(vm.DB, constants.DefaultSubnetID)
+	// Validator that should leave the primary network validator set next
+	toRemoveIntf, err := vm.nextStakerStop(vm.DB, constants.PrimaryNetworkID)
 	if err != nil {
 		t.Fatal(err)
 	}
-	// ID of validator that should leave DS validator set next
-	nextToRemove := currentValidators.Peek().UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx)
+	toRemove := toRemoveIntf.UnsignedTx.(*UnsignedAddValidatorTx)

 	// Case 1: Chain timestamp is wrong
-	if tx, err := vm.newRewardValidatorTx(nextToRemove.ID()); err != nil {
+	if tx, err := vm.newRewardValidatorTx(toRemove.ID()); err != nil {
 		t.Fatal(err)
 	} else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil {
 		t.Fatalf("should have failed because validator end time doesn't match chain timestamp")
 	}

 	// Case 2: Wrong validator
 	if tx, err := vm.newRewardValidatorTx(ids.GenerateTestID()); err != nil {
 		t.Fatal(err)
 	} else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil {
 		t.Fatalf("should have failed because validator ID is wrong")
 	}

 	// Case 3: Happy path
 	// Advance chain timestamp to time that next validator leaves
-	if err := vm.putTimestamp(vm.DB, nextToRemove.EndTime()); err != nil {
+	if err := vm.putTimestamp(vm.DB, toRemove.EndTime()); err != nil {
 		t.Fatal(err)
 	}
-	tx, err := vm.newRewardValidatorTx(nextToRemove.ID())
+	tx, err := vm.newRewardValidatorTx(toRemove.ID())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -58,20 +58,16 @@ func TestUnsignedRewardValidatorTxSemanticVerify(t *testing.T) {
 		t.Fatal(err)
 	}

-	// Should be one less validator than before
-	oldNumValidators := len(currentValidators.Txs)
-	if currentValidators, err := vm.getCurrentValidators(onCommitDB, constants.DefaultSubnetID); err != nil {
-		t.Fatal(err)
-	} else if numValidators := currentValidators.Len(); numValidators != oldNumValidators-1 {
-		t.Fatalf("Should be %d validators but are %d", oldNumValidators-1, numValidators)
-	} else if currentValidators, err = vm.getCurrentValidators(onAbortDB, constants.DefaultSubnetID); err != nil {
+	// Validator that should leave the primary network validator set next
+	if nextToRemove, err := vm.nextStakerStop(onCommitDB, constants.PrimaryNetworkID); err != nil {
 		t.Fatal(err)
-	} else if numValidators := currentValidators.Len(); numValidators != oldNumValidators-1 {
-		t.Fatalf("Should be %d validators but there are %d", oldNumValidators-1, numValidators)
+	} else if toRemove.ID().Equals(nextToRemove.ID()) {
+		t.Fatalf("Should have removed the previous validator")
 	}

 	// check that stake/reward is given back
-	stakeOwners := nextToRemove.Stake[0].Out.(*secp256k1fx.TransferOutput).AddressesSet()
+	stakeOwners := toRemove.Stake[0].Out.(*secp256k1fx.TransferOutput).AddressesSet()

 	// Get old balances, balances if tx abort, balances if tx committed
 	for _, stakeOwner := range stakeOwners.List() {
 		stakeOwnerSet := ids.ShortSet{}
@@ -89,19 +85,19 @@ func TestUnsignedRewardValidatorTxSemanticVerify(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		if onAbortBalance != oldBalance+nextToRemove.Validator.Weight() {
+		if onAbortBalance != oldBalance+toRemove.Validator.Weight() {
 			t.Fatalf("on abort, should have got back staked amount")
 		}
-		expectedReward := reward(nextToRemove.Validator.Duration(), nextToRemove.Validator.Weight(), InflationRate)
-		if onCommitBalance != oldBalance+expectedReward+nextToRemove.Validator.Weight() {
+		expectedReward := reward(toRemove.Validator.Duration(), toRemove.Validator.Weight(), InflationRate)
+		if onCommitBalance != oldBalance+expectedReward+toRemove.Validator.Weight() {
 			t.Fatalf("on commit, should have old balance (%d) + staked amount (%d) + reward (%d) but have %d",
-				oldBalance, nextToRemove.Validator.Weight(), expectedReward, onCommitBalance)
+				oldBalance, toRemove.Validator.Weight(), expectedReward, onCommitBalance)
 		}
 	}
 }

 func TestRewardDelegatorTxSemanticVerify(t *testing.T) {
-	vm , _ := defaultVM()
+	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
 	defer func() {
 		vm.Shutdown()
@@ -114,7 +110,7 @@ func TestRewardDelegatorTxSemanticVerify(t *testing.T) {
 	vdrStartTime := uint64(defaultValidateStartTime.Unix()) + 1
 	vdrEndTime := uint64(defaultValidateStartTime.Add(2 * MinimumStakingDuration).Unix())
 	vdrNodeID := ids.GenerateTestShortID()
-	vdrTx, err := vm.newAddDefaultSubnetValidatorTx(
+	vdrTx, err := vm.newAddValidatorTx(
 		vm.minStake, // stakeAmt
 		vdrStartTime,
 		vdrEndTime,
@@ -127,9 +123,9 @@ func TestRewardDelegatorTxSemanticVerify(t *testing.T) {
 		t.Fatal(err)
 	}

-	delStartTime := vdrStartTime + 1
-	delEndTime := vdrEndTime - 1
-	delTx, err := vm.newAddDefaultSubnetDelegatorTx(
+	delStartTime := vdrStartTime
+	delEndTime := vdrEndTime
+	delTx, err := vm.newAddDelegatorTx(
 		vm.minStake, // stakeAmt
 		delStartTime,
 		delEndTime,
@@ -140,18 +136,15 @@ func TestRewardDelegatorTxSemanticVerify(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	unsignedDelTx := delTx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx)
+	unsignedDelTx := delTx.UnsignedTx.(*UnsignedAddDelegatorTx)

-	currentValidators, err := vm.getCurrentValidators(vm.DB, constants.DefaultSubnetID)
-	if err != nil {
+	if err := vm.addStaker(vm.DB, constants.PrimaryNetworkID, vdrTx); err != nil {
 		t.Fatal(err)
 	}
-	currentValidators.Add(vdrTx)
-	currentValidators.Add(delTx)
-	if err := vm.putCurrentValidators(vm.DB, currentValidators, constants.DefaultSubnetID); err != nil {
+	if err := vm.addStaker(vm.DB, constants.PrimaryNetworkID, delTx); err != nil {
 		t.Fatal(err)
-		// Advance timestamp to when delegator should leave validator set
-	} else if err := vm.putTimestamp(vm.DB, time.Unix(int64(delEndTime), 0)); err != nil {
+	}
+	if err := vm.putTimestamp(vm.DB, time.Unix(int64(delEndTime), 0)); err != nil {
 		t.Fatal(err)
 	}
diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go
index c854e567af46..2046f0df33a4 100644
--- a/vms/platformvm/service.go
+++ b/vms/platformvm/service.go
@@ -11,6 +11,7 @@ import (
 	"time"

 	"github.com/ava-labs/gecko/api"
+	"github.com/ava-labs/gecko/database/prefixdb"
 	"github.com/ava-labs/gecko/ids"
 	"github.com/ava-labs/gecko/utils/constants"
 	"github.com/ava-labs/gecko/utils/crypto"
@@ -30,11 +31,8 @@ const (
 var (
 	errMissingDecisionBlock  = errors.New("should have a decision block within the past two blocks")
 	errNoFunds               = errors.New("no spendable funds were found")
-	errNoUsername            = errors.New("argument 'username' not provided")
-	errNoPassword            = errors.New("argument 'password' not provided")
 	errNoSubnetID            = errors.New("argument 'subnetID' not provided")
 	errNoRewardAddress       = errors.New("argument 'rewardAddress' not provided")
-	errUnexpectedTxType      = errors.New("expected tx to be a DecisionTx, ProposalTx or AtomicTx but is not")
 	errInvalidDelegationRate = errors.New("argument 'delegationFeeRate' must be between 0 and 100, inclusive")
 	errNoAddresses           = errors.New("no addresses provided")
 )
@@ -239,7 +237,7 @@ func (service *Service) ListAddresses(_ *http.Request, args *api.UserPass, respo
 // Marks a starting or stopping point when fetching UTXOs. Used for pagination.
 type Index struct {
 	Address string `json:"address"` // The address as a string
-	Utxo    string `json:"utxo"`    // The UTXO ID as a string
+	UTXO    string `json:"utxo"`    // The UTXO ID as a string
 }

 // GetUTXOsArgs are arguments for passing into GetUTXOs.
@@ -248,7 +246,7 @@ type Index struct {
 // If [limit] == 0 or > [maxUTXOsToFetch], fetches up to [maxUTXOsToFetch].
 // [StartIndex] defines where to start fetching UTXOs (for pagination.)
 // UTXOs fetched are from addresses equal to or greater than [StartIndex.Address]
-// For address [StartIndex.Address], only UTXOs with IDs greater than [StartIndex.Utxo] will be returned.
+// For address [StartIndex.Address], only UTXOs with IDs greater than [StartIndex.UTXO] will be returned.
 // If [StartIndex] is omitted, gets all UTXOs.
 // If GetUTXOs is called multiple times, with or without [StartIndex], it is not guaranteed
 // that returned UTXOs are unique. That is, the same UTXO may appear in the response of multiple calls.
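Aside: the pagination contract documented above is easiest to see from the client's side. The following is a minimal, self-contained Go sketch — not part of this patch — of a loop that pages through UTXOs using the renamed Index.UTXO field. The fetch helper and the page shape are hypothetical stand-ins for the platform.getUTXOs RPC; only the Address, UTXO, and EndIndex names come from the code above, and deduplication is left to the caller because the API does not guarantee unique results across calls.

package main

import "fmt"

// Index mirrors the renamed pagination cursor above.
type Index struct {
	Address string `json:"address"`
	UTXO    string `json:"utxo"`
}

// page is a hypothetical stand-in for a GetUTXOs reply.
type page struct {
	UTXOs    []string
	EndIndex Index
}

// fetch fakes one platform.getUTXOs round trip: it returns at most limit
// UTXO IDs strictly greater than start.UTXO, plus the cursor to resume from.
func fetch(all []string, start Index, limit int) page {
	i := 0
	for i < len(all) && all[i] <= start.UTXO {
		i++
	}
	end := i + limit
	if end > len(all) {
		end = len(all)
	}
	p := page{UTXOs: all[i:end], EndIndex: start}
	if end > i {
		p.EndIndex = Index{Address: start.Address, UTXO: all[end-1]}
	}
	return p
}

func main() {
	all := []string{"utxo01", "utxo02", "utxo03", "utxo04", "utxo05"}
	seen := map[string]bool{} // the same UTXO may appear in multiple replies
	start := Index{Address: "P-example"}
	for {
		p := fetch(all, start, 2)
		if len(p.UTXOs) == 0 {
			break
		}
		for _, u := range p.UTXOs {
			seen[u] = true
		}
		start = p.EndIndex // resume from EndIndex, as with the real API
	}
	fmt.Println(len(seen), "unique UTXOs") // prints: 5 unique UTXOs
}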
@@ -306,7 +304,7 @@ func (service *Service) GetUTXOs(_ *http.Request, args *GetUTXOsArgs, response *
 	startAddr := ids.ShortEmpty
 	startUTXO := ids.Empty
-	if args.StartIndex.Address != "" || args.StartIndex.Utxo != "" {
+	if args.StartIndex.Address != "" || args.StartIndex.UTXO != "" {
 		addrChainID, addr, err := service.vm.ParseAddress(args.StartIndex.Address)
 		if err != nil {
 			return fmt.Errorf("couldn't parse start index address: %w", err)
@@ -315,7 +313,7 @@ func (service *Service) GetUTXOs(_ *http.Request, args *GetUTXOsArgs, response *
 			return fmt.Errorf("addresses from multiple chains provided: %q and %q", chainID, addrChainID)
 		}
-		utxo, err := ids.FromString(args.StartIndex.Utxo)
+		utxo, err := ids.FromString(args.StartIndex.UTXO)
 		if err != nil {
 			return fmt.Errorf("couldn't parse start index utxo: %w", err)
 		}
@@ -366,7 +364,7 @@ func (service *Service) GetUTXOs(_ *http.Request, args *GetUTXOsArgs, response *
 	}
 	response.EndIndex.Address = endAddress
-	response.EndIndex.Utxo = endUTXOID.String()
+	response.EndIndex.UTXO = endUTXOID.String()
 	return nil
 }
@@ -398,12 +396,12 @@ type GetSubnetsArgs struct {
 // GetSubnetsResponse is the response from calling GetSubnets
 type GetSubnetsResponse struct {
 	// Each element is a subnet that exists
-	// Null if there are no subnets other than the default subnet
+	// Null if there are no subnets other than the primary network
 	Subnets []APISubnet `json:"subnets"`
 }

 // GetSubnets returns the subnets whose ID are in [args.IDs]
-// The response will include the default subnet
+// The response will include the primary network
 func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *GetSubnetsResponse) error {
 	service.vm.SnowmanVM.Ctx.Log.Info("Platform: GetSubnets called")
 	subnets, err := service.vm.getSubnets(service.vm.DB) // all subnets
@@ -432,9 +430,9 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon
 				Threshold:   json.Uint32(owner.Threshold),
 			}
 		}
-		// Include Default Subnet
+		// Include primary network
 		response.Subnets[len(subnets)] = APISubnet{
-			ID:          constants.DefaultSubnetID,
+			ID:          constants.PrimaryNetworkID,
 			ControlKeys: []string{},
 			Threshold:   json.Uint32(0),
 		}
@@ -464,10 +462,10 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon
 			)
 		}
 	}
-	if idsSet.Contains(constants.DefaultSubnetID) {
+	if idsSet.Contains(constants.PrimaryNetworkID) {
 		response.Subnets = append(response.Subnets,
 			APISubnet{
-				ID:          constants.DefaultSubnetID,
+				ID:          constants.PrimaryNetworkID,
 				ControlKeys: []string{},
 				Threshold:   json.Uint32(0),
 			},
@@ -492,10 +490,10 @@ func (service *Service) GetStakingAssetID(_ *http.Request, args *GetStakingAsset
 	service.vm.SnowmanVM.Ctx.Log.Info("Platform: GetStakingAssetID called")

 	if args.SubnetID.IsZero() {
-		args.SubnetID = constants.DefaultSubnetID
+		args.SubnetID = constants.PrimaryNetworkID
 	}
-	if !args.SubnetID.Equals(constants.DefaultSubnetID) {
+	if !args.SubnetID.Equals(constants.PrimaryNetworkID) {
 		return fmt.Errorf("Subnet %s doesn't have a valid staking token", args.SubnetID)
 	}
@@ -513,7 +511,7 @@ func (service *Service) GetStakingAssetID(_ *http.Request, args *GetStakingAsset
 // GetCurrentValidatorsArgs are the arguments for calling GetCurrentValidators
 type GetCurrentValidatorsArgs struct {
 	// Subnet we're listing the validators of
-	// If omitted, defaults to default subnet
+	// If omitted, defaults to primary network
 	SubnetID ids.ID `json:"subnetID"`
 }
@@ -526,58 +524,63 @@ type GetCurrentValidatorsReply struct {
 func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsArgs, reply *GetCurrentValidatorsReply) error {
 	service.vm.Ctx.Log.Info("Platform: GetCurrentValidators called")
 	if args.SubnetID.IsZero() {
-		args.SubnetID = constants.DefaultSubnetID
+		args.SubnetID = constants.PrimaryNetworkID
 	}

-	validators, err := service.vm.getCurrentValidators(service.vm.DB, args.SubnetID)
-	if err != nil {
-		return fmt.Errorf("couldn't get validators of subnet with ID %s. Does it exist?", args.SubnetID)
-	}
+	stopPrefix := []byte(fmt.Sprintf("%s%s", args.SubnetID, stop))
+	stopDB := prefixdb.NewNested(stopPrefix, service.vm.DB)
+	defer stopDB.Close()

-	reply.Validators = make([]FormattedAPIValidator, validators.Len())
-	if args.SubnetID.Equals(constants.DefaultSubnetID) {
-		for i, tx := range validators.Txs {
-			switch tx := tx.UnsignedTx.(type) {
-			case *UnsignedAddDefaultSubnetValidatorTx:
-				weight := json.Uint64(tx.Validator.Weight())
-				reply.Validators[i] = FormattedAPIValidator{
-					ID:          tx.Validator.ID().PrefixedString(constants.NodeIDPrefix),
-					StartTime:   json.Uint64(tx.StartTime().Unix()),
-					EndTime:     json.Uint64(tx.EndTime().Unix()),
-					StakeAmount: &weight,
-				}
-			case *UnsignedAddDefaultSubnetDelegatorTx:
-				weight := json.Uint64(tx.Validator.Weight())
-				reply.Validators[i] = FormattedAPIValidator{
-					ID:          tx.Validator.ID().PrefixedString(constants.NodeIDPrefix),
-					StartTime:   json.Uint64(tx.StartTime().Unix()),
-					EndTime:     json.Uint64(tx.EndTime().Unix()),
-					StakeAmount: &weight,
-				}
-			default: // Shouldn't happen
-				return fmt.Errorf("couldn't get the reward address of %s", tx.ID())
-			}
+	stopIter := stopDB.NewIterator()
+	defer stopIter.Release()
+
+	for stopIter.Next() { // Iterates in order of increasing stop time
+		txBytes := stopIter.Value()
+
+		tx := Tx{}
+		if err := service.vm.codec.Unmarshal(txBytes, &tx); err != nil {
+			return fmt.Errorf("couldn't unmarshal validator tx: %w", err)
 		}
-	} else {
-		for i, tx := range validators.Txs {
-			utx := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx)
-			weight := json.Uint64(utx.Validator.Weight())
-			reply.Validators[i] = FormattedAPIValidator{
-				ID:        utx.Validator.ID().PrefixedString(constants.NodeIDPrefix),
-				StartTime: json.Uint64(utx.StartTime().Unix()),
-				EndTime:   json.Uint64(utx.EndTime().Unix()),
+		if err := tx.Sign(service.vm.codec, nil); err != nil {
+			return err
+		}
+
+		switch staker := tx.UnsignedTx.(type) {
+		case *UnsignedAddDelegatorTx:
+			weight := json.Uint64(staker.Validator.Weight())
+			reply.Validators = append(reply.Validators, FormattedAPIValidator{
+				ID:          staker.Validator.ID().PrefixedString(constants.NodeIDPrefix),
+				StartTime:   json.Uint64(staker.StartTime().Unix()),
+				EndTime:     json.Uint64(staker.EndTime().Unix()),
+				StakeAmount: &weight,
+			})
+		case *UnsignedAddValidatorTx:
+			weight := json.Uint64(staker.Validator.Weight())
+			reply.Validators = append(reply.Validators, FormattedAPIValidator{
+				ID:          staker.Validator.ID().PrefixedString(constants.NodeIDPrefix),
+				StartTime:   json.Uint64(staker.StartTime().Unix()),
+				EndTime:     json.Uint64(staker.EndTime().Unix()),
+				StakeAmount: &weight,
+			})
+		case *UnsignedAddSubnetValidatorTx:
+			weight := json.Uint64(staker.Validator.Weight())
+			reply.Validators = append(reply.Validators, FormattedAPIValidator{
+				ID:        staker.Validator.ID().PrefixedString(constants.NodeIDPrefix),
+				StartTime: json.Uint64(staker.StartTime().Unix()),
+				EndTime:   json.Uint64(staker.EndTime().Unix()),
 				Weight:    &weight,
-			}
+			})
+		default:
+			return fmt.Errorf("expected validator but got %T", tx.UnsignedTx)
 		}
 	}
-
-	return nil
+ return stopIter.Error() } // GetPendingValidatorsArgs are the arguments for calling GetPendingValidators type GetPendingValidatorsArgs struct { // Subnet we're getting the pending validators of - // If omitted, defaults to default subnet + // If omitted, defaults to primary network SubnetID ids.ID `json:"subnetID"` } @@ -590,50 +593,57 @@ type GetPendingValidatorsReply struct { func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { service.vm.Ctx.Log.Info("Platform: GetPendingValidators called") if args.SubnetID.IsZero() { - args.SubnetID = constants.DefaultSubnetID + args.SubnetID = constants.PrimaryNetworkID } - validators, err := service.vm.getPendingValidators(service.vm.DB, args.SubnetID) - if err != nil { - return fmt.Errorf("couldn't get validators of subnet with ID %s. Does it exist?", args.SubnetID) - } + startPrefix := []byte(fmt.Sprintf("%s%s", args.SubnetID, start)) + startDB := prefixdb.NewNested(startPrefix, service.vm.DB) + defer startDB.Close() - reply.Validators = make([]FormattedAPIValidator, validators.Len()) - for i, tx := range validators.Txs { - if args.SubnetID.Equals(constants.DefaultSubnetID) { - switch tx := tx.UnsignedTx.(type) { - case *UnsignedAddDefaultSubnetValidatorTx: - weight := json.Uint64(tx.Validator.Weight()) - reply.Validators[i] = FormattedAPIValidator{ - ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), - StartTime: json.Uint64(tx.StartTime().Unix()), - EndTime: json.Uint64(tx.EndTime().Unix()), - StakeAmount: &weight, - } - case *UnsignedAddDefaultSubnetDelegatorTx: - weight := json.Uint64(tx.Validator.Weight()) - reply.Validators[i] = FormattedAPIValidator{ - ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), - StartTime: json.Uint64(tx.StartTime().Unix()), - EndTime: json.Uint64(tx.EndTime().Unix()), - StakeAmount: &weight, - } - default: // Shouldn't happen - return fmt.Errorf("couldn't get the reward address of %s", tx.ID()) - } - } else { - utx := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx) - weight := json.Uint64(utx.Validator.Weight()) - reply.Validators[i] = FormattedAPIValidator{ - ID: utx.Validator.ID().PrefixedString(constants.NodeIDPrefix), - StartTime: json.Uint64(utx.StartTime().Unix()), - EndTime: json.Uint64(utx.EndTime().Unix()), + startIter := startDB.NewIterator() + defer startIter.Release() + + for startIter.Next() { // Iterates in order of increasing start time + txBytes := startIter.Value() + + tx := Tx{} + if err := service.vm.codec.Unmarshal(txBytes, &tx); err != nil { + return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + } + if err := tx.Sign(service.vm.codec, nil); err != nil { + return err + } + + switch staker := tx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + weight := json.Uint64(staker.Validator.Weight()) + reply.Validators = append(reply.Validators, FormattedAPIValidator{ + ID: staker.Validator.ID().PrefixedString(constants.NodeIDPrefix), + StartTime: json.Uint64(staker.StartTime().Unix()), + EndTime: json.Uint64(staker.EndTime().Unix()), + StakeAmount: &weight, + }) + case *UnsignedAddValidatorTx: + weight := json.Uint64(staker.Validator.Weight()) + reply.Validators = append(reply.Validators, FormattedAPIValidator{ + ID: staker.Validator.ID().PrefixedString(constants.NodeIDPrefix), + StartTime: json.Uint64(staker.StartTime().Unix()), + EndTime: json.Uint64(staker.EndTime().Unix()), + StakeAmount: &weight, + }) + case *UnsignedAddSubnetValidatorTx: + weight := 
json.Uint64(staker.Validator.Weight()) + reply.Validators = append(reply.Validators, FormattedAPIValidator{ + ID: staker.Validator.ID().PrefixedString(constants.NodeIDPrefix), + StartTime: json.Uint64(staker.StartTime().Unix()), + EndTime: json.Uint64(staker.EndTime().Unix()), Weight: &weight, - } + }) + default: + return fmt.Errorf("expected validator but got %T", tx.UnsignedTx) } } - - return nil + return startIter.Error() } // SampleValidatorsArgs are the arguments for calling SampleValidators @@ -642,7 +652,7 @@ type SampleValidatorsArgs struct { Size json.Uint16 `json:"size"` // ID of subnet to sample validators from - // If omitted, defaults to the default subnet + // If omitted, defaults to the primary network SubnetID ids.ID `json:"subnetID"` } @@ -655,10 +665,10 @@ type SampleValidatorsReply struct { func (service *Service) SampleValidators(_ *http.Request, args *SampleValidatorsArgs, reply *SampleValidatorsReply) error { service.vm.Ctx.Log.Info("Platform: SampleValidators called with Size = %d", args.Size) if args.SubnetID.IsZero() { - args.SubnetID = constants.DefaultSubnetID + args.SubnetID = constants.PrimaryNetworkID } - validators, ok := service.vm.validators.GetValidatorSet(args.SubnetID) + validators, ok := service.vm.vdrMgr.GetValidators(args.SubnetID) if !ok { return fmt.Errorf("couldn't get validators of subnet with ID %s. Does it exist?", args.SubnetID) } @@ -687,17 +697,17 @@ func (service *Service) SampleValidators(_ *http.Request, args *SampleValidators ****************************************************** */ -// AddDefaultSubnetValidatorArgs are the arguments to AddDefaultSubnetValidator -type AddDefaultSubnetValidatorArgs struct { - FormattedAPIDefaultSubnetValidator +// AddValidatorArgs are the arguments to AddValidator +type AddValidatorArgs struct { + FormattedAPIPrimaryValidator api.UserPass } -// AddDefaultSubnetValidator creates and signs and issues a transaction to add a -// validator to the default subnet -func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *api.JsonTxID) error { - service.vm.Ctx.Log.Info("Platform: AddDefaultSubnetValidator called") +// AddValidator creates and signs and issues a transaction to add a +// validator to the primary network +func (service *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *api.JsonTxID) error { + service.vm.Ctx.Log.Info("Platform: AddValidator called") switch { case args.RewardAddress == "": return errNoRewardAddress @@ -735,14 +745,14 @@ func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefa } // Create the transaction - tx, err := service.vm.newAddDefaultSubnetValidatorTx( + tx, err := service.vm.newAddValidatorTx( uint64(args.weight()), // Stake amount uint64(args.StartTime), // Start time uint64(args.EndTime), // End time nodeID, // Node ID rewardAddress, // Reward Address uint32(10000*args.DelegationFeeRate), // Shares - privKeys, // Private keys + privKeys, // Private keys ) if err != nil { return fmt.Errorf("couldn't create tx: %w", err) @@ -752,17 +762,17 @@ func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefa return service.vm.issueTx(tx) } -// AddDefaultSubnetDelegatorArgs are the arguments to AddDefaultSubnetDelegator -type AddDefaultSubnetDelegatorArgs struct { +// AddDelegatorArgs are the arguments to AddDelegator +type AddDelegatorArgs struct { FormattedAPIValidator api.UserPass RewardAddress string `json:"rewardAddress"` } -// AddDefaultSubnetDelegator creates and 
signs and issues a transaction to add a -// delegator to the default subnet -func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *api.JsonTxID) error { - service.vm.Ctx.Log.Info("Platform: AddDefaultSubnetDelegator called") +// AddDelegator creates and signs and issues a transaction to add a +// delegator to the primary network +func (service *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *api.JsonTxID) error { + service.vm.Ctx.Log.Info("Platform: AddDelegator called") switch { case int64(args.StartTime) < time.Now().Unix(): return fmt.Errorf("start time must be in the future") @@ -798,7 +808,7 @@ func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefa } // Create the transaction - tx, err := service.vm.newAddDefaultSubnetDelegatorTx( + tx, err := service.vm.newAddDelegatorTx( uint64(args.weight()), // Stake amount uint64(args.StartTime), // Start time uint64(args.EndTime), // End time @@ -814,18 +824,18 @@ func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefa return service.vm.issueTx(tx) } -// AddNonDefaultSubnetValidatorArgs are the arguments to AddNonDefaultSubnetValidator -type AddNonDefaultSubnetValidatorArgs struct { +// AddSubnetValidatorArgs are the arguments to AddSubnetValidator +type AddSubnetValidatorArgs struct { FormattedAPIValidator api.UserPass // ID of subnet to validate SubnetID string `json:"subnetID"` } -// AddNonDefaultSubnetValidator creates and signs and issues a transaction to -// add a validator to a subnet other than the default subnet -func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddNonDefaultSubnetValidatorArgs, response *api.JsonTxID) error { - service.vm.SnowmanVM.Ctx.Log.Info("Platform: AddNonDefaultSubnetValidator called") +// AddSubnetValidator creates and signs and issues a transaction to +// add a validator to a subnet other than the primary network +func (service *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorArgs, response *api.JsonTxID) error { + service.vm.SnowmanVM.Ctx.Log.Info("Platform: AddSubnetValidator called") switch { case args.SubnetID == "": return errNoSubnetID @@ -833,15 +843,15 @@ func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddN nodeID, err := ids.ShortFromPrefixedString(args.ID, constants.NodeIDPrefix) if err != nil { - return fmt.Errorf("Error parsing nodeID: '%s': %w", args.ID, err) + return fmt.Errorf("error parsing nodeID: '%s': %w", args.ID, err) } subnetID, err := ids.FromString(args.SubnetID) if err != nil { return fmt.Errorf("problem parsing subnetID '%s': %w", args.SubnetID, err) } - if subnetID.Equals(constants.DefaultSubnetID) { - return errors.New("non-default subnet validator attempts to validate default subnet") + if subnetID.Equals(constants.PrimaryNetworkID) { + return errors.New("subnet validator attempts to validate primary network") } // Get the keys controlled by the user @@ -856,7 +866,7 @@ func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddN } // Create the transaction - tx, err := service.vm.newAddNonDefaultSubnetValidatorTx( + tx, err := service.vm.newAddSubnetValidatorTx( uint64(args.weight()), // Stake amount uint64(args.StartTime), // Start time uint64(args.EndTime), // End time @@ -1069,7 +1079,7 @@ func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchain fxIDs = append(fxIDs, secp256k1fx.ID) } - if 
args.SubnetID.Equals(constants.DefaultSubnetID) { + if args.SubnetID.Equals(constants.PrimaryNetworkID) { return errDSCantValidate } @@ -1205,8 +1215,8 @@ type ValidatesResponse struct { func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response *ValidatesResponse) error { service.vm.Ctx.Log.Info("Platform: Validates called") // Verify that the Subnet exists - // Ignore lookup error if it's the DefaultSubnetID - if _, err := service.vm.getSubnet(service.vm.DB, args.SubnetID); err != nil && !args.SubnetID.Equals(constants.DefaultSubnetID) { + // Ignore lookup error if it's the PrimaryNetworkID + if _, err := service.vm.getSubnet(service.vm.DB, args.SubnetID); err != nil && !args.SubnetID.Equals(constants.PrimaryNetworkID) { return fmt.Errorf("problem retrieving subnet '%s': %w", args.SubnetID, err) } // Get the chains that exist diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index dd8502b98643..0d491cf91427 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -40,7 +40,7 @@ var ( ) func defaultService(t *testing.T) *Service { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer vm.Ctx.Lock.Unlock() ks := keystore.CreateTestKeystore() @@ -72,9 +72,9 @@ func defaultAddress(t *testing.T, service *Service) { } } -func TestAddDefaultSubnetValidator(t *testing.T) { +func TestAddValidator(t *testing.T) { expectedJSONString := `{"startTime":"0","endTime":"0","nodeID":"","rewardAddress":"","delegationFeeRate":"0.0000","username":"","password":""}` - args := AddDefaultSubnetValidatorArgs{} + args := AddValidatorArgs{} bytes, err := json.Marshal(&args) if err != nil { t.Fatal(err) @@ -235,7 +235,7 @@ func TestGetTx(t *testing.T) { test{ "proposal block", func() (*Tx, error) { - return service.vm.newAddDefaultSubnetValidatorTx( // Test GetTx works for proposal blocks + return service.vm.newAddValidatorTx( // Test GetTx works for proposal blocks service.vm.minStake, uint64(service.vm.clock.Time().Add(Delta).Unix()), uint64(service.vm.clock.Time().Add(Delta).Add(MinimumStakingDuration).Unix()), diff --git a/vms/platformvm/spend.go b/vms/platformvm/spend.go index 5ca60bda30a6..392e0b82b94e 100644 --- a/vms/platformvm/spend.go +++ b/vms/platformvm/spend.go @@ -16,14 +16,10 @@ import ( ) var ( - errSpendOverflow = errors.New("spent amount overflows uint64") - errNoKeys = errors.New("no keys provided") errLockedFundsNotMarkedAsLocked = errors.New("locked funds not marked as locked") errWrongLocktime = errors.New("wrong locktime reported") errUnknownOwners = errors.New("unknown owners") errCantSign = errors.New("can't sign") - errInputOverflow = errors.New("inputs overflowed uint64") - errOutputOverflow = errors.New("outputs overflowed uint64") ) // stake the provided amount while deducting the provided fee. 
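The spend.go hunk that follows switches a bare `tempError{err}` to one that folds the consumed-UTXO ID into the message with `%w`. A minimal standalone sketch of why `%w` matters here, assuming nothing beyond the standard library — `errNoSuchUTXO`, `getUTXO`, and `verifySpend` are hypothetical stand-ins, not names from this diff: wrapping adds context to the message while keeping the original cause matchable through the error chain.

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinel standing in for whatever the database layer returns.
var errNoSuchUTXO = errors.New("no such UTXO")

func getUTXO(utxoID string) error {
	return errNoSuchUTXO // simulate a missing UTXO
}

func verifySpend(utxoID string) error {
	if err := getUTXO(utxoID); err != nil {
		// %w wraps the cause: the message gains context, and callers can
		// still recover the original error through the wrap chain.
		return fmt.Errorf("failed to read consumed UTXO %s due to: %w", utxoID, err)
	}
	return nil
}

func main() {
	err := verifySpend("2fMs3")
	fmt.Println(err)                           // annotated message
	fmt.Println(errors.Is(err, errNoSuchUTXO)) // true: cause survives wrapping
}
```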
@@ -341,7 +337,7 @@ func (vm *VM) semanticVerifySpend( utxoID := input.UTXOID.InputID() utxo, err := vm.getUTXO(db, utxoID) if err != nil { - return tempError{err} + return tempError{fmt.Errorf("failed to read consumed UTXO %s due to: %w", utxoID, err)} } utxos[index] = utxo } diff --git a/vms/platformvm/state.go b/vms/platformvm/state.go index 23c0dcf6d39c..b62124c1d593 100644 --- a/vms/platformvm/state.go +++ b/vms/platformvm/state.go @@ -13,7 +13,10 @@ import ( "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/utils/constants" "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/components/avax" safemath "github.com/ava-labs/gecko/utils/math" @@ -25,6 +28,14 @@ import ( const ( currentValidatorsPrefix uint64 = iota pendingValidatorsPrefix + + delegator = "delegator" + start = "start" + stop = "stop" +) + +var ( + errNoValidators = errors.New("there are no validators") ) // persist a tx @@ -61,72 +72,257 @@ func (vm *VM) getStatus(db database.Database, ID ids.ID) (Status, error) { return Unknown, fmt.Errorf("expected status to be type Status but is type %T", statusIntf) } -// get the validators currently validating the specified subnet -func (vm *VM) getCurrentValidators(db database.Database, subnetID ids.ID) (*EventHeap, error) { - return vm.getValidatorsFromDB(db, subnetID, currentValidatorsPrefix, false) +// Add a staker to subnet [subnetID]'s pending validator queue. A staker may be +// a validator or a delegator +func (vm *VM) enqueueStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { + var ( + staker TimedTx + priority byte + ) + switch unsignedTx := stakerTx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + staker = unsignedTx + priority = 1 + case *UnsignedAddSubnetValidatorTx: + staker = unsignedTx + priority = 0 + case *UnsignedAddValidatorTx: + staker = unsignedTx + priority = 2 + default: + return fmt.Errorf("staker is unexpected type %T", stakerTx) + } + stakerID := staker.ID().Bytes() // Tx ID of this tx + txBytes := stakerTx.Bytes() + + // Sorted by subnet ID then start time then tx ID + prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) + prefixStartDB := prefixdb.NewNested(prefixStart, db) + defer prefixStartDB.Close() + + p := wrappers.Packer{MaxSize: wrappers.LongLen + wrappers.ByteLen + hashing.HashLen} + p.PackLong(uint64(staker.StartTime().Unix())) + p.PackByte(priority) + p.PackFixedBytes(stakerID) + if p.Err != nil { + return fmt.Errorf("couldn't serialize validator key: %w", p.Err) + } + startKey := p.Bytes + + return prefixStartDB.Put(startKey, txBytes) } -// put the validators currently validating the specified subnet -func (vm *VM) putCurrentValidators(db database.Database, validators *EventHeap, subnetID ids.ID) error { - if validators.SortByStartTime { - return errors.New("current validators should be sorted by end time") - } - err := vm.State.Put(db, validatorsTypeID, subnetID.Prefix(currentValidatorsPrefix), validators) - if err != nil { - return fmt.Errorf("couldn't put current validator set: %w", err) - } - return nil +// Remove a staker from subnet [subnetID]'s pending validator queue. 
A staker +// may be a validator or a delegator +func (vm *VM) dequeueStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { + var ( + staker TimedTx + priority byte + ) + switch unsignedTx := stakerTx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + staker = unsignedTx + priority = 1 + case *UnsignedAddSubnetValidatorTx: + staker = unsignedTx + priority = 0 + case *UnsignedAddValidatorTx: + staker = unsignedTx + priority = 2 + default: + return fmt.Errorf("staker is unexpected type %T", stakerTx) + } + stakerID := staker.ID().Bytes() // Tx ID of this tx + + // Sorted by subnet ID then start time then ID + prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) + prefixStartDB := prefixdb.NewNested(prefixStart, db) + defer prefixStartDB.Close() + + p := wrappers.Packer{MaxSize: wrappers.LongLen + wrappers.ByteLen + hashing.HashLen} + p.PackLong(uint64(staker.StartTime().Unix())) + p.PackByte(priority) + p.PackFixedBytes(stakerID) + if p.Err != nil { + return fmt.Errorf("couldn't serialize validator key: %w", p.Err) + } + startKey := p.Bytes + + return prefixStartDB.Delete(startKey) } -// get the validators that are slated to validate the specified subnet in the future -func (vm *VM) getPendingValidators(db database.Database, subnetID ids.ID) (*EventHeap, error) { - return vm.getValidatorsFromDB(db, subnetID, pendingValidatorsPrefix, true) +// Add a staker to subnet [subnetID] +// A staker may be a validator or a delegator +func (vm *VM) addStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { + var ( + staker TimedTx + priority byte + ) + switch unsignedTx := stakerTx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + staker = unsignedTx + priority = 0 + case *UnsignedAddSubnetValidatorTx: + staker = unsignedTx + priority = 1 + case *UnsignedAddValidatorTx: + staker = unsignedTx + priority = 2 + default: + return fmt.Errorf("staker is unexpected type %T", stakerTx) + } + stakerID := staker.ID().Bytes() // Tx ID of this tx + txBytes := stakerTx.Bytes() + + // Sorted by subnet ID then stop time then tx ID + prefixStop := []byte(fmt.Sprintf("%s%s", subnetID, stop)) + prefixStopDB := prefixdb.NewNested(prefixStop, db) + defer prefixStopDB.Close() + + p := wrappers.Packer{MaxSize: wrappers.LongLen + wrappers.ByteLen + hashing.HashLen} + p.PackLong(uint64(staker.EndTime().Unix())) + p.PackByte(priority) + p.PackFixedBytes(stakerID) + if p.Err != nil { + return fmt.Errorf("couldn't serialize validator key: %w", p.Err) + } + stopKey := p.Bytes + + return prefixStopDB.Put(stopKey, txBytes) } -// put the validators that are slated to validate the specified subnet in the future -func (vm *VM) putPendingValidators(db database.Database, validators *EventHeap, subnetID ids.ID) error { - if !validators.SortByStartTime { - return errors.New("pending validators should be sorted by start time") - } - err := vm.State.Put(db, validatorsTypeID, subnetID.Prefix(pendingValidatorsPrefix), validators) - if err != nil { - return fmt.Errorf("couldn't put pending validator set: %w", err) - } - return nil +// Remove a staker from subnet [subnetID] +// A staker may be a validator or a delegator +func (vm *VM) removeStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { + var ( + staker TimedTx + priority byte + ) + switch unsignedTx := stakerTx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + staker = unsignedTx + priority = 0 + case *UnsignedAddSubnetValidatorTx: + staker = unsignedTx + priority = 1 + case *UnsignedAddValidatorTx: + staker = unsignedTx + priority 
= 2 + default: + return fmt.Errorf("staker is unexpected type %T", stakerTx) + } + stakerID := staker.ID().Bytes() // Tx ID of this tx + + // Sorted by subnet ID then stop time + prefixStop := []byte(fmt.Sprintf("%s%s", subnetID, stop)) + prefixStopDB := prefixdb.NewNested(prefixStop, db) + defer prefixStopDB.Close() + + p := wrappers.Packer{MaxSize: wrappers.LongLen + wrappers.ByteLen + hashing.HashLen} + p.PackLong(uint64(staker.EndTime().Unix())) + p.PackByte(priority) + p.PackFixedBytes(stakerID) + if p.Err != nil { + return fmt.Errorf("couldn't serialize validator key: %w", p.Err) + } + stopKey := p.Bytes + + return prefixStopDB.Delete(stopKey) } -// get the validators currently validating the specified subnet -func (vm *VM) getValidatorsFromDB( - db database.Database, - subnetID ids.ID, - prefix uint64, - sortByStartTime bool, -) (*EventHeap, error) { - // if current validators aren't specified in database, return empty validator set - key := subnetID.Prefix(prefix) - has, err := vm.State.Has(db, validatorsTypeID, key) - if err != nil { +// Returns the pending staker that will start staking next +func (vm *VM) nextStakerStart(db database.Database, subnetID ids.ID) (*Tx, error) { + iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, start)), db).NewIterator() + defer iter.Release() + + if !iter.Next() { + return nil, errNoValidators + } + // Key: [Staker start time] | [Tx ID] + // Value: Byte repr. of tx that added this validator + + tx := Tx{} + if err := Codec.Unmarshal(iter.Value(), &tx); err != nil { return nil, err } - if !has { - return &EventHeap{SortByStartTime: sortByStartTime}, nil + return &tx, tx.Sign(vm.codec, nil) +} + +// Returns the current staker that will stop staking next +func (vm *VM) nextStakerStop(db database.Database, subnetID ids.ID) (*Tx, error) { + iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, stop)), db).NewIterator() + defer iter.Release() + + if !iter.Next() { + return nil, errNoValidators } - validatorsInterface, err := vm.State.Get(db, validatorsTypeID, key) - if err != nil { + // Key: [Staker stop time] | [Tx ID] + // Value: Byte repr. 
of tx that added this validator + + tx := Tx{} + if err := Codec.Unmarshal(iter.Value(), &tx); err != nil { return nil, err } - validators, ok := validatorsInterface.(*EventHeap) - if !ok { - err := fmt.Errorf("expected to retrieve *EventHeap from database but got type %T", validatorsInterface) - vm.Ctx.Log.Error("error while fetching validators: %s", err) - return nil, err + return &tx, tx.Sign(vm.codec, nil) +} + +// Returns true if [nodeID] is a validator (not a delegator) of subnet [subnetID] +func (vm *VM) isValidator(db database.Database, subnetID ids.ID, nodeID ids.ShortID) (TimedTx, bool, error) { + iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, stop)), db).NewIterator() + defer iter.Release() + + for iter.Next() { + txBytes := iter.Value() + tx := Tx{} + if err := Codec.Unmarshal(txBytes, &tx); err != nil { + return nil, false, err + } + if err := tx.Sign(vm.codec, nil); err != nil { + return nil, false, err + } + + switch vdr := tx.UnsignedTx.(type) { + case *UnsignedAddValidatorTx: + if subnetID.Equals(constants.PrimaryNetworkID) && vdr.Validator.NodeID.Equals(nodeID) { + return vdr, true, nil + } + case *UnsignedAddSubnetValidatorTx: + if subnetID.Equals(vdr.Validator.SubnetID()) && vdr.Validator.NodeID.Equals(nodeID) { + return vdr, true, nil + } + } } - for _, tx := range validators.Txs { + return nil, false, nil +} + +// Returns true if [nodeID] will be a validator (not a delegator) of subnet +// [subnetID] +func (vm *VM) willBeValidator(db database.Database, subnetID ids.ID, nodeID ids.ShortID) (TimedTx, bool, error) { + iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, start)), db).NewIterator() + defer iter.Release() + + for iter.Next() { + txBytes := iter.Value() + tx := Tx{} + if err := Codec.Unmarshal(txBytes, &tx); err != nil { + return nil, false, err + } if err := tx.Sign(vm.codec, nil); err != nil { - return nil, err + return nil, false, err + } + + switch vdr := tx.UnsignedTx.(type) { + case *UnsignedAddValidatorTx: + if subnetID.Equals(constants.PrimaryNetworkID) && vdr.Validator.NodeID.Equals(nodeID) { + return vdr, true, nil + } + case *UnsignedAddSubnetValidatorTx: + if subnetID.Equals(vdr.Validator.SubnetID()) && vdr.Validator.NodeID.Equals(nodeID) { + return vdr, true, nil + } } } - return validators, nil + return nil, false, nil } // getUTXO returns the UTXO with the specified ID @@ -222,7 +418,7 @@ func (vm *VM) removeReferencingUTXO(db database.Database, addrBytes []byte, utxo // Returns at most [limit] UTXOs. // If [limit] <= 0 or [limit] > maxUTXOsToFetch, it is set to [maxUTXOsToFetch]. // Only returns UTXOs associated with addresses >= [startAddr]. -// For address [startAddr], only returns UTXOs whose IDs are greater than [startUtxoID]. +// For address [startAddr], only returns UTXOs whose IDs are greater than [startUTXOID]. 
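The queue helpers above key each staker as an 8-byte start or end time, then a priority byte, then the tx ID, and `nextStakerStart`/`nextStakerStop` simply take the first key the iterator yields. That only returns the earliest staker if the integers are packed big-endian, since big-endian bytes compare lexicographically the same way the numbers compare numerically. The sketch below demonstrates the property with plain `encoding/binary`, on the assumption that `wrappers.Packer.PackLong` writes big-endian; nothing here is the repo's code.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"sort"
)

// makeKey mirrors the assumed layout: 8-byte big-endian timestamp,
// 1-byte priority, 32-byte tx ID.
func makeKey(unixTime uint64, priority byte, txID [32]byte) []byte {
	key := make([]byte, 8, 8+1+32)
	binary.BigEndian.PutUint64(key, unixTime)
	key = append(key, priority)
	return append(key, txID[:]...)
}

func main() {
	var idA, idB [32]byte
	idA[0], idB[0] = 'a', 'b'

	keys := [][]byte{
		makeKey(2000, 0, idB),
		makeKey(1000, 1, idA), // earlier time wins regardless of priority byte
		makeKey(1000, 0, idB), // same time: lower priority byte sorts first
	}
	// Byte-order sort is what a prefixdb iterator effectively walks.
	sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i], keys[j]) < 0 })

	for _, k := range keys {
		fmt.Println(binary.BigEndian.Uint64(k[:8]), k[8])
	}
	// Prints 1000 0, then 1000 1, then 2000 0 — the first key is the "next"
	// staker, which is all nextStakerStart/nextStakerStop need.
}
```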
// Returns: // * The fetched of UTXOs // * The address associated with the last UTXO fetched diff --git a/vms/platformvm/static_service.go b/vms/platformvm/static_service.go index b63a6025e5b1..74244ad4fc31 100644 --- a/vms/platformvm/static_service.go +++ b/vms/platformvm/static_service.go @@ -50,19 +50,8 @@ type APIValidator struct { ID ids.ShortID `json:"id"` } -func (v *APIValidator) weight() uint64 { - switch { - case v.Weight != nil: - return uint64(*v.Weight) - case v.StakeAmount != nil: - return uint64(*v.StakeAmount) - default: - return 0 - } -} - -// APIDefaultSubnetValidator is a validator of the default subnet -type APIDefaultSubnetValidator struct { +// APIPrimaryValidator is a validator of the primary network +type APIPrimaryValidator struct { APIValidator RewardAddress string `json:"rewardAddress"` @@ -89,8 +78,8 @@ func (v *FormattedAPIValidator) weight() uint64 { } } -// FormattedAPIDefaultSubnetValidator is a formatted validator of the default subnet -type FormattedAPIDefaultSubnetValidator struct { +// FormattedAPIPrimaryValidator is a formatted validator of the primary network +type FormattedAPIPrimaryValidator struct { FormattedAPIValidator RewardAddress string `json:"rewardAddress"` @@ -118,17 +107,17 @@ type APIChain struct { // the genesis data of the Platform Chain. // [NetworkID] is the ID of the network // [UTXOs] are the UTXOs on the Platform Chain that exist at genesis. -// [Validators] are the validators of the default subnet at genesis. +// [Validators] are the validators of the primary network at genesis. // [Chains] are the chains that exist at genesis. // [Time] is the Platform Chain's time at network genesis. type BuildGenesisArgs struct { - AvaxAssetID ids.ID `json:"avaxAssetID"` - NetworkID json.Uint32 `json:"address"` - UTXOs []APIUTXO `json:"utxos"` - Validators []FormattedAPIDefaultSubnetValidator `json:"defaultSubnetValidators"` - Chains []APIChain `json:"chains"` - Time json.Uint64 `json:"time"` - Message string `json:"message"` + AvaxAssetID ids.ID `json:"avaxAssetID"` + NetworkID json.Uint32 `json:"address"` + UTXOs []APIUTXO `json:"utxos"` + Validators []FormattedAPIPrimaryValidator `json:"primaryNetworkValidators"` + Chains []APIChain `json:"chains"` + Time json.Uint64 `json:"time"` + Message string `json:"message"` } // BuildGenesisReply is the reply from BuildGenesis @@ -198,7 +187,7 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r }) } - // Specify the validators that are validating the default subnet at genesis. + // Specify the validators that are validating the primary network at genesis. 
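In the static_service.go hunks above, `FormattedAPIPrimaryValidator` still embeds `FormattedAPIValidator`, so its fields flatten into one JSON object under the renamed `primaryNetworkValidators` key. (The `NetworkID` field keeps its odd `json:"address"` tag on both sides of the hunk; that appears to be a pre-existing quirk, not something this change introduces.) A trimmed sketch of the flattening — field sets are abridged, and plain `uint64` stands in for the repo's `json.Uint64`, which would render the numbers as strings:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Abridged stand-ins for the diff's validator types.
type FormattedAPIValidator struct {
	StartTime uint64 `json:"startTime"`
	EndTime   uint64 `json:"endTime"`
	NodeID    string `json:"nodeID"`
}

type FormattedAPIPrimaryValidator struct {
	FormattedAPIValidator        // embedded: fields promote into the outer JSON object
	RewardAddress         string `json:"rewardAddress"`
}

func main() {
	v := FormattedAPIPrimaryValidator{
		FormattedAPIValidator: FormattedAPIValidator{EndTime: 15, NodeID: "NodeID-..."},
		RewardAddress:         "P-...",
	}
	out, _ := json.Marshal(v)
	fmt.Println(string(out))
	// {"startTime":0,"endTime":15,"nodeID":"NodeID-...","rewardAddress":"P-..."}
}
```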
validators := &EventHeap{} for _, validator := range args.Validators { weight := validator.weight() @@ -217,7 +206,7 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r return err } - tx := &Tx{UnsignedTx: &UnsignedAddDefaultSubnetValidatorTx{ + tx := &Tx{UnsignedTx: &UnsignedAddValidatorTx{ BaseTx: BaseTx{BaseTx: avax.BaseTx{ NetworkID: uint32(args.NetworkID), BlockchainID: ids.Empty, diff --git a/vms/platformvm/static_service_test.go b/vms/platformvm/static_service_test.go index ecfabb9016e4..fcfc8fda067a 100644 --- a/vms/platformvm/static_service_test.go +++ b/vms/platformvm/static_service_test.go @@ -25,7 +25,7 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { Amount: 0, } weight := json.Uint64(987654321) - validator := FormattedAPIDefaultSubnetValidator{ + validator := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ EndTime: 15, Weight: &weight, @@ -38,7 +38,7 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { UTXOs: []APIUTXO{ utxo, }, - Validators: []FormattedAPIDefaultSubnetValidator{ + Validators: []FormattedAPIPrimaryValidator{ validator, }, Time: 5, @@ -64,7 +64,7 @@ func TestBuildGenesisInvalidAmount(t *testing.T) { Amount: 123456789, } weight := json.Uint64(0) - validator := FormattedAPIDefaultSubnetValidator{ + validator := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: 0, EndTime: 15, @@ -78,7 +78,7 @@ func TestBuildGenesisInvalidAmount(t *testing.T) { UTXOs: []APIUTXO{ utxo, }, - Validators: []FormattedAPIDefaultSubnetValidator{ + Validators: []FormattedAPIPrimaryValidator{ validator, }, Time: 5, @@ -105,7 +105,7 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { } weight := json.Uint64(987654321) - validator := FormattedAPIDefaultSubnetValidator{ + validator := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: 0, EndTime: 5, @@ -119,7 +119,7 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { UTXOs: []APIUTXO{ utxo, }, - Validators: []FormattedAPIDefaultSubnetValidator{ + Validators: []FormattedAPIPrimaryValidator{ validator, }, Time: 5, @@ -146,7 +146,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { } weight := json.Uint64(987654321) - validator1 := FormattedAPIDefaultSubnetValidator{ + validator1 := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: 0, EndTime: 20, @@ -156,7 +156,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { RewardAddress: addr, } - validator2 := FormattedAPIDefaultSubnetValidator{ + validator2 := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: 3, EndTime: 15, @@ -166,7 +166,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { RewardAddress: addr, } - validator3 := FormattedAPIDefaultSubnetValidator{ + validator3 := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: 1, EndTime: 10, @@ -181,7 +181,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { UTXOs: []APIUTXO{ utxo, }, - Validators: []FormattedAPIDefaultSubnetValidator{ + Validators: []FormattedAPIPrimaryValidator{ validator1, validator2, validator3, diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index deb680f29382..1b7da10d21d2 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -4,15 +4,14 @@ package platformvm import ( - "container/heap" "errors" "fmt" - "math" "time" "github.com/ava-labs/gecko/cache" "github.com/ava-labs/gecko/chains" 
"github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/database/versiondb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" @@ -30,8 +29,6 @@ import ( "github.com/ava-labs/gecko/vms/components/avax" "github.com/ava-labs/gecko/vms/components/core" "github.com/ava-labs/gecko/vms/secp256k1fx" - - safemath "github.com/ava-labs/gecko/utils/math" ) const ( @@ -78,29 +75,19 @@ var ( // taken from https://stackoverflow.com/questions/25065055/what-is-the-maximum-time-time-in-go/32620397#32620397 maxTime = time.Unix(1<<63-62135596801, 0) // 0 is used because we drop the nano-seconds - timestampKey = ids.NewID([32]byte{'t', 'i', 'm', 'e'}) - currentValidatorsKey = ids.NewID([32]byte{'c', 'u', 'r', 'r', 'e', 'n', 't'}) - pendingValidatorsKey = ids.NewID([32]byte{'p', 'e', 'n', 'd', 'i', 'n', 'g'}) - chainsKey = ids.NewID([32]byte{'c', 'h', 'a', 'i', 'n', 's'}) - subnetsKey = ids.NewID([32]byte{'s', 'u', 'b', 'n', 'e', 't', 's'}) + timestampKey = ids.NewID([32]byte{'t', 'i', 'm', 'e'}) + chainsKey = ids.NewID([32]byte{'c', 'h', 'a', 'i', 'n', 's'}) + subnetsKey = ids.NewID([32]byte{'s', 'u', 'b', 'n', 'e', 't', 's'}) ) var ( errEndOfTime = errors.New("program time is suspiciously far in the future. Either this codebase was way more successful than expected, or a critical error has occurred") - errTimeTooAdvanced = errors.New("this is proposing a time too far in the future") errNoPendingBlocks = errors.New("no pending blocks") - errUnsupportedFXs = errors.New("unsupported feature extensions") errRegisteringType = errors.New("error registering type with database") - errMissingBlock = errors.New("missing block") errInvalidLastAcceptedBlock = errors.New("last accepted block must be a decision block") - errInvalidAddress = errors.New("invalid address") - errInvalidAddressSeperator = errors.New("invalid address seperator") - errInvalidAddressPrefix = errors.New("invalid address prefix") - errInvalidAddressSuffix = errors.New("invalid address suffix") - errEmptyAddressPrefix = errors.New("empty address prefix") - errEmptyAddressSuffix = errors.New("empty address suffix") errInvalidID = errors.New("invalid ID") - errDSCantValidate = errors.New("new blockchain can't be validated by default Subnet") + errDSCantValidate = errors.New("new blockchain can't be validated by primary network") + errUnknownTxType = errors.New("unknown transaction type") ) // Codec does serialization and deserialization @@ -128,9 +115,9 @@ func init() { Codec.RegisterType(&secp256k1fx.Input{}), Codec.RegisterType(&secp256k1fx.OutputOwners{}), - Codec.RegisterType(&UnsignedAddDefaultSubnetValidatorTx{}), - Codec.RegisterType(&UnsignedAddNonDefaultSubnetValidatorTx{}), - Codec.RegisterType(&UnsignedAddDefaultSubnetDelegatorTx{}), + Codec.RegisterType(&UnsignedAddValidatorTx{}), + Codec.RegisterType(&UnsignedAddSubnetValidatorTx{}), + Codec.RegisterType(&UnsignedAddDelegatorTx{}), Codec.RegisterType(&UnsignedCreateChainTx{}), Codec.RegisterType(&UnsignedCreateSubnetTx{}), @@ -155,7 +142,7 @@ type VM struct { // Node's validator manager // Maps Subnets --> nodes in the Subnet - validators validators.Manager + vdrMgr validators.Manager // true if the node is being run with staking enabled stakingEnabled bool @@ -201,7 +188,7 @@ type VM struct { } // Initialize this blockchain. -// [vm.ChainManager] and [vm.Validators] must be set before this function is called. +// [vm.ChainManager] and [vm.vdrMgr] must be set before this function is called. 
func (vm *VM) Initialize( ctx *snow.Context, db database.Database, @@ -246,15 +233,17 @@ func (vm *VM) Initialize( } } - validators := &EventHeap{ - SortByStartTime: false, - Txs: genesis.Validators, + // Persist the platform chain's timestamp at genesis + time := time.Unix(int64(genesis.Timestamp), 0) + if err := vm.State.PutTime(vm.DB, timestampKey, time); err != nil { + return err } - heap.Init(validators) - // Persist default subnet validator set at genesis - if err := vm.putCurrentValidators(vm.DB, validators, constants.DefaultSubnetID); err != nil { - return err + // Persist primary network validator set at genesis + for _, vdrTx := range genesis.Validators { + if err := vm.addStaker(vm.DB, constants.PrimaryNetworkID, vdrTx); err != nil { + return err + } } // Persist the subnets that exist at genesis (none do) @@ -283,17 +272,6 @@ func (vm *VM) Initialize( return err } - // Persist the platform chain's timestamp at genesis - time := time.Unix(int64(genesis.Timestamp), 0) - if err := vm.State.PutTime(vm.DB, timestampKey, time); err != nil { - return err - } - - // There are no pending stakers at genesis - if err := vm.putPendingValidators(vm.DB, &EventHeap{SortByStartTime: true}, constants.DefaultSubnetID); err != nil { - return err - } - // Create the genesis block and save it as being accepted (We don't just // do genesisBlock.Accept() because then it'd look for genesisBlock's // non-existent parent) @@ -377,7 +355,7 @@ func (vm *VM) issueTx(tx *Tx) error { case UnsignedAtomicTx: vm.unissuedAtomicTxs = append(vm.unissuedAtomicTxs, tx) default: - return errors.New("Could not parse given tx. Provided tx needs to be a ProposalTx, DecisionTx, or AtomicTx") + return errUnknownTxType } vm.resetTimer() return nil @@ -401,22 +379,11 @@ func (vm *VM) initBlockchains() error { // Set the node's validator manager to be up to date func (vm *VM) initSubnets() error { vm.Ctx.Log.Info("initializing Subnets") - subnets, err := vm.getSubnets(vm.DB) - if err != nil { - return err - } - if err := vm.updateValidators(constants.DefaultSubnetID); err != nil { + if err := vm.updateValidators(vm.DB); err != nil { return err } - - for _, subnet := range subnets { - if err := vm.updateValidators(subnet.ID()); err != nil { - return err - } - } - - return nil + return vm.updateVdrMgr(true) } // Create the blockchain described in [tx], but only if this node is a member of @@ -428,13 +395,14 @@ func (vm *VM) createChain(tx *Tx) { return } // The validators that compose the Subnet that validates this chain - validators, subnetExists := vm.validators.GetValidatorSet(unsignedTx.SubnetID) + validators, subnetExists := vm.vdrMgr.GetValidators(unsignedTx.SubnetID) if !subnetExists { - vm.Ctx.Log.Error("blockchain %s validated by Subnet %s but couldn't get that Subnet. Blockchain not created") + vm.Ctx.Log.Error("blockchain %s validated by Subnet %s but couldn't get that Subnet. 
Blockchain not created", + tx.ID(), unsignedTx.SubnetID) return } if vm.stakingEnabled && // Staking is enabled, so nodes might not validate all chains - !constants.DefaultSubnetID.Equals(unsignedTx.SubnetID) && // All nodes must validate the default subnet + !constants.PrimaryNetworkID.Equals(unsignedTx.SubnetID) && // All nodes must validate the primary network !validators.Contains(vm.Ctx.NodeID) { // This node doesn't validate this blockchain return } @@ -455,7 +423,15 @@ func (vm *VM) createChain(tx *Tx) { func (vm *VM) Bootstrapping() error { vm.bootstrapped = false; return vm.fx.Bootstrapping() } // Bootstrapped marks this VM as bootstrapped -func (vm *VM) Bootstrapped() error { vm.bootstrapped = true; return vm.fx.Bootstrapped() } +func (vm *VM) Bootstrapped() error { + vm.bootstrapped = true + errs := wrappers.Errs{} + errs.Add( + vm.updateVdrMgr(false), + vm.fx.Bootstrapped(), + ) + return errs.Err +} // Shutdown this blockchain func (vm *VM) Shutdown() error { @@ -547,19 +523,20 @@ func (vm *VM) BuildBlock() (snowman.Block, error) { return nil, errEndOfTime } - // If the chain time would be the time for the next default subnet validator to leave, - // then we create a block that removes the validator and proposes they receive a validator reward - currentValidators, err := vm.getCurrentValidators(db, constants.DefaultSubnetID) + // If the chain time would be the time for the next primary network staker to leave, + // then we create a block that removes the staker and proposes they receive a staker reward + nextValidatorEndtime := maxTime + tx, err := vm.nextStakerStop(db, constants.PrimaryNetworkID) if err != nil { - return nil, fmt.Errorf("couldn't get validator set: %w", err) + return nil, err } - nextValidatorEndtime := maxTime - if currentValidators.Len() > 0 { - nextValidatorEndtime = currentValidators.Peek().UnsignedTx.(TimedTx).EndTime() + staker, ok := tx.UnsignedTx.(TimedTx) + if !ok { + return nil, fmt.Errorf("expected staker tx to be TimedTx but got %T", tx) } + nextValidatorEndtime = staker.EndTime() if currentChainTimestamp.Equal(nextValidatorEndtime) { - stakerTx := currentValidators.Peek() - rewardValidatorTx, err := vm.newRewardValidatorTx(stakerTx.ID()) + rewardValidatorTx, err := vm.newRewardValidatorTx(tx.ID()) if err != nil { return nil, err } @@ -573,19 +550,16 @@ func (vm *VM) BuildBlock() (snowman.Block, error) { return blk, vm.DB.Commit() } - // If local time is >= time of the next validator set change, + // If local time is >= time of the next staker set change, // propose moving the chain time forward - nextValidatorStartTime := vm.nextValidatorChangeTime(db /*start=*/, true) - nextValidatorEndTime := vm.nextValidatorChangeTime(db /*start=*/, false) - - nextValidatorSetChangeTime := nextValidatorStartTime - if nextValidatorEndTime.Before(nextValidatorStartTime) { - nextValidatorSetChangeTime = nextValidatorEndTime + nextStakerChangeTime, err := vm.nextStakerChangeTime(db) + if err != nil { + return nil, err } localTime := vm.clock.Time() - if !localTime.Before(nextValidatorSetChangeTime) { // time is at or after the time for the next validator to join/leave - advanceTimeTx, err := vm.newAdvanceTimeTx(nextValidatorSetChangeTime) + if !localTime.Before(nextStakerChangeTime) { // local time is at or after the time for the next staker to start/stop + advanceTimeTx, err := vm.newAdvanceTimeTx(nextStakerChangeTime) if err != nil { return nil, err } @@ -722,31 +696,26 @@ func (vm *VM) resetTimer() { if err != nil { vm.Ctx.Log.Error("could not retrieve timestamp 
from database") return - } - if timestamp.Equal(maxTime) { + } else if timestamp.Equal(maxTime) { vm.Ctx.Log.Error("Program time is suspiciously far in the future. Either this codebase was way more successful than expected, or a critical error has occurred.") return } - nextDSValidatorEndTime := vm.nextSubnetValidatorChangeTime(db, constants.DefaultSubnetID, false) - if timestamp.Equal(nextDSValidatorEndTime) { - vm.SnowmanVM.NotifyBlockReady() // Should issue a ProposeRewardValidator - return - } - // If local time is >= time of the next change in the validator set, // propose moving forward the chain timestamp - nextValidatorStartTime := vm.nextValidatorChangeTime(db, true) - nextValidatorEndTime := vm.nextValidatorChangeTime(db, false) - - nextValidatorSetChangeTime := nextValidatorStartTime - if nextValidatorEndTime.Before(nextValidatorStartTime) { - nextValidatorSetChangeTime = nextValidatorEndTime + nextStakerChangeTime, err := vm.nextStakerChangeTime(db) + if err != nil { + vm.Ctx.Log.Error("couldn't get next staker change time: %w", err) + return + } + if timestamp.Equal(nextStakerChangeTime) { + vm.SnowmanVM.NotifyBlockReady() // Should issue a proposal to reward validator + return } localTime := vm.clock.Time() - if !localTime.Before(nextValidatorSetChangeTime) { // time is at or after the time for the next validator to join/leave - vm.SnowmanVM.NotifyBlockReady() // Should issue a ProposeTimestamp + if !localTime.Before(nextStakerChangeTime) { // time is at or after the time for the next validator to join/leave + vm.SnowmanVM.NotifyBlockReady() // Should issue a proposal to advance timestamp return } @@ -761,161 +730,264 @@ func (vm *VM) resetTimer() { vm.Ctx.Log.Debug("dropping tx to add validator because its start time has passed") } - waitTime := nextValidatorSetChangeTime.Sub(localTime) - vm.Ctx.Log.Debug("next scheduled event is at %s (%s in the future)", nextValidatorSetChangeTime, waitTime) + waitTime := nextStakerChangeTime.Sub(localTime) + vm.Ctx.Log.Debug("next scheduled event is at %s (%s in the future)", nextStakerChangeTime, waitTime) // Wake up when it's time to add/remove the next validator vm.timer.SetTimeoutIn(waitTime) } -// If [start], returns the time at which the next validator (of any subnet) in the pending set starts validating -// Otherwise, returns the time at which the next validator (of any subnet) stops validating -// If no such validator is found, returns maxTime -func (vm *VM) nextValidatorChangeTime(db database.Database, start bool) time.Time { - earliest := vm.nextSubnetValidatorChangeTime(db, constants.DefaultSubnetID, start) +// Returns the time when the next staker of any subnet starts/stops staking +// after the current timestamp +func (vm *VM) nextStakerChangeTime(db database.Database) (time.Time, error) { subnets, err := vm.getSubnets(db) if err != nil { - return earliest + return time.Time{}, fmt.Errorf("couldn't get subnets: %w", err) } + subnetIDs := ids.Set{} + subnetIDs.Add(constants.PrimaryNetworkID) for _, subnet := range subnets { - t := vm.nextSubnetValidatorChangeTime(db, subnet.ID(), start) - if t.Before(earliest) { - earliest = t + subnetIDs.Add(subnet.ID()) + } + + earliest := maxTime + for _, subnetID := range subnetIDs.List() { + if tx, err := vm.nextStakerStart(db, subnetID); err == nil { + if staker, ok := tx.UnsignedTx.(TimedTx); ok { + if startTime := staker.StartTime(); startTime.Before(earliest) { + earliest = startTime + } + } + } + if tx, err := vm.nextStakerStop(db, subnetID); err == nil { + if staker, ok := 
tx.UnsignedTx.(TimedTx); ok { + if endTime := staker.EndTime(); endTime.Before(earliest) { + earliest = endTime + } + } } } - return earliest + return earliest, nil } -func (vm *VM) nextSubnetValidatorChangeTime(db database.Database, subnetID ids.ID, start bool) time.Time { - var validators *EventHeap - var err error - if start { - validators, err = vm.getPendingValidators(db, subnetID) - } else { - validators, err = vm.getCurrentValidators(db, subnetID) - } +// update validator set of [subnetID] based on the current chain timestamp +func (vm *VM) updateValidators(db database.Database) error { + timestamp, err := vm.getTimestamp(db) if err != nil { - vm.Ctx.Log.Error("couldn't get validators of subnet with ID %s: %v", subnetID, err) - return maxTime - } - if validators.Len() == 0 { - vm.Ctx.Log.Verbo("subnet, %s, has no validators", subnetID) - return maxTime + return fmt.Errorf("can't get timestamp: %w", err) } - return validators.Timestamp() -} -// Returns: -// 1) The validator set of subnet with ID [subnetID] when timestamp is advanced to [timestamp] -// 2) The pending validator set of subnet with ID [subnetID] when timestamp is advanced to [timestamp] -// 3) The IDs of the validators that start validating [subnetID] between now and [timestamp] -// 4) The IDs of the validators that stop validating [subnetID] between now and [timestamp] -// Note that this method will not remove validators from the current validator set of the default subnet. -// That happens in reward blocks. -func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, subnetID ids.ID) (current, - pending *EventHeap, started, stopped ids.ShortSet, err error) { - // remove validators whose end time <= [timestamp] - current, err = vm.getCurrentValidators(db, subnetID) + subnets, err := vm.getSubnets(db) if err != nil { - return nil, nil, nil, nil, err + return err } - if !subnetID.Equals(constants.DefaultSubnetID) { // validators of default subnet removed in rewardValidatorTxs, not here - for current.Len() > 0 { - next := current.Peek().UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx) // current validator with earliest end time - if timestamp.Before(next.EndTime()) { - break - } - current.Remove() - stopped.Add(next.Validator.ID()) + + subnetIDs := ids.Set{} + subnetIDs.Add(constants.PrimaryNetworkID) + for _, subnet := range subnets { + subnetIDs.Add(subnet.ID()) + } + subnetIDList := subnetIDs.List() + + for _, subnetID := range subnetIDList { + if err := vm.updateSubnetValidators(db, subnetID, timestamp); err != nil { + return err } } - pending, err = vm.getPendingValidators(db, subnetID) - if err != nil { - return nil, nil, nil, nil, err - } - for pending.Len() > 0 { - nextTx := pending.Peek() // pending staker with earliest start time - switch tx := nextTx.UnsignedTx.(type) { - case *UnsignedAddDefaultSubnetValidatorTx: - if timestamp.Before(tx.StartTime()) { - return current, pending, started, stopped, nil + return nil +} +func (vm *VM) updateSubnetValidators(db database.Database, subnetID ids.ID, timestamp time.Time) error { + startPrefix := []byte(fmt.Sprintf("%s%s", subnetID, start)) + startDB := prefixdb.NewNested(startPrefix, db) + defer startDB.Close() + + startIter := startDB.NewIterator() + defer startIter.Release() + + for startIter.Next() { // Iterates in order of increasing start time + txBytes := startIter.Value() + + tx := Tx{} + if err := vm.codec.Unmarshal(txBytes, &tx); err != nil { + return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + } + if err := tx.Sign(vm.codec, 
nil); err != nil { + return err + } + + switch staker := tx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + if !subnetID.Equals(constants.PrimaryNetworkID) { + return fmt.Errorf("AddDelegatorTx is invalid for subnet %s", + subnetID) + } + if staker.StartTime().After(timestamp) { + return nil + } + if err := vm.dequeueStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't dequeue staker: %w", err) + } + if err := vm.addStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't add staker: %w", err) + } + case *UnsignedAddValidatorTx: + if !subnetID.Equals(constants.PrimaryNetworkID) { + return fmt.Errorf("AddValidatorTx is invalid for subnet %s", + subnetID) + } + if staker.StartTime().After(timestamp) { + return nil + } + if err := vm.dequeueStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't dequeue staker: %w", err) + } + if err := vm.addStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't add staker: %w", err) } - current.Add(nextTx) - pending.Remove() - started.Add(tx.Validator.ID()) - case *UnsignedAddNonDefaultSubnetValidatorTx: - if timestamp.Before(tx.StartTime()) { - return current, pending, started, stopped, nil + case *UnsignedAddSubnetValidatorTx: + if txSubnetID := staker.Validator.SubnetID(); !subnetID.Equals(txSubnetID) { + return fmt.Errorf("AddSubnetValidatorTx references the incorrect subnet. Expected %s; Got %s", + subnetID, txSubnetID) } - current.Add(nextTx) - pending.Remove() - started.Add(tx.Validator.ID()) - case *UnsignedAddDefaultSubnetDelegatorTx: - if timestamp.Before(tx.StartTime()) { - return current, pending, started, stopped, nil + if staker.StartTime().After(timestamp) { + return nil + } + if err := vm.dequeueStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't dequeue staker: %w", err) + } + if err := vm.addStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't add staker: %w", err) } - current.Add(nextTx) - pending.Remove() - started.Add(tx.Validator.ID()) default: - pending.Remove() + return fmt.Errorf("expected validator but got %T", tx.UnsignedTx) } } - return current, pending, started, stopped, nil -} -func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator { - vdrMap := make(map[[20]byte]*Validator, validatorEvents.Len()) - for _, event := range validatorEvents.Txs { - var vdr validators.Validator - switch tx := event.UnsignedTx.(type) { - case *UnsignedAddDefaultSubnetValidatorTx: - vdr = &tx.Validator - case *UnsignedAddDefaultSubnetDelegatorTx: - vdr = &tx.Validator - case *UnsignedAddNonDefaultSubnetValidatorTx: - vdr = &tx.Validator - default: - continue + stopPrefix := []byte(fmt.Sprintf("%s%s", subnetID, stop)) + stopDB := prefixdb.NewNested(stopPrefix, db) + defer stopDB.Close() + + stopIter := stopDB.NewIterator() + defer stopIter.Release() + + for stopIter.Next() { // Iterates in order of increasing start time + txBytes := stopIter.Value() + + tx := Tx{} + if err := vm.codec.Unmarshal(txBytes, &tx); err != nil { + return fmt.Errorf("couldn't unmarshal validator tx: %w", err) } - vdrID := vdr.ID() - vdrKey := vdrID.Key() - validator, exists := vdrMap[vdrKey] - if !exists { - validator = &Validator{NodeID: vdrID} - vdrMap[vdrKey] = validator + if err := tx.Sign(vm.codec, nil); err != nil { + return err } - weight, err := safemath.Add64(validator.Wght, vdr.Weight()) - if err != nil { - weight = math.MaxUint64 + + switch staker := tx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + if 
!subnetID.Equals(constants.PrimaryNetworkID) { + return fmt.Errorf("AddDelegatorTx is invalid for subnet %s", + subnetID) + } + if staker.EndTime().After(timestamp) { + return nil + } + case *UnsignedAddValidatorTx: + if !subnetID.Equals(constants.PrimaryNetworkID) { + return fmt.Errorf("AddValidatorTx is invalid for subnet %s", + subnetID) + } + if staker.EndTime().After(timestamp) { + return nil + } + case *UnsignedAddSubnetValidatorTx: + if txSubnetID := staker.Validator.SubnetID(); !subnetID.Equals(txSubnetID) { + return fmt.Errorf("AddSubnetValidatorTx references the incorrect subnet. Expected %s; Got %s", + subnetID, txSubnetID) + } + if staker.EndTime().After(timestamp) { + return nil + } + if err := vm.removeStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't remove staker: %w", err) + } + default: + return fmt.Errorf("expected validator but got %T", tx.UnsignedTx) } - validator.Wght = weight } - vdrList := make([]validators.Validator, len(vdrMap)) - i := 0 - for _, validator := range vdrMap { - vdrList[i] = validator - i++ - } - return vdrList + errs := wrappers.Errs{} + errs.Add( + startIter.Error(), + stopIter.Error(), + ) + return errs.Err } -// update the node's validator manager to contain the current validator set of the given Subnet -func (vm *VM) updateValidators(subnetID ids.ID) error { - validatorSet, subnetInitialized := vm.validators.GetValidatorSet(subnetID) - if !subnetInitialized { // validator manager doesn't know about this subnet yet - validatorSet = validators.NewSet() - vm.validators.PutValidatorSet(subnetID, validatorSet) +func (vm *VM) updateVdrMgr(force bool) error { + if !force && !vm.bootstrapped { + return nil } - currentValidators, err := vm.getCurrentValidators(vm.DB, subnetID) + subnets, err := vm.getSubnets(vm.DB) if err != nil { return err } - validators := vm.getValidators(currentValidators) - return validatorSet.Set(validators) + subnetIDs := ids.Set{} + subnetIDs.Add(constants.PrimaryNetworkID) + for _, subnet := range subnets { + subnetIDs.Add(subnet.ID()) + } + + for _, subnetID := range subnetIDs.List() { + if err := vm.updateVdrSet(subnetID); err != nil { + return err + } + } + return vm.initBlockchains() +} + +func (vm *VM) updateVdrSet(subnetID ids.ID) error { + vdrs := validators.NewSet() + + stopPrefix := []byte(fmt.Sprintf("%s%s", subnetID, stop)) + stopDB := prefixdb.NewNested(stopPrefix, vm.DB) + defer stopDB.Close() + stopIter := stopDB.NewIterator() + defer stopIter.Release() + + for stopIter.Next() { // Iterates in order of increasing start time + txBytes := stopIter.Value() + + tx := Tx{} + if err := vm.codec.Unmarshal(txBytes, &tx); err != nil { + return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + } + if err := tx.Sign(vm.codec, nil); err != nil { + return err + } + + var err error + switch staker := tx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + err = vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) + case *UnsignedAddValidatorTx: + err = vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) + case *UnsignedAddSubnetValidatorTx: + err = vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) + default: + err = fmt.Errorf("expected validator but got %T", tx.UnsignedTx) + } + if err != nil { + return err + } + } + + errs := wrappers.Errs{} + errs.Add( + vm.vdrMgr.Set(subnetID, vdrs), + stopIter.Error(), + ) + return errs.Err } // Codec ... 
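`updateVdrSet` above walks the stop-time index (keyed by end time) and calls `vdrs.AddWeight` once per staker tx, so a node's sampling weight ends up as the sum of its own stake and every delegation pointed at it. A standalone sketch of that accumulation, with a checked add standing in for the `safemath.Add64` the deleted `getValidators` used — the map and helper are illustrative, not the `validators.Set` API:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

var errOverflow = errors.New("weight overflows uint64")

// addWeight mimics the behavior this diff leans on: repeated calls for the
// same node ID accumulate, so a validator tx and its delegator txs all
// contribute to a single sampling weight for that node.
func addWeight(set map[string]uint64, nodeID string, weight uint64) error {
	cur := set[nodeID]
	if cur > math.MaxUint64-weight {
		return errOverflow // saturating/failing instead of silently wrapping
	}
	set[nodeID] = cur + weight
	return nil
}

func main() {
	set := map[string]uint64{}
	_ = addWeight(set, "NodeID-A", 2000) // hypothetical AddValidatorTx weight
	_ = addWeight(set, "NodeID-A", 25)   // AddDelegatorTx weight, same node
	_ = addWeight(set, "NodeID-B", 2000)
	fmt.Println(set["NodeID-A"], set["NodeID-B"]) // 2025 2000
}
```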
diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index bbec6dc67d69..c29b12796ad9 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -10,6 +10,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/gecko/chains" @@ -26,6 +28,7 @@ import ( "github.com/ava-labs/gecko/snow/engine/snowman/bootstrap" "github.com/ava-labs/gecko/snow/networking/router" "github.com/ava-labs/gecko/snow/networking/sender" + "github.com/ava-labs/gecko/snow/networking/throttler" "github.com/ava-labs/gecko/snow/networking/timeout" "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/constants" @@ -50,7 +53,7 @@ var ( defaultTxFee = uint64(100) // chain timestamp at genesis - defaultGenesisTime = time.Now().Round(time.Second) + defaultGenesisTime = time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC) // time that genesis validators start validating defaultValidateStartTime = defaultGenesisTime @@ -63,13 +66,10 @@ var ( minStake = 5 * units.MilliAvax - // balance of addresses that exist at genesis in defaultVM - defaultBalance = 100 * minStake - // amount all genesis validators stake in defaultVM defaultStakeAmount uint64 = 100 * minStake - // non-default Subnet that exists at genesis in defaultVM + // subnet that exists at genesis in defaultVM // Its controlKeys are keys[0], keys[1], keys[2] testSubnet1 *UnsignedCreateSubnetTx testSubnet1ControlKeys []*crypto.PrivateKeySECP256K1R @@ -78,9 +78,8 @@ var ( ) var ( - errShouldNotifyEngine = errors.New("should have notified engine of block ready") - errShouldPrefCommit = errors.New("should prefer to commit proposal") - errShouldPrefAbort = errors.New("should prefer to abort proposal") + errShouldPrefCommit = errors.New("should prefer to commit proposal") + errShouldPrefAbort = errors.New("should prefer to abort proposal") ) const ( @@ -122,31 +121,6 @@ func defaultContext() *snow.Context { return ctx } -// The UTXOs that exist at genesis in the default VM -func defaultGenesisUTXOs() []*avax.UTXO { - utxos := []*avax.UTXO(nil) - for i, key := range keys { - utxos = append(utxos, - &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: ids.Empty, - OutputIndex: uint32(i), - }, - Asset: avax.Asset{ID: avaxAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: defaultBalance, - OutputOwners: secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }, - ) - } - return utxos -} - // Returns: // 1) The genesis state // 2) The byte representation of the default genesis for tests @@ -165,7 +139,7 @@ func defaultGenesis() (*BuildGenesisArgs, []byte) { } } - genesisValidators := make([]FormattedAPIDefaultSubnetValidator, len(keys)) + genesisValidators := make([]FormattedAPIPrimaryValidator, len(keys)) for i, key := range keys { weight := json.Uint64(defaultWeight) id := key.PublicKey().Address() @@ -173,7 +147,7 @@ func defaultGenesis() (*BuildGenesisArgs, []byte) { if err != nil { panic(err) } - genesisValidators[i] = FormattedAPIDefaultSubnetValidator{ + genesisValidators[i] = FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), @@ -214,9 +188,7 @@ func defaultVM() (*VM, database.Database) { chainDB := prefixdb.New([]byte{0}, baseDB) atomicDB := prefixdb.New([]byte{1}, baseDB) - defaultSubnet := validators.NewSet() // TODO do we need this? 
- vm.validators = validators.NewManager() - vm.validators.PutValidatorSet(constants.DefaultSubnetID, defaultSubnet) + vm.vdrMgr = validators.NewManager() vm.clock.Set(defaultGenesisTime) msgChan := make(chan common.Message, 1) @@ -237,7 +209,7 @@ func defaultVM() (*VM, database.Database) { panic(err) } - // Create a non-default subnet and store it in testSubnet1 + // Create a subnet and store it in testSubnet1 if tx, err := vm.newCreateSubnetTx( 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet // control keys are keys[0], keys[1], keys[2] @@ -314,30 +286,21 @@ func TestGenesis(t *testing.T) { } } - // Ensure current validator set of default subnet is correct - currentValidators, err := vm.getCurrentValidators(vm.DB, constants.DefaultSubnetID) - if err != nil { - t.Fatal(err) - } else if len(currentValidators.Txs) != len(genesisState.Validators) { + // Ensure current validator set of primary network is correct + vdrSet, ok := vm.vdrMgr.GetValidators(constants.PrimaryNetworkID) + if !ok { + t.Fatalf("Missing the primary network validator set") + } + currentValidators := vdrSet.List() + if len(currentValidators) != len(genesisState.Validators) { t.Fatal("vm's current validator set is wrong") - } else if currentValidators.SortByStartTime == true { - t.Fatal("vm's current validators should be sorted by end time") } - currentSampler := validators.NewSet() - currentSampler.Set(vm.getValidators(currentValidators)) for _, key := range keys { - if addr := key.PublicKey().Address(); !currentSampler.Contains(addr) { + if addr := key.PublicKey().Address(); !vdrSet.Contains(addr) { t.Fatalf("should have had validator with NodeID %s", addr) } } - // Ensure pending validator set is correct (empty) - if pendingValidators, err := vm.getPendingValidators(vm.DB, constants.DefaultSubnetID); err != nil { - t.Fatal(err) - } else if pendingValidators.Len() != 0 { - t.Fatal("vm's pending validator set should be empty") - } - // Ensure genesis timestamp is correct if timestamp, err := vm.getTimestamp(vm.DB); err != nil { t.Fatal(err) @@ -351,8 +314,8 @@ func TestGenesis(t *testing.T) { } } -// accept proposal to add validator to default subnet -func TestAddDefaultSubnetValidatorCommit(t *testing.T) { +// accept proposal to add validator to primary network +func TestAddValidatorCommit(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -369,7 +332,7 @@ func TestAddDefaultSubnetValidatorCommit(t *testing.T) { ID := key.PublicKey().Address() // create valid tx - tx, err := vm.newAddDefaultSubnetValidatorTx( + tx, err := vm.newAddValidatorTx( vm.minStake, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -419,19 +382,17 @@ func TestAddDefaultSubnetValidatorCommit(t *testing.T) { } // Verify that new validator now in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, constants.DefaultSubnetID) + _, willBeValidator, err := vm.willBeValidator(vm.DB, constants.PrimaryNetworkID, ID) if err != nil { t.Fatal(err) } - pendingSampler := validators.NewSet() - pendingSampler.Set(vm.getValidators(pendingValidators)) - if !pendingSampler.Contains(ID) { - t.Fatalf("pending validator should have validator with ID %s", ID) + if !willBeValidator { + t.Fatalf("Should have added validator to the pending queue") } } -// verify invalid proposal to add validator to default subnet -func TestInvalidAddDefaultSubnetValidatorCommit(t *testing.T) { +// verify invalid proposal to add validator to primary network +func 
TestInvalidAddValidatorCommit(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -445,7 +406,7 @@ func TestInvalidAddDefaultSubnetValidatorCommit(t *testing.T) { ID := key.PublicKey().Address() // create invalid tx - if tx, err := vm.newAddDefaultSubnetValidatorTx( + if tx, err := vm.newAddValidatorTx( vm.minStake, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -476,8 +437,8 @@ func TestInvalidAddDefaultSubnetValidatorCommit(t *testing.T) { } } -// Reject proposal to add validator to default subnet -func TestAddDefaultSubnetValidatorReject(t *testing.T) { +// Reject proposal to add validator to primary network +func TestAddValidatorReject(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -491,7 +452,7 @@ func TestAddDefaultSubnetValidatorReject(t *testing.T) { ID := key.PublicKey().Address() // create valid tx - tx, err := vm.newAddDefaultSubnetValidatorTx( + tx, err := vm.newAddValidatorTx( vm.minStake, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -543,19 +504,17 @@ func TestAddDefaultSubnetValidatorReject(t *testing.T) { } // Verify that new validator NOT in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, constants.DefaultSubnetID) + _, willBeValidator, err := vm.willBeValidator(vm.DB, constants.PrimaryNetworkID, ID) if err != nil { t.Fatal(err) } - pendingSampler := validators.NewSet() - pendingSampler.Set(vm.getValidators(pendingValidators)) - if pendingSampler.Contains(ID) { - t.Fatalf("should not have added validator to pending validator set") + if willBeValidator { + t.Fatalf("Shouldn't have added validator to the pending queue") } } -// Accept proposal to add validator to non-default subnet -func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) { +// Accept proposal to add validator to subnet +func TestAddSubnetValidatorAccept(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -568,8 +527,8 @@ func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) { // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] - // validates default subnet ([defaultValidateStartTime, defaultValidateEndTime]) - tx, err := vm.newAddNonDefaultSubnetValidatorTx( + // validates primary network ([defaultValidateStartTime, defaultValidateEndTime]) + tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -622,19 +581,17 @@ func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) { } // Verify that new validator is in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.ID()) + _, willBeValidator, err := vm.willBeValidator(vm.DB, testSubnet1.ID(), keys[0].PublicKey().Address()) if err != nil { t.Fatal(err) } - pendingSampler := validators.NewSet() - pendingSampler.Set(vm.getValidators(pendingValidators)) - if !pendingSampler.Contains(keys[0].PublicKey().Address()) { - t.Fatalf("should have added validator to pending validator set") + if !willBeValidator { + t.Fatalf("Should have added validator to the pending queue") } } -// Reject proposal to add validator to non-default subnet -func TestAddNonDefaultSubnetValidatorReject(t *testing.T) { +// Reject proposal to add validator to subnet +func TestAddSubnetValidatorReject(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -644,17 +601,16 @@ func TestAddNonDefaultSubnetValidatorReject(t *testing.T) { startTime := defaultValidateStartTime.Add(Delta).Add(1 * time.Second) endTime := 
-	key, _ := vm.factory.NewPrivateKey()
-	ID := key.PublicKey().Address()
+	nodeID := keys[0].PublicKey().Address()
 
 	// create valid tx
 	// note that [startTime, endTime] is a subset of time that keys[0]
-	// validates default subnet ([defaultValidateStartTime, defaultValidateEndTime])
-	tx, err := vm.newAddNonDefaultSubnetValidatorTx(
+	// validates primary network ([defaultValidateStartTime, defaultValidateEndTime])
+	tx, err := vm.newAddSubnetValidatorTx(
 		defaultWeight,
 		uint64(startTime.Unix()),
 		uint64(endTime.Unix()),
-		keys[0].PublicKey().Address(),
+		nodeID,
 		testSubnet1.ID(),
 		[]*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]},
 	)
@@ -703,18 +659,16 @@ func TestAddNonDefaultSubnetValidatorReject(t *testing.T) {
 	}
 
 	// Verify that new validator NOT in pending validator set
-	pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.ID())
+	_, willBeValidator, err := vm.willBeValidator(vm.DB, testSubnet1.ID(), nodeID)
 	if err != nil {
 		t.Fatal(err)
 	}
-	pendingSampler := validators.NewSet()
-	pendingSampler.Set(vm.getValidators(pendingValidators))
-	if pendingSampler.Contains(ID) {
-		t.Fatalf("should not have added validator to pending validator set")
+	if willBeValidator {
+		t.Fatalf("Shouldn't have added validator to the pending queue")
 	}
 }
 
-// Test case where default subnet validator rewarded
+// Test case where primary network validator rewarded
 func TestRewardValidatorAccept(t *testing.T) {
 	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
@@ -802,15 +756,15 @@ func TestRewardValidatorAccept(t *testing.T) {
 		t.Fatal(err)
 	} else if status != Committed {
 		t.Fatalf("status should be Committed but is %s", status)
-	} else if currentValidators, err := vm.getCurrentValidators(vm.DB, constants.DefaultSubnetID); err != nil {
+	} else if _, isValidator, err := vm.isValidator(vm.DB, constants.PrimaryNetworkID, keys[1].PublicKey().Address()); err != nil {
 		// Verify that genesis validator was rewarded and removed from current validator set
 		t.Fatal(err)
-	} else if currentValidators.Len() != len(keys)-1 {
+	} else if isValidator {
 		t.Fatal("should have removed a genesis validator")
 	}
 }
 
-// Test case where default subnet validator not rewarded
+// Test case where primary network validator not rewarded
 func TestRewardValidatorReject(t *testing.T) {
 	vm, _ := defaultVM()
 	vm.Ctx.Lock.Lock()
@@ -886,14 +840,10 @@ func TestRewardValidatorReject(t *testing.T) {
 		t.Fatal(err)
 	} else if status != Aborted {
 		t.Fatalf("status should be Aborted but is %s", status)
-	}
-
-	// Verify that genesis validator was removed from current validator set
-	currentValidators, err := vm.getCurrentValidators(vm.DB, constants.DefaultSubnetID)
-	if err != nil {
+	} else if _, isValidator, err := vm.isValidator(vm.DB, constants.PrimaryNetworkID, keys[1].PublicKey().Address()); err != nil {
+		// Verify that genesis validator was removed from current validator set
 		t.Fatal(err)
-	}
-	if currentValidators.Len() != len(keys)-1 {
+	} else if isValidator {
 		t.Fatal("should have removed a genesis validator")
 	}
 }
@@ -1005,7 +955,7 @@ func TestCreateSubnet(t *testing.T) {
 	startTime := defaultValidateStartTime.Add(Delta).Add(1 * time.Second)
 	endTime := startTime.Add(MinimumStakingDuration)
 	// [startTime, endTime] is subset of time keys[0] validates default subent so tx is valid
-	if addValidatorTx, err := vm.newAddNonDefaultSubnetValidatorTx(
+	if addValidatorTx, err := vm.newAddSubnetValidatorTx(
 		defaultWeight,
 		uint64(startTime.Unix()),
 		uint64(endTime.Unix()),
@@ -1053,22 +1003,11 @@ func TestCreateSubnet(t *testing.T) {
 		t.Fatal(err)
 	} else if status != Committed {
 		t.Fatalf("status should be Committed but is %s", status)
-	}
-
-	// Verify validator is in pending validator set
-	pendingValidators, err := vm.getPendingValidators(vm.DB, createSubnetTx.ID())
-	if err != nil {
+	} else if _, willBeValidator, err := vm.willBeValidator(vm.DB, createSubnetTx.ID(), nodeID); err != nil {
+		// Verify that validator was added to the pending validator set
 		t.Fatal(err)
-	}
-	foundNewValidator := false
-	for _, tx := range pendingValidators.Txs {
-		if tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.ID().Equals(nodeID) {
-			foundNewValidator = true
-			break
-		}
-	}
-	if !foundNewValidator {
-		t.Fatal("didn't add validator to new subnet's pending validator set")
+	} else if !willBeValidator {
+		t.Fatal("should have added a pending validator")
 	}
 
 	// Advance time to when new validator should start validating
@@ -1110,30 +1049,16 @@ func TestCreateSubnet(t *testing.T) {
 		t.Fatal(err)
 	} else if status != Committed {
 		t.Fatalf("status should be Committed but is %s", status)
-	}
-
-	// Verify validator no longer in pending validator set
-	// Verify validator is in pending validator set
-	if pendingValidators, err = vm.getPendingValidators(vm.DB, createSubnetTx.ID()); err != nil {
+	} else if _, willBeValidator, err := vm.willBeValidator(vm.DB, createSubnetTx.ID(), nodeID); err != nil {
+		// Verify that validator was removed from the pending validator set
 		t.Fatal(err)
-	} else if pendingValidators.Len() != 0 {
-		t.Fatal("pending validator set should be empty")
-	}
-
-	// Verify validator is in current validator set
-	currentValidators, err := vm.getCurrentValidators(vm.DB, createSubnetTx.ID())
-	if err != nil {
+	} else if willBeValidator {
+		t.Fatal("should have removed the pending validator")
+	} else if _, isValidator, err := vm.isValidator(vm.DB, createSubnetTx.ID(), nodeID); err != nil {
+		// Verify that validator was added to the validator set
 		t.Fatal(err)
-	}
-	foundNewValidator = false
-	for _, tx := range currentValidators.Txs {
-		if tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.ID().Equals(nodeID) {
-			foundNewValidator = true
-			break
-		}
-	}
-	if !foundNewValidator {
-		t.Fatal("didn't add validator to new subnet's current validator set")
+	} else if !isValidator {
+		t.Fatal("should have been added to the validator set")
 	}
 
 	// fast forward clock to time validator should stop validating
@@ -1173,16 +1098,16 @@ func TestCreateSubnet(t *testing.T) {
 		t.Fatal(err)
 	} else if status != Committed {
 		t.Fatalf("status should be Committed but is %s", status)
-	}
-	// pending validators and current validator should be empty
-	if pendingValidators, err = vm.getPendingValidators(vm.DB, createSubnetTx.ID()); err != nil {
+	} else if _, willBeValidator, err := vm.willBeValidator(vm.DB, createSubnetTx.ID(), nodeID); err != nil {
+		// Verify that validator was removed from the pending validator set
 		t.Fatal(err)
-	} else if pendingValidators.Len() != 0 {
-		t.Fatal("pending validator set should be empty")
-	} else if currentValidators, err = vm.getCurrentValidators(vm.DB, createSubnetTx.ID()); err != nil {
+	} else if willBeValidator {
+		t.Fatal("should have removed the pending validator")
+	} else if _, isValidator, err := vm.isValidator(vm.DB, createSubnetTx.ID(), nodeID); err != nil {
+		// Verify that validator was added to the validator set
 		t.Fatal(err)
-	} else if currentValidators.Len() != 0 {
-		t.Fatal("pending validator set should be empty")
+	} else if isValidator {
+		t.Fatal("should have removed from the validator set")
 	}
 }
 
@@ -1350,9 +1275,7 @@ func TestRestartPartiallyAccepted(t *testing.T) {
 		SnowmanVM:    &core.SnowmanVM{},
 		chainManager: chains.MockManager{},
 	}
-	firstDefaultSubnet := validators.NewSet()
-	firstVM.validators = validators.NewManager()
-	firstVM.validators.PutValidatorSet(constants.DefaultSubnetID, firstDefaultSubnet)
+	firstVM.vdrMgr = validators.NewManager()
 
 	firstVM.clock.Set(defaultGenesisTime)
 	firstCtx := defaultContext()
 	firstCtx.Lock.Lock()
@@ -1423,9 +1346,7 @@ func TestRestartPartiallyAccepted(t *testing.T) {
 		chainManager: chains.MockManager{},
 	}
 
-	secondDefaultSubnet := validators.NewSet()
-	secondVM.validators = validators.NewManager()
-	secondVM.validators.PutValidatorSet(constants.DefaultSubnetID, secondDefaultSubnet)
+	secondVM.vdrMgr = validators.NewManager()
 
 	secondVM.clock.Set(defaultGenesisTime)
 	secondCtx := defaultContext()
@@ -1456,9 +1377,7 @@ func TestRestartFullyAccepted(t *testing.T) {
 		chainManager: chains.MockManager{},
 	}
 
-	firstDefaultSubnet := validators.NewSet()
-	firstVM.validators = validators.NewManager()
-	firstVM.validators.PutValidatorSet(constants.DefaultSubnetID, firstDefaultSubnet)
+	firstVM.vdrMgr = validators.NewManager()
 
 	firstVM.clock.Set(defaultGenesisTime)
 	firstCtx := defaultContext()
@@ -1544,9 +1463,7 @@ func TestRestartFullyAccepted(t *testing.T) {
 		chainManager: chains.MockManager{},
 	}
 
-	secondDefaultSubnet := validators.NewSet()
-	secondVM.validators = validators.NewManager()
-	secondVM.validators.PutValidatorSet(constants.DefaultSubnetID, secondDefaultSubnet)
+	secondVM.vdrMgr = validators.NewManager()
 
 	secondVM.clock.Set(defaultGenesisTime)
 	secondCtx := defaultContext()
@@ -1582,9 +1499,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
 		chainManager: chains.MockManager{},
 	}
 
-	defaultSubnet := validators.NewSet()
-	vm.validators = validators.NewManager()
-	vm.validators.PutValidatorSet(constants.DefaultSubnetID, defaultSubnet)
+	vm.vdrMgr = validators.NewManager()
 
 	vm.clock.Set(defaultGenesisTime)
 	ctx := defaultContext()
@@ -1620,7 +1535,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
 	peerID := ids.NewShortID([20]byte{1, 2, 3, 4, 5, 4, 3, 2, 1})
 	vdrs := validators.NewSet()
-	vdrs.Add(validators.NewValidator(peerID, 1))
+	vdrs.AddWeight(peerID, 1)
 	beacons := vdrs
 
 	timeoutManager := timeout.Manager{}
@@ -1670,8 +1585,9 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
 		vdrs,
 		msgChan,
 		1000,
-		router.DefaultStakerPortion,
-		router.DefaultStakerPortion,
+		throttler.DefaultMaxNonStakerPendingMsgs,
+		throttler.DefaultStakerPortion,
+		throttler.DefaultStakerPortion,
 		"",
 		prometheus.NewRegistry(),
 	)
@@ -1735,9 +1651,7 @@ func TestUnverifiedParent(t *testing.T) {
 		chainManager: chains.MockManager{},
 	}
 
-	defaultSubnet := validators.NewSet()
-	vm.validators = validators.NewManager()
-	vm.validators.PutValidatorSet(constants.DefaultSubnetID, defaultSubnet)
+	vm.vdrMgr = validators.NewManager()
 
 	vm.clock.Set(defaultGenesisTime)
 	ctx := defaultContext()
@@ -1856,3 +1770,41 @@ func TestFormatAddress(t *testing.T) {
 		})
 	}
 }
+
+func TestNextValidatorStartTime(t *testing.T) {
+	vm, _ := defaultVM()
+	vm.Ctx.Lock.Lock()
+	defer func() {
+		vm.Shutdown()
+		vm.Ctx.Lock.Unlock()
+	}()
+
+	currentTime, err := vm.getTimestamp(vm.DB)
+	assert.NoError(t, err)
+
+	startTime := currentTime.Add(time.Second)
+	endTime := startTime.Add(MinimumStakingDuration)
+
+	tx, err := vm.newAddValidatorTx(
+		vm.minStake,                             // stake amount
+		uint64(startTime.Unix()),                // start time
+		uint64(endTime.Unix()),                  // end time
+		vm.Ctx.NodeID,                           // node ID
+		ids.GenerateTestShortID(),               // reward address
+		NumberOfShares,                          // shares
+		[]*crypto.PrivateKeySECP256K1R{keys[0]}, // key
+	)
+	assert.NoError(t, err)
+
+	err = vm.enqueueStaker(vm.DB, constants.PrimaryNetworkID, tx)
+	assert.NoError(t, err)
+
+	nextStaker, err := vm.nextStakerStart(vm.DB, constants.PrimaryNetworkID)
+	assert.NoError(t, err)
+	assert.Equal(
+		t,
+		tx.ID().Bytes(),
+		nextStaker.ID().Bytes(),
+		"should have marked the new tx as the next validator to be added",
+	)
+}
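Every vm_test.go hunk above makes the same mechanical substitution: instead of fetching a pending or current validator list and scanning it by hand, the tests now ask the VM about a single node ID. A hypothetical helper, not part of this patch, shows the shape of the new assertion; the willBeValidator signature is inferred from the call sites above, and its first return value is discarded here because every call site in this diff discards it:

    // assertPending wraps the willBeValidator pattern the updated tests repeat inline.
    func assertPending(t *testing.T, vm *VM, subnetID ids.ID, nodeID ids.ShortID, want bool) {
        _, willBeValidator, err := vm.willBeValidator(vm.DB, subnetID, nodeID)
        if err != nil {
            t.Fatal(err)
        }
        if willBeValidator != want {
            t.Fatalf("willBeValidator(%s) = %t, want %t", nodeID, willBeValidator, want)
        }
    }

The same shape works for isValidator, which the reward and CreateSubnet tests above use to check the current validator set.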
diff --git a/vms/propertyfx/fx.go b/vms/propertyfx/fx.go
index 41cd2252ce08..95209d76d1c3 100644
--- a/vms/propertyfx/fx.go
+++ b/vms/propertyfx/fx.go
@@ -13,14 +13,9 @@ var (
 	errWrongUTXOType       = errors.New("wrong utxo type")
 	errWrongOperationType  = errors.New("wrong operation type")
 	errWrongCredentialType = errors.New("wrong credential type")
-
-	errNoUTXOs            = errors.New("an operation must consume at least one UTXO")
-	errWrongNumberOfUTXOs = errors.New("wrong number of UTXOs for the operation")
-	errWrongNumberOfCreds = errors.New("wrong number of credentials for the operation")
-
-	errWrongMintOutput = errors.New("wrong mint output provided")
-
-	errCantTransfer = errors.New("cant transfer with this fx")
+	errWrongNumberOfUTXOs  = errors.New("wrong number of UTXOs for the operation")
+	errWrongMintOutput     = errors.New("wrong mint output provided")
+	errCantTransfer        = errors.New("cant transfer with this fx")
 )
 
 // Fx ...
diff --git a/vms/secp256k1fx/fx.go b/vms/secp256k1fx/fx.go
index 74575fcd69c2..5681a9c99504 100644
--- a/vms/secp256k1fx/fx.go
+++ b/vms/secp256k1fx/fx.go
@@ -17,7 +17,6 @@ var (
 	errWrongTxType         = errors.New("wrong tx type")
 	errWrongOpType         = errors.New("wrong operation type")
 	errWrongUTXOType       = errors.New("wrong utxo type")
-	errWrongOutputType     = errors.New("wrong output type")
 	errWrongInputType      = errors.New("wrong input type")
 	errWrongCredentialType = errors.New("wrong credential type")
 	errWrongOwnerType      = errors.New("wrong owner type")
diff --git a/vms/secp256k1fx/fx_test.go b/vms/secp256k1fx/fx_test.go
index bdcfb973303c..4fe05c084bbe 100644
--- a/vms/secp256k1fx/fx_test.go
+++ b/vms/secp256k1fx/fx_test.go
@@ -58,10 +58,6 @@ func (vm *testVM) Clock() *timer.Clock { return &vm.clock }
 
 func (vm *testVM) Logger() logging.Logger { return logging.NoLog{} }
 
-type testCodec struct{}
-
-func (c *testCodec) RegisterStruct(interface{}) {}
-
 type testTx struct{ bytes []byte }
 
 func (tx *testTx) UnsignedBytes() []byte { return tx.bytes }
diff --git a/vms/secp256k1fx/keychain.go b/vms/secp256k1fx/keychain.go
index be6b89edbdec..47321fb66a38 100644
--- a/vms/secp256k1fx/keychain.go
+++ b/vms/secp256k1fx/keychain.go
@@ -15,8 +15,7 @@ import (
 )
 
 var (
-	errLockedFunds = errors.New("funds currently locked")
-	errCantSpend   = errors.New("unable to spend this UTXO")
+	errCantSpend = errors.New("unable to spend this UTXO")
 )
 
 // Keychain is a collection of keys that can be used to spend outputs
diff --git a/vms/spchainvm/consensus_benchmark_test.go b/vms/spchainvm/consensus_benchmark_test.go
index 019ffe0ab3eb..1f73ed5fd105 100644
--- a/vms/spchainvm/consensus_benchmark_test.go
+++ b/vms/spchainvm/consensus_benchmark_test.go
@@ -18,6 +18,7 @@ import (
 	"github.com/ava-labs/gecko/snow/engine/snowman/bootstrap"
 	"github.com/ava-labs/gecko/snow/networking/router"
 	"github.com/ava-labs/gecko/snow/networking/sender"
+	"github.com/ava-labs/gecko/snow/networking/throttler"
 	"github.com/ava-labs/gecko/snow/networking/timeout"
 	"github.com/ava-labs/gecko/snow/validators"
"github.com/ava-labs/gecko/utils/logging" @@ -54,7 +55,7 @@ func ConsensusLeader(numBlocks, numTxsPerBlock int, b *testing.B) { msgChan := make(chan common.Message, 1000) vdrs := validators.NewSet() - vdrs.Add(validators.NewValidator(ctx.NodeID, 1)) + vdrs.AddWeight(ctx.NodeID, 1) beacons := validators.NewSet() timeoutManager := timeout.Manager{} @@ -111,8 +112,9 @@ func ConsensusLeader(numBlocks, numTxsPerBlock int, b *testing.B) { vdrs, msgChan, 1000, - router.DefaultStakerPortion, - router.DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -191,7 +193,7 @@ func ConsensusFollower(numBlocks, numTxsPerBlock int, b *testing.B) { msgChan := make(chan common.Message, 1000) vdrs := validators.NewSet() - vdrs.Add(validators.NewValidator(ctx.NodeID, 1)) + vdrs.AddWeight(ctx.NodeID, 1) beacons := validators.NewSet() timeoutManager := timeout.Manager{} @@ -253,8 +255,9 @@ func ConsensusFollower(numBlocks, numTxsPerBlock int, b *testing.B) { vdrs, msgChan, 1000, - router.DefaultStakerPortion, - router.DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) diff --git a/vms/spchainvm/vm.go b/vms/spchainvm/vm.go index 5a4d37d17bf9..f73152646ae1 100644 --- a/vms/spchainvm/vm.go +++ b/vms/spchainvm/vm.go @@ -37,7 +37,6 @@ var ( var ( errNoTxs = errors.New("no transactions") - errUnknownBlock = errors.New("unknown block") errUnsupportedFXs = errors.New("unsupported feature extensions") ) diff --git a/vms/spchainvm/vm_test.go b/vms/spchainvm/vm_test.go index e26f4f424f80..20f951cba644 100644 --- a/vms/spchainvm/vm_test.go +++ b/vms/spchainvm/vm_test.go @@ -79,8 +79,8 @@ func TestPayments(t *testing.T) { sender.Default(true) vdrs := validators.NewSet() - vdr := validators.GenerateRandomValidator(1) - vdrs.Add(vdr) + vdr := ids.GenerateTestShortID() + vdrs.AddWeight(vdr, 1) ctx.Lock.Lock() consensus := smeng.Transitive{} @@ -141,7 +141,7 @@ func TestPayments(t *testing.T) { queriedVtxIDSet := ids.Set{} queriedVtxIDSet.Add(*queriedVtxID) - consensus.Chits(vdr.ID(), *queryRequestID, queriedVtxIDSet) + consensus.Chits(vdr, *queryRequestID, queriedVtxIDSet) if account := vm.GetAccount(vm.baseDB, keys[0].PublicKey().Address()); account.Balance() != 20*units.KiloAvax-200 { t.Fatalf("Wrong Balance") diff --git a/vms/spdagvm/prefixed_state.go b/vms/spdagvm/prefixed_state.go index 99cc834a010a..29cf5f511c28 100644 --- a/vms/spdagvm/prefixed_state.go +++ b/vms/spdagvm/prefixed_state.go @@ -29,8 +29,6 @@ type prefixedState struct { tx, utxo, txStatus, funds cache.Cacher uniqueTx cache.Deduplicator - - generatedStatus ids.ID } // UniqueTx de-duplicates the transaction. 
diff --git a/vms/spdagvm/vm.go b/vms/spdagvm/vm.go
index 42a94e16a050..49a39ff6b399 100644
--- a/vms/spdagvm/vm.go
+++ b/vms/spdagvm/vm.go
@@ -36,7 +36,6 @@ const (
 )
 
 var (
-	errNoKeys          = errors.New("no private keys were provided")
 	errUnknownUTXOType = errors.New("utxo has unknown output type")
 	errAsset           = errors.New("assetID must be blank")
 	errAmountOverflow  = errors.New("the amount of this transaction plus the transaction fee overflows")
diff --git a/vms/spdagvm/vm_test.go b/vms/spdagvm/vm_test.go
index ae06305839f2..dd42e3a03839 100644
--- a/vms/spdagvm/vm_test.go
+++ b/vms/spdagvm/vm_test.go
@@ -333,8 +333,7 @@ func TestRPCAPI(t *testing.T) {
 	// Inverse of the above map
 	pkToAddr := map[string]string{}
 
-	pks := []string{}       // List of private keys
-	addresses := []string{} // List of addresses controlled by the private keys
+	pks := []string{} // List of private keys
 
 	// Populate the above data structures using [keys]
 	for _, v := range keys {
@@ -347,7 +346,6 @@ func TestRPCAPI(t *testing.T) {
 		pkToAddr[pk] = address
 
 		pks = append(pks, pk)
-		addresses = append(addresses, address)
 	}
 
 	// Ensure GetAddress and GetBalance return the correct values for the
@@ -537,7 +535,6 @@ func TestMultipleSend(t *testing.T) {
 	addrToPK := map[string]string{}
 	pkToAddr := map[string]string{}
 	pks := []string{}
-	addresses := []string{}
 	for _, v := range keys {
 		cb58 := formatting.CB58{Bytes: v.Bytes()}
 		pk := cb58.String()
@@ -548,7 +545,6 @@ func TestMultipleSend(t *testing.T) {
 		pkToAddr[pk] = address
 
 		pks = append(pks, pk)
-		addresses = append(addresses, address)
 	}
 
 	ctx.Lock.Lock()
diff --git a/vms/timestampvm/service.go b/vms/timestampvm/service.go
index 73af6d9c77b4..cbb98d00898a 100644
--- a/vms/timestampvm/service.go
+++ b/vms/timestampvm/service.go
@@ -8,13 +8,11 @@ import (
 	"net/http"
 
 	"github.com/ava-labs/gecko/ids"
-	"github.com/ava-labs/gecko/utils/json"
-
 	"github.com/ava-labs/gecko/utils/formatting"
+	"github.com/ava-labs/gecko/utils/json"
 )
 
 var (
-	errDBError     = errors.New("error getting data from database")
 	errBadData     = errors.New("data must be base 58 repr. of 32 bytes")
 	errNoSuchBlock = errors.New("couldn't get block from database. Does it exist?")
 )
diff --git a/xputtest/avmwallet/wallet.go b/xputtest/avmwallet/wallet.go
index f51d511eea89..fb51f56843a6 100644
--- a/xputtest/avmwallet/wallet.go
+++ b/xputtest/avmwallet/wallet.go
@@ -37,8 +37,7 @@ type Wallet struct {
 	balance map[[32]byte]uint64
 	txFee   uint64
 
-	txsSent int32
-	txs     []*avm.Tx
+	txs []*avm.Tx
 }
 
 // NewWallet returns a new Wallet
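A second theme running through the handler setups above: the message-throttling defaults move from the router package to the new snow/networking/throttler package, and each Initialize call gains one argument, a cap on pending messages from non-staking peers. A sketch of the new triple in call-site order; the variable names are illustrative only, since the diff shows neither the parameter names nor their types:

    // throttlingArgs names the three throttling values every updated
    // Initialize call site now passes, in the order they appear.
    func throttlingArgs() {
        maxNonStakerPending := throttler.DefaultMaxNonStakerPendingMsgs // new argument
        stakerCPUPortion := throttler.DefaultStakerPortion              // was router.DefaultStakerPortion
        stakerMsgPortion := throttler.DefaultStakerPortion              // was router.DefaultStakerPortion
        _, _, _ = maxNonStakerPending, stakerCPUPortion, stakerMsgPortion
    }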