From 27fee52f99d34fefb21a5a603b2ee132e8ad1b5d Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 23 Aug 2020 14:48:56 -0400 Subject: [PATCH 01/47] cleaned up snowstorm error handling --- snow/consensus/avalanche/consensus.go | 2 +- snow/consensus/avalanche/consensus_test.go | 77 +++++++++++-------- snow/consensus/avalanche/topological.go | 18 +++-- snow/consensus/snowstorm/common.go | 9 ++- snow/consensus/snowstorm/consensus.go | 2 +- snow/consensus/snowstorm/directed.go | 6 +- snow/consensus/snowstorm/input.go | 6 +- snow/consensus/snowstorm/metrics.go | 89 +++++++++++++--------- snow/consensus/snowstorm/test_tx_test.go | 24 ------ snow/engine/avalanche/transitive.go | 3 +- snow/engine/avalanche/transitive_test.go | 11 ++- 11 files changed, 133 insertions(+), 114 deletions(-) delete mode 100644 snow/consensus/snowstorm/test_tx_test.go diff --git a/snow/consensus/avalanche/consensus.go b/snow/consensus/avalanche/consensus.go index a4ee7c52003b..154ddf5a9653 100644 --- a/snow/consensus/avalanche/consensus.go +++ b/snow/consensus/avalanche/consensus.go @@ -23,7 +23,7 @@ type Consensus interface { // called, the status maps should be immediately updated accordingly. // Assumes each element in the accepted frontier will return accepted from // the join status map. - Initialize(*snow.Context, Parameters, []Vertex) + Initialize(*snow.Context, Parameters, []Vertex) error // Returns the parameters that describe this avalanche instance Parameters() Parameters diff --git a/snow/consensus/avalanche/consensus_test.go b/snow/consensus/avalanche/consensus_test.go index 9cbddbe8f4ac..99bb89a6b3f1 100644 --- a/snow/consensus/avalanche/consensus_test.go +++ b/snow/consensus/avalanche/consensus_test.go @@ -122,18 +122,21 @@ func ParamsTest(t *testing.T, factory Factory) { ctx := snow.DefaultContextTest() params := Parameters{ Parameters: snowball.Parameters{ - Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID.String()), - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID.String()), + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, } - avl.Initialize(ctx, params, nil) + if err := avl.Initialize(ctx, params, nil); err != nil { + t.Fatal(err) + } if p := avl.Parameters(); p.K != params.K { t.Fatalf("Wrong K parameter") @@ -153,11 +156,12 @@ func AddTest(t *testing.T, factory Factory) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -174,7 +178,9 @@ func AddTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID()} - avl.Initialize(snow.DefaultContextTest(), params, vts) + if err := avl.Initialize(snow.DefaultContextTest(), params, vts); err != nil { + t.Fatal(err) + } if !avl.Finalized() { t.Fatalf("An empty avalanche instance is not finalized") @@ -248,11 +254,12 @@ func VertexIssuedTest(t *testing.T, factory Factory) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -269,7 +276,9 @@ func VertexIssuedTest(t *testing.T, 
factory Factory) { } utxos := []ids.ID{ids.GenerateTestID()} - avl.Initialize(snow.DefaultContextTest(), params, vts) + if err := avl.Initialize(snow.DefaultContextTest(), params, vts); err != nil { + t.Fatal(err) + } if !avl.VertexIssued(vts[0]) { t.Fatalf("Genesis Vertex not reported as issued") @@ -305,11 +314,12 @@ func TxIssuedTest(t *testing.T, factory Factory) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -334,7 +344,9 @@ func TxIssuedTest(t *testing.T, factory Factory) { }} tx1.InputIDsV.Add(utxos[0]) - avl.Initialize(snow.DefaultContextTest(), params, vts) + if err := avl.Initialize(snow.DefaultContextTest(), params, vts); err != nil { + t.Fatal(err) + } if !avl.TxIssued(tx0) { t.Fatalf("Genesis Tx not reported as issued") @@ -675,11 +687,12 @@ func IgnoreInvalidVotingTest(t *testing.T, factory Factory) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -697,7 +710,9 @@ func IgnoreInvalidVotingTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID()} - avl.Initialize(snow.DefaultContextTest(), params, vts) + if err := avl.Initialize(snow.DefaultContextTest(), params, vts); err != nil { + t.Fatal(err) + } tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), diff --git a/snow/consensus/avalanche/topological.go b/snow/consensus/avalanche/topological.go index a0447c03ff5a..eca9162702d3 100644 --- a/snow/consensus/avalanche/topological.go +++ b/snow/consensus/avalanche/topological.go @@ -59,26 +59,34 @@ type kahnNode struct { } // Initialize implements the Avalanche interface -func (ta *Topological) Initialize(ctx *snow.Context, params Parameters, frontier []Vertex) { - ctx.Log.AssertDeferredNoError(params.Valid) +func (ta *Topological) Initialize( + ctx *snow.Context, + params Parameters, + frontier []Vertex, +) error { + if err := params.Valid(); err != nil { + return err + } ta.ctx = ctx ta.params = params if err := ta.metrics.Initialize(ctx.Log, params.Namespace, params.Metrics); err != nil { - ta.ctx.Log.Error("%s", err) + return err } ta.nodes = make(map[[32]byte]Vertex, minMapSize) ta.cg = &snowstorm.Directed{} - ta.cg.Initialize(ctx, params.Parameters) + if err := ta.cg.Initialize(ctx, params.Parameters); err != nil { + return err + } ta.frontier = make(map[[32]byte]Vertex, minMapSize) for _, vtx := range frontier { ta.frontier[vtx.ID().Key()] = vtx } - ctx.Log.AssertNoError(ta.updateFrontiers()) + return ta.updateFrontiers() } // Parameters implements the Avalanche interface diff --git a/snow/consensus/snowstorm/common.go b/snow/consensus/snowstorm/common.go index a64685dc2217..6fe964e750ee 100644 --- a/snow/consensus/snowstorm/common.go +++ b/snow/consensus/snowstorm/common.go @@ -4,6 +4,8 @@ package snowstorm import ( + "fmt" + "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/consensus/snowball" @@ -44,15 +46,14 @@ type common struct { } // Initialize implements the ConflictGraph interface -func (c *common) Initialize(ctx *snow.Context, params snowball.Parameters) { - 
ctx.Log.AssertDeferredNoError(params.Valid) - +func (c *common) Initialize(ctx *snow.Context, params snowball.Parameters) error { c.ctx = ctx c.params = params if err := c.metrics.Initialize(params.Namespace, params.Metrics); err != nil { - ctx.Log.Error("failed to initialize metrics: %s", err) + return fmt.Errorf("failed to initialize metrics: %s", err) } + return params.Valid() } // Parameters implements the Snowstorm interface diff --git a/snow/consensus/snowstorm/consensus.go b/snow/consensus/snowstorm/consensus.go index 7f05c34bf845..67f24b0f33f1 100644 --- a/snow/consensus/snowstorm/consensus.go +++ b/snow/consensus/snowstorm/consensus.go @@ -18,7 +18,7 @@ type Consensus interface { fmt.Stringer // Takes in the context, alpha, betaVirtuous, and betaRogue - Initialize(*snow.Context, snowball.Parameters) + Initialize(*snow.Context, snowball.Parameters) error // Returns the parameters that describe this snowstorm instance Parameters() snowball.Parameters diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go index e33640f71462..1394c7f0eac7 100644 --- a/snow/consensus/snowstorm/directed.go +++ b/snow/consensus/snowstorm/directed.go @@ -46,11 +46,11 @@ type directedTx struct { } // Initialize implements the Consensus interface -func (dg *Directed) Initialize(ctx *snow.Context, params snowball.Parameters) { - dg.common.Initialize(ctx, params) - +func (dg *Directed) Initialize(ctx *snow.Context, params snowball.Parameters) error { dg.utxos = make(map[[32]byte]ids.Set) dg.txs = make(map[[32]byte]*directedTx) + + return dg.common.Initialize(ctx, params) } // IsVirtuous implements the Consensus interface diff --git a/snow/consensus/snowstorm/input.go b/snow/consensus/snowstorm/input.go index 0c191618a19f..1dd6aff99c96 100644 --- a/snow/consensus/snowstorm/input.go +++ b/snow/consensus/snowstorm/input.go @@ -53,11 +53,11 @@ type inputUtxo struct { } // Initialize implements the ConflictGraph interface -func (ig *Input) Initialize(ctx *snow.Context, params snowball.Parameters) { - ig.common.Initialize(ctx, params) - +func (ig *Input) Initialize(ctx *snow.Context, params snowball.Parameters) error { ig.txs = make(map[[32]byte]inputTx) ig.utxos = make(map[[32]byte]inputUtxo) + + return ig.common.Initialize(ctx, params) } // IsVirtuous implements the ConflictGraph interface diff --git a/snow/consensus/snowstorm/metrics.go b/snow/consensus/snowstorm/metrics.go index 22add0082443..36a5332c43f3 100644 --- a/snow/consensus/snowstorm/metrics.go +++ b/snow/consensus/snowstorm/metrics.go @@ -4,20 +4,34 @@ package snowstorm import ( - "fmt" "time" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/utils/wrappers" ) type metrics struct { - numProcessing prometheus.Gauge - latAccepted, latRejected prometheus.Histogram + // numProcessing keeps track of the number of transactions currently + // processing in a snowstorm instance + numProcessing prometheus.Gauge - clock timer.Clock + // accepted tracks the number of milliseconds that a transaction was + // processing before being accepted + accepted prometheus.Histogram + + // rejected tracks the number of milliseconds that a transaction was + // processing before being rejected + rejected prometheus.Histogram + + // clock gives access to the current wall clock time + clock timer.Clock + + // processing keeps track of the time that each transaction was issued into + // the snowstorm instance. 
This is used to calculate the amount of time to + // accept or reject the transaction processing map[[32]byte]time.Time } @@ -25,44 +39,43 @@ type metrics struct { func (m *metrics) Initialize(namespace string, registerer prometheus.Registerer) error { m.processing = make(map[[32]byte]time.Time) - m.numProcessing = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "tx_processing", - Help: "Number of processing transactions", - }) - m.latAccepted = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "tx_accepted", - Help: "Latency of accepting from the time the transaction was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) - m.latRejected = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "tx_rejected", - Help: "Latency of rejecting from the time the transaction was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) - - if err := registerer.Register(m.numProcessing); err != nil { - return fmt.Errorf("Failed to register tx_processing statistics due to %s", err) - } - if err := registerer.Register(m.latAccepted); err != nil { - return fmt.Errorf("Failed to register tx_accepted statistics due to %s", err) - } - if err := registerer.Register(m.latRejected); err != nil { - return fmt.Errorf("Failed to register tx_rejected statistics due to %s", err) - } - return nil + m.numProcessing = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "tx_processing", + Help: "Number of processing transactions", + }) + m.accepted = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "tx_accepted", + Help: "Time spent processing before being accepted in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) + m.rejected = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "tx_rejected", + Help: "Time spent processing before being rejected in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) + + errs := wrappers.Errs{} + errs.Add( + registerer.Register(m.numProcessing), + registerer.Register(m.accepted), + registerer.Register(m.rejected), + ) + return errs.Err } +// Issued marks that a transaction with the provided ID was added to the +// snowstorm consensus instance. It is assumed that either Accept or Reject will +// be called with this same ID in the future. func (m *metrics) Issued(id ids.ID) { m.processing[id.Key()] = m.clock.Time() m.numProcessing.Inc() } +// Accepted marks that a transaction with the provided ID was accepted. It is +// assumed that Issued was previously called with this ID. func (m *metrics) Accepted(id ids.ID) { key := id.Key() start := m.processing[key] @@ -70,10 +83,12 @@ func (m *metrics) Accepted(id ids.ID) { delete(m.processing, key) - m.latAccepted.Observe(float64(end.Sub(start).Milliseconds())) + m.accepted.Observe(float64(end.Sub(start).Milliseconds())) m.numProcessing.Dec() } +// Rejected marks that a transaction with the provided ID was rejected. It is +// assumed that Issued was previously called with this ID. 
func (m *metrics) Rejected(id ids.ID) {
 	key := id.Key()
 	start := m.processing[key]
@@ -81,6 +96,6 @@ func (m *metrics) Rejected(id ids.ID) {
 
 	delete(m.processing, key)
 
-	m.latRejected.Observe(float64(end.Sub(start).Milliseconds()))
+	m.rejected.Observe(float64(end.Sub(start).Milliseconds()))
 	m.numProcessing.Dec()
 }
diff --git a/snow/consensus/snowstorm/test_tx_test.go b/snow/consensus/snowstorm/test_tx_test.go
deleted file mode 100644
index 7f34a97c3db8..000000000000
--- a/snow/consensus/snowstorm/test_tx_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
-// See the file LICENSE for licensing terms.
-
-package snowstorm
-
-import (
-	"testing"
-)
-
-func TestTxVerify(t *testing.T) {
-	Setup()
-
-	if err := Red.Verify(); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestTxBytes(t *testing.T) {
-	Setup()
-
-	if Red.Bytes() == nil {
-		t.Fatalf("Expected non-nil bytes")
-	}
-}
diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go
index 7a8d39cd1af1..b1d8692be660 100644
--- a/snow/engine/avalanche/transitive.go
+++ b/snow/engine/avalanche/transitive.go
@@ -95,10 +95,9 @@ func (t *Transitive) finishBootstrapping() error {
 			t.Ctx.Log.Error("vertex %s failed to be loaded from the frontier with %s", vtxID, err)
 		}
 	}
-	t.Consensus.Initialize(t.Ctx, t.Params, frontier)
 	t.Ctx.Log.Info("bootstrapping finished with %d vertices in the accepted frontier", len(frontier))
-	return nil
+	return t.Consensus.Initialize(t.Ctx, t.Params, frontier)
 }
 
 // Gossip implements the Engine interface
diff --git a/snow/engine/avalanche/transitive_test.go b/snow/engine/avalanche/transitive_test.go
index 24ffc448e551..ad76fc9b1811 100644
--- a/snow/engine/avalanche/transitive_test.go
+++ b/snow/engine/avalanche/transitive_test.go
@@ -2984,6 +2984,7 @@ func TestEngineAggressivePolling(t *testing.T) {
 
 	config := DefaultConfig()
 	config.Params.ConcurrentRepolls = 3
+	config.Params.BetaRogue = 3
 
 	vdr := validators.GenerateRandomValidator(1)
 
@@ -3034,8 +3035,12 @@ func TestEngineAggressivePolling(t *testing.T) {
 	}
 
 	te := &Transitive{}
-	te.Initialize(config)
-	te.finishBootstrapping()
+	if err := te.Initialize(config); err != nil {
+		t.Fatal(err)
+	}
+	if err := te.finishBootstrapping(); err != nil {
+		t.Fatal(err)
+	}
 	te.Ctx.Bootstrapped()
 
 	parsed := new(bool)
@@ -3072,7 +3077,7 @@ func TestEngineAggressivePolling(t *testing.T) {
 		t.Fatalf("should have issued one push query")
 	}
 	if *numPullQueries != 2 {
-		t.Fatalf("should have issued one pull query")
+		t.Fatalf("should have issued two pull queries")
 	}
 }
 

From 4f260ca1560ecdf1038843decd7438463ef00f6a Mon Sep 17 00:00:00 2001
From: StephenButtolph
Date: Mon, 24 Aug 2020 11:54:10 -0400
Subject: [PATCH 02/47] started commenting directed.go

---
 snow/consensus/snowstorm/directed.go | 57 +++++++++++++++++++++++-----
 1 file changed, 47 insertions(+), 10 deletions(-)

diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go
index 1394c7f0eac7..0beb75bc6673 100644
--- a/snow/consensus/snowstorm/directed.go
+++ b/snow/consensus/snowstorm/directed.go
@@ -36,43 +36,80 @@ type Directed struct {
 }
 
 type directedTx struct {
-	bias, confidence, lastVote int
-	rogue bool
+	// bias is the number of times this transaction was the successful result of
+	// a network poll
+	bias int
 
-	pendingAccept, accepted bool
-	ins, outs ids.Set
+	// confidence is the number of consecutive times this transaction was the
+	// successful result of a network poll as of [lastVote]
+	confidence int
+
+	// lastVote is the last poll
number that this transaction was included in a
+	// successful network poll
+	lastVote int
+
+	// rogue identifies if there is a known conflict with this transaction
+	rogue bool
+
+	// pendingAccept identifies if this transaction has been marked as accepted
+	// once its transitive dependencies have also been accepted
+	pendingAccept bool
+
+	// accepted identifies if this transaction has been accepted. This should
+	// only be set if [pendingAccept] is also set
+	accepted bool
+
+	// ins is the set of txIDs that this tx conflicts with that are less
+	// preferred than this tx
+	ins ids.Set
+
+	// outs is the set of txIDs that this tx conflicts with that are more
+	// preferred than this tx
+	outs ids.Set
+
+	// tx is the actual transaction this node represents
 	tx Tx
 }
 
 // Initialize implements the Consensus interface
 func (dg *Directed) Initialize(ctx *snow.Context, params snowball.Parameters) error {
-	dg.utxos = make(map[[32]byte]ids.Set)
 	dg.txs = make(map[[32]byte]*directedTx)
+	dg.utxos = make(map[[32]byte]ids.Set)
 
 	return dg.common.Initialize(ctx, params)
 }
 
 // IsVirtuous implements the Consensus interface
 func (dg *Directed) IsVirtuous(tx Tx) bool {
-	id := tx.ID()
-	if node, exists := dg.txs[id.Key()]; exists {
+	txID := tx.ID()
+	// If the tx is currently processing, we should just return whether it was
+	// registered as rogue or not.
+	if node, exists := dg.txs[txID.Key()]; exists {
 		return !node.rogue
 	}
+
+	// The tx isn't processing, so we need to check to see if it conflicts with
+	// any of the other txs that are currently processing. This means that we
+	// need to iterate over all the inputs of this tx to see if currently issued
+	// txs also name one of those inputs.
 	for _, input := range tx.InputIDs().List() {
 		if _, exists := dg.utxos[input.Key()]; exists {
+			// A currently processing tx names the same input as the provided
+			// tx, so the provided tx would be rogue.
 			return false
 		}
 	}
+
+	// This tx is virtuous as far as this consensus instance knows
 	return true
 }
 
 // Conflicts implements the Consensus interface
 func (dg *Directed) Conflicts(tx Tx) ids.Set {
-	id := tx.ID()
+	txID := tx.ID()
 	conflicts := ids.Set{}
 
-	if node, exists := dg.txs[id.Key()]; exists {
+	if node, exists := dg.txs[txID.Key()]; exists {
 		conflicts.Union(node.ins)
 		conflicts.Union(node.outs)
 	} else {
@@ -81,7 +118,7 @@ func (dg *Directed) Conflicts(tx Tx) ids.Set {
 			conflicts.Union(spends)
 		}
 	}
-	conflicts.Remove(id)
+	conflicts.Remove(txID)
 
 	return conflicts

From 33ea747cd95b24cdac502071acb8c82536fd36cb Mon Sep 17 00:00:00 2001
From: StephenButtolph
Date: Mon, 24 Aug 2020 18:05:59 -0400
Subject: [PATCH 03/47] cleaning up snowstorm issue comments

---
 node/node.go                         |   6 +-
 snow/consensus/snowstorm/directed.go | 117 ++++++++++++++++++---------
 2 files changed, 84 insertions(+), 39 deletions(-)

diff --git a/node/node.go b/node/node.go
index 95acaa0cc108..e909dbd69d93 100644
--- a/node/node.go
+++ b/node/node.go
@@ -140,9 +140,9 @@ func (n *Node) initNetworking() error {
 	tlsConfig := &tls.Config{
 		Certificates: []tls.Certificate{cert},
 		ClientAuth:   tls.RequireAnyClientCert,
-		// We do not use TLS's CA functionality, we just require an
-		// authenticated channel. Therefore, we can safely skip verification
-		// here.
+		// We do not use TLS's CA functionality to authenticate a hostname.
+		// We only require an authenticated channel based on the peer's
+		// public key. Therefore, we can safely skip CA verification.
//
 		// TODO: Security audit required
 		InsecureSkipVerify: true,
diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go
index 0beb75bc6673..3245bfd5d075 100644
--- a/snow/consensus/snowstorm/directed.go
+++ b/snow/consensus/snowstorm/directed.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/ava-labs/gecko/ids"
 	"github.com/ava-labs/gecko/snow"
+	"github.com/ava-labs/gecko/snow/choices"
 	"github.com/ava-labs/gecko/snow/consensus/snowball"
 	"github.com/ava-labs/gecko/utils/formatting"
 )
@@ -89,9 +90,7 @@ func (dg *Directed) IsVirtuous(tx Tx) bool {
 	}
 
 	// The tx isn't processing, so we need to check to see if it conflicts with
-	// any of the other txs that are currently processing. This means that we
-	// need to iterate over all the inputs of this tx to see if currently issued
-	// txs also name one of those inputs.
+	// any of the other txs that are currently processing.
 	for _, input := range tx.InputIDs().List() {
 		if _, exists := dg.utxos[input.Key()]; exists {
 			// A currently processing tx names the same input as the provided
@@ -100,116 +99,162 @@ func (dg *Directed) IsVirtuous(tx Tx) bool {
 		}
 	}
 
-	// This tx is virtuous as far as this consensus instance knows
+	// This tx is virtuous as far as this consensus instance knows.
 	return true
 }
 
 // Conflicts implements the Consensus interface
 func (dg *Directed) Conflicts(tx Tx) ids.Set {
-	txID := tx.ID()
 	conflicts := ids.Set{}
-
-	if node, exists := dg.txs[txID.Key()]; exists {
+	if node, exists := dg.txs[tx.ID().Key()]; exists {
+		// If the tx is currently processing, the set of conflicting txs is
+		// just the union of the inbound conflicts and the outbound conflicts.
 		conflicts.Union(node.ins)
 		conflicts.Union(node.outs)
 	} else {
+		// If the tx isn't currently processing, the set of conflicting txs is
+		// the union of all the txs that spend an input that this tx spends.
 		for _, input := range tx.InputIDs().List() {
 			if spends, exists := dg.utxos[input.Key()]; exists {
 				conflicts.Union(spends)
 			}
 		}
-		conflicts.Remove(txID)
 	}
-
 	return conflicts
 }
 
 // Add implements the Consensus interface
 func (dg *Directed) Add(tx Tx) error {
 	if dg.Issued(tx) {
-		return nil // Already inserted
+		// If the tx was previously inserted, nothing should be done here.
+		return nil
 	}
 
 	txID := tx.ID()
 	bytes := tx.Bytes()
 
+	// Notify the IPC socket that this tx has been issued.
 	dg.ctx.DecisionDispatcher.Issue(dg.ctx.ChainID, txID, bytes)
+
+	// Notify the metrics that this transaction was just issued.
+	dg.metrics.Issued(txID)
+
 	inputs := tx.InputIDs()
-	// If there are no inputs, Tx is vacuously accepted
+
+	// If this tx doesn't have any inputs, it's impossible for there to be any
+	// conflicting transactions. Therefore, this transaction is treated as
+	// vacuously accepted.
 	if inputs.Len() == 0 {
+		// Accept is called before notifying the IPC so that acceptances that
+		// cause fatal errors aren't sent to an IPC peer.
 		if err := tx.Accept(); err != nil {
 			return err
 		}
+
+		// Notify the IPC socket that this tx has been accepted.
 		dg.ctx.DecisionDispatcher.Accept(dg.ctx.ChainID, txID, bytes)
-		dg.metrics.Issued(txID)
+
+		// Notify the metrics that this transaction was just accepted.
dg.metrics.Accepted(txID)
 		return nil
 	}
 
 	txNode := &directedTx{tx: tx}
 
-	// For each UTXO input to Tx:
-	// * Get all transactions that consume that UTXO
-	// * Add edges from Tx to those transactions in the conflict graph
-	// * Mark those transactions as rogue
+	// For each UTXO consumed by the tx:
+	// * Add edges between this tx and txs that consume this UTXO
+	// * Mark this tx as attempting to consume this UTXO
 	for _, inputID := range inputs.List() {
 		inputKey := inputID.Key()
-		spends := dg.utxos[inputKey] // Transactions spending this UTXO
-		// Add edges to conflict graph
-		txNode.outs.Union(spends)
+		// Get the set of txs that are currently processing that also consume
+		// this UTXO
+		spenders := dg.utxos[inputKey]
 
-		// Mark transactions conflicting with Tx as rogue
-		for _, conflictID := range spends.List() {
+		// Add all the txs that spend this UTXO to this tx's conflicts that are
+		// preferred over this tx. We know all these txs are preferred over
+		// this tx, because this tx currently has a bias of 0 and the tie-break
+		// goes to the tx whose bias was updated first.
+		txNode.outs.Union(spenders)
+
+		// Update txs conflicting with tx to account for its issuance
+		for _, conflictID := range spenders.List() {
 			conflictKey := conflictID.Key()
+
+			// Get the node that contains this conflicting tx
 			conflict := dg.txs[conflictKey]
 
+			// This conflicting tx can't be virtuous anymore. So we remove this
+			// conflicting tx from any of the virtuous sets if it was previously
+			// in them.
 			dg.virtuous.Remove(conflictID)
 			dg.virtuousVoting.Remove(conflictID)
 
+			// This tx should be set to rogue if it wasn't rogue before.
 			conflict.rogue = true
-			conflict.ins.Add(txID)
 
-			dg.txs[conflictKey] = conflict
+			// This conflicting tx is preferred over the tx being inserted, as
+			// described above. So we add the conflict to the inbound set.
+			conflict.ins.Add(txID)
 		}
-		// Add Tx to list of transactions consuming UTXO whose ID is id
-		spends.Add(txID)
-		dg.utxos[inputKey] = spends
+
+		// Add this tx to list of txs consuming the current UTXO
+		spenders.Add(txID)
+
+		// Because this isn't a pointer, we should re-map the set.
+		dg.utxos[inputKey] = spenders
 	}
-	txNode.rogue = txNode.outs.Len() != 0 // Mark this transaction as rogue if it has conflicts
 
-	// Add the node representing Tx to the node set
-	dg.txs[txID.Key()] = txNode
+	// Mark this transaction as rogue if it had any conflicts registered above
+	txNode.rogue = txNode.outs.Len() != 0
+
 	if !txNode.rogue {
-		// I'm not rogue
+		// If this tx is currently virtuous, add it to the virtuous sets
 		dg.virtuous.Add(txID)
 		dg.virtuousVoting.Add(txID)
 
-		// If I'm not rogue, I must be preferred
+		// If a tx is virtuous, it must be preferred.
 		dg.preferences.Add(txID)
 	}
-	dg.metrics.Issued(txID)
 
-	// Tx can be accepted only if the transactions it depends on are also accepted
-	// If any transactions that Tx depends on are rejected, reject Tx
+	// Add this tx to the set of currently processing txs
+	dg.txs[txID.Key()] = txNode
+
+	// This tx can be accepted only if all the txs it depends on are also
+	// accepted. If any txs that this tx depends on are rejected, reject it.
 	toReject := &directedRejector{
 		dg:     dg,
 		txNode: txNode,
 	}
+
+	// Register all of this tx's dependencies as possibilities to reject this tx.
 	for _, dependency := range tx.Dependencies() {
-		if !dependency.Status().Decided() {
+		if dependency.Status() != choices.Accepted {
+			// If the dependency isn't accepted, then it must be processing.
So,
+			// this tx should be rejected if any of these processing txs are
+			// rejected. Note that the dependencies can't be rejected, because
+			// it is assumed that this tx is currently considered valid.
 			toReject.deps.Add(dependency.ID())
 		}
 	}
+
+	// Register these dependencies
 	dg.pendingReject.Register(toReject)
-	return dg.errs.Err
+
+	// Registering the rejector can't result in an error, so we can safely
+	// return nil here.
+	return nil
 }
 
 // Issued implements the Consensus interface
 func (dg *Directed) Issued(tx Tx) bool {
+	// If the tx is either Accepted or Rejected, then it must have been issued
+	// previously.
 	if tx.Status().Decided() {
 		return true
 	}
+
+	// If the tx is currently processing, then it must have been issued.
 	_, ok := dg.txs[tx.ID().Key()]
 	return ok
 }

From 4657d062c8945076a32adf323fe0b844642eab00 Mon Sep 17 00:00:00 2001
From: "Collin K. Cusce"
Date: Mon, 24 Aug 2020 18:29:06 -0400
Subject: [PATCH 04/47] changed default subnet to primary network

---
 chains/manager.go                             |   2 +-
 genesis/genesis.go                            |   6 +-
 main/params.go                                |   4 +-
 node/config.go                                |   2 +-
 node/node.go                                  |  20 +-
 snow/validators/validator.go                  |   2 +-
 utils/constants/constants.go                  |   4 +-
 .../add_default_subnet_delegator_tx.go        |  40 ++--
 .../add_default_subnet_delegator_tx_test.go   |  80 +++----
 .../add_default_subnet_validator_tx.go        |  34 +--
 .../add_default_subnet_validator_tx_test.go   | 108 +++++----
 .../add_nondefault_subnet_validator_tx.go     |  54 ++---
 ...add_nondefault_subnet_validator_tx_test.go | 202 +++++++++---------
 vms/platformvm/advance_time_tx.go             |   8 +-
 vms/platformvm/advance_time_tx_test.go        |  16 +-
 vms/platformvm/create_chain_tx.go             |   2 +-
 vms/platformvm/event_heap.go                  |  12 +-
 vms/platformvm/event_heap_test.go             |  40 ++--
 vms/platformvm/reward_validator_tx.go         |  22 +-
 vms/platformvm/reward_validator_tx_test.go    |  22 +-
 vms/platformvm/service.go                     |  98 ++++-----
 vms/platformvm/service_test.go                |   8 +-
 vms/platformvm/static_service.go              |  28 +--
 vms/platformvm/static_service_test.go         |  20 +-
 vms/platformvm/vm.go                          |  44 ++--
 vms/platformvm/vm_test.go                     |  94 ++++----
 26 files changed, 486 insertions(+), 486 deletions(-)

diff --git a/chains/manager.go b/chains/manager.go
index 34270fbe8af2..2b48fe1c85b5 100644
--- a/chains/manager.go
+++ b/chains/manager.go
@@ -344,7 +344,7 @@ func (m *manager) buildChain(chainParams ChainParameters) (*chain, error) {
 	if m.stakingEnabled {
 		validators, ok = m.validators.GetValidatorSet(chainParams.SubnetID)
 	} else { // Staking is disabled. Every peer validates every subnet.
-		validators, ok = m.validators.GetValidatorSet(constants.DefaultSubnetID)
+		validators, ok = m.validators.GetValidatorSet(constants.PrimaryNetworkID)
 	}
 	if !ok {
 		return nil, fmt.Errorf("couldn't get validator set of subnet with ID %s.
The subnet may not exist", chainParams.SubnetID)
 	}
diff --git a/genesis/genesis.go b/genesis/genesis.go
index 02e69b98f250..2be3c527b9ca 100644
--- a/genesis/genesis.go
+++ b/genesis/genesis.go
@@ -119,7 +119,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) {
 		weight := json.Uint64(20 * units.KiloAvax)
 		destAddr := config.FundedAddresses[i%len(config.FundedAddresses)]
 		platformvmArgs.Validators = append(platformvmArgs.Validators,
-			platformvm.FormattedAPIDefaultSubnetValidator{
+			platformvm.FormattedAPIPrimaryValidator{
 				FormattedAPIValidator: platformvm.FormattedAPIValidator{
 					StartTime: json.Uint64(genesisTime.Unix()),
 					EndTime:   json.Uint64(endStakingTime.Unix()),
@@ -135,7 +135,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) {
 	platformvmArgs.Chains = []platformvm.APIChain{
 		{
 			GenesisData: avmReply.Bytes,
-			SubnetID:    constants.DefaultSubnetID,
+			SubnetID:    constants.PrimaryNetworkID,
 			VMID:        avm.ID,
 			FxIDs: []ids.ID{
 				secp256k1fx.ID,
@@ -146,7 +146,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) {
 		},
 		{
 			GenesisData: formatting.CB58{Bytes: config.EVMBytes},
-			SubnetID:    constants.DefaultSubnetID,
+			SubnetID:    constants.PrimaryNetworkID,
 			VMID:        EVMID,
 			Name:        "C-Chain",
 		},
diff --git a/main/params.go b/main/params.go
index 8e3bdd3982ed..f3e0f1f617ca 100644
--- a/main/params.go
+++ b/main/params.go
@@ -166,8 +166,8 @@ func init() {
 	// AVAX fees:
 	fs.Uint64Var(&Config.TxFee, "tx-fee", units.MilliAvax, "Transaction fee, in nAVAX")
 
-	// Minimum stake, in nAVAX, required to validate the Default Subnet
-	fs.Uint64Var(&Config.MinStake, "min-stake", 5*units.MilliAvax, "Minimum stake, in nAVAX, required to validate the Default Subnet")
+	// Minimum stake, in nAVAX, required to validate the primary network
+	fs.Uint64Var(&Config.MinStake, "min-stake", 5*units.MilliAvax, "Minimum stake, in nAVAX, required to validate the primary network")
 
 	// Assertions:
 	fs.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution")
diff --git a/node/config.go b/node/config.go
index 0bd595c13b7d..aed3c9cec727 100644
--- a/node/config.go
+++ b/node/config.go
@@ -23,7 +23,7 @@ type Config struct {
 	// Transaction fee configuration
 	TxFee uint64
 
-	// Minimum stake, in nAVAX, required to validate the Default Subnet
+	// Minimum stake, in nAVAX, required to validate the primary network
 	MinStake uint64
 
 	// Assertions configuration
diff --git a/node/node.go b/node/node.go
index 95acaa0cc108..665d80afd8f9 100644
--- a/node/node.go
+++ b/node/node.go
@@ -155,10 +155,10 @@ func (n *Node) initNetworking() error {
 		clientUpgrader = network.NewIPUpgrader()
 	}
 
-	// Initialize validator manager and default subnet's validator set
-	defaultSubnetValidators := validators.NewSet()
+	// Initialize validator manager and primary network's validator set
+	primaryNetworkValidators := validators.NewSet()
 	n.vdrs = validators.NewManager()
-	n.vdrs.PutValidatorSet(constants.DefaultSubnetID, defaultSubnetValidators)
+	n.vdrs.PutValidatorSet(constants.PrimaryNetworkID, primaryNetworkValidators)
 
 	n.Net = network.NewDefaultNetwork(
 		n.Config.ConsensusParams.Metrics,
@@ -172,14 +172,14 @@ func (n *Node) initNetworking() error {
 		dialer,
 		serverUpgrader,
 		clientUpgrader,
-		defaultSubnetValidators,
+		primaryNetworkValidators,
 		n.beacons,
 		n.Config.ConsensusRouter,
 	)
 
 	if !n.Config.EnableStaking {
 		n.Net.RegisterHandler(&insecureValidatorManager{
-			vdrs:   defaultSubnetValidators,
+			vdrs:   primaryNetworkValidators,
 			weight: n.Config.DisabledStakingWeight,
 		})
 	}
@@ -356,7 +356,7 @@ func (n *Node) initChains(genesisBytes
[]byte, avaxAssetID ids.ID) error { // Create the Platform Chain n.chainManager.ForceCreateChain(chains.ChainParameters{ ID: constants.PlatformChainID, - SubnetID: constants.DefaultSubnetID, + SubnetID: constants.PrimaryNetworkID, GenesisData: genesisBytes, // Specifies other chains to create VMAlias: platformvm.ID.String(), CustomBeacons: n.beacons, @@ -442,12 +442,12 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { // Instead of updating node's validator manager, platform chain makes changes // to its own local validator manager (which isn't used for sampling) if !n.Config.EnableStaking { - defaultSubnetValidators := validators.NewSet() - if err := defaultSubnetValidators.Add(validators.NewValidator(n.ID, 1)); err != nil { - return fmt.Errorf("couldn't add validator to Default Subnet: %w", err) + primaryNetworkValidators := validators.NewSet() + if err := primaryNetworkValidators.Add(validators.NewValidator(n.ID, 1)); err != nil { + return fmt.Errorf("couldn't add validator to primary network: %w", err) } vdrs = validators.NewManager() - vdrs.PutValidatorSet(constants.DefaultSubnetID, defaultSubnetValidators) + vdrs.PutValidatorSet(constants.PrimaryNetworkID, primaryNetworkValidators) } errs := wrappers.Errs{} diff --git a/snow/validators/validator.go b/snow/validators/validator.go index bbdda0ad646a..cab16bf62d67 100644 --- a/snow/validators/validator.go +++ b/snow/validators/validator.go @@ -13,7 +13,7 @@ type Validator interface { ID() ids.ShortID // Weight that can be used for weighted sampling. - // If this validator is validating the default subnet, returns the amount of + // If this validator is validating the primary network, returns the amount of // AVAX staked Weight() uint64 } diff --git a/utils/constants/constants.go b/utils/constants/constants.go index 51c1f2a0373e..203b406e1cf1 100644 --- a/utils/constants/constants.go +++ b/utils/constants/constants.go @@ -21,8 +21,8 @@ const ( // Variables to be exported var ( - DefaultSubnetID = ids.Empty - PlatformChainID = ids.Empty + PrimaryNetworkID = ids.Empty + PlatformChainID = ids.Empty MainnetID uint32 = 1 CascadeID uint32 = 2 diff --git a/vms/platformvm/add_default_subnet_delegator_tx.go b/vms/platformvm/add_default_subnet_delegator_tx.go index 1f9817738d99..9275c6576175 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx.go +++ b/vms/platformvm/add_default_subnet_delegator_tx.go @@ -26,12 +26,12 @@ var ( errInvalidState = errors.New("generated output isn't valid state") errInvalidAmount = errors.New("invalid amount") - _ UnsignedProposalTx = &UnsignedAddDefaultSubnetDelegatorTx{} - _ TimedTx = &UnsignedAddDefaultSubnetDelegatorTx{} + _ UnsignedProposalTx = &UnsignedAddPrimaryDelegatorTx{} + _ TimedTx = &UnsignedAddPrimaryDelegatorTx{} ) -// UnsignedAddDefaultSubnetDelegatorTx is an unsigned addDefaultSubnetDelegatorTx -type UnsignedAddDefaultSubnetDelegatorTx struct { +// UnsignedAddPrimaryDelegatorTx is an unsigned addPrimaryDelegatorTx +type UnsignedAddPrimaryDelegatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the delegatee @@ -43,17 +43,17 @@ type UnsignedAddDefaultSubnetDelegatorTx struct { } // StartTime of this validator -func (tx *UnsignedAddDefaultSubnetDelegatorTx) StartTime() time.Time { +func (tx *UnsignedAddPrimaryDelegatorTx) StartTime() time.Time { return tx.Validator.StartTime() } // EndTime of this validator -func (tx *UnsignedAddDefaultSubnetDelegatorTx) EndTime() time.Time { +func (tx *UnsignedAddPrimaryDelegatorTx) EndTime() time.Time { return 
tx.Validator.EndTime() } // Verify return nil iff [tx] is valid -func (tx *UnsignedAddDefaultSubnetDelegatorTx) Verify( +func (tx *UnsignedAddPrimaryDelegatorTx) Verify( ctx *snow.Context, c codec.Codec, feeAmount uint64, @@ -102,7 +102,7 @@ func (tx *UnsignedAddDefaultSubnetDelegatorTx) Verify( } // SemanticVerify this transaction is valid. -func (tx *UnsignedAddDefaultSubnetDelegatorTx) SemanticVerify( +func (tx *UnsignedAddPrimaryDelegatorTx) SemanticVerify( vm *VM, db database.Database, stx *Tx, @@ -130,28 +130,28 @@ func (tx *UnsignedAddDefaultSubnetDelegatorTx) SemanticVerify( // Ensure that the period this delegator is running is a subset of the time // the validator is running. First, see if the validator is currently // running. - currentValidators, err := vm.getCurrentValidators(db, constants.DefaultSubnetID) + currentValidators, err := vm.getCurrentValidators(db, constants.PrimaryNetworkID) if err != nil { - return nil, nil, nil, nil, permError{fmt.Errorf("couldn't get current validators of default subnet: %w", err)} + return nil, nil, nil, nil, permError{fmt.Errorf("couldn't get current validators of primary network: %w", err)} } - pendingValidators, err := vm.getPendingValidators(db, constants.DefaultSubnetID) + pendingValidators, err := vm.getPendingValidators(db, constants.PrimaryNetworkID) if err != nil { - return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get pending validators of default subnet: %w", err)} + return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get pending validators of primary network: %w", err)} } - if validator, err := currentValidators.getDefaultSubnetStaker(tx.Validator.NodeID); err == nil { - unsignedValidator := validator.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + if validator, err := currentValidators.getPrimaryStaker(tx.Validator.NodeID); err == nil { + unsignedValidator := validator.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) { return nil, nil, nil, nil, permError{errDSValidatorSubset} } } else { // They aren't currently validating, so check to see if they will // validate in the future. 
- validator, err := pendingValidators.getDefaultSubnetStaker(tx.Validator.NodeID) + validator, err := pendingValidators.getPrimaryStaker(tx.Validator.NodeID) if err != nil { return nil, nil, nil, nil, permError{errDSValidatorSubset} } - unsignedValidator := validator.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + unsignedValidator := validator.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) { return nil, nil, nil, nil, permError{errDSValidatorSubset} } @@ -182,7 +182,7 @@ func (tx *UnsignedAddDefaultSubnetDelegatorTx) SemanticVerify( // Add the delegator to the pending validators heap pendingValidators.Add(stx) // If this proposal is committed, update the pending validator set to include the delegator - if err := vm.putPendingValidators(onCommitDB, pendingValidators, constants.DefaultSubnetID); err != nil { + if err := vm.putPendingValidators(onCommitDB, pendingValidators, constants.PrimaryNetworkID); err != nil { return nil, nil, nil, nil, tempError{err} } @@ -202,12 +202,12 @@ func (tx *UnsignedAddDefaultSubnetDelegatorTx) SemanticVerify( // InitiallyPrefersCommit returns true if the proposed validators start time is // after the current wall clock time, -func (tx *UnsignedAddDefaultSubnetDelegatorTx) InitiallyPrefersCommit(vm *VM) bool { +func (tx *UnsignedAddPrimaryDelegatorTx) InitiallyPrefersCommit(vm *VM) bool { return tx.StartTime().After(vm.clock.Time()) } // Creates a new transaction -func (vm *VM) newAddDefaultSubnetDelegatorTx( +func (vm *VM) newAddPrimaryDelegatorTx( stakeAmt, // Amount the delegator stakes startTime, // Unix time they start delegating endTime uint64, // Unix time they stop delegating @@ -220,7 +220,7 @@ func (vm *VM) newAddDefaultSubnetDelegatorTx( return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } // Create the tx - utx := &UnsignedAddDefaultSubnetDelegatorTx{ + utx := &UnsignedAddPrimaryDelegatorTx{ BaseTx: BaseTx{BaseTx: avax.BaseTx{ NetworkID: vm.Ctx.NetworkID, BlockchainID: vm.Ctx.ChainID, diff --git a/vms/platformvm/add_default_subnet_delegator_tx_test.go b/vms/platformvm/add_default_subnet_delegator_tx_test.go index 49b5e6c53390..a25e34f43fc2 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx_test.go +++ b/vms/platformvm/add_default_subnet_delegator_tx_test.go @@ -15,8 +15,8 @@ import ( "github.com/ava-labs/gecko/utils/crypto" ) -func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { - vm , _ := defaultVM() +func TestAddPrimaryDelegatorTxSyntacticVerify(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -27,13 +27,13 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { rewardAddress := nodeID // Case : tx is nil - var unsignedTx *UnsignedAddDefaultSubnetDelegatorTx + var unsignedTx *UnsignedAddPrimaryDelegatorTx if err := unsignedTx.Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because tx is nil") } // Case: Wrong network ID - tx, err := vm.newAddDefaultSubnetDelegatorTx( + tx, err := vm.newAddPrimaryDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -44,15 +44,15 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).NetworkID++ + tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).NetworkID++ // This tx was syntactically verified when it was 
created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because the wrong network ID was used") } // Case: Missing Node ID - tx, err = vm.newAddDefaultSubnetDelegatorTx( + tx, err = vm.newAddPrimaryDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -63,15 +63,15 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Validator.NodeID = ids.ShortID{} + tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Validator.NodeID = ids.ShortID{} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because NodeID is nil") } // Case: Not enough weight - tx, err = vm.newAddDefaultSubnetDelegatorTx( + tx, err = vm.newAddPrimaryDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -82,15 +82,15 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Validator.Wght = vm.minStake - 1 + tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Validator.Wght = vm.minStake - 1 // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because of not enough weight") } // Case: Validation length is too short - tx, err = vm.newAddDefaultSubnetDelegatorTx( + tx, err = vm.newAddPrimaryDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix()), @@ -101,15 +101,15 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Validator.End-- // 1 shorter than minimum stake time + tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Validator.End-- // 1 shorter than minimum stake time // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).syntacticallyVerified = false - 
if err = tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).syntacticallyVerified = false + if err = tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too short") } // Case: Validation length is too long - if tx, err = vm.newAddDefaultSubnetDelegatorTx( + if tx, err = vm.newAddPrimaryDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix()), @@ -119,15 +119,15 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { ); err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Validator.End++ // 1 longer than maximum stake time + tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Validator.End++ // 1 longer than maximum stake time // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too long") } // Case: Valid - if tx, err = vm.newAddDefaultSubnetDelegatorTx( + if tx, err = vm.newAddPrimaryDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -136,13 +136,13 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, ); err != nil { t.Fatal(err) - } else if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil { + } else if err := tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil { t.Fatal(err) } } -func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { - vm , _ := defaultVM() +func TestAddPrimaryDelegatorTxSemanticVerify(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -164,9 +164,9 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { newValidatorID := newValidatorKey.PublicKey().Address() newValidatorStartTime := uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()) newValidatorEndTime := uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()) - // [addValidator] adds a new validator to the default subnet's pending validator set + // [addValidator] adds a new validator to the primary network's pending validator set addValidator := func(db database.Database) { - if tx, err := vm.newAddDefaultSubnetValidatorTx( + if tx, err := vm.newAddPrimaryValidatorTx( vm.minStake, // stake amount newValidatorStartTime, // start time newValidatorEndTime, // end time @@ -182,7 +182,7 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { SortByStartTime: true, Txs: []*Tx{tx}, }, - constants.DefaultSubnetID, + constants.PrimaryNetworkID, ); err != nil { t.Fatal(err) } @@ -210,7 +210,7 @@ func 
TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, nil, true, - "validator stops validating default subnet earlier than non-default subnet", + "validator stops validating primary network earlier than subnet", }, { vm.minStake, @@ -221,7 +221,7 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, nil, true, - "end time is after the default subnets end time", + "end time is after the primary network end time", }, { vm.minStake, @@ -232,34 +232,34 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, nil, true, - "validator not in the current or pending validator sets of the default subnet", + "validator not in the current or pending validator sets of the subnet", }, { vm.minStake, - newValidatorStartTime - 1, // start validating non-default subnet before default subnet + newValidatorStartTime - 1, // start validating subnet before primary network newValidatorEndTime, newValidatorID, rewardAddress, []*crypto.PrivateKeySECP256K1R{keys[0]}, addValidator, true, - "validator starts validating non-default subnet before default subnet", + "validator starts validating subnet before primary network", }, { vm.minStake, newValidatorStartTime, - newValidatorEndTime + 1, // stop validating non-default subnet after stopping validating default subnet + newValidatorEndTime + 1, // stop validating subnet after stopping validating primary network newValidatorID, rewardAddress, []*crypto.PrivateKeySECP256K1R{keys[0]}, addValidator, true, - "validator stops validating default subnet before non-default subnet", + "validator stops validating primary network before subnet", }, { vm.minStake, - newValidatorStartTime, // same start time as for default subnet - newValidatorEndTime, // same end time as for default subnet + newValidatorStartTime, // same start time as for primary network + newValidatorEndTime, // same end time as for primary network newValidatorID, rewardAddress, []*crypto.PrivateKeySECP256K1R{keys[0]}, @@ -303,7 +303,7 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { for _, tt := range tests { vdb.Abort() - tx, err := vm.newAddDefaultSubnetDelegatorTx( + tx, err := vm.newAddPrimaryDelegatorTx( tt.stakeAmount, tt.startTime, tt.endTime, diff --git a/vms/platformvm/add_default_subnet_validator_tx.go b/vms/platformvm/add_default_subnet_validator_tx.go index b9c09c388ecb..87ce71d1fddf 100644 --- a/vms/platformvm/add_default_subnet_validator_tx.go +++ b/vms/platformvm/add_default_subnet_validator_tx.go @@ -30,12 +30,12 @@ var ( errStakeTooLong = errors.New("staking period is too long") errTooManyShares = fmt.Errorf("a staker can only require at most %d shares from delegators", NumberOfShares) - _ UnsignedProposalTx = &UnsignedAddDefaultSubnetValidatorTx{} - _ TimedTx = &UnsignedAddDefaultSubnetValidatorTx{} + _ UnsignedProposalTx = &UnsignedAddPrimaryValidatorTx{} + _ TimedTx = &UnsignedAddPrimaryValidatorTx{} ) -// UnsignedAddDefaultSubnetValidatorTx is an unsigned addDefaultSubnetValidatorTx -type UnsignedAddDefaultSubnetValidatorTx struct { +// UnsignedAddPrimaryValidatorTx is an unsigned addPrimaryValidatorTx +type UnsignedAddPrimaryValidatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the delegatee @@ -50,17 +50,17 @@ type UnsignedAddDefaultSubnetValidatorTx struct { } // StartTime of this validator -func (tx *UnsignedAddDefaultSubnetValidatorTx) StartTime() time.Time { +func (tx 
*UnsignedAddPrimaryValidatorTx) StartTime() time.Time {
 	return tx.Validator.StartTime()
 }
 
 // EndTime of this validator
-func (tx *UnsignedAddDefaultSubnetValidatorTx) EndTime() time.Time {
+func (tx *UnsignedAddPrimaryValidatorTx) EndTime() time.Time {
 	return tx.Validator.EndTime()
 }
 
 // Verify return nil iff [tx] is valid
-func (tx *UnsignedAddDefaultSubnetValidatorTx) Verify(
+func (tx *UnsignedAddPrimaryValidatorTx) Verify(
 	ctx *snow.Context,
 	c codec.Codec,
 	feeAmount uint64,
@@ -110,7 +110,7 @@ func (tx *UnsignedAddDefaultSubnetValidatorTx) Verify(
 }
 
 // SemanticVerify this transaction is valid.
-func (tx *UnsignedAddDefaultSubnetValidatorTx) SemanticVerify(
+func (tx *UnsignedAddPrimaryValidatorTx) SemanticVerify(
 	vm *VM,
 	db database.Database,
 	stx *Tx,
@@ -136,25 +136,25 @@ func (tx *UnsignedAddDefaultSubnetValidatorTx) SemanticVerify(
 	}
 
 	// Ensure the proposed validator is not already a validator of the specified subnet
-	currentValidators, err := vm.getCurrentValidators(db, constants.DefaultSubnetID)
+	currentValidators, err := vm.getCurrentValidators(db, constants.PrimaryNetworkID)
 	if err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
 	for _, currentVdr := range vm.getValidators(currentValidators) {
 		if currentVdr.ID().Equals(tx.Validator.NodeID) {
-			return nil, nil, nil, nil, permError{fmt.Errorf("validator %s already is already a Default Subnet validator",
+			return nil, nil, nil, nil, permError{fmt.Errorf("validator %s is already a primary network validator",
 				tx.Validator.NodeID)}
 		}
 	}
 
 	// Ensure the proposed validator is not already slated to validate for the specified subnet
-	pendingValidators, err := vm.getPendingValidators(db, constants.DefaultSubnetID)
+	pendingValidators, err := vm.getPendingValidators(db, constants.PrimaryNetworkID)
 	if err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
 	for _, pendingVdr := range vm.getValidators(pendingValidators) {
 		if pendingVdr.ID().Equals(tx.Validator.NodeID) {
-			return nil, nil, nil, nil, tempError{fmt.Errorf("validator %s is already a pending Default Subnet validator",
+			return nil, nil, nil, nil, tempError{fmt.Errorf("validator %s is already a pending primary network validator",
 				tx.Validator.NodeID)}
 		}
 	}
@@ -184,7 +184,7 @@ func (tx *UnsignedAddDefaultSubnetValidatorTx) SemanticVerify(
 	// Add validator to set of pending validators
 	pendingValidators.Add(stx)
 	// If this proposal is committed, update the pending validator set to include the validator
-	if err := vm.putPendingValidators(onCommitDB, pendingValidators, constants.DefaultSubnetID); err != nil {
+	if err := vm.putPendingValidators(onCommitDB, pendingValidators, constants.PrimaryNetworkID); err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
 
@@ -203,12 +203,12 @@ func (tx *UnsignedAddDefaultSubnetValidatorTx) SemanticVerify(
 
 // InitiallyPrefersCommit returns true if the proposed validators start time is
 // after the current wall clock time,
-func (tx *UnsignedAddDefaultSubnetValidatorTx) InitiallyPrefersCommit(vm *VM) bool {
+func (tx *UnsignedAddPrimaryValidatorTx) InitiallyPrefersCommit(vm *VM) bool {
 	return tx.StartTime().After(vm.clock.Time())
 }
 
-// NewAddDefaultSubnetValidatorTx returns a new NewAddDefaultSubnetValidatorTx
-func (vm *VM) newAddDefaultSubnetValidatorTx(
+// NewAddPrimaryValidatorTx returns a new NewAddPrimaryValidatorTx
+func (vm *VM) newAddPrimaryValidatorTx(
 	stakeAmt, // Amount the delegator stakes
 	startTime, // Unix time they start delegating
 	endTime uint64, // Unix time they stop delegating
@@ -222,7 +222,7 @@ func (vm *VM)
newAddDefaultSubnetValidatorTx( return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } // Create the tx - utx := &UnsignedAddDefaultSubnetValidatorTx{ + utx := &UnsignedAddPrimaryValidatorTx{ BaseTx: BaseTx{BaseTx: avax.BaseTx{ NetworkID: vm.Ctx.NetworkID, BlockchainID: vm.Ctx.ChainID, diff --git a/vms/platformvm/add_default_subnet_validator_tx_test.go b/vms/platformvm/add_default_subnet_validator_tx_test.go index 4a7449503db7..cd25d764fd9c 100644 --- a/vms/platformvm/add_default_subnet_validator_tx_test.go +++ b/vms/platformvm/add_default_subnet_validator_tx_test.go @@ -16,8 +16,8 @@ import ( "github.com/ava-labs/gecko/vms/secp256k1fx" ) -func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { - vm , _ := defaultVM() +func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -31,13 +31,13 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { nodeID := key.PublicKey().Address() // Case: tx is nil - var unsignedTx *UnsignedAddDefaultSubnetValidatorTx + var unsignedTx *UnsignedAddPrimaryValidatorTx if err := unsignedTx.Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because tx is nil") } // Case 3: Wrong Network ID - tx, err := vm.newAddDefaultSubnetValidatorTx( + tx, err := vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -49,15 +49,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).NetworkID++ + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).NetworkID++ // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because the wrong network ID was used") } // Case: Node ID is nil - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -69,15 +69,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.NodeID = ids.ShortID{ID: nil} + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.NodeID = ids.ShortID{ID: nil} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because node ID is nil") } // Case: Stake owner has no addresses - tx, err = 
vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -89,7 +89,7 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Stake = []*avax.TransferableOutput{{ + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Stake = []*avax.TransferableOutput{{ Asset: avax.Asset{ID: avaxAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: vm.minStake, @@ -101,13 +101,13 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { }, }} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because stake owner has no addresses") } // Case: Rewards owner has no addresses - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -119,19 +119,19 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).RewardsOwner = &secp256k1fx.OutputOwners{ + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).RewardsOwner = &secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, Addrs: nil, } // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because rewards owner has no addresses") } // Case: Stake amount too small - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -143,15 +143,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.Wght-- // 1 less than minimum amount + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.Wght-- // 1 less than minimum amount // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { 
t.Fatal("should have errored because stake amount too small") } // Case: Too many shares - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -163,15 +163,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Shares++ // 1 more than max amount + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Shares++ // 1 more than max amount // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because of too many shares") } // Case: Validation length is too short - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix()), @@ -183,15 +183,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.End-- // 1 less than min duration + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.End-- // 1 less than min duration // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too short") } // Case: Validation length is negative - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix()), @@ -203,15 +203,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.End = tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.Start - 1 + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.End = tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.Start - 1 // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == 
nil { t.Fatal("should have errored because validation length too short") } // Case: Validation length is too long - tx, err = vm.newAddDefaultSubnetValidatorTx( + tx, err = vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix()), @@ -223,15 +223,15 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Validator.End++ // 1 more than maximum duration + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.End++ // 1 more than maximum duration // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too long") } // Case: Valid - if tx, err := vm.newAddDefaultSubnetValidatorTx( + if tx, err := vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -241,14 +241,14 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, ); err != nil { t.Fatal(err) - } else if err := tx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil { + } else if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil { t.Fatal(err) } } -// Test AddDefaultSubnetValidatorTx.SemanticVerify -func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { - vm , _ := defaultVM() +// Test AddPrimaryValidatorTx.SemanticVerify +func TestAddPrimaryValidatorTxSemanticVerify(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -263,7 +263,7 @@ func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { nodeID := key.PublicKey().Address() // Case: Validator's start time too early - if tx, err := vm.newAddDefaultSubnetValidatorTx( + if tx, err := vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix())-1, uint64(defaultValidateEndTime.Unix()), @@ -278,8 +278,8 @@ func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } vDB.Abort() - // Case: Validator already validating default subnet - if tx, err := vm.newAddDefaultSubnetValidatorTx( + // Case: Validator already validating primary network + if tx, err := vm.newAddPrimaryValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -294,17 +294,17 @@ func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } vDB.Abort() - // Case: Validator in pending validator set of default subnet + // Case: Validator in pending validator set of primary network key2, err := vm.factory.NewPrivateKey() if err != nil { t.Fatal(err) } startTime := defaultGenesisTime.Add(1 * time.Second) - tx, err := vm.newAddDefaultSubnetValidatorTx( - vm.minStake, // stake amount - uint64(startTime.Unix()), // start time + tx, err := vm.newAddPrimaryValidatorTx( + 
vm.minStake, // stake amount + uint64(startTime.Unix()), // start time uint64(startTime.Add(MinimumStakingDuration).Unix()), // end time - nodeID, // node ID + nodeID, // node ID key2.PublicKey().Address(), // reward address NumberOfShares, // shares []*crypto.PrivateKeySECP256K1R{keys[0]}, // key @@ -316,7 +316,7 @@ func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { SortByStartTime: true, Txs: []*Tx{tx}, }, - constants.DefaultSubnetID, + constants.PrimaryNetworkID, ); err != nil { t.Fatal(err) } else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vDB, tx); err == nil { @@ -325,7 +325,7 @@ func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { vDB.Abort() // Case: Validator doesn't have enough tokens to cover stake amount - if _, err := vm.newAddDefaultSubnetValidatorTx( // create the tx + if _, err := vm.newAddPrimaryValidatorTx( // create the tx vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx.go b/vms/platformvm/add_nondefault_subnet_validator_tx.go index 0f8aefc95647..805d5ccd888f 100644 --- a/vms/platformvm/add_nondefault_subnet_validator_tx.go +++ b/vms/platformvm/add_nondefault_subnet_validator_tx.go @@ -22,14 +22,14 @@ import ( var ( errSigsNotUniqueOrNotSorted = errors.New("control signatures not unique or not sorted") errWrongNumberOfSignatures = errors.New("wrong number of signatures") - errDSValidatorSubset = errors.New("all subnets must be a subset of the default subnet") + errDSValidatorSubset = errors.New("all subnets must be a subset of the primary network") - _ UnsignedProposalTx = &UnsignedAddNonDefaultSubnetValidatorTx{} - _ TimedTx = &UnsignedAddNonDefaultSubnetValidatorTx{} + _ UnsignedProposalTx = &UnsignedAddSubnetValidatorTx{} + _ TimedTx = &UnsignedAddSubnetValidatorTx{} ) -// UnsignedAddNonDefaultSubnetValidatorTx is an unsigned addNonDefaultSubnetValidatorTx -type UnsignedAddNonDefaultSubnetValidatorTx struct { +// UnsignedAddSubnetValidatorTx is an unsigned addSubnetValidatorTx +type UnsignedAddSubnetValidatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // The validator @@ -39,17 +39,17 @@ type UnsignedAddNonDefaultSubnetValidatorTx struct { } // StartTime of this validator -func (tx *UnsignedAddNonDefaultSubnetValidatorTx) StartTime() time.Time { +func (tx *UnsignedAddSubnetValidatorTx) StartTime() time.Time { return tx.Validator.StartTime() } // EndTime of this validator -func (tx *UnsignedAddNonDefaultSubnetValidatorTx) EndTime() time.Time { +func (tx *UnsignedAddSubnetValidatorTx) EndTime() time.Time { return tx.Validator.EndTime() } // Verify return nil iff [tx] is valid -func (tx *UnsignedAddNonDefaultSubnetValidatorTx) Verify( +func (tx *UnsignedAddSubnetValidatorTx) Verify( ctx *snow.Context, c codec.Codec, feeAmount uint64, @@ -75,7 +75,7 @@ func (tx *UnsignedAddNonDefaultSubnetValidatorTx) Verify( } // SemanticVerify this transaction is valid. -func (tx *UnsignedAddNonDefaultSubnetValidatorTx) SemanticVerify( +func (tx *UnsignedAddSubnetValidatorTx) SemanticVerify( vm *VM, db database.Database, stx *Tx, @@ -104,36 +104,36 @@ func (tx *UnsignedAddNonDefaultSubnetValidatorTx) SemanticVerify( } // Ensure that the period this validator validates the specified subnet is a - // subnet of the time they validate the default subnet. First, see if - // they're currently validating the default subnet. 
- currentDSValidators, err := vm.getCurrentValidators(db, constants.DefaultSubnetID) + // subset of the time they validate the primary network. First, see if + // they're currently validating the primary network. + currentDSValidators, err := vm.getCurrentValidators(db, constants.PrimaryNetworkID) if err != nil { - return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get current validators of default subnet: %v", err)} + return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get current validators of primary network: %v", err)} } - if dsValidator, err := currentDSValidators.getDefaultSubnetStaker(tx.Validator.NodeID); err == nil { - unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + if dsValidator, err := currentDSValidators.getPrimaryStaker(tx.Validator.NodeID); err == nil { + unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) { return nil, nil, nil, nil, - permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating default subnet [%v, %v]", + permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating primary network [%v, %v]", tx.StartTime(), tx.EndTime(), unsignedValidator.StartTime(), unsignedValidator.EndTime())} } } else { - // They aren't currently validating the default subnet. See if they will - // validate the default subnet in the future. - pendingDSValidators, err := vm.getPendingValidators(db, constants.DefaultSubnetID) + // They aren't currently validating the primary network. See if they will + // validate the primary network in the future. + pendingDSValidators, err := vm.getPendingValidators(db, constants.PrimaryNetworkID) if err != nil { - return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get pending validators of default subnet: %v", err)} + return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get pending validators of primary network: %v", err)} } - dsValidator, err := pendingDSValidators.getDefaultSubnetStaker(tx.Validator.NodeID) + dsValidator, err := pendingDSValidators.getPrimaryStaker(tx.Validator.NodeID) if err != nil { return nil, nil, nil, nil, - permError{fmt.Errorf("validator would not be validating default subnet while validating non-default subnet")} + permError{fmt.Errorf("validator would not be validating primary network while validating subnet")} } - unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) { return nil, nil, nil, nil, - permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating default subnet [%v, %v]", + permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating primary network [%v, %v]", tx.StartTime(), tx.EndTime(), unsignedValidator.StartTime(), unsignedValidator.EndTime())} } @@ -219,12 +219,12 @@ func (tx *UnsignedAddNonDefaultSubnetValidatorTx) SemanticVerify( // InitiallyPrefersCommit returns true if the proposed validators start time is // after the current wall clock time, -func (tx *UnsignedAddNonDefaultSubnetValidatorTx) InitiallyPrefersCommit(vm *VM) bool { +func (tx *UnsignedAddSubnetValidatorTx) InitiallyPrefersCommit(vm *VM) bool { return tx.StartTime().After(vm.clock.Time()) } // Create a new transaction -func (vm *VM) newAddNonDefaultSubnetValidatorTx( +func (vm *VM) 
newAddSubnetValidatorTx( weight, // Sampling weight of the new validator startTime, // Unix time they start delegating endTime uint64, // Unix time they top delegating @@ -244,7 +244,7 @@ func (vm *VM) newAddNonDefaultSubnetValidatorTx( signers = append(signers, subnetSigners) // Create the tx - utx := &UnsignedAddNonDefaultSubnetValidatorTx{ + utx := &UnsignedAddSubnetValidatorTx{ BaseTx: BaseTx{BaseTx: avax.BaseTx{ NetworkID: vm.Ctx.NetworkID, BlockchainID: vm.Ctx.ChainID, diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go b/vms/platformvm/add_nondefault_subnet_validator_tx_test.go index cd6a78112246..2f91c61f7cd7 100644 --- a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go +++ b/vms/platformvm/add_nondefault_subnet_validator_tx_test.go @@ -15,8 +15,8 @@ import ( "github.com/ava-labs/gecko/vms/secp256k1fx" ) -func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { - vm , _ := defaultVM() +func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -26,13 +26,13 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { nodeID := keys[0].PublicKey().Address() // Case: tx is nil - var unsignedTx *UnsignedAddNonDefaultSubnetValidatorTx + var unsignedTx *UnsignedAddSubnetValidatorTx if err := unsignedTx.Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because tx is nil") } // Case: Wrong network ID - tx, err := vm.newAddNonDefaultSubnetValidatorTx( + tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -43,15 +43,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).NetworkID++ + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).NetworkID++ // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because the wrong network ID was used") } // Case: Missing Node ID - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -62,15 +62,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.NodeID = ids.ShortID{ID: nil} + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.NodeID = ids.ShortID{ID: nil} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, 
vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because NodeID is empty") } // Case: Missing Subnet ID - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -81,15 +81,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.Subnet = ids.ID{ID: nil} + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.Subnet = ids.ID{ID: nil} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because Subnet ID is nil") } // Case: No weight - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( 1, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -100,15 +100,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.Wght = 0 + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.Wght = 0 // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because of no weight") } // Case: Subnet auth indices not unique - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix())-1, @@ -119,16 +119,16 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[0] = - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[1] + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[0] = + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[1] // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err = tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err = tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored 
because sig indices weren't unique") } // Case: Validation length is too short - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix()), @@ -139,15 +139,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.End-- // 1 less than min duration + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.End-- // 1 less than min duration // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because validation length too short") } // Case: Validation length is too long - tx, err = vm.newAddNonDefaultSubnetValidatorTx( + tx, err = vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix()), @@ -158,15 +158,15 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.End++ // 1 more than max duration + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.End++ // 1 more than max duration // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err == nil { t.Fatal("should have errored because validation length too long") } // Case: Valid - if tx, err = vm.newAddNonDefaultSubnetValidatorTx( + if tx, err = vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -175,13 +175,13 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ); err != nil { t.Fatal(err) - } else if err := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err != nil { + } else if err := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err != nil { t.Fatal(err) } } -func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { - vm , _ := defaultVM() +func TestAddSubnetValidatorTxSemanticVerify(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -190,10 +190,10 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { nodeID := keys[0].PublicKey().Address() - // Case: Proposed validator currently validating default subnet - // but stops validating non-default 
subnet after stops validating default subnet + // Case: Proposed validator currently validating primary network + // but stops validating subnet after it stops validating primary network // (note that keys[0] is a genesis validator) - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix())+1, @@ -203,14 +203,14 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { ); err != nil { t.Fatal(err) } else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { - t.Fatal("should have failed because validator stops validating default subnet earlier than non-default subnet") + t.Fatal("should have failed because validator stops validating primary network earlier than subnet") } - // Case: Proposed validator currently validating default subnet - // and proposed non-default subnet validation period is subset of - // default subnet validation period + // Case: Proposed validator currently validating primary network + // and proposed subnet validation period is subset of + // primary network validation period // (note that keys[0] is a genesis validator) - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()+1), uint64(defaultValidateEndTime.Unix()), @@ -223,18 +223,18 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { t.Fatal(err) } - // Add a validator to pending validator set of default subnet + // Add a validator to pending validator set of primary network key, err := vm.factory.NewPrivateKey() if err != nil { t.Fatal(err) } pendingDSValidatorID := key.PublicKey().Address() - // starts validating default subnet 10 seconds after genesis + // starts validating primary network 10 seconds after genesis DSStartTime := defaultGenesisTime.Add(10 * time.Second) DSEndTime := DSStartTime.Add(5 * MinimumStakingDuration) - addDSTx, err := vm.newAddDefaultSubnetValidatorTx( + addDSTx, err := vm.newAddPrimaryValidatorTx( vm.minStake, // stake amount uint64(DSStartTime.Unix()), // start time uint64(DSEndTime.Unix()), // end time @@ -248,9 +248,9 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } // Case: Proposed validator isn't in pending or current validator sets - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix()), // start validating non-default subnet before default subnet + uint64(DSStartTime.Unix()), // start validating subnet before primary network uint64(DSEndTime.Unix()), pendingDSValidatorID, testSubnet1.ID(), @@ -258,7 +258,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { ); err != nil { t.Fatal(err) } else if _, _, _, _, err = tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { - t.Fatal("should have failed because validator not in the current or pending validator sets of the default subnet") + t.Fatal("should have failed because validator not in the current or pending validator sets of the primary network") } if err := vm.putPendingValidators( @@ -267,17 +267,17 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { SortByStartTime: true, Txs: []*Tx{addDSTx}, }, - constants.DefaultSubnetID, + constants.PrimaryNetworkID, ); err != nil { t.Fatal(err) } - // Node with ID key.PublicKey().Address() now a pending 
validator for default subnet + // Node with ID key.PublicKey().Address() now a pending validator for primary network - // Case: Proposed validator is pending validator of default subnet - // but starts validating non-default subnet before default subnet - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + // Case: Proposed validator is pending validator of primary network + // but starts validating subnet before primary network + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix())-1, // start validating non-default subnet before default subnet + uint64(DSStartTime.Unix())-1, // start validating subnet before primary network uint64(DSEndTime.Unix()), pendingDSValidatorID, testSubnet1.ID(), @@ -285,32 +285,32 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { ); err != nil { t.Fatal(err) } else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { - t.Fatal("should have failed because validator starts validating non-default " + - "subnet before starting to validate default subnet") + t.Fatal("should have failed because validator starts validating subnet " + + "before starting to validate primary network") } - // Case: Proposed validator is pending validator of default subnet - // but stops validating non-default subnet after default subnet - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + // Case: Proposed validator is pending validator of primary network + // but stops validating subnet after primary network + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(DSStartTime.Unix()), - uint64(DSEndTime.Unix())+1, // stop validating non-default subnet after stopping validating default subnet + uint64(DSEndTime.Unix())+1, // stop validating subnet after stopping validating primary network pendingDSValidatorID, testSubnet1.ID(), []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ); err != nil { t.Fatal(err) } else if _, _, _, _, err = tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { - t.Fatal("should have failed because validator stops validating non-default " + - "subnet after stops validating default subnet") + t.Fatal("should have failed because validator stops validating subnet " + + "after it stops validating primary network") } - // Case: Proposed validator is pending validator of default subnet - // and period validating non-default subnet is subset of time validating default subnet + // Case: Proposed validator is pending validator of primary network + // and period validating subnet is subset of time validating primary network + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix()), // same start time as for default subnet - uint64(DSEndTime.Unix()), // same end time as for default subnet + uint64(DSStartTime.Unix()), // same start time as for primary network + uint64(DSEndTime.Unix()), // same end time as for primary network pendingDSValidatorID, testSubnet1.ID(), []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, @@ -327,9 +327,9 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { t.Fatal(err) } - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( - defaultWeight, // weight - uint64(newTimestamp.Unix()), // start time + if tx, err := vm.newAddSubnetValidatorTx( + defaultWeight, // weight + uint64(newTimestamp.Unix()), // start time 
uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time nodeID, // node ID testSubnet1.ID(), // subnet ID @@ -345,14 +345,14 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { t.Fatal(err) } - // Case: Proposed validator already validating the non-default subnet - // First, add validator as validator of non-default subnet - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + // Case: Proposed validator already validating the subnet + // First, add validator as validator of subnet + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, // weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ); err != nil { t.Fatal(err) @@ -367,12 +367,12 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( + if tx, err := vm.newAddSubnetValidatorTx( defaultWeight, // weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ); err != nil { t.Fatal(err) @@ -388,9 +388,9 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } // Case: Too many signatures - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + if tx, err := vm.newAddSubnetValidatorTx( + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time nodeID, // node ID testSubnet1.ID(), // subnet ID @@ -402,9 +402,9 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } // Case: Too few signatures - tx, err := vm.newAddNonDefaultSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + tx, err := vm.newAddSubnetValidatorTx( + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time nodeID, // node ID testSubnet1.ID(), // subnet ID @@ -414,18 +414,18 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { t.Fatal(err) } // Remove a signature - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices = - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[1:] + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices = + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).SubnetAuth.(*secp256k1fx.Input).SigIndices[1:] // This tx was syntactically verified when it was created...pretend it wan't so we don't use cache - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).syntacticallyVerified = false + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).syntacticallyVerified = false if _, _, _, _, err = tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { t.Fatal("should have failed verification because not enough control sigs") } // Case: Control Signature from 
invalid key (keys[3] is not a control key) - tx, err = vm.newAddNonDefaultSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + tx, err = vm.newAddSubnetValidatorTx( + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time nodeID, // node ID testSubnet1.ID(), // subnet ID @@ -446,9 +446,9 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { // Case: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet - if tx, err := vm.newAddNonDefaultSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix())+1, // start time + if tx, err := vm.newAddSubnetValidatorTx( + defaultWeight, // weight + uint64(defaultGenesisTime.Unix())+1, // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time nodeID, // node ID testSubnet1.ID(), // subnet ID @@ -469,8 +469,8 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } // Test that marshalling/unmarshalling works -func TestAddNonDefaultSubnetValidatorMarshal(t *testing.T) { - vm , _ := defaultVM() +func TestAddSubnetValidatorMarshal(t *testing.T) { + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -480,7 +480,7 @@ func TestAddNonDefaultSubnetValidatorMarshal(t *testing.T) { var unmarshaledTx Tx // valid tx - tx, err := vm.newAddNonDefaultSubnetValidatorTx( + tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -496,11 +496,11 @@ func TestAddNonDefaultSubnetValidatorMarshal(t *testing.T) { t.Fatal(err) } else if err := unmarshaledTx.Sign(vm.codec, nil); err != nil { t.Fatal(err) - } else if err := unmarshaledTx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err != nil { + } else if err := unmarshaledTx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID); err != nil { t.Fatal(err) } - if tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Memo == nil { // reflect.DeepEqual considers []byte{} and nil to be different so change nil to []byte{} - tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Memo = []byte{} + if tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Memo == nil { // reflect.DeepEqual considers []byte{} and nil to be different so change nil to []byte{} + tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Memo = []byte{} } if !reflect.DeepEqual(*tx, unmarshaledTx) { t.Fatal("should be equal") diff --git a/vms/platformvm/advance_time_tx.go b/vms/platformvm/advance_time_tx.go index 4ffdc8a2dcba..a1173c3f09d4 100644 --- a/vms/platformvm/advance_time_tx.go +++ b/vms/platformvm/advance_time_tx.go @@ -88,12 +88,12 @@ func (tx *UnsignedAdvanceTimeTx) SemanticVerify( return nil, nil, nil, nil, tempError{err} } - current, pending, _, _, err := vm.calculateValidators(db, tx.Timestamp(), constants.DefaultSubnetID) + current, pending, _, _, err := vm.calculateValidators(db, tx.Timestamp(), constants.PrimaryNetworkID) if err != nil { return nil, nil, nil, nil, tempError{err} - } else if err := vm.putCurrentValidators(onCommitDB, current, constants.DefaultSubnetID); err != nil { + } else if err := vm.putCurrentValidators(onCommitDB, current, constants.PrimaryNetworkID); err != nil { return nil, nil, nil, nil, tempError{err} - } else if err := 
vm.putPendingValidators(onCommitDB, pending, constants.DefaultSubnetID); err != nil { + } else if err := vm.putPendingValidators(onCommitDB, pending, constants.PrimaryNetworkID); err != nil { return nil, nil, nil, nil, tempError{err} } @@ -134,7 +134,7 @@ func (tx *UnsignedAdvanceTimeTx) SemanticVerify( return err } } - if err := vm.updateValidators(constants.DefaultSubnetID); err != nil { + if err := vm.updateValidators(constants.PrimaryNetworkID); err != nil { return err } diff --git a/vms/platformvm/advance_time_tx_test.go b/vms/platformvm/advance_time_tx_test.go index 056378829478..ae839e2403bd 100644 --- a/vms/platformvm/advance_time_tx_test.go +++ b/vms/platformvm/advance_time_tx_test.go @@ -38,7 +38,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { pendingValidatorEndTime := pendingValidatorStartTime.Add(MinimumStakingDuration) nodeIDKey, _ := vm.factory.NewPrivateKey() nodeID := nodeIDKey.PublicKey().Address() - addPendingValidatorTx, err := vm.newAddDefaultSubnetValidatorTx( + addPendingValidatorTx, err := vm.newAddPrimaryValidatorTx( vm.minStake, uint64(pendingValidatorStartTime.Unix()), uint64(pendingValidatorEndTime.Unix()), @@ -57,7 +57,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { SortByStartTime: true, Txs: []*Tx{addPendingValidatorTx}, }, - constants.DefaultSubnetID, + constants.PrimaryNetworkID, ) if err != nil { t.Fatal(err) @@ -106,7 +106,7 @@ func TestAdvanceTimeTxUpdateValidators(t *testing.T) { pendingValidatorEndTime := pendingValidatorStartTime.Add(MinimumStakingDuration) nodeIDKey, _ := vm.factory.NewPrivateKey() nodeID := nodeIDKey.PublicKey().Address() - addPendingValidatorTx, err := vm.newAddDefaultSubnetValidatorTx( + addPendingValidatorTx, err := vm.newAddPrimaryValidatorTx( vm.minStake, uint64(pendingValidatorStartTime.Unix()), uint64(pendingValidatorEndTime.Unix()), @@ -125,7 +125,7 @@ func TestAdvanceTimeTxUpdateValidators(t *testing.T) { SortByStartTime: true, Txs: []*Tx{addPendingValidatorTx}, }, - constants.DefaultSubnetID, + constants.PrimaryNetworkID, ); err != nil { t.Fatal(err) } @@ -139,25 +139,25 @@ func TestAdvanceTimeTxUpdateValidators(t *testing.T) { t.Fatal(err) } - if onCommitCurrentEvents, err := vm.getCurrentValidators(onCommit, constants.DefaultSubnetID); err != nil { + if onCommitCurrentEvents, err := vm.getCurrentValidators(onCommit, constants.PrimaryNetworkID); err != nil { t.Fatal(err) } else if onCommitCurrentEvents.Len() != len(keys)+1 { // Each key in [keys] is a validator to start with...then we added a validator t.Fatalf("Should have added the validator to the validator set") } - if onCommitPendingEvents, err := vm.getPendingValidators(onCommit, constants.DefaultSubnetID); err != nil { + if onCommitPendingEvents, err := vm.getPendingValidators(onCommit, constants.PrimaryNetworkID); err != nil { t.Fatal(err) } else if onCommitPendingEvents.Len() != 0 { t.Fatalf("Should have removed the validator from the pending validator set") } - if onAbortCurrentEvents, err := vm.getCurrentValidators(onAbort, constants.DefaultSubnetID); err != nil { + if onAbortCurrentEvents, err := vm.getCurrentValidators(onAbort, constants.PrimaryNetworkID); err != nil { t.Fatal(err) } else if onAbortCurrentEvents.Len() != len(keys) { t.Fatalf("Shouldn't have added the validator to the validator set") } - if onAbortPendingEvents, err := vm.getPendingValidators(onAbort, constants.DefaultSubnetID); err != nil { + if onAbortPendingEvents, err := vm.getPendingValidators(onAbort, constants.PrimaryNetworkID); err != nil { t.Fatal(err) } else 
if onAbortPendingEvents.Len() != 1 { t.Fatalf("Shouldn't have removed the validator from the pending validator set") diff --git a/vms/platformvm/create_chain_tx.go b/vms/platformvm/create_chain_tx.go index 8759f56ba0d1..1e431a7d37b9 100644 --- a/vms/platformvm/create_chain_tx.go +++ b/vms/platformvm/create_chain_tx.go @@ -66,7 +66,7 @@ func (tx *UnsignedCreateChainTx) Verify( return nil case tx.SubnetID.IsZero(): return errNoSubnetID - case tx.SubnetID.Equals(constants.DefaultSubnetID): + case tx.SubnetID.Equals(constants.PrimaryNetworkID): return errDSCantValidate case len(tx.ChainName) > maxNameLen: return errNameTooLong diff --git a/vms/platformvm/event_heap.go b/vms/platformvm/event_heap.go index 6fc30d4f7a01..68890476b101 100644 --- a/vms/platformvm/event_heap.go +++ b/vms/platformvm/event_heap.go @@ -48,8 +48,8 @@ func (h *EventHeap) Less(i, j int) bool { case iTime.Unix() < jTime.Unix(): return true case iTime == jTime: - _, iOk := iTx.(*UnsignedAddDefaultSubnetValidatorTx) - _, jOk := jTx.(*UnsignedAddDefaultSubnetValidatorTx) + _, iOk := iTx.(*UnsignedAddPrimaryValidatorTx) + _, jOk := jTx.(*UnsignedAddPrimaryValidatorTx) if iOk != jOk { return iOk == h.SortByStartTime @@ -95,10 +95,10 @@ func (h *EventHeap) Bytes() ([]byte, error) { return Codec.Marshal(h) } -// getDefaultSubnetStaker ... -func (h *EventHeap) getDefaultSubnetStaker(id ids.ShortID) (*Tx, error) { +// getPrimaryStaker ... +func (h *EventHeap) getPrimaryStaker(id ids.ShortID) (*Tx, error) { for _, txIntf := range h.Txs { - tx, ok := txIntf.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + tx, ok := txIntf.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) if !ok { continue } @@ -106,5 +106,5 @@ func (h *EventHeap) getDefaultSubnetStaker(id ids.ShortID) (*Tx, error) { return txIntf, nil } } - return nil, errors.New("couldn't find validator in the default subnet") + return nil, errors.New("couldn't find validator in the primary network") } diff --git a/vms/platformvm/event_heap_test.go b/vms/platformvm/event_heap_test.go index 99310c78b84f..9453311e5a39 100644 --- a/vms/platformvm/event_heap_test.go +++ b/vms/platformvm/event_heap_test.go @@ -11,7 +11,7 @@ import ( ) func TestTxHeapStart(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -20,7 +20,7 @@ func TestTxHeapStart(t *testing.T) { txHeap := EventHeap{SortByStartTime: true} - validator0, err := vm.newAddDefaultSubnetValidatorTx( + validator0, err := vm.newAddPrimaryValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime @@ -32,9 +32,9 @@ func TestTxHeapStart(t *testing.T) { if err != nil { t.Fatal(err) } - vdr0Tx := validator0.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + vdr0Tx := validator0.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) - validator1, err := vm.newAddDefaultSubnetValidatorTx( + validator1, err := vm.newAddPrimaryValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+2), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+2), // endTime @@ -46,9 +46,9 @@ func TestTxHeapStart(t *testing.T) { if err != nil { t.Fatal(err) } - vdr1Tx := validator1.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + vdr1Tx := validator1.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) - validator2, err := vm.newAddDefaultSubnetValidatorTx( + validator2, err := vm.newAddPrimaryValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+3), // 
startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+3), // endTime @@ -60,7 +60,7 @@ func TestTxHeapStart(t *testing.T) { if err != nil { t.Fatal(err) } - vdr2Tx := validator2.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + vdr2Tx := validator2.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) txHeap.Add(validator2) if timestamp := txHeap.Timestamp(); !timestamp.Equal(vdr2Tx.StartTime()) { @@ -81,7 +81,7 @@ func TestTxHeapStart(t *testing.T) { } func TestTxHeapStop(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -90,7 +90,7 @@ func TestTxHeapStop(t *testing.T) { txHeap := EventHeap{} - validator0, err := vm.newAddDefaultSubnetValidatorTx( + validator0, err := vm.newAddPrimaryValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime @@ -102,9 +102,9 @@ func TestTxHeapStop(t *testing.T) { if err != nil { t.Fatal(err) } - vdr0Tx := validator0.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + vdr0Tx := validator0.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) - validator1, err := vm.newAddDefaultSubnetValidatorTx( + validator1, err := vm.newAddPrimaryValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+2), // endTime @@ -116,9 +116,9 @@ func TestTxHeapStop(t *testing.T) { if err != nil { t.Fatal(err) } - vdr1Tx := validator1.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + vdr1Tx := validator1.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) - validator2, err := vm.newAddDefaultSubnetValidatorTx( + validator2, err := vm.newAddPrimaryValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+3), // endTime @@ -130,7 +130,7 @@ func TestTxHeapStop(t *testing.T) { if err != nil { t.Fatal(err) } - vdr2Tx := validator2.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + vdr2Tx := validator2.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) txHeap.Add(validator2) if timestamp := txHeap.Timestamp(); !timestamp.Equal(vdr2Tx.EndTime()) { @@ -151,7 +151,7 @@ func TestTxHeapStop(t *testing.T) { } func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -160,7 +160,7 @@ func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) { txHeap := EventHeap{SortByStartTime: true} - validator, err := vm.newAddDefaultSubnetValidatorTx( + validator, err := vm.newAddPrimaryValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime @@ -173,7 +173,7 @@ func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) { t.Fatal(err) } - delegator, err := vm.newAddDefaultSubnetDelegatorTx( + delegator, err := vm.newAddPrimaryDelegatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime @@ -194,7 +194,7 @@ func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) { } func TestTxHeapStopValidatorVsDelegatorOrdering(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -203,7 +203,7 @@ func TestTxHeapStopValidatorVsDelegatorOrdering(t *testing.T) { txHeap := EventHeap{} - 
validator, err := vm.newAddDefaultSubnetValidatorTx( + validator, err := vm.newAddPrimaryValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime @@ -216,7 +216,7 @@ func TestTxHeapStopValidatorVsDelegatorOrdering(t *testing.T) { t.Fatal(err) } - delegator, err := vm.newAddDefaultSubnetDelegatorTx( + delegator, err := vm.newAddPrimaryDelegatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime diff --git a/vms/platformvm/reward_validator_tx.go b/vms/platformvm/reward_validator_tx.go index 96f4fa02297d..628e8f37c9cc 100644 --- a/vms/platformvm/reward_validator_tx.go +++ b/vms/platformvm/reward_validator_tx.go @@ -18,7 +18,7 @@ import ( ) var ( - errShouldBeDSValidator = errors.New("expected validator to be in the default subnet") + errShouldBeDSValidator = errors.New("expected validator to be in the primary network") errOverflowReward = errors.New("overflow while calculating validator reward") errWrongTxType = errors.New("wrong transaction type") @@ -68,14 +68,14 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( return nil, nil, nil, nil, permError{errWrongNumberOfCredentials} } - defaultSubnetVdrHeap, err := vm.getCurrentValidators(db, constants.DefaultSubnetID) + primaryNetworkVdrHeap, err := vm.getCurrentValidators(db, constants.PrimaryNetworkID) if err != nil { return nil, nil, nil, nil, tempError{err} - } else if defaultSubnetVdrHeap.Len() == 0 { // there is no validator to remove + } else if primaryNetworkVdrHeap.Len() == 0 { // there is no validator to remove return nil, nil, nil, nil, permError{errEmptyValidatingSet} } - vdrTx := defaultSubnetVdrHeap.Remove() + vdrTx := primaryNetworkVdrHeap.Remove() txID := vdrTx.ID() if !txID.Equals(tx.TxID) { return nil, nil, nil, nil, permError{fmt.Errorf("attempting to remove TxID: %s. 
Should be removing %s", @@ -101,18 +101,18 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( // If this tx's proposal is committed, remove the validator from the validator set onCommitDB := versiondb.New(db) - if err := vm.putCurrentValidators(onCommitDB, defaultSubnetVdrHeap, constants.DefaultSubnetID); err != nil { + if err := vm.putCurrentValidators(onCommitDB, primaryNetworkVdrHeap, constants.PrimaryNetworkID); err != nil { return nil, nil, nil, nil, tempError{err} } // If this tx's proposal is aborted, remove the validator from the validator set onAbortDB := versiondb.New(db) - if err := vm.putCurrentValidators(onAbortDB, defaultSubnetVdrHeap, constants.DefaultSubnetID); err != nil { + if err := vm.putCurrentValidators(onAbortDB, primaryNetworkVdrHeap, constants.PrimaryNetworkID); err != nil { return nil, nil, nil, nil, tempError{err} } switch uVdrTx := vdrTx.UnsignedTx.(type) { - case *UnsignedAddDefaultSubnetValidatorTx: + case *UnsignedAddPrimaryValidatorTx: // Refund the stake here for i, out := range uVdrTx.Stake { utxo := &avax.UTXO{ @@ -153,13 +153,13 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( return nil, nil, nil, nil, tempError{err} } } - case *UnsignedAddDefaultSubnetDelegatorTx: + case *UnsignedAddPrimaryDelegatorTx: // We're removing a delegator - parentTx, err := defaultSubnetVdrHeap.getDefaultSubnetStaker(uVdrTx.Validator.NodeID) + parentTx, err := primaryNetworkVdrHeap.getPrimaryStaker(uVdrTx.Validator.NodeID) if err != nil { return nil, nil, nil, nil, permError{err} } - unsignedParentTx := parentTx.UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + unsignedParentTx := parentTx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) // Refund the stake here for i, out := range uVdrTx.Stake { @@ -247,7 +247,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( // validator set to remove the staker. 
onAbortDB or onCommitDB should commit // (flush to vm.DB) before this is called updateValidators := func() error { - return vm.updateValidators(constants.DefaultSubnetID) + return vm.updateValidators(constants.PrimaryNetworkID) } return onCommitDB, onAbortDB, updateValidators, updateValidators, nil diff --git a/vms/platformvm/reward_validator_tx_test.go b/vms/platformvm/reward_validator_tx_test.go index 890dc38f3274..18f85a36b048 100644 --- a/vms/platformvm/reward_validator_tx_test.go +++ b/vms/platformvm/reward_validator_tx_test.go @@ -16,19 +16,19 @@ import ( ) func TestUnsignedRewardValidatorTxSemanticVerify(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() vm.Ctx.Lock.Unlock() }() - currentValidators, err := vm.getCurrentValidators(vm.DB, constants.DefaultSubnetID) + currentValidators, err := vm.getCurrentValidators(vm.DB, constants.PrimaryNetworkID) if err != nil { t.Fatal(err) } // ID of validator that should leave DS validator set next - nextToRemove := currentValidators.Peek().UnsignedTx.(*UnsignedAddDefaultSubnetValidatorTx) + nextToRemove := currentValidators.Peek().UnsignedTx.(*UnsignedAddPrimaryValidatorTx) // Case 1: Chain timestamp is wrong if tx, err := vm.newRewardValidatorTx(nextToRemove.ID()); err != nil { @@ -60,11 +60,11 @@ func TestUnsignedRewardValidatorTxSemanticVerify(t *testing.T) { // Should be one less validator than before oldNumValidators := len(currentValidators.Txs) - if currentValidators, err := vm.getCurrentValidators(onCommitDB, constants.DefaultSubnetID); err != nil { + if currentValidators, err := vm.getCurrentValidators(onCommitDB, constants.PrimaryNetworkID); err != nil { t.Fatal(err) } else if numValidators := currentValidators.Len(); numValidators != oldNumValidators-1 { t.Fatalf("Should be %d validators but are %d", oldNumValidators-1, numValidators) - } else if currentValidators, err = vm.getCurrentValidators(onAbortDB, constants.DefaultSubnetID); err != nil { + } else if currentValidators, err = vm.getCurrentValidators(onAbortDB, constants.PrimaryNetworkID); err != nil { t.Fatal(err) } else if numValidators := currentValidators.Len(); numValidators != oldNumValidators-1 { t.Fatalf("Should be %d validators but there are %d", oldNumValidators-1, numValidators) @@ -101,7 +101,7 @@ func TestUnsignedRewardValidatorTxSemanticVerify(t *testing.T) { } func TestRewardDelegatorTxSemanticVerify(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -114,7 +114,7 @@ func TestRewardDelegatorTxSemanticVerify(t *testing.T) { vdrStartTime := uint64(defaultValidateStartTime.Unix()) + 1 vdrEndTime := uint64(defaultValidateStartTime.Add(2 * MinimumStakingDuration).Unix()) vdrNodeID := ids.GenerateTestShortID() - vdrTx, err := vm.newAddDefaultSubnetValidatorTx( + vdrTx, err := vm.newAddPrimaryValidatorTx( vm.minStake, // stakeAmt vdrStartTime, vdrEndTime, @@ -129,7 +129,7 @@ func TestRewardDelegatorTxSemanticVerify(t *testing.T) { delStartTime := vdrStartTime + 1 delEndTime := vdrEndTime - 1 - delTx, err := vm.newAddDefaultSubnetDelegatorTx( + delTx, err := vm.newAddPrimaryDelegatorTx( vm.minStake, // stakeAmt delStartTime, delEndTime, @@ -140,15 +140,15 @@ func TestRewardDelegatorTxSemanticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - unsignedDelTx := delTx.UnsignedTx.(*UnsignedAddDefaultSubnetDelegatorTx) + unsignedDelTx := delTx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx) - currentValidators, err := vm.getCurrentValidators(vm.DB, 
constants.DefaultSubnetID) + currentValidators, err := vm.getCurrentValidators(vm.DB, constants.PrimaryNetworkID) if err != nil { t.Fatal(err) } currentValidators.Add(vdrTx) currentValidators.Add(delTx) - if err := vm.putCurrentValidators(vm.DB, currentValidators, constants.DefaultSubnetID); err != nil { + if err := vm.putCurrentValidators(vm.DB, currentValidators, constants.PrimaryNetworkID); err != nil { t.Fatal(err) // Advance timestamp to when delegator should leave validator set } else if err := vm.putTimestamp(vm.DB, time.Unix(int64(delEndTime), 0)); err != nil { diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index c854e567af46..f0e7fd1f8e6d 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -398,12 +398,12 @@ type GetSubnetsArgs struct { // GetSubnetsResponse is the response from calling GetSubnets type GetSubnetsResponse struct { // Each element is a subnet that exists - // Null if there are no subnets other than the default subnet + // Null if there are no subnets other than the primary network Subnets []APISubnet `json:"subnets"` } // GetSubnets returns the subnets whose ID are in [args.IDs] -// The response will include the default subnet +// The response will include the primary network func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *GetSubnetsResponse) error { service.vm.SnowmanVM.Ctx.Log.Info("Platform: GetSubnets called") subnets, err := service.vm.getSubnets(service.vm.DB) // all subnets @@ -432,9 +432,9 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon Threshold: json.Uint32(owner.Threshold), } } - // Include Default Subnet + // Include primary network response.Subnets[len(subnets)] = APISubnet{ - ID: constants.DefaultSubnetID, + ID: constants.PrimaryNetworkID, ControlKeys: []string{}, Threshold: json.Uint32(0), } @@ -464,10 +464,10 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon ) } } - if idsSet.Contains(constants.DefaultSubnetID) { + if idsSet.Contains(constants.PrimaryNetworkID) { response.Subnets = append(response.Subnets, APISubnet{ - ID: constants.DefaultSubnetID, + ID: constants.PrimaryNetworkID, ControlKeys: []string{}, Threshold: json.Uint32(0), }, @@ -492,10 +492,10 @@ func (service *Service) GetStakingAssetID(_ *http.Request, args *GetStakingAsset service.vm.SnowmanVM.Ctx.Log.Info("Platform: GetStakingAssetID called") if args.SubnetID.IsZero() { - args.SubnetID = constants.DefaultSubnetID + args.SubnetID = constants.PrimaryNetworkID } - if !args.SubnetID.Equals(constants.DefaultSubnetID) { + if !args.SubnetID.Equals(constants.PrimaryNetworkID) { return fmt.Errorf("Subnet %s doesn't have a valid staking token", args.SubnetID) } @@ -513,7 +513,7 @@ func (service *Service) GetStakingAssetID(_ *http.Request, args *GetStakingAsset // GetCurrentValidatorsArgs are the arguments for calling GetCurrentValidators type GetCurrentValidatorsArgs struct { // Subnet we're listing the validators of - // If omitted, defaults to default subnet + // If omitted, defaults to primary network SubnetID ids.ID `json:"subnetID"` } @@ -526,7 +526,7 @@ type GetCurrentValidatorsReply struct { func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsArgs, reply *GetCurrentValidatorsReply) error { service.vm.Ctx.Log.Info("Platform: GetCurrentValidators called") if args.SubnetID.IsZero() { - args.SubnetID = constants.DefaultSubnetID + args.SubnetID = constants.PrimaryNetworkID } validators, err := 
service.vm.getCurrentValidators(service.vm.DB, args.SubnetID) @@ -535,10 +535,10 @@ func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentVa } reply.Validators = make([]FormattedAPIValidator, validators.Len()) - if args.SubnetID.Equals(constants.DefaultSubnetID) { + if args.SubnetID.Equals(constants.PrimaryNetworkID) { for i, tx := range validators.Txs { switch tx := tx.UnsignedTx.(type) { - case *UnsignedAddDefaultSubnetValidatorTx: + case *UnsignedAddPrimaryValidatorTx: weight := json.Uint64(tx.Validator.Weight()) reply.Validators[i] = FormattedAPIValidator{ ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), @@ -546,7 +546,7 @@ func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentVa EndTime: json.Uint64(tx.EndTime().Unix()), StakeAmount: &weight, } - case *UnsignedAddDefaultSubnetDelegatorTx: + case *UnsignedAddPrimaryDelegatorTx: weight := json.Uint64(tx.Validator.Weight()) reply.Validators[i] = FormattedAPIValidator{ ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), @@ -560,7 +560,7 @@ func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentVa } } else { for i, tx := range validators.Txs { - utx := tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx) + utx := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx) weight := json.Uint64(utx.Validator.Weight()) reply.Validators[i] = FormattedAPIValidator{ ID: utx.Validator.ID().PrefixedString(constants.NodeIDPrefix), @@ -577,7 +577,7 @@ func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentVa // GetPendingValidatorsArgs are the arguments for calling GetPendingValidators type GetPendingValidatorsArgs struct { // Subnet we're getting the pending validators of - // If omitted, defaults to default subnet + // If omitted, defaults to primary network SubnetID ids.ID `json:"subnetID"` } @@ -590,7 +590,7 @@ type GetPendingValidatorsReply struct { func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { service.vm.Ctx.Log.Info("Platform: GetPendingValidators called") if args.SubnetID.IsZero() { - args.SubnetID = constants.DefaultSubnetID + args.SubnetID = constants.PrimaryNetworkID } validators, err := service.vm.getPendingValidators(service.vm.DB, args.SubnetID) @@ -600,9 +600,9 @@ func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingVa reply.Validators = make([]FormattedAPIValidator, validators.Len()) for i, tx := range validators.Txs { - if args.SubnetID.Equals(constants.DefaultSubnetID) { + if args.SubnetID.Equals(constants.PrimaryNetworkID) { switch tx := tx.UnsignedTx.(type) { - case *UnsignedAddDefaultSubnetValidatorTx: + case *UnsignedAddPrimaryValidatorTx: weight := json.Uint64(tx.Validator.Weight()) reply.Validators[i] = FormattedAPIValidator{ ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), @@ -610,7 +610,7 @@ func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingVa EndTime: json.Uint64(tx.EndTime().Unix()), StakeAmount: &weight, } - case *UnsignedAddDefaultSubnetDelegatorTx: + case *UnsignedAddPrimaryDelegatorTx: weight := json.Uint64(tx.Validator.Weight()) reply.Validators[i] = FormattedAPIValidator{ ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), @@ -622,7 +622,7 @@ func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingVa return fmt.Errorf("couldn't get the reward address of %s", tx.ID()) } } else { - utx := 
tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx) + utx := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx) weight := json.Uint64(utx.Validator.Weight()) reply.Validators[i] = FormattedAPIValidator{ ID: utx.Validator.ID().PrefixedString(constants.NodeIDPrefix), @@ -642,7 +642,7 @@ type SampleValidatorsArgs struct { Size json.Uint16 `json:"size"` // ID of subnet to sample validators from - // If omitted, defaults to the default subnet + // If omitted, defaults to the primary network SubnetID ids.ID `json:"subnetID"` } @@ -655,7 +655,7 @@ type SampleValidatorsReply struct { func (service *Service) SampleValidators(_ *http.Request, args *SampleValidatorsArgs, reply *SampleValidatorsReply) error { service.vm.Ctx.Log.Info("Platform: SampleValidators called with Size = %d", args.Size) if args.SubnetID.IsZero() { - args.SubnetID = constants.DefaultSubnetID + args.SubnetID = constants.PrimaryNetworkID } validators, ok := service.vm.validators.GetValidatorSet(args.SubnetID) @@ -687,17 +687,17 @@ func (service *Service) SampleValidators(_ *http.Request, args *SampleValidators ****************************************************** */ -// AddDefaultSubnetValidatorArgs are the arguments to AddDefaultSubnetValidator -type AddDefaultSubnetValidatorArgs struct { - FormattedAPIDefaultSubnetValidator +// AddPrimaryValidatorArgs are the arguments to AddPrimaryValidator +type AddPrimaryValidatorArgs struct { + FormattedAPIPrimaryValidator api.UserPass } -// AddDefaultSubnetValidator creates and signs and issues a transaction to add a -// validator to the default subnet -func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *api.JsonTxID) error { - service.vm.Ctx.Log.Info("Platform: AddDefaultSubnetValidator called") +// AddPrimaryValidator creates and signs and issues a transaction to add a +// validator to the primary network +func (service *Service) AddPrimaryValidator(_ *http.Request, args *AddPrimaryValidatorArgs, reply *api.JsonTxID) error { + service.vm.Ctx.Log.Info("Platform: AddPrimaryValidator called") switch { case args.RewardAddress == "": return errNoRewardAddress @@ -735,7 +735,7 @@ func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefa } // Create the transaction - tx, err := service.vm.newAddDefaultSubnetValidatorTx( + tx, err := service.vm.newAddPrimaryValidatorTx( uint64(args.weight()), // Stake amount uint64(args.StartTime), // Start time uint64(args.EndTime), // End time @@ -752,17 +752,17 @@ func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefa return service.vm.issueTx(tx) } -// AddDefaultSubnetDelegatorArgs are the arguments to AddDefaultSubnetDelegator -type AddDefaultSubnetDelegatorArgs struct { +// AddPrimaryDelegatorArgs are the arguments to AddPrimaryDelegator +type AddPrimaryDelegatorArgs struct { FormattedAPIValidator api.UserPass RewardAddress string `json:"rewardAddress"` } -// AddDefaultSubnetDelegator creates and signs and issues a transaction to add a -// delegator to the default subnet -func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *api.JsonTxID) error { - service.vm.Ctx.Log.Info("Platform: AddDefaultSubnetDelegator called") +// AddPrimaryDelegator creates and signs and issues a transaction to add a +// delegator to the primary network +func (service *Service) AddPrimaryDelegator(_ *http.Request, args *AddPrimaryDelegatorArgs, reply *api.JsonTxID) error { + service.vm.Ctx.Log.Info("Platform: 
AddPrimaryDelegator called") switch { case int64(args.StartTime) < time.Now().Unix(): return fmt.Errorf("start time must be in the future") @@ -798,7 +798,7 @@ func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefa } // Create the transaction - tx, err := service.vm.newAddDefaultSubnetDelegatorTx( + tx, err := service.vm.newAddPrimaryDelegatorTx( uint64(args.weight()), // Stake amount uint64(args.StartTime), // Start time uint64(args.EndTime), // End time @@ -814,18 +814,18 @@ func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefa return service.vm.issueTx(tx) } -// AddNonDefaultSubnetValidatorArgs are the arguments to AddNonDefaultSubnetValidator -type AddNonDefaultSubnetValidatorArgs struct { +// AddSubnetValidatorArgs are the arguments to AddSubnetValidator +type AddSubnetValidatorArgs struct { FormattedAPIValidator api.UserPass // ID of subnet to validate SubnetID string `json:"subnetID"` } -// AddNonDefaultSubnetValidator creates and signs and issues a transaction to -// add a validator to a subnet other than the default subnet -func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddNonDefaultSubnetValidatorArgs, response *api.JsonTxID) error { - service.vm.SnowmanVM.Ctx.Log.Info("Platform: AddNonDefaultSubnetValidator called") +// AddSubnetValidator creates and signs and issues a transaction to +// add a validator to a subnet other than the primary network +func (service *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorArgs, response *api.JsonTxID) error { + service.vm.SnowmanVM.Ctx.Log.Info("Platform: AddSubnetValidator called") switch { case args.SubnetID == "": return errNoSubnetID @@ -840,8 +840,8 @@ func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddN if err != nil { return fmt.Errorf("problem parsing subnetID '%s': %w", args.SubnetID, err) } - if subnetID.Equals(constants.DefaultSubnetID) { - return errors.New("non-default subnet validator attempts to validate default subnet") + if subnetID.Equals(constants.PrimaryNetworkID) { + return errors.New("subnet validator attempts to validate primary network") } // Get the keys controlled by the user @@ -856,7 +856,7 @@ func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddN } // Create the transaction - tx, err := service.vm.newAddNonDefaultSubnetValidatorTx( + tx, err := service.vm.newAddSubnetValidatorTx( uint64(args.weight()), // Stake amount uint64(args.StartTime), // Start time uint64(args.EndTime), // End time @@ -1069,7 +1069,7 @@ func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchain fxIDs = append(fxIDs, secp256k1fx.ID) } - if args.SubnetID.Equals(constants.DefaultSubnetID) { + if args.SubnetID.Equals(constants.PrimaryNetworkID) { return errDSCantValidate } @@ -1205,8 +1205,8 @@ type ValidatesResponse struct { func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response *ValidatesResponse) error { service.vm.Ctx.Log.Info("Platform: Validates called") // Verify that the Subnet exists - // Ignore lookup error if it's the DefaultSubnetID - if _, err := service.vm.getSubnet(service.vm.DB, args.SubnetID); err != nil && !args.SubnetID.Equals(constants.DefaultSubnetID) { + // Ignore lookup error if it's the PrimaryNetworkID + if _, err := service.vm.getSubnet(service.vm.DB, args.SubnetID); err != nil && !args.SubnetID.Equals(constants.PrimaryNetworkID) { return fmt.Errorf("problem retrieving subnet '%s': %w", args.SubnetID, err) } // Get 
the chains that exist diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index dd8502b98643..9b0ba7da839c 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -40,7 +40,7 @@ var ( ) func defaultService(t *testing.T) *Service { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer vm.Ctx.Lock.Unlock() ks := keystore.CreateTestKeystore() @@ -72,9 +72,9 @@ func defaultAddress(t *testing.T, service *Service) { } } -func TestAddDefaultSubnetValidator(t *testing.T) { +func TestAddPrimaryValidator(t *testing.T) { expectedJSONString := `{"startTime":"0","endTime":"0","nodeID":"","rewardAddress":"","delegationFeeRate":"0.0000","username":"","password":""}` - args := AddDefaultSubnetValidatorArgs{} + args := AddPrimaryValidatorArgs{} bytes, err := json.Marshal(&args) if err != nil { t.Fatal(err) @@ -235,7 +235,7 @@ func TestGetTx(t *testing.T) { test{ "proposal block", func() (*Tx, error) { - return service.vm.newAddDefaultSubnetValidatorTx( // Test GetTx works for proposal blocks + return service.vm.newAddPrimaryValidatorTx( // Test GetTx works for proposal blocks service.vm.minStake, uint64(service.vm.clock.Time().Add(Delta).Unix()), uint64(service.vm.clock.Time().Add(Delta).Add(MinimumStakingDuration).Unix()), diff --git a/vms/platformvm/static_service.go b/vms/platformvm/static_service.go index b63a6025e5b1..54c7245b7a35 100644 --- a/vms/platformvm/static_service.go +++ b/vms/platformvm/static_service.go @@ -61,8 +61,8 @@ func (v *APIValidator) weight() uint64 { } } -// APIDefaultSubnetValidator is a validator of the default subnet -type APIDefaultSubnetValidator struct { +// APIPrimaryValidator is a validator of the primary network +type APIPrimaryValidator struct { APIValidator RewardAddress string `json:"rewardAddress"` @@ -89,8 +89,8 @@ func (v *FormattedAPIValidator) weight() uint64 { } } -// FormattedAPIDefaultSubnetValidator is a formatted validator of the default subnet -type FormattedAPIDefaultSubnetValidator struct { +// FormattedAPIPrimaryValidator is a formatted validator of the primary network +type FormattedAPIPrimaryValidator struct { FormattedAPIValidator RewardAddress string `json:"rewardAddress"` @@ -118,17 +118,17 @@ type APIChain struct { // the genesis data of the Platform Chain. // [NetworkID] is the ID of the network // [UTXOs] are the UTXOs on the Platform Chain that exist at genesis. -// [Validators] are the validators of the default subnet at genesis. +// [Validators] are the validators of the primary network at genesis. // [Chains] are the chains that exist at genesis. // [Time] is the Platform Chain's time at network genesis. type BuildGenesisArgs struct { - AvaxAssetID ids.ID `json:"avaxAssetID"` - NetworkID json.Uint32 `json:"address"` - UTXOs []APIUTXO `json:"utxos"` - Validators []FormattedAPIDefaultSubnetValidator `json:"defaultSubnetValidators"` - Chains []APIChain `json:"chains"` - Time json.Uint64 `json:"time"` - Message string `json:"message"` + AvaxAssetID ids.ID `json:"avaxAssetID"` + NetworkID json.Uint32 `json:"address"` + UTXOs []APIUTXO `json:"utxos"` + Validators []FormattedAPIPrimaryValidator `json:"primaryNetworkValidators"` + Chains []APIChain `json:"chains"` + Time json.Uint64 `json:"time"` + Message string `json:"message"` } // BuildGenesisReply is the reply from BuildGenesis @@ -198,7 +198,7 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r }) } - // Specify the validators that are validating the default subnet at genesis. 
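One practical consequence of the BuildGenesisArgs change above is that the genesis JSON key moves from "defaultSubnetValidators" to "primaryNetworkValidators". A minimal sketch of the effect, assuming nothing beyond encoding/json; genesisArgs is an illustrative stand-in trimmed to two fields (the real Validators field holds FormattedAPIPrimaryValidator values, not strings):

package main

import (
    "encoding/json"
    "fmt"
)

// genesisArgs is trimmed to two fields to show the renamed JSON key; the
// real struct lives in static_service.go and carries several more fields.
type genesisArgs struct {
    Validators []string `json:"primaryNetworkValidators"`
    Time       uint64   `json:"time"`
}

func main() {
    b, err := json.Marshal(genesisArgs{
        Validators: []string{"validator-1"}, // placeholder entry
        Time:       5,
    })
    if err != nil {
        panic(err)
    }
    // Prints: {"primaryNetworkValidators":["validator-1"],"time":5}
    fmt.Println(string(b))
}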
+ // Specify the validators that are validating the primary network at genesis. validators := &EventHeap{} for _, validator := range args.Validators { weight := validator.weight() @@ -217,7 +217,7 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r return err } - tx := &Tx{UnsignedTx: &UnsignedAddDefaultSubnetValidatorTx{ + tx := &Tx{UnsignedTx: &UnsignedAddPrimaryValidatorTx{ BaseTx: BaseTx{BaseTx: avax.BaseTx{ NetworkID: uint32(args.NetworkID), BlockchainID: ids.Empty, diff --git a/vms/platformvm/static_service_test.go b/vms/platformvm/static_service_test.go index ecfabb9016e4..fcfc8fda067a 100644 --- a/vms/platformvm/static_service_test.go +++ b/vms/platformvm/static_service_test.go @@ -25,7 +25,7 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { Amount: 0, } weight := json.Uint64(987654321) - validator := FormattedAPIDefaultSubnetValidator{ + validator := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ EndTime: 15, Weight: &weight, @@ -38,7 +38,7 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { UTXOs: []APIUTXO{ utxo, }, - Validators: []FormattedAPIDefaultSubnetValidator{ + Validators: []FormattedAPIPrimaryValidator{ validator, }, Time: 5, @@ -64,7 +64,7 @@ func TestBuildGenesisInvalidAmount(t *testing.T) { Amount: 123456789, } weight := json.Uint64(0) - validator := FormattedAPIDefaultSubnetValidator{ + validator := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: 0, EndTime: 15, @@ -78,7 +78,7 @@ func TestBuildGenesisInvalidAmount(t *testing.T) { UTXOs: []APIUTXO{ utxo, }, - Validators: []FormattedAPIDefaultSubnetValidator{ + Validators: []FormattedAPIPrimaryValidator{ validator, }, Time: 5, @@ -105,7 +105,7 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { } weight := json.Uint64(987654321) - validator := FormattedAPIDefaultSubnetValidator{ + validator := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: 0, EndTime: 5, @@ -119,7 +119,7 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { UTXOs: []APIUTXO{ utxo, }, - Validators: []FormattedAPIDefaultSubnetValidator{ + Validators: []FormattedAPIPrimaryValidator{ validator, }, Time: 5, @@ -146,7 +146,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { } weight := json.Uint64(987654321) - validator1 := FormattedAPIDefaultSubnetValidator{ + validator1 := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: 0, EndTime: 20, @@ -156,7 +156,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { RewardAddress: addr, } - validator2 := FormattedAPIDefaultSubnetValidator{ + validator2 := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: 3, EndTime: 15, @@ -166,7 +166,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { RewardAddress: addr, } - validator3 := FormattedAPIDefaultSubnetValidator{ + validator3 := FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: 1, EndTime: 10, @@ -181,7 +181,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { UTXOs: []APIUTXO{ utxo, }, - Validators: []FormattedAPIDefaultSubnetValidator{ + Validators: []FormattedAPIPrimaryValidator{ validator1, validator2, validator3, diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index deb680f29382..45dcc14e21fe 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -100,7 +100,7 @@ var ( errEmptyAddressPrefix = errors.New("empty address prefix") 
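BuildGenesis above collects the genesis staker txs into an EventHeap, and TestBuildGenesisReturnsSortedValidators checks that they come back ordered by time. As a self-contained illustration of that heap-ordering idea with the standard library (eventHeap here is an illustrative stand-in for the platformvm type, keyed on bare timestamps):

package main

import (
    "container/heap"
    "fmt"
)

// eventHeap orders bare timestamps; the platformvm EventHeap does the same
// over staker txs, keyed on start or end time depending on SortByStartTime.
type eventHeap []int64

func (h eventHeap) Len() int           { return len(h) }
func (h eventHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h eventHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *eventHeap) Push(x interface{}) { *h = append(*h, x.(int64)) }

func (h *eventHeap) Pop() interface{} {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[:n-1]
    return x
}

func main() {
    h := &eventHeap{20, 5, 15} // e.g. validator start times, in any order
    heap.Init(h)
    for h.Len() > 0 {
        fmt.Println(heap.Pop(h)) // 5, then 15, then 20: earliest event first
    }
}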
errEmptyAddressSuffix = errors.New("empty address suffix") errInvalidID = errors.New("invalid ID") - errDSCantValidate = errors.New("new blockchain can't be validated by default Subnet") + errDSCantValidate = errors.New("new blockchain can't be validated by primary network") ) // Codec does serialization and deserialization @@ -128,9 +128,9 @@ func init() { Codec.RegisterType(&secp256k1fx.Input{}), Codec.RegisterType(&secp256k1fx.OutputOwners{}), - Codec.RegisterType(&UnsignedAddDefaultSubnetValidatorTx{}), - Codec.RegisterType(&UnsignedAddNonDefaultSubnetValidatorTx{}), - Codec.RegisterType(&UnsignedAddDefaultSubnetDelegatorTx{}), + Codec.RegisterType(&UnsignedAddPrimaryValidatorTx{}), + Codec.RegisterType(&UnsignedAddSubnetValidatorTx{}), + Codec.RegisterType(&UnsignedAddPrimaryDelegatorTx{}), Codec.RegisterType(&UnsignedCreateChainTx{}), Codec.RegisterType(&UnsignedCreateSubnetTx{}), @@ -252,8 +252,8 @@ func (vm *VM) Initialize( } heap.Init(validators) - // Persist default subnet validator set at genesis - if err := vm.putCurrentValidators(vm.DB, validators, constants.DefaultSubnetID); err != nil { + // Persist primary network validator set at genesis + if err := vm.putCurrentValidators(vm.DB, validators, constants.PrimaryNetworkID); err != nil { return err } @@ -290,7 +290,7 @@ func (vm *VM) Initialize( } // There are no pending stakers at genesis - if err := vm.putPendingValidators(vm.DB, &EventHeap{SortByStartTime: true}, constants.DefaultSubnetID); err != nil { + if err := vm.putPendingValidators(vm.DB, &EventHeap{SortByStartTime: true}, constants.PrimaryNetworkID); err != nil { return err } @@ -406,7 +406,7 @@ func (vm *VM) initSubnets() error { return err } - if err := vm.updateValidators(constants.DefaultSubnetID); err != nil { + if err := vm.updateValidators(constants.PrimaryNetworkID); err != nil { return err } @@ -434,7 +434,7 @@ func (vm *VM) createChain(tx *Tx) { return } if vm.stakingEnabled && // Staking is enabled, so nodes might not validate all chains - !constants.DefaultSubnetID.Equals(unsignedTx.SubnetID) && // All nodes must validate the default subnet + !constants.PrimaryNetworkID.Equals(unsignedTx.SubnetID) && // All nodes must validate the primary network !validators.Contains(vm.Ctx.NodeID) { // This node doesn't validate this blockchain return } @@ -547,9 +547,9 @@ func (vm *VM) BuildBlock() (snowman.Block, error) { return nil, errEndOfTime } - // If the chain time would be the time for the next default subnet validator to leave, + // If the chain time would be the time for the next primary network validator to leave, // then we create a block that removes the validator and proposes they receive a validator reward - currentValidators, err := vm.getCurrentValidators(db, constants.DefaultSubnetID) + currentValidators, err := vm.getCurrentValidators(db, constants.PrimaryNetworkID) if err != nil { return nil, fmt.Errorf("couldn't get validator set: %w", err) } @@ -728,7 +728,7 @@ func (vm *VM) resetTimer() { return } - nextDSValidatorEndTime := vm.nextSubnetValidatorChangeTime(db, constants.DefaultSubnetID, false) + nextDSValidatorEndTime := vm.nextSubnetValidatorChangeTime(db, constants.PrimaryNetworkID, false) if timestamp.Equal(nextDSValidatorEndTime) { vm.SnowmanVM.NotifyBlockReady() // Should issue a ProposeRewardValidator return @@ -772,7 +772,7 @@ func (vm *VM) resetTimer() { // Otherwise, returns the time at which the next validator (of any subnet) stops validating // If no such validator is found, returns maxTime func (vm *VM) nextValidatorChangeTime(db 
database.Database, start bool) time.Time { - earliest := vm.nextSubnetValidatorChangeTime(db, constants.DefaultSubnetID, start) + earliest := vm.nextSubnetValidatorChangeTime(db, constants.PrimaryNetworkID, start) subnets, err := vm.getSubnets(db) if err != nil { return earliest @@ -810,7 +810,7 @@ func (vm *VM) nextSubnetValidatorChangeTime(db database.Database, subnetID ids.I // 2) The pending validator set of subnet with ID [subnetID] when timestamp is advanced to [timestamp] // 3) The IDs of the validators that start validating [subnetID] between now and [timestamp] // 4) The IDs of the validators that stop validating [subnetID] between now and [timestamp] -// Note that this method will not remove validators from the current validator set of the default subnet. +// Note that this method will not remove validators from the current validator set of the primary network. // That happens in reward blocks. func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, subnetID ids.ID) (current, pending *EventHeap, started, stopped ids.ShortSet, err error) { @@ -819,9 +819,9 @@ func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, sub if err != nil { return nil, nil, nil, nil, err } - if !subnetID.Equals(constants.DefaultSubnetID) { // validators of default subnet removed in rewardValidatorTxs, not here + if !subnetID.Equals(constants.PrimaryNetworkID) { // validators of primary network removed in rewardValidatorTxs, not here for current.Len() > 0 { - next := current.Peek().UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx) // current validator with earliest end time + next := current.Peek().UnsignedTx.(*UnsignedAddSubnetValidatorTx) // current validator with earliest end time if timestamp.Before(next.EndTime()) { break } @@ -836,21 +836,21 @@ func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, sub for pending.Len() > 0 { nextTx := pending.Peek() // pending staker with earliest start time switch tx := nextTx.UnsignedTx.(type) { - case *UnsignedAddDefaultSubnetValidatorTx: + case *UnsignedAddPrimaryValidatorTx: if timestamp.Before(tx.StartTime()) { return current, pending, started, stopped, nil } current.Add(nextTx) pending.Remove() started.Add(tx.Validator.ID()) - case *UnsignedAddNonDefaultSubnetValidatorTx: + case *UnsignedAddSubnetValidatorTx: if timestamp.Before(tx.StartTime()) { return current, pending, started, stopped, nil } current.Add(nextTx) pending.Remove() started.Add(tx.Validator.ID()) - case *UnsignedAddDefaultSubnetDelegatorTx: + case *UnsignedAddPrimaryDelegatorTx: if timestamp.Before(tx.StartTime()) { return current, pending, started, stopped, nil } @@ -869,11 +869,11 @@ func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator { for _, event := range validatorEvents.Txs { var vdr validators.Validator switch tx := event.UnsignedTx.(type) { - case *UnsignedAddDefaultSubnetValidatorTx: + case *UnsignedAddPrimaryValidatorTx: vdr = &tx.Validator - case *UnsignedAddDefaultSubnetDelegatorTx: + case *UnsignedAddPrimaryDelegatorTx: vdr = &tx.Validator - case *UnsignedAddNonDefaultSubnetValidatorTx: + case *UnsignedAddSubnetValidatorTx: vdr = &tx.Validator default: continue diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index bbec6dc67d69..260c084d0347 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -69,7 +69,7 @@ var ( // amount all genesis validators stake in defaultVM defaultStakeAmount uint64 = 100 * minStake - // non-default Subnet that exists at 
genesis in defaultVM + // subnet that exists at genesis in defaultVM // Its controlKeys are keys[0], keys[1], keys[2] testSubnet1 *UnsignedCreateSubnetTx testSubnet1ControlKeys []*crypto.PrivateKeySECP256K1R @@ -165,7 +165,7 @@ func defaultGenesis() (*BuildGenesisArgs, []byte) { } } - genesisValidators := make([]FormattedAPIDefaultSubnetValidator, len(keys)) + genesisValidators := make([]FormattedAPIPrimaryValidator, len(keys)) for i, key := range keys { weight := json.Uint64(defaultWeight) id := key.PublicKey().Address() @@ -173,7 +173,7 @@ func defaultGenesis() (*BuildGenesisArgs, []byte) { if err != nil { panic(err) } - genesisValidators[i] = FormattedAPIDefaultSubnetValidator{ + genesisValidators[i] = FormattedAPIPrimaryValidator{ FormattedAPIValidator: FormattedAPIValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), @@ -214,9 +214,9 @@ func defaultVM() (*VM, database.Database) { chainDB := prefixdb.New([]byte{0}, baseDB) atomicDB := prefixdb.New([]byte{1}, baseDB) - defaultSubnet := validators.NewSet() // TODO do we need this? + primaryNetwork := validators.NewSet() // TODO do we need this? vm.validators = validators.NewManager() - vm.validators.PutValidatorSet(constants.DefaultSubnetID, defaultSubnet) + vm.validators.PutValidatorSet(constants.PrimaryNetworkID, primaryNetwork) vm.clock.Set(defaultGenesisTime) msgChan := make(chan common.Message, 1) @@ -237,7 +237,7 @@ func defaultVM() (*VM, database.Database) { panic(err) } - // Create a non-default subnet and store it in testSubnet1 + // Create a subnet and store it in testSubnet1 if tx, err := vm.newCreateSubnetTx( 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet // control keys are keys[0], keys[1], keys[2] @@ -314,8 +314,8 @@ func TestGenesis(t *testing.T) { } } - // Ensure current validator set of default subnet is correct - currentValidators, err := vm.getCurrentValidators(vm.DB, constants.DefaultSubnetID) + // Ensure current validator set of primary network is correct + currentValidators, err := vm.getCurrentValidators(vm.DB, constants.PrimaryNetworkID) if err != nil { t.Fatal(err) } else if len(currentValidators.Txs) != len(genesisState.Validators) { @@ -332,7 +332,7 @@ func TestGenesis(t *testing.T) { } // Ensure pending validator set is correct (empty) - if pendingValidators, err := vm.getPendingValidators(vm.DB, constants.DefaultSubnetID); err != nil { + if pendingValidators, err := vm.getPendingValidators(vm.DB, constants.PrimaryNetworkID); err != nil { t.Fatal(err) } else if pendingValidators.Len() != 0 { t.Fatal("vm's pending validator set should be empty") @@ -351,8 +351,8 @@ func TestGenesis(t *testing.T) { } } -// accept proposal to add validator to default subnet -func TestAddDefaultSubnetValidatorCommit(t *testing.T) { +// accept proposal to add validator to primary network +func TestAddPrimaryValidatorCommit(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -369,7 +369,7 @@ func TestAddDefaultSubnetValidatorCommit(t *testing.T) { ID := key.PublicKey().Address() // create valid tx - tx, err := vm.newAddDefaultSubnetValidatorTx( + tx, err := vm.newAddPrimaryValidatorTx( vm.minStake, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -419,7 +419,7 @@ func TestAddDefaultSubnetValidatorCommit(t *testing.T) { } // Verify that new validator now in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, constants.DefaultSubnetID) + pendingValidators, err := 
vm.getPendingValidators(vm.DB, constants.PrimaryNetworkID) if err != nil { t.Fatal(err) } @@ -430,8 +430,8 @@ func TestAddDefaultSubnetValidatorCommit(t *testing.T) { } } -// verify invalid proposal to add validator to default subnet -func TestInvalidAddDefaultSubnetValidatorCommit(t *testing.T) { +// verify invalid proposal to add validator to primary network +func TestInvalidAddPrimaryValidatorCommit(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -445,7 +445,7 @@ func TestInvalidAddDefaultSubnetValidatorCommit(t *testing.T) { ID := key.PublicKey().Address() // create invalid tx - if tx, err := vm.newAddDefaultSubnetValidatorTx( + if tx, err := vm.newAddPrimaryValidatorTx( vm.minStake, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -476,8 +476,8 @@ func TestInvalidAddDefaultSubnetValidatorCommit(t *testing.T) { } } -// Reject proposal to add validator to default subnet -func TestAddDefaultSubnetValidatorReject(t *testing.T) { +// Reject proposal to add validator to primary network +func TestAddPrimaryValidatorReject(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -491,7 +491,7 @@ func TestAddDefaultSubnetValidatorReject(t *testing.T) { ID := key.PublicKey().Address() // create valid tx - tx, err := vm.newAddDefaultSubnetValidatorTx( + tx, err := vm.newAddPrimaryValidatorTx( vm.minStake, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -543,7 +543,7 @@ func TestAddDefaultSubnetValidatorReject(t *testing.T) { } // Verify that new validator NOT in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, constants.DefaultSubnetID) + pendingValidators, err := vm.getPendingValidators(vm.DB, constants.PrimaryNetworkID) if err != nil { t.Fatal(err) } @@ -554,8 +554,8 @@ func TestAddDefaultSubnetValidatorReject(t *testing.T) { } } -// Accept proposal to add validator to non-default subnet -func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) { +// Accept proposal to add validator to subnet +func TestAddSubnetValidatorAccept(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -568,8 +568,8 @@ func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) { // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] - // validates default subnet ([defaultValidateStartTime, defaultValidateEndTime]) - tx, err := vm.newAddNonDefaultSubnetValidatorTx( + // validates primary network ([defaultValidateStartTime, defaultValidateEndTime]) + tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -633,8 +633,8 @@ func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) { } } -// Reject proposal to add validator to non-default subnet -func TestAddNonDefaultSubnetValidatorReject(t *testing.T) { +// Reject proposal to add validator to subnet +func TestAddSubnetValidatorReject(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -649,8 +649,8 @@ func TestAddNonDefaultSubnetValidatorReject(t *testing.T) { // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] - // validates default subnet ([defaultValidateStartTime, defaultValidateEndTime]) - tx, err := vm.newAddNonDefaultSubnetValidatorTx( + // validates primary network ([defaultValidateStartTime, defaultValidateEndTime]) + tx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -714,7 +714,7 @@ func TestAddNonDefaultSubnetValidatorReject(t *testing.T) { } } -// Test case where 
default subnet validator rewarded +Test case where primary network validator rewarded func TestRewardValidatorAccept(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() @@ -802,7 +802,7 @@ func TestRewardValidatorAccept(t *testing.T) { t.Fatal(err) } else if status != Committed { t.Fatalf("status should be Committed but is %s", status) - } else if currentValidators, err := vm.getCurrentValidators(vm.DB, constants.DefaultSubnetID); err != nil { + } else if currentValidators, err := vm.getCurrentValidators(vm.DB, constants.PrimaryNetworkID); err != nil { // Verify that genesis validator was rewarded and removed from current validator set t.Fatal(err) } else if currentValidators.Len() != len(keys)-1 { @@ -810,7 +810,7 @@ } } -// Test case where default subnet validator not rewarded +// Test case where primary network validator not rewarded func TestRewardValidatorReject(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() @@ -889,7 +889,7 @@ func TestRewardValidatorReject(t *testing.T) { } // Verify that genesis validator was removed from current validator set - currentValidators, err := vm.getCurrentValidators(vm.DB, constants.DefaultSubnetID) + currentValidators, err := vm.getCurrentValidators(vm.DB, constants.PrimaryNetworkID) if err != nil { t.Fatal(err) } @@ -1005,7 +1005,7 @@ func TestCreateSubnet(t *testing.T) { startTime := defaultValidateStartTime.Add(Delta).Add(1 * time.Second) endTime := startTime.Add(MinimumStakingDuration) // [startTime, endTime] is subset of time keys[0] validates default subnet so tx is valid - if addValidatorTx, err := vm.newAddNonDefaultSubnetValidatorTx( + if addValidatorTx, err := vm.newAddSubnetValidatorTx( defaultWeight, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -1062,7 +1062,7 @@ func TestCreateSubnet(t *testing.T) { } foundNewValidator := false for _, tx := range pendingValidators.Txs { - if tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.ID().Equals(nodeID) { + if tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.ID().Equals(nodeID) { foundNewValidator = true break } } @@ -1127,7 +1127,7 @@ func TestCreateSubnet(t *testing.T) { } foundNewValidator = false for _, tx := range currentValidators.Txs { - if tx.UnsignedTx.(*UnsignedAddNonDefaultSubnetValidatorTx).Validator.ID().Equals(nodeID) { + if tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.ID().Equals(nodeID) { foundNewValidator = true break } @@ -1350,9 +1350,9 @@ func TestRestartPartiallyAccepted(t *testing.T) { SnowmanVM: &core.SnowmanVM{}, chainManager: chains.MockManager{}, } - firstDefaultSubnet := validators.NewSet() + firstPrimaryNetwork := validators.NewSet() firstVM.validators = validators.NewManager() - firstVM.validators.PutValidatorSet(constants.DefaultSubnetID, firstDefaultSubnet) + firstVM.validators.PutValidatorSet(constants.PrimaryNetworkID, firstPrimaryNetwork) firstVM.clock.Set(defaultGenesisTime) firstCtx := defaultContext() firstCtx.Lock.Lock() @@ -1423,9 +1423,9 @@ func TestRestartPartiallyAccepted(t *testing.T) { chainManager: chains.MockManager{}, } - secondDefaultSubnet := validators.NewSet() + secondPrimaryNetwork := validators.NewSet() secondVM.validators = validators.NewManager() - secondVM.validators.PutValidatorSet(constants.DefaultSubnetID, secondDefaultSubnet) + secondVM.validators.PutValidatorSet(constants.PrimaryNetworkID, secondPrimaryNetwork) secondVM.clock.Set(defaultGenesisTime) secondCtx := defaultContext() @@ -1456,9 +1456,9 @@ func TestRestartFullyAccepted(t 
*testing.T) { chainManager: chains.MockManager{}, } - firstDefaultSubnet := validators.NewSet() + firstPrimaryNetwork := validators.NewSet() firstVM.validators = validators.NewManager() - firstVM.validators.PutValidatorSet(constants.DefaultSubnetID, firstDefaultSubnet) + firstVM.validators.PutValidatorSet(constants.PrimaryNetworkID, firstPrimaryNetwork) firstVM.clock.Set(defaultGenesisTime) firstCtx := defaultContext() @@ -1544,9 +1544,9 @@ func TestRestartFullyAccepted(t *testing.T) { chainManager: chains.MockManager{}, } - secondDefaultSubnet := validators.NewSet() + secondPrimaryNetwork := validators.NewSet() secondVM.validators = validators.NewManager() - secondVM.validators.PutValidatorSet(constants.DefaultSubnetID, secondDefaultSubnet) + secondVM.validators.PutValidatorSet(constants.PrimaryNetworkID, secondPrimaryNetwork) secondVM.clock.Set(defaultGenesisTime) secondCtx := defaultContext() @@ -1582,9 +1582,9 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { chainManager: chains.MockManager{}, } - defaultSubnet := validators.NewSet() + primaryNetwork := validators.NewSet() vm.validators = validators.NewManager() - vm.validators.PutValidatorSet(constants.DefaultSubnetID, defaultSubnet) + vm.validators.PutValidatorSet(constants.PrimaryNetworkID, primaryNetwork) vm.clock.Set(defaultGenesisTime) ctx := defaultContext() @@ -1735,9 +1735,9 @@ func TestUnverifiedParent(t *testing.T) { chainManager: chains.MockManager{}, } - defaultSubnet := validators.NewSet() + primaryNetwork := validators.NewSet() vm.validators = validators.NewManager() - vm.validators.PutValidatorSet(constants.DefaultSubnetID, defaultSubnet) + vm.validators.PutValidatorSet(constants.PrimaryNetworkID, primaryNetwork) vm.clock.Set(defaultGenesisTime) ctx := defaultContext() From 30d8b86a90606b499a28e0ccafd428b2444bf585 Mon Sep 17 00:00:00 2001 From: "Collin K. 
Cusce" Date: Mon, 24 Aug 2020 20:36:25 -0400 Subject: [PATCH 05/47] PrimaryValidator and DefaultValidator to just Validator --- .../add_default_subnet_delegator_tx.go | 26 +++--- .../add_default_subnet_delegator_tx_test.go | 54 +++++------ .../add_default_subnet_validator_tx.go | 24 ++--- .../add_default_subnet_validator_tx_test.go | 92 +++++++++---------- .../add_nondefault_subnet_validator_tx.go | 4 +- ...add_nondefault_subnet_validator_tx_test.go | 2 +- vms/platformvm/advance_time_tx_test.go | 4 +- vms/platformvm/event_heap.go | 6 +- vms/platformvm/event_heap_test.go | 32 +++---- vms/platformvm/reward_validator_tx.go | 6 +- vms/platformvm/reward_validator_tx_test.go | 8 +- vms/platformvm/service.go | 32 +++---- vms/platformvm/service_test.go | 6 +- vms/platformvm/static_service.go | 2 +- vms/platformvm/vm.go | 12 +-- vms/platformvm/vm_test.go | 12 +-- 16 files changed, 161 insertions(+), 161 deletions(-) diff --git a/vms/platformvm/add_default_subnet_delegator_tx.go b/vms/platformvm/add_default_subnet_delegator_tx.go index 9275c6576175..5372d7e208cd 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx.go +++ b/vms/platformvm/add_default_subnet_delegator_tx.go @@ -26,12 +26,12 @@ var ( errInvalidState = errors.New("generated output isn't valid state") errInvalidAmount = errors.New("invalid amount") - _ UnsignedProposalTx = &UnsignedAddPrimaryDelegatorTx{} - _ TimedTx = &UnsignedAddPrimaryDelegatorTx{} + _ UnsignedProposalTx = &UnsignedAddDelegatorTx{} + _ TimedTx = &UnsignedAddDelegatorTx{} ) -// UnsignedAddPrimaryDelegatorTx is an unsigned addPrimaryDelegatorTx -type UnsignedAddPrimaryDelegatorTx struct { +// UnsignedAddDelegatorTx is an unsigned addDelegatorTx +type UnsignedAddDelegatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the delegatee @@ -43,17 +43,17 @@ type UnsignedAddPrimaryDelegatorTx struct { } // StartTime of this validator -func (tx *UnsignedAddPrimaryDelegatorTx) StartTime() time.Time { +func (tx *UnsignedAddDelegatorTx) StartTime() time.Time { return tx.Validator.StartTime() } // EndTime of this validator -func (tx *UnsignedAddPrimaryDelegatorTx) EndTime() time.Time { +func (tx *UnsignedAddDelegatorTx) EndTime() time.Time { return tx.Validator.EndTime() } // Verify return nil iff [tx] is valid -func (tx *UnsignedAddPrimaryDelegatorTx) Verify( +func (tx *UnsignedAddDelegatorTx) Verify( ctx *snow.Context, c codec.Codec, feeAmount uint64, @@ -102,7 +102,7 @@ func (tx *UnsignedAddPrimaryDelegatorTx) Verify( } // SemanticVerify this transaction is valid. 
-func (tx *UnsignedAddPrimaryDelegatorTx) SemanticVerify( +func (tx *UnsignedAddDelegatorTx) SemanticVerify( vm *VM, db database.Database, stx *Tx, @@ -140,7 +140,7 @@ func (tx *UnsignedAddPrimaryDelegatorTx) SemanticVerify( } if validator, err := currentValidators.getPrimaryStaker(tx.Validator.NodeID); err == nil { - unsignedValidator := validator.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + unsignedValidator := validator.UnsignedTx.(*UnsignedAddValidatorTx) if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) { return nil, nil, nil, nil, permError{errDSValidatorSubset} } @@ -151,7 +151,7 @@ func (tx *UnsignedAddPrimaryDelegatorTx) SemanticVerify( if err != nil { return nil, nil, nil, nil, permError{errDSValidatorSubset} } - unsignedValidator := validator.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + unsignedValidator := validator.UnsignedTx.(*UnsignedAddValidatorTx) if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) { return nil, nil, nil, nil, permError{errDSValidatorSubset} } @@ -202,12 +202,12 @@ func (tx *UnsignedAddPrimaryDelegatorTx) SemanticVerify( // InitiallyPrefersCommit returns true if the proposed validators start time is // after the current wall clock time, -func (tx *UnsignedAddPrimaryDelegatorTx) InitiallyPrefersCommit(vm *VM) bool { +func (tx *UnsignedAddDelegatorTx) InitiallyPrefersCommit(vm *VM) bool { return tx.StartTime().After(vm.clock.Time()) } // Creates a new transaction -func (vm *VM) newAddPrimaryDelegatorTx( +func (vm *VM) newAddDelegatorTx( stakeAmt, // Amount the delegator stakes startTime, // Unix time they start delegating endTime uint64, // Unix time they stop delegating @@ -220,7 +220,7 @@ func (vm *VM) newAddPrimaryDelegatorTx( return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } // Create the tx - utx := &UnsignedAddPrimaryDelegatorTx{ + utx := &UnsignedAddDelegatorTx{ BaseTx: BaseTx{BaseTx: avax.BaseTx{ NetworkID: vm.Ctx.NetworkID, BlockchainID: vm.Ctx.ChainID, diff --git a/vms/platformvm/add_default_subnet_delegator_tx_test.go b/vms/platformvm/add_default_subnet_delegator_tx_test.go index a25e34f43fc2..2e1322cabfbb 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx_test.go +++ b/vms/platformvm/add_default_subnet_delegator_tx_test.go @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/gecko/utils/crypto" ) -func TestAddPrimaryDelegatorTxSyntacticVerify(t *testing.T) { +func TestAddDelegatorTxSyntacticVerify(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -27,13 +27,13 @@ func TestAddPrimaryDelegatorTxSyntacticVerify(t *testing.T) { rewardAddress := nodeID // Case : tx is nil - var unsignedTx *UnsignedAddPrimaryDelegatorTx + var unsignedTx *UnsignedAddDelegatorTx if err := unsignedTx.Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because tx is nil") } // Case: Wrong network ID - tx, err := vm.newAddPrimaryDelegatorTx( + tx, err := vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -44,15 +44,15 @@ func TestAddPrimaryDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).NetworkID++ + tx.UnsignedTx.(*UnsignedAddDelegatorTx).NetworkID++ // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).syntacticallyVerified = false - if err := 
tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because the wrong network ID was used") } // Case: Missing Node ID - tx, err = vm.newAddPrimaryDelegatorTx( + tx, err = vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -63,15 +63,15 @@ func TestAddPrimaryDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Validator.NodeID = ids.ShortID{} + tx.UnsignedTx.(*UnsignedAddDelegatorTx).Validator.NodeID = ids.ShortID{} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because NodeID is nil") } // Case: Not enough weight - tx, err = vm.newAddPrimaryDelegatorTx( + tx, err = vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -82,15 +82,15 @@ func TestAddPrimaryDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Validator.Wght = vm.minStake - 1 + tx.UnsignedTx.(*UnsignedAddDelegatorTx).Validator.Wght = vm.minStake - 1 // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because of not enough weight") } // Case: Validation length is too short - tx, err = vm.newAddPrimaryDelegatorTx( + tx, err = vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix()), @@ -101,15 +101,15 @@ func TestAddPrimaryDelegatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Validator.End-- // 1 shorter than minimum stake time + tx.UnsignedTx.(*UnsignedAddDelegatorTx).Validator.End-- // 1 shorter than minimum stake time // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).syntacticallyVerified = false - if err = tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddDelegatorTx).syntacticallyVerified = false + if err = tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, 
vm.minStake); err == nil { t.Fatal("should have errored because validation length too short") } // Case: Validation length is too long - if tx, err = vm.newAddPrimaryDelegatorTx( + if tx, err = vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix()), @@ -119,15 +119,15 @@ func TestAddPrimaryDelegatorTxSyntacticVerify(t *testing.T) { ); err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Validator.End++ // 1 longer than maximum stake time + tx.UnsignedTx.(*UnsignedAddDelegatorTx).Validator.End++ // 1 longer than maximum stake time // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddDelegatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too long") } // Case: Valid - if tx, err = vm.newAddPrimaryDelegatorTx( + if tx, err = vm.newAddDelegatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -136,12 +136,12 @@ func TestAddPrimaryDelegatorTxSyntacticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, ); err != nil { t.Fatal(err) - } else if err := tx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil { + } else if err := tx.UnsignedTx.(*UnsignedAddDelegatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil { t.Fatal(err) } } -func TestAddPrimaryDelegatorTxSemanticVerify(t *testing.T) { +func TestAddDelegatorTxSemanticVerify(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -166,7 +166,7 @@ func TestAddPrimaryDelegatorTxSemanticVerify(t *testing.T) { newValidatorEndTime := uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()) // [addValidator] adds a new validator to the primary network's pending validator set addValidator := func(db database.Database) { - if tx, err := vm.newAddPrimaryValidatorTx( + if tx, err := vm.newAddValidatorTx( vm.minStake, // stake amount newValidatorStartTime, // start time newValidatorEndTime, // end time @@ -303,7 +303,7 @@ func TestAddPrimaryDelegatorTxSemanticVerify(t *testing.T) { for _, tt := range tests { vdb.Abort() - tx, err := vm.newAddPrimaryDelegatorTx( + tx, err := vm.newAddDelegatorTx( tt.stakeAmount, tt.startTime, tt.endTime, diff --git a/vms/platformvm/add_default_subnet_validator_tx.go b/vms/platformvm/add_default_subnet_validator_tx.go index 87ce71d1fddf..57ea89042e6b 100644 --- a/vms/platformvm/add_default_subnet_validator_tx.go +++ b/vms/platformvm/add_default_subnet_validator_tx.go @@ -30,12 +30,12 @@ var ( errStakeTooLong = errors.New("staking period is too long") errTooManyShares = fmt.Errorf("a staker can only require at most %d shares from delegators", NumberOfShares) - _ UnsignedProposalTx = &UnsignedAddPrimaryValidatorTx{} - _ TimedTx = &UnsignedAddPrimaryValidatorTx{} + _ UnsignedProposalTx = &UnsignedAddValidatorTx{} + _ TimedTx = &UnsignedAddValidatorTx{} ) -// UnsignedAddPrimaryValidatorTx is an unsigned addPrimaryValidatorTx -type UnsignedAddPrimaryValidatorTx struct { 
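The blank-identifier assignments above (e.g. _ UnsignedProposalTx = &UnsignedAddValidatorTx{}) are a standard Go idiom: a package-scope assignment to _ fails to compile if the type ever stops implementing the interface, so the rename cannot silently break the contract. A minimal self-contained example of the same pattern, with greeter and english as illustrative names:

package main

import "fmt"

type greeter interface{ Greet() string }

type english struct{}

func (english) Greet() string { return "hello" }

// Compile-time check, same shape as the assertions above: if english ever
// loses Greet, this line stops compiling instead of failing at runtime.
var _ greeter = english{}

func main() { fmt.Println(english{}.Greet()) }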
+// UnsignedAddValidatorTx is an unsigned addValidatorTx +type UnsignedAddValidatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the delegatee @@ -50,17 +50,17 @@ type UnsignedAddPrimaryValidatorTx struct { } // StartTime of this validator -func (tx *UnsignedAddPrimaryValidatorTx) StartTime() time.Time { +func (tx *UnsignedAddValidatorTx) StartTime() time.Time { return tx.Validator.StartTime() } // EndTime of this validator -func (tx *UnsignedAddPrimaryValidatorTx) EndTime() time.Time { +func (tx *UnsignedAddValidatorTx) EndTime() time.Time { return tx.Validator.EndTime() } // Verify return nil iff [tx] is valid -func (tx *UnsignedAddPrimaryValidatorTx) Verify( +func (tx *UnsignedAddValidatorTx) Verify( ctx *snow.Context, c codec.Codec, feeAmount uint64, @@ -110,7 +110,7 @@ func (tx *UnsignedAddPrimaryValidatorTx) Verify( } // SemanticVerify this transaction is valid. -func (tx *UnsignedAddPrimaryValidatorTx) SemanticVerify( +func (tx *UnsignedAddValidatorTx) SemanticVerify( vm *VM, db database.Database, stx *Tx, @@ -203,12 +203,12 @@ func (tx *UnsignedAddPrimaryValidatorTx) SemanticVerify( // InitiallyPrefersCommit returns true if the proposed validators start time is // after the current wall clock time, -func (tx *UnsignedAddPrimaryValidatorTx) InitiallyPrefersCommit(vm *VM) bool { +func (tx *UnsignedAddValidatorTx) InitiallyPrefersCommit(vm *VM) bool { return tx.StartTime().After(vm.clock.Time()) } -// NewAddPrimaryValidatorTx returns a new NewAddPrimaryValidatorTx -func (vm *VM) newAddPrimaryValidatorTx( +// newAddValidatorTx returns a new addValidatorTx +func (vm *VM) newAddValidatorTx( stakeAmt, // Amount the delegator stakes startTime, // Unix time they start delegating endTime uint64, // Unix time they stop delegating @@ -222,7 +222,7 @@ func (vm *VM) newAddPrimaryValidatorTx( return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } // Create the tx - utx := &UnsignedAddPrimaryValidatorTx{ + utx := &UnsignedAddValidatorTx{ BaseTx: BaseTx{BaseTx: avax.BaseTx{ NetworkID: vm.Ctx.NetworkID, BlockchainID: vm.Ctx.ChainID, diff --git a/vms/platformvm/add_default_subnet_validator_tx_test.go b/vms/platformvm/add_default_subnet_validator_tx_test.go index cd25d764fd9c..0e7469c38c44 100644 --- a/vms/platformvm/add_default_subnet_validator_tx_test.go +++ b/vms/platformvm/add_default_subnet_validator_tx_test.go @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/gecko/vms/secp256k1fx" ) -func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { +func TestAddValidatorTxSyntacticVerify(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -31,13 +31,13 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { nodeID := key.PublicKey().Address() // Case: tx is nil - var unsignedTx *UnsignedAddPrimaryValidatorTx + var unsignedTx *UnsignedAddValidatorTx if err := unsignedTx.Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because tx is nil") } // Case 3: Wrong Network ID - tx, err := vm.newAddPrimaryValidatorTx( + tx, err := vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -49,15 +49,15 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).NetworkID++ + tx.UnsignedTx.(*UnsignedAddValidatorTx).NetworkID++ // This tx was syntactically verified when it was created...pretend it wasn't so we don't use
cache - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because the wrong network ID was used") } // Case: Node ID is nil - tx, err = vm.newAddPrimaryValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -69,15 +69,15 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.NodeID = ids.ShortID{ID: nil} + tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.NodeID = ids.ShortID{ID: nil} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because node ID is nil") } // Case: Stake owner has no addresses - tx, err = vm.newAddPrimaryValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -89,7 +89,7 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Stake = []*avax.TransferableOutput{{ + tx.UnsignedTx.(*UnsignedAddValidatorTx).Stake = []*avax.TransferableOutput{{ Asset: avax.Asset{ID: avaxAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: vm.minStake, @@ -101,13 +101,13 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { }, }} // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because stake owner has no addresses") } // Case: Rewards owner has no addresses - tx, err = vm.newAddPrimaryValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -119,19 +119,19 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).RewardsOwner = &secp256k1fx.OutputOwners{ + tx.UnsignedTx.(*UnsignedAddValidatorTx).RewardsOwner = &secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, Addrs: nil, } // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false - if err := 
tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because rewards owner has no addresses") } // Case: Stake amount too small - tx, err = vm.newAddPrimaryValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -143,15 +143,15 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.Wght-- // 1 less than minimum amount + tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.Wght-- // 1 less than minimum amount // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because stake amount too small") } // Case: Too many shares - tx, err = vm.newAddPrimaryValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -163,15 +163,15 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Shares++ // 1 more than max amount + tx.UnsignedTx.(*UnsignedAddValidatorTx).Shares++ // 1 more than max amount // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because of too many shares") } // Case: Validation length is too short - tx, err = vm.newAddPrimaryValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix()), @@ -183,15 +183,15 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.End-- // 1 less than min duration + tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.End-- // 1 less than min duration // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, 
vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too short") } // Case: Validation length is negative - tx, err = vm.newAddPrimaryValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix()), @@ -203,15 +203,15 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.End = tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.Start - 1 + tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.End = tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.Start - 1 // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too short") } // Case: Validation length is too long - tx, err = vm.newAddPrimaryValidatorTx( + tx, err = vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix()), @@ -223,15 +223,15 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Validator.End++ // 1 more than maximum duration + tx.UnsignedTx.(*UnsignedAddValidatorTx).Validator.End++ // 1 more than maximum duration // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache - tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).syntacticallyVerified = false - if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { + tx.UnsignedTx.(*UnsignedAddValidatorTx).syntacticallyVerified = false + if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err == nil { t.Fatal("should have errored because validation length too long") } // Case: Valid - if tx, err := vm.newAddPrimaryValidatorTx( + if tx, err := vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -241,13 +241,13 @@ func TestAddPrimaryValidatorTxSyntacticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, ); err != nil { t.Fatal(err) - } else if err := tx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil { + } else if err := tx.UnsignedTx.(*UnsignedAddValidatorTx).Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil { t.Fatal(err) } } -// Test AddPrimaryValidatorTx.SemanticVerify -func TestAddPrimaryValidatorTxSemanticVerify(t *testing.T) { +// Test AddValidatorTx.SemanticVerify +func TestAddValidatorTxSemanticVerify(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -263,7 +263,7 @@ func TestAddPrimaryValidatorTxSemanticVerify(t *testing.T) { nodeID := key.PublicKey().Address() // Case: Validator's start time too early - if tx, err := 
vm.newAddPrimaryValidatorTx( + if tx, err := vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix())-1, uint64(defaultValidateEndTime.Unix()), @@ -279,7 +279,7 @@ func TestAddPrimaryValidatorTxSemanticVerify(t *testing.T) { vDB.Abort() // Case: Validator already validating primary network - if tx, err := vm.newAddPrimaryValidatorTx( + if tx, err := vm.newAddValidatorTx( vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), @@ -300,7 +300,7 @@ func TestAddPrimaryValidatorTxSemanticVerify(t *testing.T) { t.Fatal(err) } startTime := defaultGenesisTime.Add(1 * time.Second) - tx, err := vm.newAddPrimaryValidatorTx( + tx, err := vm.newAddValidatorTx( vm.minStake, // stake amount uint64(startTime.Unix()), // start time uint64(startTime.Add(MinimumStakingDuration).Unix()), // end time @@ -325,7 +325,7 @@ func TestAddPrimaryValidatorTxSemanticVerify(t *testing.T) { vDB.Abort() // Case: Validator doesn't have enough tokens to cover stake amount - if _, err := vm.newAddPrimaryValidatorTx( // create the tx + if _, err := vm.newAddValidatorTx( // create the tx vm.minStake, uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx.go b/vms/platformvm/add_nondefault_subnet_validator_tx.go index 805d5ccd888f..981f961d0e6b 100644 --- a/vms/platformvm/add_nondefault_subnet_validator_tx.go +++ b/vms/platformvm/add_nondefault_subnet_validator_tx.go @@ -111,7 +111,7 @@ func (tx *UnsignedAddSubnetValidatorTx) SemanticVerify( return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get current validators of primary network: %v", err)} } if dsValidator, err := currentDSValidators.getPrimaryStaker(tx.Validator.NodeID); err == nil { - unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddValidatorTx) if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) { return nil, nil, nil, nil, permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating primary network [%v, %v]", @@ -130,7 +130,7 @@ func (tx *UnsignedAddSubnetValidatorTx) SemanticVerify( return nil, nil, nil, nil, permError{fmt.Errorf("validator would not be validating primary network while validating subnet")} } - unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddValidatorTx) if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) { return nil, nil, nil, nil, permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating primary network [%v, %v]", diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go b/vms/platformvm/add_nondefault_subnet_validator_tx_test.go index 2f91c61f7cd7..e5cc8f84e432 100644 --- a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go +++ b/vms/platformvm/add_nondefault_subnet_validator_tx_test.go @@ -234,7 +234,7 @@ func TestAddSubnetValidatorTxSemanticVerify(t *testing.T) { DSStartTime := defaultGenesisTime.Add(10 * time.Second) DSEndTime := DSStartTime.Add(5 * MinimumStakingDuration) - addDSTx, err := vm.newAddPrimaryValidatorTx( + addDSTx, err := vm.newAddValidatorTx( vm.minStake, // stake amount uint64(DSStartTime.Unix()), // start time uint64(DSEndTime.Unix()), // end time diff --git a/vms/platformvm/advance_time_tx_test.go b/vms/platformvm/advance_time_tx_test.go index ae839e2403bd..00a7f8354f4d 
100644 --- a/vms/platformvm/advance_time_tx_test.go +++ b/vms/platformvm/advance_time_tx_test.go @@ -38,7 +38,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { pendingValidatorEndTime := pendingValidatorStartTime.Add(MinimumStakingDuration) nodeIDKey, _ := vm.factory.NewPrivateKey() nodeID := nodeIDKey.PublicKey().Address() - addPendingValidatorTx, err := vm.newAddPrimaryValidatorTx( + addPendingValidatorTx, err := vm.newAddValidatorTx( vm.minStake, uint64(pendingValidatorStartTime.Unix()), uint64(pendingValidatorEndTime.Unix()), @@ -106,7 +106,7 @@ func TestAdvanceTimeTxUpdateValidators(t *testing.T) { pendingValidatorEndTime := pendingValidatorStartTime.Add(MinimumStakingDuration) nodeIDKey, _ := vm.factory.NewPrivateKey() nodeID := nodeIDKey.PublicKey().Address() - addPendingValidatorTx, err := vm.newAddPrimaryValidatorTx( + addPendingValidatorTx, err := vm.newAddValidatorTx( vm.minStake, uint64(pendingValidatorStartTime.Unix()), uint64(pendingValidatorEndTime.Unix()), diff --git a/vms/platformvm/event_heap.go b/vms/platformvm/event_heap.go index 68890476b101..699e14f1ed28 100644 --- a/vms/platformvm/event_heap.go +++ b/vms/platformvm/event_heap.go @@ -48,8 +48,8 @@ func (h *EventHeap) Less(i, j int) bool { case iTime.Unix() < jTime.Unix(): return true case iTime == jTime: - _, iOk := iTx.(*UnsignedAddPrimaryValidatorTx) - _, jOk := jTx.(*UnsignedAddPrimaryValidatorTx) + _, iOk := iTx.(*UnsignedAddValidatorTx) + _, jOk := jTx.(*UnsignedAddValidatorTx) if iOk != jOk { return iOk == h.SortByStartTime @@ -98,7 +98,7 @@ func (h *EventHeap) Bytes() ([]byte, error) { // getPrimaryStaker ... func (h *EventHeap) getPrimaryStaker(id ids.ShortID) (*Tx, error) { for _, txIntf := range h.Txs { - tx, ok := txIntf.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + tx, ok := txIntf.UnsignedTx.(*UnsignedAddValidatorTx) if !ok { continue } diff --git a/vms/platformvm/event_heap_test.go b/vms/platformvm/event_heap_test.go index 9453311e5a39..6e11bf4ea78c 100644 --- a/vms/platformvm/event_heap_test.go +++ b/vms/platformvm/event_heap_test.go @@ -20,7 +20,7 @@ func TestTxHeapStart(t *testing.T) { txHeap := EventHeap{SortByStartTime: true} - validator0, err := vm.newAddPrimaryValidatorTx( + validator0, err := vm.newAddValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime @@ -32,9 +32,9 @@ func TestTxHeapStart(t *testing.T) { if err != nil { t.Fatal(err) } - vdr0Tx := validator0.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + vdr0Tx := validator0.UnsignedTx.(*UnsignedAddValidatorTx) - validator1, err := vm.newAddPrimaryValidatorTx( + validator1, err := vm.newAddValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+2), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+2), // endTime @@ -46,9 +46,9 @@ func TestTxHeapStart(t *testing.T) { if err != nil { t.Fatal(err) } - vdr1Tx := validator1.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + vdr1Tx := validator1.UnsignedTx.(*UnsignedAddValidatorTx) - validator2, err := vm.newAddPrimaryValidatorTx( + validator2, err := vm.newAddValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+3), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+3), // endTime @@ -60,7 +60,7 @@ func TestTxHeapStart(t *testing.T) { if err != nil { t.Fatal(err) } - vdr2Tx := validator2.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + vdr2Tx := validator2.UnsignedTx.(*UnsignedAddValidatorTx) 
txHeap.Add(validator2) if timestamp := txHeap.Timestamp(); !timestamp.Equal(vdr2Tx.StartTime()) { @@ -90,7 +90,7 @@ func TestTxHeapStop(t *testing.T) { txHeap := EventHeap{} - validator0, err := vm.newAddPrimaryValidatorTx( + validator0, err := vm.newAddValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime @@ -102,9 +102,9 @@ func TestTxHeapStop(t *testing.T) { if err != nil { t.Fatal(err) } - vdr0Tx := validator0.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + vdr0Tx := validator0.UnsignedTx.(*UnsignedAddValidatorTx) - validator1, err := vm.newAddPrimaryValidatorTx( + validator1, err := vm.newAddValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+2), // endTime @@ -116,9 +116,9 @@ func TestTxHeapStop(t *testing.T) { if err != nil { t.Fatal(err) } - vdr1Tx := validator1.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + vdr1Tx := validator1.UnsignedTx.(*UnsignedAddValidatorTx) - validator2, err := vm.newAddPrimaryValidatorTx( + validator2, err := vm.newAddValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+3), // endTime @@ -130,7 +130,7 @@ func TestTxHeapStop(t *testing.T) { if err != nil { t.Fatal(err) } - vdr2Tx := validator2.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + vdr2Tx := validator2.UnsignedTx.(*UnsignedAddValidatorTx) txHeap.Add(validator2) if timestamp := txHeap.Timestamp(); !timestamp.Equal(vdr2Tx.EndTime()) { @@ -160,7 +160,7 @@ func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) { txHeap := EventHeap{SortByStartTime: true} - validator, err := vm.newAddPrimaryValidatorTx( + validator, err := vm.newAddValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime @@ -173,7 +173,7 @@ func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) { t.Fatal(err) } - delegator, err := vm.newAddPrimaryDelegatorTx( + delegator, err := vm.newAddDelegatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime @@ -203,7 +203,7 @@ func TestTxHeapStopValidatorVsDelegatorOrdering(t *testing.T) { txHeap := EventHeap{} - validator, err := vm.newAddPrimaryValidatorTx( + validator, err := vm.newAddValidatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime @@ -216,7 +216,7 @@ func TestTxHeapStopValidatorVsDelegatorOrdering(t *testing.T) { t.Fatal(err) } - delegator, err := vm.newAddPrimaryDelegatorTx( + delegator, err := vm.newAddDelegatorTx( vm.minStake, // stake amount uint64(defaultGenesisTime.Unix()+1), // startTime uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()+1), // endTime diff --git a/vms/platformvm/reward_validator_tx.go b/vms/platformvm/reward_validator_tx.go index 628e8f37c9cc..7a2e9c07d01a 100644 --- a/vms/platformvm/reward_validator_tx.go +++ b/vms/platformvm/reward_validator_tx.go @@ -112,7 +112,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( } switch uVdrTx := vdrTx.UnsignedTx.(type) { - case *UnsignedAddPrimaryValidatorTx: + case *UnsignedAddValidatorTx: // Refund the stake here for i, out := range uVdrTx.Stake { utxo := 
&avax.UTXO{ @@ -153,13 +153,13 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( return nil, nil, nil, nil, tempError{err} } } - case *UnsignedAddPrimaryDelegatorTx: + case *UnsignedAddDelegatorTx: // We're removing a delegator parentTx, err := primaryNetworkVdrHeap.getPrimaryStaker(uVdrTx.Validator.NodeID) if err != nil { return nil, nil, nil, nil, permError{err} } - unsignedParentTx := parentTx.UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + unsignedParentTx := parentTx.UnsignedTx.(*UnsignedAddValidatorTx) // Refund the stake here for i, out := range uVdrTx.Stake { diff --git a/vms/platformvm/reward_validator_tx_test.go b/vms/platformvm/reward_validator_tx_test.go index 18f85a36b048..329e303695cf 100644 --- a/vms/platformvm/reward_validator_tx_test.go +++ b/vms/platformvm/reward_validator_tx_test.go @@ -28,7 +28,7 @@ func TestUnsignedRewardValidatorTxSemanticVerify(t *testing.T) { t.Fatal(err) } // ID of validator that should leave DS validator set next - nextToRemove := currentValidators.Peek().UnsignedTx.(*UnsignedAddPrimaryValidatorTx) + nextToRemove := currentValidators.Peek().UnsignedTx.(*UnsignedAddValidatorTx) // Case 1: Chain timestamp is wrong if tx, err := vm.newRewardValidatorTx(nextToRemove.ID()); err != nil { @@ -114,7 +114,7 @@ func TestRewardDelegatorTxSemanticVerify(t *testing.T) { vdrStartTime := uint64(defaultValidateStartTime.Unix()) + 1 vdrEndTime := uint64(defaultValidateStartTime.Add(2 * MinimumStakingDuration).Unix()) vdrNodeID := ids.GenerateTestShortID() - vdrTx, err := vm.newAddPrimaryValidatorTx( + vdrTx, err := vm.newAddValidatorTx( vm.minStake, // stakeAmt vdrStartTime, vdrEndTime, @@ -129,7 +129,7 @@ func TestRewardDelegatorTxSemanticVerify(t *testing.T) { delStartTime := vdrStartTime + 1 delEndTime := vdrEndTime - 1 - delTx, err := vm.newAddPrimaryDelegatorTx( + delTx, err := vm.newAddDelegatorTx( vm.minStake, // stakeAmt delStartTime, delEndTime, @@ -140,7 +140,7 @@ func TestRewardDelegatorTxSemanticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - unsignedDelTx := delTx.UnsignedTx.(*UnsignedAddPrimaryDelegatorTx) + unsignedDelTx := delTx.UnsignedTx.(*UnsignedAddDelegatorTx) currentValidators, err := vm.getCurrentValidators(vm.DB, constants.PrimaryNetworkID) if err != nil { diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index f0e7fd1f8e6d..5461cfb8799b 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -538,7 +538,7 @@ func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentVa if args.SubnetID.Equals(constants.PrimaryNetworkID) { for i, tx := range validators.Txs { switch tx := tx.UnsignedTx.(type) { - case *UnsignedAddPrimaryValidatorTx: + case *UnsignedAddValidatorTx: weight := json.Uint64(tx.Validator.Weight()) reply.Validators[i] = FormattedAPIValidator{ ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), @@ -546,7 +546,7 @@ func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentVa EndTime: json.Uint64(tx.EndTime().Unix()), StakeAmount: &weight, } - case *UnsignedAddPrimaryDelegatorTx: + case *UnsignedAddDelegatorTx: weight := json.Uint64(tx.Validator.Weight()) reply.Validators[i] = FormattedAPIValidator{ ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), @@ -602,7 +602,7 @@ func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingVa for i, tx := range validators.Txs { if args.SubnetID.Equals(constants.PrimaryNetworkID) { switch tx := tx.UnsignedTx.(type) { - case *UnsignedAddPrimaryValidatorTx: + case 
*UnsignedAddValidatorTx: weight := json.Uint64(tx.Validator.Weight()) reply.Validators[i] = FormattedAPIValidator{ ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), @@ -610,7 +610,7 @@ func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingVa EndTime: json.Uint64(tx.EndTime().Unix()), StakeAmount: &weight, } - case *UnsignedAddPrimaryDelegatorTx: + case *UnsignedAddDelegatorTx: weight := json.Uint64(tx.Validator.Weight()) reply.Validators[i] = FormattedAPIValidator{ ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), @@ -687,17 +687,17 @@ func (service *Service) SampleValidators(_ *http.Request, args *SampleValidators ****************************************************** */ -// AddPrimaryValidatorArgs are the arguments to AddPrimaryValidator -type AddPrimaryValidatorArgs struct { +// AddValidatorArgs are the arguments to AddValidator +type AddValidatorArgs struct { FormattedAPIPrimaryValidator api.UserPass } -// AddPrimaryValidator creates and signs and issues a transaction to add a +// AddValidator creates and signs and issues a transaction to add a // validator to the primary network -func (service *Service) AddPrimaryValidator(_ *http.Request, args *AddPrimaryValidatorArgs, reply *api.JsonTxID) error { - service.vm.Ctx.Log.Info("Platform: AddPrimaryValidator called") +func (service *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *api.JsonTxID) error { + service.vm.Ctx.Log.Info("Platform: AddValidator called") switch { case args.RewardAddress == "": return errNoRewardAddress @@ -735,7 +735,7 @@ func (service *Service) AddPrimaryValidator(_ *http.Request, args *AddPrimaryVal } // Create the transaction - tx, err := service.vm.newAddPrimaryValidatorTx( + tx, err := service.vm.newAddValidatorTx( uint64(args.weight()), // Stake amount uint64(args.StartTime), // Start time uint64(args.EndTime), // End time @@ -752,17 +752,17 @@ func (service *Service) AddPrimaryValidator(_ *http.Request, args *AddPrimaryVal return service.vm.issueTx(tx) } -// AddPrimaryDelegatorArgs are the arguments to AddPrimaryDelegator -type AddPrimaryDelegatorArgs struct { +// AddDelegatorArgs are the arguments to AddDelegator +type AddDelegatorArgs struct { FormattedAPIValidator api.UserPass RewardAddress string `json:"rewardAddress"` } -// AddPrimaryDelegator creates and signs and issues a transaction to add a +// AddDelegator creates and signs and issues a transaction to add a // delegator to the primary network -func (service *Service) AddPrimaryDelegator(_ *http.Request, args *AddPrimaryDelegatorArgs, reply *api.JsonTxID) error { - service.vm.Ctx.Log.Info("Platform: AddPrimaryDelegator called") +func (service *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *api.JsonTxID) error { + service.vm.Ctx.Log.Info("Platform: AddDelegator called") switch { case int64(args.StartTime) < time.Now().Unix(): return fmt.Errorf("start time must be in the future") @@ -798,7 +798,7 @@ func (service *Service) AddPrimaryDelegator(_ *http.Request, args *AddPrimaryDel } // Create the transaction - tx, err := service.vm.newAddPrimaryDelegatorTx( + tx, err := service.vm.newAddDelegatorTx( uint64(args.weight()), // Stake amount uint64(args.StartTime), // Start time uint64(args.EndTime), // End time diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 9b0ba7da839c..0d491cf91427 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -72,9 +72,9 @@ func defaultAddress(t *testing.T, service *Service) 
{ } } -func TestAddPrimaryValidator(t *testing.T) { +func TestAddValidator(t *testing.T) { expectedJSONString := `{"startTime":"0","endTime":"0","nodeID":"","rewardAddress":"","delegationFeeRate":"0.0000","username":"","password":""}` - args := AddPrimaryValidatorArgs{} + args := AddValidatorArgs{} bytes, err := json.Marshal(&args) if err != nil { t.Fatal(err) @@ -235,7 +235,7 @@ func TestGetTx(t *testing.T) { test{ "proposal block", func() (*Tx, error) { - return service.vm.newAddPrimaryValidatorTx( // Test GetTx works for proposal blocks + return service.vm.newAddValidatorTx( // Test GetTx works for proposal blocks service.vm.minStake, uint64(service.vm.clock.Time().Add(Delta).Unix()), uint64(service.vm.clock.Time().Add(Delta).Add(MinimumStakingDuration).Unix()), diff --git a/vms/platformvm/static_service.go b/vms/platformvm/static_service.go index 54c7245b7a35..c94930accb61 100644 --- a/vms/platformvm/static_service.go +++ b/vms/platformvm/static_service.go @@ -217,7 +217,7 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r return err } - tx := &Tx{UnsignedTx: &UnsignedAddPrimaryValidatorTx{ + tx := &Tx{UnsignedTx: &UnsignedAddValidatorTx{ BaseTx: BaseTx{BaseTx: avax.BaseTx{ NetworkID: uint32(args.NetworkID), BlockchainID: ids.Empty, diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 45dcc14e21fe..f366e76dfa59 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -128,9 +128,9 @@ func init() { Codec.RegisterType(&secp256k1fx.Input{}), Codec.RegisterType(&secp256k1fx.OutputOwners{}), - Codec.RegisterType(&UnsignedAddPrimaryValidatorTx{}), + Codec.RegisterType(&UnsignedAddValidatorTx{}), Codec.RegisterType(&UnsignedAddSubnetValidatorTx{}), - Codec.RegisterType(&UnsignedAddPrimaryDelegatorTx{}), + Codec.RegisterType(&UnsignedAddDelegatorTx{}), Codec.RegisterType(&UnsignedCreateChainTx{}), Codec.RegisterType(&UnsignedCreateSubnetTx{}), @@ -836,7 +836,7 @@ func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, sub for pending.Len() > 0 { nextTx := pending.Peek() // pending staker with earliest start time switch tx := nextTx.UnsignedTx.(type) { - case *UnsignedAddPrimaryValidatorTx: + case *UnsignedAddValidatorTx: if timestamp.Before(tx.StartTime()) { return current, pending, started, stopped, nil } @@ -850,7 +850,7 @@ func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, sub current.Add(nextTx) pending.Remove() started.Add(tx.Validator.ID()) - case *UnsignedAddPrimaryDelegatorTx: + case *UnsignedAddDelegatorTx: if timestamp.Before(tx.StartTime()) { return current, pending, started, stopped, nil } @@ -869,9 +869,9 @@ func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator { for _, event := range validatorEvents.Txs { var vdr validators.Validator switch tx := event.UnsignedTx.(type) { - case *UnsignedAddPrimaryValidatorTx: + case *UnsignedAddValidatorTx: vdr = &tx.Validator - case *UnsignedAddPrimaryDelegatorTx: + case *UnsignedAddDelegatorTx: vdr = &tx.Validator case *UnsignedAddSubnetValidatorTx: vdr = &tx.Validator diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 260c084d0347..cac6162aba28 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -352,7 +352,7 @@ func TestGenesis(t *testing.T) { } // accept proposal to add validator to primary network -func TestAddPrimaryValidatorCommit(t *testing.T) { +func TestAddValidatorCommit(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -369,7 +369,7 @@ func
TestAddPrimaryValidatorCommit(t *testing.T) { ID := key.PublicKey().Address() // create valid tx - tx, err := vm.newAddPrimaryValidatorTx( + tx, err := vm.newAddValidatorTx( vm.minStake, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -431,7 +431,7 @@ func TestAddPrimaryValidatorCommit(t *testing.T) { } // verify invalid proposal to add validator to primary network -func TestInvalidAddPrimaryValidatorCommit(t *testing.T) { +func TestInvalidAddValidatorCommit(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -445,7 +445,7 @@ func TestInvalidAddPrimaryValidatorCommit(t *testing.T) { ID := key.PublicKey().Address() // create invalid tx - if tx, err := vm.newAddPrimaryValidatorTx( + if tx, err := vm.newAddValidatorTx( vm.minStake, uint64(startTime.Unix()), uint64(endTime.Unix()), @@ -477,7 +477,7 @@ func TestInvalidAddPrimaryValidatorCommit(t *testing.T) { } // Reject proposal to add validator to primary network -func TestAddPrimaryValidatorReject(t *testing.T) { +func TestAddValidatorReject(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -491,7 +491,7 @@ func TestAddPrimaryValidatorReject(t *testing.T) { ID := key.PublicKey().Address() // create valid tx - tx, err := vm.newAddPrimaryValidatorTx( + tx, err := vm.newAddValidatorTx( vm.minStake, uint64(startTime.Unix()), uint64(endTime.Unix()), From 7d5f98cbd0254d0880cc08a58dcab8757ed5aeb6 Mon Sep 17 00:00:00 2001 From: "Collin K. Cusce" Date: Tue, 25 Aug 2020 00:19:44 -0400 Subject: [PATCH 06/47] renamed files, fixed "netwok" typo --- main/params.go | 2 +- .../{add_default_subnet_delegator_tx.go => add_delegator_tx.go} | 0 ...ult_subnet_delegator_tx_test.go => add_delegator_tx_test.go} | 0 ...efault_subnet_validator_tx.go => add_subnet_validator_tx.go} | 0 ...net_validator_tx_test.go => add_subnet_validator_tx_test.go} | 0 .../{add_default_subnet_validator_tx.go => add_validator_tx.go} | 0 ...ult_subnet_validator_tx_test.go => add_validator_tx_test.go} | 0 7 files changed, 1 insertion(+), 1 deletion(-) rename vms/platformvm/{add_default_subnet_delegator_tx.go => add_delegator_tx.go} (100%) rename vms/platformvm/{add_default_subnet_delegator_tx_test.go => add_delegator_tx_test.go} (100%) rename vms/platformvm/{add_nondefault_subnet_validator_tx.go => add_subnet_validator_tx.go} (100%) rename vms/platformvm/{add_nondefault_subnet_validator_tx_test.go => add_subnet_validator_tx_test.go} (100%) rename vms/platformvm/{add_default_subnet_validator_tx.go => add_validator_tx.go} (100%) rename vms/platformvm/{add_default_subnet_validator_tx_test.go => add_validator_tx_test.go} (100%) diff --git a/main/params.go b/main/params.go index f3e0f1f617ca..94ed698b8778 100644 --- a/main/params.go +++ b/main/params.go @@ -166,7 +166,7 @@ func init() { // AVAX fees: fs.Uint64Var(&Config.TxFee, "tx-fee", units.MilliAvax, "Transaction fee, in nAVAX") - // Minimum stake, in nAVAX, required to validate the primary netwok + // Minimum stake, in nAVAX, required to validate the primary network fs.Uint64Var(&Config.MinStake, "min-stake", 5*units.MilliAvax, "Minimum stake, in nAVAX, required to validate the primary network") // Assertions: diff --git a/vms/platformvm/add_default_subnet_delegator_tx.go b/vms/platformvm/add_delegator_tx.go similarity index 100% rename from vms/platformvm/add_default_subnet_delegator_tx.go rename to vms/platformvm/add_delegator_tx.go diff --git a/vms/platformvm/add_default_subnet_delegator_tx_test.go b/vms/platformvm/add_delegator_tx_test.go similarity index 100% rename from
vms/platformvm/add_default_subnet_delegator_tx_test.go rename to vms/platformvm/add_delegator_tx_test.go diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx.go b/vms/platformvm/add_subnet_validator_tx.go similarity index 100% rename from vms/platformvm/add_nondefault_subnet_validator_tx.go rename to vms/platformvm/add_subnet_validator_tx.go diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go b/vms/platformvm/add_subnet_validator_tx_test.go similarity index 100% rename from vms/platformvm/add_nondefault_subnet_validator_tx_test.go rename to vms/platformvm/add_subnet_validator_tx_test.go diff --git a/vms/platformvm/add_default_subnet_validator_tx.go b/vms/platformvm/add_validator_tx.go similarity index 100% rename from vms/platformvm/add_default_subnet_validator_tx.go rename to vms/platformvm/add_validator_tx.go diff --git a/vms/platformvm/add_default_subnet_validator_tx_test.go b/vms/platformvm/add_validator_tx_test.go similarity index 100% rename from vms/platformvm/add_default_subnet_validator_tx_test.go rename to vms/platformvm/add_validator_tx_test.go From 771df5763db1e2ec1d3db220d375aa6d7e62f6d3 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 25 Aug 2020 11:40:49 -0400 Subject: [PATCH 07/47] wip abstracting out snowball from snowstorm --- snow/consensus/snowstorm/benchmark_test.go | 20 ++--- snow/consensus/snowstorm/common.go | 9 ++- snow/consensus/snowstorm/consensus.go | 7 +- snow/consensus/snowstorm/consensus_test.go | 79 +++++++++---------- snow/consensus/snowstorm/directed.go | 91 ++++++++++------------ snow/consensus/snowstorm/equality_test.go | 4 +- snow/consensus/snowstorm/input.go | 5 +- snow/consensus/snowstorm/network_test.go | 7 +- snow/consensus/snowstorm/snowball.go | 67 ++++++++++++++++ 9 files changed, 178 insertions(+), 111 deletions(-) create mode 100644 snow/consensus/snowstorm/snowball.go diff --git a/snow/consensus/snowstorm/benchmark_test.go b/snow/consensus/snowstorm/benchmark_test.go index 914167dd14c9..58e25ca0ba50 100644 --- a/snow/consensus/snowstorm/benchmark_test.go +++ b/snow/consensus/snowstorm/benchmark_test.go @@ -9,12 +9,12 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/gecko/snow/consensus/snowball" + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) func Simulate( numColors, colorsPerConsumer, maxInputConflicts, numNodes int, - params snowball.Parameters, + params sbcon.Parameters, seed int64, fact Factory, ) { @@ -53,7 +53,7 @@ func BenchmarkVirtuousDirected(b *testing.B) { /*colorsPerConsumer=*/ 1, /*maxInputConflicts=*/ 1, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -73,7 +73,7 @@ func BenchmarkVirtuousInput(b *testing.B) { /*colorsPerConsumer=*/ 1, /*maxInputConflicts=*/ 1, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -99,7 +99,7 @@ func BenchmarkRogueDirected(b *testing.B) { /*colorsPerConsumer=*/ 1, /*maxInputConflicts=*/ 3, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -119,7 +119,7 @@ func BenchmarkRogueInput(b *testing.B) { /*colorsPerConsumer=*/ 1, /*maxInputConflicts=*/ 3, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -145,7 +145,7 @@ func BenchmarkMultiDirected(b *testing.B) { 
/*colorsPerConsumer=*/ 10, /*maxInputConflicts=*/ 1, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -165,7 +165,7 @@ func BenchmarkMultiInput(b *testing.B) { /*colorsPerConsumer=*/ 10, /*maxInputConflicts=*/ 1, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -191,7 +191,7 @@ func BenchmarkMultiRogueDirected(b *testing.B) { /*colorsPerConsumer=*/ 10, /*maxInputConflicts=*/ 3, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, @@ -211,7 +211,7 @@ func BenchmarkMultiRogueInput(b *testing.B) { /*colorsPerConsumer=*/ 10, /*maxInputConflicts=*/ 3, /*numNodes=*/ 50, - /*params=*/ snowball.Parameters{ + /*params=*/ sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, diff --git a/snow/consensus/snowstorm/common.go b/snow/consensus/snowstorm/common.go index 6fe964e750ee..5bf09d9fc9aa 100644 --- a/snow/consensus/snowstorm/common.go +++ b/snow/consensus/snowstorm/common.go @@ -8,9 +8,10 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/snow/consensus/snowball" "github.com/ava-labs/gecko/snow/events" "github.com/ava-labs/gecko/utils/wrappers" + + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) type common struct { @@ -21,7 +22,7 @@ type common struct { ctx *snow.Context // params describes how this instance was parameterized - params snowball.Parameters + params sbcon.Parameters // each element of preferences is the ID of a transaction that is preferred preferences ids.Set @@ -46,7 +47,7 @@ type common struct { } // Initialize implements the ConflictGraph interface -func (c *common) Initialize(ctx *snow.Context, params snowball.Parameters) error { +func (c *common) Initialize(ctx *snow.Context, params sbcon.Parameters) error { c.ctx = ctx c.params = params @@ -57,7 +58,7 @@ func (c *common) Initialize(ctx *snow.Context, params snowball.Parameters) error } // Parameters implements the Snowstorm interface -func (c *common) Parameters() snowball.Parameters { return c.params } +func (c *common) Parameters() sbcon.Parameters { return c.params } // Virtuous implements the ConflictGraph interface func (c *common) Virtuous() ids.Set { return c.virtuous } diff --git a/snow/consensus/snowstorm/consensus.go b/snow/consensus/snowstorm/consensus.go index 67f24b0f33f1..5c89ffe6ac31 100644 --- a/snow/consensus/snowstorm/consensus.go +++ b/snow/consensus/snowstorm/consensus.go @@ -8,7 +8,8 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/snow/consensus/snowball" + + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) // Consensus is a snowball instance deciding between an unbounded number of @@ -18,10 +19,10 @@ type Consensus interface { fmt.Stringer // Takes in the context, alpha, betaVirtuous, and betaRogue - Initialize(*snow.Context, snowball.Parameters) error + Initialize(*snow.Context, sbcon.Parameters) error // Returns the parameters that describe this snowstorm instance - Parameters() snowball.Parameters + Parameters() sbcon.Parameters // Returns true if transaction is virtuous. 
// That is, no transaction has been added that conflicts with diff --git a/snow/consensus/snowstorm/consensus_test.go b/snow/consensus/snowstorm/consensus_test.go index f24dece42d7d..267efb491044 100644 --- a/snow/consensus/snowstorm/consensus_test.go +++ b/snow/consensus/snowstorm/consensus_test.go @@ -12,7 +12,8 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/snow/consensus/snowball" + + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) var ( @@ -87,7 +88,7 @@ func MetricsTest(t *testing.T, factory Factory) { Setup() { - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, } @@ -98,7 +99,7 @@ func MetricsTest(t *testing.T, factory Factory) { graph.Initialize(snow.DefaultContextTest(), params) } { - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, } @@ -109,7 +110,7 @@ func MetricsTest(t *testing.T, factory Factory) { graph.Initialize(snow.DefaultContextTest(), params) } { - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, } @@ -126,7 +127,7 @@ func ParamsTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, } @@ -148,7 +149,7 @@ func IssuedTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, } @@ -174,7 +175,7 @@ func LeftoverInputTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, } @@ -213,7 +214,7 @@ func LowerConfidenceTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, } @@ -252,7 +253,7 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, } @@ -295,7 +296,7 @@ func IndependentTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 2, BetaRogue: 2, } @@ -343,7 +344,7 @@ func VirtuousTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, } @@ -381,7 +382,7 @@ func IsVirtuousTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, } @@ -421,7 +422,7 @@ func QuiesceTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, } @@ -454,7 +455,7 @@ func 
AcceptingDependencyTest(t *testing.T, factory Factory) { } purple.InputIDsV.Add(ids.Empty.Prefix(8)) - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, } @@ -545,7 +546,7 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { } purple.InputIDsV.Add(ids.Empty.Prefix(8)) - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, } @@ -618,7 +619,7 @@ func VacuouslyAcceptedTest(t *testing.T, factory Factory) { StatusV: choices.Processing, }} - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, } @@ -638,7 +639,7 @@ func ConflictsTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, } @@ -688,7 +689,7 @@ func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, } @@ -753,7 +754,7 @@ func ErrorOnVacuouslyAcceptedTest(t *testing.T, factory Factory) { StatusV: choices.Processing, }} - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, } @@ -776,7 +777,7 @@ func ErrorOnAcceptedTest(t *testing.T, factory Factory) { }} purple.InputIDsV.Add(ids.Empty.Prefix(4)) - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, } @@ -813,7 +814,7 @@ func ErrorOnRejectingLowerConfidenceConflictTest(t *testing.T, factory Factory) }} pink.InputIDsV.Add(X) - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 1, } @@ -852,7 +853,7 @@ func ErrorOnRejectingHigherConfidenceConflictTest(t *testing.T, factory Factory) }} pink.InputIDsV.Add(X) - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 1, } @@ -876,7 +877,7 @@ func StringTest(t *testing.T, factory Factory, prefix string) { graph := factory.New() - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, } @@ -910,10 +911,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { { expected := prefix + "(\n" + - " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 1 Bias: 1\n" + - " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 0 Bias: 0\n" + - " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 0 Bias: 0\n" + - " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 1 Bias: 1\n" + + " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 1)\n" + + " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 0, Confidence = 0)\n" + + " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 0, Confidence = 0)\n" + + " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w 
SB(NumSuccessfulPolls = 1, Confidence = 1)\n" + ")" if str := graph.String(); str != expected { t.Fatalf("Expected %s, got %s", expected, str) @@ -940,10 +941,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { { expected := prefix + "(\n" + - " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 0 Bias: 1\n" + - " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 1 Bias: 1\n" + - " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 1 Bias: 1\n" + - " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 0 Bias: 1\n" + + " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + + " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 1, Confidence = 1)\n" + + " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 1, Confidence = 1)\n" + + " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + ")" if str := graph.String(); str != expected { t.Fatalf("Expected %s, got %s", expected, str) @@ -967,10 +968,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { { expected := prefix + "(\n" + - " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 0 Bias: 1\n" + - " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 0 Bias: 1\n" + - " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 0 Bias: 1\n" + - " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 0 Bias: 1\n" + + " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + + " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + + " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + + " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + ")" if str := graph.String(); str != expected { t.Fatalf("Expected %s, got %s", expected, str) @@ -991,10 +992,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { { expected := prefix + "(\n" + - " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 0 Bias: 1\n" + - " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 1 Bias: 2\n" + - " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 1 Bias: 2\n" + - " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 0 Bias: 1\n" + + " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + + " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 2, Confidence = 1)\n" + + " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 2, Confidence = 1)\n" + + " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + ")" if str := graph.String(); str != expected { t.Fatalf("Expected %s, got %s", expected, str) diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go index 3245bfd5d075..84a28751de1d 100644 --- a/snow/consensus/snowstorm/directed.go +++ 
b/snow/consensus/snowstorm/directed.go @@ -12,8 +12,9 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/snow/consensus/snowball" "github.com/ava-labs/gecko/utils/formatting" + + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) // DirectedFactory implements Factory by returning a directed struct @@ -37,20 +38,7 @@ type Directed struct { } type directedTx struct { - // bias is the number of times this transaction was the successful result of - // a network poll - bias int - - // confidence is the number of consecutive times this transaction was the - // successful result of a network poll as of [lastVote] - confidence int - - // lastVote is the last poll number that this transaction was included in a - // successful network poll - lastVote int - - // rogue identifies if there is a known conflict with this transaction - rogue bool + snowball // pendingAccept identifies if this transaction has been marked as accepted // once its transitive dependencies have also been accepted @@ -73,7 +61,10 @@ type directedTx struct { } // Initialize implements the Consensus interface -func (dg *Directed) Initialize(ctx *snow.Context, params snowball.Parameters) error { +func (dg *Directed) Initialize( + ctx *snow.Context, + params sbcon.Parameters, +) error { dg.txs = make(map[[32]byte]*directedTx) dg.utxos = make(map[[32]byte]ids.Set) @@ -171,10 +162,10 @@ func (dg *Directed) Add(tx Tx) error { // this UTXO spenders := dg.utxos[inputKey] - // Add all the txs that spend this UTXO to this tx's conflicts that are - // preferred over this tx. We know all these tx's are preferred over - // this tx, because this tx currently has a bias of 0 and the tie-break - // goes to the tx whose bias was updated first. + // Add all the txs that spend this UTXO to this tx's conflicts that are + // preferred over this tx. We know all these txs are preferred over + // this tx, because this tx currently has a bias of 0 and the tie goes + // to the tx whose bias was updated first. txNode.outs.Union(spenders) // Update txs conflicting with tx to account for its issuance @@ -261,41 +252,52 @@ func (dg *Directed) Issued(tx Tx) bool { // RecordPoll implements the Consensus interface func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) { + // Increase the vote ID. This is updated here and is used to reset the + // confidence values of transactions lazily. dg.currentVote++ + + // Changed tracks if the Avalanche instance needs to recompute its + // frontiers. Frontiers only need to be recalculated if preferences change + // or if a tx was accepted. changed := false + // We only want to iterate over txs that received alpha votes votes.SetThreshold(dg.params.Alpha) - threshold := votes.Threshold() // Each element is ID of transaction preferred by >= Alpha poll respondents - for _, toInc := range threshold.List() { - incKey := toInc.Key() - txNode, exist := dg.txs[incKey] + // Get the set of IDs that meet this alpha threshold + metThreshold := votes.Threshold() + for _, txID := range metThreshold.List() { + txKey := txID.Key() + + // Get the node this tx represents + txNode, exist := dg.txs[txKey] if !exist { - // Votes for decided consumers are ignored + // This tx may have already been accepted because of tx + // dependencies. If this is the case, we can just drop the vote.
continue } - if txNode.lastVote+1 != dg.currentVote { - txNode.confidence = 0 - } - txNode.lastVote = dg.currentVote + txNode.RecordSuccessfulPoll(dg.currentVote) - dg.ctx.Log.Verbo("Increasing (bias, confidence) of %s from (%d, %d) to (%d, %d)", - toInc, txNode.bias, txNode.confidence, txNode.bias+1, txNode.confidence+1) - - txNode.bias++ - txNode.confidence++ + dg.ctx.Log.Verbo("Updated TxID=%s to have consensus state=%s", + txID, &txNode.snowball) + // If the tx should be accepted, then we should defer its acceptance + // until its dependencies are decided. However, if this tx was + // already marked to be accepted, we shouldn't register it again. if !txNode.pendingAccept && - ((!txNode.rogue && txNode.confidence >= dg.params.BetaVirtuous) || - txNode.confidence >= dg.params.BetaRogue) { + txNode.Finalized(dg.params.BetaVirtuous, dg.params.BetaRogue) { dg.deferAcceptance(txNode) if dg.errs.Errored() { return changed, dg.errs.Err } } + if !txNode.accepted { + // If this tx wasn't accepted, then this instance is only changed if + // preferences changed. changed = dg.redirectEdges(txNode) || changed } else { + // By accepting a tx, the state of this instance has changed. changed = true } } @@ -307,31 +309,24 @@ func (dg *Directed) String() string { for _, tx := range dg.txs { nodes = append(nodes, tx) } + // Sort the nodes so that the string representation is canonical sortTxNodes(nodes) sb := strings.Builder{} - sb.WriteString("DG(") format := fmt.Sprintf( - "\n Choice[%s] = ID: %%50s Confidence: %s Bias: %%d", - formatting.IntFormat(len(dg.txs)-1), - formatting.IntFormat(dg.params.BetaRogue-1)) - + "\n Choice[%s] = ID: %%50s %%s", + formatting.IntFormat(len(dg.txs)-1)) for i, txNode := range nodes { - confidence := txNode.confidence - if txNode.lastVote != dg.currentVote { - confidence = 0 - } sb.WriteString(fmt.Sprintf(format, - i, txNode.tx.ID(), confidence, txNode.bias)) + i, txNode.tx.ID(), txNode.snowball.CurrentString(dg.currentVote))) } if len(nodes) > 0 { sb.WriteString("\n") } sb.WriteString(")") - return sb.String() } @@ -390,7 +385,7 @@ func (dg *Directed) redirectEdges(tx *directedTx) bool { func (dg *Directed) redirectEdge(txNode *directedTx, conflictID ids.ID) bool { nodeID := txNode.tx.ID() conflict := dg.txs[conflictID.Key()] - if txNode.bias <= conflict.bias { + if txNode.numSuccessfulPolls <= conflict.numSuccessfulPolls { return false } diff --git a/snow/consensus/snowstorm/equality_test.go b/snow/consensus/snowstorm/equality_test.go index 8298c48329e1..383504bbdc78 100644 --- a/snow/consensus/snowstorm/equality_test.go +++ b/snow/consensus/snowstorm/equality_test.go @@ -9,7 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/gecko/snow/consensus/snowball" + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) func TestConflictGraphEquality(t *testing.T) { @@ -19,7 +19,7 @@ func TestConflictGraphEquality(t *testing.T) { colorsPerConsumer := 2 maxInputConflicts := 2 numNodes := 100 - params := snowball.Parameters{ + params := sbcon.Parameters{ Metrics: prometheus.NewRegistry(), K: 20, Alpha: 11, diff --git a/snow/consensus/snowstorm/input.go b/snow/consensus/snowstorm/input.go index 1dd6aff99c96..32455dac14ab 100644 --- a/snow/consensus/snowstorm/input.go +++ b/snow/consensus/snowstorm/input.go @@ -11,8 +11,9 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/snow/consensus/snowball" "github.com/ava-labs/gecko/utils/formatting" + + sbcon 
"github.com/ava-labs/gecko/snow/consensus/snowball" ) // InputFactory implements Factory by returning an input struct @@ -53,7 +54,7 @@ type inputUtxo struct { } // Initialize implements the ConflictGraph interface -func (ig *Input) Initialize(ctx *snow.Context, params snowball.Parameters) error { +func (ig *Input) Initialize(ctx *snow.Context, params sbcon.Parameters) error { ig.txs = make(map[[32]byte]inputTx) ig.utxos = make(map[[32]byte]inputUtxo) diff --git a/snow/consensus/snowstorm/network_test.go b/snow/consensus/snowstorm/network_test.go index f60cbe34f9e4..f1e3d77ab38a 100644 --- a/snow/consensus/snowstorm/network_test.go +++ b/snow/consensus/snowstorm/network_test.go @@ -9,12 +9,13 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/snow/consensus/snowball" "github.com/ava-labs/gecko/utils/sampler" + + sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) type Network struct { - params snowball.Parameters + params sbcon.Parameters consumers []*TestTx nodeTxs []map[[32]byte]*TestTx nodes, running []Consensus @@ -31,7 +32,7 @@ func (n *Network) shuffleConsumers() { n.consumers = consumers } -func (n *Network) Initialize(params snowball.Parameters, numColors, colorsPerConsumer, maxInputConflicts int) { +func (n *Network) Initialize(params sbcon.Parameters, numColors, colorsPerConsumer, maxInputConflicts int) { n.params = params idCount := uint64(0) diff --git a/snow/consensus/snowstorm/snowball.go b/snow/consensus/snowstorm/snowball.go new file mode 100644 index 000000000000..8037ed5842a9 --- /dev/null +++ b/snow/consensus/snowstorm/snowball.go @@ -0,0 +1,67 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowstorm + +import ( + "fmt" +) + +type snowball struct { + // numSuccessfulPolls is the number of times this choice was the successful + // result of a network poll + numSuccessfulPolls int + + // confidence is the number of consecutive times this choice was the + // successful result of a network poll as of [lastVote] + confidence int + + // lastVote is the last poll number that this choice was included in a + // successful network poll + lastVote int + + // rogue identifies if there is a known conflict with this choice + rogue bool +} + +func (sb *snowball) RecordSuccessfulPoll(currentVote int) { + // If this choice wasn't voted for during the last poll, the confidence + // should have been reset during the last poll. So, we reset it now. + if sb.lastVote+1 != currentVote { + sb.confidence = 0 + } + + // This choice was voted for in this poll. Mark it as such. + sb.lastVote = currentVote + + // An affirmative vote increases both the snowball and snowflake counters. + sb.numSuccessfulPolls++ + sb.confidence++ +} + +func (sb *snowball) Finalized(betaVirtuous, betaRogue int) bool { + // This choice is finalized if the snowflake counter is at least + // [betaRogue]. If there are no known conflicts with this operation, it can + // be accepted with a snowflake counter of at least [betaVirtuous]. 
+ return (!sb.rogue && sb.confidence >= betaVirtuous) || + sb.confidence >= betaRogue +} + +func (sb *snowball) CurrentString(currentVote int) string { + confidence := sb.confidence + if sb.lastVote != currentVote { + confidence = 0 + } + return fmt.Sprintf( + "SB(NumSuccessfulPolls = %d, Confidence = %d)", + sb.numSuccessfulPolls, + confidence) +} + +func (sb *snowball) String() string { + return fmt.Sprintf( + "SB(NumSuccessfulPolls = %d, Confidence = %d, As of %d)", + sb.numSuccessfulPolls, + sb.confidence, + sb.lastVote) +} From 2d8fafbed18b2e415d7100a651451952da1235ca Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 25 Aug 2020 13:54:32 -0400 Subject: [PATCH 08/47] bump coreth version --- go.mod | 2 +- go.sum | 4 ++-- scripts/build_coreth.sh | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 4a219ebca215..260e3480d0f4 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/AppsFlyer/go-sundheit v0.2.0 github.com/allegro/bigcache v1.2.1 // indirect github.com/aristanetworks/goarista v0.0.0-20200812190859-4cb0e71f3c0e // indirect - github.com/ava-labs/coreth v0.2.12-rc.1 // indirect + github.com/ava-labs/coreth v0.2.13 // indirect github.com/ava-labs/go-ethereum v1.9.3 github.com/btcsuite/btcutil v1.0.2 github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200627015759-01fd2de07837 diff --git a/go.sum b/go.sum index 7a57321b2601..ad6caf93f241 100644 --- a/go.sum +++ b/go.sum @@ -21,8 +21,8 @@ github.com/aristanetworks/goarista v0.0.0-20200609010056-95bcf8053598/go.mod h1: github.com/aristanetworks/goarista v0.0.0-20200812190859-4cb0e71f3c0e h1:tkEt0le4Lv5+VmcxZPIVSrP8LVPLhndIm/BOP7iPh/w= github.com/aristanetworks/goarista v0.0.0-20200812190859-4cb0e71f3c0e/go.mod h1:QZe5Yh80Hp1b6JxQdpfSEEe8X7hTyTEZSosSrFf/oJE= github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc= -github.com/ava-labs/coreth v0.2.12-rc.1 h1:BUUu+89KwsAFZpdcim8PGkc11P54BQhvRaB9XzX63F8= -github.com/ava-labs/coreth v0.2.12-rc.1/go.mod h1:ZwQ7rzHvQLorZsMoUm2FDWmLwOvDDoNzB+EEp2NhWyI= +github.com/ava-labs/coreth v0.2.13 h1:MaTf6Mbhfh2Ou5MpYmMqohCOtg3ZQgbDXTeEzcCiMb8= +github.com/ava-labs/coreth v0.2.13/go.mod h1:ZwQ7rzHvQLorZsMoUm2FDWmLwOvDDoNzB+EEp2NhWyI= github.com/ava-labs/gecko v0.6.1-rc.1/go.mod h1:TT6uA1BETZpVMR0xiFtE8I5Mv4DULlS+lAL3xuYKnpA= github.com/ava-labs/go-ethereum v1.9.3 h1:GmnMZ/dlvVAPFmWBzEpRJX49pUAymPfoASLNRJqR0AY= github.com/ava-labs/go-ethereum v1.9.3/go.mod h1:a+agc6fXfZFsPZCylA3ry4Y8CLCqLKg3Rc23NXZ9aw8= diff --git a/scripts/build_coreth.sh b/scripts/build_coreth.sh index dedb497c8336..538db1fe6e34 100755 --- a/scripts/build_coreth.sh +++ b/scripts/build_coreth.sh @@ -12,7 +12,7 @@ BUILD_DIR="$GECKO_PATH/build" # Where binaries go PLUGIN_DIR="$BUILD_DIR/plugins" # Where plugin binaries (namely coreth) go BINARY_PATH="$PLUGIN_DIR/evm" -CORETH_VER="0.2.12-rc.1" # Should match coreth version in go.mod +CORETH_VER="0.2.13" # Should match coreth version in go.mod CORETH_PATH="$GOPATH/pkg/mod/github.com/ava-labs/coreth@v$CORETH_VER" if [[ $# -eq 2 ]]; then From fb2830121bc5fde9d23bfa8bbfa05affdfcf5539 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 25 Aug 2020 13:55:36 -0400 Subject: [PATCH 09/47] v0.6.5 version bump --- node/node.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/node.go b/node/node.go index 95acaa0cc108..213367cbf875 100644 --- a/node/node.go +++ b/node/node.go @@ -63,7 +63,7 @@ var ( genesisHashKey = []byte("genesisID") // Version is the version of this 
code
-	Version = version.NewDefaultVersion("avalanche", 0, 6, 4)
+	Version = version.NewDefaultVersion("avalanche", 0, 6, 5)

 	versionParser = version.NewDefaultParser()
 )

From 36cfb0985758034a5208b592e695f42b4ec2bd12 Mon Sep 17 00:00:00 2001
From: StephenButtolph
Date: Tue, 25 Aug 2020 15:02:20 -0400
Subject: [PATCH 10/47] continuing snowstorm cleanup

---
 snow/consensus/snowstorm/directed.go | 54 +++++++++++++++++++---------
 1 file changed, 37 insertions(+), 17 deletions(-)

diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go
index 84a28751de1d..93ce1c5b4030 100644
--- a/snow/consensus/snowstorm/directed.go
+++ b/snow/consensus/snowstorm/directed.go
@@ -330,7 +330,11 @@ func (dg *Directed) String() string {
 	return sb.String()
 }

+// deferAcceptance attempts to mark this tx as accepted now or in the future
+// once dependencies are accepted
 func (dg *Directed) deferAcceptance(txNode *directedTx) {
+	// Mark that this tx is pending acceptance so this function won't be called
+	// again
 	txNode.pendingAccept = true

 	toAccept := &directedAccepter{
@@ -338,36 +342,56 @@ func (dg *Directed) deferAcceptance(txNode *directedTx) {
 		txNode: txNode,
 	}
 	for _, dependency := range txNode.tx.Dependencies() {
-		if !dependency.Status().Decided() {
+		if dependency.Status() != choices.Accepted {
+			// If the dependency isn't accepted, then it must be processing. So,
+			// this tx should be accepted after all of these processing txs are
+			// accepted.
 			toAccept.deps.Add(dependency.ID())
 		}
 	}
+
+	// This tx is no longer being voted on, so we remove it from the voting set.
+	// This ensures that virtuous txs built on top of rogue txs don't force the
+	// node to treat the rogue tx as virtuous.
 	dg.virtuousVoting.Remove(txNode.tx.ID())
 	dg.pendingAccept.Register(toAccept)
 }

+// reject all the named txIDs and remove them from the graph
 func (dg *Directed) reject(ids ...ids.ID) error {
-	for _, conflict := range ids {
-		conflictKey := conflict.Key()
-		conf := dg.txs[conflictKey]
+	for _, conflictID := range ids {
+		conflictKey := conflictID.Key()
+		conflict := dg.txs[conflictKey]
+
+		// We are rejecting the tx, so we should remove it from the graph
 		delete(dg.txs, conflictKey)

-		dg.preferences.Remove(conflict)
+		// While it's statistically unlikely that something being rejected is
+		// preferred, it is handled for completeness.
+		dg.preferences.Remove(conflictID)

 		// remove the edge between this node and all its neighbors
-		dg.removeConflict(conflict, conf.ins.List()...)
-		dg.removeConflict(conflict, conf.outs.List()...)
+		dg.removeConflict(conflictID, conflict.ins.List()...)
+		dg.removeConflict(conflictID, conflict.outs.List()...)

-		// Mark it as rejected
-		if err := conf.tx.Reject(); err != nil {
+		// Reject is called before notifying the IPC so that rejections that
+		// cause fatal errors aren't sent to an IPC peer.
+		if err := conflict.tx.Reject(); err != nil {
 			return err
 		}
-		dg.ctx.DecisionDispatcher.Reject(dg.ctx.ChainID, conf.tx.ID(), conf.tx.Bytes())
-		dg.metrics.Rejected(conflict)
-		dg.pendingAccept.Abandon(conflict)
-		dg.pendingReject.Fulfill(conflict)
+		// Notify the IPC that the tx was rejected
+		dg.ctx.DecisionDispatcher.Reject(dg.ctx.ChainID, conflict.tx.ID(), conflict.tx.Bytes())
+
+		// Update the metrics to account for this transaction's rejection
+		dg.metrics.Rejected(conflictID)
+
+		// If there is a tx that was accepted pending on this tx, the dependent
+		// tx can't be accepted.
+		dg.pendingAccept.Abandon(conflictID)
+		// If there is a tx that was issued pending on this tx, the dependent tx
+		// must be rejected.
+		dg.pendingReject.Fulfill(conflictID)
 	}
 	return nil
 }
@@ -389,10 +413,6 @@ func (dg *Directed) redirectEdge(txNode *directedTx, conflictID ids.ID) bool {
 		return false
 	}

-	// TODO: why is this confidence reset here? It should already be reset
-	// implicitly by the lack of a timestamp increase.
-	conflict.confidence = 0
-
 	// Change the edge direction
 	conflict.ins.Remove(nodeID)
 	conflict.outs.Add(nodeID)

From f9abcb722db1f11739751566a80d4423740920df Mon Sep 17 00:00:00 2001
From: StephenButtolph
Date: Tue, 25 Aug 2020 16:35:02 -0400
Subject: [PATCH 11/47] Fixed verification race condition with the AVM

---
 vms/avm/unique_tx.go |  22 +--
 vms/avm/vm.go        |   6 +-
 vms/avm/vm_test.go   | 352 ++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 362 insertions(+), 18 deletions(-)

diff --git a/vms/avm/unique_tx.go b/vms/avm/unique_tx.go
index c47ca442c83d..1fe1daaf9ced 100644
--- a/vms/avm/unique_tx.go
+++ b/vms/avm/unique_tx.go
@@ -269,8 +269,7 @@ func (tx *UniqueTx) Bytes() []byte {
 	return tx.Tx.Bytes()
 }

-// Verify the validity of this transaction
-func (tx *UniqueTx) Verify() error {
+func (tx *UniqueTx) verifyWithoutCacheWrites() error {
 	switch status := tx.Status(); status {
 	case choices.Unknown:
 		return errUnknownTx
@@ -283,6 +282,17 @@
 	}
 }

+// Verify the validity of this transaction
+func (tx *UniqueTx) Verify() error {
+	if err := tx.verifyWithoutCacheWrites(); err != nil {
+		return err
+	}
+
+	tx.verifiedState = true
+	tx.vm.pubsub.Publish("verified", tx.ID())
+	return nil
+}
+
 // SyntacticVerify verifies that this transaction is well formed
 func (tx *UniqueTx) SyntacticVerify() error {
 	tx.refresh()
@@ -310,11 +320,5 @@ func (tx *UniqueTx) SemanticVerify() error {
 		return tx.validity
 	}

-	if err := tx.Tx.SemanticVerify(tx.vm, tx.UnsignedTx); err != nil {
-		return err
-	}
-
-	tx.verifiedState = true
-	tx.vm.pubsub.Publish("verified", tx.ID())
-	return nil
+	return tx.Tx.SemanticVerify(tx.vm, tx.UnsignedTx)
 }

diff --git a/vms/avm/vm.go b/vms/avm/vm.go
index f4fd392b079d..e5d5a0071ac3 100644
--- a/vms/avm/vm.go
+++ b/vms/avm/vm.go
@@ -307,7 +307,7 @@ func (vm *VM) GetTx(txID ids.ID) (snowstorm.Tx, error) {
 	}
 	// Verify must be called in the case that the tx was flushed from the unique
 	// cache.
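The shape of this race fix can be summarized outside the AVM types. The sketch below uses hypothetical stand-ins (uniqueTx, publish) rather than the real UniqueTx and pubsub APIs: internal callers validate without side effects, and only the consensus-facing Verify records the verified state:

```go
package main

import "fmt"

// uniqueTx and publish are hypothetical stand-ins; the point is the split,
// not the real API surface.
type uniqueTx struct {
	verifiedState bool
	publish       func(event string)
}

// verifyWithoutSideEffects holds the status and semantic checks only, so
// internal paths can validate without racing to mark the tx verified.
func (tx *uniqueTx) verifyWithoutSideEffects() error {
	// ... status / syntactic / semantic checks would go here ...
	return nil
}

// Verify validates and, only on this external path, records the result.
func (tx *uniqueTx) Verify() error {
	if err := tx.verifyWithoutSideEffects(); err != nil {
		return err
	}
	tx.verifiedState = true
	tx.publish("verified")
	return nil
}

func main() {
	tx := &uniqueTx{publish: func(event string) { fmt.Println("published:", event) }}
	fmt.Println(tx.Verify(), tx.verifiedState) // published: verified, then <nil> true
}
```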
- return tx, tx.Verify() + return tx, tx.verifyWithoutCacheWrites() } /* @@ -328,7 +328,7 @@ func (vm *VM) IssueTx(b []byte) (ids.ID, error) { if err != nil { return ids.ID{}, err } - if err := tx.Verify(); err != nil { + if err := tx.verifyWithoutCacheWrites(); err != nil { return ids.ID{}, err } vm.issueTx(tx) @@ -609,7 +609,7 @@ func (vm *VM) getUTXO(utxoID *avax.UTXOID) (*avax.UTXO, error) { txID: inputTx, } - if err := parent.Verify(); err != nil { + if err := parent.verifyWithoutCacheWrites(); err != nil { return nil, errMissingUTXO } else if status := parent.Status(); status.Decided() { return nil, errMissingUTXO diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index 37ab6b4b7701..70fdc282736a 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -521,7 +521,7 @@ type testTxBytes struct{ unsignedBytes []byte } func (tx *testTxBytes) UnsignedBytes() []byte { return tx.unsignedBytes } func TestIssueTx(t *testing.T) { - genesisBytes, issuer, vm , _ := GenesisVM(t) + genesisBytes, issuer, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -551,7 +551,7 @@ func TestIssueTx(t *testing.T) { } func TestGenesisGetUTXOs(t *testing.T) { - _, _, vm , _ := GenesisVM(t) + _, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -575,7 +575,7 @@ func TestGenesisGetUTXOs(t *testing.T) { // Test issuing a transaction that consumes a currently pending UTXO. The // transaction should be issued successfully. func TestIssueDependentTx(t *testing.T) { - genesisBytes, issuer, vm , _ := GenesisVM(t) + genesisBytes, issuer, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -968,7 +968,7 @@ func TestIssueProperty(t *testing.T) { } func TestVMFormat(t *testing.T) { - _, _, vm , _ := GenesisVM(t) + _, _, vm, _ := GenesisVM(t) defer func() { vm.Shutdown() vm.ctx.Lock.Unlock() @@ -994,7 +994,7 @@ func TestVMFormat(t *testing.T) { } func TestTxCached(t *testing.T) { - genesisBytes, _, vm , _ := GenesisVM(t) + genesisBytes, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -1022,7 +1022,7 @@ func TestTxCached(t *testing.T) { } func TestTxNotCached(t *testing.T) { - genesisBytes, _, vm , _ := GenesisVM(t) + genesisBytes, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -1050,3 +1050,343 @@ func TestTxNotCached(t *testing.T) { assert.NoError(t, err) assert.True(t, *called, "should have called the DB") } + +func TestTxVerifyAfterIssueTx(t *testing.T) { + genesisBytes, issuer, vm, _ := GenesisVM(t) + ctx := vm.ctx + defer func() { + vm.Shutdown() + ctx.Lock.Unlock() + }() + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + key := keys[0] + firstTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := firstTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + secondTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: 
[]*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := secondTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + parsedSecondTx, err := vm.ParseTx(secondTx.Bytes()) + if err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Verify(); err != nil { + t.Fatal(err) + } + if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Accept(); err != nil { + t.Fatal(err) + } + ctx.Lock.Unlock() + + msg := <-issuer + if msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + ctx.Lock.Lock() + + txs := vm.PendingTxs() + if len(txs) != 1 { + t.Fatalf("Should have returned %d tx(s)", 1) + } + parsedFirstTx := txs[0] + + if err := parsedFirstTx.Verify(); err == nil { + t.Fatalf("Should have errored due to a missing UTXO") + } +} + +func TestTxVerifyAfterGetTx(t *testing.T) { + genesisBytes, _, vm, _ := GenesisVM(t) + ctx := vm.ctx + defer func() { + vm.Shutdown() + ctx.Lock.Unlock() + }() + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + key := keys[0] + firstTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := firstTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + secondTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := secondTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + parsedSecondTx, err := vm.ParseTx(secondTx.Bytes()) + if err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Verify(); err != nil { + t.Fatal(err) + } + if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { + t.Fatal(err) + } + parsedFirstTx, err := vm.GetTx(firstTx.ID()) + if err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Accept(); err != nil { + t.Fatal(err) + } + if err := parsedFirstTx.Verify(); err == nil { + t.Fatalf("Should have errored due to a missing 
UTXO") + } +} + +func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { + genesisBytes, _, vm, _ := GenesisVM(t) + ctx := vm.ctx + defer func() { + vm.Shutdown() + ctx.Lock.Unlock() + }() + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + key := keys[0] + firstTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := firstTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + firstTxDecendent := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: firstTx.ID(), + OutputIndex: 0, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := firstTxDecendent.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + secondTx := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }}} + if err := secondTx.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + t.Fatal(err) + } + + parsedSecondTx, err := vm.ParseTx(secondTx.Bytes()) + if err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Verify(); err != nil { + t.Fatal(err) + } + if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { + t.Fatal(err) + } + if _, err := vm.IssueTx(firstTxDecendent.Bytes()); err != nil { + t.Fatal(err) + } + parsedFirstTx, err := vm.GetTx(firstTx.ID()) + if err != nil { + t.Fatal(err) + } + if err := parsedSecondTx.Accept(); err != nil { + t.Fatal(err) + } + if err := parsedFirstTx.Verify(); err == nil { + t.Fatalf("Should have errored due to a missing UTXO") + } +} From ab782ba103877ee874bd661b3d551dfa88b6988d Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 25 Aug 2020 17:25:10 -0400 Subject: [PATCH 12/47] add memo field to X-Chain send API --- vms/avm/service.go | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/vms/avm/service.go b/vms/avm/service.go index a0cf8c672051..48d854615946 100644 --- a/vms/avm/service.go +++ 
b/vms/avm/service.go @@ -886,17 +886,30 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *a // SendArgs are arguments for passing into Send requests type SendArgs struct { + // Username and password of user sending the funds api.UserPass - Amount json.Uint64 `json:"amount"` - AssetID string `json:"assetID"` - To string `json:"to"` + + // The amount of funds to send + Amount json.Uint64 `json:"amount"` + + // ID of the asset being sent + AssetID string `json:"assetID"` + + // Address of the recipient + To string `json:"to"` + + // Memo field + Memo string `json:"memo"` } // Send returns the ID of the newly created transaction func (service *Service) Send(r *http.Request, args *SendArgs, reply *api.JsonTxID) error { service.vm.ctx.Log.Info("AVM: Send called with username: %s", args.Username) - if args.Amount == 0 { + memoBytes := []byte(args.Memo) + if l := len(memoBytes); l > avax.MaxMemoSize { + return fmt.Errorf("Max memo length is %d but provided memo field is length %d", avax.MaxMemoSize, l) + } else if args.Amount == 0 { return errInvalidAmount } @@ -983,6 +996,7 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *api.JsonTxI BlockchainID: service.vm.ctx.ChainID, Outs: outs, Ins: ins, + Memo: memoBytes, }}} if err := tx.SignSECP256K1Fx(service.vm.codec, keys); err != nil { return err From 904bc60427a3626996faefed1602071d1c60b16c Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 25 Aug 2020 17:55:34 -0400 Subject: [PATCH 13/47] fixed typo --- vms/avm/vm_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index 70fdc282736a..c56808d9be21 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -1300,7 +1300,7 @@ func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { t.Fatal(err) } - firstTxDecendent := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ + firstTxDescendant := &Tx{UnsignedTx: &BaseTx{BaseTx: avax.BaseTx{ NetworkID: networkID, BlockchainID: chainID, Ins: []*avax.TransferableInput{{ @@ -1329,7 +1329,7 @@ func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { }, }}, }}} - if err := firstTxDecendent.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + if err := firstTxDescendant.SignSECP256K1Fx(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { t.Fatal(err) } @@ -1376,7 +1376,7 @@ func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { t.Fatal(err) } - if _, err := vm.IssueTx(firstTxDecendent.Bytes()); err != nil { + if _, err := vm.IssueTx(firstTxDescendant.Bytes()); err != nil { t.Fatal(err) } parsedFirstTx, err := vm.GetTx(firstTx.ID()) From 97f6ae1602b2f631a48d31c62b6d2c635be0bb86 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 25 Aug 2020 23:22:10 -0400 Subject: [PATCH 14/47] finished first pass of directed.go --- snow/consensus/snowstorm/common.go | 23 ++ snow/consensus/snowstorm/consensus.go | 3 + snow/consensus/snowstorm/consensus_test.go | 225 ++++++++++++++---- snow/consensus/snowstorm/directed.go | 148 +++++++----- snow/consensus/snowstorm/input.go | 29 +-- .../avalanche/state/unique_vertex_test.go | 2 +- snow/engine/avalanche/state/vertex_test.go | 2 +- 7 files changed, 299 insertions(+), 133 deletions(-) diff --git a/snow/consensus/snowstorm/common.go b/snow/consensus/snowstorm/common.go index 5bf09d9fc9aa..f84ce612f97f 100644 --- a/snow/consensus/snowstorm/common.go +++ b/snow/consensus/snowstorm/common.go @@ -81,3 +81,26 @@ func (c 
*common) Finalized() bool { numPreferences) return numPreferences == 0 } + +// rejector implements Blockable +type rejector struct { + g Consensus + deps ids.Set + errs *wrappers.Errs + rejected bool // true if the tx has been rejected + txID ids.ID +} + +func (r *rejector) Dependencies() ids.Set { return r.deps } + +func (r *rejector) Fulfill(ids.ID) { + if r.rejected || r.errs.Errored() { + return + } + r.rejected = true + r.errs.Add(r.g.reject(r.txID)) +} + +func (*rejector) Abandon(ids.ID) {} + +func (*rejector) Update() {} diff --git a/snow/consensus/snowstorm/consensus.go b/snow/consensus/snowstorm/consensus.go index 5c89ffe6ac31..bca3894b7fc5 100644 --- a/snow/consensus/snowstorm/consensus.go +++ b/snow/consensus/snowstorm/consensus.go @@ -59,4 +59,7 @@ type Consensus interface { // possible that after returning finalized, a new decision may be added such // that this instance is no longer finalized. Finalized() bool + + // Reject all the provided txs and remove them from the graph + reject(txIDs ...ids.ID) error } diff --git a/snow/consensus/snowstorm/consensus_test.go b/snow/consensus/snowstorm/consensus_test.go index 267efb491044..12127530675a 100644 --- a/snow/consensus/snowstorm/consensus_test.go +++ b/snow/consensus/snowstorm/consensus_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" @@ -37,6 +38,7 @@ var ( ErrorOnAcceptedTest, ErrorOnRejectingLowerConfidenceConflictTest, ErrorOnRejectingHigherConfidenceConflictTest, + UTXOCleanupTest, } Red, Green, Blue, Alpha *TestTx @@ -89,8 +91,12 @@ func MetricsTest(t *testing.T, factory Factory) { { params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_processing", @@ -100,8 +106,12 @@ func MetricsTest(t *testing.T, factory Factory) { } { params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_accepted", @@ -111,8 +121,12 @@ func MetricsTest(t *testing.T, factory Factory) { } { params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_rejected", @@ -128,8 +142,12 @@ func ParamsTest(t *testing.T, factory Factory) { graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -150,8 +168,12 @@ func IssuedTest(t *testing.T, factory Factory) { graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } 
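These tests all construct the same snowball parameters with minor variations. As a rough guide to the knobs: K is the poll sample size, Alpha the quorum required within a sample, BetaVirtuous and BetaRogue the confidence thresholds for virtuous and rogue txs, and ConcurrentRepolls the number of outstanding repolls. The sanity checks in this sketch are illustrative assumptions, not gecko's actual parameter validation:

```go
package main

import "fmt"

type params struct {
	K, Alpha, BetaVirtuous, BetaRogue, ConcurrentRepolls int
}

// check encodes plausible relationships between the fields; these exact
// conditions are an assumption made for illustration.
func (p params) check() error {
	switch {
	case p.Alpha <= p.K/2 || p.Alpha > p.K:
		return fmt.Errorf("alpha must satisfy K/2 < alpha <= K")
	case p.BetaVirtuous <= 0 || p.BetaVirtuous > p.BetaRogue:
		return fmt.Errorf("betas must satisfy 0 < betaVirtuous <= betaRogue")
	case p.ConcurrentRepolls <= 0:
		return fmt.Errorf("concurrentRepolls must be positive")
	}
	return nil
}

func main() {
	p := params{K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, ConcurrentRepolls: 1}
	fmt.Println(p.check()) // <nil>
}
```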
graph.Initialize(snow.DefaultContextTest(), params) @@ -176,8 +198,12 @@ func LeftoverInputTest(t *testing.T, factory Factory) { graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -215,8 +241,12 @@ func LowerConfidenceTest(t *testing.T, factory Factory) { graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -254,8 +284,12 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) { graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -297,8 +331,12 @@ func IndependentTest(t *testing.T, factory Factory) { graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 2, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 2, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -345,8 +383,12 @@ func VirtuousTest(t *testing.T, factory Factory) { graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -383,8 +425,12 @@ func IsVirtuousTest(t *testing.T, factory Factory) { graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -423,8 +469,12 @@ func QuiesceTest(t *testing.T, factory Factory) { graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -456,8 +506,12 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { purple.InputIDsV.Add(ids.Empty.Prefix(8)) params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -547,8 +601,12 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { purple.InputIDsV.Add(ids.Empty.Prefix(8)) params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ 
-620,8 +678,12 @@ func VacuouslyAcceptedTest(t *testing.T, factory Factory) { }} params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -640,8 +702,12 @@ func ConflictsTest(t *testing.T, factory Factory) { graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -690,8 +756,12 @@ func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) { graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -755,8 +825,12 @@ func ErrorOnVacuouslyAcceptedTest(t *testing.T, factory Factory) { }} params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -778,8 +852,12 @@ func ErrorOnAcceptedTest(t *testing.T, factory Factory) { purple.InputIDsV.Add(ids.Empty.Prefix(4)) params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -815,8 +893,12 @@ func ErrorOnRejectingLowerConfidenceConflictTest(t *testing.T, factory Factory) pink.InputIDsV.Add(X) params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -854,8 +936,12 @@ func ErrorOnRejectingHigherConfidenceConflictTest(t *testing.T, factory Factory) pink.InputIDsV.Add(X) params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) @@ -872,14 +958,65 @@ func ErrorOnRejectingHigherConfidenceConflictTest(t *testing.T, factory Factory) } } +func UTXOCleanupTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + } + err := graph.Initialize(snow.DefaultContextTest(), params) + assert.NoError(t, err) + + err = graph.Add(Red) + assert.NoError(t, err) + + err = graph.Add(Green) + assert.NoError(t, err) + + redVotes := ids.Bag{} + redVotes.Add(Red.ID()) + changed, err := graph.RecordPoll(redVotes) + assert.NoError(t, err) + assert.False(t, changed, "shouldn't have accepted red yet") + + changed, err = graph.RecordPoll(redVotes) + assert.NoError(t, err) + assert.True(t, changed, 
"should have accepted red") + + assert.Equal(t, choices.Accepted, Red.Status()) + assert.Equal(t, choices.Rejected, Green.Status()) + + err = graph.Add(Blue) + assert.NoError(t, err) + + blueVotes := ids.Bag{} + blueVotes.Add(Blue.ID()) + changed, err = graph.RecordPoll(blueVotes) + assert.NoError(t, err) + assert.True(t, changed, "should have accepted blue") + + assert.Equal(t, choices.Accepted, Blue.Status()) +} + func StringTest(t *testing.T, factory Factory, prefix string) { Setup() graph := factory.New() params := sbcon.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } graph.Initialize(snow.DefaultContextTest(), params) diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go index 93ce1c5b4030..489f264ac1d9 100644 --- a/snow/consensus/snowstorm/directed.go +++ b/snow/consensus/snowstorm/directed.go @@ -44,10 +44,6 @@ type directedTx struct { // once its transitive dependencies have also been accepted pendingAccept bool - // accepted identifies if this transaction has been accepted. This should - // only be set if [pendingAccept] is also set - accepted bool - // ins is the set of txIDs that this tx conflicts with that are less // preferred than this tx ins ids.Set @@ -213,9 +209,10 @@ func (dg *Directed) Add(tx Tx) error { // This tx can be accepted only if all the txs it depends on are also // accepted. If any txs that this tx depends on are rejected, reject it. - toReject := &directedRejector{ - dg: dg, - txNode: txNode, + toReject := &rejector{ + g: dg, + errs: &dg.errs, + txID: txID, } // Register all of this txs dependencies as possibilities to reject this tx. @@ -292,7 +289,7 @@ func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) { } } - if !txNode.accepted { + if txNode.tx.Status() != choices.Accepted { // If this tx wasn't accepted, then this instance is only changed if // preferences changed. changed = dg.redirectEdges(txNode) || changed @@ -358,11 +355,34 @@ func (dg *Directed) deferAcceptance(txNode *directedTx) { } // reject all the named txIDs and remove them from the graph -func (dg *Directed) reject(ids ...ids.ID) error { - for _, conflictID := range ids { +func (dg *Directed) reject(conflictIDs ...ids.ID) error { + for _, conflictID := range conflictIDs { conflictKey := conflictID.Key() conflict := dg.txs[conflictKey] + // This tx is not longer an option for consuming the UTXOs from its + // inputs, so we should remove their reference to this tx. + for _, inputID := range conflict.tx.InputIDs().List() { + inputKey := inputID.Key() + txIDs, exists := dg.utxos[inputKey] + if !exists { + // This UTXO may no longer exist because it was removed due to + // the acceptance of a tx. If that is the case, there is nothing + // left to remove from memory. + continue + } + txIDs.Remove(conflictID) + if txIDs.Len() == 0 { + // If this tx was the last tx consuming this UTXO, we should + // prune the UTXO from memory entirely. + delete(dg.utxos, inputKey) + } else { + // If this UTXO still has txs consuming it, then we should make + // sure this update is written back to the UTXOs map. 
+ dg.utxos[inputKey] = txIDs + } + } + // We are rejecting the tx, so we should remove it from the graph delete(dg.txs, conflictKey) @@ -396,6 +416,8 @@ func (dg *Directed) reject(ids ...ids.ID) error { return nil } +// redirectEdges attempts to turn outbound edges into inbound edges if the +// preferences have changed func (dg *Directed) redirectEdges(tx *directedTx) bool { changed := false for _, conflictID := range tx.outs.List() { @@ -404,43 +426,51 @@ func (dg *Directed) redirectEdges(tx *directedTx) bool { return changed } -// Set the confidence of all conflicts to 0 -// Change the direction of edges if needed +// Change the direction of this edge if needed. Returns true if the direction +// was switched. func (dg *Directed) redirectEdge(txNode *directedTx, conflictID ids.ID) bool { - nodeID := txNode.tx.ID() conflict := dg.txs[conflictID.Key()] if txNode.numSuccessfulPolls <= conflict.numSuccessfulPolls { return false } - // Change the edge direction + // Because this tx has a higher preference than the conflicting tx, we must + // ensure that the edge is directed towards this tx. + nodeID := txNode.tx.ID() + + // Change the edge direction according to the conflict tx conflict.ins.Remove(nodeID) conflict.outs.Add(nodeID) - dg.preferences.Remove(conflictID) // This consumer now has an out edge + dg.preferences.Remove(conflictID) // This conflict has an outbound edge + // Change the edge direction according to this tx txNode.ins.Add(conflictID) txNode.outs.Remove(conflictID) if txNode.outs.Len() == 0 { - // If I don't have out edges, I'm preferred + // If this tx doesn't have any outbound edges, it's preferred dg.preferences.Add(nodeID) } return true } -func (dg *Directed) removeConflict(id ids.ID, ids ...ids.ID) { - for _, neighborID := range ids { +func (dg *Directed) removeConflict(txID ids.ID, neighborIDs ...ids.ID) { + for _, neighborID := range neighborIDs { neighborKey := neighborID.Key() - // If the neighbor doesn't exist, they may have already been rejected - if neighbor, exists := dg.txs[neighborKey]; exists { - neighbor.ins.Remove(id) - neighbor.outs.Remove(id) - - if neighbor.outs.Len() == 0 { - // Make sure to mark the neighbor as preferred if needed - dg.preferences.Add(neighborID) - } + neighbor, exists := dg.txs[neighborKey] + if !exists { + // If the neighbor doesn't exist, they may have already been + // rejected, so this mapping can be skipped. + continue + } - dg.txs[neighborKey] = neighbor + // Remove any edge to this tx. + neighbor.ins.Remove(txID) + neighbor.outs.Remove(txID) + + if neighbor.outs.Len() == 0 { + // If this tx should now be preferred, make sure its status is + // updated. + dg.preferences.Add(neighborID) } } } @@ -462,67 +492,61 @@ func (a *directedAccepter) Fulfill(id ids.ID) { func (a *directedAccepter) Abandon(id ids.ID) { a.rejected = true } func (a *directedAccepter) Update() { - // If I was rejected or I am still waiting on dependencies to finish do - // nothing. + // If I was rejected or I am still waiting on dependencies to finish or an + // error has occurred, I shouldn't do anything. if a.rejected || a.deps.Len() != 0 || a.dg.errs.Errored() { return } - id := a.txNode.tx.ID() - delete(a.dg.txs, id.Key()) + txID := a.txNode.tx.ID() + // We are accepting the tx, so we should remove the node from the graph. 
+	delete(a.dg.txs, txID.Key())

+	// This tx is consuming all the UTXOs from its inputs, so we can prune them
+	// all from memory
 	for _, inputID := range a.txNode.tx.InputIDs().List() {
 		delete(a.dg.utxos, inputID.Key())
 	}

-	a.dg.virtuous.Remove(id)
-	a.dg.preferences.Remove(id)
-	// Reject the conflicts
+	// This tx is now accepted, so it shouldn't be part of the virtuous set or
+	// the preferred set. Its status as Accepted implies these descriptions.
+	a.dg.virtuous.Remove(txID)
+	a.dg.preferences.Remove(txID)
+
+	// Reject all the txs that conflicted with this tx.
 	if err := a.dg.reject(a.txNode.ins.List()...); err != nil {
 		a.dg.errs.Add(err)
 		return
 	}
-	// Should normally be empty
+	// While it is typically true that a tx that is being accepted is preferred,
+	// it is possible for this to not be the case. So this is handled for
+	// completeness.
 	if err := a.dg.reject(a.txNode.outs.List()...); err != nil {
 		a.dg.errs.Add(err)
 		return
 	}

-	// Mark it as accepted
+	// Accept is called before notifying the IPC so that acceptances that cause
+	// fatal errors aren't sent to an IPC peer.
 	if err := a.txNode.tx.Accept(); err != nil {
 		a.dg.errs.Add(err)
 		return
 	}
-	a.txNode.accepted = true
-	a.dg.ctx.DecisionDispatcher.Accept(a.dg.ctx.ChainID, id, a.txNode.tx.Bytes())
-	a.dg.metrics.Accepted(id)
-
-	a.dg.pendingAccept.Fulfill(id)
-	a.dg.pendingReject.Abandon(id)
-}

-// directedRejector implements Blockable
-type directedRejector struct {
-	dg       *Directed
-	deps     ids.Set
-	rejected bool // true if the transaction has been rejected
-	txNode   *directedTx
-}
+	// Notify the IPC socket that this tx has been accepted.
 	a.dg.ctx.DecisionDispatcher.Accept(a.dg.ctx.ChainID, txID, a.txNode.tx.Bytes())

-func (r *directedRejector) Dependencies() ids.Set { return r.deps }
+	// Update the metrics to account for this transaction's acceptance
+	a.dg.metrics.Accepted(txID)

-func (r *directedRejector) Fulfill(id ids.ID) {
-	if r.rejected || r.dg.errs.Errored() {
-		return
-	}
-	r.rejected = true
-	r.dg.errs.Add(r.dg.reject(r.txNode.tx.ID()))
+	// If there is a tx that was accepted pending on this tx, the dependent tx
+	// should be notified that it doesn't need to block on this tx anymore.
+	a.dg.pendingAccept.Fulfill(txID)
+	// If there is a tx that was issued pending on this tx, the dependent tx
+	// doesn't need to be rejected because of this tx.
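The pendingAccept/pendingReject machinery used here follows a simple blockable pattern: an event fires only once every registered dependency has been fulfilled, and a single abandon cancels it. A toy version, simplified from gecko's events package (names here are illustrative):

```go
package main

import "fmt"

// accepter is a toy stand-in for a Blockable registered with pendingAccept.
type accepter struct {
	deps      map[string]bool
	abandoned bool
	onAccept  func()
}

func (a *accepter) fulfill(dep string) {
	delete(a.deps, dep)
	a.update()
}

func (a *accepter) abandon(string) { a.abandoned = true }

func (a *accepter) update() {
	if a.abandoned || len(a.deps) != 0 {
		return // still blocked, or permanently cancelled
	}
	a.onAccept()
}

func main() {
	a := &accepter{
		deps:     map[string]bool{"dep1": true, "dep2": true},
		onAccept: func() { fmt.Println("tx accepted") },
	}
	a.fulfill("dep1") // still waiting on dep2
	a.fulfill("dep2") // prints "tx accepted"
}
```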
+ a.dg.pendingReject.Abandon(txID) } -func (*directedRejector) Abandon(id ids.ID) {} - -func (*directedRejector) Update() {} - type sortTxNodeData []*directedTx func (tnd sortTxNodeData) Less(i, j int) bool { diff --git a/snow/consensus/snowstorm/input.go b/snow/consensus/snowstorm/input.go index 32455dac14ab..e18a098ca257 100644 --- a/snow/consensus/snowstorm/input.go +++ b/snow/consensus/snowstorm/input.go @@ -129,9 +129,10 @@ func (ig *Input) Add(tx Tx) error { } ig.metrics.Issued(txID) - toReject := &inputRejector{ - ig: ig, - tn: cn, + toReject := &rejector{ + g: ig, + errs: &ig.errs, + txID: txID, } for _, dependency := range tx.Dependencies() { @@ -475,28 +476,6 @@ func (a *inputAccepter) Update() { a.ig.pendingReject.Abandon(id) } -// inputRejector implements Blockable -type inputRejector struct { - ig *Input - deps ids.Set - rejected bool // true if the transaction represented by fn has been rejected - tn inputTx -} - -func (r *inputRejector) Dependencies() ids.Set { return r.deps } - -func (r *inputRejector) Fulfill(id ids.ID) { - if r.rejected || r.ig.errs.Errored() { - return - } - r.rejected = true - r.ig.errs.Add(r.ig.reject(r.tn.tx.ID())) -} - -func (*inputRejector) Abandon(id ids.ID) {} - -func (*inputRejector) Update() {} - type tempNode struct { id ids.ID bias, confidence int diff --git a/snow/engine/avalanche/state/unique_vertex_test.go b/snow/engine/avalanche/state/unique_vertex_test.go index 7cc309c84526..de31d56023ae 100644 --- a/snow/engine/avalanche/state/unique_vertex_test.go +++ b/snow/engine/avalanche/state/unique_vertex_test.go @@ -80,7 +80,7 @@ func TestUniqueVertexCacheHit(t *testing.T) { serializer: s, } if err := uVtx.setVertex(vtx); err != nil { - t.Fatalf("Failed to set vertex due to: %w", err) + t.Fatalf("Failed to set vertex due to: %s", err) } newUVtx := &uniqueVertex{ diff --git a/snow/engine/avalanche/state/vertex_test.go b/snow/engine/avalanche/state/vertex_test.go index 58c4c7666c63..0bdd9aa244f7 100644 --- a/snow/engine/avalanche/state/vertex_test.go +++ b/snow/engine/avalanche/state/vertex_test.go @@ -31,7 +31,7 @@ func TestVertexVerify(t *testing.T) { } if err := validVertex.Verify(); err != nil { - t.Fatalf("Valid vertex failed verification due to: %w", err) + t.Fatalf("Valid vertex failed verification due to: %s", err) } nonUniqueParentsVtx := &innerVertex{ From 53c0d6289a6c7529a5ac613e5b36fadabbafde69 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Wed, 26 Aug 2020 00:27:02 -0400 Subject: [PATCH 15/47] made input.go pass tests --- snow/consensus/snowstorm/consensus_test.go | 9 +++------ snow/consensus/snowstorm/input.go | 22 ++++++++++++++-------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/snow/consensus/snowstorm/consensus_test.go b/snow/consensus/snowstorm/consensus_test.go index 12127530675a..478289ec517f 100644 --- a/snow/consensus/snowstorm/consensus_test.go +++ b/snow/consensus/snowstorm/consensus_test.go @@ -982,13 +982,11 @@ func UTXOCleanupTest(t *testing.T, factory Factory) { redVotes := ids.Bag{} redVotes.Add(Red.ID()) - changed, err := graph.RecordPoll(redVotes) + _, err = graph.RecordPoll(redVotes) assert.NoError(t, err) - assert.False(t, changed, "shouldn't have accepted red yet") - changed, err = graph.RecordPoll(redVotes) + _, err = graph.RecordPoll(redVotes) assert.NoError(t, err) - assert.True(t, changed, "should have accepted red") assert.Equal(t, choices.Accepted, Red.Status()) assert.Equal(t, choices.Rejected, Green.Status()) @@ -998,9 +996,8 @@ func UTXOCleanupTest(t *testing.T, factory Factory) 
{
 	blueVotes := ids.Bag{}
 	blueVotes.Add(Blue.ID())
-	changed, err = graph.RecordPoll(blueVotes)
+	_, err = graph.RecordPoll(blueVotes)
 	assert.NoError(t, err)
-	assert.True(t, changed, "should have accepted blue")

 	assert.Equal(t, choices.Accepted, Blue.Status())
 }

diff --git a/snow/consensus/snowstorm/input.go b/snow/consensus/snowstorm/input.go
index e18a098ca257..6f3251c3afbf 100644
--- a/snow/consensus/snowstorm/input.go
+++ b/snow/consensus/snowstorm/input.go
@@ -381,14 +381,14 @@ func (ig *Input) String() string {
 				confidence = 0
 				break
 			}
-
-			if input.confidence < confidence {
-				confidence = input.confidence
-			}
 			if !id.Equals(input.color) {
 				confidence = 0
 				break
 			}
+
+			if input.confidence < confidence {
+				confidence = input.confidence
+			}
 		}

 		nodes = append(nodes, tempNode{
@@ -404,12 +404,11 @@ func (ig *Input) String() string {
 	sb.WriteString("IG(")

 	format := fmt.Sprintf(
-		"\n Choice[%s] = ID: %%50s Confidence: %s Bias: %%d",
-		formatting.IntFormat(len(nodes)-1),
-		formatting.IntFormat(ig.params.BetaRogue-1))
+		"\n Choice[%s] = ID: %%50s %%s",
+		formatting.IntFormat(len(nodes)-1))

 	for i, cn := range nodes {
-		sb.WriteString(fmt.Sprintf(format, i, cn.id, cn.confidence, cn.bias))
+		sb.WriteString(fmt.Sprintf(format, i, cn.id, &cn))
 	}

 	if len(nodes) > 0 {
@@ -481,6 +480,13 @@ type tempNode struct {
 	bias, confidence int
 }

+func (tn *tempNode) String() string {
+	return fmt.Sprintf(
+		"SB(NumSuccessfulPolls = %d, Confidence = %d)",
+		tn.bias,
+		tn.confidence)
+}
+
 type sortTempNodeData []tempNode

 func (tnd sortTempNodeData) Less(i, j int) bool {

From 16baeb486e5886c734d456147c74c1f69d59c04b Mon Sep 17 00:00:00 2001
From: StephenButtolph
Date: Wed, 26 Aug 2020 01:30:21 -0400
Subject: [PATCH 16/47] cleaned up some wording

---
 snow/consensus/snowstorm/directed.go     | 52 ++++++++++++------------
 snow/engine/avalanche/transitive_test.go |  2 +-
 2 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go
index 489f264ac1d9..95b5dc136258 100644
--- a/snow/consensus/snowstorm/directed.go
+++ b/snow/consensus/snowstorm/directed.go
@@ -70,8 +70,8 @@ func (dg *Directed) Initialize(
 // IsVirtuous implements the Consensus interface
 func (dg *Directed) IsVirtuous(tx Tx) bool {
 	txID := tx.ID()
-	// If the tx is currently processing, we should just return if was registed
-	// as rogue or not.
+	// If the tx is currently processing, we should just return if it was
+	// registered as rogue or not.
 	if node, exists := dg.txs[txID.Key()]; exists {
 		return !node.rogue
 	}
@@ -94,12 +94,12 @@ func (dg *Directed) IsVirtuous(tx Tx) bool {
 func (dg *Directed) Conflicts(tx Tx) ids.Set {
 	conflicts := ids.Set{}
 	if node, exists := dg.txs[tx.ID().Key()]; exists {
-		// If the tx is currently processing, the conflicting txs is just the
+		// If the tx is currently processing, the conflicting txs are just the
 		// union of the inbound conflicts and the outbound conflicts.
 		conflicts.Union(node.ins)
 		conflicts.Union(node.outs)
 	} else {
-		// If the tx isn't currently processing, the conflicting txs is the
+		// If the tx isn't currently processing, the conflicting txs are the
 		// union of all the txs that spend an input that this tx spends.
 		for _, input := range tx.InputIDs().List() {
 			if spends, exists := dg.utxos[input.Key()]; exists {
@@ -113,7 +113,7 @@ func (dg *Directed) Conflicts(tx Tx) ids.Set {
 // Add implements the Consensus interface
 func (dg *Directed) Add(tx Tx) error {
 	if dg.Issued(tx) {
-		// If the tx was previously inserted, nothing should be done here.
+ // If the tx was previously inserted, it shouldn't be re-inserted. return nil }
@@ -158,10 +158,10 @@ func (dg *Directed) Add(tx Tx) error { // this UTXO spenders := dg.utxos[inputKey] - // Add all the txs that spend this UTXO to this txs conflicts that are - // preferred over this tx. We know all these txs are preferred over - // this tx, because this tx currently has a bias of 0 and the tie goes - // to the tx whose bias was updated first. + // Add all the txs that spend this UTXO to this tx's conflicts. These + // conflicting txs must be preferred over this tx. We know this because + // this tx currently has a bias of 0 and the tie goes to the tx whose + // bias was updated first. txNode.outs.Union(spenders) // Update txs conflicting with tx to account for its issuance
@@ -171,9 +171,8 @@ func (dg *Directed) Add(tx Tx) error { // Get the node that contains this conflicting tx conflict := dg.txs[conflictKey] - // This conflicting tx can't be virtuous anymore. So we remove this - // conflicting tx from any of the virtuous sets if it was previously - // in them. + // This conflicting tx can't be virtuous anymore. So, we attempt to + // remove it from all of the virtuous sets. dg.virtuous.Remove(conflictID) dg.virtuousVoting.Remove(conflictID)
@@ -207,8 +206,8 @@ func (dg *Directed) Add(tx Tx) error { // Add this tx to the set of currently processing txs dg.txs[txID.Key()] = txNode - // This tx can be accepted only if all the txs it depends on are also - // accepted. If any txs that this tx depends on are rejected, reject it. + // If a tx that this tx depends on is rejected, this tx should also be + // rejected. toReject := &rejector{ g: dg, errs: &dg.errs,
@@ -220,8 +219,9 @@ func (dg *Directed) Add(tx Tx) error { if dependency.Status() != choices.Accepted { // If the dependency isn't accepted, then it must be processing. So, // this tx should be rejected if any of these processing txs are - // rejected. Note that the dependencies can't be rejected, because - // it is assumped that this tx is currently considered valid. + // rejected. Note that the dependencies can't already be rejected, + // because it is assumed that this tx is currently considered + // valid. toReject.deps.Add(dependency.ID()) } }
@@ -249,11 +249,11 @@ func (dg *Directed) Issued(tx Tx) bool { // RecordPoll implements the Consensus interface func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) { - // Increase the vote ID. This is updated here and is used to reset the + // Increase the vote ID. This is only updated here and is used to reset the // confidence values of transactions lazily. dg.currentVote++ - // Changed tracks if the Avalanche instance needs to recompute its + // This flag tracks if the Avalanche instance needs to recompute its // frontiers. Frontiers only need to be recalculated if preferences change // or if a tx was accepted. changed := false
@@ -279,8 +279,8 @@ func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) { txID, &txNode.snowball) // If the tx should be accepted, then we should defer its acceptance - // until its dependencies are decided. However, if this tx was - // already marked to be accepted, we shouldn't register it again. + // until its dependencies are decided. If this tx was already marked to + // be accepted, we shouldn't register it again.
if !txNode.pendingAccept && txNode.Finalized(dg.params.BetaVirtuous, dg.params.BetaRogue) { dg.deferAcceptance(txNode)
@@ -327,8 +327,9 @@ func (dg *Directed) String() string { return sb.String() } -// deferAcceptance attempts to mark this tx as accepted now or in the future -// once dependencies are accepted +// deferAcceptance attempts to mark this tx as accepted once all its +// dependencies are accepted. If all the dependencies are already accepted, +// this function will immediately accept the tx. func (dg *Directed) deferAcceptance(txNode *directedTx) { // Mark that this tx is pending acceptance so this function won't be called // again
@@ -340,9 +341,8 @@ func (dg *Directed) deferAcceptance(txNode *directedTx) { } for _, dependency := range txNode.tx.Dependencies() { if dependency.Status() != choices.Accepted { - // If the dependency isn't accepted, then it must be processing. So, - // this tx should be accepted after all of these processing txs are - // accepted. + // If the dependency isn't accepted, then it must be processing. + // This tx should be accepted after the dependency is accepted. toAccept.deps.Add(dependency.ID()) } }
@@ -360,7 +360,7 @@ func (dg *Directed) reject(conflictIDs ...ids.ID) error { conflictKey := conflictID.Key() conflict := dg.txs[conflictKey] - // This tx is not longer an option for consuming the UTXOs from its + // This tx is no longer an option for consuming the UTXOs from its // inputs, so we should remove their reference to this tx. for _, inputID := range conflict.tx.InputIDs().List() { inputKey := inputID.Key()
diff --git a/snow/engine/avalanche/transitive_test.go b/snow/engine/avalanche/transitive_test.go index ad76fc9b1811..8a6c90f42431 100644 --- a/snow/engine/avalanche/transitive_test.go +++ b/snow/engine/avalanche/transitive_test.go
@@ -3077,7 +3077,7 @@ func TestEngineAggressivePolling(t *testing.T) { t.Fatalf("should have issued one push query") } if *numPullQueries != 2 { - t.Fatalf("should have issued two pull query") + t.Fatalf("should have issued two pull queries") } }
From a5e198b3bd95cdfa57941a81d4bcd2f027ca26a7 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Wed, 26 Aug 2020 13:21:05 -0400 Subject: [PATCH 17/47] cleaned up repo based on staticcheck --- api/info/service.go | 8 ++-- api/keystore/service.go | 2 +- genesis/network_id.go | 6 +-- ids/bag.go | 2 +- ids/short_set.go | 2 +- ipcs/eventsocket.go | 5 --- main/params.go | 4 +- nat/nat.go | 2 +- network/codec.go | 1 - network/network_test.go | 6 +-- network/peer.go | 3 +- snow/choices/test_decidable.go | 4 +- snow/consensus/avalanche/consensus_test.go | 3 +- snow/consensus/avalanche/metrics.go | 43 +++++++++---------- snow/consensus/avalanche/poll/set.go | 2 +- snow/consensus/snowball/consensus_test.go | 6 --- snow/consensus/snowman/metrics.go | 43 +++++++++---------- snow/consensus/snowman/poll/set.go | 4 +- .../avalanche/bootstrap/bootstrapper.go | 2 +- snow/engine/avalanche/transitive_test.go | 2 - snow/engine/common/queue/test_job.go | 2 +- snow/engine/common/test_bootstrapable.go | 2 +- snow/engine/common/test_engine.go | 40 ++++++++--------- snow/engine/common/test_vm.go | 6 +-- snow/engine/snowman/bootstrap/bootstrapper.go | 2 +- snow/networking/router/chain_router.go | 4 +- snow/networking/router/chain_router_test.go | 8 ++-- snow/networking/router/handler.go | 2 +- snow/networking/router/handler_test.go | 12 +++--- snow/networking/router/service_queue.go | 2 +- snow/networking/router/service_queue_test.go | 2 +- snow/networking/sender/sender_test.go | 4 +- snow/validators/set.go | 11
----- utils/codec/codec_test.go | 2 +- utils/math/safe_math_test.go | 6 +-- vms/avm/create_asset_tx_test.go | 27 +++++++++++- vms/avm/initial_state.go | 29 ------------- vms/avm/service.go | 6 +-- vms/avm/state_test.go | 13 +++--- vms/avm/vm.go | 1 - vms/avm/vm_test.go | 30 ++++++------- vms/components/core/snowman_vm.go | 3 +- vms/nftfx/fx.go | 13 ++---- vms/platformvm/add_subnet_validator_tx.go | 4 +- vms/platformvm/add_validator_tx.go | 1 - vms/platformvm/advance_time_tx.go | 2 +- vms/platformvm/create_chain_tx.go | 11 +++-- vms/platformvm/create_chain_tx_test.go | 14 +++--- vms/platformvm/import_tx.go | 11 ++--- vms/platformvm/import_tx_test.go | 1 - vms/platformvm/reward_validator_tx.go | 1 - vms/platformvm/service.go | 7 +-- vms/platformvm/spend.go | 4 -- vms/platformvm/static_service.go | 11 ----- vms/platformvm/vm.go | 20 +++------ vms/platformvm/vm_test.go | 33 +------------- vms/propertyfx/fx.go | 11 ++--- vms/secp256k1fx/fx.go | 1 - vms/secp256k1fx/fx_test.go | 4 -- vms/secp256k1fx/keychain.go | 3 +- vms/spchainvm/vm.go | 1 - vms/spdagvm/prefixed_state.go | 2 - vms/spdagvm/vm.go | 1 - vms/spdagvm/vm_test.go | 6 +-- vms/timestampvm/service.go | 4 +- xputtest/avmwallet/wallet.go | 3 +- 66 files changed, 198 insertions(+), 335 deletions(-) diff --git a/api/info/service.go b/api/info/service.go index aa594d47662a..5b16274ab3eb 100644 --- a/api/info/service.go +++ b/api/info/service.go @@ -18,8 +18,6 @@ import ( "github.com/ava-labs/gecko/utils/json" "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/version" - - cjson "github.com/ava-labs/gecko/utils/json" ) // Info is the API service for unprivileged info on a node @@ -36,7 +34,7 @@ type Info struct { // NewService returns a new admin API service func NewService(log logging.Logger, version version.Version, nodeID ids.ShortID, networkID uint32, chainManager chains.Manager, peers network.Network, txFee uint64) (*common.HTTPHandler, error) { newServer := rpc.NewServer() - codec := cjson.NewCodec() + codec := json.NewCodec() newServer.RegisterCodec(codec, "application/json") newServer.RegisterCodec(codec, "application/json;charset=UTF-8") if err := newServer.RegisterService(&Info{ @@ -81,14 +79,14 @@ func (service *Info) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDRep // GetNetworkIDReply are the results from calling GetNetworkID type GetNetworkIDReply struct { - NetworkID cjson.Uint32 `json:"networkID"` + NetworkID json.Uint32 `json:"networkID"` } // GetNetworkID returns the network ID this node is running on func (service *Info) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error { service.log.Info("Info: GetNetworkID called") - reply.NetworkID = cjson.Uint32(service.networkID) + reply.NetworkID = json.Uint32(service.networkID) return nil } diff --git a/api/keystore/service.go b/api/keystore/service.go index 500cf31e040e..a5fce7295688 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -57,7 +57,7 @@ const ( var ( errEmptyUsername = errors.New("username can't be the empty string") errUserPassMaxLength = fmt.Errorf("CreateUser call rejected due to username or password exceeding maximum length of %d chars", maxUserPassLen) - errWeakPassword = errors.New("Failed to create user as the given password is too weak") + errWeakPassword = errors.New("failed to create user as the given password is too weak") ) // KeyValuePair ... 
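Most of the error-string rewrites in this patch follow Go's convention that error strings start lower-case and carry no trailing punctuation (staticcheck reports violations as ST1005), because errors are routinely wrapped into longer messages. A minimal sketch of why the convention matters; the names here are illustrative, not from the repo:

package main

import (
	"errors"
	"fmt"
)

// staticcheck (ST1005) flags this form: a capitalized, punctuated error
// string reads badly once wrapped, e.g. "keystore: Failed to create user.".
var errBad = errors.New("Failed to create user.")

// The form this patch standardizes on: lower-case, no trailing period.
var errGood = errors.New("failed to create user")

func main() {
	// Errors usually surface mid-sentence inside a larger message.
	fmt.Println(fmt.Errorf("keystore: %w", errBad))
	fmt.Println(fmt.Errorf("keystore: %w", errGood))
}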
diff --git a/genesis/network_id.go b/genesis/network_id.go index ce00cbabbc31..146c39d1396c 100644 --- a/genesis/network_id.go +++ b/genesis/network_id.go
@@ -30,18 +30,18 @@ func NetworkID(networkName string) (uint32, error) { if id, err := strconv.ParseUint(networkName, 10, 0); err == nil { if id > math.MaxUint32 { - return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) + return 0, fmt.Errorf("networkID %s not in [0, 2^32)", networkName) } return uint32(id), nil } if constants.ValidNetworkName.MatchString(networkName) { if id, err := strconv.Atoi(networkName[8:]); err == nil { if id > math.MaxUint32 { - return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) + return 0, fmt.Errorf("networkID %s not in [0, 2^32)", networkName) } return uint32(id), nil } } - return 0, fmt.Errorf("Failed to parse %s as a network name", networkName) + return 0, fmt.Errorf("failed to parse %s as a network name", networkName) }
diff --git a/ids/bag.go b/ids/bag.go index e6c3f653854d..5d37de188252 100644 --- a/ids/bag.go +++ b/ids/bag.go
@@ -90,7 +90,7 @@ func (b *Bag) Len() int { return b.size } // List returns a list of all ids that have been added. func (b *Bag) List() []ID { - idList := make([]ID, len(b.counts), len(b.counts)) + idList := make([]ID, len(b.counts)) i := 0 for id := range b.counts { idList[i] = NewID(id)
diff --git a/ids/short_set.go b/ids/short_set.go index 9bcd37d2275b..df011bacb69d 100644 --- a/ids/short_set.go +++ b/ids/short_set.go
@@ -80,7 +80,7 @@ func (ids ShortSet) CappedList(size int) []ShortID { // List converts this set into a list func (ids ShortSet) List() []ShortID { - idList := make([]ShortID, len(ids), len(ids)) + idList := make([]ShortID, len(ids)) i := 0 for id := range ids { idList[i] = NewShortID(id)
diff --git a/ipcs/eventsocket.go b/ipcs/eventsocket.go index 2b0476bf5b38..80a7d85779fa 100644 --- a/ipcs/eventsocket.go +++ b/ipcs/eventsocket.go
@@ -15,11 +15,6 @@ import ( "github.com/ava-labs/gecko/utils/wrappers" ) -type chainEventDipatcher struct { - chainID ids.ID - events *triggers.EventDispatcher -} - // EventSockets is a set of named eventSockets type EventSockets struct { consensusSocket *eventSocket
diff --git a/main/params.go b/main/params.go index 94ed698b8778..de74f278fad9 100644 --- a/main/params.go +++ b/main/params.go
@@ -298,7 +298,7 @@ func init() { } if ip == nil { - errs.Add(fmt.Errorf("Invalid IP Address %s", *consensusIP)) + errs.Add(fmt.Errorf("invalid IP address %s", *consensusIP)) return }
@@ -362,7 +362,7 @@ func init() { } } if len(Config.BootstrapPeers) != i { - errs.Add(fmt.Errorf("More bootstrap IPs, %d, provided than bootstrap IDs, %d", len(Config.BootstrapPeers), i)) + errs.Add(fmt.Errorf("more bootstrap IPs, %d, provided than bootstrap IDs, %d", len(Config.BootstrapPeers), i)) return } } else {
diff --git a/nat/nat.go b/nat/nat.go index 079cb357efe3..cc6244cbeda5 100644 --- a/nat/nat.go +++ b/nat/nat.go
@@ -121,7 +121,7 @@ func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, } updateTimer.Reset(mapUpdateTimeout) - case _, _ = <-dev.closer: + case <-dev.closer: return } }
diff --git a/network/codec.go b/network/codec.go index 0d04f92b44b6..6e409209ba29 100644 --- a/network/codec.go +++ b/network/codec.go
@@ -12,7 +12,6 @@ import ( ) var ( - errBadLength = errors.New("stream has unexpected length") errMissingField = errors.New("message missing field") errBadOp = errors.New("input field has invalid operation") )
diff --git a/network/network_test.go b/network/network_test.go index
c2304028636c..5840ee4a2399 100644 --- a/network/network_test.go +++ b/network/network_test.go
@@ -39,7 +39,7 @@ func (l *testListener) Accept() (net.Conn, error) { select { case c := <-l.inbound: return c, nil - case _, _ = <-l.closed: + case <-l.closed: return nil, errClosed } }
@@ -102,7 +102,7 @@ func (c *testConn) Read(b []byte) (int, error) { return 0, errClosed } c.partialRead = read - case _, _ = <-c.closed: + case <-c.closed: return 0, errClosed } }
@@ -122,7 +122,7 @@ func (c *testConn) Write(b []byte) (int, error) { select { case c.pendingWrites <- newB: - case _, _ = <-c.closed: + case <-c.closed: return 0, errClosed }
diff --git a/network/peer.go b/network/peer.go index e73eb5b94fb5..08a10e37bc02 100644 --- a/network/peer.go +++ b/network/peer.go
@@ -4,7 +4,6 @@ package network import ( - "bytes" "math" "net" "sync"
@@ -483,7 +482,7 @@ func (p *peer) version(msg Msg) { if err == nil { // If we have no clue what the peer's IP is, we can't perform any // verification - if bytes.Equal(peerIP.IP, localPeerIP.IP) { + if peerIP.IP.Equal(localPeerIP.IP) { // if the IPs match, add this ip:port pair to be tracked p.net.stateLock.Lock() p.ip = peerIP
diff --git a/snow/choices/test_decidable.go b/snow/choices/test_decidable.go index 8d90c7b77395..1643e619bff9 100644 --- a/snow/choices/test_decidable.go +++ b/snow/choices/test_decidable.go
@@ -23,7 +23,7 @@ func (d *TestDecidable) ID() ids.ID { return d.IDV } func (d *TestDecidable) Accept() error { switch d.StatusV { case Unknown, Rejected: - return fmt.Errorf("Invalid state transaition from %s to %s", + return fmt.Errorf("invalid state transition from %s to %s", d.StatusV, Accepted) default: d.StatusV = Accepted
@@ -35,7 +35,7 @@ func (d *TestDecidable) Accept() error { func (d *TestDecidable) Reject() error { switch d.StatusV { case Unknown, Accepted: - return fmt.Errorf("Invalid state transaition from %s to %s", + return fmt.Errorf("invalid state transition from %s to %s", d.StatusV, Rejected) default: d.StatusV = Rejected
diff --git a/snow/consensus/avalanche/consensus_test.go b/snow/consensus/avalanche/consensus_test.go index 99bb89a6b3f1..f3851d43dace 100644 --- a/snow/consensus/avalanche/consensus_test.go +++ b/snow/consensus/avalanche/consensus_test.go
@@ -19,8 +19,7 @@ import ( ) var ( - Genesis = ids.GenerateTestID() - Tests = []func(*testing.T, Factory){ + Tests = []func(*testing.T, Factory){ MetricsTest, ParamsTest, AddTest,
diff --git a/snow/consensus/avalanche/metrics.go b/snow/consensus/avalanche/metrics.go index 5caaaf767fc6..b599bdb48d39 100644 --- a/snow/consensus/avalanche/metrics.go +++ b/snow/consensus/avalanche/metrics.go
@@ -26,35 +26,32 @@ type metrics struct { func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) error { m.processing = make(map[[32]byte]time.Time) - m.numProcessing = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "vtx_processing", - Help: "Number of currently processing vertices", - }) - m.latAccepted = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "vtx_accepted", - Help: "Latency of accepting from the time the vertex was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) - m.latRejected = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "vtx_rejected", - Help: "Latency of rejecting from the time the vertex was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) + m.numProcessing =
prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "vtx_processing", + Help: "Number of currently processing vertices", + }) + m.latAccepted = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "vtx_accepted", + Help: "Latency of accepting from the time the vertex was issued in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) + m.latRejected = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "vtx_rejected", + Help: "Latency of rejecting from the time the vertex was issued in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) if err := registerer.Register(m.numProcessing); err != nil { - return fmt.Errorf("Failed to register vtx_processing statistics due to %w", err) + return fmt.Errorf("failed to register vtx_processing statistics due to %w", err) } if err := registerer.Register(m.latAccepted); err != nil { - return fmt.Errorf("Failed to register vtx_accepted statistics due to %w", err) + return fmt.Errorf("failed to register vtx_accepted statistics due to %w", err) } if err := registerer.Register(m.latRejected); err != nil { - return fmt.Errorf("Failed to register vtx_rejected statistics due to %w", err) + return fmt.Errorf("failed to register vtx_rejected statistics due to %w", err) } return nil } diff --git a/snow/consensus/avalanche/poll/set.go b/snow/consensus/avalanche/poll/set.go index 19012bdc7e34..21df5a28e7bb 100644 --- a/snow/consensus/avalanche/poll/set.go +++ b/snow/consensus/avalanche/poll/set.go @@ -112,7 +112,7 @@ func (s *set) Vote( s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) delete(s.polls, requestID) // remove the poll from the current set - s.durPolls.Observe(float64(time.Now().Sub(poll.start).Milliseconds())) + s.durPolls.Observe(float64(time.Since(poll.start).Milliseconds())) s.numPolls.Dec() // decrease the metrics return poll.Result(), true } diff --git a/snow/consensus/snowball/consensus_test.go b/snow/consensus/snowball/consensus_test.go index 304bc19d7edf..334999d99594 100644 --- a/snow/consensus/snowball/consensus_test.go +++ b/snow/consensus/snowball/consensus_test.go @@ -11,12 +11,6 @@ import ( "github.com/ava-labs/gecko/ids" ) -// ByzantineFactory implements Factory by returning a byzantine struct -type ByzantineFactory struct{} - -// New implements Factory -func (ByzantineFactory) New() Consensus { return &Byzantine{} } - // Byzantine is a naive implementation of a multi-choice snowball instance type Byzantine struct { // params contains all the configurations of a snowball instance diff --git a/snow/consensus/snowman/metrics.go b/snow/consensus/snowman/metrics.go index 81f7989ea47f..d78d9d7af7dd 100644 --- a/snow/consensus/snowman/metrics.go +++ b/snow/consensus/snowman/metrics.go @@ -26,35 +26,32 @@ type metrics struct { func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) error { m.processing = make(map[[32]byte]time.Time) - m.numProcessing = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "processing", - Help: "Number of currently processing blocks", - }) - m.latAccepted = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "accepted", - Help: "Latency of accepting from the time the block was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) - m.latRejected = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: namespace, - Name: "rejected", - Help: "Latency of rejecting from the time the 
block was issued in milliseconds", - Buckets: timer.MillisecondsBuckets, - }) + m.numProcessing = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "processing", + Help: "Number of currently processing blocks", + }) + m.latAccepted = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "accepted", + Help: "Latency of accepting from the time the block was issued in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) + m.latRejected = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "rejected", + Help: "Latency of rejecting from the time the block was issued in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) if err := registerer.Register(m.numProcessing); err != nil { - return fmt.Errorf("Failed to register processing statistics due to %w", err) + return fmt.Errorf("failed to register processing statistics due to %w", err) } if err := registerer.Register(m.latAccepted); err != nil { - return fmt.Errorf("Failed to register accepted statistics due to %w", err) + return fmt.Errorf("failed to register accepted statistics due to %w", err) } if err := registerer.Register(m.latRejected); err != nil { - return fmt.Errorf("Failed to register rejected statistics due to %w", err) + return fmt.Errorf("failed to register rejected statistics due to %w", err) } return nil } diff --git a/snow/consensus/snowman/poll/set.go b/snow/consensus/snowman/poll/set.go index 31dc89e65d1a..f419f68f90a4 100644 --- a/snow/consensus/snowman/poll/set.go +++ b/snow/consensus/snowman/poll/set.go @@ -112,7 +112,7 @@ func (s *set) Vote( s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) delete(s.polls, requestID) // remove the poll from the current set - s.durPolls.Observe(float64(time.Now().Sub(poll.start).Milliseconds())) + s.durPolls.Observe(float64(time.Since(poll.start).Milliseconds())) s.numPolls.Dec() // decrease the metrics return poll.Result(), true } @@ -140,7 +140,7 @@ func (s *set) Drop(requestID uint32, vdr ids.ShortID) (ids.Bag, bool) { s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) delete(s.polls, requestID) // remove the poll from the current set - s.durPolls.Observe(float64(time.Now().Sub(poll.start).Milliseconds())) + s.durPolls.Observe(float64(time.Since(poll.start).Milliseconds())) s.numPolls.Dec() // decrease the metrics return poll.Result(), true } diff --git a/snow/engine/avalanche/bootstrap/bootstrapper.go b/snow/engine/avalanche/bootstrap/bootstrapper.go index ac3ef342c0ee..05dcb23b6baf 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -138,7 +138,7 @@ func (b *Bootstrapper) fetch(vtxIDs ...ids.ID) error { validators, err := b.Validators.Sample(1) // validator to send request to if err != nil { - return fmt.Errorf("Dropping request for %s as there are no validators", vtxID) + return fmt.Errorf("dropping request for %s as there are no validators", vtxID) } validatorID := validators[0].ID() b.RequestID++ diff --git a/snow/engine/avalanche/transitive_test.go b/snow/engine/avalanche/transitive_test.go index 8a6c90f42431..ac8f0aec328f 100644 --- a/snow/engine/avalanche/transitive_test.go +++ b/snow/engine/avalanche/transitive_test.go @@ -24,8 +24,6 @@ var ( errUnknownVertex = errors.New("unknown vertex") errFailedParsing = errors.New("failed parsing") errMissing = errors.New("missing") - - Genesis = ids.GenerateTestID() ) func TestEngineShutdown(t *testing.T) { diff --git 
a/snow/engine/common/queue/test_job.go b/snow/engine/common/queue/test_job.go index 3c50f49becc8..8cb8c9cf4bf0 100644 --- a/snow/engine/common/queue/test_job.go +++ b/snow/engine/common/queue/test_job.go @@ -62,7 +62,7 @@ func (j *TestJob) Execute() error { } else if j.CantExecute && j.T != nil { j.T.Fatalf("Unexpectedly called Execute") } - return errors.New("Unexpectedly called Execute") + return errors.New("unexpectedly called Execute") } // Bytes ... diff --git a/snow/engine/common/test_bootstrapable.go b/snow/engine/common/test_bootstrapable.go index b9eac91c81d1..a25a05076f61 100644 --- a/snow/engine/common/test_bootstrapable.go +++ b/snow/engine/common/test_bootstrapable.go @@ -60,7 +60,7 @@ func (b *BootstrapableTest) ForceAccepted(containerIDs ids.Set) error { if b.T != nil { b.T.Fatalf("Unexpectedly called ForceAccepted") } - return errors.New("Unexpectedly called ForceAccepted") + return errors.New("unexpectedly called ForceAccepted") } return nil } diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go index dafbf1ab3691..9bc1d3687102 100644 --- a/snow/engine/common/test_engine.go +++ b/snow/engine/common/test_engine.go @@ -113,7 +113,7 @@ func (e *EngineTest) Startup() error { if e.T != nil { e.T.Fatalf("Unexpectedly called Startup") } - return errors.New("Unexpectedly called Startup") + return errors.New("unexpectedly called Startup") } // Gossip ... @@ -127,7 +127,7 @@ func (e *EngineTest) Gossip() error { if e.T != nil { e.T.Fatalf("Unexpectedly called Gossip") } - return errors.New("Unexpectedly called Gossip") + return errors.New("unexpectedly called Gossip") } // Shutdown ... @@ -141,7 +141,7 @@ func (e *EngineTest) Shutdown() error { if e.T != nil { e.T.Fatalf("Unexpectedly called Shutdown") } - return errors.New("Unexpectedly called Shutdown") + return errors.New("unexpectedly called Shutdown") } // Notify ... @@ -155,7 +155,7 @@ func (e *EngineTest) Notify(msg Message) error { if e.T != nil { e.T.Fatalf("Unexpectedly called Notify") } - return errors.New("Unexpectedly called Notify") + return errors.New("unexpectedly called Notify") } // GetAcceptedFrontier ... @@ -169,7 +169,7 @@ func (e *EngineTest) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint if e.T != nil { e.T.Fatalf("Unexpectedly called GetAcceptedFrontier") } - return errors.New("Unexpectedly called GetAcceptedFrontier") + return errors.New("unexpectedly called GetAcceptedFrontier") } // GetAcceptedFrontierFailed ... @@ -183,7 +183,7 @@ func (e *EngineTest) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestI if e.T != nil { e.T.Fatalf("Unexpectedly called GetAcceptedFrontierFailed") } - return errors.New("Unexpectedly called GetAcceptedFrontierFailed") + return errors.New("unexpectedly called GetAcceptedFrontierFailed") } // AcceptedFrontier ... @@ -197,7 +197,7 @@ func (e *EngineTest) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, if e.T != nil { e.T.Fatalf("Unexpectedly called AcceptedFrontierF") } - return errors.New("Unexpectedly called AcceptedFrontierF") + return errors.New("unexpectedly called AcceptedFrontierF") } // GetAccepted ... @@ -211,7 +211,7 @@ func (e *EngineTest) GetAccepted(validatorID ids.ShortID, requestID uint32, cont if e.T != nil { e.T.Fatalf("Unexpectedly called GetAccepted") } - return errors.New("Unexpectedly called GetAccepted") + return errors.New("unexpectedly called GetAccepted") } // GetAcceptedFailed ... 
@@ -225,7 +225,7 @@ func (e *EngineTest) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32 if e.T != nil { e.T.Fatalf("Unexpectedly called GetAcceptedFailed") } - return errors.New("Unexpectedly called GetAcceptedFailed") + return errors.New("unexpectedly called GetAcceptedFailed") } // Accepted ... @@ -239,7 +239,7 @@ func (e *EngineTest) Accepted(validatorID ids.ShortID, requestID uint32, contain if e.T != nil { e.T.Fatalf("Unexpectedly called Accepted") } - return errors.New("Unexpectedly called Accepted") + return errors.New("unexpectedly called Accepted") } // Get ... @@ -253,7 +253,7 @@ func (e *EngineTest) Get(validatorID ids.ShortID, requestID uint32, containerID if e.T != nil { e.T.Fatalf("Unexpectedly called Get") } - return errors.New("Unexpectedly called Get") + return errors.New("unexpectedly called Get") } // GetAncestors ... @@ -267,7 +267,7 @@ func (e *EngineTest) GetAncestors(validatorID ids.ShortID, requestID uint32, con if e.T != nil { e.T.Fatalf("Unexpectedly called GetAncestors") } - return errors.New("Unexpectedly called GetAncestors") + return errors.New("unexpectedly called GetAncestors") } @@ -282,7 +282,7 @@ func (e *EngineTest) GetFailed(validatorID ids.ShortID, requestID uint32) error if e.T != nil { e.T.Fatalf("Unexpectedly called GetFailed") } - return errors.New("Unexpectedly called GetFailed") + return errors.New("unexpectedly called GetFailed") } // GetAncestorsFailed ... @@ -296,7 +296,7 @@ func (e *EngineTest) GetAncestorsFailed(validatorID ids.ShortID, requestID uint3 if e.T != nil { e.T.Fatalf("Unexpectedly called GetAncestorsFailed") } - return errors.New("Unexpectedly called GetAncestorsFailed") + return errors.New("unexpectedly called GetAncestorsFailed") } // Put ... @@ -310,7 +310,7 @@ func (e *EngineTest) Put(validatorID ids.ShortID, requestID uint32, containerID if e.T != nil { e.T.Fatalf("Unexpectedly called Put") } - return errors.New("Unexpectedly called Put") + return errors.New("unexpectedly called Put") } // MultiPut ... @@ -324,7 +324,7 @@ func (e *EngineTest) MultiPut(validatorID ids.ShortID, requestID uint32, contain if e.T != nil { e.T.Fatalf("Unexpectedly called MultiPut") } - return errors.New("Unexpectedly called MultiPut") + return errors.New("unexpectedly called MultiPut") } // PushQuery ... @@ -338,7 +338,7 @@ func (e *EngineTest) PushQuery(validatorID ids.ShortID, requestID uint32, contai if e.T != nil { e.T.Fatalf("Unexpectedly called PushQuery") } - return errors.New("Unexpectedly called PushQuery") + return errors.New("unexpectedly called PushQuery") } // PullQuery ... @@ -352,7 +352,7 @@ func (e *EngineTest) PullQuery(validatorID ids.ShortID, requestID uint32, contai if e.T != nil { e.T.Fatalf("Unexpectedly called PullQuery") } - return errors.New("Unexpectedly called PullQuery") + return errors.New("unexpectedly called PullQuery") } // QueryFailed ... @@ -366,7 +366,7 @@ func (e *EngineTest) QueryFailed(validatorID ids.ShortID, requestID uint32) erro if e.T != nil { e.T.Fatalf("Unexpectedly called QueryFailed") } - return errors.New("Unexpectedly called QueryFailed") + return errors.New("unexpectedly called QueryFailed") } // Chits ... @@ -380,7 +380,7 @@ func (e *EngineTest) Chits(validatorID ids.ShortID, requestID uint32, containerI if e.T != nil { e.T.Fatalf("Unexpectedly called Chits") } - return errors.New("Unexpectedly called Chits") + return errors.New("unexpectedly called Chits") } // IsBootstrapped ... 
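Every stub in test_engine.go follows the same three-step fallback, of which the hunks above only show the tail: call the injected function if one is set, fail the test if the call was unexpected and a *testing.T is available, and otherwise return the now lower-cased error. A condensed sketch of that shape; FakeEngine, GossipF, and CantGossip mirror the real helpers but are assumed names for illustration:

package enginetest

import (
	"errors"
	"testing"
)

// FakeEngine is a cut-down illustration of the EngineTest double.
type FakeEngine struct {
	T          *testing.T
	CantGossip bool
	GossipF    func() error
}

// Gossip prefers the injected callback, then fails the test loudly, and
// finally returns an error so callers without a *testing.T still get a
// signal.
func (e *FakeEngine) Gossip() error {
	if e.GossipF != nil {
		return e.GossipF()
	}
	if !e.CantGossip {
		return nil
	}
	if e.T != nil {
		e.T.Fatalf("Unexpectedly called Gossip")
	}
	return errors.New("unexpectedly called Gossip")
}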
diff --git a/snow/engine/common/test_vm.go b/snow/engine/common/test_vm.go index c693a45a76c9..bb5c8338df41 100644 --- a/snow/engine/common/test_vm.go +++ b/snow/engine/common/test_vm.go @@ -56,7 +56,7 @@ func (vm *TestVM) Bootstrapping() error { if vm.T != nil { vm.T.Fatalf("Unexpectedly called Bootstrapping") } - return errors.New("Unexpectedly called Bootstrapping") + return errors.New("unexpectedly called Bootstrapping") } return nil } @@ -69,7 +69,7 @@ func (vm *TestVM) Bootstrapped() error { if vm.T != nil { vm.T.Fatalf("Unexpectedly called Bootstrapped") } - return errors.New("Unexpectedly called Bootstrapped") + return errors.New("unexpectedly called Bootstrapped") } return nil } @@ -82,7 +82,7 @@ func (vm *TestVM) Shutdown() error { if vm.T != nil { vm.T.Fatalf("Unexpectedly called Shutdown") } - return errors.New("Unexpectedly called Shutdown") + return errors.New("unexpectedly called Shutdown") } return nil } diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go index 400520b32556..080ea3d1f03a 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/snow/engine/snowman/bootstrap/bootstrapper.go @@ -133,7 +133,7 @@ func (b *Bootstrapper) fetch(blkID ids.ID) error { validators, err := b.Validators.Sample(1) // validator to send request to if err != nil { - return fmt.Errorf("Dropping request for %s as there are no validators", blkID) + return fmt.Errorf("dropping request for %s as there are no validators", blkID) } validatorID := validators[0].ID() b.RequestID++ diff --git a/snow/networking/router/chain_router.go b/snow/networking/router/chain_router.go index cd28209c225e..d581a65c030f 100644 --- a/snow/networking/router/chain_router.go +++ b/snow/networking/router/chain_router.go @@ -89,7 +89,7 @@ func (sr *ChainRouter) RemoveChain(chainID ids.ID) { ticker := time.NewTicker(sr.closeTimeout) select { - case _, _ = <-chain.closed: + case <-chain.closed: case <-ticker.C: chain.Context().Log.Warn("timed out while shutting down") } @@ -355,7 +355,7 @@ func (sr *ChainRouter) Shutdown() { timedout := false for _, chain := range prevChains { select { - case _, _ = <-chain.closed: + case <-chain.closed: case <-ticker.C: timedout = true } diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 471f280a525d..3ab075a7b53b 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -53,9 +53,9 @@ func TestShutdown(t *testing.T) { ticker := time.NewTicker(20 * time.Millisecond) select { - case _, _ = <-ticker.C: + case <-ticker.C: t.Fatalf("Handler shutdown was not called or timed out after 20ms during chainRouter shutdown") - case _, _ = <-shutdownCalled: + case <-shutdownCalled: } select { @@ -118,8 +118,8 @@ func TestShutdownTimesOut(t *testing.T) { }() select { - case _, _ = <-engineFinished: + case <-engineFinished: t.Fatalf("Shutdown should have finished in one millisecond before timing out instead of waiting for engine to finish shutting down.") - case _, _ = <-shutdownFinished: + case <-shutdownFinished: } } diff --git a/snow/networking/router/handler.go b/snow/networking/router/handler.go index 2ccc397f1354..f52f8cbc0b82 100644 --- a/snow/networking/router/handler.go +++ b/snow/networking/router/handler.go @@ -470,7 +470,7 @@ func (h *Handler) shutdownDispatch() { go h.toClose() } h.closing = true - h.shutdown.Observe(float64(time.Now().Sub(startTime))) + h.shutdown.Observe(float64(time.Since(startTime))) close(h.closed) } 
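Two mechanical cleanups recur throughout the networking hunks above: the redundant blank identifiers in "case _, _ = <-ch:" are dropped, and "time.Now().Sub(x)" becomes "time.Since(x)" (the simplification staticcheck suggests as S1012). A self-contained sketch with placeholder names:

package main

import (
	"fmt"
	"time"
)

// wait shows both post-cleanup forms; doneCh and start are placeholders.
func wait(doneCh <-chan struct{}, start time.Time) {
	// Before: case _, _ = <-doneCh: inside a select; the two blanks add
	// nothing, so a bare receive is equivalent.
	<-doneCh
	// Before: time.Now().Sub(start); time.Since is the idiomatic spelling.
	fmt.Println(time.Since(start))
}

func main() {
	doneCh := make(chan struct{})
	close(doneCh)
	wait(doneCh, time.Now())
}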
diff --git a/snow/networking/router/handler_test.go b/snow/networking/router/handler_test.go index 5965071b743b..e7fdb1a3357c 100644 --- a/snow/networking/router/handler_test.go +++ b/snow/networking/router/handler_test.go @@ -58,9 +58,9 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { ticker := time.NewTicker(50 * time.Millisecond) defer ticker.Stop() select { - case _, _ = <-ticker.C: + case <-ticker.C: t.Fatalf("Calling engine function timed out") - case _, _ = <-called: + case <-called: } } @@ -95,9 +95,9 @@ func TestHandlerDoesntDrop(t *testing.T) { ticker := time.NewTicker(20 * time.Millisecond) defer ticker.Stop() select { - case _, _ = <-ticker.C: + case <-ticker.C: t.Fatalf("Calling engine function timed out") - case _, _ = <-called: + case <-called: } } @@ -134,8 +134,8 @@ func TestHandlerClosesOnError(t *testing.T) { ticker := time.NewTicker(20 * time.Millisecond) select { - case _, _ = <-ticker.C: + case <-ticker.C: t.Fatalf("Handler shutdown timed out before calling toClose") - case _, _ = <-closed: + case <-closed: } } diff --git a/snow/networking/router/service_queue.go b/snow/networking/router/service_queue.go index e487fb87bcb9..e054129465b6 100644 --- a/snow/networking/router/service_queue.go +++ b/snow/networking/router/service_queue.go @@ -16,7 +16,7 @@ import ( ) var ( - errNoMessages = errors.New("No messages remaining on queue") + errNoMessages = errors.New("no messages remaining on queue") ) type messageQueue interface { diff --git a/snow/networking/router/service_queue_test.go b/snow/networking/router/service_queue_test.go index b545fa0783c3..b79098864695 100644 --- a/snow/networking/router/service_queue_test.go +++ b/snow/networking/router/service_queue_test.go @@ -86,7 +86,7 @@ func TestMultiLevelQueueSendsMessages(t *testing.T) { // Ensure that the 6th message was never added to the queue select { - case _ = <-semaChan: + case <-semaChan: t.Fatal("Semaphore channel should have been empty after reading all messages from the queue") default: } diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 5d63b5a48d28..9b296b8f3f2e 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -150,7 +150,7 @@ func TestReliableMessages(t *testing.T) { }() for _, await := range awaiting { - _, _ = <-await + <-await } } @@ -216,6 +216,6 @@ func TestReliableMessagesToMyself(t *testing.T) { }() for _, await := range awaiting { - _, _ = <-await + <-await } } diff --git a/snow/validators/set.go b/snow/validators/set.go index ef967ae55e26..039553e167ee 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -295,17 +295,6 @@ func (s *set) Weight() uint64 { return s.totalWeight } -func (s *set) calculateWeight() (uint64, error) { - weight := uint64(0) - for _, vdr := range s.vdrSlice { - weight, err := safemath.Add64(weight, vdr.Weight()) - if err != nil { - return weight, err - } - } - return weight, nil -} - func (s *set) String() string { s.lock.Lock() defer s.lock.Unlock() diff --git a/utils/codec/codec_test.go b/utils/codec/codec_test.go index 8d4f059ba9f6..e48ed3411a6c 100644 --- a/utils/codec/codec_test.go +++ b/utils/codec/codec_test.go @@ -148,7 +148,7 @@ func TestSlice(t *testing.T) { // Test marshalling/unmarshalling largest possible slice func TestMaxSizeSlice(t *testing.T) { - mySlice := make([]string, math.MaxUint16, math.MaxUint16) + mySlice := make([]string, math.MaxUint16) mySlice[0] = "first!" mySlice[math.MaxUint16-1] = "last!" 
codec := NewDefault() diff --git a/utils/math/safe_math_test.go b/utils/math/safe_math_test.go index 47f65f994395..e5653ea7bf5f 100644 --- a/utils/math/safe_math_test.go +++ b/utils/math/safe_math_test.go @@ -57,17 +57,17 @@ func TestAdd64(t *testing.T) { t.Fatalf("Expected %d, got %d", uint64(1<<63), sum) } - sum, err = Add64(1, maxUint64) + _, err = Add64(1, maxUint64) if err == nil { t.Fatalf("Add64 succeeded unexpectedly") } - sum, err = Add64(maxUint64, 1) + _, err = Add64(maxUint64, 1) if err == nil { t.Fatalf("Add64 succeeded unexpectedly") } - sum, err = Add64(maxUint64, maxUint64) + _, err = Add64(maxUint64, maxUint64) if err == nil { t.Fatalf("Add64 succeeded unexpectedly") } diff --git a/vms/avm/create_asset_tx_test.go b/vms/avm/create_asset_tx_test.go index 18a77bfb6d85..b939080f10fc 100644 --- a/vms/avm/create_asset_tx_test.go +++ b/vms/avm/create_asset_tx_test.go @@ -21,7 +21,7 @@ var ( illegalNameCharacter = "h8*32" invalidASCIIStr = "ÉÎ" invalidWhitespaceStr = " HAT" - denominationTooLarge = maxDenomination + 1 + denominationTooLarge = byte(maxDenomination + 1) ) func validCreateAssetTx(t *testing.T) (*CreateAssetTx, codec.Codec, *snow.Context) { @@ -461,7 +461,7 @@ func TestCreateAssetTxSyntacticVerifyDenominationTooLong(t *testing.T) { }}, Name: "BRADY", Symbol: "TOM", - Denomination: 33, + Denomination: denominationTooLarge, States: []*InitialState{{ FxID: 0, }}, @@ -519,6 +519,29 @@ func TestCreateAssetTxSyntacticVerifyNameWithInvalidCharacter(t *testing.T) { } } +func TestCreateAssetTxSyntacticVerifyNameWithUnicodeCharacter(t *testing.T) { + ctx := NewContext(t) + c := setupCodec() + + tx := &CreateAssetTx{ + BaseTx: BaseTx{BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + }}, + Name: illegalNameCharacter, + Symbol: "TOM", + Denomination: 0, + States: []*InitialState{{ + FxID: 0, + }}, + } + tx.Initialize(nil, nil) + + if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 1); err == nil { + t.Fatalf("Name with an invalid character should have errored") + } +} + func TestCreateAssetTxSyntacticVerifySymbolWithInvalidCharacter(t *testing.T) { ctx := NewContext(t) c := setupCodec() diff --git a/vms/avm/initial_state.go b/vms/avm/initial_state.go index b8fb1a9ce5d8..40a2e71e0fb7 100644 --- a/vms/avm/initial_state.go +++ b/vms/avm/initial_state.go @@ -52,35 +52,6 @@ func (is *InitialState) Verify(c codec.Codec, numFxs int) error { // Sort ... 
func (is *InitialState) Sort(c codec.Codec) { sortState(is.Outs, c) } -type innerSortVerifiables struct { - vers []verify.Verifiable - codec codec.Codec -} - -func (vers *innerSortVerifiables) Less(i, j int) bool { - iVer := vers.vers[i] - jVer := vers.vers[j] - - iBytes, err := vers.codec.Marshal(&iVer) - if err != nil { - return false - } - jBytes, err := vers.codec.Marshal(&jVer) - if err != nil { - return false - } - return bytes.Compare(iBytes, jBytes) == -1 -} -func (vers *innerSortVerifiables) Len() int { return len(vers.vers) } -func (vers *innerSortVerifiables) Swap(i, j int) { v := vers.vers; v[j], v[i] = v[i], v[j] } - -func sortVerifiables(vers []verify.Verifiable, c codec.Codec) { - sort.Sort(&innerSortVerifiables{vers: vers, codec: c}) -} -func isSortedVerifiables(vers []verify.Verifiable, c codec.Codec) bool { - return sort.IsSorted(&innerSortVerifiables{vers: vers, codec: c}) -} - type innerSortState struct { vers []verify.State codec codec.Codec diff --git a/vms/avm/service.go b/vms/avm/service.go index 48d854615946..7ff0df166b5d 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -372,8 +372,8 @@ func (service *Service) GetAllBalances(r *http.Request, args *api.JsonAddress, r return fmt.Errorf("couldn't get address's UTXOs: %s", err) } - assetIDs := ids.Set{} // IDs of assets the address has a non-zero balance of - balances := make(map[[32]byte]uint64, 0) // key: ID (as bytes). value: balance of that asset + assetIDs := ids.Set{} // IDs of assets the address has a non-zero balance of + balances := make(map[[32]byte]uint64) // key: ID (as bytes). value: balance of that asset for _, utxo := range utxos { transferable, ok := utxo.Out.(avax.TransferableOut) if !ok { @@ -908,7 +908,7 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *api.JsonTxI memoBytes := []byte(args.Memo) if l := len(memoBytes); l > avax.MaxMemoSize { - return fmt.Errorf("Max memo length is %d but provided memo field is length %d", avax.MaxMemoSize, l) + return fmt.Errorf("max memo length is %d but provided memo field is length %d", avax.MaxMemoSize, l) } else if args.Amount == 0 { return errInvalidAmount } diff --git a/vms/avm/state_test.go b/vms/avm/state_test.go index cd9e3d306370..07c6b3eb2909 100644 --- a/vms/avm/state_test.go +++ b/vms/avm/state_test.go @@ -17,7 +17,7 @@ import ( // Test function IDs when argument start is empty func TestStateIDsNoStart(t *testing.T) { - _, _, vm , _ := GenesisVM(t) + _, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -56,6 +56,9 @@ func TestStateIDsNoStart(t *testing.T) { } result, err = state.IDs(ids.Empty.Bytes(), []byte{}, math.MaxInt32) + if err != nil { + t.Fatal(err) + } if len(result) != len(expected) { t.Fatalf("Returned the wrong number of ids") } @@ -149,7 +152,7 @@ func TestStateIDsNoStart(t *testing.T) { } func TestStateIDsWithStart(t *testing.T) { - _, _, vm , _ := GenesisVM(t) + _, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -195,7 +198,7 @@ func TestStateIDsWithStart(t *testing.T) { } func TestStateStatuses(t *testing.T) { - _, _, vm , _ := GenesisVM(t) + _, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -242,7 +245,7 @@ func TestStateStatuses(t *testing.T) { } func TestStateUTXOs(t *testing.T) { - _, _, vm , _ := GenesisVM(t) + _, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { vm.Shutdown() @@ -318,7 +321,7 @@ func TestStateUTXOs(t *testing.T) { } func TestStateTXs(t *testing.T) { - _, _, vm , _ := GenesisVM(t) + _, _, vm, _ := GenesisVM(t) 
ctx := vm.ctx defer func() { vm.Shutdown() diff --git a/vms/avm/vm.go b/vms/avm/vm.go index e5d5a0071ac3..1e3750d1d2ca 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -49,7 +49,6 @@ var ( errIncompatibleFx = errors.New("incompatible feature extension") errUnknownFx = errors.New("unknown feature extension") errGenesisAssetMustHaveState = errors.New("genesis asset must have non-empty state") - errInvalidAddress = errors.New("invalid address") errWrongBlockchainID = errors.New("wrong blockchain ID") errBootstrapping = errors.New("chain is currently bootstrapping") errInsufficientFunds = errors.New("insufficient funds") diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index c56808d9be21..7c7ff45e7001 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -96,23 +96,23 @@ func GetFirstTxFromGenesisTest(genesisBytes []byte, t *testing.T) *Tx { t.Fatal(err) } - for _, genesisTx := range genesis.Txs { - if len(genesisTx.Outs) != 0 { - t.Fatal("genesis tx can't have non-new assets") - } + if len(genesis.Txs) == 0 { + t.Fatal("genesis tx didn't have any txs") + } - tx := Tx{ - UnsignedTx: &genesisTx.CreateAssetTx, - } - if err := tx.SignSECP256K1Fx(c, nil); err != nil { - t.Fatal(err) - } + genesisTx := genesis.Txs[0] + if len(genesisTx.Outs) != 0 { + t.Fatal("genesis tx can't have non-new assets") + } - return &tx + tx := Tx{ + UnsignedTx: &genesisTx.CreateAssetTx, + } + if err := tx.SignSECP256K1Fx(c, nil); err != nil { + t.Fatal(err) } - t.Fatal("genesis tx didn't have any txs") - return nil + return &tx } func BuildGenesisTest(t *testing.T) []byte { @@ -516,10 +516,6 @@ func TestFxInitializationFailure(t *testing.T) { } } -type testTxBytes struct{ unsignedBytes []byte } - -func (tx *testTxBytes) UnsignedBytes() []byte { return tx.unsignedBytes } - func TestIssueTx(t *testing.T) { genesisBytes, issuer, vm, _ := GenesisVM(t) ctx := vm.ctx diff --git a/vms/components/core/snowman_vm.go b/vms/components/core/snowman_vm.go index cbc058c0bb8b..13d1ff5950ba 100644 --- a/vms/components/core/snowman_vm.go +++ b/vms/components/core/snowman_vm.go @@ -20,8 +20,7 @@ import ( ) var ( - errUnmarshalBlockUndefined = errors.New("vm's UnmarshalBlock member is undefined") - errBadData = errors.New("got unexpected value from database") + errBadData = errors.New("got unexpected value from database") ) // If the status of this ID is not choices.Accepted, diff --git a/vms/nftfx/fx.go b/vms/nftfx/fx.go index 1440ff038b66..7edd7426f600 100644 --- a/vms/nftfx/fx.go +++ b/vms/nftfx/fx.go @@ -14,15 +14,10 @@ var ( errWrongUTXOType = errors.New("wrong utxo type") errWrongOperationType = errors.New("wrong operation type") errWrongCredentialType = errors.New("wrong credential type") - - errNoUTXOs = errors.New("an operation must consume at least one UTXO") - errWrongNumberOfUTXOs = errors.New("wrong number of UTXOs for the operation") - errWrongNumberOfCreds = errors.New("wrong number of credentials for the operation") - - errWrongUniqueID = errors.New("wrong unique ID provided") - errWrongBytes = errors.New("wrong bytes provided") - - errCantTransfer = errors.New("cant transfer with this fx") + errWrongNumberOfUTXOs = errors.New("wrong number of UTXOs for the operation") + errWrongUniqueID = errors.New("wrong unique ID provided") + errWrongBytes = errors.New("wrong bytes provided") + errCantTransfer = errors.New("cant transfer with this fx") ) // Fx ... 
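A large share of patch 17 is pure deletion: declarations such as errInvalidAddress, testTxBytes, and the extra nftfx error variables above had no remaining references, which staticcheck's unused checker reports. The fix is simply removal; a minimal reproduction of what gets flagged, with a hypothetical name:

package main

import "errors"

// errNever is declared but never referenced anywhere in the package, so
// staticcheck's unused checker would report it. Deleting the declaration
// is the entire fix; the compiler itself does not object to unused
// package-level variables, which is why these linger until a linter runs.
var errNever = errors.New("never returned")

func main() {}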
diff --git a/vms/platformvm/add_subnet_validator_tx.go b/vms/platformvm/add_subnet_validator_tx.go index 981f961d0e6b..2082f2f3735d 100644 --- a/vms/platformvm/add_subnet_validator_tx.go +++ b/vms/platformvm/add_subnet_validator_tx.go @@ -20,9 +20,7 @@ import ( ) var ( - errSigsNotUniqueOrNotSorted = errors.New("control signatures not unique or not sorted") - errWrongNumberOfSignatures = errors.New("wrong number of signatures") - errDSValidatorSubset = errors.New("all subnets must be a subset of the primary network") + errDSValidatorSubset = errors.New("all subnets must be a subset of the primary network") _ UnsignedProposalTx = &UnsignedAddSubnetValidatorTx{} _ TimedTx = &UnsignedAddSubnetValidatorTx{} diff --git a/vms/platformvm/add_validator_tx.go b/vms/platformvm/add_validator_tx.go index 57ea89042e6b..9e4261854a96 100644 --- a/vms/platformvm/add_validator_tx.go +++ b/vms/platformvm/add_validator_tx.go @@ -24,7 +24,6 @@ import ( var ( errNilTx = errors.New("tx is nil") - errWrongNetworkID = errors.New("tx was issued with a different network ID") errWeightTooSmall = errors.New("weight of this validator is too low") errStakeTooShort = errors.New("staking period is too short") errStakeTooLong = errors.New("staking period is too long") diff --git a/vms/platformvm/advance_time_tx.go b/vms/platformvm/advance_time_tx.go index a1173c3f09d4..e98a67a6b342 100644 --- a/vms/platformvm/advance_time_tx.go +++ b/vms/platformvm/advance_time_tx.go @@ -103,7 +103,7 @@ func (tx *UnsignedAdvanceTimeTx) SemanticVerify( // Key: Subnet ID // Value: IDs of validators that will have started validating this Subnet when // timestamp is advanced to tx.Timestamp() - startedValidating := make(map[[32]byte]ids.ShortSet, 0) + startedValidating := make(map[[32]byte]ids.ShortSet) subnets, err := vm.getSubnets(db) if err != nil { return nil, nil, nil, nil, tempError{err} diff --git a/vms/platformvm/create_chain_tx.go b/vms/platformvm/create_chain_tx.go index 1e431a7d37b9..4a3af086f857 100644 --- a/vms/platformvm/create_chain_tx.go +++ b/vms/platformvm/create_chain_tx.go @@ -19,12 +19,11 @@ import ( ) var ( - errInvalidVMID = errors.New("invalid VM ID") - errFxIDsNotSortedAndUnique = errors.New("feature extensions IDs must be sorted and unique") - errControlSigsNotSortedAndUnique = errors.New("control signatures must be sorted and unique") - errNameTooLong = errors.New("name too long") - errGenesisTooLong = errors.New("genesis too long") - errIllegalNameCharacter = errors.New("illegal name character") + errInvalidVMID = errors.New("invalid VM ID") + errFxIDsNotSortedAndUnique = errors.New("feature extensions IDs must be sorted and unique") + errNameTooLong = errors.New("name too long") + errGenesisTooLong = errors.New("genesis too long") + errIllegalNameCharacter = errors.New("illegal name character") _ UnsignedDecisionTx = &UnsignedCreateChainTx{} ) diff --git a/vms/platformvm/create_chain_tx_test.go b/vms/platformvm/create_chain_tx_test.go index 388e29498881..a95057beefa9 100644 --- a/vms/platformvm/create_chain_tx_test.go +++ b/vms/platformvm/create_chain_tx_test.go @@ -14,7 +14,7 @@ import ( ) func TestUnsignedCreateChainTxVerify(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -43,7 +43,7 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { fxIDs: nil, chainName: "yeet", keys: []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - setup: func(tx *UnsignedCreateChainTx) *UnsignedCreateChainTx { tx = nil; return tx }, 
+ setup: func(*UnsignedCreateChainTx) *UnsignedCreateChainTx { return nil }, }, { description: "vm ID is empty", @@ -146,7 +146,7 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { // Ensure SemanticVerify fails when there are not enough control sigs func TestCreateChainTxInsufficientControlSigs(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -174,7 +174,7 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) { // Ensure SemanticVerify fails when an incorrect control signature is given func TestCreateChainTxWrongControlSig(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -214,7 +214,7 @@ func TestCreateChainTxWrongControlSig(t *testing.T) { // Ensure SemanticVerify fails when the Subnet the blockchain specifies as // its validator set doesn't exist func TestCreateChainTxNoSuchSubnet(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -239,7 +239,7 @@ func TestCreateChainTxNoSuchSubnet(t *testing.T) { } func TestCreateChainTxAlreadyExists(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() @@ -272,7 +272,7 @@ func TestCreateChainTxAlreadyExists(t *testing.T) { // Ensure valid tx passes semanticVerify func TestCreateChainTxValid(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() diff --git a/vms/platformvm/import_tx.go b/vms/platformvm/import_tx.go index 3a872bfb55c3..cf979c4e3a0a 100644 --- a/vms/platformvm/import_tx.go +++ b/vms/platformvm/import_tx.go @@ -18,13 +18,10 @@ import ( ) var ( - errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") - errWrongNumberOfCredentials = errors.New("should have the same number of credentials as inputs") - errNoInputs = errors.New("tx has no inputs") - errNoImportInputs = errors.New("tx has no imported inputs") - errInputsNotSortedUnique = errors.New("inputs not sorted and unique") - errPublicKeySignatureMismatch = errors.New("signature doesn't match public key") - errUnknownAsset = errors.New("unknown asset ID") + errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") + errWrongNumberOfCredentials = errors.New("should have the same number of credentials as inputs") + errNoImportInputs = errors.New("tx has no imported inputs") + errInputsNotSortedUnique = errors.New("inputs not sorted and unique") _ UnsignedAtomicTx = &UnsignedImportTx{} ) diff --git a/vms/platformvm/import_tx_test.go b/vms/platformvm/import_tx_test.go index b7b03aa393bb..c21ec6f49846 100644 --- a/vms/platformvm/import_tx_test.go +++ b/vms/platformvm/import_tx_test.go @@ -25,7 +25,6 @@ func TestNewImportTx(t *testing.T) { type test struct { description string sharedMemory atomic.SharedMemory - feeKeys []*crypto.PrivateKeySECP256K1R recipientKeys []*crypto.PrivateKeySECP256K1R shouldErr bool } diff --git a/vms/platformvm/reward_validator_tx.go b/vms/platformvm/reward_validator_tx.go index 7a2e9c07d01a..dfb7e67c0181 100644 --- a/vms/platformvm/reward_validator_tx.go +++ b/vms/platformvm/reward_validator_tx.go @@ -19,7 +19,6 @@ import ( var ( errShouldBeDSValidator = errors.New("expected validator to be in the primary network") - errOverflowReward = errors.New("overflow while calculating validator reward") errWrongTxType = errors.New("wrong transaction type") _ UnsignedProposalTx = &UnsignedRewardValidatorTx{} diff --git 
a/vms/platformvm/service.go b/vms/platformvm/service.go index 5461cfb8799b..594303a19731 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -30,11 +30,8 @@ const ( var ( errMissingDecisionBlock = errors.New("should have a decision block within the past two blocks") errNoFunds = errors.New("no spendable funds were found") - errNoUsername = errors.New("argument 'username' not provided") - errNoPassword = errors.New("argument 'password' not provided") errNoSubnetID = errors.New("argument 'subnetID' not provided") errNoRewardAddress = errors.New("argument 'rewardAddress' not provided") - errUnexpectedTxType = errors.New("expected tx to be a DecisionTx, ProposalTx or AtomicTx but is not") errInvalidDelegationRate = errors.New("argument 'delegationFeeRate' must be between 0 and 100, inclusive") errNoAddresses = errors.New("no addresses provided") ) @@ -742,7 +739,7 @@ func (service *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, re nodeID, // Node ID rewardAddress, // Reward Address uint32(10000*args.DelegationFeeRate), // Shares - privKeys, // Private keys + privKeys, // Private keys ) if err != nil { return fmt.Errorf("couldn't create tx: %w", err) @@ -833,7 +830,7 @@ func (service *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValid nodeID, err := ids.ShortFromPrefixedString(args.ID, constants.NodeIDPrefix) if err != nil { - return fmt.Errorf("Error parsing nodeID: '%s': %w", args.ID, err) + return fmt.Errorf("error parsing nodeID: '%s': %w", args.ID, err) } subnetID, err := ids.FromString(args.SubnetID) diff --git a/vms/platformvm/spend.go b/vms/platformvm/spend.go index 5ca60bda30a6..8585313a2643 100644 --- a/vms/platformvm/spend.go +++ b/vms/platformvm/spend.go @@ -16,14 +16,10 @@ import ( ) var ( - errSpendOverflow = errors.New("spent amount overflows uint64") - errNoKeys = errors.New("no keys provided") errLockedFundsNotMarkedAsLocked = errors.New("locked funds not marked as locked") errWrongLocktime = errors.New("wrong locktime reported") errUnknownOwners = errors.New("unknown owners") errCantSign = errors.New("can't sign") - errInputOverflow = errors.New("inputs overflowed uint64") - errOutputOverflow = errors.New("outputs overflowed uint64") ) // stake the provided amount while deducting the provided fee. 
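Note the error construction style in service.go above: the underlying cause is wrapped with %w rather than flattened with %s or %v, so callers can still match it after wrapping. A small demonstration with stand-in names:

package main

import (
	"errors"
	"fmt"
)

var errBadNodeID = errors.New("invalid ID")

// parseNodeID wraps the cause with %w, keeping errBadNodeID in the chain.
func parseNodeID(s string) error {
	return fmt.Errorf("error parsing nodeID: '%s': %w", s, errBadNodeID)
}

func main() {
	err := parseNodeID("bogus")
	// errors.Is sees through the wrapping.
	fmt.Println(errors.Is(err, errBadNodeID)) // true
}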
diff --git a/vms/platformvm/static_service.go b/vms/platformvm/static_service.go index c94930accb61..74244ad4fc31 100644 --- a/vms/platformvm/static_service.go +++ b/vms/platformvm/static_service.go @@ -50,17 +50,6 @@ type APIValidator struct { ID ids.ShortID `json:"id"` } -func (v *APIValidator) weight() uint64 { - switch { - case v.Weight != nil: - return uint64(*v.Weight) - case v.StakeAmount != nil: - return uint64(*v.StakeAmount) - default: - return 0 - } -} - // APIPrimaryValidator is a validator of the primary network type APIPrimaryValidator struct { APIValidator diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index f366e76dfa59..15ba037ffd19 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -78,29 +78,19 @@ var ( // taken from https://stackoverflow.com/questions/25065055/what-is-the-maximum-time-time-in-go/32620397#32620397 maxTime = time.Unix(1<<63-62135596801, 0) // 0 is used because we drop the nano-seconds - timestampKey = ids.NewID([32]byte{'t', 'i', 'm', 'e'}) - currentValidatorsKey = ids.NewID([32]byte{'c', 'u', 'r', 'r', 'e', 'n', 't'}) - pendingValidatorsKey = ids.NewID([32]byte{'p', 'e', 'n', 'd', 'i', 'n', 'g'}) - chainsKey = ids.NewID([32]byte{'c', 'h', 'a', 'i', 'n', 's'}) - subnetsKey = ids.NewID([32]byte{'s', 'u', 'b', 'n', 'e', 't', 's'}) + timestampKey = ids.NewID([32]byte{'t', 'i', 'm', 'e'}) + chainsKey = ids.NewID([32]byte{'c', 'h', 'a', 'i', 'n', 's'}) + subnetsKey = ids.NewID([32]byte{'s', 'u', 'b', 'n', 'e', 't', 's'}) ) var ( errEndOfTime = errors.New("program time is suspiciously far in the future. Either this codebase was way more successful than expected, or a critical error has occurred") - errTimeTooAdvanced = errors.New("this is proposing a time too far in the future") errNoPendingBlocks = errors.New("no pending blocks") - errUnsupportedFXs = errors.New("unsupported feature extensions") errRegisteringType = errors.New("error registering type with database") - errMissingBlock = errors.New("missing block") errInvalidLastAcceptedBlock = errors.New("last accepted block must be a decision block") - errInvalidAddress = errors.New("invalid address") - errInvalidAddressSeperator = errors.New("invalid address seperator") - errInvalidAddressPrefix = errors.New("invalid address prefix") - errInvalidAddressSuffix = errors.New("invalid address suffix") - errEmptyAddressPrefix = errors.New("empty address prefix") - errEmptyAddressSuffix = errors.New("empty address suffix") errInvalidID = errors.New("invalid ID") errDSCantValidate = errors.New("new blockchain can't be validated by primary network") + errUnknownTxType = errors.New("unknown transaction type") ) // Codec does serialization and deserialization @@ -377,7 +367,7 @@ func (vm *VM) issueTx(tx *Tx) error { case UnsignedAtomicTx: vm.unissuedAtomicTxs = append(vm.unissuedAtomicTxs, tx) default: - return errors.New("Could not parse given tx. 
Provided tx needs to be a ProposalTx, DecisionTx, or AtomicTx") + return errUnknownTxType } vm.resetTimer() return nil diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index cac6162aba28..025b84d39393 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -63,9 +63,6 @@ var ( minStake = 5 * units.MilliAvax - // balance of addresses that exist at genesis in defaultVM - defaultBalance = 100 * minStake - // amount all genesis validators stake in defaultVM defaultStakeAmount uint64 = 100 * minStake @@ -78,9 +75,8 @@ var ( ) var ( - errShouldNotifyEngine = errors.New("should have notified engine of block ready") - errShouldPrefCommit = errors.New("should prefer to commit proposal") - errShouldPrefAbort = errors.New("should prefer to abort proposal") + errShouldPrefCommit = errors.New("should prefer to commit proposal") + errShouldPrefAbort = errors.New("should prefer to abort proposal") ) const ( @@ -122,31 +118,6 @@ func defaultContext() *snow.Context { return ctx } -// The UTXOs that exist at genesis in the default VM -func defaultGenesisUTXOs() []*avax.UTXO { - utxos := []*avax.UTXO(nil) - for i, key := range keys { - utxos = append(utxos, - &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: ids.Empty, - OutputIndex: uint32(i), - }, - Asset: avax.Asset{ID: avaxAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: defaultBalance, - OutputOwners: secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }, - ) - } - return utxos -} - // Returns: // 1) The genesis state // 2) The byte representation of the default genesis for tests diff --git a/vms/propertyfx/fx.go b/vms/propertyfx/fx.go index 41cd2252ce08..95209d76d1c3 100644 --- a/vms/propertyfx/fx.go +++ b/vms/propertyfx/fx.go @@ -13,14 +13,9 @@ var ( errWrongUTXOType = errors.New("wrong utxo type") errWrongOperationType = errors.New("wrong operation type") errWrongCredentialType = errors.New("wrong credential type") - - errNoUTXOs = errors.New("an operation must consume at least one UTXO") - errWrongNumberOfUTXOs = errors.New("wrong number of UTXOs for the operation") - errWrongNumberOfCreds = errors.New("wrong number of credentials for the operation") - - errWrongMintOutput = errors.New("wrong mint output provided") - - errCantTransfer = errors.New("cant transfer with this fx") + errWrongNumberOfUTXOs = errors.New("wrong number of UTXOs for the operation") + errWrongMintOutput = errors.New("wrong mint output provided") + errCantTransfer = errors.New("cant transfer with this fx") ) // Fx ... 
diff --git a/vms/secp256k1fx/fx.go b/vms/secp256k1fx/fx.go index 74575fcd69c2..5681a9c99504 100644 --- a/vms/secp256k1fx/fx.go +++ b/vms/secp256k1fx/fx.go @@ -17,7 +17,6 @@ var ( errWrongTxType = errors.New("wrong tx type") errWrongOpType = errors.New("wrong operation type") errWrongUTXOType = errors.New("wrong utxo type") - errWrongOutputType = errors.New("wrong output type") errWrongInputType = errors.New("wrong input type") errWrongCredentialType = errors.New("wrong credential type") errWrongOwnerType = errors.New("wrong owner type") diff --git a/vms/secp256k1fx/fx_test.go b/vms/secp256k1fx/fx_test.go index bdcfb973303c..4fe05c084bbe 100644 --- a/vms/secp256k1fx/fx_test.go +++ b/vms/secp256k1fx/fx_test.go @@ -58,10 +58,6 @@ func (vm *testVM) Clock() *timer.Clock { return &vm.clock } func (vm *testVM) Logger() logging.Logger { return logging.NoLog{} } -type testCodec struct{} - -func (c *testCodec) RegisterStruct(interface{}) {} - type testTx struct{ bytes []byte } func (tx *testTx) UnsignedBytes() []byte { return tx.bytes } diff --git a/vms/secp256k1fx/keychain.go b/vms/secp256k1fx/keychain.go index be6b89edbdec..47321fb66a38 100644 --- a/vms/secp256k1fx/keychain.go +++ b/vms/secp256k1fx/keychain.go @@ -15,8 +15,7 @@ import ( ) var ( - errLockedFunds = errors.New("funds currently locked") - errCantSpend = errors.New("unable to spend this UTXO") + errCantSpend = errors.New("unable to spend this UTXO") ) // Keychain is a collection of keys that can be used to spend outputs diff --git a/vms/spchainvm/vm.go b/vms/spchainvm/vm.go index 5a4d37d17bf9..f73152646ae1 100644 --- a/vms/spchainvm/vm.go +++ b/vms/spchainvm/vm.go @@ -37,7 +37,6 @@ var ( var ( errNoTxs = errors.New("no transactions") - errUnknownBlock = errors.New("unknown block") errUnsupportedFXs = errors.New("unsupported feature extensions") ) diff --git a/vms/spdagvm/prefixed_state.go b/vms/spdagvm/prefixed_state.go index 99cc834a010a..29cf5f511c28 100644 --- a/vms/spdagvm/prefixed_state.go +++ b/vms/spdagvm/prefixed_state.go @@ -29,8 +29,6 @@ type prefixedState struct { tx, utxo, txStatus, funds cache.Cacher uniqueTx cache.Deduplicator - - generatedStatus ids.ID } // UniqueTx de-duplicates the transaction. 
diff --git a/vms/spdagvm/vm.go b/vms/spdagvm/vm.go index 42a94e16a050..49a39ff6b399 100644 --- a/vms/spdagvm/vm.go +++ b/vms/spdagvm/vm.go @@ -36,7 +36,6 @@ const ( ) var ( - errNoKeys = errors.New("no private keys were provided") errUnknownUTXOType = errors.New("utxo has unknown output type") errAsset = errors.New("assetID must be blank") errAmountOverflow = errors.New("the amount of this transaction plus the transaction fee overflows") diff --git a/vms/spdagvm/vm_test.go b/vms/spdagvm/vm_test.go index ae06305839f2..dd42e3a03839 100644 --- a/vms/spdagvm/vm_test.go +++ b/vms/spdagvm/vm_test.go @@ -333,8 +333,7 @@ func TestRPCAPI(t *testing.T) { // Inverse of the above map pkToAddr := map[string]string{} - pks := []string{} // List of private keys - addresses := []string{} // List of addresses controlled by the private keys + pks := []string{} // List of private keys // Populate the above data structures using [keys] for _, v := range keys { @@ -347,7 +346,6 @@ func TestRPCAPI(t *testing.T) { pkToAddr[pk] = address pks = append(pks, pk) - addresses = append(addresses, address) } // Ensure GetAddress and GetBalance return the correct values for the @@ -537,7 +535,6 @@ func TestMultipleSend(t *testing.T) { addrToPK := map[string]string{} pkToAddr := map[string]string{} pks := []string{} - addresses := []string{} for _, v := range keys { cb58 := formatting.CB58{Bytes: v.Bytes()} pk := cb58.String() @@ -548,7 +545,6 @@ func TestMultipleSend(t *testing.T) { pkToAddr[pk] = address pks = append(pks, pk) - addresses = append(addresses, address) } ctx.Lock.Lock() diff --git a/vms/timestampvm/service.go b/vms/timestampvm/service.go index 73af6d9c77b4..cbb98d00898a 100644 --- a/vms/timestampvm/service.go +++ b/vms/timestampvm/service.go @@ -8,13 +8,11 @@ import ( "net/http" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/utils/json" - "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/json" ) var ( - errDBError = errors.New("error getting data from database") errBadData = errors.New("data must be base 58 repr. of 32 bytes") errNoSuchBlock = errors.New("couldn't get block from database. 
Does it exist?") ) diff --git a/xputtest/avmwallet/wallet.go b/xputtest/avmwallet/wallet.go index f51d511eea89..fb51f56843a6 100644 --- a/xputtest/avmwallet/wallet.go +++ b/xputtest/avmwallet/wallet.go @@ -37,8 +37,7 @@ type Wallet struct { balance map[[32]byte]uint64 txFee uint64 - txsSent int32 - txs []*avm.Tx + txs []*avm.Tx } // NewWallet returns a new Wallet From 0bfc26c5e31efe5b9124c19570b317d1bb803633 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Wed, 26 Aug 2020 23:39:51 -0400 Subject: [PATCH 18/47] cleaned up input.go for snowstorm --- snow/consensus/snowstorm/consensus_test.go | 134 +++++ snow/consensus/snowstorm/directed.go | 10 +- snow/consensus/snowstorm/input.go | 662 +++++++++++++-------- vms/avm/service.go | 14 +- vms/avm/vm.go | 2 +- vms/platformvm/service.go | 10 +- vms/platformvm/state.go | 2 +- 7 files changed, 554 insertions(+), 280 deletions(-) diff --git a/snow/consensus/snowstorm/consensus_test.go b/snow/consensus/snowstorm/consensus_test.go index 478289ec517f..a38af34c97f5 100644 --- a/snow/consensus/snowstorm/consensus_test.go +++ b/snow/consensus/snowstorm/consensus_test.go @@ -30,6 +30,7 @@ var ( IsVirtuousTest, QuiesceTest, AcceptingDependencyTest, + AcceptingSlowDependencyTest, RejectingDependencyTest, VacuouslyAcceptedTest, ConflictsTest, @@ -586,6 +587,139 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { } } +type singleAcceptTx struct { + Tx + + t *testing.T + accepted bool +} + +func (tx *singleAcceptTx) Accept() error { + if tx.accepted { + tx.t.Fatalf("accept called multiple times") + } + tx.accepted = true + return tx.Tx.Accept() +} + +func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + rawPurple := &TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.Empty.Prefix(7), + StatusV: choices.Processing, + }, + DependenciesV: []Tx{Red}, + } + rawPurple.InputIDsV.Add(ids.Empty.Prefix(8)) + + purple := &singleAcceptTx{ + Tx: rawPurple, + t: t, + } + + params := sbcon.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + } + graph.Initialize(snow.DefaultContextTest(), params) + + if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if err := graph.Add(Green); err != nil { + t.Fatal(err) + } else if err := graph.Add(purple); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. Expected %s", Red.ID()) + } else if !prefs.Contains(purple.ID()) { + t.Fatalf("Wrong preference. Expected %s", purple.ID()) + } else if Red.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) + } else if Green.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) + } else if purple.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) + } + + g := ids.Bag{} + g.Add(Green.ID()) + if _, err := graph.RecordPoll(g); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Green.ID()) { + t.Fatalf("Wrong preference. Expected %s", Green.ID()) + } else if !prefs.Contains(purple.ID()) { + t.Fatalf("Wrong preference. Expected %s", purple.ID()) + } else if Red.Status() != choices.Processing { + t.Fatalf("Wrong status. 
%s should be %s", Red.ID(), choices.Processing) + } else if Green.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) + } else if purple.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) + } + + p := ids.Bag{} + p.Add(purple.ID()) + if _, err := graph.RecordPoll(p); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Green.ID()) { + t.Fatalf("Wrong preference. Expected %s", Green.ID()) + } else if !prefs.Contains(purple.ID()) { + t.Fatalf("Wrong preference. Expected %s", purple.ID()) + } else if Red.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) + } else if Green.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) + } else if purple.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) + } + + rp := ids.Bag{} + rp.Add(Red.ID(), purple.ID()) + if _, err := graph.RecordPoll(rp); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Green.ID()) { + t.Fatalf("Wrong preference. Expected %s", Green.ID()) + } else if !prefs.Contains(purple.ID()) { + t.Fatalf("Wrong preference. Expected %s", purple.ID()) + } else if Red.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) + } else if Green.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) + } else if purple.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) + } + + r := ids.Bag{} + r.Add(Red.ID()) + if _, err := graph.RecordPoll(r); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 0 { + t.Fatalf("Wrong number of preferences.") + } else if Red.Status() != choices.Accepted { + t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Accepted) + } else if Green.Status() != choices.Rejected { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Rejected) + } else if purple.Status() != choices.Accepted { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Accepted) + } +} + func RejectingDependencyTest(t *testing.T, factory Factory) { Setup() diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go index 95b5dc136258..f55445e26d2c 100644 --- a/snow/consensus/snowstorm/directed.go +++ b/snow/consensus/snowstorm/directed.go @@ -78,8 +78,8 @@ func (dg *Directed) IsVirtuous(tx Tx) bool { // The tx isn't processing, so we need to check to see if it conflicts with // any of the other txs that are currently processing. - for _, input := range tx.InputIDs().List() { - if _, exists := dg.utxos[input.Key()]; exists { + for _, utxoID := range tx.InputIDs().List() { + if _, exists := dg.utxos[utxoID.Key()]; exists { // A currently processing tx names the same input as the provided // tx, so the provided tx would be rogue. 
return false @@ -263,10 +263,8 @@ func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) { // Get the set of IDs that meet this alpha threshold metThreshold := votes.Threshold() for _, txID := range metThreshold.List() { - txKey := txID.Key() - // Get the node this tx represents - txNode, exist := dg.txs[txKey] + txNode, exist := dg.txs[txID.Key()] if !exist { // This tx may have already been accepted because of tx // dependencies. If this is the case, we can just drop the vote. @@ -327,7 +325,7 @@ func (dg *Directed) String() string { return sb.String() } -// deferAcceptance attempts to mark this tx once all its dependencies are +// deferAcceptance attempts to accept this tx once all its dependencies are // accepted. If all the dependencies are already accepted, this function will // immediately accept the tx. func (dg *Directed) deferAcceptance(txNode *directedTx) { diff --git a/snow/consensus/snowstorm/input.go b/snow/consensus/snowstorm/input.go index 6f3251c3afbf..c5fc5405e379 100644 --- a/snow/consensus/snowstorm/input.go +++ b/snow/consensus/snowstorm/input.go @@ -6,11 +6,13 @@ package snowstorm import ( "bytes" "fmt" + "math" "sort" "strings" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/utils/formatting" sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) @@ -29,384 +31,374 @@ type Input struct { // Key: Transaction ID // Value: Node that represents this transaction in the conflict graph - txs map[[32]byte]inputTx + txs map[[32]byte]*inputTx // Key: UTXO ID // Value: Node that represents the status of the transactions consuming this // input - utxos map[[32]byte]inputUtxo + utxos map[[32]byte]inputUTXO } type inputTx struct { - bias int - tx Tx - + // pendingAccept identifies if this transaction has been marked as accepted + // once its transitive dependencies have also been accepted + pendingAccept bool + + // numSuccessfulPolls is the number of times this tx was the successful + // result of a network poll + numSuccessfulPolls int + + // lastVote is the number of the last poll in which this tx was part of a + // successful result. This value is needed to ensure correctness + // in the case that a tx was rejected when it was preferred in a conflict + // set and there was a tie for the second highest numSuccessfulPolls.
lastVote int + + // tx is the actual transaction this node represents + tx Tx } -type inputUtxo struct { - bias, confidence, lastVote int - rogue bool +type inputUTXO struct { + snowball + // preference is the txID which snowball says this UTXO should prefer preference ids.ID - color ids.ID - conflicts ids.Set + + // color is the txID which snowflake says this UTXO should prefer + color ids.ID + + // spenders is the set of txIDs that are currently attempting to spend this + // UTXO + spenders ids.Set } // Initialize implements the ConflictGraph interface func (ig *Input) Initialize(ctx *snow.Context, params sbcon.Parameters) error { - ig.txs = make(map[[32]byte]inputTx) - ig.utxos = make(map[[32]byte]inputUtxo) + ig.txs = make(map[[32]byte]*inputTx) + ig.utxos = make(map[[32]byte]inputUTXO) return ig.common.Initialize(ctx, params) } // IsVirtuous implements the ConflictGraph interface func (ig *Input) IsVirtuous(tx Tx) bool { - id := tx.ID() - for _, consumption := range tx.InputIDs().List() { - input := ig.utxos[consumption.Key()] - if input.rogue || - (input.conflicts.Len() > 0 && !input.conflicts.Contains(id)) { + txID := tx.ID() + for _, utxoID := range tx.InputIDs().List() { + utxo, exists := ig.utxos[utxoID.Key()] + // If the UTXO isn't currently processing, then this tx won't conflict + // due to this UTXO. + if !exists { + continue + } + // If this UTXO is rogue, then this tx will have at least one conflict. + if utxo.rogue { + return false + } + // This UTXO is currently virtuous, so it must be spent by only one tx. + // If that tx is different from this tx, then these txs would conflict. + if !utxo.spenders.Contains(txID) { return false } } + + // None of the UTXOs consumed by this tx imply that this tx would be rogue, + // so it is virtuous as far as this consensus instance knows. return true } +// Conflicts implements the ConflictGraph interface +func (ig *Input) Conflicts(tx Tx) ids.Set { + conflicts := ids.Set{} + // The conflicting txs are the union of all the txs that spend an input that + // this tx spends. + for _, utxoID := range tx.InputIDs().List() { + if utxo, exists := ig.utxos[utxoID.Key()]; exists { + conflicts.Union(utxo.spenders) + } + } + // A tx can't conflict with itself, so we should make sure to remove the + // provided tx from the conflict set. This is needed in case this tx is + // currently processing. + conflicts.Remove(tx.ID()) + return conflicts +} + // Add implements the ConflictGraph interface func (ig *Input) Add(tx Tx) error { if ig.Issued(tx) { - return nil // Already inserted + // If the tx was previously inserted, it shouldn't be re-inserted. + return nil } txID := tx.ID() bytes := tx.Bytes() + // Notify the IPC socket that this tx has been issued. ig.ctx.DecisionDispatcher.Issue(ig.ctx.ChainID, txID, bytes) + + // Notify the metrics that this transaction was just issued. + ig.metrics.Issued(txID) + inputs := tx.InputIDs() - // If there are no inputs, they are vacuously accepted + + // If this tx doesn't have any inputs, it's impossible for there to be any + // conflicting transactions. Therefore, this transaction is treated as + // vacuously accepted. if inputs.Len() == 0 { + // Accept is called before notifying the IPC so that acceptances that + // cause fatal errors aren't sent to an IPC peer. if err := tx.Accept(); err != nil { return err } + + // Notify the IPC socket that this tx has been accepted.
ig.ctx.DecisionDispatcher.Accept(ig.ctx.ChainID, txID, bytes) - ig.metrics.Issued(txID) + + // Notify the metrics that this transaction was just accepted. ig.metrics.Accepted(txID) return nil } - cn := inputTx{tx: tx} + txNode := &inputTx{tx: tx} + + // This tx should be added to the virtuous sets and preferred sets if this + // tx is virtuous in all of the UTXOs it is trying to consume. virtuous := true - // If there are inputs, they must be voted on - for _, consumption := range inputs.List() { - consumptionKey := consumption.Key() - input, exists := ig.utxos[consumptionKey] - input.rogue = exists // If the input exists for a conflict + + // For each UTXO consumed by the tx: + // * Mark this tx as attempting to consume this UTXO + // * Mark the UTXO as being rogue if applicable + for _, inputID := range inputs.List() { + inputKey := inputID.Key() + utxo, exists := ig.utxos[inputKey] if exists { - for _, conflictID := range input.conflicts.List() { + // If the utxo was already being consumed by another tx, this utxo + // is now rogue. + utxo.rogue = true + // Since this utxo is rogue, this tx is rogue as well. + virtuous = false + // If this utxo was previously virtuous, then there may be txs that + // were considered virtuous that are now known to be rogue. If + // that's the case, we should remove those txs from the virtuous + // sets. + for _, conflictID := range utxo.spenders.List() { ig.virtuous.Remove(conflictID) ig.virtuousVoting.Remove(conflictID) } } else { - input.preference = txID // If there isn't a conflict, I'm preferred + // If there isn't a conflict for this UTXO, I'm the preferred + // spender. + utxo.preference = txID } - input.conflicts.Add(txID) - ig.utxos[consumptionKey] = input - virtuous = virtuous && !exists + // This UTXO needs to track that it is being spent by this tx. + utxo.spenders.Add(txID) + + // The utxo isn't a pointer, so we need to write it back. + ig.utxos[inputKey] = utxo } - // Add the node to the set - ig.txs[txID.Key()] = cn if virtuous { - // If I'm preferred in all my conflict sets, I'm preferred. - // Because the preference graph is a DAG, there will always be at least - // one preferred consumer, if there is a consumer - ig.preferences.Add(txID) + // If this tx is currently virtuous, add it to the virtuous sets ig.virtuous.Add(txID) ig.virtuousVoting.Add(txID) + + // If a tx is virtuous, it must be preferred. + ig.preferences.Add(txID) } - ig.metrics.Issued(txID) + // Add this tx to the set of currently processing txs + ig.txs[txID.Key()] = txNode + + // If a tx that this tx depends on is rejected, this tx should also be + // rejected. toReject := &rejector{ g: ig, errs: &ig.errs, txID: txID, } + // Register all of this tx's dependencies as possibilities to reject this tx. for _, dependency := range tx.Dependencies() { - if !dependency.Status().Decided() { + if dependency.Status() != choices.Accepted { + // If the dependency isn't accepted, then it must be processing. So, + // this tx should be rejected if any of these processing txs are + // rejected. Note that the dependencies can't already be rejected, + // because it is assumed that this tx is currently considered + // valid. toReject.deps.Add(dependency.ID()) } } + + // Register these dependencies ig.pendingReject.Register(toReject) - return ig.errs.Err + + // Registering the rejector can't result in an error, so we can safely + // return nil here.
+ return nil } // Issued implements the ConflictGraph interface func (ig *Input) Issued(tx Tx) bool { + // If the tx is either Accepted or Rejected, then it must have been issued + // previously. if tx.Status().Decided() { return true } + + // If the tx is currently processing, then it must have been issued. _, ok := ig.txs[tx.ID().Key()] return ok } -// Conflicts implements the ConflictGraph interface -func (ig *Input) Conflicts(tx Tx) ids.Set { - id := tx.ID() - conflicts := ids.Set{} - - for _, input := range tx.InputIDs().List() { - inputNode := ig.utxos[input.Key()] - conflicts.Union(inputNode.conflicts) - } - - conflicts.Remove(id) - return conflicts -} - // RecordPoll implements the ConflictGraph interface func (ig *Input) RecordPoll(votes ids.Bag) (bool, error) { + // Increase the vote ID. This is only updated here and is used to reset the + // confidence values of transactions lazily. ig.currentVote++ + + // This flag tracks if the Avalanche instance needs to recompute its + // frontiers. Frontiers only need to be recalculated if preferences change + // or if a tx was accepted. changed := false + // We only want to iterate over txs that received alpha votes votes.SetThreshold(ig.params.Alpha) - threshold := votes.Threshold() - for _, toInc := range threshold.List() { - incKey := toInc.Key() - tx, exist := ig.txs[incKey] + // Get the set of IDs that meet this alpha threshold + metThreshold := votes.Threshold() + for _, txID := range metThreshold.List() { + txKey := txID.Key() + + // Get the node this tx represents + txNode, exist := ig.txs[txKey] if !exist { - // Votes for decided consumptions are ignored + // This tx may have already been accepted because of tx + // dependencies. If this is the case, we can just drop the vote. continue } - tx.bias++ - - // The timestamp is needed to ensure correctness in the case that a - // consumer was rejected from a conflict set, when it was preferred in - // this conflict set, when there is a tie for the second highest - // confidence. - tx.lastVote = ig.currentVote + txNode.numSuccessfulPolls++ + txNode.lastVote = ig.currentVote + // This tx is preferred if it is preferred in all of its conflict sets preferred := true + // This tx is rogue if any of its conflict sets are rogue rogue := false - confidence := ig.params.BetaRogue - - consumptions := tx.tx.InputIDs().List() - for _, inputID := range consumptions { + // The confidence of the tx is the minimum confidence of all the inputs' + // conflict sets + confidence := math.MaxInt32 + for _, inputID := range txNode.tx.InputIDs().List() { inputKey := inputID.Key() - input := ig.utxos[inputKey] - - // If I did not receive a vote in the last vote, reset my confidence to 0 - if input.lastVote+1 != ig.currentVote { - input.confidence = 0 + utxo := ig.utxos[inputKey] + + // If this tx wasn't voted for during the last poll, the confidence + // should have been reset then. Since resets are applied lazily, we + // reset it now. + // Additionally, if a different tx was voted for in the last poll, + // the confidence should also be reset.
+ if utxo.lastVote+1 != ig.currentVote || !txID.Equals(utxo.color) { + utxo.confidence = 0 } - input.lastVote = ig.currentVote - - // check the snowflake preference - if !toInc.Equals(input.color) { - input.confidence = 0 - } - // update the snowball preference - if tx.bias > input.bias { - // if the previous preference lost it's preference in this - // input, it can't be preferred in all the inputs - if ig.preferences.Contains(input.preference) { - ig.preferences.Remove(input.preference) - changed = true + utxo.lastVote = ig.currentVote + + // Update the Snowflake counter and preference. + utxo.color = txID + utxo.confidence++ + + // Update the Snowball preference. + if txNode.numSuccessfulPolls > utxo.numSuccessfulPolls { + // If this node didn't previously prefer this tx, then we need to + // update the preferences. + if !txID.Equals(utxo.preference) { + // If the previous preference lost its preference in this + // input, it can't be preferred in all the inputs. + if ig.preferences.Contains(utxo.preference) { + ig.preferences.Remove(utxo.preference) + // Because there was a change in preferences, Avalanche + // will need to recompute its frontiers. + changed = true + } + utxo.preference = txID } - - input.bias = tx.bias - input.preference = toInc + utxo.numSuccessfulPolls = txNode.numSuccessfulPolls + } else { + // This isn't the preferred choice in this conflict set so this + // tx can't be preferred. + preferred = false } - // update snowflake vars - input.color = toInc - input.confidence++ + // If this utxo is rogue, the transaction must have at least one + // conflict. + rogue = rogue || utxo.rogue - ig.utxos[inputKey] = input - - // track cumulative statistics - preferred = preferred && toInc.Equals(input.preference) - rogue = rogue || input.rogue - if confidence > input.confidence { - confidence = input.confidence + // The confidence of this tx is the minimum confidence of its + // inputs. + if confidence > utxo.confidence { + confidence = utxo.confidence } + + // The input isn't a pointer, so it must be written back. + ig.utxos[inputKey] = utxo } - // If the node wasn't accepted, but was preferred, make sure it is - // marked as preferred - if preferred && !ig.preferences.Contains(toInc) { - ig.preferences.Add(toInc) + // If this tx is preferred and it isn't already marked as such, mark the + // tx as preferred and signal that Avalanche needs to recompute the frontiers. + if preferred && !ig.preferences.Contains(txID) { + ig.preferences.Add(txID) changed = true } - if (!rogue && confidence >= ig.params.BetaVirtuous) || - confidence >= ig.params.BetaRogue { - ig.deferAcceptance(tx) + // If the tx should be accepted, then we should defer its acceptance + // until its dependencies are decided. If this tx was already marked to + // be accepted, we shouldn't register it again.
+ if !txNode.pendingAccept && + ((!rogue && confidence >= ig.params.BetaVirtuous) || + confidence >= ig.params.BetaRogue) { + ig.deferAcceptance(txNode) if ig.errs.Errored() { return changed, ig.errs.Err } - changed = true - continue } - ig.txs[incKey] = tx - } - return changed, ig.errs.Err -} - -func (ig *Input) deferAcceptance(tn inputTx) { - toAccept := &inputAccepter{ - ig: ig, - tn: tn, - } - - for _, dependency := range tn.tx.Dependencies() { - if !dependency.Status().Decided() { - toAccept.deps.Add(dependency.ID()) - } - } - - ig.virtuousVoting.Remove(tn.tx.ID()) - ig.pendingAccept.Register(toAccept) -} - -// reject all the ids and remove them from their conflict sets -func (ig *Input) reject(ids ...ids.ID) error { - for _, conflict := range ids { - conflictKey := conflict.Key() - cn := ig.txs[conflictKey] - delete(ig.txs, conflictKey) - ig.preferences.Remove(conflict) // A rejected value isn't preferred - - // Remove from all conflict sets - ig.removeConflict(conflict, cn.tx.InputIDs().List()...) - - // Mark it as rejected - if err := cn.tx.Reject(); err != nil { - return err - } - ig.ctx.DecisionDispatcher.Reject(ig.ctx.ChainID, cn.tx.ID(), cn.tx.Bytes()) - ig.metrics.Rejected(conflict) - ig.pendingAccept.Abandon(conflict) - ig.pendingReject.Fulfill(conflict) - } - return nil -} - -// Remove id from all of its conflict sets -func (ig *Input) removeConflict(id ids.ID, inputIDs ...ids.ID) { - for _, inputID := range inputIDs { - inputKey := inputID.Key() - // if the input doesn't exists, it was already decided - if input, exists := ig.utxos[inputKey]; exists { - input.conflicts.Remove(id) - - // If there is nothing attempting to consume the input, remove it - // from memory - if input.conflicts.Len() == 0 { - delete(ig.utxos, inputKey) - continue - } - - // If I was previously preferred, I must find who should now be - // preferred. This shouldn't normally happen, therefore it is okay - // to be fairly slow here - if input.preference.Equals(id) { - newPreference := ids.ID{} - newBias := -1 - newBiasTime := 0 - - // Find the highest bias conflict - for _, spend := range input.conflicts.List() { - tx := ig.txs[spend.Key()] - if tx.bias > newBias || - (tx.bias == newBias && - newBiasTime < tx.lastVote) { - newPreference = spend - newBias = tx.bias - newBiasTime = tx.lastVote - } - } - - // Set the preferences to the highest bias - input.preference = newPreference - input.bias = newBias - - ig.utxos[inputKey] = input - - // We need to check if this node is now preferred - preferenceNode, exist := ig.txs[newPreference.Key()] - if exist { - isPreferred := true - inputIDs := preferenceNode.tx.InputIDs().List() - for _, inputID := range inputIDs { - inputKey := inputID.Key() - input := ig.utxos[inputKey] - - if !newPreference.Equals(input.preference) { - // If this preference isn't the preferred color, it - // isn't preferred. Input might not exist, in which - // case this still isn't the preferred color - isPreferred = false - break - } - } - if isPreferred { - // If I'm preferred in all my conflict sets, I'm - // preferred - ig.preferences.Add(newPreference) - } - } - } else { - // If i'm rejecting the non-preference, do nothing - ig.utxos[inputKey] = input - } + if txNode.tx.Status() == choices.Accepted { + // By accepting a tx, the state of this instance has changed. 
+ changed = true } } return changed, ig.errs.Err } func (ig *Input) String() string { - nodes := []tempNode{} + nodes := make([]tempNode, 0, len(ig.txs)) for _, tx := range ig.txs { id := tx.tx.ID() confidence := ig.params.BetaRogue for _, inputID := range tx.tx.InputIDs().List() { input := ig.utxos[inputID.Key()] - if input.lastVote != ig.currentVote { + if input.lastVote != ig.currentVote || !id.Equals(input.color) { confidence = 0 break } - if !id.Equals(input.color) { - confidence = 0 - break - } - if input.confidence < confidence { confidence = input.confidence } } nodes = append(nodes, tempNode{ - id: id, - bias: tx.bias, - confidence: confidence, + id: id, + numSuccessfulPolls: tx.numSuccessfulPolls, + confidence: confidence, }) } + // Sort the nodes so that the string representation is canonical sortTempNodes(nodes) sb := strings.Builder{} - sb.WriteString("IG(") format := fmt.Sprintf( "\n Choice[%s] = ID: %%50s %%s", formatting.IntFormat(len(nodes)-1)) - for i, cn := range nodes { sb.WriteString(fmt.Sprintf(format, i, cn.id, &cn)) } @@ -415,15 +407,153 @@ func (ig *Input) String() string { sb.WriteString("\n") } sb.WriteString(")") - return sb.String() } +// deferAcceptance attempts to accept this tx once all its dependencies are +// accepted. If all the dependencies are already accepted, this function will +// immediately accept the tx. +func (ig *Input) deferAcceptance(txNode *inputTx) { + // Mark that this tx is pending acceptance so this function won't be called + // again + txNode.pendingAccept = true + + toAccept := &inputAccepter{ + ig: ig, + txNode: txNode, + } + + for _, dependency := range txNode.tx.Dependencies() { + if dependency.Status() != choices.Accepted { + // If the dependency isn't accepted, then it must be processing. + // This tx should be accepted after that dependency is accepted. + toAccept.deps.Add(dependency.ID()) + } + } + + // This tx is no longer being voted on, so we remove it from the voting set. + // This ensures that virtuous txs built on top of rogue txs don't force the + // node to treat the rogue tx as virtuous. + ig.virtuousVoting.Remove(txNode.tx.ID()) + ig.pendingAccept.Register(toAccept) +} + +// reject all the named txIDs and remove them from their conflict sets +func (ig *Input) reject(conflictIDs ...ids.ID) error { + for _, conflictID := range conflictIDs { + conflictKey := conflictID.Key() + conflict := ig.txs[conflictKey] + + // We are rejecting the tx, so we should remove it from the graph + delete(ig.txs, conflictKey) + + // While it's statistically unlikely that something being rejected is + // preferred, it is handled for completeness. + ig.preferences.Remove(conflictID) + + // Remove this tx from all the conflict sets it's currently in + ig.removeConflict(conflictID, conflict.tx.InputIDs().List()...) + + // Reject is called before notifying the IPC so that rejections that + // cause fatal errors aren't sent to an IPC peer. + if err := conflict.tx.Reject(); err != nil { + return err + } + + // Notify the IPC that the tx was rejected + ig.ctx.DecisionDispatcher.Reject(ig.ctx.ChainID, conflict.tx.ID(), conflict.tx.Bytes()) + + // Update the metrics to account for this transaction's rejection + ig.metrics.Rejected(conflictID) + + // If there is a tx that was accepted pending on this tx, the ancestor + // tx can't be accepted. + ig.pendingAccept.Abandon(conflictID) + // If there is a tx that was issued pending on this tx, the ancestor tx + // must be rejected.
+ ig.pendingReject.Fulfill(conflictID) + } + return nil +} + +// Remove id from all of its conflict sets +func (ig *Input) removeConflict(txID ids.ID, inputIDs ...ids.ID) { + for _, inputID := range inputIDs { + inputKey := inputID.Key() + utxo, exists := ig.utxos[inputKey] + if !exists { + // If the utxo doesn't exist, it was already consumed, so there is + // no mapping left to update. + continue + } + + // This tx is no longer attempting to spend this utxo. + utxo.spenders.Remove(txID) + + // If there is nothing attempting to consume the utxo anymore, remove it + // from memory. + if utxo.spenders.Len() == 0 { + delete(ig.utxos, inputKey) + continue + } + + // If I'm rejecting the non-preference, there is nothing else to update. + if !utxo.preference.Equals(txID) { + ig.utxos[inputKey] = utxo + continue + } + + // If I was previously preferred, I must find who should now be + // preferred. + preference := ids.ID{} + numSuccessfulPolls := -1 + lastVote := 0 + + // Find the new Snowball preference + for _, spender := range utxo.spenders.List() { + txNode := ig.txs[spender.Key()] + if txNode.numSuccessfulPolls > numSuccessfulPolls || + (txNode.numSuccessfulPolls == numSuccessfulPolls && + lastVote < txNode.lastVote) { + preference = spender + numSuccessfulPolls = txNode.numSuccessfulPolls + lastVote = txNode.lastVote + } + } + + // Update the preferences + utxo.preference = preference + utxo.numSuccessfulPolls = numSuccessfulPolls + + ig.utxos[inputKey] = utxo + + // We need to check if this tx is now preferred + txNode := ig.txs[preference.Key()] + isPreferred := true + for _, inputID := range txNode.tx.InputIDs().List() { + inputKey := inputID.Key() + input := ig.utxos[inputKey] + + if !preference.Equals(input.preference) { + // If this preference isn't the preferred color, the tx isn't + // preferred. Also note that the input might not exist, in which + // case this tx is going to be rejected in a later iteration. + isPreferred = false + break + } + } + if isPreferred { + // If I'm preferred in all my conflict sets, I'm preferred. + ig.preferences.Add(preference) + } + } +} + type inputAccepter struct { ig *Input deps ids.Set rejected bool - tn inputTx + txNode *inputTx } func (a *inputAccepter) Dependencies() ids.Set { return a.deps } @@ -436,54 +566,66 @@ func (a *inputAccepter) Fulfill(id ids.ID) { func (a *inputAccepter) Abandon(id ids.ID) { a.rejected = true } func (a *inputAccepter) Update() { + // If I was rejected or I am still waiting on dependencies to finish or an + // error has occurred, I shouldn't do anything. if a.rejected || a.deps.Len() != 0 || a.ig.errs.Errored() { return } - id := a.tn.tx.ID() - delete(a.ig.txs, id.Key()) - - // Remove Tx from all of its conflicts - inputIDs := a.tn.tx.InputIDs() - a.ig.removeConflict(id, inputIDs.List()...) + txID := a.txNode.tx.ID() + // We are accepting the tx, so we should remove the node from the graph.
+ delete(a.ig.txs, txID.Key()) - a.ig.virtuous.Remove(id) - a.ig.preferences.Remove(id) + // Get the conflicts of this tx so that we can reject them + conflicts := a.ig.Conflicts(a.txNode.tx) - // Reject the conflicts - conflicts := ids.Set{} - for inputKey, exists := range inputIDs { - if exists { - inputNode := a.ig.utxos[inputKey] - conflicts.Union(inputNode.conflicts) - } + // This tx is consuming all the UTXOs from its inputs, so we can prune them + // all from memory + for _, inputID := range a.txNode.tx.InputIDs().List() { + delete(a.ig.utxos, inputID.Key()) } + + // This tx is now accepted, so it shouldn't be part of the virtuous set or + // the preferred set. Its status as Accepted implies these descriptions. + a.ig.virtuous.Remove(txID) + a.ig.preferences.Remove(txID) + + // Reject all the txs that conflicted with this tx. if err := a.ig.reject(conflicts.List()...); err != nil { a.ig.errs.Add(err) return } - // Mark it as accepted - if err := a.tn.tx.Accept(); err != nil { + // Accept is called before notifying the IPC so that acceptances that cause + // fatal errors aren't sent to an IPC peer. + if err := a.txNode.tx.Accept(); err != nil { a.ig.errs.Add(err) return } - a.ig.ctx.DecisionDispatcher.Accept(a.ig.ctx.ChainID, id, a.tn.tx.Bytes()) - a.ig.metrics.Accepted(id) - a.ig.pendingAccept.Fulfill(id) - a.ig.pendingReject.Abandon(id) + // Notify the IPC socket that this tx has been accepted. + a.ig.ctx.DecisionDispatcher.Accept(a.ig.ctx.ChainID, txID, a.txNode.tx.Bytes()) + + // Update the metrics to account for this transaction's acceptance + a.ig.metrics.Accepted(txID) + + // If there is a tx that was accepted pending on this tx, the ancestor + // should be notified that it doesn't need to block on this tx anymore. + a.ig.pendingAccept.Fulfill(txID) + // If there is a tx that was issued pending on this tx, the ancestor tx + // doesn't need to be rejected because of this tx. + a.ig.pendingReject.Abandon(txID) } type tempNode struct { - id ids.ID - bias, confidence int + id ids.ID + numSuccessfulPolls, confidence int } func (tn *tempNode) String() string { return fmt.Sprintf( "SB(NumSuccessfulPolls = %d, Confidence = %d)", - tn.bias, + tn.numSuccessfulPolls, tn.confidence) } diff --git a/vms/avm/service.go b/vms/avm/service.go index 7ff0df166b5d..005b28ccda81 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -116,7 +116,7 @@ func (service *Service) GetTx(r *http.Request, args *api.JsonTxID, reply *Format // Marks a starting or stopping point when fetching UTXOs. Used for pagination. type Index struct { Address string `json:"address"` // The address as a string - Utxo string `json:"utxo"` // The UTXO ID as a string + UTXO string `json:"utxo"` // The UTXO ID as a string } // GetUTXOsArgs are arguments for passing into GetUTXOs. @@ -125,7 +125,7 @@ type Index struct { // If [limit] == 0 or > [maxUTXOsToFetch], fetches up to [maxUTXOsToFetch]. // [StartIndex] defines where to start fetching UTXOs (for pagination.) // UTXOs fetched are from addresses equal to or greater than [StartIndex.Address] -// For address [StartIndex.Address], only UTXOs with IDs greater than [StartIndex.Utxo] will be returned. +// For address [StartIndex.Address], only UTXOs with IDs greater than [StartIndex.UTXO] will be returned. // If [StartIndex] is omitted, gets all UTXOs. // If GetUTXOs is called multiple times, with or without [StartIndex], it is not guaranteed // that returned UTXOs are unique. That is, the same UTXO may appear in the response of multiple calls.
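The GetUTXOsArgs comment above fully specifies the paging contract: resume each call from the previous response's endIndex, and de-duplicate client-side, since the same UTXO may appear on multiple pages. A minimal Go sketch of a client paging loop follows; the avm.getUTXOs method and the addresses/limit/startIndex/endIndex fields are the ones documented here, while the node URL, the placeholder address, the 1024 page size, and the fetchAllUTXOs helper are illustrative assumptions:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
    )

    // index mirrors the Index type documented above.
    type index struct {
        Address string `json:"address"`
        UTXO    string `json:"utxo"`
    }

    type getUTXOsResult struct {
        UTXOs    []string `json:"utxos"`
        EndIndex index    `json:"endIndex"`
    }

    // fetchAllUTXOs pages through avm.getUTXOs until an empty page is
    // returned, de-duplicating because pages may overlap.
    func fetchAllUTXOs(url string, addrs []string) ([]string, error) {
        seen := map[string]bool{}
        var all []string
        var start *index
        for {
            params := map[string]interface{}{
                "addresses": addrs,
                "limit":     1024, // assumed page size, capped by maxUTXOsToFetch
            }
            if start != nil {
                params["startIndex"] = start
            }
            body, err := json.Marshal(map[string]interface{}{
                "jsonrpc": "2.0",
                "id":      1,
                "method":  "avm.getUTXOs",
                "params":  params,
            })
            if err != nil {
                return nil, err
            }
            resp, err := http.Post(url, "application/json", bytes.NewReader(body))
            if err != nil {
                return nil, err
            }
            var reply struct {
                Result getUTXOsResult `json:"result"`
            }
            err = json.NewDecoder(resp.Body).Decode(&reply)
            resp.Body.Close()
            if err != nil {
                return nil, err
            }
            if len(reply.Result.UTXOs) == 0 {
                return all, nil // no more pages
            }
            for _, utxo := range reply.Result.UTXOs {
                if !seen[utxo] {
                    seen[utxo] = true
                    all = append(all, utxo)
                }
            }
            // Resume the next call where this one stopped.
            start = &reply.Result.EndIndex
        }
    }

    func main() {
        utxos, err := fetchAllUTXOs("http://127.0.0.1:9650/ext/bc/X", []string{"X-local1..."})
        fmt.Println(len(utxos), err)
    }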
@@ -176,7 +176,7 @@ func (service *Service) GetUTXOs(r *http.Request, args *GetUTXOsArgs, reply *Get startAddr := ids.ShortEmpty startUTXO := ids.Empty - if args.StartIndex.Address != "" || args.StartIndex.Utxo != "" { + if args.StartIndex.Address != "" || args.StartIndex.UTXO != "" { addrChainID, addr, err := service.vm.ParseAddress(args.StartIndex.Address) if err != nil { return fmt.Errorf("couldn't parse start index address: %w", err) } @@ -185,7 +185,7 @@ func (service *Service) GetUTXOs(r *http.Request, args *GetUTXOsArgs, reply *Get return fmt.Errorf("addresses from multiple chains provided: %q and %q", chainID, addrChainID) } - utxo, err := ids.FromString(args.StartIndex.Utxo) + utxo, err := ids.FromString(args.StartIndex.UTXO) if err != nil { return fmt.Errorf("couldn't parse start index utxo: %w", err) } @@ -235,7 +235,7 @@ func (service *Service) GetUTXOs(r *http.Request, args *GetUTXOsArgs, reply *Get } reply.EndIndex.Address = endAddress - reply.EndIndex.Utxo = endUTXOID.String() + reply.EndIndex.UTXO = endUTXOID.String() return nil } @@ -1329,12 +1329,12 @@ func (service *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, reply return err } - atomicUtxos, _, _, err := service.vm.GetAtomicUTXOs(chainID, kc.Addrs, ids.ShortEmpty, ids.Empty, -1) + atomicUTXOs, _, _, err := service.vm.GetAtomicUTXOs(chainID, kc.Addrs, ids.ShortEmpty, ids.Empty, -1) if err != nil { return fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) } - amountsSpent, importInputs, importKeys, err := service.vm.SpendAll(atomicUtxos, kc) + amountsSpent, importInputs, importKeys, err := service.vm.SpendAll(atomicUTXOs, kc) if err != nil { return err } diff --git a/vms/avm/vm.go b/vms/avm/vm.go index 1e3750d1d2ca..e4b8f03451ea 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -393,7 +393,7 @@ func (vm *VM) GetAtomicUTXOs( // Returns at most [limit] UTXOs. // If [limit] <= 0 or [limit] > maxUTXOsToFetch, it is set to [maxUTXOsToFetch]. // Only returns UTXOs associated with addresses >= [startAddr]. -// For address [startAddr], only returns UTXOs whose IDs are greater than [startUtxoID]. +// For address [startAddr], only returns UTXOs whose IDs are greater than [startUTXOID]. // Returns: // * The fetched UTXOs // * The address associated with the last UTXO fetched diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 594303a19731..a2f027b7ce20 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -236,7 +236,7 @@ func (service *Service) ListAddresses(_ *http.Request, args *api.UserPass, respo // Marks a starting or stopping point when fetching UTXOs. Used for pagination. type Index struct { Address string `json:"address"` // The address as a string - Utxo string `json:"utxo"` // The UTXO ID as a string + UTXO string `json:"utxo"` // The UTXO ID as a string } // GetUTXOsArgs are arguments for passing into GetUTXOs. @@ -245,7 +245,7 @@ type Index struct { // If [limit] == 0 or > [maxUTXOsToFetch], fetches up to [maxUTXOsToFetch]. // [StartIndex] defines where to start fetching UTXOs (for pagination.) // UTXOs fetched are from addresses equal to or greater than [StartIndex.Address] -// For address [StartIndex.Address], only UTXOs with IDs greater than [StartIndex.Utxo] will be returned. +// For address [StartIndex.Address], only UTXOs with IDs greater than [StartIndex.UTXO] will be returned. // If [StartIndex] is omitted, gets all UTXOs.
// If GetUTXOs is called multiple times, with or without [StartIndex], it is not guaranteed // that returned UTXOs are unique. That is, the same UTXO may appear in the response of multiple calls. @@ -303,7 +303,7 @@ func (service *Service) GetUTXOs(_ *http.Request, args *GetUTXOsArgs, response * startAddr := ids.ShortEmpty startUTXO := ids.Empty - if args.StartIndex.Address != "" || args.StartIndex.Utxo != "" { + if args.StartIndex.Address != "" || args.StartIndex.UTXO != "" { addrChainID, addr, err := service.vm.ParseAddress(args.StartIndex.Address) if err != nil { return fmt.Errorf("couldn't parse start index address: %w", err) } @@ -312,7 +312,7 @@ func (service *Service) GetUTXOs(_ *http.Request, args *GetUTXOsArgs, response * return fmt.Errorf("addresses from multiple chains provided: %q and %q", chainID, addrChainID) } - utxo, err := ids.FromString(args.StartIndex.Utxo) + utxo, err := ids.FromString(args.StartIndex.UTXO) if err != nil { return fmt.Errorf("couldn't parse start index utxo: %w", err) } @@ -363,7 +363,7 @@ func (service *Service) GetUTXOs(_ *http.Request, args *GetUTXOsArgs, response * } response.EndIndex.Address = endAddress - response.EndIndex.Utxo = endUTXOID.String() + response.EndIndex.UTXO = endUTXOID.String() return nil } diff --git a/vms/platformvm/state.go b/vms/platformvm/state.go index 23c0dcf6d39c..f0968303f660 100644 --- a/vms/platformvm/state.go +++ b/vms/platformvm/state.go @@ -222,7 +222,7 @@ func (vm *VM) removeReferencingUTXO(db database.Database, addrBytes []byte, utxo // Returns at most [limit] UTXOs. // If [limit] <= 0 or [limit] > maxUTXOsToFetch, it is set to [maxUTXOsToFetch]. // Only returns UTXOs associated with addresses >= [startAddr]. -// For address [startAddr], only returns UTXOs whose IDs are greater than [startUtxoID]. +// For address [startAddr], only returns UTXOs whose IDs are greater than [startUTXOID]. // Returns: // * The fetched UTXOs // * The address associated with the last UTXO fetched From ae6e657ee883474dfe8d8f4e83d80bf6a9f4ce69 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Thu, 27 Aug 2020 01:01:34 -0400 Subject: [PATCH 19/47] moved duplicated code into snowstorm/common --- snow/consensus/snowstorm/common.go | 230 +++++++++++++++++++++++- snow/consensus/snowstorm/consensus.go | 3 + snow/consensus/snowstorm/directed.go | 250 +++++--------------------- snow/consensus/snowstorm/input.go | 248 ++++--------------------- snow/consensus/snowstorm/snowball.go | 30 +--- 5 files changed, 319 insertions(+), 442 deletions(-) diff --git a/snow/consensus/snowstorm/common.go b/snow/consensus/snowstorm/common.go index f84ce612f97f..b49b34f02006 100644 --- a/snow/consensus/snowstorm/common.go +++ b/snow/consensus/snowstorm/common.go @@ -4,11 +4,16 @@ package snowstorm import ( + "bytes" "fmt" + "sort" + "strings" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/snow/events" + "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/wrappers" sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) @@ -82,11 +87,183 @@ func (c *common) Finalized() bool { return numPreferences == 0 } +// shouldVote returns if the provided tx should be voted on to determine if it +// can be accepted. If the tx can be vacuously accepted, the tx will be accepted +// and will therefore not be valid to be voted on.
+func (c *common) shouldVote(con Consensus, tx Tx) (bool, error) { + if con.Issued(tx) { + // If the tx was previously inserted, it shouldn't be re-inserted. + return false, nil + } + + txID := tx.ID() + bytes := tx.Bytes() + + // Notify the IPC socket that this tx has been issued. + c.ctx.DecisionDispatcher.Issue(c.ctx.ChainID, txID, bytes) + + // Notify the metrics that this transaction is being issued. + c.metrics.Issued(txID) + + // If this tx has inputs, it needs to be voted on before being accepted. + if inputs := tx.InputIDs(); inputs.Len() != 0 { + return true, nil + } + + // Since this tx doesn't have any inputs, it's impossible for there to be + // any conflicting transactions. Therefore, this transaction is treated as + // vacuously accepted and doesn't need to be voted on. + + // Accept is called before notifying the IPC so that acceptances that + // cause fatal errors aren't sent to an IPC peer. + if err := tx.Accept(); err != nil { + return false, err + } + + // Notify the IPC socket that this tx has been accepted. + c.ctx.DecisionDispatcher.Accept(c.ctx.ChainID, txID, bytes) + + // Notify the metrics that this transaction was just accepted. + c.metrics.Accepted(txID) + return false, nil +} + +// accept the provided tx. +func (c *common) acceptTx(tx Tx) error { + // Accept is called before notifying the IPC so that acceptances that cause + // fatal errors aren't sent to an IPC peer. + if err := tx.Accept(); err != nil { + return err + } + + txID := tx.ID() + + // Notify the IPC socket that this tx has been accepted. + c.ctx.DecisionDispatcher.Accept(c.ctx.ChainID, txID, tx.Bytes()) + + // Update the metrics to account for this transaction's acceptance + c.metrics.Accepted(txID) + + // If there is a tx that was accepted pending on this tx, the ancestor + // should be notified that it doesn't need to block on this tx anymore. + c.pendingAccept.Fulfill(txID) + // If there is a tx that was issued pending on this tx, the ancestor tx + // doesn't need to be rejected because of this tx. + c.pendingReject.Abandon(txID) + return nil +} + +// reject the provided tx. +func (c *common) rejectTx(tx Tx) error { + // Reject is called before notifying the IPC so that rejections that + // cause fatal errors aren't sent to an IPC peer. + if err := tx.Reject(); err != nil { + return err + } + + txID := tx.ID() + + // Notify the IPC that the tx was rejected + c.ctx.DecisionDispatcher.Reject(c.ctx.ChainID, txID, tx.Bytes()) + + // Update the metrics to account for this transaction's rejection + c.metrics.Rejected(txID) + + // If there is a tx that was accepted pending on this tx, the ancestor + // tx can't be accepted. + c.pendingAccept.Abandon(txID) + // If there is a tx that was issued pending on this tx, the ancestor tx + // must be rejected. + c.pendingReject.Fulfill(txID) + return nil +} + +// registerAcceptor attempts to accept this tx once all its dependencies are +// accepted. If all the dependencies are already accepted, this function will +// immediately accept the tx. +func (c *common) registerAcceptor(con Consensus, tx Tx) { + txID := tx.ID() + + toAccept := &acceptor{ + g: con, + errs: &c.errs, + txID: txID, + } + + for _, dependency := range tx.Dependencies() { + if dependency.Status() != choices.Accepted { + // If the dependency isn't accepted, then it must be processing. + // This tx should be accepted after that dependency is accepted. Note that + // the dependencies can't already be rejected, because it is assumed + // that this tx is currently considered valid.
+ toAccept.deps.Add(dependency.ID()) + } + } + + // This tx is no longer being voted on, so we remove it from the voting set. + // This ensures that virtuous txs built on top of rogue txs don't force the + // node to treat the rogue tx as virtuous. + c.virtuousVoting.Remove(txID) + c.pendingAccept.Register(toAccept) +} + +// registerRejector rejects this tx if any of its dependencies are rejected. +func (c *common) registerRejector(con Consensus, tx Tx) { + // If a tx that this tx depends on is rejected, this tx should also be + // rejected. + toReject := &rejector{ + g: con, + errs: &c.errs, + txID: tx.ID(), + } + + // Register all of this tx's dependencies as possibilities to reject this tx. + for _, dependency := range tx.Dependencies() { + if dependency.Status() != choices.Accepted { + // If the dependency isn't accepted, then it must be processing. So, + // this tx should be rejected if any of these processing txs are + // rejected. Note that the dependencies can't already be rejected, + // because it is assumed that this tx is currently considered valid. + toReject.deps.Add(dependency.ID()) + } + } + + // Register these dependencies + c.pendingReject.Register(toReject) +} + +// acceptor implements Blockable +type acceptor struct { + g Consensus + errs *wrappers.Errs + deps ids.Set + rejected bool + txID ids.ID +} + +func (a *acceptor) Dependencies() ids.Set { return a.deps } + +func (a *acceptor) Fulfill(id ids.ID) { + a.deps.Remove(id) + a.Update() +} + +func (a *acceptor) Abandon(id ids.ID) { a.rejected = true } + +func (a *acceptor) Update() { + // If I was rejected or I am still waiting on dependencies to finish or an + // error has occurred, I shouldn't do anything. + if a.rejected || a.deps.Len() != 0 || a.errs.Errored() { + return + } + a.errs.Add(a.g.accept(a.txID)) +} + // rejector implements Blockable type rejector struct { g Consensus - deps ids.Set errs *wrappers.Errs + deps ids.Set rejected bool // true if the tx has been rejected txID ids.ID } @@ -102,5 +279,54 @@ func (r *rejector) Fulfill(ids.ID) { } func (*rejector) Abandon(ids.ID) {} +func (*rejector) Update() {} + +type snowballNode struct { + txID ids.ID + numSuccessfulPolls int + confidence int +} + +func (sb *snowballNode) String() string { + return fmt.Sprintf( + "SB(NumSuccessfulPolls = %d, Confidence = %d)", + sb.numSuccessfulPolls, + sb.confidence) +} + +type sortSnowballNodeData []*snowballNode + +func (sb sortSnowballNodeData) Less(i, j int) bool { + return bytes.Compare(sb[i].txID.Bytes(), sb[j].txID.Bytes()) == -1 +} +func (sb sortSnowballNodeData) Len() int { return len(sb) } +func (sb sortSnowballNodeData) Swap(i, j int) { sb[j], sb[i] = sb[i], sb[j] } -func (*rejector) Update() {} +func sortSnowballNodes(nodes []*snowballNode) { + sort.Sort(sortSnowballNodeData(nodes)) +} + +// ConsensusString converts a list of snowball nodes into a human-readable +// string.
+func ConsensusString(name string, nodes []*snowballNode) string {
+    // Sort the nodes so that the string representation is canonical
+    sortSnowballNodes(nodes)
+
+    sb := strings.Builder{}
+    sb.WriteString(name)
+    sb.WriteString("(")
+
+    format := fmt.Sprintf(
+        "\n    Choice[%s] = ID: %%50s SB(NumSuccessfulPolls = %%d, Confidence = %%d)",
+        formatting.IntFormat(len(nodes)-1))
+    for i, txNode := range nodes {
+        sb.WriteString(fmt.Sprintf(format,
+            i, txNode.txID, txNode.numSuccessfulPolls, txNode.confidence))
+    }
+
+    if len(nodes) > 0 {
+        sb.WriteString("\n")
+    }
+    sb.WriteString(")")
+    return sb.String()
+}
diff --git a/snow/consensus/snowstorm/consensus.go b/snow/consensus/snowstorm/consensus.go
index bca3894b7fc5..9cb746fd65ec 100644
--- a/snow/consensus/snowstorm/consensus.go
+++ b/snow/consensus/snowstorm/consensus.go
@@ -60,6 +60,9 @@ type Consensus interface {
 	// that this instance is no longer finalized.
 	Finalized() bool
 
+	// Accept the provided tx and remove it from the graph
+	accept(txID ids.ID) error
+
 	// Reject all the provided txs and remove them from the graph
 	reject(txIDs ...ids.ID) error
 }
diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go
index f55445e26d2c..6fd5406f26c7 100644
--- a/snow/consensus/snowstorm/directed.go
+++ b/snow/consensus/snowstorm/directed.go
@@ -4,15 +4,9 @@
 package snowstorm
 
 import (
-	"bytes"
-	"fmt"
-	"sort"
-	"strings"
-
 	"github.com/ava-labs/gecko/ids"
 	"github.com/ava-labs/gecko/snow"
 	"github.com/ava-labs/gecko/snow/choices"
-	"github.com/ava-labs/gecko/utils/formatting"
 
 	sbcon "github.com/ava-labs/gecko/snow/consensus/snowball"
 )
@@ -112,46 +106,17 @@ func (dg *Directed) Conflicts(tx Tx) ids.Set {
 // Add implements the Consensus interface
 func (dg *Directed) Add(tx Tx) error {
-	if dg.Issued(tx) {
-		// If the tx was previously inserted, it shouldn't be re-inserted.
-		return nil
+	if shouldVote, err := dg.shouldVote(dg, tx); !shouldVote || err != nil {
+		return err
 	}
 
 	txID := tx.ID()
-	bytes := tx.Bytes()
-
-	// Notify the IPC socket that this tx has been issued.
-	dg.ctx.DecisionDispatcher.Issue(dg.ctx.ChainID, txID, bytes)
-
-	// Notify the metrics that this transaction was just issued.
-	dg.metrics.Issued(txID)
-
-	inputs := tx.InputIDs()
-
-	// If this tx doesn't have any inputs, it's impossible for there to be any
-	// conflicting transactions. Therefore, this transaction is treated as
-	// vacuously accepted.
-	if inputs.Len() == 0 {
-		// Accept is called before notifying the IPC so that acceptances that
-		// cause fatal errors aren't sent to an IPC peer.
-		if err := tx.Accept(); err != nil {
-			return err
-		}
-
-		// Notify the IPC socket that this tx has been accepted.
-		dg.ctx.DecisionDispatcher.Accept(dg.ctx.ChainID, txID, bytes)
-
-		// Notify the metrics that this transaction was just accepted.
-		dg.metrics.Accepted(txID)
-		return nil
-	}
-
 	txNode := &directedTx{tx: tx}
 
 	// For each UTXO consumed by the tx:
 	// * Add edges between this tx and txs that consume this UTXO
 	// * Mark this tx as attempting to consume this UTXO
-	for _, inputID := range inputs.List() {
+	for _, inputID := range tx.InputIDs().List() {
 		inputKey := inputID.Key()
 
 		// Get the set of txs that are currently processing that also consume
@@ -208,29 +173,7 @@ func (dg *Directed) Add(tx Tx) error {
 
 	// If a tx that this tx depends on is rejected, this tx should also be
 	// rejected.
-	toReject := &rejector{
-		g:    dg,
-		errs: &dg.errs,
-		txID: txID,
-	}
-
-	// Register all of this txs dependencies as possibilities to reject this tx.
- for _, dependency := range tx.Dependencies() { - if dependency.Status() != choices.Accepted { - // If the dependency isn't accepted, then it must be processing. So, - // this tx should be rejected if any of these processing txs are - // rejected. Note that the dependencies can't already be rejected, - // because it is assumped that this tx is currently considered - // valid. - toReject.deps.Add(dependency.ID()) - } - } - - // Register these dependencies - dg.pendingReject.Register(toReject) - - // Registering the rejector can't result in an error, so we can safely - // return nil here. + dg.registerRejector(dg, tx) return nil } @@ -281,7 +224,11 @@ func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) { // be accepted, we shouldn't register it again. if !txNode.pendingAccept && txNode.Finalized(dg.params.BetaVirtuous, dg.params.BetaRogue) { - dg.deferAcceptance(txNode) + // Mark that this tx is pending acceptance so acceptance is only + // registered once. + txNode.pendingAccept = true + + dg.registerAcceptor(dg, txNode.tx) if dg.errs.Errored() { return changed, dg.errs.Err } @@ -300,56 +247,46 @@ func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) { } func (dg *Directed) String() string { - nodes := make([]*directedTx, 0, len(dg.txs)) - for _, tx := range dg.txs { - nodes = append(nodes, tx) - } - // Sort the nodes so that the string representation is canonical - sortTxNodes(nodes) - - sb := strings.Builder{} - sb.WriteString("DG(") - - format := fmt.Sprintf( - "\n Choice[%s] = ID: %%50s %%s", - formatting.IntFormat(len(dg.txs)-1)) - for i, txNode := range nodes { - sb.WriteString(fmt.Sprintf(format, - i, txNode.tx.ID(), txNode.snowball.CurrentString(dg.currentVote))) + nodes := make([]*snowballNode, 0, len(dg.txs)) + for _, txNode := range dg.txs { + nodes = append(nodes, &snowballNode{ + txID: txNode.tx.ID(), + numSuccessfulPolls: txNode.numSuccessfulPolls, + confidence: txNode.Confidence(dg.currentVote), + }) } + return ConsensusString("DG", nodes) +} - if len(nodes) > 0 { - sb.WriteString("\n") +// accept the named txID and remove it from the graph +func (dg *Directed) accept(txID ids.ID) error { + txKey := txID.Key() + txNode := dg.txs[txKey] + // We are accepting the tx, so we should remove the node from the graph. + delete(dg.txs, txKey) + + // This tx is consuming all the UTXOs from its inputs, so we can prune them + // all from memory + for _, inputID := range txNode.tx.InputIDs().List() { + delete(dg.utxos, inputID.Key()) } - sb.WriteString(")") - return sb.String() -} -// deferAcceptance attempts to accept this tx once all its dependencies are -// accepted. If all the dependencies are already accepted, this function will -// immediately accept the tx. -func (dg *Directed) deferAcceptance(txNode *directedTx) { - // Mark that this tx is pending acceptance so this function won't be called - // again - txNode.pendingAccept = true - - toAccept := &directedAccepter{ - dg: dg, - txNode: txNode, + // This tx is now accepted, so it shouldn't be part of the virtuous set or + // the preferred set. Its status as Accepted implies these descriptions. + dg.virtuous.Remove(txID) + dg.preferences.Remove(txID) + + // Reject all the txs that conflicted with this tx. + if err := dg.reject(txNode.ins.List()...); err != nil { + return err } - for _, dependency := range txNode.tx.Dependencies() { - if dependency.Status() != choices.Accepted { - // If the dependency isn't accepted, then it must be processing. - // This tx should be accepted after this tx is accepted. 
-			toAccept.deps.Add(dependency.ID())
-		}
+	// While it is typically true that a tx that is being accepted is preferred,
+	// it is possible for this to not be the case. So this is handled for
+	// completeness.
+	if err := dg.reject(txNode.outs.List()...); err != nil {
+		return err
 	}
-
-	// This tx is no longer being voted on, so we remove it from the voting set.
-	// This ensures that virtuous txs built on top of rogue txs don't force the
-	// node to treat the rogue tx as virtuous.
-	dg.virtuousVoting.Remove(txNode.tx.ID())
-	dg.pendingAccept.Register(toAccept)
+	return dg.acceptTx(txNode.tx)
 }
 
 // reject all the named txIDs and remove them from the graph
@@ -392,24 +329,9 @@ func (dg *Directed) reject(conflictIDs ...ids.ID) error {
 		dg.removeConflict(conflictID, conflict.ins.List()...)
 		dg.removeConflict(conflictID, conflict.outs.List()...)
 
-		// Reject is called before notifying the IPC so that rejections that
-		// cause fatal errors aren't sent to an IPC peer.
-		if err := conflict.tx.Reject(); err != nil {
+		if err := dg.rejectTx(conflict.tx); err != nil {
 			return err
 		}
-
-		// Notify the IPC that the tx was rejected
-		dg.ctx.DecisionDispatcher.Reject(dg.ctx.ChainID, conflict.tx.ID(), conflict.tx.Bytes())
-
-		// Update the metrics to account for this transaction's rejection
-		dg.metrics.Rejected(conflictID)
-
-		// If there is a tx that was accepted pending on this tx, the ancestor
-		// tx can't be accepted.
-		dg.pendingAccept.Abandon(conflictID)
-		// If there is a tx that was issued pending on this tx, the ancestor tx
-		// must be rejected.
-		dg.pendingReject.Fulfill(conflictID)
 	}
 	return nil
 }
@@ -472,87 +394,3 @@ func (dg *Directed) removeConflict(txID ids.ID, neighborIDs ...ids.ID) {
 		}
 	}
 }
-
-type directedAccepter struct {
-	dg       *Directed
-	deps     ids.Set
-	rejected bool
-	txNode   *directedTx
-}
-
-func (a *directedAccepter) Dependencies() ids.Set { return a.deps }
-
-func (a *directedAccepter) Fulfill(id ids.ID) {
-	a.deps.Remove(id)
-	a.Update()
-}
-
-func (a *directedAccepter) Abandon(id ids.ID) { a.rejected = true }
-
-func (a *directedAccepter) Update() {
-	// If I was rejected or I am still waiting on dependencies to finish or an
-	// error has occurred, I shouldn't do anything.
-	if a.rejected || a.deps.Len() != 0 || a.dg.errs.Errored() {
-		return
-	}
-
-	txID := a.txNode.tx.ID()
-	// We are accepting the tx, so we should remove the node from the graph.
-	delete(a.dg.txs, txID.Key())
-
-	// This tx is consuming all the UTXOs from its inputs, so we can prune them
-	// all from memory
-	for _, inputID := range a.txNode.tx.InputIDs().List() {
-		delete(a.dg.utxos, inputID.Key())
-	}
-
-	// This tx is now accepted, so it shouldn't be part of the virtuous set or
-	// the preferred set. Its status as Accepted implies these descriptions.
-	a.dg.virtuous.Remove(txID)
-	a.dg.preferences.Remove(txID)
-
-	// Reject all the txs that conflicted with this tx.
-	if err := a.dg.reject(a.txNode.ins.List()...); err != nil {
-		a.dg.errs.Add(err)
-		return
-	}
-	// While it is typically true that a tx this is being accepted is preferred,
-	// it is possible for this to not be the case. So this is handled for
-	// completeness.
-	if err := a.dg.reject(a.txNode.outs.List()...); err != nil {
-		a.dg.errs.Add(err)
-		return
-	}
-
-	// Accept is called before notifying the IPC so that acceptances that cause
-	// fatal errors aren't sent to an IPC peer.
-	if err := a.txNode.tx.Accept(); err != nil {
-		a.dg.errs.Add(err)
-		return
-	}
-
-	// Notify the IPC socket that this tx has been accepted.
- a.dg.ctx.DecisionDispatcher.Accept(a.dg.ctx.ChainID, txID, a.txNode.tx.Bytes()) - - // Update the metrics to account for this transaction's acceptance - a.dg.metrics.Accepted(txID) - - // If there is a tx that was accepted pending on this tx, the ancestor - // should be notified that it doesn't need to block on this tx anymore. - a.dg.pendingAccept.Fulfill(txID) - // If there is a tx that was issued pending on this tx, the ancestor tx - // doesn't need to be rejected because of this tx. - a.dg.pendingReject.Abandon(txID) -} - -type sortTxNodeData []*directedTx - -func (tnd sortTxNodeData) Less(i, j int) bool { - return bytes.Compare( - tnd[i].tx.ID().Bytes(), - tnd[j].tx.ID().Bytes()) == -1 -} -func (tnd sortTxNodeData) Len() int { return len(tnd) } -func (tnd sortTxNodeData) Swap(i, j int) { tnd[j], tnd[i] = tnd[i], tnd[j] } - -func sortTxNodes(nodes []*directedTx) { sort.Sort(sortTxNodeData(nodes)) } diff --git a/snow/consensus/snowstorm/input.go b/snow/consensus/snowstorm/input.go index c5fc5405e379..3ea7133c9769 100644 --- a/snow/consensus/snowstorm/input.go +++ b/snow/consensus/snowstorm/input.go @@ -4,16 +4,11 @@ package snowstorm import ( - "bytes" - "fmt" "math" - "sort" - "strings" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/utils/formatting" sbcon "github.com/ava-labs/gecko/snow/consensus/snowball" ) @@ -125,40 +120,11 @@ func (ig *Input) Conflicts(tx Tx) ids.Set { // Add implements the ConflictGraph interface func (ig *Input) Add(tx Tx) error { - if ig.Issued(tx) { - // If the tx was previously inserted, it shouldn't be re-inserted. - return nil + if shouldVote, err := ig.shouldVote(ig, tx); !shouldVote || err != nil { + return err } txID := tx.ID() - bytes := tx.Bytes() - - // Notify the IPC socket that this tx has been issued. - ig.ctx.DecisionDispatcher.Issue(ig.ctx.ChainID, txID, bytes) - - // Notify the metrics that this transaction was just issued. - ig.metrics.Issued(txID) - - inputs := tx.InputIDs() - - // If this tx doesn't have any inputs, it's impossible for there to be any - // conflicting transactions. Therefore, this transaction is treated as - // vacuously accepted. - if inputs.Len() == 0 { - // Accept is called before notifying the IPC so that acceptances that - // cause fatal errors aren't sent to an IPC peer. - if err := tx.Accept(); err != nil { - return err - } - - // Notify the IPC socket that this tx has been accepted. - ig.ctx.DecisionDispatcher.Accept(ig.ctx.ChainID, txID, bytes) - - // Notify the metrics that this transaction was just accepted. - ig.metrics.Accepted(txID) - return nil - } - txNode := &inputTx{tx: tx} // This tx should be added to the virtuous sets and preferred sets if this @@ -168,7 +134,7 @@ func (ig *Input) Add(tx Tx) error { // For each UTXO consumed by the tx: // * Mark this tx as attempting to consume this UTXO // * Mark the UTXO as being rogue if applicable - for _, inputID := range inputs.List() { + for _, inputID := range tx.InputIDs().List() { inputKey := inputID.Key() utxo, exists := ig.utxos[inputKey] if exists { @@ -212,29 +178,7 @@ func (ig *Input) Add(tx Tx) error { // If a tx that this tx depends on is rejected, this tx should also be // rejected. - toReject := &rejector{ - g: ig, - errs: &ig.errs, - txID: txID, - } - - // Register all of this txs dependencies as possibilities to reject this tx. 
- for _, dependency := range tx.Dependencies() { - if dependency.Status() != choices.Accepted { - // If the dependency isn't accepted, then it must be processing. So, - // this tx should be rejected if any of these processing txs are - // rejected. Note that the dependencies can't already be rejected, - // because it is assumped that this tx is currently considered - // valid. - toReject.deps.Add(dependency.ID()) - } - } - - // Register these dependencies - ig.pendingReject.Register(toReject) - - // Registering the rejector can't result in an error, so we can safely - // return nil here. + ig.registerRejector(ig, tx) return nil } @@ -353,7 +297,11 @@ func (ig *Input) RecordPoll(votes ids.Bag) (bool, error) { if !txNode.pendingAccept && ((!rogue && confidence >= ig.params.BetaVirtuous) || confidence >= ig.params.BetaRogue) { - ig.deferAcceptance(txNode) + // Mark that this tx is pending acceptance so acceptance is only + // registered once. + txNode.pendingAccept = true + + ig.registerAcceptor(ig, txNode.tx) if ig.errs.Errored() { return changed, ig.errs.Err } @@ -368,14 +316,14 @@ func (ig *Input) RecordPoll(votes ids.Bag) (bool, error) { } func (ig *Input) String() string { - nodes := make([]tempNode, 0, len(ig.txs)) + nodes := make([]*snowballNode, 0, len(ig.txs)) for _, tx := range ig.txs { - id := tx.tx.ID() + txID := tx.tx.ID() confidence := ig.params.BetaRogue for _, inputID := range tx.tx.InputIDs().List() { input := ig.utxos[inputID.Key()] - if input.lastVote != ig.currentVote || !id.Equals(input.color) { + if input.lastVote != ig.currentVote || !txID.Equals(input.color) { confidence = 0 break } @@ -384,58 +332,41 @@ func (ig *Input) String() string { } } - nodes = append(nodes, tempNode{ - id: id, + nodes = append(nodes, &snowballNode{ + txID: txID, numSuccessfulPolls: tx.numSuccessfulPolls, confidence: confidence, }) } - // Sort the nodes so that the string representation is canonical - sortTempNodes(nodes) + return ConsensusString("IG", nodes) +} - sb := strings.Builder{} - sb.WriteString("IG(") +// accept the named txID and remove it from the graph +func (ig *Input) accept(txID ids.ID) error { + txKey := txID.Key() + txNode := ig.txs[txKey] + // We are accepting the tx, so we should remove the node from the graph. + delete(ig.txs, txID.Key()) - format := fmt.Sprintf( - "\n Choice[%s] = ID: %%50s %%s", - formatting.IntFormat(len(nodes)-1)) - for i, cn := range nodes { - sb.WriteString(fmt.Sprintf(format, i, cn.id, &cn)) - } + // Get the conflicts of this tx so that we can reject them + conflicts := ig.Conflicts(txNode.tx) - if len(nodes) > 0 { - sb.WriteString("\n") + // This tx is consuming all the UTXOs from its inputs, so we can prune them + // all from memory + for _, inputID := range txNode.tx.InputIDs().List() { + delete(ig.utxos, inputID.Key()) } - sb.WriteString(")") - return sb.String() -} -// deferAcceptance attempts to accept this tx once all its dependencies are -// accepted. If all the dependencies are already accepted, this function will -// immediately accept the tx. -func (ig *Input) deferAcceptance(txNode *inputTx) { - // Mark that this tx is pending acceptance so this function won't be called - // again - txNode.pendingAccept = true - - toAccept := &inputAccepter{ - ig: ig, - txNode: txNode, - } + // This tx is now accepted, so it shouldn't be part of the virtuous set or + // the preferred set. Its status as Accepted implies these descriptions. 
+ ig.virtuous.Remove(txID) + ig.preferences.Remove(txID) - for _, dependency := range txNode.tx.Dependencies() { - if dependency.Status() != choices.Accepted { - // If the dependency isn't accepted, then it must be processing. - // This tx should be accepted after this tx is accepted. - toAccept.deps.Add(dependency.ID()) - } + // Reject all the txs that conflicted with this tx. + if err := ig.reject(conflicts.List()...); err != nil { + return err } - - // This tx is no longer being voted on, so we remove it from the voting set. - // This ensures that virtuous txs built on top of rogue txs don't force the - // node to treat the rogue tx as virtuous. - ig.virtuousVoting.Remove(txNode.tx.ID()) - ig.pendingAccept.Register(toAccept) + return ig.acceptTx(txNode.tx) } // reject all the named txIDs and remove them from their conflict sets @@ -454,24 +385,9 @@ func (ig *Input) reject(conflictIDs ...ids.ID) error { // Remove this tx from all the conflict sets it's currently in ig.removeConflict(conflictID, conflict.tx.InputIDs().List()...) - // Reject is called before notifying the IPC so that rejections that - // cause fatal errors aren't sent to an IPC peer. - if err := conflict.tx.Reject(); err != nil { + if err := ig.rejectTx(conflict.tx); err != nil { return err } - - // Notify the IPC that the tx was rejected - ig.ctx.DecisionDispatcher.Reject(ig.ctx.ChainID, conflict.tx.ID(), conflict.tx.Bytes()) - - // Update the metrics to account for this transaction's rejection - ig.metrics.Rejected(conflictID) - - // If there is a tx that was accepted pending on this tx, the ancestor - // tx can't be accepted. - ig.pendingAccept.Abandon(conflictID) - // If there is a tx that was issued pending on this tx, the ancestor tx - // must be rejected. - ig.pendingReject.Fulfill(conflictID) } return nil } @@ -548,93 +464,3 @@ func (ig *Input) removeConflict(txID ids.ID, inputIDs ...ids.ID) { } } } - -type inputAccepter struct { - ig *Input - deps ids.Set - rejected bool - txNode *inputTx -} - -func (a *inputAccepter) Dependencies() ids.Set { return a.deps } - -func (a *inputAccepter) Fulfill(id ids.ID) { - a.deps.Remove(id) - a.Update() -} - -func (a *inputAccepter) Abandon(id ids.ID) { a.rejected = true } - -func (a *inputAccepter) Update() { - // If I was rejected or I am still waiting on dependencies to finish or an - // error has occurred, I shouldn't do anything. - if a.rejected || a.deps.Len() != 0 || a.ig.errs.Errored() { - return - } - - txID := a.txNode.tx.ID() - // We are accepting the tx, so we should remove the node from the graph. - delete(a.ig.txs, txID.Key()) - - // Get the conflicts of this tx so that we can reject them - conflicts := a.ig.Conflicts(a.txNode.tx) - - // This tx is consuming all the UTXOs from its inputs, so we can prune them - // all from memory - for _, inputID := range a.txNode.tx.InputIDs().List() { - delete(a.ig.utxos, inputID.Key()) - } - - // This tx is now accepted, so it shouldn't be part of the virtuous set or - // the preferred set. Its status as Accepted implies these descriptions. - a.ig.virtuous.Remove(txID) - a.ig.preferences.Remove(txID) - - // Reject all the txs that conflicted with this tx. - if err := a.ig.reject(conflicts.List()...); err != nil { - a.ig.errs.Add(err) - return - } - - // Accept is called before notifying the IPC so that acceptances that cause - // fatal errors aren't sent to an IPC peer. - if err := a.txNode.tx.Accept(); err != nil { - a.ig.errs.Add(err) - return - } - - // Notify the IPC socket that this tx has been accepted. 
- a.ig.ctx.DecisionDispatcher.Accept(a.ig.ctx.ChainID, txID, a.txNode.tx.Bytes()) - - // Update the metrics to account for this transaction's acceptance - a.ig.metrics.Accepted(txID) - - // If there is a tx that was accepted pending on this tx, the ancestor - // should be notified that it doesn't need to block on this tx anymore. - a.ig.pendingAccept.Fulfill(txID) - // If there is a tx that was issued pending on this tx, the ancestor tx - // doesn't need to be rejected because of this tx. - a.ig.pendingReject.Abandon(txID) -} - -type tempNode struct { - id ids.ID - numSuccessfulPolls, confidence int -} - -func (tn *tempNode) String() string { - return fmt.Sprintf( - "SB(NumSuccessfulPolls = %d, Confidence = %d)", - tn.numSuccessfulPolls, - tn.confidence) -} - -type sortTempNodeData []tempNode - -func (tnd sortTempNodeData) Less(i, j int) bool { - return bytes.Compare(tnd[i].id.Bytes(), tnd[j].id.Bytes()) == -1 -} -func (tnd sortTempNodeData) Len() int { return len(tnd) } -func (tnd sortTempNodeData) Swap(i, j int) { tnd[j], tnd[i] = tnd[i], tnd[j] } - -func sortTempNodes(nodes []tempNode) { sort.Sort(sortTempNodeData(nodes)) } diff --git a/snow/consensus/snowstorm/snowball.go b/snow/consensus/snowstorm/snowball.go index 8037ed5842a9..178e1184a849 100644 --- a/snow/consensus/snowstorm/snowball.go +++ b/snow/consensus/snowstorm/snowball.go @@ -3,10 +3,6 @@ package snowstorm -import ( - "fmt" -) - type snowball struct { // numSuccessfulPolls is the number of times this choice was the successful // result of a network poll @@ -24,6 +20,13 @@ type snowball struct { rogue bool } +func (sb *snowball) Confidence(currentVote int) int { + if sb.lastVote != currentVote { + return 0 + } + return sb.confidence +} + func (sb *snowball) RecordSuccessfulPoll(currentVote int) { // If this choice wasn't voted for during the last poll, the confidence // should have been reset during the last poll. So, we reset it now. 
@@ -46,22 +49,3 @@ func (sb *snowball) Finalized(betaVirtuous, betaRogue int) bool { return (!sb.rogue && sb.confidence >= betaVirtuous) || sb.confidence >= betaRogue } - -func (sb *snowball) CurrentString(currentVote int) string { - confidence := sb.confidence - if sb.lastVote != currentVote { - confidence = 0 - } - return fmt.Sprintf( - "SB(NumSuccessfulPolls = %d, Confidence = %d)", - sb.numSuccessfulPolls, - confidence) -} - -func (sb *snowball) String() string { - return fmt.Sprintf( - "SB(NumSuccessfulPolls = %d, Confidence = %d, As of %d)", - sb.numSuccessfulPolls, - sb.confidence, - sb.lastVote) -} From e06aaa06951d78813996105f90aa54990ebbe275 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Thu, 27 Aug 2020 01:07:47 -0400 Subject: [PATCH 20/47] formatted --- snow/consensus/snowstorm/consensus_test.go | 1 + snow/consensus/snowstorm/metrics.go | 5 ++++- snow/consensus/snowstorm/network_test.go | 7 ++++++- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/snow/consensus/snowstorm/consensus_test.go b/snow/consensus/snowstorm/consensus_test.go index a38af34c97f5..7d117d31f392 100644 --- a/snow/consensus/snowstorm/consensus_test.go +++ b/snow/consensus/snowstorm/consensus_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" "github.com/ava-labs/gecko/ids" diff --git a/snow/consensus/snowstorm/metrics.go b/snow/consensus/snowstorm/metrics.go index 36a5332c43f3..7d2e1b7d3ff1 100644 --- a/snow/consensus/snowstorm/metrics.go +++ b/snow/consensus/snowstorm/metrics.go @@ -36,7 +36,10 @@ type metrics struct { } // Initialize implements the Engine interface -func (m *metrics) Initialize(namespace string, registerer prometheus.Registerer) error { +func (m *metrics) Initialize( + namespace string, + registerer prometheus.Registerer, +) error { m.processing = make(map[[32]byte]time.Time) m.numProcessing = prometheus.NewGauge(prometheus.GaugeOpts{ diff --git a/snow/consensus/snowstorm/network_test.go b/snow/consensus/snowstorm/network_test.go index f1e3d77ab38a..03b5b4446a94 100644 --- a/snow/consensus/snowstorm/network_test.go +++ b/snow/consensus/snowstorm/network_test.go @@ -32,7 +32,12 @@ func (n *Network) shuffleConsumers() { n.consumers = consumers } -func (n *Network) Initialize(params sbcon.Parameters, numColors, colorsPerConsumer, maxInputConflicts int) { +func (n *Network) Initialize( + params sbcon.Parameters, + numColors, + colorsPerConsumer, + maxInputConflicts int, +) { n.params = params idCount := uint64(0) From 91c3c32f971ef0df5e1ff10f322b2bd19f3cb383 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Thu, 27 Aug 2020 01:24:20 -0400 Subject: [PATCH 21/47] Added tests for updating the Avalanche frontiers in snowstorm --- snow/consensus/snowstorm/consensus_test.go | 93 ++++++++++++++++------ 1 file changed, 69 insertions(+), 24 deletions(-) diff --git a/snow/consensus/snowstorm/consensus_test.go b/snow/consensus/snowstorm/consensus_test.go index 7d117d31f392..287b27a6699c 100644 --- a/snow/consensus/snowstorm/consensus_test.go +++ b/snow/consensus/snowstorm/consensus_test.go @@ -224,8 +224,10 @@ func LeftoverInputTest(t *testing.T, factory Factory) { r := ids.Bag{} r.SetThreshold(2) r.AddCount(Red.ID(), 2) - if _, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(r); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") 
} else if !graph.Finalized() { @@ -269,8 +271,10 @@ func LowerConfidenceTest(t *testing.T, factory Factory) { r := ids.Bag{} r.SetThreshold(2) r.AddCount(Red.ID(), 2) - if _, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(r); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 1 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Blue.ID()) { @@ -316,8 +320,10 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) { r := ids.Bag{} r.SetThreshold(2) r.AddCount(Red.ID(), 2) - if _, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(r); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 1 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Alpha.ID()) { @@ -360,8 +366,10 @@ func IndependentTest(t *testing.T, factory Factory) { ra.SetThreshold(2) ra.AddCount(Red.ID(), 2) ra.AddCount(Alpha.ID(), 2) - if _, err := graph.RecordPoll(ra); err != nil { + if updated, err := graph.RecordPoll(ra); err != nil { t.Fatal(err) + } else if updated { + t.Fatalf("Shouldn't have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Red.ID()) { @@ -370,8 +378,10 @@ func IndependentTest(t *testing.T, factory Factory) { t.Fatalf("Wrong preference. Expected %s", Alpha.ID()) } else if graph.Finalized() { t.Fatalf("Finalized too early") - } else if _, err := graph.RecordPoll(ra); err != nil { + } else if updated, err := graph.RecordPoll(ra); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if !graph.Finalized() { @@ -539,8 +549,10 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { g := ids.Bag{} g.Add(Green.ID()) - if _, err := graph.RecordPoll(g); err != nil { + if updated, err := graph.RecordPoll(g); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { @@ -557,8 +569,10 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { rp := ids.Bag{} rp.Add(Red.ID(), purple.ID()) - if _, err := graph.RecordPoll(rp); err != nil { + if updated, err := graph.RecordPoll(rp); err != nil { t.Fatal(err) + } else if updated { + t.Fatalf("Shouldn't have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { @@ -575,8 +589,10 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { r := ids.Bag{} r.Add(Red.ID()) - if _, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(r); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if Red.Status() != choices.Accepted { @@ -654,8 +670,10 @@ func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { g := ids.Bag{} g.Add(Green.ID()) - if _, err := graph.RecordPoll(g); err != nil { + if updated, err := graph.RecordPoll(g); err != nil { t.Fatal(err) + } 
else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { @@ -672,8 +690,10 @@ func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { p := ids.Bag{} p.Add(purple.ID()) - if _, err := graph.RecordPoll(p); err != nil { + if updated, err := graph.RecordPoll(p); err != nil { t.Fatal(err) + } else if updated { + t.Fatalf("Shouldn't have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { @@ -690,8 +710,10 @@ func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { rp := ids.Bag{} rp.Add(Red.ID(), purple.ID()) - if _, err := graph.RecordPoll(rp); err != nil { + if updated, err := graph.RecordPoll(rp); err != nil { t.Fatal(err) + } else if updated { + t.Fatalf("Shouldn't have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { @@ -708,8 +730,10 @@ func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { r := ids.Bag{} r.Add(Red.ID()) - if _, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(r); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if Red.Status() != choices.Accepted { @@ -771,8 +795,10 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { gp := ids.Bag{} gp.Add(Green.ID(), purple.ID()) - if _, err := graph.RecordPoll(gp); err != nil { + if updated, err := graph.RecordPoll(gp); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { @@ -787,8 +813,10 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. %s should be %s", Blue.ID(), choices.Processing) } else if purple.Status() != choices.Processing { t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Processing) - } else if _, err := graph.RecordPoll(gp); err != nil { + } else if updated, err := graph.RecordPoll(gp); err != nil { t.Fatal(err) + } else if !updated { + t.Fatalf("Should have updated the frontiers") } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if Red.Status() != choices.Rejected { @@ -935,8 +963,10 @@ func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) { votes := ids.Bag{} votes.Add(rogue1.ID()) votes.Add(virtuous.ID()) - if _, err := graph.RecordPoll(votes); err != nil { + if updated, err := graph.RecordPoll(votes); err != nil { t.Fatal(err) + } else if updated { + t.Fatalf("Shouldn't have updated the frontiers") } else if status := rogue1.Status(); status != choices.Processing { t.Fatalf("Rogue Tx is %s expected %s", status, choices.Processing) } else if status := rogue2.Status(); status != choices.Processing { @@ -1117,11 +1147,13 @@ func UTXOCleanupTest(t *testing.T, factory Factory) { redVotes := ids.Bag{} redVotes.Add(Red.ID()) - _, err = graph.RecordPoll(redVotes) + changed, err := graph.RecordPoll(redVotes) assert.NoError(t, err) + assert.False(t, changed, "shouldn't have accepted the red tx") - _, err = graph.RecordPoll(redVotes) + changed, err = graph.RecordPoll(redVotes) assert.NoError(t, err) + assert.True(t, changed, "should have accepted the red tx") assert.Equal(t, choices.Accepted, Red.Status()) assert.Equal(t, choices.Rejected, Green.Status()) @@ -1131,8 +1163,9 @@ func UTXOCleanupTest(t *testing.T, factory Factory) { blueVotes := ids.Bag{} blueVotes.Add(Blue.ID()) - _, err = graph.RecordPoll(blueVotes) + changed, err = graph.RecordPoll(blueVotes) assert.NoError(t, err) + assert.True(t, changed, "should have accepted the blue tx") assert.Equal(t, choices.Accepted, Blue.Status()) } @@ -1172,8 +1205,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { rb.SetThreshold(2) rb.AddCount(Red.ID(), 2) rb.AddCount(Blue.ID(), 2) - if _, err := graph.RecordPoll(rb); err != nil { + if changed, err := graph.RecordPoll(rb); err != nil { t.Fatal(err) + } else if !changed { + t.Fatalf("Should have caused the frontiers to recalculate") } else if err := graph.Add(Blue); err != nil { t.Fatal(err) } @@ -1204,8 +1239,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { ga.SetThreshold(2) ga.AddCount(Green.ID(), 2) ga.AddCount(Alpha.ID(), 2) - if _, err := graph.RecordPoll(ga); err != nil { + if changed, err := graph.RecordPoll(ga); err != nil { t.Fatal(err) + } else if changed { + t.Fatalf("Shouldn't have caused the frontiers to recalculate") } { @@ -1231,8 +1268,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { } empty := ids.Bag{} - if _, err := graph.RecordPoll(empty); err != nil { + if changed, err := graph.RecordPoll(empty); err != nil { t.Fatal(err) + } else if changed { + t.Fatalf("Shouldn't have caused the frontiers to recalculate") } { @@ -1255,8 +1294,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Wrong preference. Expected %s", Blue.ID()) } else if graph.Finalized() { t.Fatalf("Finalized too early") - } else if _, err := graph.RecordPoll(ga); err != nil { + } else if changed, err := graph.RecordPoll(ga); err != nil { t.Fatal(err) + } else if !changed { + t.Fatalf("Should have caused the frontiers to recalculate") } { @@ -1279,8 +1320,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Wrong preference. 
Expected %s", Alpha.ID()) } else if graph.Finalized() { t.Fatalf("Finalized too early") - } else if _, err := graph.RecordPoll(ga); err != nil { + } else if changed, err := graph.RecordPoll(ga); err != nil { t.Fatal(err) + } else if !changed { + t.Fatalf("Should have caused the frontiers to recalculate") } { @@ -1302,8 +1345,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("%s should have been rejected", Red.ID()) } else if Blue.Status() != choices.Rejected { t.Fatalf("%s should have been rejected", Blue.ID()) - } else if _, err := graph.RecordPoll(rb); err != nil { + } else if changed, err := graph.RecordPoll(rb); err != nil { t.Fatal(err) + } else if changed { + t.Fatalf("Shouldn't have caused the frontiers to recalculate") } { From 0e2a3f0a7ed1730b0598425cea93783f8081c812 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Thu, 27 Aug 2020 15:42:49 -0400 Subject: [PATCH 22/47] lower log level --- snow/networking/router/handler.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/snow/networking/router/handler.go b/snow/networking/router/handler.go index 2ccc397f1354..0d17b8340835 100644 --- a/snow/networking/router/handler.go +++ b/snow/networking/router/handler.go @@ -8,6 +8,8 @@ import ( "sync" "time" + "github.com/ava-labs/gecko/utils/constants" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/gecko/ids" @@ -235,7 +237,12 @@ func (h *Handler) dispatchMsg(msg message) { h.ctx.Lock.Lock() defer h.ctx.Lock.Unlock() - h.ctx.Log.Debug("Forwarding message to consensus: %s", msg) + if msg.requestID != constants.GossipMsgRequestID { + h.ctx.Log.Debug("Forwarding message to consensus: %s", msg) + } else { + h.ctx.Log.Verbo("Forwarding message to consensus: %s", msg) + } + var ( err error ) From d3b049b49a01c1815c6d867ecca9239f531fb23c Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Thu, 27 Aug 2020 18:27:40 -0400 Subject: [PATCH 23/47] WIP. Re-doing how validator lists are persisted in database --- vms/platformvm/add_delegator_tx.go | 39 +---- vms/platformvm/add_subnet_validator_tx.go | 78 ++------- vms/platformvm/add_validator_tx.go | 30 +--- vms/platformvm/event_heap.go | 1 + vms/platformvm/reward_validator_tx.go | 12 +- vms/platformvm/state.go | 193 ++++++++++++++++++++++ vms/platformvm/vm.go | 38 ++--- 7 files changed, 247 insertions(+), 144 deletions(-) diff --git a/vms/platformvm/add_delegator_tx.go b/vms/platformvm/add_delegator_tx.go index 5372d7e208cd..b1c3e925bbff 100644 --- a/vms/platformvm/add_delegator_tx.go +++ b/vms/platformvm/add_delegator_tx.go @@ -127,34 +127,11 @@ func (tx *UnsignedAddDelegatorTx) SemanticVerify( validatorStartTime)} } - // Ensure that the period this delegator is running is a subset of the time - // the validator is running. First, see if the validator is currently - // running. 
-	currentValidators, err := vm.getCurrentValidators(db, constants.PrimaryNetworkID)
-	if err != nil {
-		return nil, nil, nil, nil, permError{fmt.Errorf("couldn't get current validators of primary network: %w", err)}
-	}
-	pendingValidators, err := vm.getPendingValidators(db, constants.PrimaryNetworkID)
-	if err != nil {
-		return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get pending validators of primary network: %w", err)}
-	}
-
-	if validator, err := currentValidators.getPrimaryStaker(tx.Validator.NodeID); err == nil {
-		unsignedValidator := validator.UnsignedTx.(*UnsignedAddValidatorTx)
-		if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) {
-			return nil, nil, nil, nil, permError{errDSValidatorSubset}
-		}
-	} else {
-		// They aren't currently validating, so check to see if they will
-		// validate in the future.
-		validator, err := pendingValidators.getPrimaryStaker(tx.Validator.NodeID)
-		if err != nil {
-			return nil, nil, nil, nil, permError{errDSValidatorSubset}
-		}
-		unsignedValidator := validator.UnsignedTx.(*UnsignedAddValidatorTx)
-		if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) {
-			return nil, nil, nil, nil, permError{errDSValidatorSubset}
-		}
+	// Ensure that the period this delegator delegates is a subset of the time
+	// the validator validates.
+	vdr, isValidator := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID)
+	if !isValidator || !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) {
+		return nil, nil, nil, nil, permError{errDSValidatorSubset}
 	}
 
 	outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.Stake))
@@ -179,12 +156,8 @@
 		return nil, nil, nil, nil, tempError{err}
 	}
 
-	// Add the delegator to the pending validators heap
-	pendingValidators.Add(stx)
 	// If this proposal is committed, update the pending validator set to include the delegator
-	if err := vm.putPendingValidators(onCommitDB, pendingValidators, constants.PrimaryNetworkID); err != nil {
-		return nil, nil, nil, nil, tempError{err}
-	}
+	if err := vm.addValidator(onCommitDB, constants.PrimaryNetworkID, tx); err != nil {
+		return nil, nil, nil, nil, tempError{err}
+	}
 
 	// Set up the DB if this tx is aborted
 	onAbortDB := versiondb.New(db)
diff --git a/vms/platformvm/add_subnet_validator_tx.go b/vms/platformvm/add_subnet_validator_tx.go
index 2082f2f3735d..dca9a2f77137 100644
--- a/vms/platformvm/add_subnet_validator_tx.go
+++ b/vms/platformvm/add_subnet_validator_tx.go
@@ -102,76 +102,30 @@
 	}
 
 	// Ensure that the period this validator validates the specified subnet is a
-	// subnet of the time they validate the primary network. First, see if
-	// they're currently validating the primary network.
-	currentDSValidators, err := vm.getCurrentValidators(db, constants.PrimaryNetworkID)
-	if err != nil {
-		return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get current validators of primary network: %v", err)}
-	}
-	if dsValidator, err := currentDSValidators.getPrimaryStaker(tx.Validator.NodeID); err == nil {
-		unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddValidatorTx)
-		if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) {
-			return nil, nil, nil, nil,
-				permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating primary network [%v, %v]",
-					tx.StartTime(), tx.EndTime(),
-					unsignedValidator.StartTime(), unsignedValidator.EndTime())}
-		}
-	} else {
-		// They aren't currently validating the primary network.
See if they will
-		// validate the primary network in the future.
-		pendingDSValidators, err := vm.getPendingValidators(db, constants.PrimaryNetworkID)
-		if err != nil {
-			return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get pending validators of primary network: %v", err)}
-		}
-		dsValidator, err := pendingDSValidators.getPrimaryStaker(tx.Validator.NodeID)
-		if err != nil {
-			return nil, nil, nil, nil,
-				permError{fmt.Errorf("validator would not be validating primary network while validating subnet")}
-		}
-		unsignedValidator := dsValidator.UnsignedTx.(*UnsignedAddValidatorTx)
-		if !tx.Validator.BoundedBy(unsignedValidator.StartTime(), unsignedValidator.EndTime()) {
-			return nil, nil, nil, nil,
-				permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating primary network [%v, %v]",
-					tx.StartTime(), tx.EndTime(),
-					unsignedValidator.StartTime(), unsignedValidator.EndTime())}
-		}
+	// subnet of the time they validate the primary network.
+	vdr, isValidator := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID)
+	if !isValidator || !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) {
+		return nil, nil, nil, nil,
+			permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating primary network [%v, %v]",
+				tx.StartTime(), tx.EndTime(),
+				vdr.StartTime(), vdr.EndTime())}
 	}
 
 	// Ensure the proposed validator is not already a validator of the specified subnet
-	currentValidators, err := vm.getCurrentValidators(db, tx.Validator.Subnet)
-	if err != nil {
-		return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get current validators of subnet %s: %v",
-			tx.Validator.Subnet, err)}
-	}
-	for _, currentVdr := range vm.getValidators(currentValidators) {
-		if currentVdr.ID().Equals(tx.Validator.NodeID) {
-			return nil, nil, nil, nil, permError{fmt.Errorf("validator with ID %s already in the current validator set for subnet with ID %s",
-				tx.Validator.NodeID,
-				tx.Validator.Subnet)}
-		}
-	}
-
-	// Ensure the proposed validator is not already slated to validate for the specified subnet
-	pendingValidators, err := vm.getPendingValidators(db, tx.Validator.Subnet)
-	if err != nil {
-		return nil, nil, nil, nil, tempError{fmt.Errorf("couldn't get pending validators of subnet %s: %v",
-			tx.Validator.Subnet, err)}
-	}
-	for _, pendingVdr := range vm.getValidators(pendingValidators) {
-		if pendingVdr.ID().Equals(tx.Validator.NodeID) {
-			return nil, nil, nil, nil, permError{fmt.Errorf("validator with ID %s already in the pending validator set for subnet with ID %s",
-				tx.Validator.NodeID,
-				tx.Validator.Subnet)}
-		}
+	vdr, isValidator = vm.isValidator(db, tx.Validator.Subnet, tx.Validator.NodeID)
+	if isValidator {
+		return nil, nil, nil, nil,
+			permError{fmt.Errorf("already validating subnet between [%v, %v]",
+				vdr.StartTime(), vdr.EndTime())}
 	}
 
 	baseTxCredsLen := len(stx.Creds) - 1
 	baseTxCreds := stx.Creds[:baseTxCredsLen]
 	subnetCred := stx.Creds[baseTxCredsLen]
 
-	subnet, txErr := vm.getSubnet(db, tx.Validator.Subnet)
+	subnet, err := vm.getSubnet(db, tx.Validator.Subnet)
 	if err != nil {
-		return nil, nil, nil, nil, txErr
+		return nil, nil, nil, nil, err
 	}
 	unsignedSubnet := subnet.UnsignedTx.(*UnsignedCreateSubnetTx)
 	if err := vm.fx.VerifyPermission(tx, tx.SubnetAuth, subnetCred, unsignedSubnet.Owner); err != nil {
@@ -196,9 +150,7 @@
 		return nil, nil, nil, nil, tempError{err}
 	}
 	// Add the validator to the set of pending
validators
-	pendingValidators.Add(stx)
-	// If this proposal is committed, update the pending validator set to include the delegator
-	if err := vm.putPendingValidators(onCommitDB, pendingValidators, tx.Validator.Subnet); err != nil {
+	if err := vm.addValidator(onCommitDB, tx.Validator.Subnet, tx); err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
 
diff --git a/vms/platformvm/add_validator_tx.go b/vms/platformvm/add_validator_tx.go
index 9e4261854a96..ca5e5bcdbe06 100644
--- a/vms/platformvm/add_validator_tx.go
+++ b/vms/platformvm/add_validator_tx.go
@@ -134,28 +134,10 @@
 			startTime)}
 	}
 
-	// Ensure the proposed validator is not already a validator of the specified subnet
-	currentValidators, err := vm.getCurrentValidators(db, constants.PrimaryNetworkID)
-	if err != nil {
-		return nil, nil, nil, nil, tempError{err}
-	}
-	for _, currentVdr := range vm.getValidators(currentValidators) {
-		if currentVdr.ID().Equals(tx.Validator.NodeID) {
-			return nil, nil, nil, nil, permError{fmt.Errorf("validator %s already is already a primary network validator",
-				tx.Validator.NodeID)}
-		}
-	}
-
-	// Ensure the proposed validator is not already slated to validate for the specified subnet
-	pendingValidators, err := vm.getPendingValidators(db, constants.PrimaryNetworkID)
-	if err != nil {
-		return nil, nil, nil, nil, tempError{err}
-	}
-	for _, pendingVdr := range vm.getValidators(pendingValidators) {
-		if pendingVdr.ID().Equals(tx.Validator.NodeID) {
-			return nil, nil, nil, nil, tempError{fmt.Errorf("validator %s is already a pending primary network validator",
-				tx.Validator.NodeID)}
-		}
+	vdr, isValidator := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID)
+	if isValidator {
+		return nil, nil, nil, nil, permError{fmt.Errorf("validator %s is already a primary network validator from %s to %s",
+			tx.Validator.NodeID, vdr.StartTime(), vdr.EndTime())}
 	}
 
 	outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.Stake))
@@ -181,9 +163,7 @@
 	}
 
 	// Add validator to set of pending validators
-	pendingValidators.Add(stx)
-	// If this proposal is committed, update the pending validator set to include the validator
-	if err := vm.putPendingValidators(onCommitDB, pendingValidators, constants.PrimaryNetworkID); err != nil {
+	if err := vm.addValidator(onCommitDB, constants.PrimaryNetworkID, tx); err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
 
diff --git a/vms/platformvm/event_heap.go b/vms/platformvm/event_heap.go
index 699e14f1ed28..01edffe0b78b 100644
--- a/vms/platformvm/event_heap.go
+++ b/vms/platformvm/event_heap.go
@@ -17,6 +17,7 @@ type TimedTx interface {
 	ID() ids.ID
 	StartTime() time.Time
 	EndTime() time.Time
+	Bytes() []byte
 }
 
 // EventHeap is a collection of timedTxs where elements are ordered by either
diff --git a/vms/platformvm/reward_validator_tx.go b/vms/platformvm/reward_validator_tx.go
index dfb7e67c0181..38f3c87f6a13 100644
--- a/vms/platformvm/reward_validator_tx.go
+++ b/vms/platformvm/reward_validator_tx.go
@@ -100,15 +100,23 @@
 
 	// If this tx's proposal is committed, remove the validator from the validator set
 	onCommitDB := versiondb.New(db)
-	if err := vm.putCurrentValidators(onCommitDB, primaryNetworkVdrHeap, constants.PrimaryNetworkID); err != nil {
+	if err := vm.removeValidator(onCommitDB, constants.PrimaryNetworkID, unsignedVdrTx); err != nil
{
+		return nil, nil, nil, nil, tempError{err}
 	}
+	// TODO remove
+	// if err := vm.putCurrentValidators(onCommitDB, primaryNetworkVdrHeap, constants.PrimaryNetworkID); err != nil {
+	// 	return nil, nil, nil, nil, tempError{err}
+	// }
 
 	// If this tx's proposal is aborted, remove the validator from the validator set
 	onAbortDB := versiondb.New(db)
-	if err := vm.putCurrentValidators(onAbortDB, primaryNetworkVdrHeap, constants.PrimaryNetworkID); err != nil {
+	if err := vm.removeValidator(onAbortDB, constants.PrimaryNetworkID, unsignedVdrTx); err != nil {
 		return nil, nil, nil, nil, tempError{err}
 	}
+	// TODO remove
+	// if err := vm.putCurrentValidators(onAbortDB, primaryNetworkVdrHeap, constants.PrimaryNetworkID); err != nil {
+	// 	return nil, nil, nil, nil, tempError{err}
+	// }
 
 	switch uVdrTx := vdrTx.UnsignedTx.(type) {
 	case *UnsignedAddValidatorTx:
diff --git a/vms/platformvm/state.go b/vms/platformvm/state.go
index f0968303f660..d57cd4373543 100644
--- a/vms/platformvm/state.go
+++ b/vms/platformvm/state.go
@@ -13,7 +13,9 @@ import (
 	"github.com/ava-labs/gecko/database/prefixdb"
 	"github.com/ava-labs/gecko/ids"
 	"github.com/ava-labs/gecko/snow/consensus/snowman"
+	"github.com/ava-labs/gecko/utils/constants"
 	"github.com/ava-labs/gecko/utils/formatting"
+	"github.com/ava-labs/gecko/utils/wrappers"
 	"github.com/ava-labs/gecko/vms/components/avax"
 
 	safemath "github.com/ava-labs/gecko/utils/math"
@@ -25,6 +27,14 @@ import (
 const (
 	currentValidatorsPrefix uint64 = iota
 	pendingValidatorsPrefix
+
+	delegator = "delegator"
+	start     = "start"
+	stop      = "stop"
+)
+
+var (
+	errNoValidators = errors.New("there are no validators")
 )
 
 // persist a tx
@@ -78,6 +88,189 @@ func (vm *VM) putCurrentValidators(db database.Database, validators *EventHeap,
 	return nil
 }
 
+func (vm *VM) addValidator(db database.Database, subnetID ids.ID, validator TimedTx) error {
+    validatorBytes := validator.Bytes()
+
+    p := wrappers.Packer{MaxSize: wrappers.LongLen}
+    p.PackLong(uint64(validator.StartTime().Unix()))
+    if p.Err != nil {
+        return fmt.Errorf("couldn't serialize start time: %w", p.Err)
+    }
+    prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start))
+    prefixStartDB := prefixdb.NewNested(prefixStart, db)
+    prefixStop := []byte(fmt.Sprintf("%s%s", subnetID, stop))
+    prefixStopDB := prefixdb.NewNested(prefixStop, db)
+    startKey := append(p.Bytes, validatorBytes...)
+
+    p = wrappers.Packer{MaxSize: wrappers.LongLen}
+    p.PackLong(uint64(validator.EndTime().Unix()))
+    if p.Err != nil {
+        return fmt.Errorf("couldn't serialize stop time: %w", p.Err)
+    }
+    stopKey := append(p.Bytes, validatorBytes...)
+
+    if err := prefixStartDB.Put(startKey, validatorBytes); err != nil {
+        return err
+    }
+    return prefixStopDB.Put(stopKey, validatorBytes)
+}
+
+func (vm *VM) removeValidator(db database.Database, subnetID ids.ID, validator TimedTx) error {
+    validatorBytes := validator.Bytes()
+
+    p := wrappers.Packer{MaxSize: wrappers.LongLen}
+    p.PackLong(uint64(validator.StartTime().Unix()))
+    if p.Err != nil {
+        return fmt.Errorf("couldn't serialize start time: %w", p.Err)
+    }
+    prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start))
+    prefixStartDB := prefixdb.NewNested(prefixStart, db)
+    prefixStop := []byte(fmt.Sprintf("%s%s", subnetID, stop))
+    prefixStopDB := prefixdb.NewNested(prefixStop, db)
+    startKey := append(p.Bytes, validatorBytes...)
+
+    p = wrappers.Packer{MaxSize: wrappers.LongLen}
+    p.PackLong(uint64(validator.EndTime().Unix()))
+    if p.Err != nil {
+        return fmt.Errorf("couldn't serialize stop time: %w", p.Err)
+    }
+    stopKey := append(p.Bytes, validatorBytes...)
+
+    if err := prefixStartDB.Delete(startKey); err != nil {
+        return err
+    }
+    return prefixStopDB.Delete(stopKey)
+}
+
+func (vm *VM) addDelegator(db database.Database, tx TimedTx) error {
+    delegatorBytes := tx.Bytes()
+
+    p := wrappers.Packer{MaxSize: wrappers.LongLen}
+    p.PackLong(uint64(tx.StartTime().Unix()))
+    if p.Err != nil {
+        return fmt.Errorf("couldn't serialize start time: %w", p.Err)
+    }
+    prefixStart := []byte(fmt.Sprintf("%s%s", delegator, start))
+    prefixStartDB := prefixdb.NewNested(prefixStart, db)
+    prefixStop := []byte(fmt.Sprintf("%s%s", delegator, stop))
+    prefixStopDB := prefixdb.NewNested(prefixStop, db)
+    startKey := append(p.Bytes, delegatorBytes...)
+
+    p = wrappers.Packer{MaxSize: wrappers.LongLen}
+    p.PackLong(uint64(tx.EndTime().Unix()))
+    if p.Err != nil {
+        return fmt.Errorf("couldn't serialize stop time: %w", p.Err)
+    }
+    stopKey := append(p.Bytes, delegatorBytes...)
+
+    if err := prefixStartDB.Put(startKey, delegatorBytes); err != nil {
+        return err
+    }
+    return prefixStopDB.Put(stopKey, delegatorBytes)
+}
+
+func (vm *VM) removeDelegator(db database.Database, tx TimedTx) error {
+    delegatorBytes := tx.Bytes()
+
+    p := wrappers.Packer{MaxSize: wrappers.LongLen}
+    p.PackLong(uint64(tx.StartTime().Unix()))
+    if p.Err != nil {
+        return fmt.Errorf("couldn't serialize start time: %w", p.Err)
+    }
+    prefixStart := []byte(fmt.Sprintf("%s%s", delegator, start))
+    prefixStartDB := prefixdb.NewNested(prefixStart, db)
+    prefixStop := []byte(fmt.Sprintf("%s%s", delegator, stop))
+    prefixStopDB := prefixdb.NewNested(prefixStop, db)
+    startKey := append(p.Bytes, delegatorBytes...)
+
+    p = wrappers.Packer{MaxSize: wrappers.LongLen}
+    p.PackLong(uint64(tx.EndTime().Unix()))
+    if p.Err != nil {
+        return fmt.Errorf("couldn't serialize stop time: %w", p.Err)
+    }
+    stopKey := append(p.Bytes, delegatorBytes...)
+
+    if err := prefixStartDB.Delete(startKey); err != nil {
+        return err
+    }
+    return prefixStopDB.Delete(stopKey)
+}
+
+// Returns the pending validator that will start validating next
+func (vm *VM) nextValidatorToStart(db database.Database, subnetID ids.ID) (TimedTx, error) {
+    iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, start)), db).NewIterator()
+    if iter.Next() {
+        txBytes := iter.Value()
+        tx := &Tx{}
+        if err := Codec.Unmarshal(txBytes, tx); err != nil {
+            return nil, err
+        }
+        if err := tx.Sign(vm.codec, nil); err != nil {
+            return nil, fmt.Errorf("couldn't sign tx: %w", err)
+        }
+        asTimedTx, ok := tx.UnsignedTx.(TimedTx)
+        if !ok {
+            return nil, fmt.Errorf("expected validator to be type TimedTx but is %T", tx.UnsignedTx)
+        }
+        return asTimedTx, nil
+    }
+    return nil, errNoValidators
+}
+
+// Returns the current validator that will stop validating next
+func (vm *VM) nextValidatorToStop(db database.Database, subnetID ids.ID) (TimedTx, error) {
+    iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, stop)), db).NewIterator()
+    if iter.Next() {
+        txBytes := iter.Value()
+        tx := &Tx{}
+        if err := Codec.Unmarshal(txBytes, tx); err != nil {
+            return nil, err
+        }
+        if err := tx.Sign(vm.codec, nil); err != nil {
+            return nil, fmt.Errorf("couldn't sign tx: %w", err)
+        }
+        asTimedTx, ok := tx.UnsignedTx.(TimedTx)
+        if !ok {
+            return nil, fmt.Errorf("expected validator to be type TimedTx but is %T", tx.UnsignedTx)
+        }
+        return asTimedTx, nil
+    }
+    return nil, errNoValidators
+}
+
+// Returns the validator tx and true if [nodeID] is a validator of [subnetID]
+func (vm *VM) isValidator(db database.Database, subnetID ids.ID, nodeID ids.ShortID) (TimedTx, bool) {
+    iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, start)), db).NewIterator()
+    for iter.Next() {
+        txBytes := iter.Value()
+        tx := &Tx{}
+        if err := Codec.Unmarshal(txBytes, tx); err != nil {
+            vm.Ctx.Log.Warn("couldn't unmarshal Tx: %s", err)
+            return nil, false
+        }
+        if err := tx.Sign(vm.codec, nil); err != nil {
+            vm.Ctx.Log.Warn("couldn't sign *Tx: %s", err)
+            return nil, false
+        }
+        switch vdr := tx.UnsignedTx.(type) {
+        case *UnsignedAddValidatorTx:
+            if subnetID.Equals(constants.PrimaryNetworkID) && vdr.Validator.NodeID.Equals(nodeID) {
+                return vdr, true
+            }
+        case *UnsignedAddSubnetValidatorTx:
+            if subnetID.Equals(vdr.Validator.SubnetID()) && vdr.Validator.NodeID.Equals(nodeID) {
+                return vdr, true
+            }
+        default:
+            vm.Ctx.Log.Warn("expected tx to be *UnsignedAddValidatorTx or *UnsignedAddSubnetValidatorTx but got %T", tx.UnsignedTx)
+            return nil, false
+        }
+    }
+    return nil, false
+}
+
 // get the validators that are slated to validate the specified subnet in the future
 func (vm *VM) getPendingValidators(db database.Database, subnetID ids.ID) (*EventHeap, error) {
 	return vm.getValidatorsFromDB(db, subnetID, pendingValidatorsPrefix, true)
diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go
index 15ba037ffd19..a477e1acdc61 100644
--- a/vms/platformvm/vm.go
+++ b/vms/platformvm/vm.go
@@ -539,17 +539,14 @@ func (vm *VM) BuildBlock() (snowman.Block, error) {
 
 	// If the chain time would be the time for the next primary network validator to leave,
 	// then we create a block that removes the validator and proposes they receive a validator reward
-	currentValidators, err := vm.getCurrentValidators(db, constants.PrimaryNetworkID)
-	if err != nil {
-		return nil, fmt.Errorf("couldn't get validator set: %w", err)
-	}
 	nextValidatorEndtime := maxTime
-	if currentValidators.Len() > 0 {
-		nextValidatorEndtime = 
currentValidators.Peek().UnsignedTx.(TimedTx).EndTime() + nextToLeave, err := vm.nextValidatorToStop(db, constants.PrimaryNetworkID) // TODO check for delegators too + if err != nil { + return nil, err } + nextValidatorEndtime = nextToLeave.EndTime() if currentChainTimestamp.Equal(nextValidatorEndtime) { - stakerTx := currentValidators.Peek() - rewardValidatorTx, err := vm.newRewardValidatorTx(stakerTx.ID()) + rewardValidatorTx, err := vm.newRewardValidatorTx(nextToLeave.ID()) if err != nil { return nil, err } @@ -777,22 +774,21 @@ func (vm *VM) nextValidatorChangeTime(db database.Database, start bool) time.Tim } func (vm *VM) nextSubnetValidatorChangeTime(db database.Database, subnetID ids.ID, start bool) time.Time { - var validators *EventHeap - var err error - if start { - validators, err = vm.getPendingValidators(db, subnetID) - } else { - validators, err = vm.getCurrentValidators(db, subnetID) + nextToStart, err1 := vm.nextValidatorToStart(db, subnetID) + nextToStop, err2 := vm.nextValidatorToStop(db, subnetID) + if err1 != nil { + if err2 != nil { + return maxTime + } + return nextToStop.EndTime() } - if err != nil { - vm.Ctx.Log.Error("couldn't get validators of subnet with ID %s: %v", subnetID, err) - return maxTime + if err2 != nil { + return nextToStart.StartTime() } - if validators.Len() == 0 { - vm.Ctx.Log.Verbo("subnet, %s, has no validators", subnetID) - return maxTime + if nextToStart.StartTime().Before(nextToStop.EndTime()) { + return nextToStart.StartTime() } - return validators.Timestamp() + return nextToStop.EndTime() } // Returns: From b58ff59c578653c9c5d42a2e06d1c5ff0043c563 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Fri, 28 Aug 2020 00:01:07 -0400 Subject: [PATCH 24/47] Re-ordered IPC initialization --- node/node.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/node/node.go b/node/node.go index a073358548d4..4705e70c9359 100644 --- a/node/node.go +++ b/node/node.go @@ -706,8 +706,11 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg if err := n.initHealthAPI(); err != nil { // Start the Health API return fmt.Errorf("couldn't initialize health API: %w", err) } + if err := n.initIPCs(); err != nil { // Start the IPCs + return fmt.Errorf("couldn't initialize IPCs: %w", err) + } if err := n.initIPCAPI(); err != nil { // Start the IPC API - return fmt.Errorf("couldn't initialize ipc API: %w", err) + return fmt.Errorf("couldn't initialize the IPC API: %w", err) } if err := n.initAliases(); err != nil { // Set up aliases return fmt.Errorf("couldn't initialize aliases: %w", err) @@ -715,9 +718,6 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg if err := n.initChains(genesisBytes, avaxAssetID); err != nil { // Start the Platform chain return fmt.Errorf("couldn't initialize chains: %w", err) } - if err := n.initIPCs(); err != nil { // Start the IPCs - return fmt.Errorf("couldn't initialize IPCs: %w", err) - } return nil } From 496c05023b82b787d74bb86595d1d1b8a4668dde Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Fri, 28 Aug 2020 00:26:53 -0400 Subject: [PATCH 25/47] Update CI script to match breaking API name changes --- .ci/run_e2e_tests.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 270b5d4ad9b0..62c9a9b37620 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -8,15 +8,15 @@ DOCKER_REPO="avaplatform" echo "$DOCKER_PASS" | docker login --username "$DOCKER_USERNAME" 
--password-stdin -TESTING_CONTROLLER_IMAGE="$DOCKER_REPO/avalanche-e2e-tests_controller:everest-latest" -BYZANTINE_IMAGE="$DOCKER_REPO/gecko-byzantine:everest-latest" +TESTING_CONTROLLER_IMAGE="$DOCKER_REPO/avalanche-testing_controller:everest-dev" +BYZANTINE_IMAGE="$DOCKER_REPO/gecko-byzantine:everest-dev" docker pull "$TESTING_CONTROLLER_IMAGE" docker pull "${BYZANTINE_IMAGE}" E2E_TESTING_REMOTE="https://github.com/ava-labs/avalanche-testing.git" -E2E_TAG="v0.8.2-dev" +E2E_TAG="v0.8.4-dev" mkdir -p "$E2E_TEST_HOME" git clone "$E2E_TESTING_REMOTE" "$E2E_TEST_HOME" @@ -26,4 +26,4 @@ git checkout "tags/$E2E_TAG" -b "$E2E_TAG" go mod edit -replace github.com/ava-labs/gecko="$GECKO_HOME" bash "./scripts/rebuild_initializer_binary.sh" -./build/avalanche-e2e-tests --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${TESTING_CONTROLLER_IMAGE}" --byzantine-image-name="${BYZANTINE_IMAGE}" +./build/avalanche-testing --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${TESTING_CONTROLLER_IMAGE}" --byzantine-image-name="${BYZANTINE_IMAGE}" From 3544aa459c11ba85527878081852abdf7b0257aa Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Fri, 28 Aug 2020 16:16:51 -0400 Subject: [PATCH 26/47] Add command line parameter to set max non staker msgs cap --- chains/manager.go | 51 ++++++++------ main/params.go | 1 + node/config.go | 19 ++--- node/node.go | 1 + snow/networking/router/chain_router_test.go | 2 + snow/networking/router/handler.go | 5 +- snow/networking/router/handler_test.go | 3 + snow/networking/router/service_queue.go | 3 +- snow/networking/router/service_queue_test.go | 3 + snow/networking/sender/sender_test.go | 3 + snow/networking/throttler/ewma.go | 39 +++++++---- snow/networking/throttler/throttler_test.go | 74 ++++++++++++++------ vms/platformvm/vm_test.go | 1 + vms/spchainvm/consensus_benchmark_test.go | 2 + 14 files changed, 136 insertions(+), 71 deletions(-) diff --git a/chains/manager.go b/chains/manager.go index 2b48fe1c85b5..e799c3271dba 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -115,6 +115,7 @@ type manager struct { stakingEnabled bool // True iff the network has staking enabled stakerMsgPortion, stakerCPUPortion float64 + maxNonStakerPendingMsgs uint32 log logging.Logger logFactory logging.Factory vmManager vms.Manager // Manage mappings from vm ID --> vm @@ -152,6 +153,7 @@ type manager struct { // TODO: Make this function take less arguments func New( stakingEnabled bool, + maxNonStakerPendingMsgs uint, stakerMsgPortion, stakerCPUPortion float64, log logging.Logger, @@ -186,29 +188,30 @@ func New( rtr.Initialize(log, &timeoutManager, gossipFrequency, shutdownTimeout) m := &manager{ - stakingEnabled: stakingEnabled, - stakerMsgPortion: stakerMsgPortion, - stakerCPUPortion: stakerCPUPortion, - log: log, - logFactory: logFactory, - vmManager: vmManager, - decisionEvents: decisionEvents, - consensusEvents: consensusEvents, - db: db, - chainRouter: rtr, - net: net, - timeoutManager: &timeoutManager, - consensusParams: consensusParams, - validators: validators, - nodeID: nodeID, - networkID: networkID, - server: server, - keystore: keystore, - atomicMemory: atomicMemory, - avaxAssetID: avaxAssetID, - xChainID: xChainID, - criticalChains: criticalChains, - chains: make(map[[32]byte]*router.Handler), + stakingEnabled: stakingEnabled, + maxNonStakerPendingMsgs: uint32(maxNonStakerPendingMsgs), + stakerMsgPortion: stakerMsgPortion, + stakerCPUPortion: stakerCPUPortion, + log: log, + logFactory: logFactory, + vmManager: vmManager, + 
decisionEvents: decisionEvents, + consensusEvents: consensusEvents, + db: db, + chainRouter: rtr, + net: net, + timeoutManager: &timeoutManager, + consensusParams: consensusParams, + validators: validators, + nodeID: nodeID, + networkID: networkID, + server: server, + keystore: keystore, + atomicMemory: atomicMemory, + avaxAssetID: avaxAssetID, + xChainID: xChainID, + criticalChains: criticalChains, + chains: make(map[[32]byte]*router.Handler), } m.Initialize() return m, nil @@ -510,6 +513,7 @@ func (m *manager) createAvalancheChain( validators, msgChan, defaultChannelSize, + m.maxNonStakerPendingMsgs, m.stakerMsgPortion, m.stakerCPUPortion, fmt.Sprintf("%s_handler", consensusParams.Namespace), @@ -588,6 +592,7 @@ func (m *manager) createSnowmanChain( validators, msgChan, defaultChannelSize, + m.maxNonStakerPendingMsgs, m.stakerMsgPortion, m.stakerCPUPortion, fmt.Sprintf("%s_handler", consensusParams.Namespace), diff --git a/main/params.go b/main/params.go index de74f278fad9..03997835749e 100644 --- a/main/params.go +++ b/main/params.go @@ -202,6 +202,7 @@ func init() { fs.Uint64Var(&Config.DisabledStakingWeight, "staking-disabled-weight", 1, "Weight to provide to each peer when staking is disabled") // Throttling: + fs.UintVar(&Config.MaxNonStakerPendingMsgs, "max-non-staker-pending", 3, "Maximum number of messages a non-staker is allowed to have pending.") fs.Float64Var(&Config.StakerMsgPortion, "staker-msg-reserved", 0.2, "Reserve a portion of the chain message queue's space for stakers.") fs.Float64Var(&Config.StakerCPUPortion, "staker-cpu-reserved", 0.2, "Reserve a portion of the chain's CPU time for stakers.") diff --git a/node/config.go b/node/config.go index aed3c9cec727..5e6ad9f8dcec 100644 --- a/node/config.go +++ b/node/config.go @@ -36,15 +36,16 @@ type Config struct { DB database.Database // Staking configuration - StakingIP utils.IPDesc - StakingLocalPort uint16 - EnableP2PTLS bool - EnableStaking bool - StakingKeyFile string - StakingCertFile string - DisabledStakingWeight uint64 - StakerMsgPortion float64 - StakerCPUPortion float64 + StakingIP utils.IPDesc + StakingLocalPort uint16 + EnableP2PTLS bool + EnableStaking bool + StakingKeyFile string + StakingCertFile string + DisabledStakingWeight uint64 + MaxNonStakerPendingMsgs uint + StakerMsgPortion float64 + StakerCPUPortion float64 // Bootstrapping configuration BootstrapPeers []*Peer diff --git a/node/node.go b/node/node.go index a073358548d4..ae8ed261641d 100644 --- a/node/node.go +++ b/node/node.go @@ -411,6 +411,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { n.chainManager, err = chains.New( n.Config.EnableStaking, + n.Config.MaxNonStakerPendingMsgs, n.Config.StakerMsgPortion, n.Config.StakerCPUPortion, n.Log, diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 3ab075a7b53b..8fb89e9cb322 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -39,6 +39,7 @@ func TestShutdown(t *testing.T) { validators.NewSet(), nil, 1, + DefaultMaxNonStakerPendingMsgs, DefaultStakerPortion, DefaultStakerPortion, "", @@ -97,6 +98,7 @@ func TestShutdownTimesOut(t *testing.T) { validators.NewSet(), nil, 1, + DefaultMaxNonStakerPendingMsgs, DefaultStakerPortion, DefaultStakerPortion, "", diff --git a/snow/networking/router/handler.go b/snow/networking/router/handler.go index f52f8cbc0b82..5f58e4e49edb 100644 --- a/snow/networking/router/handler.go +++ b/snow/networking/router/handler.go @@ -18,7 +18,8 @@ import 
( ) const ( - DefaultStakerPortion float64 = 0.2 + DefaultStakerPortion float64 = 0.2 + DefaultMaxNonStakerPendingMsgs uint32 = 3 ) // Requirement: A set of nodes spamming messages (potentially costly) shouldn't @@ -117,6 +118,7 @@ func (h *Handler) Initialize( validators validators.Set, msgChan <-chan common.Message, bufferSize int, + maxNonStakerPendingMsgs uint32, stakerMsgPortion, stakerCPUPortion float64, namespace string, @@ -156,6 +158,7 @@ func (h *Handler) Initialize( consumptionRanges, consumptionAllotments, bufferSize, + maxNonStakerPendingMsgs, cpuInterval, stakerMsgPortion, stakerCPUPortion, diff --git a/snow/networking/router/handler_test.go b/snow/networking/router/handler_test.go index e7fdb1a3357c..14aee95ad413 100644 --- a/snow/networking/router/handler_test.go +++ b/snow/networking/router/handler_test.go @@ -41,6 +41,7 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { vdrs, nil, 16, + DefaultMaxNonStakerPendingMsgs, DefaultStakerPortion, DefaultStakerPortion, "", @@ -83,6 +84,7 @@ func TestHandlerDoesntDrop(t *testing.T) { validators, nil, 16, + DefaultMaxNonStakerPendingMsgs, DefaultStakerPortion, DefaultStakerPortion, "", @@ -118,6 +120,7 @@ func TestHandlerClosesOnError(t *testing.T) { validators.NewSet(), nil, 16, + DefaultMaxNonStakerPendingMsgs, DefaultStakerPortion, DefaultStakerPortion, "", diff --git a/snow/networking/router/service_queue.go b/snow/networking/router/service_queue.go index e054129465b6..5fe5b47a6182 100644 --- a/snow/networking/router/service_queue.go +++ b/snow/networking/router/service_queue.go @@ -60,13 +60,14 @@ func newMultiLevelQueue( consumptionRanges []float64, consumptionAllotments []time.Duration, bufferSize int, + maxNonStakerPendingMsgs uint32, cpuInterval time.Duration, msgPortion, cpuPortion float64, ) (messageQueue, chan struct{}) { semaChan := make(chan struct{}, bufferSize) singleLevelSize := bufferSize / len(consumptionRanges) - throttler := throttler.NewEWMAThrottler(vdrs, uint32(bufferSize), msgPortion, cpuPortion, cpuInterval, log) + throttler := throttler.NewEWMAThrottler(vdrs, uint32(bufferSize), maxNonStakerPendingMsgs, msgPortion, cpuPortion, cpuInterval, log) queues := make([]singleLevelQueue, len(consumptionRanges)) for index := 0; index < len(queues); index++ { gauge, histogram, err := metrics.registerTierStatistics(index) diff --git a/snow/networking/router/service_queue_test.go b/snow/networking/router/service_queue_test.go index b79098864695..8ef87f0050c7 100644 --- a/snow/networking/router/service_queue_test.go +++ b/snow/networking/router/service_queue_test.go @@ -42,6 +42,7 @@ func setupMultiLevelQueue(t *testing.T, bufferSize int) (messageQueue, chan stru consumptionRanges, consumptionAllotments, bufferSize, + DefaultMaxNonStakerPendingMsgs, time.Second, DefaultStakerPortion, DefaultStakerPortion, @@ -169,6 +170,7 @@ func TestMultiLevelQueuePrioritizes(t *testing.T) { consumptionRanges, consumptionAllotments, bufferSize, + DefaultMaxNonStakerPendingMsgs, time.Second, DefaultStakerPortion, DefaultStakerPortion, @@ -263,6 +265,7 @@ func TestMultiLevelQueuePushesDownOldMessages(t *testing.T) { consumptionRanges, consumptionAllotments, bufferSize, + DefaultMaxNonStakerPendingMsgs, time.Second, DefaultStakerPortion, DefaultStakerPortion, diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 9b296b8f3f2e..08f6b509d226 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -67,6 +67,7 @@ func TestTimeout(t *testing.T) { 
validators.NewSet(), nil, 1, + router.DefaultMaxNonStakerPendingMsgs, router.DefaultStakerPortion, router.DefaultStakerPortion, "", @@ -123,6 +124,7 @@ func TestReliableMessages(t *testing.T) { validators.NewSet(), nil, 1, + router.DefaultMaxNonStakerPendingMsgs, router.DefaultStakerPortion, router.DefaultStakerPortion, "", @@ -189,6 +191,7 @@ func TestReliableMessagesToMyself(t *testing.T) { validators.NewSet(), nil, 1, + router.DefaultMaxNonStakerPendingMsgs, router.DefaultStakerPortion, router.DefaultStakerPortion, "", diff --git a/snow/networking/throttler/ewma.go b/snow/networking/throttler/ewma.go index a8068c553cf0..0ea9098c15cd 100644 --- a/snow/networking/throttler/ewma.go +++ b/snow/networking/throttler/ewma.go @@ -30,13 +30,19 @@ type ewmaThrottler struct { vdrs validators.Set // Track CPU utilization - decayFactor float64 - stakerCPU, nonReservedCPU time.Duration + decayFactor float64 // Factor used to discount the EWMA at every period + stakerCPU time.Duration // Amount of CPU time reserved for stakers + nonReservedCPU time.Duration // Amount of CPU time that is not reserved for stakers // Track pending messages - reservedStakerMessages uint32 - pendingNonReservedMsgs, nonReservedMsgs uint32 - maxNonStakerPendingMsgs uint32 + reservedStakerMessages uint32 // Number of messages reserved for stakers + nonReservedMsgs uint32 // Number of non-reserved messages left to a shared message pool + pendingNonReservedMsgs uint32 // Number of pending messages taken from the shared message pool + + // Threshold of messages taken from the pool before the throttler begins to enforce hard caps on individual peers' pending messages + enforceIndividualCapThreshold uint32 + // Cap on number of pending messages allowed to a non-staker (not enforced until above [enforceIndividualCapThreshold] is exceeded) + maxNonStakerPendingMsgs uint32 // Statistics adjusted at every interval currentPeriod uint32 @@ -55,7 +61,8 @@ type ewmaThrottler struct { // which is not the limit since it tracks consumption using EWMA. func NewEWMAThrottler( vdrs validators.Set, - maxMessages uint32, + maxMessages, + maxNonStakerPendingMsgs uint32, stakerMsgPortion, stakerCPUPortion float64, period time.Duration, @@ -89,8 +96,10 @@ func NewEWMAThrottler( stakerCPU: stakerCPU, nonReservedCPU: nonReservedCPU, - reservedStakerMessages: reservedStakerMessages, - nonReservedMsgs: nonReservedMsgs, + reservedStakerMessages: reservedStakerMessages, + nonReservedMsgs: nonReservedMsgs, + enforceIndividualCapThreshold: nonReservedMsgs / 2, // If the pool is half empty, begin to enforce the max message caps + maxNonStakerPendingMsgs: maxNonStakerPendingMsgs, } // Add validators to spenders, so that they will be calculated correctly in @@ -161,7 +170,8 @@ func (et *ewmaThrottler) GetUtilization( sp := et.getSpender(validatorID) if !sp.staking { exceedsMessageAllotment := et.pendingNonReservedMsgs > et.nonReservedMsgs || // the shared message pool has been taken - sp.pendingMessages > sp.maxMessages // exceeds its own individual message cap + (sp.pendingMessages > sp.maxMessages && // Spender has exceeded its individual cap + et.pendingNonReservedMsgs > et.enforceIndividualCapThreshold) // And the threshold before enforcing the cap has been reached if exceedsMessageAllotment { et.log.Verbo("Throttling non-staker %s: %s. 
Pending pool messages: %d/%d.", @@ -176,9 +186,10 @@ func (et *ewmaThrottler) GetUtilization( // Staker should only be throttled if it has exceeded its message allotment // and there are either no messages left in the shared pool or it has // exceeded its own maximum message allocation. - exceedsMessageAllotment := sp.pendingMessages > sp.msgAllotment && // exceeds its own individual message allotment - (et.pendingNonReservedMsgs > et.nonReservedMsgs || // no unreserved messages - sp.pendingMessages > sp.maxMessages) // exceeds its own individual message cap + exceedsMessageAllotment := sp.pendingMessages > sp.msgAllotment && // Throttle if the staker has exceeded its allotment + (et.pendingNonReservedMsgs > et.nonReservedMsgs || // And either the shared message pool is empty + (et.pendingNonReservedMsgs > et.enforceIndividualCapThreshold && // Or the threshold before enforcing the cap has been reached + sp.pendingMessages > sp.maxMessages)) // and this staker has exceeded its individual cap if exceedsMessageAllotment { et.log.Debug("Throttling staker %s: %s. Pending pool messages: %d/%d.", @@ -198,8 +209,6 @@ func (et *ewmaThrottler) EndInterval() { et.cumulativeEWMA = time.Duration(float64(et.cumulativeEWMA) / et.decayFactor) stakingWeight := et.vdrs.Weight() - numPeers := et.vdrs.Len() + 1 - et.maxNonStakerPendingMsgs = et.nonReservedMsgs / uint32(numPeers) for key, spender := range et.spenders { spender.cpuEWMA = time.Duration(float64(spender.cpuEWMA) / et.decayFactor) @@ -209,7 +218,7 @@ func (et *ewmaThrottler) EndInterval() { // Calculate staker allotment here spender.staking = true spender.msgAllotment = uint32(float64(et.reservedStakerMessages) * stakerPortion) - spender.maxMessages = uint32(float64(et.reservedStakerMessages)*stakerPortion) + et.maxNonStakerPendingMsgs + spender.maxMessages = spender.msgAllotment + et.maxNonStakerPendingMsgs spender.expectedCPU = time.Duration(float64(et.stakerCPU)*stakerPortion) + defaultMinimumCPUAllotment continue } diff --git a/snow/networking/throttler/throttler_test.go b/snow/networking/throttler/throttler_test.go index 835385e87dc2..c7d3d2cc54ab 100644 --- a/snow/networking/throttler/throttler_test.go +++ b/snow/networking/throttler/throttler_test.go @@ -12,6 +12,10 @@ import ( "github.com/ava-labs/gecko/utils/logging" ) +const ( + defaultMaxNonStakerPendingMsgs uint32 = 3 +) + func TestEWMAThrottler(t *testing.T) { vdrs := validators.NewSet() validator0 := validators.GenerateRandomValidator(1) @@ -23,7 +27,7 @@ func TestEWMAThrottler(t *testing.T) { msgPortion := 0.25 cpuPortion := 0.25 period := time.Second - throttler := NewEWMAThrottler(vdrs, maxMessages, msgPortion, cpuPortion, period, logging.NoLog{}) + throttler := NewEWMAThrottler(vdrs, maxMessages, defaultMaxNonStakerPendingMsgs, msgPortion, cpuPortion, period, logging.NoLog{}) throttler.UtilizeCPU(validator0.ID(), 25*time.Millisecond) throttler.UtilizeCPU(validator1.ID(), 5*time.Second) @@ -68,7 +72,7 @@ func TestThrottlerPrunesSpenders(t *testing.T) { cpuPortion := 0.25 msgPortion := 0.25 period := time.Second - throttler := NewEWMAThrottler(vdrs, maxMessages, msgPortion, cpuPortion, period, logging.NoLog{}) + throttler := NewEWMAThrottler(vdrs, maxMessages, defaultMaxNonStakerPendingMsgs, msgPortion, cpuPortion, period, logging.NoLog{}) throttler.AddMessage(nonStaker2) // nonStaker2 should not be removed with a pending message throttler.UtilizeCPU(nonStaker0, 1.0) @@ -107,40 +111,66 @@ func TestThrottleStaker(t *testing.T) { staker0 := validators.GenerateRandomValidator(1) 
 	staker1 := validators.GenerateRandomValidator(1)
 	nonStaker0 := ids.NewShortID([20]byte{1})
+	nonStaker1 := ids.NewShortID([20]byte{2})
 
 	vdrs.Add(staker0)
 	vdrs.Add(staker1)
 
-	maxMessages := uint32(16)
+	maxMessages := uint32(9)
 	msgPortion := 0.25
 	cpuPortion := 0.25
 	period := time.Second
-	throttler := NewEWMAThrottler(vdrs, maxMessages, msgPortion, cpuPortion, period, logging.NoLog{})
+	throttler := NewEWMAThrottler(vdrs, maxMessages, defaultMaxNonStakerPendingMsgs, msgPortion, cpuPortion, period, logging.NoLog{})
 
-	// Message Allotment: 0.5 * 0.25 * 15 = 2
-	// Message Pool: 12 messages
-	// Validator should be throttled iff it has exceeded its message allotment and the shared
-	// message pool is empty
+	// Message Allotment: 0.5 * 0.25 * 8 = 1
+	// Message Pool: 6 messages
+	// Max Messages: 1 + defaultMaxNonStakerPendingMsgs
+	// Validator should be throttled if it has exceeded its max messages
+	// or it has exceeded its message allotment and the shared message pool is empty.
 
-	// staker0 consumes its own allotment plus 10 messages from the shared pool
-	for i := 0; i < 12; i++ {
-		throttler.AddMessage(staker0.ID())
-	}
+	// staker0 consumes its entire message allotment
 
-	for i := 0; i < 3; i++ {
-		throttler.AddMessage(staker1.ID())
-		if _, throttle := throttler.GetUtilization(staker1.ID()); throttle {
+	// Ensure that staker0 is allowed to consume its entire max messages before being throttled
+	for i := 0; i < int(defaultMaxNonStakerPendingMsgs)+1; i++ {
+		throttler.AddMessage(staker0.ID())
+		if _, throttle := throttler.GetUtilization(staker0.ID()); throttle {
 			t.Fatal("Should not throttle message from staker until it has exceeded its own allotment")
 		}
 	}
 
-	// Consume the last message and one extra message from the shared pool
-	throttler.AddMessage(nonStaker0)
-	throttler.AddMessage(nonStaker0)
-	throttler.AddMessage(nonStaker0)
+	throttler.AddMessage(staker0.ID())
+	if _, throttle := throttler.GetUtilization(staker0.ID()); !throttle {
+		t.Fatal("Should have throttled message from staker0 after it exceeded its max messages")
+	}
+
+	// Remove messages so that staker0 only has its normal message allotment pending
+	for i := 0; i < int(defaultMaxNonStakerPendingMsgs); i++ {
+		throttler.RemoveMessage(staker0.ID())
+	}
+
+	// Consume the entire message pool among two non-stakers
+	for i := 0; i < int(defaultMaxNonStakerPendingMsgs); i++ {
+		throttler.AddMessage(nonStaker0)
+		throttler.AddMessage(nonStaker1)
+
+		// Neither should be throttled because each stays within its own message cap
+		// even as the shared pool is drained.
+		if _, throttle := throttler.GetUtilization(nonStaker0); throttle {
+			t.Fatalf("Should not have throttled message from nonStaker0 after %d messages", i)
+		}
+		if _, throttle := throttler.GetUtilization(nonStaker1); throttle {
+			t.Fatalf("Should not have throttled message from nonStaker1 after %d messages", i)
+		}
+	}
 
-	if _, throttle := throttler.GetUtilization(staker1.ID()); !throttle {
-		t.Fatal("Should have throttled message from staker after it exceeded its own allotment and the shared pool was empty")
+	// An additional message from staker0 should now cause it to be throttled since the message
+	// pool has been emptied.
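+	// At this point staker0 is back within its allotment, so it should still
+	// pass; the next message pushes it past its allotment at a time when no
+	// shared-pool capacity remains.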
+ if _, throttle := throttler.GetUtilization(staker0.ID()); throttle { + t.Fatal("Should not have throttled message from staker until it had exceeded its message allotment.") + } + throttler.AddMessage(staker0.ID()) + if _, throttle := throttler.GetUtilization(staker0.ID()); !throttle { + t.Fatal("Should have throttled message from staker0 after it exceeded its message allotment because the message pool was empty.") } } @@ -155,7 +185,7 @@ func TestCalculatesEWMA(t *testing.T) { msgPortion := 0.25 stakerPortion := 0.25 period := time.Second - throttler := NewEWMAThrottler(vdrs, maxMessages, msgPortion, stakerPortion, period, logging.NoLog{}) + throttler := NewEWMAThrottler(vdrs, maxMessages, defaultMaxNonStakerPendingMsgs, msgPortion, stakerPortion, period, logging.NoLog{}) // Spend X CPU time in consecutive intervals and ensure that the throttler correctly calculates EWMA spends := []time.Duration{ diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 025b84d39393..6e8cfd87a968 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -1641,6 +1641,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { vdrs, msgChan, 1000, + router.DefaultMaxNonStakerPendingMsgs, router.DefaultStakerPortion, router.DefaultStakerPortion, "", diff --git a/vms/spchainvm/consensus_benchmark_test.go b/vms/spchainvm/consensus_benchmark_test.go index 019ffe0ab3eb..0f49b89ca4ac 100644 --- a/vms/spchainvm/consensus_benchmark_test.go +++ b/vms/spchainvm/consensus_benchmark_test.go @@ -111,6 +111,7 @@ func ConsensusLeader(numBlocks, numTxsPerBlock int, b *testing.B) { vdrs, msgChan, 1000, + router.DefaultMaxNonStakerPendingMsgs, router.DefaultStakerPortion, router.DefaultStakerPortion, "", @@ -253,6 +254,7 @@ func ConsensusFollower(numBlocks, numTxsPerBlock int, b *testing.B) { vdrs, msgChan, 1000, + router.DefaultMaxNonStakerPendingMsgs, router.DefaultStakerPortion, router.DefaultStakerPortion, "", From 51fe80641fecbca05a29abccfae88c1ebf1bd1cf Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 28 Aug 2020 19:07:40 -0400 Subject: [PATCH 27/47] WIP. Builds but crashes while executing bootstrapping --- chains/awaiter.go | 8 +- chains/manager.go | 4 +- node/node.go | 14 +- snow/engine/common/bootstrapper.go | 4 +- snow/networking/throttler/ewma.go | 4 +- snow/validators/manager.go | 63 ++-- snow/validators/set.go | 126 +++++-- snow/validators/validator.go | 38 ++- vms/platformvm/add_delegator_tx.go | 4 +- vms/platformvm/add_subnet_validator_tx.go | 2 +- vms/platformvm/add_validator_tx.go | 2 +- vms/platformvm/advance_time_tx.go | 102 ++---- vms/platformvm/create_subnet_tx.go | 3 +- vms/platformvm/factory.go | 2 +- vms/platformvm/reward_validator_tx.go | 77 ++--- vms/platformvm/service.go | 130 ++++---- vms/platformvm/state.go | 242 ++++---------- vms/platformvm/vm.go | 383 +++++++++++----------- 18 files changed, 593 insertions(+), 615 deletions(-) diff --git a/chains/awaiter.go b/chains/awaiter.go index 5b9cfd6de922..bf9473a77582 100644 --- a/chains/awaiter.go +++ b/chains/awaiter.go @@ -28,11 +28,11 @@ func NewAwaiter(vdrs validators.Set, reqWeight uint64, connected func()) network } func (a *awaitConnected) Connected(vdrID ids.ShortID) bool { - vdr, ok := a.vdrs.Get(vdrID) + weight, ok := a.vdrs.GetWeight(vdrID) if !ok { return false } - weight, err := math.Add64(vdr.Weight(), a.weight) + weight, err := math.Add64(weight, a.weight) a.weight = weight // If the error is non-nil, then an overflow error has occurred such that // the required weight was surpassed. 
As per network.Handler interface, @@ -46,14 +46,14 @@ func (a *awaitConnected) Connected(vdrID ids.ShortID) bool { } func (a *awaitConnected) Disconnected(vdrID ids.ShortID) bool { - if vdr, ok := a.vdrs.Get(vdrID); ok { + if weight, ok := a.vdrs.GetWeight(vdrID); ok { // TODO: Account for weight changes in a more robust manner. // Sub64 should rarely error since only validators that have added their // weight can become disconnected. Because it is possible that there are // changes to the validators set, we utilize that Sub64 returns 0 on // error. - a.weight, _ = math.Sub64(a.weight, vdr.Weight()) + a.weight, _ = math.Sub64(a.weight, weight) } return false } diff --git a/chains/manager.go b/chains/manager.go index 2b48fe1c85b5..31ca9e8c9f53 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -342,9 +342,9 @@ func (m *manager) buildChain(chainParams ChainParameters) (*chain, error) { var validators validators.Set // Validators validating this blockchain var ok bool if m.stakingEnabled { - validators, ok = m.validators.GetValidatorSet(chainParams.SubnetID) + validators, ok = m.validators.GetValidators(chainParams.SubnetID) } else { // Staking is disabled. Every peer validates every subnet. - validators, ok = m.validators.GetValidatorSet(constants.PrimaryNetworkID) + validators, ok = m.validators.GetValidators(constants.PrimaryNetworkID) } if !ok { return nil, fmt.Errorf("couldn't get validator set of subnet with ID %s. The subnet may not exist", chainParams.SubnetID) diff --git a/node/node.go b/node/node.go index a073358548d4..a578ec38ffb4 100644 --- a/node/node.go +++ b/node/node.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io/ioutil" + "math" "net" "os" "path/filepath" @@ -158,7 +159,7 @@ func (n *Node) initNetworking() error { // Initialize validator manager and primary network's validator set primaryNetworkValidators := validators.NewSet() n.vdrs = validators.NewManager() - n.vdrs.PutValidatorSet(constants.PrimaryNetworkID, primaryNetworkValidators) + n.vdrs.Set(constants.PrimaryNetworkID, primaryNetworkValidators) n.Net = network.NewDefaultNetwork( n.Config.ConsensusParams.Metrics, @@ -198,14 +199,14 @@ type insecureValidatorManager struct { } func (i *insecureValidatorManager) Connected(vdrID ids.ShortID) bool { - _ = i.vdrs.Add(validators.NewValidator(vdrID, i.weight)) + _ = i.vdrs.AddWeight(vdrID, i.weight) return false } func (i *insecureValidatorManager) Disconnected(vdrID ids.ShortID) bool { // Shouldn't error unless the set previously had an error, which should // never happen as described above - _ = i.vdrs.Remove(vdrID) + _ = i.vdrs.RemoveWeight(vdrID, math.MaxUint64) return false } @@ -313,7 +314,7 @@ func (n *Node) initNodeID() error { func (n *Node) initBeacons() error { n.beacons = validators.NewSet() for _, peer := range n.Config.BootstrapPeers { - if err := n.beacons.Add(validators.NewValidator(peer.ID, 1)); err != nil { + if err := n.beacons.AddWeight(peer.ID, 1); err != nil { return err } } @@ -363,6 +364,7 @@ func (n *Node) initChains(genesisBytes []byte, avaxAssetID ids.ID) error { }) bootstrapWeight := n.beacons.Weight() + reqWeight := (3*bootstrapWeight + 3) / 4 if reqWeight == 0 { @@ -443,11 +445,11 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { // to its own local validator manager (which isn't used for sampling) if !n.Config.EnableStaking { primaryNetworkValidators := validators.NewSet() - if err := primaryNetworkValidators.Add(validators.NewValidator(n.ID, 1)); err != nil { + if err := primaryNetworkValidators.AddWeight(n.ID, 1); err != 
nil { return fmt.Errorf("couldn't add validator to primary network: %w", err) } vdrs = validators.NewManager() - vdrs.PutValidatorSet(constants.PrimaryNetworkID, primaryNetworkValidators) + vdrs.Set(constants.PrimaryNetworkID, primaryNetworkValidators) } errs := wrappers.Errs{} diff --git a/snow/engine/common/bootstrapper.go b/snow/engine/common/bootstrapper.go index f1ce4eff52f2..a15e49639aa5 100644 --- a/snow/engine/common/bootstrapper.go +++ b/snow/engine/common/bootstrapper.go @@ -134,8 +134,8 @@ func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, conta b.pendingAccepted.Remove(validatorID) weight := uint64(0) - if vdr, ok := b.Beacons.Get(validatorID); ok { - weight = vdr.Weight() + if w, ok := b.Beacons.GetWeight(validatorID); ok { + weight = w } for _, containerID := range containerIDs.List() { diff --git a/snow/networking/throttler/ewma.go b/snow/networking/throttler/ewma.go index a8068c553cf0..8d33610069d0 100644 --- a/snow/networking/throttler/ewma.go +++ b/snow/networking/throttler/ewma.go @@ -203,8 +203,8 @@ func (et *ewmaThrottler) EndInterval() { for key, spender := range et.spenders { spender.cpuEWMA = time.Duration(float64(spender.cpuEWMA) / et.decayFactor) - if vdr, exists := et.vdrs.Get(ids.NewShortID(key)); exists { - stakerPortion := float64(vdr.Weight()) / float64(stakingWeight) + if weight, ok := et.vdrs.GetWeight(ids.NewShortID(key)); ok { + stakerPortion := float64(weight) / float64(stakingWeight) // Calculate staker allotment here spender.staking = true diff --git a/snow/validators/manager.go b/snow/validators/manager.go index 1a85b46c7a3a..0bf40b68e9a0 100644 --- a/snow/validators/manager.go +++ b/snow/validators/manager.go @@ -11,52 +11,77 @@ import ( // Manager holds the validator set of each subnet type Manager interface { - // PutValidatorSet puts associaties the given subnet ID with the given validator set - PutValidatorSet(ids.ID, Set) + // Set a subnet's validator set + Set(ids.ID, Set) - // RemoveValidatorSet removes the specified validator set - RemoveValidatorSet(ids.ID) + // AddWeight adds weight to a given validator on the given subnet + AddWeight(ids.ID, ids.ShortID, uint64) error - // GetGroup returns: - // 1) the validator set of the subnet with the specified ID - // 2) false if there is no subnet with the specified ID - GetValidatorSet(ids.ID) (Set, bool) + // RemoveWeight removes weight from a given validator on a given subnet + RemoveWeight(ids.ID, ids.ShortID, uint64) + + // GetValidators returns the validator set for the given subnet + // Returns false if the subnet doesn't exist + GetValidators(ids.ID) (Set, bool) } // NewManager returns a new, empty manager func NewManager() Manager { return &manager{ - validatorSets: make(map[[32]byte]Set), + subnetToVdrs: make(map[[32]byte]Set), } } // manager implements Manager type manager struct { - lock sync.Mutex - validatorSets map[[32]byte]Set + lock sync.Mutex + // Key: Subnet ID + // Value: The validators that validate the subnet + subnetToVdrs map[[32]byte]Set +} + +func (m *manager) Set(subnetID ids.ID, vdrSet Set) { + m.lock.Lock() + defer m.lock.Unlock() + + m.subnetToVdrs[subnetID.Key()] = vdrSet } -// PutValidatorSet implements the Manager interface. -func (m *manager) PutValidatorSet(subnetID ids.ID, set Set) { +// AddWeight implements the Manager interface. 
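+// If the subnet has no validator set yet, one is created lazily so that
+// weight can be tracked before the set is explicitly registered via Set.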
+func (m *manager) AddWeight(subnetID ids.ID, vdrID ids.ShortID, weight uint64) error {
 	m.lock.Lock()
 	defer m.lock.Unlock()
 
+	subnetIDKey := subnetID.Key()
 
-	m.validatorSets[subnetID.Key()] = set
+	vdrs, ok := m.subnetToVdrs[subnetIDKey]
+	if !ok {
+		vdrs = NewBestSet(5)
+		m.subnetToVdrs[subnetIDKey] = vdrs
+	}
+	// Propagate the error whether or not the set already existed
+	return vdrs.AddWeight(vdrID, weight)
 }
 
-// RemoveValidatorSet implements the Manager interface.
-func (m *manager) RemoveValidatorSet(subnetID ids.ID) {
+// RemoveWeight implements the Manager interface.
+func (m *manager) RemoveWeight(subnetID ids.ID, vdrID ids.ShortID, weight uint64) {
 	m.lock.Lock()
 	defer m.lock.Unlock()
 
-	delete(m.validatorSets, subnetID.Key())
+	vdrs, ok := m.subnetToVdrs[subnetID.Key()]
+	if ok {
+		vdrs.RemoveWeight(vdrID, weight)
+	}
 }
 
-// GetValidatorSet implements the Manager interface.
-func (m *manager) GetValidatorSet(subnetID ids.ID) (Set, bool) {
+// GetValidators implements the Manager interface.
+func (m *manager) GetValidators(subnetID ids.ID) (Set, bool) {
 	m.lock.Lock()
 	defer m.lock.Unlock()
 
-	set, exists := m.validatorSets[subnetID.Key()]
-	return set, exists
+	vdrs, ok := m.subnetToVdrs[subnetID.Key()]
+	return vdrs, ok
 }
diff --git a/snow/validators/set.go b/snow/validators/set.go
index 039553e167ee..008d53d58279 100644
--- a/snow/validators/set.go
+++ b/snow/validators/set.go
@@ -8,11 +8,12 @@ import (
+	"math"
 	"strings"
 	"sync"
 
 	"github.com/ava-labs/gecko/ids"
 	"github.com/ava-labs/gecko/utils/formatting"
-	"github.com/ava-labs/gecko/utils/sampler"
 
 	safemath "github.com/ava-labs/gecko/utils/math"
+	"github.com/ava-labs/gecko/utils/sampler"
 )
 
 const (
@@ -36,14 +37,14 @@ type Set interface {
 	// validators to the set.
 	Set([]Validator) error
 
-	// Add the provided validator to the set.
-	Add(Validator) error
+	// AddWeight to a staker.
+	AddWeight(ids.ShortID, uint64) error
 
-	// Get the validator from the set.
-	Get(ids.ShortID) (Validator, bool)
+	// GetWeight of the staker with the specified ID.
+	GetWeight(ids.ShortID) (uint64, bool)
 
-	// Remove the validator with the specified ID.
-	Remove(ids.ShortID) error
+	// RemoveWeight from a staker.
+	RemoveWeight(ids.ShortID, uint64) error
 
 	// Contains returns true if there is a validator with the specified ID
 	// currently in the set.
@@ -85,7 +86,7 @@ func NewBestSet(expectedSampleSize int) Set {
 type set struct {
 	lock        sync.Mutex
 	vdrMap      map[[20]byte]int
-	vdrSlice    []Validator
+	vdrSlice    []*validator
 	vdrWeights  []uint64
 	sampler     sampler.WeightedWithoutReplacement
 	totalWeight uint64
@@ -108,7 +109,7 @@ func (s *set) set(vdrs []Validator) error {
 		if newCap < lenVdrs {
 			newCap = lenVdrs
 		}
-		s.vdrSlice = make([]Validator, 0, newCap)
+		s.vdrSlice = make([]*validator, 0, newCap)
 		s.vdrWeights = make([]uint64, 0, newCap)
 	} else {
 		s.vdrSlice = s.vdrSlice[:0]
@@ -122,7 +123,6 @@
 		if s.contains(vdrID) {
 			continue
 		}
-
 		w := vdr.Weight()
 		if w == 0 {
 			continue // This validator would never be sampled anyway
@@ -130,7 +130,10 @@
 
 		i := len(s.vdrSlice)
 		s.vdrMap[vdrID.Key()] = i
-		s.vdrSlice = append(s.vdrSlice, vdr)
+		s.vdrSlice = append(s.vdrSlice, &validator{
+			id:     vdr.ID(),
+			weight: vdr.Weight(),
+		})
 		s.vdrWeights = append(s.vdrWeights, w)
 		newTotalWeight, err := safemath.Add64(s.totalWeight, w)
 		if err != nil {
@@ -142,36 +145,103 @@
 }
 
-// Add implements the Set interface.
-func (s *set) Add(vdr Validator) error {
+// AddWeight implements the Set interface.
+func (s *set) AddWeight(vdrID ids.ShortID, weight uint64) error {
 	s.lock.Lock()
 	defer s.lock.Unlock()
 
-	return s.add(vdr)
+	return s.addWeight(vdrID, weight)
 }
 
-func (s *set) add(vdr Validator) error {
-	vdrID := vdr.ID()
-	if s.contains(vdrID) {
-		if err := s.remove(vdrID); err != nil {
-			return err
-		}
+func (s *set) addWeight(vdrID ids.ShortID, weight uint64) error {
+	if weight == 0 {
+		return nil // This validator would never be sampled anyway
 	}
-	w := vdr.Weight()
-	if w == 0 {
-		return nil // This validator would never be sampled anyway
+	vdrIDKey := vdrID.Key()
+
+	i, ok := s.vdrMap[vdrIDKey]
+	if ok { // Validator already exists
+		vdr := s.vdrSlice[i]
+		newWeight, err := safemath.Add64(s.vdrWeights[i], weight)
+		if err != nil {
+			newWeight = math.MaxUint64
+		}
+		s.vdrWeights[i] = newWeight
+		newTotalWeight, err := safemath.Add64(s.totalWeight, weight)
+		if err != nil {
+			// Saturate rather than zeroing the total on overflow
+			newTotalWeight = math.MaxUint64
+		}
+		s.totalWeight = newTotalWeight
+		vdr.addWeight(weight)
+		return s.sampler.Initialize(s.vdrWeights)
 	}
 
-	i := len(s.vdrSlice)
-	s.vdrMap[vdrID.Key()] = i
+	vdr := &validator{
+		id:     vdrID,
+		weight: weight,
+	}
+	i = len(s.vdrSlice)
 	s.vdrSlice = append(s.vdrSlice, vdr)
-	s.vdrWeights = append(s.vdrWeights, w)
-	newTotalWeight, err := safemath.Add64(s.totalWeight, w)
+	s.vdrWeights = append(s.vdrWeights, weight)
+	s.vdrMap[vdrIDKey] = i
+	newTotalWeight, err := safemath.Add64(s.totalWeight, weight)
 	if err != nil {
 		return err
 	}
 	s.totalWeight = newTotalWeight
 	return s.sampler.Initialize(s.vdrWeights)
+}
+
+// GetWeight implements the Set interface.
+func (s *set) GetWeight(vdrID ids.ShortID) (uint64, bool) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	return s.getWeight(vdrID)
+}
+
+func (s *set) getWeight(vdrID ids.ShortID) (uint64, bool) {
+	index, ok := s.vdrMap[vdrID.Key()]
+	if !ok {
+		return 0, false
+	}
+	return s.vdrWeights[index], true
+}
+
+// RemoveWeight implements the Set interface.
+func (s *set) RemoveWeight(vdrID ids.ShortID, weight uint64) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	return s.removeWeight(vdrID, weight)
+}
+
+func (s *set) removeWeight(vdrID ids.ShortID, weight uint64) error {
+	if weight == 0 {
+		return nil
+	}
+
+	vdrIDKey := vdrID.Key()
+	i, ok := s.vdrMap[vdrIDKey]
+	if !ok {
+		return nil
+	}
+
+	// Validator exists
+	vdr := s.vdrSlice[i]
+	newWeight, err := safemath.Sub64(s.vdrWeights[i], weight)
+	if err != nil {
+		newWeight = 0
+	}
+	s.vdrWeights[i] = newWeight
+	newTotalWeight, err := safemath.Sub64(s.totalWeight, weight)
+	if err != nil {
+		// Clamp the total at zero rather than zeroing the staker's weight
+		newTotalWeight = 0
+	}
+	s.totalWeight = newTotalWeight
+	vdr.removeWeight(weight)
+	return s.sampler.Initialize(s.vdrWeights)
 }
 
 // Get implements the Set interface.
@@ -263,7 +333,9 @@ func (s *set) List() []Validator {
 
 func (s *set) list() []Validator {
 	list := make([]Validator, len(s.vdrSlice))
-	copy(list, s.vdrSlice)
+	for i, vdr := range s.vdrSlice {
+		list[i] = vdr
+	}
 	return list
 }
diff --git a/snow/validators/validator.go b/snow/validators/validator.go
index cab16bf62d67..b4ce1f2b4406 100644
--- a/snow/validators/validator.go
+++ b/snow/validators/validator.go
@@ -4,16 +4,46 @@
 package validators
 
 import (
+	"math"
+
 	"github.com/ava-labs/gecko/ids"
+	safemath "github.com/ava-labs/gecko/utils/math"
 )
 
 // Validator is the minimal description of someone that can be sampled.
 type Validator interface {
-	// ID returns the unique id of this validator
+	// ID returns the node ID of this validator
 	ID() ids.ShortID
 
-	// Weight that can be used for weighted sampling. 
- // If this validator is validating the primary network, returns the amount of - // AVAX staked + // Returns this validator's weight Weight() uint64 } + +type validator struct { + id ids.ShortID + weight uint64 +} + +func (v *validator) ID() ids.ShortID { + return v.id +} + +func (v *validator) addWeight(weight uint64) { + newTotalWeight, err := safemath.Add64(weight, v.weight) + if err != nil { + newTotalWeight = math.MaxUint64 + } + v.weight = newTotalWeight +} + +func (v *validator) removeWeight(weight uint64) { + newTotalWeight, err := safemath.Sub64(v.weight, weight) + if err != nil { + newTotalWeight = 0 + } + v.weight = newTotalWeight +} + +func (v *validator) Weight() uint64 { + return v.weight +} diff --git a/vms/platformvm/add_delegator_tx.go b/vms/platformvm/add_delegator_tx.go index b1c3e925bbff..d84a6913e8e1 100644 --- a/vms/platformvm/add_delegator_tx.go +++ b/vms/platformvm/add_delegator_tx.go @@ -157,7 +157,9 @@ func (tx *UnsignedAddDelegatorTx) SemanticVerify( } // If this proposal is committed, update the pending validator set to include the delegator - vm.addValidator(onCommitDB, constants.PrimaryNetworkID, tx) + if err := vm.addStaker(onCommitDB, constants.PrimaryNetworkID, stx); err != nil { + return nil, nil, nil, nil, tempError{err} + } // Set up the DB if this tx is aborted onAbortDB := versiondb.New(db) diff --git a/vms/platformvm/add_subnet_validator_tx.go b/vms/platformvm/add_subnet_validator_tx.go index dca9a2f77137..98a7e58e0014 100644 --- a/vms/platformvm/add_subnet_validator_tx.go +++ b/vms/platformvm/add_subnet_validator_tx.go @@ -150,7 +150,7 @@ func (tx *UnsignedAddSubnetValidatorTx) SemanticVerify( return nil, nil, nil, nil, tempError{err} } // Add the validator to the set of pending validators - if err := vm.addValidator(onCommitDB, tx.Validator.Subnet, tx); err != nil { + if err := vm.addStaker(onCommitDB, tx.Validator.Subnet, stx); err != nil { return nil, nil, nil, nil, tempError{err} } diff --git a/vms/platformvm/add_validator_tx.go b/vms/platformvm/add_validator_tx.go index ca5e5bcdbe06..e0418bbffb87 100644 --- a/vms/platformvm/add_validator_tx.go +++ b/vms/platformvm/add_validator_tx.go @@ -163,7 +163,7 @@ func (tx *UnsignedAddValidatorTx) SemanticVerify( } // Add validator to set of pending validators - if err := vm.addValidator(onCommitDB, constants.PrimaryNetworkID, tx); err != nil { + if err := vm.addStaker(onCommitDB, constants.PrimaryNetworkID, stx); err != nil { return nil, nil, nil, nil, tempError{err} } diff --git a/vms/platformvm/advance_time_tx.go b/vms/platformvm/advance_time_tx.go index e98a67a6b342..ce87fff2d2a9 100644 --- a/vms/platformvm/advance_time_tx.go +++ b/vms/platformvm/advance_time_tx.go @@ -4,14 +4,11 @@ package platformvm import ( - "bytes" "fmt" "time" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/versiondb" - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/utils/constants" "github.com/ava-labs/gecko/vms/components/avax" ) @@ -66,95 +63,48 @@ func (tx *UnsignedAdvanceTimeTx) SemanticVerify( tx.Timestamp(), currentTimestamp)} } - // Only allow timestamp to move forward as far as the next validator's end time - if nextValidatorEndTime := vm.nextValidatorChangeTime(db, false); tx.Time > uint64(nextValidatorEndTime.Unix()) { - return nil, nil, nil, nil, permError{fmt.Errorf("proposed timestamp (%s) later than next validator end time (%s)", - tx.Timestamp(), nextValidatorEndTime)} - } - - // Only allow timestamp to move forward as far as the next pending validator's start time - if 
nextValidatorStartTime := vm.nextValidatorChangeTime(db, true); tx.Time > uint64(nextValidatorStartTime.Unix()) { - return nil, nil, nil, nil, permError{fmt.Errorf("proposed timestamp (%s) later than next validator start time (%s)", - tx.Timestamp(), nextValidatorStartTime)} + // Only allow timestamp to move forward as far as the time of next staker set change time + nextStakerChangeTime, err := vm.nextStakerChangeTime(db) + if err != nil { + return nil, nil, nil, nil, tempError{err} + } else if tx.Time > uint64(nextStakerChangeTime.Unix()) { + return nil, nil, nil, nil, permError{fmt.Errorf("proposed timestamp (%s) later than next staker change time (%s)", + tx.Timestamp(), nextStakerChangeTime)} } - // Calculate what the validator sets will be given new timestamp - // Move validators from pending to current if their start time is <= new timestamp. - // Remove validators from current if their end time <= proposed timestamp - // Specify what the state of the chain will be if this proposal is committed onCommitDB := versiondb.New(db) if err := vm.putTimestamp(onCommitDB, tx.Timestamp()); err != nil { return nil, nil, nil, nil, tempError{err} } - - current, pending, _, _, err := vm.calculateValidators(db, tx.Timestamp(), constants.PrimaryNetworkID) - if err != nil { - return nil, nil, nil, nil, tempError{err} - } else if err := vm.putCurrentValidators(onCommitDB, current, constants.PrimaryNetworkID); err != nil { + if err := vm.updateValidators(onCommitDB); err != nil { return nil, nil, nil, nil, tempError{err} - } else if err := vm.putPendingValidators(onCommitDB, pending, constants.PrimaryNetworkID); err != nil { - return nil, nil, nil, nil, tempError{err} - } - - // For each Subnet, calculate what current and pending validator sets should be - // given new timestamp - - // Key: Subnet ID - // Value: IDs of validators that will have started validating this Subnet when - // timestamp is advanced to tx.Timestamp() - startedValidating := make(map[[32]byte]ids.ShortSet) - subnets, err := vm.getSubnets(db) - if err != nil { - return nil, nil, nil, nil, tempError{err} - } - for _, subnet := range subnets { - subnetID := subnet.ID() - if current, pending, started, _, err := vm.calculateValidators(db, tx.Timestamp(), subnetID); err != nil { - return nil, nil, nil, nil, tempError{err} - } else if err := vm.putCurrentValidators(onCommitDB, current, subnetID); err != nil { - return nil, nil, nil, nil, tempError{err} - } else if err := vm.putPendingValidators(onCommitDB, pending, subnetID); err != nil { - return nil, nil, nil, nil, tempError{err} - } else { - startedValidating[subnet.ID().Key()] = started - } } // If this block is committed, update the validator sets // onAbortDB or onCommitDB should commit (flush to vm.DB) before this is called onCommitFunc := func() error { // For each Subnet, update the node's validator manager to reflect current Subnet membership - subnets, err := vm.getSubnets(vm.DB) - if err != nil { - return err - } - for _, subnet := range subnets { - if err := vm.updateValidators(subnet.ID()); err != nil { - return err - } - } - if err := vm.updateValidators(constants.PrimaryNetworkID); err != nil { - return err - } + return vm.updateVdrMgr() // If this node started validating a Subnet, create the blockchains that the Subnet validates - chains, err := vm.getChains(vm.DB) // all blockchains - if err != nil { - return err - } - for subnetID, validatorIDs := range startedValidating { - if !validatorIDs.Contains(vm.Ctx.NodeID) { - continue - } - for _, chain := range chains { - 
unsignedChain := chain.UnsignedTx.(*UnsignedCreateChainTx) - if bytes.Equal(subnetID[:], unsignedChain.SubnetID.Bytes()) { - vm.createChain(chain) - } - } - } - return nil + // TODO this + // chains, err := vm.getChains(vm.DB) // all blockchains + // if err != nil { + // return err + // } + // for subnetID, validatorIDs := range startedValidating { + // if !validatorIDs.Contains(vm.Ctx.NodeID) { + // continue + // } + // for _, chain := range chains { + // unsignedChain := chain.UnsignedTx.(*UnsignedCreateChainTx) + // if bytes.Equal(subnetID[:], unsignedChain.SubnetID.Bytes()) { + // vm.createChain(chain) + // } + // } + // } + // return nil } // State doesn't change if this proposal is aborted diff --git a/vms/platformvm/create_subnet_tx.go b/vms/platformvm/create_subnet_tx.go index de85351ffd10..babf317f70c7 100644 --- a/vms/platformvm/create_subnet_tx.go +++ b/vms/platformvm/create_subnet_tx.go @@ -93,10 +93,9 @@ func (tx *UnsignedCreateSubnetTx) SemanticVerify( if err := vm.produceOutputs(db, txID, tx.Outs); err != nil { return nil, tempError{err} } - // Register new subnet in validator manager onAccept := func() error { - vm.validators.PutValidatorSet(tx.ID(), validators.NewSet()) + vm.vdrMgr.Set(tx.ID(), validators.NewSet()) return nil } return onAccept, nil diff --git a/vms/platformvm/factory.go b/vms/platformvm/factory.go index 9ddd69b63655..48bc080a0539 100644 --- a/vms/platformvm/factory.go +++ b/vms/platformvm/factory.go @@ -28,7 +28,7 @@ type Factory struct { func (f *Factory) New(*snow.Context) (interface{}, error) { return &VM{ chainManager: f.ChainManager, - validators: f.Validators, + vdrMgr: f.Validators, stakingEnabled: f.StakingEnabled, txFee: f.Fee, minStake: f.MinStake, diff --git a/vms/platformvm/reward_validator_tx.go b/vms/platformvm/reward_validator_tx.go index 38f3c87f6a13..024f5a142dee 100644 --- a/vms/platformvm/reward_validator_tx.go +++ b/vms/platformvm/reward_validator_tx.go @@ -66,20 +66,16 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( case len(stx.Creds) != 0: return nil, nil, nil, nil, permError{errWrongNumberOfCredentials} } + txID := tx.ID() - primaryNetworkVdrHeap, err := vm.getCurrentValidators(db, constants.PrimaryNetworkID) + stakerTx, err := vm.nextStakerStop(db, constants.PrimaryNetworkID) if err != nil { - return nil, nil, nil, nil, tempError{err} - } else if primaryNetworkVdrHeap.Len() == 0 { // there is no validator to remove - return nil, nil, nil, nil, permError{errEmptyValidatingSet} + return nil, nil, nil, nil, permError{err} } - - vdrTx := primaryNetworkVdrHeap.Remove() - txID := vdrTx.ID() - if !txID.Equals(tx.TxID) { + if !stakerTx.ID().Equals(tx.TxID) { return nil, nil, nil, nil, permError{fmt.Errorf("attempting to remove TxID: %s. 
Should be removing %s", tx.TxID, - txID)} + stakerTx)} } // Verify that the chain's timestamp is the validator's end time @@ -88,11 +84,11 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( return nil, nil, nil, nil, tempError{err} } - unsignedVdrTx, ok := vdrTx.UnsignedTx.(TimedTx) + staker, ok := stakerTx.UnsignedTx.(TimedTx) if !ok { return nil, nil, nil, nil, permError{errWrongTxType} } - if endTime := unsignedVdrTx.EndTime(); !endTime.Equal(currentTime) { + if endTime := staker.EndTime(); !endTime.Equal(currentTime) { return nil, nil, nil, nil, permError{fmt.Errorf("attempting to remove TxID: %s before their end time %s", tx.TxID, endTime)} @@ -100,32 +96,24 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( // If this tx's proposal is committed, remove the validator from the validator set onCommitDB := versiondb.New(db) - if err := vm.removeValidator(onCommitDB, constants.PrimaryNetworkID, unsignedVdrTx); err != nil { + if err := vm.removeStaker(onCommitDB, constants.PrimaryNetworkID, stakerTx); err != nil { return nil, nil, nil, nil, tempError{err} } - // TODO remove - // if err := vm.putCurrentValidators(onCommitDB, primaryNetworkVdrHeap, constants.PrimaryNetworkID); err != nil { - // return nil, nil, nil, nil, tempError{err} - // } // If this tx's proposal is aborted, remove the validator from the validator set onAbortDB := versiondb.New(db) - if err := vm.removeValidator(onCommitDB, constants.PrimaryNetworkID, unsignedVdrTx); err != nil { + if err := vm.removeStaker(onAbortDB, constants.PrimaryNetworkID, stakerTx); err != nil { return nil, nil, nil, nil, tempError{err} } - // TODO remove - // if err := vm.putCurrentValidators(onAbortDB, primaryNetworkVdrHeap, constants.PrimaryNetworkID); err != nil { - // return nil, nil, nil, nil, tempError{err} - // } - switch uVdrTx := vdrTx.UnsignedTx.(type) { + switch uStakerTx := stakerTx.UnsignedTx.(type) { case *UnsignedAddValidatorTx: // Refund the stake here - for i, out := range uVdrTx.Stake { + for i, out := range uStakerTx.Stake { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ - TxID: txID, - OutputIndex: uint32(len(uVdrTx.Outs) + i), + TxID: tx.ID(), + OutputIndex: uint32(len(uStakerTx.Outs) + i), }, Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID}, Out: out.Output(), @@ -140,8 +128,8 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( } // Provide the reward here - if reward := reward(uVdrTx.Validator.Duration(), uVdrTx.Validator.Wght, InflationRate); reward > 0 { - outIntf, err := vm.fx.CreateOutput(reward, uVdrTx.RewardsOwner) + if reward := reward(uStakerTx.Validator.Duration(), uStakerTx.Validator.Wght, InflationRate); reward > 0 { + outIntf, err := vm.fx.CreateOutput(reward, uStakerTx.RewardsOwner) if err != nil { return nil, nil, nil, nil, permError{err} } @@ -152,7 +140,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( if err := vm.putUTXO(onCommitDB, &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: txID, - OutputIndex: uint32(len(uVdrTx.Outs) + len(uVdrTx.Stake)), + OutputIndex: uint32(len(uStakerTx.Outs) + len(uStakerTx.Stake)), }, Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID}, Out: out, @@ -162,18 +150,23 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( } case *UnsignedAddDelegatorTx: // We're removing a delegator - parentTx, err := primaryNetworkVdrHeap.getPrimaryStaker(uVdrTx.Validator.NodeID) - if err != nil { - return nil, nil, nil, nil, permError{err} + vdrTx, ok := vm.isValidator(db, constants.PrimaryNetworkID, uStakerTx.Validator.NodeID) + if !ok { + return nil, nil, nil, nil, permError{ + fmt.Errorf("couldn't 
find validator %s: %w", uStakerTx.Validator.NodeID, err)} + } + vdr, ok := vdrTx.(*UnsignedAddValidatorTx) + if !ok { + return nil, nil, nil, nil, permError{ + fmt.Errorf("expected vdr to be *UnsignedAddValidatorTx but is %T", vdrTx)} } - unsignedParentTx := parentTx.UnsignedTx.(*UnsignedAddValidatorTx) // Refund the stake here - for i, out := range uVdrTx.Stake { + for i, out := range uStakerTx.Stake { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: txID, - OutputIndex: uint32(len(uVdrTx.Outs) + i), + OutputIndex: uint32(len(uStakerTx.Outs) + i), }, Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID}, Out: out.Output(), @@ -188,11 +181,11 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( } // If reward given, it will be this amount - reward := reward(uVdrTx.Validator.Duration(), uVdrTx.Validator.Wght, InflationRate) + reward := reward(uStakerTx.Validator.Duration(), uStakerTx.Validator.Wght, InflationRate) // Calculate split of reward between delegator/delegatee // The delegator gives stake to the validatee - delegatorShares := NumberOfShares - uint64(unsignedParentTx.Shares) // parentTx.Shares <= NumberOfShares so no underflow - delegatorReward := delegatorShares * (reward / NumberOfShares) // delegatorShares <= NumberOfShares so no overflow + delegatorShares := NumberOfShares - uint64(vdr.Shares) // parentTx.Shares <= NumberOfShares so no underflow + delegatorReward := delegatorShares * (reward / NumberOfShares) // delegatorShares <= NumberOfShares so no overflow // Delay rounding as long as possible for small numbers if optimisticReward, err := safemath.Mul64(delegatorShares, reward); err == nil { delegatorReward = optimisticReward / NumberOfShares @@ -203,7 +196,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( // Reward the delegator here if delegatorReward > 0 { - outIntf, err := vm.fx.CreateOutput(delegatorReward, uVdrTx.RewardsOwner) + outIntf, err := vm.fx.CreateOutput(delegatorReward, uStakerTx.RewardsOwner) if err != nil { return nil, nil, nil, nil, permError{err} } @@ -214,7 +207,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( if err := vm.putUTXO(onCommitDB, &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: txID, - OutputIndex: uint32(len(uVdrTx.Outs) + len(uVdrTx.Stake)), + OutputIndex: uint32(len(uStakerTx.Outs) + len(uStakerTx.Stake)), }, Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID}, Out: out, @@ -227,7 +220,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( // Reward the delegatee here if delegateeReward > 0 { - outIntf, err := vm.fx.CreateOutput(delegateeReward, unsignedParentTx.RewardsOwner) + outIntf, err := vm.fx.CreateOutput(delegateeReward, vdr.RewardsOwner) if err != nil { return nil, nil, nil, nil, permError{err} } @@ -238,7 +231,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( if err := vm.putUTXO(onCommitDB, &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: txID, - OutputIndex: uint32(len(uVdrTx.Outs) + len(uVdrTx.Stake) + offset), + OutputIndex: uint32(len(uStakerTx.Outs) + len(uStakerTx.Stake) + offset), }, Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID}, Out: out, @@ -254,7 +247,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( // validator set to remove the staker. 
onAbortDB or onCommitDB should commit // (flush to vm.DB) before this is called updateValidators := func() error { - return vm.updateValidators(constants.PrimaryNetworkID) + return vm.updateVdrMgr() } return onCommitDB, onAbortDB, updateValidators, updateValidators, nil diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index a2f027b7ce20..fbcc11198901 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -507,6 +507,7 @@ func (service *Service) GetStakingAssetID(_ *http.Request, args *GetStakingAsset ****************************************************** */ +/* // GetCurrentValidatorsArgs are the arguments for calling GetCurrentValidators type GetCurrentValidatorsArgs struct { // Subnet we're listing the validators of @@ -570,68 +571,69 @@ func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentVa return nil } - -// GetPendingValidatorsArgs are the arguments for calling GetPendingValidators -type GetPendingValidatorsArgs struct { - // Subnet we're getting the pending validators of - // If omitted, defaults to primary network - SubnetID ids.ID `json:"subnetID"` -} - -// GetPendingValidatorsReply are the results from calling GetPendingValidators -type GetPendingValidatorsReply struct { - Validators []FormattedAPIValidator `json:"validators"` -} - -// GetPendingValidators returns the list of pending validators -func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { - service.vm.Ctx.Log.Info("Platform: GetPendingValidators called") - if args.SubnetID.IsZero() { - args.SubnetID = constants.PrimaryNetworkID - } - - validators, err := service.vm.getPendingValidators(service.vm.DB, args.SubnetID) - if err != nil { - return fmt.Errorf("couldn't get validators of subnet with ID %s. 
Does it exist?", args.SubnetID) - } - - reply.Validators = make([]FormattedAPIValidator, validators.Len()) - for i, tx := range validators.Txs { - if args.SubnetID.Equals(constants.PrimaryNetworkID) { - switch tx := tx.UnsignedTx.(type) { - case *UnsignedAddValidatorTx: - weight := json.Uint64(tx.Validator.Weight()) - reply.Validators[i] = FormattedAPIValidator{ - ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), - StartTime: json.Uint64(tx.StartTime().Unix()), - EndTime: json.Uint64(tx.EndTime().Unix()), - StakeAmount: &weight, - } - case *UnsignedAddDelegatorTx: - weight := json.Uint64(tx.Validator.Weight()) - reply.Validators[i] = FormattedAPIValidator{ - ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), - StartTime: json.Uint64(tx.StartTime().Unix()), - EndTime: json.Uint64(tx.EndTime().Unix()), - StakeAmount: &weight, - } - default: // Shouldn't happen - return fmt.Errorf("couldn't get the reward address of %s", tx.ID()) - } - } else { - utx := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx) - weight := json.Uint64(utx.Validator.Weight()) - reply.Validators[i] = FormattedAPIValidator{ - ID: utx.Validator.ID().PrefixedString(constants.NodeIDPrefix), - StartTime: json.Uint64(utx.StartTime().Unix()), - EndTime: json.Uint64(utx.EndTime().Unix()), - Weight: &weight, - } - } - } - - return nil -} +*/ + +// // GetPendingValidatorsArgs are the arguments for calling GetPendingValidators +// type GetPendingValidatorsArgs struct { +// // Subnet we're getting the pending validators of +// // If omitted, defaults to primary network +// SubnetID ids.ID `json:"subnetID"` +// } + +// // GetPendingValidatorsReply are the results from calling GetPendingValidators +// type GetPendingValidatorsReply struct { +// Validators []FormattedAPIValidator `json:"validators"` +// } + +// // GetPendingValidators returns the list of pending validators +// func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { +// service.vm.Ctx.Log.Info("Platform: GetPendingValidators called") +// if args.SubnetID.IsZero() { +// args.SubnetID = constants.PrimaryNetworkID +// } + +// validators, err := service.vm.getPendingValidators(service.vm.DB, args.SubnetID) +// if err != nil { +// return fmt.Errorf("couldn't get validators of subnet with ID %s. 
Does it exist?", args.SubnetID) +// } + +// reply.Validators = make([]FormattedAPIValidator, validators.Len()) +// for i, tx := range validators.Txs { +// if args.SubnetID.Equals(constants.PrimaryNetworkID) { +// switch tx := tx.UnsignedTx.(type) { +// case *UnsignedAddValidatorTx: +// weight := json.Uint64(tx.Validator.Weight()) +// reply.Validators[i] = FormattedAPIValidator{ +// ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), +// StartTime: json.Uint64(tx.StartTime().Unix()), +// EndTime: json.Uint64(tx.EndTime().Unix()), +// StakeAmount: &weight, +// } +// case *UnsignedAddDelegatorTx: +// weight := json.Uint64(tx.Validator.Weight()) +// reply.Validators[i] = FormattedAPIValidator{ +// ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), +// StartTime: json.Uint64(tx.StartTime().Unix()), +// EndTime: json.Uint64(tx.EndTime().Unix()), +// StakeAmount: &weight, +// } +// default: // Shouldn't happen +// return fmt.Errorf("couldn't get the reward address of %s", tx.ID()) +// } +// } else { +// utx := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx) +// weight := json.Uint64(utx.Validator.Weight()) +// reply.Validators[i] = FormattedAPIValidator{ +// ID: utx.Validator.ID().PrefixedString(constants.NodeIDPrefix), +// StartTime: json.Uint64(utx.StartTime().Unix()), +// EndTime: json.Uint64(utx.EndTime().Unix()), +// Weight: &weight, +// } +// } +// } + +// return nil +// } // SampleValidatorsArgs are the arguments for calling SampleValidators type SampleValidatorsArgs struct { @@ -655,7 +657,7 @@ func (service *Service) SampleValidators(_ *http.Request, args *SampleValidators args.SubnetID = constants.PrimaryNetworkID } - validators, ok := service.vm.validators.GetValidatorSet(args.SubnetID) + validators, ok := service.vm.vdrMgr.GetValidators(args.SubnetID) if !ok { return fmt.Errorf("couldn't get validators of subnet with ID %s. 
Does it exist?", args.SubnetID) } @@ -739,7 +741,7 @@ func (service *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, re nodeID, // Node ID rewardAddress, // Reward Address uint32(10000*args.DelegationFeeRate), // Shares - privKeys, // Private keys + privKeys, // Private keys ) if err != nil { return fmt.Errorf("couldn't create tx: %w", err) diff --git a/vms/platformvm/state.go b/vms/platformvm/state.go index d57cd4373543..5d3c87ac8cb0 100644 --- a/vms/platformvm/state.go +++ b/vms/platformvm/state.go @@ -71,185 +71,143 @@ func (vm *VM) getStatus(db database.Database, ID ids.ID) (Status, error) { return Unknown, fmt.Errorf("expected status to be type Status but is type %T", statusIntf) } -// get the validators currently validating the specified subnet -func (vm *VM) getCurrentValidators(db database.Database, subnetID ids.ID) (*EventHeap, error) { - return vm.getValidatorsFromDB(db, subnetID, currentValidatorsPrefix, false) -} - -// put the validators currently validating the specified subnet -func (vm *VM) putCurrentValidators(db database.Database, validators *EventHeap, subnetID ids.ID) error { - if validators.SortByStartTime { - return errors.New("current validators should be sorted by end time") - } - err := vm.State.Put(db, validatorsTypeID, subnetID.Prefix(currentValidatorsPrefix), validators) - if err != nil { - return fmt.Errorf("couldn't put current validator set: %w", err) - } - return nil -} - -func (vm *VM) addValidator(db database.Database, subnetID ids.ID, validator TimedTx) error { - validatorBytes := validator.Bytes() - - p := wrappers.Packer{MaxSize: wrappers.LongLen} - p.PackLong(uint64(validator.StartTime().Unix())) - if p.Err != nil { - return fmt.Errorf("couldn't serialize start time: %w", p.Err) - } +// Add a staker to subnet [subnetID] +// A staker may be a validator or a delegator +func (vm *VM) addStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { + var staker TimedTx + switch stakerTx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx, *UnsignedAddSubnetValidatorTx, *UnsignedAddValidatorTx: + staker = stakerTx.UnsignedTx.(TimedTx) + default: + return fmt.Errorf("staker is unexpected type %T", stakerTx) + } + stakerID := staker.ID().Bytes() // Tx ID of this tx + + // Sorted by subnet ID then start time then tx ID prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) prefixStartDB := prefixdb.NewNested(prefixStart, db) + // Sorted by subnet ID then stop time then tx ID prefixStop := []byte(fmt.Sprintf("%s%s", subnetID, stop)) prefixStopDB := prefixdb.NewNested(prefixStop, db) - startKey := append(p.Bytes, validatorBytes...) - - p = wrappers.Packer{MaxSize: wrappers.LongLen} - p.PackLong(uint64(validator.EndTime().Unix())) - if p.Err != nil { - return fmt.Errorf("couldn't serialize stop time: %w", p.Err) - } - stopKey := append(p.Bytes, validatorBytes...) 
- - if err := prefixStartDB.Put(startKey, validatorBytes); err != nil { - return err - } - return prefixStopDB.Put(stopKey, validatorBytes) -} - -func (vm *VM) removeValidator(db database.Database, subnetID ids.ID, validator TimedTx) error { - validatorBytes := validator.Bytes() + defer func() { + prefixStartDB.Close() + prefixStopDB.Close() + }() p := wrappers.Packer{MaxSize: wrappers.LongLen} - p.PackLong(uint64(validator.StartTime().Unix())) + p.PackLong(uint64(staker.StartTime().Unix())) if p.Err != nil { return fmt.Errorf("couldn't serialize start time: %w", p.Err) } - prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) - prefixStartDB := prefixdb.NewNested(prefixStart, db) - prefixStop := []byte(fmt.Sprintf("%s%s", subnetID, stop)) - prefixStopDB := prefixdb.NewNested(prefixStop, db) - startKey := append(p.Bytes, validatorBytes...) + startKey := append(p.Bytes, stakerID...) p = wrappers.Packer{MaxSize: wrappers.LongLen} - p.PackLong(uint64(validator.EndTime().Unix())) + p.PackLong(uint64(staker.EndTime().Unix())) if p.Err != nil { return fmt.Errorf("couldn't serialize stop time: %w", p.Err) } - stopKey := append(p.Bytes, validatorBytes...) + stopKey := append(p.Bytes, stakerID...) - if err := prefixStartDB.Put(startKey, validatorBytes); err != nil { + if err := prefixStartDB.Put(startKey, stakerTx.Bytes()); err != nil { return err } - return prefixStopDB.Put(stopKey, validatorBytes) + return prefixStopDB.Put(stopKey, stakerTx.Bytes()) } -func (vm *VM) addDelegator(db database.Database, delegator TimedTx) error { - delegatorBytes := delegator.Bytes() - - p := wrappers.Packer{MaxSize: wrappers.LongLen} - p.PackLong(uint64(delegator.StartTime().Unix())) - if p.Err != nil { - return fmt.Errorf("couldn't serialize start time: %w", p.Err) +// Remove a staker from subnet [subnetID] +// A staker may be a validator or a delegator +func (vm *VM) removeStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { + var staker TimedTx + switch stakerTx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx, *UnsignedAddSubnetValidatorTx, *UnsignedAddValidatorTx: + staker = stakerTx.UnsignedTx.(TimedTx) + default: + return fmt.Errorf("staker is unexpected type %T", stakerTx) } - prefixStart := []byte(fmt.Sprintf("%s%s", delegator, start)) + stakerID := staker.ID().Bytes() // Tx ID of this tx + + // Sorted by subnet ID then start time then ID + prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) prefixStartDB := prefixdb.NewNested(prefixStart, db) - prefixStop := []byte(fmt.Sprintf("%s%s", delegator, stop)) + // Sorted by subnet ID then stop time + prefixStop := []byte(fmt.Sprintf("%s%s", subnetID, stop)) prefixStopDB := prefixdb.NewNested(prefixStop, db) - startKey := append(p.Bytes, delegatorBytes...) - - p = wrappers.Packer{MaxSize: wrappers.LongLen} - p.PackLong(uint64(delegator.EndTime().Unix())) - if p.Err != nil { - return fmt.Errorf("couldn't serialize stop time: %w", p.Err) - } - stopKey := append(p.Bytes, delegatorBytes...) 
- - if err := prefixStartDB.Put(startKey, delegatorBytes); err != nil { - return err - } - return prefixStopDB.Put(stopKey, delegatorBytes) -} - -func (vm *VM) removeDelegator(db database.Database, delegator TimedTx) error { - delegatorBytes := delegator.Bytes() + defer func() { + prefixStartDB.Close() + prefixStopDB.Close() + }() p := wrappers.Packer{MaxSize: wrappers.LongLen} - p.PackLong(uint64(delegator.StartTime().Unix())) + p.PackLong(uint64(staker.StartTime().Unix())) if p.Err != nil { return fmt.Errorf("couldn't serialize start time: %w", p.Err) } - prefixStart := []byte(fmt.Sprintf("%s%s", delegator, start)) - prefixStartDB := prefixdb.NewNested(prefixStart, db) - prefixStop := []byte(fmt.Sprintf("%s%s", delegator, stop)) - prefixStopDB := prefixdb.NewNested(prefixStop, db) - startKey := append(p.Bytes, delegatorBytes...) + startKey := append(p.Bytes, stakerID...) p = wrappers.Packer{MaxSize: wrappers.LongLen} - p.PackLong(uint64(delegator.EndTime().Unix())) + p.PackLong(uint64(staker.EndTime().Unix())) if p.Err != nil { return fmt.Errorf("couldn't serialize stop time: %w", p.Err) } - stopKey := append(p.Bytes, delegatorBytes...) + stopKey := append(p.Bytes, stakerID...) - if err := prefixStartDB.Put(startKey, delegatorBytes); err != nil { + if err := prefixStartDB.Put(startKey, nil); err != nil { return err } - return prefixStopDB.Put(stopKey, delegatorBytes) + return prefixStopDB.Put(stopKey, nil) } -// Returns the pending validator that will start validating next -func (vm *VM) nextValidatorToStart(db database.Database, subnetID ids.ID) (TimedTx, error) { +// Returns the pending staker that will start staking next +func (vm *VM) nextStakerStart(db database.Database, subnetID ids.ID) (*Tx, error) { iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, start)), db).NewIterator() + // Key: [Staker start time] | [Tx ID] + // Value: Byte repr. of tx that added this validator if iter.Next() { - txBytes := iter.Value() - var tx *Tx - if err := Codec.Unmarshal(txBytes, tx); err != nil { + var tx Tx + if err := Codec.Unmarshal(iter.Value(), &tx); err != nil { return nil, err } if err := tx.Sign(vm.codec, nil); err != nil { return nil, fmt.Errorf("couldn't sign tx: %w", err) } - asTimedTx, ok := tx.UnsignedTx.(TimedTx) - if !ok { - return nil, fmt.Errorf("expected validator to be type TimedTx but is %T", tx) - } - return asTimedTx, nil + return &tx, nil } return nil, errNoValidators } -// Returns the current validator that will top validating next -func (vm *VM) nextValidatorToStop(db database.Database, subnetID ids.ID) (TimedTx, error) { +// Returns the current staker that will stop staking next +func (vm *VM) nextStakerStop(db database.Database, subnetID ids.ID) (*Tx, error) { iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, stop)), db).NewIterator() + // Key: [Staker stop time] | [Tx ID] + // Value: Byte repr. 
of tx that added this validator if iter.Next() { - txBytes := iter.Value() - var tx *Tx - if err := Codec.Unmarshal(txBytes, tx); err != nil { + var tx Tx + if err := Codec.Unmarshal(iter.Value(), &tx); err != nil { return nil, err } if err := tx.Sign(vm.codec, nil); err != nil { return nil, fmt.Errorf("couldn't sign tx: %w", err) } - asTimedTx, ok := tx.UnsignedTx.(TimedTx) - if !ok { - return nil, fmt.Errorf("expected validator to be type TimedTx but is %T", tx) - } - return asTimedTx, nil + return &tx, nil } return nil, errNoValidators } -// Returns true if [nodeID] is a validator +// Returns true if [nodeID] is a validator (not a delegator) of subnet [subnetID] func (vm *VM) isValidator(db database.Database, subnetID ids.ID, nodeID ids.ShortID) (TimedTx, bool) { - iter := prefixdb.NewNested([]byte(start), db).NewIterator() + iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, start)), db).NewIterator() for iter.Next() { txBytes := iter.Value() - var tx *Tx - if err := Codec.Unmarshal(txBytes, tx); err != nil { - vm.Ctx.Log.Warn("couldn't unmarshal Tx: %w", err) + if txBytes == nil { + break + } + var tx Tx + if err := Codec.Unmarshal(txBytes, &tx); err != nil { + vm.Ctx.Log.Warn("couldn't unmarshal Tx: %s", err) return nil, false } if err := tx.Sign(vm.codec, nil); err != nil { - vm.Ctx.Log.Warn("couldn't sign *Tx: %w", err) + vm.Ctx.Log.Warn("couldn't sign *Tx: %s", err) return nil, false } switch vdr := tx.UnsignedTx.(type) { @@ -257,71 +215,15 @@ func (vm *VM) isValidator(db database.Database, subnetID ids.ID, nodeID ids.Shor if subnetID.Equals(constants.PrimaryNetworkID) && vdr.Validator.NodeID.Equals(nodeID) { return vdr, true } - return nil, false case *UnsignedAddSubnetValidatorTx: if subnetID.Equals(vdr.Validator.SubnetID()) && vdr.Validator.NodeID.Equals(nodeID) { return vdr, true } - return nil, false - default: - vm.Ctx.Log.Warn("expected tx to be *UnsignedAddValidatorTx or *UnsignedAddSubnetValidatorTx but got %T", tx) - return nil, false } } return nil, false } -// get the validators that are slated to validate the specified subnet in the future -func (vm *VM) getPendingValidators(db database.Database, subnetID ids.ID) (*EventHeap, error) { - return vm.getValidatorsFromDB(db, subnetID, pendingValidatorsPrefix, true) -} - -// put the validators that are slated to validate the specified subnet in the future -func (vm *VM) putPendingValidators(db database.Database, validators *EventHeap, subnetID ids.ID) error { - if !validators.SortByStartTime { - return errors.New("pending validators should be sorted by start time") - } - err := vm.State.Put(db, validatorsTypeID, subnetID.Prefix(pendingValidatorsPrefix), validators) - if err != nil { - return fmt.Errorf("couldn't put pending validator set: %w", err) - } - return nil -} - -// get the validators currently validating the specified subnet -func (vm *VM) getValidatorsFromDB( - db database.Database, - subnetID ids.ID, - prefix uint64, - sortByStartTime bool, -) (*EventHeap, error) { - // if current validators aren't specified in database, return empty validator set - key := subnetID.Prefix(prefix) - has, err := vm.State.Has(db, validatorsTypeID, key) - if err != nil { - return nil, err - } - if !has { - return &EventHeap{SortByStartTime: sortByStartTime}, nil - } - validatorsInterface, err := vm.State.Get(db, validatorsTypeID, key) - if err != nil { - return nil, err - } - validators, ok := validatorsInterface.(*EventHeap) - if !ok { - err := fmt.Errorf("expected to retrieve *EventHeap from database but got type %T", 
validatorsInterface) - vm.Ctx.Log.Error("error while fetching validators: %s", err) - return nil, err - } - for _, tx := range validators.Txs { - if err := tx.Sign(vm.codec, nil); err != nil { - return nil, err - } - } - return validators, nil -} - // getUTXO returns the UTXO with the specified ID func (vm *VM) getUTXO(db database.Database, ID ids.ID) (*avax.UTXO, error) { utxoIntf, err := vm.State.Get(db, utxoTypeID, ID) diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index a477e1acdc61..365585a3420d 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -4,15 +4,14 @@ package platformvm import ( - "container/heap" "errors" "fmt" - "math" "time" "github.com/ava-labs/gecko/cache" "github.com/ava-labs/gecko/chains" "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/database/versiondb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" @@ -30,8 +29,6 @@ import ( "github.com/ava-labs/gecko/vms/components/avax" "github.com/ava-labs/gecko/vms/components/core" "github.com/ava-labs/gecko/vms/secp256k1fx" - - safemath "github.com/ava-labs/gecko/utils/math" ) const ( @@ -145,7 +142,7 @@ type VM struct { // Node's validator manager // Maps Subnets --> nodes in the Subnet - validators validators.Manager + vdrMgr validators.Manager // true if the node is being run with staking enabled stakingEnabled bool @@ -191,7 +188,7 @@ type VM struct { } // Initialize this blockchain. -// [vm.ChainManager] and [vm.Validators] must be set before this function is called. +// [vm.ChainManager] and [vm.vdrMgr] must be set before this function is called. func (vm *VM) Initialize( ctx *snow.Context, db database.Database, @@ -236,15 +233,17 @@ func (vm *VM) Initialize( } } - validators := &EventHeap{ - SortByStartTime: false, - Txs: genesis.Validators, + // Persist the platform chain's timestamp at genesis + time := time.Unix(int64(genesis.Timestamp), 0) + if err := vm.State.PutTime(vm.DB, timestampKey, time); err != nil { + return err } - heap.Init(validators) // Persist primary network validator set at genesis - if err := vm.putCurrentValidators(vm.DB, validators, constants.PrimaryNetworkID); err != nil { - return err + for _, vdrTx := range genesis.Validators { + if err := vm.addStaker(vm.DB, constants.PrimaryNetworkID, vdrTx); err != nil { + return err + } } // Persist the subnets that exist at genesis (none do) @@ -273,17 +272,6 @@ func (vm *VM) Initialize( return err } - // Persist the platform chain's timestamp at genesis - time := time.Unix(int64(genesis.Timestamp), 0) - if err := vm.State.PutTime(vm.DB, timestampKey, time); err != nil { - return err - } - - // There are no pending stakers at genesis - if err := vm.putPendingValidators(vm.DB, &EventHeap{SortByStartTime: true}, constants.PrimaryNetworkID); err != nil { - return err - } - // Create the genesis block and save it as being accepted (We don't just // do genesisBlock.Accept() because then it'd look for genesisBlock's // non-existent parent) @@ -391,22 +379,11 @@ func (vm *VM) initBlockchains() error { // Set the node's validator manager to be up to date func (vm *VM) initSubnets() error { vm.Ctx.Log.Info("initializing Subnets") - subnets, err := vm.getSubnets(vm.DB) - if err != nil { - return err - } - if err := vm.updateValidators(constants.PrimaryNetworkID); err != nil { + if err := vm.updateValidators(vm.DB); err != nil { return err } - - for _, subnet := range subnets { - if err := vm.updateValidators(subnet.ID()); err != nil { - return err - } - } - - 
return nil + return vm.updateVdrMgr() // TODO is this right? } // Create the blockchain described in [tx], but only if this node is a member of @@ -418,9 +395,10 @@ func (vm *VM) createChain(tx *Tx) { return } // The validators that compose the Subnet that validates this chain - validators, subnetExists := vm.validators.GetValidatorSet(unsignedTx.SubnetID) + validators, subnetExists := vm.vdrMgr.GetValidators(unsignedTx.SubnetID) if !subnetExists { - vm.Ctx.Log.Error("blockchain %s validated by Subnet %s but couldn't get that Subnet. Blockchain not created") + vm.Ctx.Log.Error("blockchain %s validated by Subnet %s but couldn't get that Subnet. Blockchain not created", + tx.ID(), unsignedTx.SubnetID) return } if vm.stakingEnabled && // Staking is enabled, so nodes might not validate all chains @@ -537,16 +515,20 @@ func (vm *VM) BuildBlock() (snowman.Block, error) { return nil, errEndOfTime } - // If the chain time would be the time for the next primary network validator to leave, - // then we create a block that removes the validator and proposes they receive a validator reward + // If the chain time would be the time for the next primary network staker to leave, + // then we create a block that removes the staker and proposes they receive a staker reward nextValidatorEndtime := maxTime - nextToLeave, err := vm.nextValidatorToStop(db, constants.PrimaryNetworkID) // TODO check for delegators too + tx, err := vm.nextStakerStop(db, constants.PrimaryNetworkID) if err != nil { return nil, err } - nextValidatorEndtime = nextToLeave.EndTime() + staker, ok := tx.UnsignedTx.(TimedTx) + if !ok { + return nil, fmt.Errorf("expected staker tx to be TimedTx but got %T", tx) + } + nextValidatorEndtime = staker.EndTime() if currentChainTimestamp.Equal(nextValidatorEndtime) { - rewardValidatorTx, err := vm.newRewardValidatorTx(nextToLeave.ID()) + rewardValidatorTx, err := vm.newRewardValidatorTx(tx.ID()) if err != nil { return nil, err } @@ -560,19 +542,16 @@ func (vm *VM) BuildBlock() (snowman.Block, error) { return blk, vm.DB.Commit() } - // If local time is >= time of the next validator set change, + // If local time is >= time of the next staker set change, // propose moving the chain time forward - nextValidatorStartTime := vm.nextValidatorChangeTime(db /*start=*/, true) - nextValidatorEndTime := vm.nextValidatorChangeTime(db /*start=*/, false) - - nextValidatorSetChangeTime := nextValidatorStartTime - if nextValidatorEndTime.Before(nextValidatorStartTime) { - nextValidatorSetChangeTime = nextValidatorEndTime + nextStakerChangeTime, err := vm.nextStakerChangeTime(db) + if err != nil { + return nil, err } localTime := vm.clock.Time() - if !localTime.Before(nextValidatorSetChangeTime) { // time is at or after the time for the next validator to join/leave - advanceTimeTx, err := vm.newAdvanceTimeTx(nextValidatorSetChangeTime) + if !localTime.Before(nextStakerChangeTime) { // local time is at or after the time for the next staker to start/stop + advanceTimeTx, err := vm.newAdvanceTimeTx(nextStakerChangeTime) if err != nil { return nil, err } @@ -709,31 +688,26 @@ func (vm *VM) resetTimer() { if err != nil { vm.Ctx.Log.Error("could not retrieve timestamp from database") return - } - if timestamp.Equal(maxTime) { + } else if timestamp.Equal(maxTime) { vm.Ctx.Log.Error("Program time is suspiciously far in the future. 
Either this codebase was way more successful than expected, or a critical error has occurred.") return } - nextDSValidatorEndTime := vm.nextSubnetValidatorChangeTime(db, constants.PrimaryNetworkID, false) - if timestamp.Equal(nextDSValidatorEndTime) { - vm.SnowmanVM.NotifyBlockReady() // Should issue a ProposeRewardValidator - return - } - // If local time is >= time of the next change in the validator set, // propose moving forward the chain timestamp - nextValidatorStartTime := vm.nextValidatorChangeTime(db, true) - nextValidatorEndTime := vm.nextValidatorChangeTime(db, false) - - nextValidatorSetChangeTime := nextValidatorStartTime - if nextValidatorEndTime.Before(nextValidatorStartTime) { - nextValidatorSetChangeTime = nextValidatorEndTime + nextStakerChangeTime, err := vm.nextStakerChangeTime(db) + if err != nil { + vm.Ctx.Log.Error("couldn't get next staker change time: %s", err) + return + } + if timestamp.Equal(nextStakerChangeTime) { + vm.SnowmanVM.NotifyBlockReady() // Should issue a proposal to reward validator + return } localTime := vm.clock.Time() - if !localTime.Before(nextValidatorSetChangeTime) { // time is at or after the time for the next validator to join/leave - vm.SnowmanVM.NotifyBlockReady() // Should issue a ProposeTimestamp + if !localTime.Before(nextStakerChangeTime) { // time is at or after the time for the next validator to join/leave + vm.SnowmanVM.NotifyBlockReady() // Should issue a proposal to advance timestamp return } @@ -748,160 +722,187 @@ func (vm *VM) resetTimer() { vm.Ctx.Log.Debug("dropping tx to add validator because its start time has passed") } - waitTime := nextValidatorSetChangeTime.Sub(localTime) - vm.Ctx.Log.Debug("next scheduled event is at %s (%s in the future)", nextValidatorSetChangeTime, waitTime) + waitTime := nextStakerChangeTime.Sub(localTime) + vm.Ctx.Log.Debug("next scheduled event is at %s (%s in the future)", nextStakerChangeTime, waitTime) // Wake up when it's time to add/remove the next validator vm.timer.SetTimeoutIn(waitTime) } -// If [start], returns the time at which the next validator (of any subnet) in the pending set starts validating -// Otherwise, returns the time at which the next validator (of any subnet) stops validating -// If no such validator is found, returns maxTime -func (vm *VM) nextValidatorChangeTime(db database.Database, start bool) time.Time { - earliest := vm.nextSubnetValidatorChangeTime(db, constants.PrimaryNetworkID, start) +// Returns the time when the next staker of any subnet starts/stops staking +// after the current timestamp +func (vm *VM) nextStakerChangeTime(db database.Database) (time.Time, error) { + timestamp, err := vm.getTimestamp(db) + if err != nil { + return time.Time{}, fmt.Errorf("couldn't get timestamp: %w", err) + } + subnets, err := vm.getSubnets(db) if err != nil { - return earliest + return time.Time{}, fmt.Errorf("couldn't get subnets: %w", err) } + subnetIDs := ids.Set{} + subnetIDs.Add(constants.PrimaryNetworkID) for _, subnet := range subnets { - t := vm.nextSubnetValidatorChangeTime(db, subnet.ID(), start) - if t.Before(earliest) { - earliest = t - } + subnetIDs.Add(subnet.ID()) } - return earliest -} -func (vm *VM) nextSubnetValidatorChangeTime(db database.Database, subnetID ids.ID, start bool) time.Time { - nextToStart, err1 := vm.nextValidatorToStart(db, subnetID) - nextToStop, err2 := vm.nextValidatorToStop(db, subnetID) - if err1 != nil { - if err2 != nil { - return maxTime + earliest := maxTime + for _, subnetID := range subnetIDs.List() { + t, err := 
vm.nextStakerStart(db, subnetID) + if err == nil { + if staker, ok := t.UnsignedTx.(TimedTx); ok { + if startTime := staker.StartTime(); startTime.Before(earliest) && startTime.After(timestamp) { + earliest = startTime + } + } + } + t, err = vm.nextStakerStop(db, subnetID) + if err == nil { + if staker, ok := t.UnsignedTx.(TimedTx); ok { + if endTime := staker.EndTime(); endTime.Before(earliest) && endTime.After(timestamp) { + earliest = endTime + } + } } - return nextToStop.EndTime() - } - if err2 != nil { - return nextToStart.StartTime() - } - if nextToStart.StartTime().Before(nextToStop.EndTime()) { - return nextToStart.StartTime() } - return nextToStop.EndTime() + return earliest, nil } -// Returns: -// 1) The validator set of subnet with ID [subnetID] when timestamp is advanced to [timestamp] -// 2) The pending validator set of subnet with ID [subnetID] when timestamp is advanced to [timestamp] -// 3) The IDs of the validators that start validating [subnetID] between now and [timestamp] -// 4) The IDs of the validators that stop validating [subnetID] between now and [timestamp] -// Note that this method will not remove validators from the current validator set of the primary network. -// That happens in reward blocks. -func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, subnetID ids.ID) (current, - pending *EventHeap, started, stopped ids.ShortSet, err error) { - // remove validators whose end time <= [timestamp] - current, err = vm.getCurrentValidators(db, subnetID) +// update validator set of [subnetID] based on the current chain timestamp +func (vm *VM) updateValidators(db database.Database) error { + timestamp, err := vm.getTimestamp(vm.DB) if err != nil { - return nil, nil, nil, nil, err - } - if !subnetID.Equals(constants.PrimaryNetworkID) { // validators of primary network removed in rewardValidatorTxs, not here - for current.Len() > 0 { - next := current.Peek().UnsignedTx.(*UnsignedAddSubnetValidatorTx) // current validator with earliest end time - if timestamp.Before(next.EndTime()) { - break - } - current.Remove() - stopped.Add(next.Validator.ID()) - } + return fmt.Errorf("can't get timestamp: %w", err) } - pending, err = vm.getPendingValidators(db, subnetID) + + subnets, err := vm.getSubnets(vm.DB) if err != nil { - return nil, nil, nil, nil, err - } - for pending.Len() > 0 { - nextTx := pending.Peek() // pending staker with earliest start time - switch tx := nextTx.UnsignedTx.(type) { - case *UnsignedAddValidatorTx: - if timestamp.Before(tx.StartTime()) { - return current, pending, started, stopped, nil - } - current.Add(nextTx) - pending.Remove() - started.Add(tx.Validator.ID()) - case *UnsignedAddSubnetValidatorTx: - if timestamp.Before(tx.StartTime()) { - return current, pending, started, stopped, nil - } - current.Add(nextTx) - pending.Remove() - started.Add(tx.Validator.ID()) - case *UnsignedAddDelegatorTx: - if timestamp.Before(tx.StartTime()) { - return current, pending, started, stopped, nil - } - current.Add(nextTx) - pending.Remove() - started.Add(tx.Validator.ID()) - default: - pending.Remove() - } + return err } - return current, pending, started, stopped, nil -} -func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator { - vdrMap := make(map[[20]byte]*Validator, validatorEvents.Len()) - for _, event := range validatorEvents.Txs { - var vdr validators.Validator - switch tx := event.UnsignedTx.(type) { - case *UnsignedAddValidatorTx: - vdr = &tx.Validator - case *UnsignedAddDelegatorTx: - vdr = &tx.Validator - case 
*UnsignedAddSubnetValidatorTx: - vdr = &tx.Validator - default: - continue - } - vdrID := vdr.ID() - vdrKey := vdrID.Key() - validator, exists := vdrMap[vdrKey] - if !exists { - validator = &Validator{NodeID: vdrID} - vdrMap[vdrKey] = validator - } - weight, err := safemath.Add64(validator.Wght, vdr.Weight()) - if err != nil { - weight = math.MaxUint64 - } - validator.Wght = weight + subnetIDs := ids.Set{} + subnetIDs.Add(constants.PrimaryNetworkID) + for _, subnet := range subnets { + subnetIDs.Add(subnet.ID()) } - vdrList := make([]validators.Validator, len(vdrMap)) - i := 0 - for _, validator := range vdrMap { - vdrList[i] = validator - i++ + for _, subnetID := range subnetIDs.List() { + prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) + iter := prefixdb.NewNested(prefixStart, db).NewIterator() + + var tx Tx + for iter.Next() { // Iterates in order of increasing start time + txBytes := iter.Value() + if err := vm.codec.Unmarshal(txBytes, &tx); err != nil { + break // TODO what to do here? + //return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + } + switch staker := tx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + if !subnetID.Equals(constants.PrimaryNetworkID) { + continue + } + if staker.EndTime().Before(timestamp) { + if err := vm.removeStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't remove staker: %w", err) + } + } + case *UnsignedAddValidatorTx: + if !subnetID.Equals(constants.PrimaryNetworkID) { + continue + } + if staker.EndTime().Before(timestamp) { + if err := vm.removeStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't remove staker: %w", err) + } + } + case *UnsignedAddSubnetValidatorTx: + if !subnetID.Equals(staker.Validator.SubnetID()) { + continue + } + if staker.EndTime().Before(timestamp) { + if err := vm.removeStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't remove staker: %w", err) + } + } + case TimedTx: + if staker.EndTime().Before(timestamp) { + if err := vm.removeStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't remove staker: %w", err) + } + } + default: + vm.Ctx.Log.Warn(fmt.Sprintf("expected validator but got %T", tx.UnsignedTx)) + } + } } - return vdrList + + return nil } -// update the node's validator manager to contain the current validator set of the given Subnet -func (vm *VM) updateValidators(subnetID ids.ID) error { - validatorSet, subnetInitialized := vm.validators.GetValidatorSet(subnetID) - if !subnetInitialized { // validator manager doesn't know about this subnet yet - validatorSet = validators.NewSet() - vm.validators.PutValidatorSet(subnetID, validatorSet) +func (vm *VM) updateVdrMgr() error { + timestamp, err := vm.getTimestamp(vm.DB) + if err != nil { + return fmt.Errorf("can't get timestamp: %w", err) } - currentValidators, err := vm.getCurrentValidators(vm.DB, subnetID) + subnets, err := vm.getSubnets(vm.DB) if err != nil { return err } - validators := vm.getValidators(currentValidators) - return validatorSet.Set(validators) + subnetIDs := ids.Set{} + subnetIDs.Add(constants.PrimaryNetworkID) + for _, subnet := range subnets { + subnetIDs.Add(subnet.ID()) + } + + for _, subnetID := range subnetIDs.List() { + vdrs, initialized := vm.vdrMgr.GetValidators(subnetID) + if !initialized { + vdrs = validators.NewBestSet(5) + } + + prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) + iter := prefixdb.NewNested(prefixStart, vm.DB).NewIterator() + + var tx Tx + for iter.Next() { // Iterates in order of increasing start time + txBytes := 
iter.Value() + if err := vm.codec.Unmarshal(txBytes, &tx); err != nil { + break // TODO what to do here? + // return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + } + switch staker := tx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + if !subnetID.Equals(constants.PrimaryNetworkID) { + continue + } + if !staker.StartTime().After(timestamp) { // Staker is staking now + vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) + } + case *UnsignedAddValidatorTx: + if !subnetID.Equals(constants.PrimaryNetworkID) { + continue + } + if !staker.StartTime().After(timestamp) { // Staker is staking now + vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) + } + case *UnsignedAddSubnetValidatorTx: + if !subnetID.Equals(staker.Validator.SubnetID()) { + continue + } + if !staker.StartTime().After(timestamp) { // Staker is staking now + vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) + } + default: + vm.Ctx.Log.Warn(fmt.Sprintf("expected validator but got %T", tx.UnsignedTx)) + } + } + vm.vdrMgr.Set(subnetID, vdrs) + } + return nil } // Codec ... From 766faf908c3557f5e5d87bd7ac3bf02a75144d6e Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sat, 29 Aug 2020 10:02:56 -0400 Subject: [PATCH 28/47] cleaned up validator sets --- snow/validators/manager.go | 21 ++++------ snow/validators/set.go | 79 ++++++++++++++++--------------------- snow/validators/set_test.go | 77 ++++++++++++++++-------------------- 3 files changed, 75 insertions(+), 102 deletions(-) diff --git a/snow/validators/manager.go b/snow/validators/manager.go index 0bf40b68e9a0..ba8eec611289 100644 --- a/snow/validators/manager.go +++ b/snow/validators/manager.go @@ -18,7 +18,7 @@ type Manager interface { AddWeight(ids.ID, ids.ShortID, uint64) error // RemoveWeight removes weight from a given validator on a given subnet - RemoveWeight(ids.ID, ids.ShortID, uint64) + RemoveWeight(ids.ID, ids.ShortID, uint64) error // GetValidators returns the validator set for the given subnet // Returns false if the subnet doesn't exist @@ -55,26 +55,21 @@ func (m *manager) AddWeight(subnetID ids.ID, vdrID ids.ShortID, weight uint64) e vdrs, ok := m.subnetToVdrs[subnetIDKey] if !ok { - vdrs = NewBestSet(5) - if err := vdrs.AddWeight(vdrID, weight); err != nil { - return err - } - } else { - vdrs.AddWeight(vdrID, weight) + vdrs = NewSet() + m.subnetToVdrs[subnetIDKey] = vdrs } - m.subnetToVdrs[subnetIDKey] = vdrs - return nil + return vdrs.AddWeight(vdrID, weight) } // RemoveValidatorSet implements the Manager interface. -func (m *manager) RemoveWeight(subnetID ids.ID, vdrID ids.ShortID, weight uint64) { +func (m *manager) RemoveWeight(subnetID ids.ID, vdrID ids.ShortID, weight uint64) error { m.lock.Lock() defer m.lock.Unlock() - vdrs, ok := m.subnetToVdrs[subnetID.Key()] - if ok { - vdrs.RemoveWeight(vdrID, weight) + if vdrs, ok := m.subnetToVdrs[subnetID.Key()]; ok { + return vdrs.RemoveWeight(vdrID, weight) } + return nil } // GetValidatorSet implements the Manager interface. 
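[Editor's note] Patch 28 shrinks the validators.Manager surface to two weight mutations that both return errors, with AddWeight lazily creating a subnet's set on first use (replacing the NewBestSet/PutValidatorSet dance visible in vm.go above). Below is a minimal sketch of how the reworked API composes; it is an illustration, not part of the patch, and it assumes validators.NewManager() is the exported constructor and that the returned set exposes Weight() as exercised in set_test.go further down.

    package main

    import (
    	"fmt"

    	"github.com/ava-labs/gecko/ids"
    	"github.com/ava-labs/gecko/snow/validators"
    )

    func main() {
    	mgr := validators.NewManager() // assumed constructor name
    	subnetID := ids.GenerateTestID()
    	nodeID := ids.GenerateTestShortID()

    	// AddWeight creates the subnet's set on first use, so callers
    	// no longer pre-register sets before mutating them.
    	if err := mgr.AddWeight(subnetID, nodeID, 100); err != nil {
    		fmt.Println("add weight failed:", err)
    	}

    	// RemoveWeight now surfaces errors instead of failing silently;
    	// removing from an unknown subnet is a no-op.
    	if err := mgr.RemoveWeight(subnetID, nodeID, 40); err != nil {
    		fmt.Println("remove weight failed:", err)
    	}

    	if vdrs, ok := mgr.GetValidators(subnetID); ok {
    		fmt.Println("remaining subnet weight:", vdrs.Weight())
    	}
    }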
diff --git a/snow/validators/set.go b/snow/validators/set.go index 008d53d58279..958e200a4ea4 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -8,8 +8,6 @@ import ( "strings" "sync" - "math" - "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/formatting" safemath "github.com/ava-labs/gecko/utils/math" @@ -157,40 +155,31 @@ func (s *set) addWeight(vdrID ids.ShortID, weight uint64) error { return nil // This validator would never be sampled anyway } + newTotalWeight, err := safemath.Add64(s.totalWeight, weight) + if err != nil { + return nil + } + s.totalWeight = newTotalWeight + vdrIDKey := vdrID.Key() + var vdr *validator i, ok := s.vdrMap[vdrIDKey] - if ok { // Validator already exists - vdr := s.vdrSlice[i] - newWeight, err := safemath.Add64(s.vdrWeights[i], weight) - if err != nil { - newWeight = math.MaxUint64 - } - s.vdrWeights[i] = newWeight - s.totalWeight, err = safemath.Add64(s.totalWeight, weight) - if err != nil { - newWeight = 0 + if !ok { + vdr = &validator{ + id: vdrID, } - vdr.addWeight(weight) - s.vdrSlice[i] = vdr - return s.sampler.Initialize(s.vdrWeights) + i = len(s.vdrSlice) + s.vdrSlice = append(s.vdrSlice, vdr) + s.vdrWeights = append(s.vdrWeights, 0) + s.vdrMap[vdrIDKey] = i + } else { + vdr = s.vdrSlice[i] } - vdr := &validator{ - id: vdrID, - weight: weight, - } - i = len(s.vdrSlice) - s.vdrSlice = append(s.vdrSlice, vdr) - s.vdrWeights = append(s.vdrWeights, weight) - s.vdrMap[vdrIDKey] = i - newTotalWeight, err := safemath.Add64(s.totalWeight, weight) - if err != nil { - return err - } - s.totalWeight = newTotalWeight + s.vdrWeights[i] += weight + vdr.addWeight(weight) return s.sampler.Initialize(s.vdrWeights) - } // GetWeight implements the Set interface. @@ -202,11 +191,10 @@ func (s *set) GetWeight(vdrID ids.ShortID) (uint64, bool) { } func (s *set) getWeight(vdrID ids.ShortID) (uint64, bool) { - index, ok := s.vdrMap[vdrID.Key()] - if !ok { - return 0, false + if index, ok := s.vdrMap[vdrID.Key()]; ok { + return s.vdrWeights[index], true } - return s.vdrWeights[index], true + return 0, false } // RemoveWeight implements the Set interface. 
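[Editor's note] The addWeight hunk above moves the overflow check to the front of the function: the new total is validated with safemath.Add64 before the slice, the index map, or the cached total is touched, so the three can never disagree. A standalone sketch of that guard under the same safemath API; addWeightChecked and its arguments are illustrative, not the set's real internals.

    package main

    import (
    	"fmt"
    	"math"

    	safemath "github.com/ava-labs/gecko/utils/math"
    )

    // addWeightChecked validates the running total before mutating
    // anything, mirroring the guard in the hunk above.
    func addWeightChecked(total *uint64, weights []uint64, i int, weight uint64) error {
    	newTotal, err := safemath.Add64(*total, weight)
    	if err != nil {
    		return err // adding this weight would overflow uint64
    	}
    	*total = newTotal
    	weights[i] += weight
    	return nil
    }

    func main() {
    	total := uint64(math.MaxUint64 - 5)
    	weights := []uint64{total}
    	if err := addWeightChecked(&total, weights, 0, 10); err != nil {
    		fmt.Println("rejected oversized weight:", err)
    	}
    }

Note that the hunk as diffed returns nil when Add64 overflows, silently treating an update that would overflow the total as a no-op; propagating the error, as the sketch does, is the stricter design choice.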
@@ -222,25 +210,24 @@ func (s *set) removeWeight(vdrID ids.ShortID, weight uint64) error { return nil } - vdrIDKey := vdrID.Key() - i, ok := s.vdrMap[vdrIDKey] + i, ok := s.vdrMap[vdrID.Key()] if !ok { return nil } + // Validator exists vdr := s.vdrSlice[i] - newWeight, err := safemath.Sub64(s.vdrWeights[i], weight) - if err != nil { - newWeight = 0 - } - s.vdrWeights[i] = newWeight - s.totalWeight, err = safemath.Sub64(s.totalWeight, weight) - if err != nil { - newWeight = 0 - } + + weight = safemath.Min64(s.vdrWeights[i], weight) + s.vdrWeights[i] -= weight + s.totalWeight -= weight vdr.removeWeight(weight) - s.vdrSlice[i] = vdr + + if vdr.Weight() == 0 { + if err := s.remove(vdrID); err != nil { + return err + } + } return s.sampler.Initialize(s.vdrWeights) } diff --git a/snow/validators/set_test.go b/snow/validators/set_test.go index 05447515a5b5..170e5c966b14 100644 --- a/snow/validators/set_test.go +++ b/snow/validators/set_test.go @@ -38,101 +38,92 @@ func TestSetSet(t *testing.T) { } func TestSamplerSample(t *testing.T) { - vdr0 := GenerateRandomValidator(1) - vdr1 := GenerateRandomValidator(math.MaxInt64 - 1) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() s := NewSet() - err := s.Add(vdr0) + err := s.AddWeight(vdr0, 1) assert.NoError(t, err) sampled, err := s.Sample(1) assert.NoError(t, err) assert.Len(t, sampled, 1, "should have only sampled one validator") - assert.Equal(t, vdr0.ID(), sampled[0].ID(), "should have sampled vdr0") + assert.Equal(t, vdr0, sampled[0].ID(), "should have sampled vdr0") _, err = s.Sample(2) assert.Error(t, err, "should have errored during sampling") - err = s.Add(vdr1) + err = s.AddWeight(vdr1, math.MaxInt64-1) assert.NoError(t, err) sampled, err = s.Sample(1) assert.NoError(t, err) assert.Len(t, sampled, 1, "should have only sampled one validator") - assert.Equal(t, vdr1.ID(), sampled[0].ID(), "should have sampled vdr1") + assert.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") sampled, err = s.Sample(2) assert.NoError(t, err) assert.Len(t, sampled, 2, "should have sampled two validators") - assert.Equal(t, vdr1.ID(), sampled[0].ID(), "should have sampled vdr1") - assert.Equal(t, vdr1.ID(), sampled[1].ID(), "should have sampled vdr1") + assert.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") + assert.Equal(t, vdr1, sampled[1].ID(), "should have sampled vdr1") sampled, err = s.Sample(3) assert.NoError(t, err) assert.Len(t, sampled, 3, "should have sampled three validators") - assert.Equal(t, vdr1.ID(), sampled[0].ID(), "should have sampled vdr1") - assert.Equal(t, vdr1.ID(), sampled[1].ID(), "should have sampled vdr1") - assert.Equal(t, vdr1.ID(), sampled[2].ID(), "should have sampled vdr1") + assert.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") + assert.Equal(t, vdr1, sampled[1].ID(), "should have sampled vdr1") + assert.Equal(t, vdr1, sampled[2].ID(), "should have sampled vdr1") } func TestSamplerDuplicate(t *testing.T) { - vdr0 := GenerateRandomValidator(1) - vdr1_0 := GenerateRandomValidator(math.MaxInt64 - 1) - vdr1_1 := NewValidator(vdr1_0.ID(), 0) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() s := NewSet() - err := s.Add(vdr0) - assert.NoError(t, err) - - err = s.Add(vdr1_0) + err := s.AddWeight(vdr0, 1) assert.NoError(t, err) - sampled, err := s.Sample(1) + err = s.AddWeight(vdr1, 1) assert.NoError(t, err) - assert.Len(t, sampled, 1, "should have only sampled one validator") - assert.Equal(t, vdr1_0.ID(), sampled[0].ID(), "should have sampled vdr1") - err = 
s.Add(vdr1_1) + err = s.AddWeight(vdr1, math.MaxInt64-2) assert.NoError(t, err) - sampled, err = s.Sample(1) + sampled, err := s.Sample(1) assert.NoError(t, err) assert.Len(t, sampled, 1, "should have only sampled one validator") - assert.Equal(t, vdr0.ID(), sampled[0].ID(), "should have sampled vdr0") + assert.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") } func TestSamplerContains(t *testing.T) { - vdr := GenerateRandomValidator(1) + vdr := ids.GenerateTestShortID() s := NewSet() - err := s.Add(vdr) + err := s.AddWeight(vdr, 1) assert.NoError(t, err) - contains := s.Contains(vdr.ID()) + contains := s.Contains(vdr) assert.True(t, contains, "should have contained validator") - err = s.Remove(vdr.ID()) + err = s.RemoveWeight(vdr, 1) assert.NoError(t, err) - contains = s.Contains(vdr.ID()) + contains = s.Contains(vdr) assert.False(t, contains, "shouldn't have contained validator") } func TestSamplerString(t *testing.T) { - vdr0 := NewValidator(ids.ShortEmpty, 1) - vdr1 := NewValidator( - ids.NewShortID([20]byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - }), - math.MaxInt64-1, - ) + vdr0 := ids.ShortEmpty + vdr1 := ids.NewShortID([20]byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + }) s := NewSet() - err := s.Add(vdr0) + err := s.AddWeight(vdr0, 1) assert.NoError(t, err) - err = s.Add(vdr1) + err = s.AddWeight(vdr1, math.MaxInt64-1) assert.NoError(t, err) expected := "Validator Set: (Size = 2)\n" + @@ -143,16 +134,16 @@ func TestSamplerString(t *testing.T) { } func TestSetWeight(t *testing.T) { + vdr0 := ids.NewShortID([20]byte{1}) weight0 := uint64(93) - vdr0 := NewValidator(ids.NewShortID([20]byte{1}), weight0) + vdr1 := ids.NewShortID([20]byte{2}) weight1 := uint64(123) - vdr1 := NewValidator(ids.NewShortID([20]byte{2}), weight1) s := NewSet() - err := s.Add(vdr0) + err := s.AddWeight(vdr0, weight0) assert.NoError(t, err) - err = s.Add(vdr1) + err = s.AddWeight(vdr1, weight1) assert.NoError(t, err) setWeight := s.Weight() From a07d7c0ff8c5d2b076403e1aec9ccd80365fb9f4 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sat, 29 Aug 2020 11:04:01 -0400 Subject: [PATCH 29/47] wip --- snow/engine/snowman/bootstrap/block_job.go | 4 +- vms/platformvm/add_delegator_tx.go | 5 +- vms/platformvm/add_subnet_validator_tx.go | 14 ++- vms/platformvm/add_validator_tx.go | 16 ++- vms/platformvm/common_blocks.go | 3 + vms/platformvm/reward_validator_tx.go | 5 +- vms/platformvm/spend.go | 3 +- vms/platformvm/state.go | 121 ++++++++++++--------- vms/platformvm/vm.go | 7 +- 9 files changed, 111 insertions(+), 67 deletions(-) diff --git a/snow/engine/snowman/bootstrap/block_job.go b/snow/engine/snowman/bootstrap/block_job.go index 3e463cb472ca..0328a572ae13 100644 --- a/snow/engine/snowman/bootstrap/block_job.go +++ b/snow/engine/snowman/bootstrap/block_job.go @@ -66,8 +66,8 @@ func (b *blockJob) Execute() error { return fmt.Errorf("attempting to execute block with status %s", status) case choices.Processing: if err := b.blk.Verify(); err != nil { - b.log.Debug("block %s failed verification during bootstrapping due to %s", - b.blk.ID(), err) + return fmt.Errorf("block %s failed verification during bootstrapping due to %s", + b.blk.ID(), err, b.blk) } b.numAccepted.Inc() diff --git a/vms/platformvm/add_delegator_tx.go b/vms/platformvm/add_delegator_tx.go index d84a6913e8e1..af5cf55dbda6 100644 --- a/vms/platformvm/add_delegator_tx.go 
+++ b/vms/platformvm/add_delegator_tx.go @@ -129,7 +129,10 @@ func (tx *UnsignedAddDelegatorTx) SemanticVerify( // Ensure that the period this delegator delegates is a subset of the time // the validator validates. - vdr, isValidator := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) + vdr, isValidator, err := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) + if err != nil { + return nil, nil, nil, nil, tempError{err} + } if isValidator && !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) { return nil, nil, nil, nil, permError{errDSValidatorSubset} } diff --git a/vms/platformvm/add_subnet_validator_tx.go b/vms/platformvm/add_subnet_validator_tx.go index 98a7e58e0014..d23f40a15fba 100644 --- a/vms/platformvm/add_subnet_validator_tx.go +++ b/vms/platformvm/add_subnet_validator_tx.go @@ -103,7 +103,10 @@ func (tx *UnsignedAddSubnetValidatorTx) SemanticVerify( // Ensure that the period this validator validates the specified subnet is a // subnet of the time they validate the primary network. - vdr, isValidator := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) + vdr, isValidator, err := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) + if err != nil { + return nil, nil, nil, nil, tempError{err} + } if isValidator && !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) { return nil, nil, nil, nil, permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating primary network [%v, %v]", @@ -112,7 +115,10 @@ func (tx *UnsignedAddSubnetValidatorTx) SemanticVerify( } // Ensure the proposed validator is not already a validator of the specified subnet - vdr, isValidator = vm.isValidator(db, tx.Validator.Subnet, tx.Validator.NodeID) + vdr, isValidator, err = vm.isValidator(db, tx.Validator.Subnet, tx.Validator.NodeID) + if err != nil { + return nil, nil, nil, nil, tempError{err} + } if isValidator && !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) { return nil, nil, nil, nil, permError{fmt.Errorf("already validating subnet between [%v, %v]", @@ -123,9 +129,9 @@ func (tx *UnsignedAddSubnetValidatorTx) SemanticVerify( baseTxCreds := stx.Creds[:baseTxCredsLen] subnetCred := stx.Creds[baseTxCredsLen] - subnet, err := vm.getSubnet(db, tx.Validator.Subnet) + subnet, timedErr := vm.getSubnet(db, tx.Validator.Subnet) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, timedErr } unsignedSubnet := subnet.UnsignedTx.(*UnsignedCreateSubnetTx) if err := vm.fx.VerifyPermission(tx, tx.SubnetAuth, subnetCred, unsignedSubnet.Owner); err != nil { diff --git a/vms/platformvm/add_validator_tx.go b/vms/platformvm/add_validator_tx.go index e0418bbffb87..4304852ab671 100644 --- a/vms/platformvm/add_validator_tx.go +++ b/vms/platformvm/add_validator_tx.go @@ -122,11 +122,13 @@ func (tx *UnsignedAddValidatorTx) SemanticVerify( ) { // Verify the tx is well-formed if err := tx.Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil { + vm.Ctx.Log.Error("Verify: %s", err) return nil, nil, nil, nil, permError{err} } // Ensure the proposed validator starts after the current time if currentTime, err := vm.getTimestamp(db); err != nil { + vm.Ctx.Log.Error("Timestamp: %s", err) return nil, nil, nil, nil, tempError{err} } else if startTime := tx.StartTime(); !currentTime.Before(startTime) { return nil, nil, nil, nil, permError{fmt.Errorf("validator's start time (%s) at or after current timestamp (%s)", @@ -134,7 +136,11 @@ func (tx *UnsignedAddValidatorTx) 
SemanticVerify( startTime)} } - vdr, isValidator := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) + vdr, isValidator, err := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) + if err != nil { + vm.Ctx.Log.Error("isValidator: %s", err) + return nil, nil, nil, nil, tempError{err} + } if isValidator && tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) { return nil, nil, nil, nil, permError{fmt.Errorf("validator %s already is already a primary network validator from %s to %s", tx.Validator.NodeID, vdr.StartTime(), vdr.EndTime())} @@ -146,6 +152,7 @@ func (tx *UnsignedAddValidatorTx) SemanticVerify( // Verify the flowcheck if err := vm.semanticVerifySpend(db, tx, tx.Ins, outs, stx.Creds, vm.txFee, vm.Ctx.AVAXAssetID); err != nil { + vm.Ctx.Log.Error("semanticVerify: %s", err) return nil, nil, nil, nil, err } @@ -155,25 +162,30 @@ func (tx *UnsignedAddValidatorTx) SemanticVerify( onCommitDB := versiondb.New(db) // Consume the UTXOS if err := vm.consumeInputs(onCommitDB, tx.Ins); err != nil { + vm.Ctx.Log.Error("consumeInputs: %s", err) return nil, nil, nil, nil, tempError{err} } // Produce the UTXOS if err := vm.produceOutputs(onCommitDB, txID, tx.Outs); err != nil { + vm.Ctx.Log.Error("produceOutputs: %s", err) return nil, nil, nil, nil, tempError{err} } // Add validator to set of pending validators if err := vm.addStaker(onCommitDB, constants.PrimaryNetworkID, stx); err != nil { + vm.Ctx.Log.Error("addStaker: %s", err) return nil, nil, nil, nil, tempError{err} } onAbortDB := versiondb.New(db) // Consume the UTXOS if err := vm.consumeInputs(onAbortDB, tx.Ins); err != nil { + vm.Ctx.Log.Error("consumeInputs: %s", err) return nil, nil, nil, nil, tempError{err} } // Produce the UTXOS - if err := vm.produceOutputs(onAbortDB, txID, tx.Outs); err != nil { + if err := vm.produceOutputs(onAbortDB, txID, outs); err != nil { + vm.Ctx.Log.Error("produceOutputs: %s", err) return nil, nil, nil, nil, tempError{err} } diff --git a/vms/platformvm/common_blocks.go b/vms/platformvm/common_blocks.go index b62f92d57552..a54695c5988e 100644 --- a/vms/platformvm/common_blocks.go +++ b/vms/platformvm/common_blocks.go @@ -206,6 +206,9 @@ func (cdb *CommonDecisionBlock) onAccept() database.Database { if cdb.Status().Decided() { return cdb.vm.DB } + if cdb.onAcceptDB == nil { + panic(":(") + } return cdb.onAcceptDB } diff --git a/vms/platformvm/reward_validator_tx.go b/vms/platformvm/reward_validator_tx.go index 024f5a142dee..5b992569d8f1 100644 --- a/vms/platformvm/reward_validator_tx.go +++ b/vms/platformvm/reward_validator_tx.go @@ -150,7 +150,10 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( } case *UnsignedAddDelegatorTx: // We're removing a delegator - vdrTx, ok := vm.isValidator(db, constants.PrimaryNetworkID, uStakerTx.Validator.NodeID) + vdrTx, ok, err := vm.isValidator(db, constants.PrimaryNetworkID, uStakerTx.Validator.NodeID) + if err != nil { + return nil, nil, nil, nil, tempError{err} + } if !ok { return nil, nil, nil, nil, permError{ fmt.Errorf("couldn't find validator %s: %w", uStakerTx.Validator.NodeID, err)} diff --git a/vms/platformvm/spend.go b/vms/platformvm/spend.go index 8585313a2643..dee84c8294e2 100644 --- a/vms/platformvm/spend.go +++ b/vms/platformvm/spend.go @@ -337,7 +337,7 @@ func (vm *VM) semanticVerifySpend( utxoID := input.UTXOID.InputID() utxo, err := vm.getUTXO(db, utxoID) if err != nil { - return tempError{err} + return tempError{fmt.Errorf("failed to read consumed UTXO %s due to: %w", utxoID, err)} } utxos[index] = utxo } @@ 
-536,6 +536,7 @@ func (vm *VM) consumeInputs( if err := vm.removeUTXO(db, utxoID); err != nil { return tempError{err} } + vm.Ctx.Log.Error("Consuming UTXOID %s", utxoID) } return nil } diff --git a/vms/platformvm/state.go b/vms/platformvm/state.go index 5d3c87ac8cb0..a891feda7da8 100644 --- a/vms/platformvm/state.go +++ b/vms/platformvm/state.go @@ -9,6 +9,8 @@ import ( "fmt" "time" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" @@ -75,13 +77,18 @@ func (vm *VM) getStatus(db database.Database, ID ids.ID) (Status, error) { // A staker may be a validator or a delegator func (vm *VM) addStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { var staker TimedTx - switch stakerTx.UnsignedTx.(type) { - case *UnsignedAddDelegatorTx, *UnsignedAddSubnetValidatorTx, *UnsignedAddValidatorTx: - staker = stakerTx.UnsignedTx.(TimedTx) + switch unsignedTx := stakerTx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + staker = unsignedTx + case *UnsignedAddSubnetValidatorTx: + staker = unsignedTx + case *UnsignedAddValidatorTx: + staker = unsignedTx default: return fmt.Errorf("staker is unexpected type %T", stakerTx) } stakerID := staker.ID().Bytes() // Tx ID of this tx + txBytes := stakerTx.Bytes() // Sorted by subnet ID then start time then tx ID prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) @@ -94,33 +101,39 @@ func (vm *VM) addStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) err prefixStopDB.Close() }() - p := wrappers.Packer{MaxSize: wrappers.LongLen} + p := wrappers.Packer{MaxSize: wrappers.LongLen + hashing.HashLen} p.PackLong(uint64(staker.StartTime().Unix())) + p.PackFixedBytes(stakerID) if p.Err != nil { - return fmt.Errorf("couldn't serialize start time: %w", p.Err) + return fmt.Errorf("couldn't serialize validator key: %w", p.Err) } - startKey := append(p.Bytes, stakerID...) + startKey := p.Bytes - p = wrappers.Packer{MaxSize: wrappers.LongLen} + p = wrappers.Packer{MaxSize: wrappers.LongLen + hashing.HashLen} p.PackLong(uint64(staker.EndTime().Unix())) + p.PackFixedBytes(stakerID) if p.Err != nil { - return fmt.Errorf("couldn't serialize stop time: %w", p.Err) + return fmt.Errorf("couldn't serialize validator key: %w", p.Err) } - stopKey := append(p.Bytes, stakerID...) 
+ stopKey := p.Bytes - if err := prefixStartDB.Put(startKey, stakerTx.Bytes()); err != nil { + if err := prefixStartDB.Put(startKey, txBytes); err != nil { return err } - return prefixStopDB.Put(stopKey, stakerTx.Bytes()) + return prefixStopDB.Put(stopKey, txBytes) } // Remove a staker from subnet [subnetID] // A staker may be a validator or a delegator func (vm *VM) removeStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { var staker TimedTx - switch stakerTx.UnsignedTx.(type) { - case *UnsignedAddDelegatorTx, *UnsignedAddSubnetValidatorTx, *UnsignedAddValidatorTx: - staker = stakerTx.UnsignedTx.(TimedTx) + switch unsignedTx := stakerTx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + staker = unsignedTx + case *UnsignedAddSubnetValidatorTx: + staker = unsignedTx + case *UnsignedAddValidatorTx: + staker = unsignedTx default: return fmt.Errorf("staker is unexpected type %T", stakerTx) } @@ -137,91 +150,91 @@ func (vm *VM) removeStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) prefixStopDB.Close() }() - p := wrappers.Packer{MaxSize: wrappers.LongLen} + p := wrappers.Packer{MaxSize: wrappers.LongLen + hashing.HashLen} p.PackLong(uint64(staker.StartTime().Unix())) + p.PackFixedBytes(stakerID) if p.Err != nil { - return fmt.Errorf("couldn't serialize start time: %w", p.Err) + return fmt.Errorf("couldn't serialize validator key: %w", p.Err) } - startKey := append(p.Bytes, stakerID...) + startKey := p.Bytes - p = wrappers.Packer{MaxSize: wrappers.LongLen} + p = wrappers.Packer{MaxSize: wrappers.LongLen + hashing.HashLen} p.PackLong(uint64(staker.EndTime().Unix())) + p.PackFixedBytes(stakerID) if p.Err != nil { - return fmt.Errorf("couldn't serialize stop time: %w", p.Err) + return fmt.Errorf("couldn't serialize validator key: %w", p.Err) } - stopKey := append(p.Bytes, stakerID...) + stopKey := p.Bytes - if err := prefixStartDB.Put(startKey, nil); err != nil { + if err := prefixStartDB.Delete(startKey); err != nil { return err } - return prefixStopDB.Put(stopKey, nil) + return prefixStopDB.Delete(stopKey) } // Returns the pending staker that will start staking next func (vm *VM) nextStakerStart(db database.Database, subnetID ids.ID) (*Tx, error) { iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, start)), db).NewIterator() + defer iter.Release() + + if !iter.Next() { + return nil, errNoValidators + } // Key: [Staker start time] | [Tx ID] // Value: Byte repr. of tx that added this validator - if iter.Next() { - var tx Tx - if err := Codec.Unmarshal(iter.Value(), &tx); err != nil { - return nil, err - } - if err := tx.Sign(vm.codec, nil); err != nil { - return nil, fmt.Errorf("couldn't sign tx: %w", err) - } - return &tx, nil + + tx := Tx{} + if err := Codec.Unmarshal(iter.Value(), &tx); err != nil { + return nil, err } - return nil, errNoValidators + return &tx, tx.Sign(vm.codec, nil) } // Returns the current staker that will stop staking next func (vm *VM) nextStakerStop(db database.Database, subnetID ids.ID) (*Tx, error) { iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, stop)), db).NewIterator() + defer iter.Release() + + if !iter.Next() { + return nil, errNoValidators + } // Key: [Staker stop time] | [Tx ID] // Value: Byte repr. 
of tx that added this validator - if iter.Next() { - var tx Tx - if err := Codec.Unmarshal(iter.Value(), &tx); err != nil { - return nil, err - } - if err := tx.Sign(vm.codec, nil); err != nil { - return nil, fmt.Errorf("couldn't sign tx: %w", err) - } - return &tx, nil + + tx := Tx{} + if err := Codec.Unmarshal(iter.Value(), &tx); err != nil { + return nil, err } - return nil, errNoValidators + return &tx, tx.Sign(vm.codec, nil) } // Returns true if [nodeID] is a validator (not a delegator) of subnet [subnetID] -func (vm *VM) isValidator(db database.Database, subnetID ids.ID, nodeID ids.ShortID) (TimedTx, bool) { +func (vm *VM) isValidator(db database.Database, subnetID ids.ID, nodeID ids.ShortID) (TimedTx, bool, error) { iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, start)), db).NewIterator() + defer iter.Release() + for iter.Next() { txBytes := iter.Value() - if txBytes == nil { - break - } - var tx Tx + tx := Tx{} if err := Codec.Unmarshal(txBytes, &tx); err != nil { - vm.Ctx.Log.Warn("couldn't unmarshal Tx: %s", err) - return nil, false + return nil, false, err } if err := tx.Sign(vm.codec, nil); err != nil { - vm.Ctx.Log.Warn("couldn't sign *Tx: %s", err) - return nil, false + return nil, false, err } + switch vdr := tx.UnsignedTx.(type) { case *UnsignedAddValidatorTx: if subnetID.Equals(constants.PrimaryNetworkID) && vdr.Validator.NodeID.Equals(nodeID) { - return vdr, true + return vdr, true, nil } case *UnsignedAddSubnetValidatorTx: if subnetID.Equals(vdr.Validator.SubnetID()) && vdr.Validator.NodeID.Equals(nodeID) { - return vdr, true + return vdr, true, nil } } } - return nil, false + return nil, false, nil } // getUTXO returns the UTXO with the specified ID diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 365585a3420d..9e82a9243d51 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -795,9 +795,12 @@ func (vm *VM) updateValidators(db database.Database) error { for iter.Next() { // Iterates in order of increasing start time txBytes := iter.Value() if err := vm.codec.Unmarshal(txBytes, &tx); err != nil { - break // TODO what to do here? - //return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + } + if err := tx.Sign(vm.codec, nil); err != nil { + return err } + switch staker := tx.UnsignedTx.(type) { case *UnsignedAddDelegatorTx: if !subnetID.Equals(constants.PrimaryNetworkID) { From 8534f00c889a6d5a0deb216a6db3c6567e94897f Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sat, 29 Aug 2020 12:37:11 -0400 Subject: [PATCH 30/47] reverted change in UTXO TxID --- vms/platformvm/reward_validator_tx.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/vms/platformvm/reward_validator_tx.go b/vms/platformvm/reward_validator_tx.go index 5b992569d8f1..d351bf29a469 100644 --- a/vms/platformvm/reward_validator_tx.go +++ b/vms/platformvm/reward_validator_tx.go @@ -66,16 +66,15 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( case len(stx.Creds) != 0: return nil, nil, nil, nil, permError{errWrongNumberOfCredentials} } - txID := tx.ID() stakerTx, err := vm.nextStakerStop(db, constants.PrimaryNetworkID) if err != nil { return nil, nil, nil, nil, permError{err} } - if !stakerTx.ID().Equals(tx.TxID) { + if stakerID := stakerTx.ID(); !stakerID.Equals(tx.TxID) { return nil, nil, nil, nil, permError{fmt.Errorf("attempting to remove TxID: %s. 
Should be removing %s", tx.TxID, - stakerTx)} + stakerID)} } // Verify that the chain's timestamp is the validator's end time @@ -112,7 +111,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( for i, out := range uStakerTx.Stake { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ - TxID: tx.ID(), + TxID: tx.TxID, OutputIndex: uint32(len(uStakerTx.Outs) + i), }, Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID}, @@ -139,7 +138,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( } if err := vm.putUTXO(onCommitDB, &avax.UTXO{ UTXOID: avax.UTXOID{ - TxID: txID, + TxID: tx.TxID, OutputIndex: uint32(len(uStakerTx.Outs) + len(uStakerTx.Stake)), }, Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID}, @@ -168,7 +167,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( for i, out := range uStakerTx.Stake { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ - TxID: txID, + TxID: tx.TxID, OutputIndex: uint32(len(uStakerTx.Outs) + i), }, Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID}, @@ -209,7 +208,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( } if err := vm.putUTXO(onCommitDB, &avax.UTXO{ UTXOID: avax.UTXOID{ - TxID: txID, + TxID: tx.TxID, OutputIndex: uint32(len(uStakerTx.Outs) + len(uStakerTx.Stake)), }, Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID}, @@ -233,7 +232,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( } if err := vm.putUTXO(onCommitDB, &avax.UTXO{ UTXOID: avax.UTXOID{ - TxID: txID, + TxID: tx.TxID, OutputIndex: uint32(len(uStakerTx.Outs) + len(uStakerTx.Stake) + offset), }, Asset: avax.Asset{ID: vm.Ctx.AVAXAssetID}, From e0edebd602aefa99c96d4a8ebdcb3555d904a5fe Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sat, 29 Aug 2020 12:41:30 -0400 Subject: [PATCH 31/47] remove the correct weight from the peer --- node/node.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/node/node.go b/node/node.go index c2f3ea593e59..af1c445e574d 100644 --- a/node/node.go +++ b/node/node.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io/ioutil" - "math" "net" "os" "path/filepath" @@ -206,7 +205,7 @@ func (i *insecureValidatorManager) Connected(vdrID ids.ShortID) bool { func (i *insecureValidatorManager) Disconnected(vdrID ids.ShortID) bool { // Shouldn't error unless the set previously had an error, which should // never happen as described above - _ = i.vdrs.RemoveWeight(vdrID, math.MaxUint64) + _ = i.vdrs.RemoveWeight(vdrID, i.weight) return false } From bd94fd9d95154d5cb367d60fb76de31065017510 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sat, 29 Aug 2020 12:43:19 -0400 Subject: [PATCH 32/47] cleaned up error formatting --- snow/engine/snowman/bootstrap/block_job.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snow/engine/snowman/bootstrap/block_job.go b/snow/engine/snowman/bootstrap/block_job.go index 0328a572ae13..0f99072b6d92 100644 --- a/snow/engine/snowman/bootstrap/block_job.go +++ b/snow/engine/snowman/bootstrap/block_job.go @@ -66,8 +66,8 @@ func (b *blockJob) Execute() error { return fmt.Errorf("attempting to execute block with status %s", status) case choices.Processing: if err := b.blk.Verify(); err != nil { - return fmt.Errorf("block %s failed verification during bootstrapping due to %s", - b.blk.ID(), err, b.blk) + return fmt.Errorf("block %s failed verification during bootstrapping due to: %w", + b.blk.ID(), err) } b.numAccepted.Inc() From e42a17d94c09c6bd6ec8e5844ff3632d610abc26 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sat, 29 Aug 2020 12:50:15 -0400 Subject: [PATCH 33/47] removed debug logging --- 
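Patch 31 above is a one-line fix that is easy to miss: Disconnected previously removed math.MaxUint64 weight, which strips any weight the peer was granted from other sources, while Connected had only added i.weight. A toy tally showing why the add and remove amounts must match; validators.Set is reduced to a counter here:

package main

import "fmt"

// peerWeights reduces the validator set to a counter: Disconnected must
// remove exactly the weight Connected added (i.weight in the diff).
type peerWeights struct {
	weight uint64 // the fixed weight this manager grants per peer
	total  uint64 // stand-in for the set's weight for one validator
}

func (p *peerWeights) Connected() { p.total += p.weight }

func (p *peerWeights) Disconnected() {
	if p.total < p.weight {
		p.total = 0
		return
	}
	p.total -= p.weight
}

func main() {
	p := &peerWeights{weight: 1}
	p.Connected()
	p.total += 100 // weight granted elsewhere, e.g. by staking
	p.Disconnected()
	fmt.Println(p.total) // 100: only this manager's share was removed
}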
vms/platformvm/add_validator_tx.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/vms/platformvm/add_validator_tx.go b/vms/platformvm/add_validator_tx.go index 4304852ab671..635fa209911e 100644 --- a/vms/platformvm/add_validator_tx.go +++ b/vms/platformvm/add_validator_tx.go @@ -122,13 +122,11 @@ func (tx *UnsignedAddValidatorTx) SemanticVerify( ) { // Verify the tx is well-formed if err := tx.Verify(vm.Ctx, vm.codec, vm.txFee, vm.Ctx.AVAXAssetID, vm.minStake); err != nil { - vm.Ctx.Log.Error("Verify: %s", err) return nil, nil, nil, nil, permError{err} } // Ensure the proposed validator starts after the current time if currentTime, err := vm.getTimestamp(db); err != nil { - vm.Ctx.Log.Error("Timestamp: %s", err) return nil, nil, nil, nil, tempError{err} } else if startTime := tx.StartTime(); !currentTime.Before(startTime) { return nil, nil, nil, nil, permError{fmt.Errorf("validator's start time (%s) at or after current timestamp (%s)", @@ -138,7 +136,6 @@ func (tx *UnsignedAddValidatorTx) SemanticVerify( vdr, isValidator, err := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) if err != nil { - vm.Ctx.Log.Error("isValidator: %s", err) return nil, nil, nil, nil, tempError{err} } if isValidator && tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) { @@ -152,7 +149,6 @@ func (tx *UnsignedAddValidatorTx) SemanticVerify( // Verify the flowcheck if err := vm.semanticVerifySpend(db, tx, tx.Ins, outs, stx.Creds, vm.txFee, vm.Ctx.AVAXAssetID); err != nil { - vm.Ctx.Log.Error("semanticVerify: %s", err) return nil, nil, nil, nil, err } @@ -162,30 +158,25 @@ func (tx *UnsignedAddValidatorTx) SemanticVerify( onCommitDB := versiondb.New(db) // Consume the UTXOS if err := vm.consumeInputs(onCommitDB, tx.Ins); err != nil { - vm.Ctx.Log.Error("consumeInputs: %s", err) return nil, nil, nil, nil, tempError{err} } // Produce the UTXOS if err := vm.produceOutputs(onCommitDB, txID, tx.Outs); err != nil { - vm.Ctx.Log.Error("produceOutputs: %s", err) return nil, nil, nil, nil, tempError{err} } // Add validator to set of pending validators if err := vm.addStaker(onCommitDB, constants.PrimaryNetworkID, stx); err != nil { - vm.Ctx.Log.Error("addStaker: %s", err) return nil, nil, nil, nil, tempError{err} } onAbortDB := versiondb.New(db) // Consume the UTXOS if err := vm.consumeInputs(onAbortDB, tx.Ins); err != nil { - vm.Ctx.Log.Error("consumeInputs: %s", err) return nil, nil, nil, nil, tempError{err} } // Produce the UTXOS if err := vm.produceOutputs(onAbortDB, txID, outs); err != nil { - vm.Ctx.Log.Error("produceOutputs: %s", err) return nil, nil, nil, nil, tempError{err} } From 7ab5595b3f51e0cce88e9bc66e3eda55d519a951 Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Fri, 28 Aug 2020 15:47:02 -0400 Subject: [PATCH 34/47] Remove conflicting travis ci secure variables --- .ci/run_e2e_tests.sh | 3 +++ .travis.yml | 2 -- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 62c9a9b37620..bd5cf6e59ca9 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -6,6 +6,9 @@ GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) DOCKER_REPO="avaplatform" +echo "$DOCKER_USERNAME" +echo "${#DOCKER_USERNAME}" + echo "$DOCKER_PASS" | docker login --username "$DOCKER_USERNAME" --password-stdin TESTING_CONTROLLER_IMAGE="$DOCKER_REPO/avalanche-testing_controller:everest-dev" diff --git a/.travis.yml b/.travis.yml index 89e4a11eb88d..95b8cfdf46f5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,8 +17,6 @@ env: - 
E2E_TEST_HOME=$GOPATH/src/github.com/ava-labs/avalanche-testing/ - COMMIT=${TRAVIS_COMMIT::8} - DOCKERHUB_REPO=avaplatform/gecko - - secure: "L/A9+re0NEKP6EV6H9RcTGiDhX3WMvsiWrkRKDYKqnviqbjY30RK6EM4vvjrM4Lrw2QwsO3YKgnku3+zioE/TxEZFkpkbjNUXru0nYBrWAg1TKVsDXnYaIZkHUejfryST3E8N7F4Hx6zCtGEO0sEdUeKuT+MNUIuHezHooTgGzDjMogm70EWMFjQHc7VucTJu7dWU1RBPjovWQ0q9qflrtCpbrvXFIiihQQ1PQha1Q2C4wLakKuLbhhSafue90Mnyss0blaPHy/tyewcASJu4vsGTKRBn0DzttlkNTwuD6+nKrbmJY0ohunnkVFzYjrZAw1gyN+DCDb/lPbz4ZDItKPwrIUPEtL5xuUOrxUZPUh+0io3Q2d6rjaqkdGjd1KQXzbnW1mn0BxX3d3b2UpIqhBn9umYYjHBKnMuoRiTK33b7U9+LF3K84+tEvVDCPeHs/mw6Inp5jGRSravnM6yPQ6feGzogs4+3EMzZXxnkngKFKCsnd67Oe9xfV9amOU2aQAx4jaAwlPjEpBEkUa8YKx3lPznvmUk1QsNCUbLjdSl5JBaXojLJoiuPbj29hp4S5AXXgn+3Hvwk3ndcFCxi6/l1W9mjYSOtFqg3EAUdF4EgnA/ykQg9ZokkoKY0+qgOzG2bKOAYuCDWeGr7P1apToh00ccsQXL81nVPiq7uDw=" - - secure: "zfTm7tJBYiPYrli76d4Ep6Lc2TJQ8Xv//+7OoqTA/aIf6YJDHe05f2GFTWAHG2iOIix/yjwHYwnhyIW66eWPb+Ujejnmh4eXlYZFufX9J5jUpDpbFu/+ybOLgE1Tmr0je0ycneSMe/NAaS74nWU1wnP34/cEE4sYL7TJyhwbeEtgz3cbSWwkpdvHFbXCjSOA196jdIYYUwsnqU9yycAG+2WUSk3DHHzzdtMrh/UOH2r1VFyp5US0zmbW90WkWX+o3TIlzZJgTUGQRNnWKq95Mrh1EQotxgL6CJ8NkfY4bVAGAhusPjdjscJsHxfY93WRMH64TzPYYp0zdibatH0ztyhnZPXVKqv+AIIVTEW+xWv5V18kTQAd1uBW103NFacbgXhIGWtbFcN9g1+ws29HROMclYs7ci6+72Qnq0eL55huqSyFx6+InhYwn+LfJmaBcGW4wx1umdp505M0obZ4ghlyn6b0pDYmqsu1XyBC3mjUTFbwlQmWE2Fize4L5o+DdH4ZDc9japF9ntxIMvO+b3nOicr7tplY2AGp61bB89o3dUAFlN5mDaEJotiAuFk5mo244rY1FjSzyGiKkA3M9TkTIbgcbN098hOJoMCYybH7yqiPwNnZiFvUuYjHuC5D1kIYBWuqqO0iVcbIZn0rV2jyzbVFlhFVk2clTZGhkrY=" before_install: - pip install --user awscli # need awscli to access byz-gecko images - if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/before_install_linux.sh; fi From 637febafc21d1cee644eee524fc4a9a58d13259e Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Sat, 29 Aug 2020 13:17:39 -0400 Subject: [PATCH 35/47] Run only the non-byzantine tests when docker credentials are not present --- .ci/run_e2e_tests.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index bd5cf6e59ca9..9aba21135f60 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -29,4 +29,10 @@ git checkout "tags/$E2E_TAG" -b "$E2E_TAG" go mod edit -replace github.com/ava-labs/gecko="$GECKO_HOME" bash "./scripts/rebuild_initializer_binary.sh" -./build/avalanche-testing --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${TESTING_CONTROLLER_IMAGE}" --byzantine-image-name="${BYZANTINE_IMAGE}" + +# If Docker Credentials are not present (ex. 
PR from an untrusted fork) run only the non-byzantine tests +if [[ ${#DOCKER_USERNAME} == 0 ]]; then + ./build/avalanche-testing --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${TESTING_CONTROLLER_IMAGE}" +else + ./build/avalanche-testing --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${TESTING_CONTROLLER_IMAGE}" --byzantine-image-name="${BYZANTINE_IMAGE}" +fi From 2aa51e34d85c94ab783a782820508a90878a021b Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Sat, 29 Aug 2020 13:54:37 -0400 Subject: [PATCH 36/47] Remove unnecessary install of awscli --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 95b8cfdf46f5..9fd78bcdf5a7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,7 +18,6 @@ env: - COMMIT=${TRAVIS_COMMIT::8} - DOCKERHUB_REPO=avaplatform/gecko before_install: - - pip install --user awscli # need awscli to access byz-gecko images - if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/before_install_linux.sh; fi install: From 3b11b6e10d3f0019b6af711e933f8a6667e796ca Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sat, 29 Aug 2020 14:07:12 -0400 Subject: [PATCH 37/47] cleaned up var usage --- vms/platformvm/vm.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 9e82a9243d51..b25493acc133 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -749,17 +749,15 @@ func (vm *VM) nextStakerChangeTime(db database.Database) (time.Time, error) { earliest := maxTime for _, subnetID := range subnetIDs.List() { - t, err := vm.nextStakerStart(db, subnetID) - if err == nil { - if staker, ok := t.UnsignedTx.(TimedTx); ok { + if tx, err := vm.nextStakerStart(db, subnetID); err == nil { + if staker, ok := tx.UnsignedTx.(TimedTx); ok { if startTime := staker.StartTime(); startTime.Before(earliest) && startTime.After(timestamp) { earliest = startTime } } } - t, err = vm.nextStakerStop(db, subnetID) - if err == nil { - if staker, ok := t.UnsignedTx.(TimedTx); ok { + if tx, err := vm.nextStakerStop(db, subnetID); err == nil { + if staker, ok := tx.UnsignedTx.(TimedTx); ok { if endTime := staker.EndTime(); endTime.Before(earliest) && endTime.After(timestamp) { earliest = endTime } @@ -864,7 +862,7 @@ func (vm *VM) updateVdrMgr() error { for _, subnetID := range subnetIDs.List() { vdrs, initialized := vm.vdrMgr.GetValidators(subnetID) if !initialized { - vdrs = validators.NewBestSet(5) + vdrs = validators.NewSet() } prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) From 824190b51f51b10df9ea6dd2b2e5c675b092905d Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sat, 29 Aug 2020 14:08:21 -0400 Subject: [PATCH 38/47] removed unused function --- snow/validators/set.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/snow/validators/set.go b/snow/validators/set.go index 958e200a4ea4..b5986e571a7a 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -247,14 +247,6 @@ func (s *set) get(vdrID ids.ShortID) (Validator, bool) { return s.vdrSlice[index], true } -// Remove implements the Set interface. 
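Patch 37 above tightens nextStakerChangeTime so each lookup's tx and err are scoped to their own if statement. A condensed sketch of the scan it performs: take the earliest start or stop event strictly after the current chain timestamp, skipping subnets whose lookups fail. The lookup closures stand in for nextStakerStart and nextStakerStop per subnet:

package main

import (
	"fmt"
	"time"
)

// earliestChange mirrors the loop's shape: only events strictly after
// the current timestamp can move the result earlier.
func earliestChange(timestamp time.Time, lookups []func() (time.Time, error)) time.Time {
	earliest := time.Unix(1<<40, 0) // stand-in for maxTime
	for _, lookup := range lookups {
		if t, err := lookup(); err == nil {
			if t.Before(earliest) && t.After(timestamp) {
				earliest = t
			}
		}
	}
	return earliest
}

func main() {
	now := time.Unix(1000, 0)
	next := earliestChange(now, []func() (time.Time, error){
		func() (time.Time, error) { return time.Unix(900, 0), nil }, // already past: ignored
		func() (time.Time, error) { return time.Unix(3000, 0), nil },
		func() (time.Time, error) { return time.Unix(2000, 0), nil }, // earliest future event
	})
	fmt.Println(next.Unix()) // 2000
}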
-func (s *set) Remove(vdrID ids.ShortID) error { - s.lock.Lock() - defer s.lock.Unlock() - - return s.remove(vdrID) -} - func (s *set) remove(vdrID ids.ShortID) error { // Get the element to remove iKey := vdrID.Key() From 616487d0545e212c600e9a0c00554a88236e96b1 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sat, 29 Aug 2020 14:43:09 -0400 Subject: [PATCH 39/47] started fixing up tests --- chains/awaiter_test.go | 6 +- snow/engine/avalanche/transitive_test.go | 274 +++++++++++------------ 2 files changed, 134 insertions(+), 146 deletions(-) diff --git a/chains/awaiter_test.go b/chains/awaiter_test.go index fedec4a5c049..6120cfd4b008 100644 --- a/chains/awaiter_test.go +++ b/chains/awaiter_test.go @@ -18,9 +18,9 @@ func TestAwaiter(t *testing.T) { vdrID3 := ids.NewShortID([20]byte{3}) s := validators.NewSet() - s.Add(validators.NewValidator(vdrID0, 1)) - s.Add(validators.NewValidator(vdrID1, 1)) - s.Add(validators.NewValidator(vdrID3, 1)) + s.AddWeight(vdrID0, 1) + s.AddWeight(vdrID1, 1) + s.AddWeight(vdrID3, 1) called := make(chan struct{}, 1) aw := NewAwaiter(s, 3, func() { diff --git a/snow/engine/avalanche/transitive_test.go b/snow/engine/avalanche/transitive_test.go index ac8f0aec328f..82dee760ecca 100644 --- a/snow/engine/avalanche/transitive_test.go +++ b/snow/engine/avalanche/transitive_test.go @@ -47,12 +47,11 @@ func TestEngineShutdown(t *testing.T) { func TestEngineAdd(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -99,7 +98,7 @@ func TestEngineAdd(t *testing.T) { t.Fatalf("Asked multiple times") } *asked = true - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for vertex") } if !vtx.ParentsV[0].ID().Equals(vtxID) { @@ -114,7 +113,7 @@ func TestEngineAdd(t *testing.T) { return vtx, nil } - te.Put(vdr.ID(), 0, vtx.ID(), vtx.Bytes()) + te.Put(vdr, 0, vtx.ID(), vtx.Bytes()) manager.ParseVertexF = nil @@ -128,7 +127,7 @@ func TestEngineAdd(t *testing.T) { manager.ParseVertexF = func(b []byte) (avalanche.Vertex, error) { return nil, errFailedParsing } - te.Put(vdr.ID(), *reqID, vtx.ParentsV[0].ID(), nil) + te.Put(vdr, *reqID, vtx.ParentsV[0].ID(), nil) manager.ParseVertexF = nil @@ -140,12 +139,11 @@ func TestEngineAdd(t *testing.T) { func TestEngineQuery(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -224,7 +222,7 @@ func TestEngineQuery(t *testing.T) { t.Fatalf("Asked multiple times") } *asked = true - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for vertex") } if !vtx0.ID().Equals(vtxID) { @@ -232,7 +230,7 @@ func TestEngineQuery(t *testing.T) { } } - te.PullQuery(vdr.ID(), 0, vtx0.ID()) + te.PullQuery(vdr, 0, vtx0.ID()) if !*vertexed { t.Fatalf("Didn't request vertex") } @@ -249,7 +247,7 @@ func TestEngineQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -275,7 +273,7 @@ func TestEngineQuery(t *testing.T) { } return vtx0, nil } - te.Put(vdr.ID(), 0, vtx0.ID(), vtx0.Bytes()) + te.Put(vdr, 0, 
vtx0.ID(), vtx0.Bytes()) manager.ParseVertexF = nil if !*queried { @@ -317,7 +315,7 @@ func TestEngineQuery(t *testing.T) { t.Fatalf("Asked multiple times") } *asked = true - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for vertex") } if !vtx1.ID().Equals(vtxID) { @@ -327,7 +325,7 @@ func TestEngineQuery(t *testing.T) { s := ids.Set{} s.Add(vtx1.ID()) - te.Chits(vdr.ID(), *queryRequestID, s) + te.Chits(vdr, *queryRequestID, s) *queried = false sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { @@ -337,7 +335,7 @@ func TestEngineQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -368,7 +366,7 @@ func TestEngineQuery(t *testing.T) { return vtx1, nil } - te.Put(vdr.ID(), 0, vtx1.ID(), vtx1.Bytes()) + te.Put(vdr, 0, vtx1.ID(), vtx1.Bytes()) manager.ParseVertexF = nil if vtx0.Status() != choices.Accepted { @@ -380,7 +378,7 @@ func TestEngineQuery(t *testing.T) { _ = te.polls.String() // Shouldn't panic - te.QueryFailed(vdr.ID(), *queryRequestID) + te.QueryFailed(vdr, *queryRequestID) if len(te.vtxBlocked) != 0 { t.Fatalf("Should have finished blocking") } @@ -402,16 +400,16 @@ func TestEngineMultipleQuery(t *testing.T) { BatchSize: 1, } - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) - vdr2 := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr0) - vals.Add(vdr1) - vals.Add(vdr2) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + vdr2 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) + vals.AddWeight(vdr2, 1) sender := &common.SenderTest{} sender.T = t @@ -477,7 +475,7 @@ func TestEngineMultipleQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -521,7 +519,7 @@ func TestEngineMultipleQuery(t *testing.T) { t.Fatalf("Asked multiple times") } *asked = true - if !vdr0.ID().Equals(inVdr) { + if !vdr0.Equals(inVdr) { t.Fatalf("Asking wrong validator for vertex") } if !vtx1.ID().Equals(vtxID) { @@ -536,14 +534,14 @@ func TestEngineMultipleQuery(t *testing.T) { s2 := ids.Set{} s2.Add(vtx0.ID()) - te.Chits(vdr0.ID(), *queryRequestID, s0) - te.QueryFailed(vdr1.ID(), *queryRequestID) - te.Chits(vdr2.ID(), *queryRequestID, s2) + te.Chits(vdr0, *queryRequestID, s0) + te.QueryFailed(vdr1, *queryRequestID) + te.Chits(vdr2, *queryRequestID, s2) // Should be dropped because the query was marked as failed - te.Chits(vdr1.ID(), *queryRequestID, s0) + te.Chits(vdr1, *queryRequestID, s0) - te.GetFailed(vdr0.ID(), *reqID) + te.GetFailed(vdr0, *reqID) if vtx0.Status() != choices.Accepted { t.Fatalf("Should have executed vertex") @@ -556,12 +554,11 @@ func TestEngineMultipleQuery(t *testing.T) { func TestEngineBlockedIssue(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -627,12 +624,11 @@ func TestEngineBlockedIssue(t *testing.T) { func TestEngineAbandonResponse(t *testing.T) { config := 
DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -683,8 +679,8 @@ func TestEngineAbandonResponse(t *testing.T) { *reqID = requestID } - te.PullQuery(vdr.ID(), 0, vtx.ID()) - te.GetFailed(vdr.ID(), *reqID) + te.PullQuery(vdr, 0, vtx.ID()) + te.GetFailed(vdr, *reqID) if len(te.vtxBlocked) != 0 { t.Fatalf("Should have removed blocking event") @@ -694,12 +690,11 @@ func TestEngineAbandonResponse(t *testing.T) { func TestEngineScheduleRepoll(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -764,7 +759,7 @@ func TestEngineScheduleRepoll(t *testing.T) { } } - te.QueryFailed(vdr.ID(), *requestID) + te.QueryFailed(vdr, *requestID) if !*repolled { t.Fatalf("Should have issued a noop") @@ -783,12 +778,11 @@ func TestEngineRejectDoubleSpendTx(t *testing.T) { sender.Default(true) sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -882,12 +876,11 @@ func TestEngineRejectDoubleSpendIssuedTx(t *testing.T) { sender.Default(true) sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -985,12 +978,11 @@ func TestEngineIssueRepoll(t *testing.T) { sender.Default(true) sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -1025,7 +1017,7 @@ func TestEngineIssueRepoll(t *testing.T) { sender.PullQueryF = func(vdrs ids.ShortSet, _ uint32, vtxID ids.ID) { vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !vdrs.Equals(vdrSet) { t.Fatalf("Wrong query recipients") } @@ -1050,12 +1042,11 @@ func TestEngineReissue(t *testing.T) { sender.Default(true) sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -1186,7 +1177,7 @@ func TestEngineReissue(t *testing.T) { } return vtx, nil } - te.Put(vdr.ID(), 0, vtx.ID(), vtx.Bytes()) + te.Put(vdr, 0, vtx.ID(), vtx.Bytes()) manager.ParseVertexF = nil vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx3} } @@ -1194,7 +1185,7 @@ func TestEngineReissue(t *testing.T) { s := ids.Set{} s.Add(vtx.ID()) - te.Chits(vdr.ID(), *queryRequestID, s) + te.Chits(vdr, *queryRequestID, s) if len(lastVtx.TxsV) != 1 || !lastVtx.TxsV[0].ID().Equals(tx0.ID()) { t.Fatalf("Should have re-issued the tx") @@ -1214,12 +1205,11 @@ func TestEngineLargeIssue(t *testing.T) { sender.Default(true) 
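The test migration running through this patch repeats one idiom: a validator is now a bare ids.ShortID registered with AddWeight, not a value from validators.GenerateRandomValidator. Condensed into a single test below; both calls appear verbatim in the surrounding hunks, and AddWeight's return value is left unchecked exactly as the diffs do:

package avalanche

import (
	"testing"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/validators"
)

func TestValidatorSetupIdiom(t *testing.T) {
	vals := validators.NewSet()

	vdr0 := ids.GenerateTestShortID()
	vdr1 := ids.GenerateTestShortID()

	// A weight on the set replaces the old Validator object; tests that
	// need the ID later reuse vdr0/vdr1 directly instead of vdr.ID().
	vals.AddWeight(vdr0, 1)
	vals.AddWeight(vdr1, 1)
}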
sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -1434,11 +1424,12 @@ func TestEngineInsufficientValidators(t *testing.T) { func TestEnginePushGossip(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1506,7 +1497,7 @@ func TestEnginePushGossip(t *testing.T) { sender.CantPushQuery = false sender.CantChits = false - te.PushQuery(vdr.ID(), 0, vtx.ID(), vtx.Bytes()) + te.PushQuery(vdr, 0, vtx.ID(), vtx.Bytes()) if *requested { t.Fatalf("Shouldn't have requested the vertex") @@ -1516,11 +1507,12 @@ func TestEnginePushGossip(t *testing.T) { func TestEngineSingleQuery(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1582,11 +1574,12 @@ func TestEngineSingleQuery(t *testing.T) { func TestEngineParentBlockingInsert(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1677,11 +1670,12 @@ func TestEngineParentBlockingInsert(t *testing.T) { func TestEngineBlockingChitRequest(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1771,7 +1765,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { panic("Should have errored") } - te.PushQuery(vdr.ID(), 0, blockingVtx.ID(), blockingVtx.Bytes()) + te.PushQuery(vdr, 0, blockingVtx.ID(), blockingVtx.Bytes()) if len(te.vtxBlocked) != 3 { t.Fatalf("Both inserts and the query should be blocking") @@ -1791,11 +1785,12 @@ func TestEngineBlockingChitRequest(t *testing.T) { func TestEngineBlockingChitResponse(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1872,7 +1867,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -1894,7 +1889,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { voteSet := ids.Set{} voteSet.Add(blockingVtx.ID()) - te.Chits(vdr.ID(), *queryRequestID, voteSet) + te.Chits(vdr, *queryRequestID, voteSet) if len(te.vtxBlocked) != 2 { t.Fatalf("The insert should be blocking, as well as the chit response") @@ -1915,11 +1910,12 @@ func TestEngineBlockingChitResponse(t 
*testing.T) { func TestEngineMissingTx(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr) config.Validators = vals + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -1996,7 +1992,7 @@ func TestEngineMissingTx(t *testing.T) { sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -2018,7 +2014,7 @@ func TestEngineMissingTx(t *testing.T) { voteSet := ids.Set{} voteSet.Add(blockingVtx.ID()) - te.Chits(vdr.ID(), *queryRequestID, voteSet) + te.Chits(vdr, *queryRequestID, voteSet) if len(te.vtxBlocked) != 2 { t.Fatalf("The insert should be blocking, as well as the chit response") @@ -2039,12 +2035,11 @@ func TestEngineMissingTx(t *testing.T) { func TestEngineIssueBlockingTx(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -2097,13 +2092,11 @@ func TestEngineIssueBlockingTx(t *testing.T) { func TestEngineReissueAbortedVertex(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vdrID := vdr.ID() - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -2191,12 +2184,12 @@ func TestEngineReissueAbortedVertex(t *testing.T) { panic("Unknown bytes provided") } - te.PushQuery(vdrID, 0, vtxID1, vtx1.Bytes()) + te.PushQuery(vdr, 0, vtxID1, vtx1.Bytes()) sender.GetF = nil manager.ParseVertexF = nil - te.GetFailed(vdrID, *requestID) + te.GetFailed(vdr, *requestID) requested := new(bool) sender.GetF = func(_ ids.ShortID, _ uint32, vtxID ids.ID) { @@ -2213,7 +2206,7 @@ func TestEngineReissueAbortedVertex(t *testing.T) { panic("Unknown bytes provided") } - te.PullQuery(vdrID, 0, vtxID1) + te.PullQuery(vdr, 0, vtxID1) if !*requested { t.Fatalf("Should have requested the missing vertex") @@ -2223,14 +2216,12 @@ func TestEngineReissueAbortedVertex(t *testing.T) { func TestEngineBootstrappingIntoConsensus(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vdrID := vdr.ID() - vals := validators.NewSet() config.Validators = vals config.Beacons = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -2311,8 +2302,8 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } - if !vdrs.Contains(vdrID) { - t.Fatalf("Should have requested from %s", vdrID) + if !vdrs.Contains(vdr) { + t.Fatalf("Should have requested from %s", vdr) } *requested = true *requestID = reqID @@ -2336,8 +2327,8 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } - if !vdrs.Contains(vdrID) { - t.Fatalf("Should have requested from %s", vdrID) + if !vdrs.Contains(vdr) { + t.Fatalf("Should have requested from %s", vdr) } if !acceptedFrontier.Equals(proposedAccepted) { t.Fatalf("Wrong proposedAccepted 
vertices.\nExpected: %s\nGot: %s", acceptedFrontier, proposedAccepted) @@ -2346,7 +2337,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { *requestID = reqID } - te.AcceptedFrontier(vdrID, *requestID, acceptedFrontier) + te.AcceptedFrontier(vdr, *requestID, acceptedFrontier) if !*requested { t.Fatalf("Should have requested from the validators during AcceptedFrontier") @@ -2362,7 +2353,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { } sender.GetAncestorsF = func(inVdr ids.ShortID, reqID uint32, vtxID ids.ID) { - if !vdrID.Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for vertex") } if !vtx0.ID().Equals(vtxID) { @@ -2371,7 +2362,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { *requestID = reqID } - te.Accepted(vdrID, *requestID, acceptedFrontier) + te.Accepted(vdr, *requestID, acceptedFrontier) manager.GetVertexF = nil sender.GetF = nil @@ -2404,7 +2395,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { panic("Unknown bytes provided") } - te.MultiPut(vdrID, *requestID, [][]byte{vtxBytes0}) + te.MultiPut(vdr, *requestID, [][]byte{vtxBytes0}) vm.ParseTxF = nil manager.ParseVertexF = nil @@ -2427,7 +2418,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { panic("Unknown bytes provided") } sender.ChitsF = func(inVdr ids.ShortID, _ uint32, chits ids.Set) { - if !inVdr.Equals(vdrID) { + if !inVdr.Equals(vdr) { t.Fatalf("Sent to the wrong validator") } @@ -2442,8 +2433,8 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } - if !vdrs.Contains(vdrID) { - t.Fatalf("Should have requested from %s", vdrID) + if !vdrs.Contains(vdr) { + t.Fatalf("Should have requested from %s", vdr) } if !vtxID1.Equals(vtxID) { @@ -2462,7 +2453,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { panic("Unknown bytes provided") } - te.PushQuery(vdrID, 0, vtxID1, vtxBytes1) + te.PushQuery(vdr, 0, vtxID1, vtxBytes1) manager.ParseVertexF = nil sender.ChitsF = nil @@ -2473,12 +2464,11 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -2559,7 +2549,7 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { votes := ids.Set{} votes.Add(vtx1.ID()) - te.Chits(vdr.ID(), *reqID, votes) + te.Chits(vdr, *reqID, votes) if status := vtx0.Status(); status != choices.Accepted { t.Fatalf("should have accepted the vertex due to transitive voting") @@ -2569,12 +2559,11 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { func TestEnginePartiallyValidVertex(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -2699,14 +2688,14 @@ func TestEngineGossip(t *testing.T) { func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - secondVdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) - vals.Add(secondVdr) + vdr := 
ids.GenerateTestShortID() + secondVdr := ids.GenerateTestShortID() + + vals.AddWeight(vdr, 1) + vals.AddWeight(secondVdr, 1) sender := &common.SenderTest{} sender.T = t @@ -2789,7 +2778,7 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { reqID := new(uint32) sender.GetF = func(reqVdr ids.ShortID, requestID uint32, vtxID ids.ID) { *reqID = requestID - if !reqVdr.Equals(vdr.ID()) { + if !reqVdr.Equals(vdr) { t.Fatalf("Wrong validator requested") } if !vtxID.Equals(vtx0.ID()) { @@ -2797,9 +2786,9 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, vtx1.ID(), vtx1.Bytes()) + te.PushQuery(vdr, 0, vtx1.ID(), vtx1.Bytes()) - te.Put(secondVdr.ID(), *reqID, vtx0.ID(), []byte{3}) + te.Put(secondVdr, *reqID, vtx0.ID(), []byte{3}) *parsed = false manager.ParseVertexF = func(b []byte) (avalanche.Vertex, error) { @@ -2827,7 +2816,7 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { vtx0.StatusV = choices.Processing - te.Put(vdr.ID(), *reqID, vtx0.ID(), vtx0.Bytes()) + te.Put(vdr, *reqID, vtx0.ID(), vtx0.Bytes()) prefs := te.Consensus.Preferences() if !prefs.Contains(vtx1.ID()) { @@ -2838,12 +2827,11 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { func TestEnginePushQueryRequestIDConflict(t *testing.T) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -2929,7 +2917,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { reqID := new(uint32) sender.GetF = func(reqVdr ids.ShortID, requestID uint32, vtxID ids.ID) { *reqID = requestID - if !reqVdr.Equals(vdr.ID()) { + if !reqVdr.Equals(vdr) { t.Fatalf("Wrong validator requested") } if !vtxID.Equals(vtx0.ID()) { @@ -2937,12 +2925,12 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, vtx1.ID(), vtx1.Bytes()) + te.PushQuery(vdr, 0, vtx1.ID(), vtx1.Bytes()) sender.GetF = nil sender.CantGet = false - te.PushQuery(vdr.ID(), *reqID, randomVtxID, []byte{3}) + te.PushQuery(vdr, *reqID, randomVtxID, []byte{3}) *parsed = false manager.ParseVertexF = func(b []byte) (avalanche.Vertex, error) { @@ -2970,7 +2958,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { vtx0.StatusV = choices.Processing - te.Put(vdr.ID(), *reqID, vtx0.ID(), vtx0.Bytes()) + te.Put(vdr, *reqID, vtx0.ID(), vtx0.Bytes()) prefs := te.Consensus.Preferences() if !prefs.Contains(vtx1.ID()) { @@ -2984,12 +2972,11 @@ func TestEngineAggressivePolling(t *testing.T) { config.Params.ConcurrentRepolls = 3 config.Params.BetaRogue = 3 - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -3069,7 +3056,7 @@ func TestEngineAggressivePolling(t *testing.T) { numPullQueries := new(int) sender.PullQueryF = func(ids.ShortSet, uint32, ids.ID) { *numPullQueries++ } - te.Put(vdr.ID(), 0, vtx.ID(), vtx.Bytes()) + te.Put(vdr, 0, vtx.ID(), vtx.Bytes()) if *numPushQueries != 1 { t.Fatalf("should have issued one push query") @@ -3092,12 +3079,11 @@ func TestEngineDuplicatedIssuance(t *testing.T) { sender.Default(true) sender.CantGetAcceptedFrontier = false - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := 
ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) manager := &vertex.TestManager{T: t} config.Manager = manager @@ -3190,13 +3176,15 @@ func TestEngineDoubleChit(t *testing.T) { config.Params.Alpha = 2 config.Params.K = 2 - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) vals := validators.NewSet() - vals.Add(vdr0) - vals.Add(vdr1) config.Validators = vals + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) + sender := &common.SenderTest{} sender.T = t config.Sender = sender @@ -3283,19 +3271,19 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr0.ID(), *reqID, votes) + te.Chits(vdr0, *reqID, votes) if status := tx.Status(); status != choices.Processing { t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr0.ID(), *reqID, votes) + te.Chits(vdr0, *reqID, votes) if status := tx.Status(); status != choices.Processing { t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr1.ID(), *reqID, votes) + te.Chits(vdr1, *reqID, votes) if status := tx.Status(); status != choices.Accepted { t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Accepted) From 59f2e60431ee0bfe6bd280f46f85375647cc7180 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sat, 29 Aug 2020 15:40:26 -0400 Subject: [PATCH 40/47] fixing up more tests --- .../avalanche/bootstrap/bootstrapper_test.go | 7 +- .../snowman/bootstrap/bootstrapper_test.go | 7 +- snow/engine/snowman/transitive_test.go | 162 +++++++++--------- 3 files changed, 86 insertions(+), 90 deletions(-) diff --git a/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/snow/engine/avalanche/bootstrap/bootstrapper_test.go index b4f995410d56..25b939fa3915 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -48,9 +48,8 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *vertex.T sender.CantGetAcceptedFrontier = false - peer := validators.GenerateRandomValidator(1) - peerID := peer.ID() - peers.Add(peer) + peer := ids.GenerateTestShortID() + peers.AddWeight(peer, 1) vtxBlocker, _ := queue.New(prefixdb.New([]byte("vtx"), db)) txBlocker, _ := queue.New(prefixdb.New([]byte("tx"), db)) @@ -68,7 +67,7 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *vertex.T TxBlocked: txBlocker, Manager: manager, VM: vm, - }, peerID, sender, manager, vm + }, peer, sender, manager, vm } // Three vertices in the accepted frontier. None have parents. 
No need to fetch anything diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index 8e10b7208b07..1bdcef3183b6 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -42,9 +42,8 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *block.Te sender.CantGetAcceptedFrontier = false - peer := validators.GenerateRandomValidator(1) - peerID := peer.ID() - peers.Add(peer) + peer := ids.GenerateTestShortID() + peers.AddWeight(peer, 1) blocker, _ := queue.New(db) @@ -59,7 +58,7 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *block.Te Config: commonConfig, Blocked: blocker, VM: vm, - }, peerID, sender, vm + }, peer, sender, vm } // Single node in the accepted frontier; no need to fecth parent diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 23bdd6d6ecce..7c7ef8902c9f 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -26,15 +26,14 @@ var ( Genesis = ids.GenerateTestID() ) -func setup(t *testing.T) (validators.Validator, validators.Set, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { +func setup(t *testing.T) (ids.ShortID, validators.Set, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -118,7 +117,7 @@ func TestEngineAdd(t *testing.T) { t.Fatalf("Asked multiple times") } *asked = true - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blkID.Equals(blk.Parent().ID()) { @@ -133,7 +132,7 @@ func TestEngineAdd(t *testing.T) { return blk, nil } - te.Put(vdr.ID(), 0, blk.ID(), blk.Bytes()) + te.Put(vdr, 0, blk.ID(), blk.Bytes()) vm.ParseBlockF = nil @@ -147,7 +146,7 @@ func TestEngineAdd(t *testing.T) { vm.ParseBlockF = func(b []byte) (snowman.Block, error) { return nil, errUnknownBytes } - te.Put(vdr.ID(), *reqID, blk.Parent().ID(), nil) + te.Put(vdr, *reqID, blk.Parent().ID(), nil) vm.ParseBlockF = nil @@ -189,7 +188,7 @@ func TestEngineQuery(t *testing.T) { } *asked = true *getRequestID = requestID - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blk.ID().Equals(blkID) { @@ -197,7 +196,7 @@ func TestEngineQuery(t *testing.T) { } } - te.PullQuery(vdr.ID(), 15, blk.ID()) + te.PullQuery(vdr, 15, blk.ID()) if !*blocked { t.Fatalf("Didn't request block") } @@ -214,7 +213,7 @@ func TestEngineQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -246,7 +245,7 @@ func TestEngineQuery(t *testing.T) { } return blk, nil } - te.Put(vdr.ID(), *getRequestID, blk.ID(), blk.Bytes()) + te.Put(vdr, *getRequestID, blk.ID(), blk.Bytes()) vm.ParseBlockF = nil if !*queried { @@ -284,7 +283,7 @@ func TestEngineQuery(t *testing.T) { } *asked = true *getRequestID = requestID - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blk1.ID().Equals(blkID) { @@ -293,7 +292,7 @@ func TestEngineQuery(t *testing.T) { } blkSet := ids.Set{} 
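The multi-validator tests below (TestEngineMultipleQuery here, TestEngineDoubleChit further down) all pin the same property: a poll needs Alpha chits from distinct validators, and a repeated chit from one validator must not be double-counted. A toy tally of that property, not the engine's poll implementation:

package main

import "fmt"

type poll struct {
	alpha int
	votes map[string]bool // validator ID -> has voted
}

func (p *poll) chit(vdr string) bool {
	p.votes[vdr] = true // a duplicate chit overwrites; it never adds
	return len(p.votes) >= p.alpha
}

func main() {
	p := &poll{alpha: 2, votes: map[string]bool{}}
	fmt.Println(p.chit("vdr0")) // false
	fmt.Println(p.chit("vdr0")) // false: same validator again
	fmt.Println(p.chit("vdr1")) // true: Alpha distinct validators reached
}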
blkSet.Add(blk1.ID()) - te.Chits(vdr.ID(), *queryRequestID, blkSet) + te.Chits(vdr, *queryRequestID, blkSet) *queried = false sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { @@ -303,7 +302,7 @@ func TestEngineQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -330,7 +329,7 @@ func TestEngineQuery(t *testing.T) { return blk1, nil } - te.Put(vdr.ID(), *getRequestID, blk1.ID(), blk1.Bytes()) + te.Put(vdr, *getRequestID, blk1.ID(), blk1.Bytes()) vm.ParseBlockF = nil if blk1.Status() != choices.Accepted { @@ -342,7 +341,7 @@ func TestEngineQuery(t *testing.T) { _ = te.polls.String() // Shouldn't panic - te.QueryFailed(vdr.ID(), *queryRequestID) + te.QueryFailed(vdr, *queryRequestID) if len(te.blocked) != 0 { t.Fatalf("Should have finished blocking") } @@ -360,16 +359,16 @@ func TestEngineMultipleQuery(t *testing.T) { ConcurrentRepolls: 1, } - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) - vdr2 := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr0) - vals.Add(vdr1) - vals.Add(vdr2) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + vdr2 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) + vals.AddWeight(vdr2, 1) sender := &common.SenderTest{} sender.T = t @@ -428,7 +427,7 @@ func TestEngineMultipleQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -470,7 +469,7 @@ func TestEngineMultipleQuery(t *testing.T) { } *asked = true *getRequestID = requestID - if !vdr0.ID().Equals(inVdr) { + if !vdr0.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blk1.ID().Equals(blkID) { @@ -479,8 +478,8 @@ func TestEngineMultipleQuery(t *testing.T) { } blkSet := ids.Set{} blkSet.Add(blk1.ID()) - te.Chits(vdr0.ID(), *queryRequestID, blkSet) - te.Chits(vdr1.ID(), *queryRequestID, blkSet) + te.Chits(vdr0, *queryRequestID, blkSet) + te.Chits(vdr1, *queryRequestID, blkSet) vm.ParseBlockF = func(b []byte) (snowman.Block, error) { vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { @@ -506,7 +505,7 @@ func TestEngineMultipleQuery(t *testing.T) { *queried = true *secondQueryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -514,12 +513,12 @@ func TestEngineMultipleQuery(t *testing.T) { t.Fatalf("Asking for wrong block") } } - te.Put(vdr0.ID(), *getRequestID, blk1.ID(), blk1.Bytes()) + te.Put(vdr0, *getRequestID, blk1.ID(), blk1.Bytes()) // Should be dropped because the query was already filled blkSet = ids.Set{} blkSet.Add(blk0.ID()) - te.Chits(vdr2.ID(), *queryRequestID, blkSet) + te.Chits(vdr2, *queryRequestID, blkSet) if blk1.Status() != choices.Accepted { t.Fatalf("Should have executed block") @@ -579,7 +578,7 @@ func TestEngineAbandonResponse(t *testing.T) { } te.issue(blk) - te.QueryFailed(vdr.ID(), 1) + te.QueryFailed(vdr, 1) if len(te.blocked) != 0 { t.Fatalf("Should have removed blocking event") @@ -601,7 +600,7 @@ func TestEngineFetchBlock(t *testing.T) { added := 
new(bool) sender.PutF = func(inVdr ids.ShortID, requestID uint32, blkID ids.ID, blk []byte) { - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Wrong validator") } if requestID != 123 { @@ -613,7 +612,7 @@ func TestEngineFetchBlock(t *testing.T) { *added = true } - te.Get(vdr.ID(), 123, gBlk.ID()) + te.Get(vdr, 123, gBlk.ID()) if !*added { t.Fatalf("Should have sent block to peer") @@ -656,7 +655,7 @@ func TestEnginePushQuery(t *testing.T) { t.Fatalf("Sent chit multiple times") } *chitted = true - if !inVdr.Equals(vdr.ID()) { + if !inVdr.Equals(vdr) { t.Fatalf("Asking wrong validator for preference") } if requestID != 20 { @@ -678,7 +677,7 @@ func TestEnginePushQuery(t *testing.T) { } *queried = true vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -687,7 +686,7 @@ func TestEnginePushQuery(t *testing.T) { } } - te.PushQuery(vdr.ID(), 20, blk.ID(), blk.Bytes()) + te.PushQuery(vdr, 20, blk.ID(), blk.Bytes()) if !*chitted { t.Fatalf("Should have sent a chit to the peer") @@ -719,7 +718,7 @@ func TestEngineBuildBlock(t *testing.T) { } *queried = true vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -745,7 +744,7 @@ func TestEngineRepoll(t *testing.T) { } *queried = true vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -770,16 +769,16 @@ func TestVoteCanceling(t *testing.T) { ConcurrentRepolls: 1, } - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) - vdr2 := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr0) - vals.Add(vdr1) - vals.Add(vdr2) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + vdr2 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) + vals.AddWeight(vdr2, 1) sender := &common.SenderTest{} sender.T = t @@ -838,7 +837,7 @@ func TestVoteCanceling(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -853,7 +852,7 @@ func TestVoteCanceling(t *testing.T) { t.Fatalf("Shouldn't have finished blocking issue") } - te.QueryFailed(vdr0.ID(), *queryRequestID) + te.QueryFailed(vdr0, *queryRequestID) if te.polls.Len() != 1 { t.Fatalf("Shouldn't have finished blocking issue") @@ -863,7 +862,7 @@ func TestVoteCanceling(t *testing.T) { sender.PullQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID) { *repolled = true } - te.QueryFailed(vdr1.ID(), *queryRequestID) + te.QueryFailed(vdr1, *queryRequestID) if !*repolled { t.Fatalf("Should have finished blocking issue and repolled the network") @@ -958,13 +957,13 @@ func TestEngineAbandonQuery(t *testing.T) { *reqID = requestID } - te.PullQuery(vdr.ID(), 0, blkID) + te.PullQuery(vdr, 0, blkID) if len(te.blocked) != 1 { t.Fatalf("Should have blocked on request") } - te.GetFailed(vdr.ID(), *reqID) + te.GetFailed(vdr, *reqID) if len(te.blocked) != 0 { t.Fatalf("Should have removed request") @@ -1008,13 +1007,13 @@ func TestEngineAbandonChit(t *testing.T) { fakeBlkIDSet := ids.Set{} fakeBlkIDSet.Add(fakeBlkID) - te.Chits(vdr.ID(), 0, fakeBlkIDSet) + te.Chits(vdr, 0, fakeBlkIDSet) if 
len(te.blocked) != 1 { t.Fatalf("Should have blocked on request") } - te.GetFailed(vdr.ID(), *reqID) + te.GetFailed(vdr, *reqID) if len(te.blocked) != 0 { t.Fatalf("Should have removed request") @@ -1075,7 +1074,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, blockingBlk.ID(), blockingBlk.Bytes()) + te.PushQuery(vdr, 0, blockingBlk.ID(), blockingBlk.Bytes()) if len(te.blocked) != 3 { t.Fatalf("Both inserts should be blocking in addition to the chit request") @@ -1131,7 +1130,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -1153,7 +1152,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { } blockingBlkIDSet := ids.Set{} blockingBlkIDSet.Add(blockingBlk.ID()) - te.Chits(vdr.ID(), *queryRequestID, blockingBlkIDSet) + te.Chits(vdr, *queryRequestID, blockingBlkIDSet) if len(te.blocked) != 2 { t.Fatalf("The insert and the chit should be blocking") @@ -1188,12 +1187,12 @@ func TestEngineRetryFetch(t *testing.T) { *reqID = requestID } - te.PullQuery(vdr.ID(), 0, missingBlk.ID()) + te.PullQuery(vdr, 0, missingBlk.ID()) vm.CantGetBlock = true sender.GetF = nil - te.GetFailed(vdr.ID(), *reqID) + te.GetFailed(vdr, *reqID) vm.CantGetBlock = false @@ -1202,7 +1201,7 @@ func TestEngineRetryFetch(t *testing.T) { *called = true } - te.PullQuery(vdr.ID(), 0, missingBlk.ID()) + te.PullQuery(vdr, 0, missingBlk.ID()) vm.CantGetBlock = true sender.GetF = nil @@ -1263,7 +1262,7 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { votes := ids.Set{} votes.Add(invalidBlkID) - te.Chits(vdr.ID(), *reqID, votes) + te.Chits(vdr, *reqID, votes) vm.GetBlockF = nil @@ -1308,8 +1307,8 @@ func TestEngineGossip(t *testing.T) { func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { vdr, vdrs, sender, vm, te, gBlk := setup(t) - secondVdr := validators.GenerateRandomValidator(1) - vdrs.Add(secondVdr) + secondVdr := ids.GenerateTestShortID() + vdrs.AddWeight(secondVdr, 1) sender.Default(true) @@ -1357,7 +1356,7 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { reqID := new(uint32) sender.GetF = func(reqVdr ids.ShortID, requestID uint32, blkID ids.ID) { *reqID = requestID - if !reqVdr.Equals(vdr.ID()) { + if !reqVdr.Equals(vdr) { t.Fatalf("Wrong validator requested") } if !blkID.Equals(missingBlk.ID()) { @@ -1365,9 +1364,9 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes()) + te.PushQuery(vdr, 0, pendingBlk.ID(), pendingBlk.Bytes()) - te.Put(secondVdr.ID(), *reqID, missingBlk.ID(), []byte{3}) + te.Put(secondVdr, *reqID, missingBlk.ID(), []byte{3}) *parsed = false vm.ParseBlockF = func(b []byte) (snowman.Block, error) { @@ -1394,7 +1393,7 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { missingBlk.StatusV = choices.Processing - te.Put(vdr.ID(), *reqID, missingBlk.ID(), missingBlk.Bytes()) + te.Put(vdr, *reqID, missingBlk.ID(), missingBlk.Bytes()) pref := te.Consensus.Preference() if !pref.Equals(pendingBlk.ID()) { @@ -1453,7 +1452,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { reqID := new(uint32) sender.GetF = func(reqVdr ids.ShortID, requestID uint32, blkID ids.ID) { *reqID = requestID - if !reqVdr.Equals(vdr.ID()) { + if 
!reqVdr.Equals(vdr) { t.Fatalf("Wrong validator requested") } if !blkID.Equals(missingBlk.ID()) { @@ -1461,12 +1460,12 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes()) + te.PushQuery(vdr, 0, pendingBlk.ID(), pendingBlk.Bytes()) sender.GetF = nil sender.CantGet = false - te.PushQuery(vdr.ID(), *reqID, randomBlkID, []byte{3}) + te.PushQuery(vdr, *reqID, randomBlkID, []byte{3}) *parsed = false vm.ParseBlockF = func(b []byte) (snowman.Block, error) { @@ -1491,7 +1490,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { sender.CantPushQuery = false sender.CantChits = false - te.Put(vdr.ID(), *reqID, missingBlk.ID(), missingBlk.Bytes()) + te.Put(vdr, *reqID, missingBlk.ID(), missingBlk.Bytes()) pref := te.Consensus.Preference() if !pref.Equals(pendingBlk.ID()) { @@ -1504,12 +1503,11 @@ func TestEngineAggressivePolling(t *testing.T) { config.Params.ConcurrentRepolls = 2 - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -1590,7 +1588,7 @@ func TestEngineAggressivePolling(t *testing.T) { numPulled := new(int) sender.PullQueryF = func(_ ids.ShortSet, _ uint32, _ ids.ID) { *numPulled++ } - te.Put(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes()) + te.Put(vdr, 0, pendingBlk.ID(), pendingBlk.Bytes()) if *numPushed != 1 { t.Fatalf("Should have initially sent a push query") @@ -1612,14 +1610,14 @@ func TestEngineDoubleChit(t *testing.T) { BetaRogue: 2, } - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr0) - vals.Add(vdr1) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) sender := &common.SenderTest{} sender.T = t @@ -1678,7 +1676,7 @@ func TestEngineDoubleChit(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID()) + vdrSet.Add(vdr0, vdr1) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -1707,19 +1705,19 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr0.ID(), *queryRequestID, blkSet) + te.Chits(vdr0, *queryRequestID, blkSet) if status := blk.Status(); status != choices.Processing { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr0.ID(), *queryRequestID, blkSet) + te.Chits(vdr0, *queryRequestID, blkSet) if status := blk.Status(); status != choices.Processing { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr1.ID(), *queryRequestID, blkSet) + te.Chits(vdr1, *queryRequestID, blkSet) if status := blk.Status(); status != choices.Accepted { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Accepted) From af9fc4055942bd87f94ccb3672093df0a7591f91 Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Sat, 29 Aug 2020 15:47:05 -0400 Subject: [PATCH 41/47] Cleanup e2e test script --- .ci/run_e2e_tests.sh | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 9aba21135f60..cd3271dc1734 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -6,18 +6,6 @@ GECKO_IMAGE=$(docker image ls 
--format="{{.Repository}}" | head -n 1) DOCKER_REPO="avaplatform" -echo "$DOCKER_USERNAME" -echo "${#DOCKER_USERNAME}" - -echo "$DOCKER_PASS" | docker login --username "$DOCKER_USERNAME" --password-stdin - -TESTING_CONTROLLER_IMAGE="$DOCKER_REPO/avalanche-testing_controller:everest-dev" -BYZANTINE_IMAGE="$DOCKER_REPO/gecko-byzantine:everest-dev" - -docker pull "$TESTING_CONTROLLER_IMAGE" -docker pull "${BYZANTINE_IMAGE}" - - E2E_TESTING_REMOTE="https://github.com/ava-labs/avalanche-testing.git" E2E_TAG="v0.8.4-dev" @@ -30,9 +18,18 @@ git checkout "tags/$E2E_TAG" -b "$E2E_TAG" go mod edit -replace github.com/ava-labs/gecko="$GECKO_HOME" bash "./scripts/rebuild_initializer_binary.sh" -# If Docker Credentials are not present (ex. PR from an untrusted fork) run only the non-byzantine tests + +TESTING_CONTROLLER_IMAGE="$DOCKER_REPO/avalanche-testing_controller:everest-dev" +BYZANTINE_IMAGE="$DOCKER_REPO/gecko-byzantine:everest-dev" + +docker pull "$TESTING_CONTROLLER_IMAGE" + +# If Docker Credentials are not available skip the Byzantine Tests if [[ ${#DOCKER_USERNAME} == 0 ]]; then + echo "Skipping Byzantine Tests because Docker Credentials were not present." ./build/avalanche-testing --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${TESTING_CONTROLLER_IMAGE}" -else +else + echo "$DOCKER_PASS" | docker login --username "$DOCKER_USERNAME" --password-stdin + docker pull "${BYZANTINE_IMAGE}" ./build/avalanche-testing --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${TESTING_CONTROLLER_IMAGE}" --byzantine-image-name="${BYZANTINE_IMAGE}" fi From 8a1cd53d428d4ba050391081eb190154656f8265 Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Sat, 29 Aug 2020 13:01:40 -0400 Subject: [PATCH 42/47] Separate message throttling from CPU tracking --- snow/networking/router/chain_router_test.go | 13 +- snow/networking/router/handler.go | 5 - snow/networking/router/handler_test.go | 19 +- snow/networking/router/service_queue.go | 26 ++- snow/networking/router/service_queue_test.go | 19 +- snow/networking/sender/sender_test.go | 19 +- snow/networking/throttler/ewma.go | 211 ++++------------- .../networking/throttler/message_throttler.go | 221 ++++++++++++++++++ snow/networking/throttler/no.go | 26 ++- snow/networking/throttler/throttler.go | 21 ++ snow/networking/throttler/throttler_test.go | 179 ++++++++------ vms/platformvm/vm_test.go | 7 +- vms/spchainvm/consensus_benchmark_test.go | 14 +- 13 files changed, 482 insertions(+), 298 deletions(-) create mode 100644 snow/networking/throttler/message_throttler.go diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 8fb89e9cb322..ede7004d4b41 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/networking/throttler" "github.com/ava-labs/gecko/snow/networking/timeout" "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/logging" @@ -39,9 +40,9 @@ func TestShutdown(t *testing.T) { validators.NewSet(), nil, 1, - DefaultMaxNonStakerPendingMsgs, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -98,9 +99,9 @@ func TestShutdownTimesOut(t *testing.T) { validators.NewSet(), nil, 1, - 
DefaultMaxNonStakerPendingMsgs, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) diff --git a/snow/networking/router/handler.go b/snow/networking/router/handler.go index 5f58e4e49edb..4aed84cf8c45 100644 --- a/snow/networking/router/handler.go +++ b/snow/networking/router/handler.go @@ -17,11 +17,6 @@ import ( "github.com/ava-labs/gecko/utils/timer" ) -const ( - DefaultStakerPortion float64 = 0.2 - DefaultMaxNonStakerPendingMsgs uint32 = 3 -) - // Requirement: A set of nodes spamming messages (potentially costly) shouldn't // impact other node's queries. diff --git a/snow/networking/router/handler_test.go b/snow/networking/router/handler_test.go index 14aee95ad413..64657c354472 100644 --- a/snow/networking/router/handler_test.go +++ b/snow/networking/router/handler_test.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/networking/throttler" ) func TestHandlerDropsTimedOutMessages(t *testing.T) { @@ -41,9 +42,9 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { vdrs, nil, 16, - DefaultMaxNonStakerPendingMsgs, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -84,9 +85,9 @@ func TestHandlerDoesntDrop(t *testing.T) { validators, nil, 16, - DefaultMaxNonStakerPendingMsgs, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -120,9 +121,9 @@ func TestHandlerClosesOnError(t *testing.T) { validators.NewSet(), nil, 16, - DefaultMaxNonStakerPendingMsgs, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) diff --git a/snow/networking/router/service_queue.go b/snow/networking/router/service_queue.go index 5fe5b47a6182..708f87c23d5f 100644 --- a/snow/networking/router/service_queue.go +++ b/snow/networking/router/service_queue.go @@ -31,8 +31,9 @@ type messageQueue interface { type multiLevelQueue struct { lock sync.Mutex - validators validators.Set - throttler throttler.Throttler + validators validators.Set + cpuTracker throttler.CPUTracker + msgThrottler throttler.CountingThrottler // Tracks total CPU consumption intervalConsumption, tierConsumption, cpuInterval time.Duration @@ -67,7 +68,8 @@ func newMultiLevelQueue( ) (messageQueue, chan struct{}) { semaChan := make(chan struct{}, bufferSize) singleLevelSize := bufferSize / len(consumptionRanges) - throttler := throttler.NewEWMAThrottler(vdrs, uint32(bufferSize), maxNonStakerPendingMsgs, msgPortion, cpuPortion, cpuInterval, log) + cpuTracker := throttler.NewEWMATracker(vdrs, cpuPortion, cpuInterval, log) + msgThrottler := throttler.NewMessageThrottler(vdrs, uint32(bufferSize), maxNonStakerPendingMsgs, msgPortion, log) queues := make([]singleLevelQueue, len(consumptionRanges)) for index := 0; index < len(queues); index++ { gauge, histogram, err := metrics.registerTierStatistics(index) @@ -85,7 +87,8 @@ func newMultiLevelQueue( return &multiLevelQueue{ validators: vdrs, - throttler: throttler, + cpuTracker: cpuTracker, + msgThrottler: msgThrottler, queues: 
queues, cpuRanges: consumptionRanges, cpuAllotments: consumptionAllotments, @@ -117,7 +120,7 @@ func (ml *multiLevelQueue) PushMessage(msg message) bool { return false } ml.pendingMessages++ - ml.throttler.AddMessage(msg.validatorID) + ml.msgThrottler.Add(msg.validatorID) select { case ml.semaChan <- struct{}{}: default: @@ -135,7 +138,7 @@ func (ml *multiLevelQueue) PopMessage() (message, error) { msg, err := ml.popMessage() if err == nil { ml.pendingMessages-- - ml.throttler.RemoveMessage(msg.validatorID) + ml.msgThrottler.Remove(msg.validatorID) ml.metrics.pending.Dec() } return msg, err @@ -146,7 +149,7 @@ func (ml *multiLevelQueue) UtilizeCPU(vdr ids.ShortID, duration time.Duration) { ml.lock.Lock() defer ml.lock.Unlock() - ml.throttler.UtilizeCPU(vdr, duration) + ml.cpuTracker.UtilizeCPU(vdr, duration) ml.intervalConsumption += duration ml.tierConsumption += duration if ml.tierConsumption > ml.cpuAllotments[ml.currentTier] { @@ -161,7 +164,8 @@ func (ml *multiLevelQueue) EndInterval() { ml.lock.Lock() defer ml.lock.Unlock() - ml.throttler.EndInterval() + ml.cpuTracker.EndInterval() + ml.msgThrottler.EndInterval() ml.metrics.cpu.Observe(float64(ml.intervalConsumption.Milliseconds())) ml.intervalConsumption = 0 } @@ -190,7 +194,7 @@ func (ml *multiLevelQueue) popMessage() (message, error) { ml.queues[ml.currentTier].waitingTime.Observe(float64(time.Since(msg.received))) // Check where messages from this validator currently belong - cpu, _ := ml.throttler.GetUtilization(msg.validatorID) + cpu := ml.cpuTracker.GetUtilization(msg.validatorID) correctIndex := ml.getPriorityIndex(cpu) // If the message is at least the priority of the current tier @@ -228,12 +232,12 @@ func (ml *multiLevelQueue) pushMessage(msg message) bool { ml.log.Warn("Dropping message due to invalid validatorID") return false } - cpu, throttle := ml.throttler.GetUtilization(validatorID) + throttle := ml.msgThrottler.Throttle(validatorID) if throttle { ml.metrics.throttled.Inc() return false } - + cpu := ml.cpuTracker.GetUtilization(validatorID) queueIndex := ml.getPriorityIndex(cpu) return ml.waterfallMessage(msg, queueIndex) diff --git a/snow/networking/router/service_queue_test.go b/snow/networking/router/service_queue_test.go index 8ef87f0050c7..bad122e57579 100644 --- a/snow/networking/router/service_queue_test.go +++ b/snow/networking/router/service_queue_test.go @@ -10,6 +10,7 @@ import ( "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/gecko/snow/networking/throttler" "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/logging" ) @@ -42,10 +43,10 @@ func setupMultiLevelQueue(t *testing.T, bufferSize int) (messageQueue, chan stru consumptionRanges, consumptionAllotments, bufferSize, - DefaultMaxNonStakerPendingMsgs, + throttler.DefaultMaxNonStakerPendingMsgs, time.Second, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, ) return queue, semaChan, vdrs @@ -170,10 +171,10 @@ func TestMultiLevelQueuePrioritizes(t *testing.T) { consumptionRanges, consumptionAllotments, bufferSize, - DefaultMaxNonStakerPendingMsgs, + throttler.DefaultMaxNonStakerPendingMsgs, time.Second, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, ) // Utilize CPU such that the next message from validator2 will be placed on a lower @@ -265,10 +266,10 @@ func TestMultiLevelQueuePushesDownOldMessages(t *testing.T) { consumptionRanges, consumptionAllotments, bufferSize, - 
DefaultMaxNonStakerPendingMsgs, + throttler.DefaultMaxNonStakerPendingMsgs, time.Second, - DefaultStakerPortion, - DefaultStakerPortion, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, ) queue.PushMessage(message{ diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 08f6b509d226..26a2b91a37c7 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/snow/networking/router" + "github.com/ava-labs/gecko/snow/networking/throttler" "github.com/ava-labs/gecko/snow/networking/timeout" "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/logging" @@ -67,9 +68,9 @@ func TestTimeout(t *testing.T) { validators.NewSet(), nil, 1, - router.DefaultMaxNonStakerPendingMsgs, - router.DefaultStakerPortion, - router.DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -124,9 +125,9 @@ func TestReliableMessages(t *testing.T) { validators.NewSet(), nil, 1, - router.DefaultMaxNonStakerPendingMsgs, - router.DefaultStakerPortion, - router.DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) @@ -191,9 +192,9 @@ func TestReliableMessagesToMyself(t *testing.T) { validators.NewSet(), nil, 1, - router.DefaultMaxNonStakerPendingMsgs, - router.DefaultStakerPortion, - router.DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) diff --git a/snow/networking/throttler/ewma.go b/snow/networking/throttler/ewma.go index 0ea9098c15cd..19d984973a8f 100644 --- a/snow/networking/throttler/ewma.go +++ b/snow/networking/throttler/ewma.go @@ -15,17 +15,16 @@ import ( ) const ( - defaultDecayFactor float64 = 2 - defaultIntervalsUntilPruning uint32 = 60 - defaultMinimumCPUAllotment = time.Nanosecond + defaultDecayFactor float64 = 2 + defaultMinimumCPUAllotment = time.Nanosecond ) -type ewmaThrottler struct { +type ewmaCPUTracker struct { lock sync.Mutex log logging.Logger // Track peers - spenders map[[20]byte]*spender + cpuSpenders map[[20]byte]*cpuSpender cumulativeEWMA time.Duration vdrs validators.Set @@ -33,41 +32,22 @@ type ewmaThrottler struct { decayFactor float64 // Factor used to discount the EWMA at every period stakerCPU time.Duration // Amount of CPU time reserved for stakers nonReservedCPU time.Duration // Amount of CPU time that is not reserved for stakers - - // Track pending messages - reservedStakerMessages uint32 // Number of messages reserved for stakers - nonReservedMsgs uint32 // Number of non-reserved messages left to a shared message pool - pendingNonReservedMsgs uint32 // Number of pending messages taken from the shared message pool - - // Threshold of messages taken from the pool before the throttler begins to enforce hard caps on individual peers' pending messages - enforceIndividualCapThreshold uint32 - // Cap on number of pending messages allowed to a non-staker (not enforced until above [enforceIndividualCapThreshold] is exceeded) - maxNonStakerPendingMsgs uint32 - - // Statistics adjusted at every interval - currentPeriod uint32 } -// NewEWMAThrottler returns a Throttler that uses exponentially weighted moving +// 
NewEWMATracker returns a CPUTracker that uses exponentially weighted moving // average to estimate CPU utilization. // -// [maxMessages] is the maximum number of messages allotted to this chain -// [stakerMsgPortion] is the portion of messages to reserve exclusively for stakers -// [stakerCPUPortion] is the portion of CPU utilization to reserve for stakers -// both staker portions should be in the range (0, 1] +// [stakerCPUPortion] is the portion of CPU utilization to reserve for stakers (range (0, 1]) // [period] is the interval of time to use for the calculation of EWMA // -// Note: ewmaThrottler uses the period as the total amount of time per interval, +// Note: ewmaCPUTracker uses the period as the total amount of time per interval, // which is not the limit since it tracks consumption using EWMA. -func NewEWMAThrottler( +func NewEWMATracker( vdrs validators.Set, - maxMessages, - maxNonStakerPendingMsgs uint32, - stakerMsgPortion, stakerCPUPortion float64, period time.Duration, log logging.Logger, -) Throttler { +) CPUTracker { // Amount of CPU time reserved for processing messages from stakers stakerCPU := time.Duration(float64(period) * stakerCPUPortion) if stakerCPU < defaultMinimumCPUAllotment { @@ -82,69 +62,30 @@ func NewEWMAThrottler( nonReservedCPU = defaultMinimumCPUAllotment } - // Number of messages reserved for Stakers vs. Non-Stakers - reservedStakerMessages := uint32(stakerMsgPortion * float64(maxMessages)) - nonReservedMsgs := maxMessages - reservedStakerMessages - - throttler := &ewmaThrottler{ - spenders: make(map[[20]byte]*spender), - vdrs: vdrs, - log: log, + throttler := &ewmaCPUTracker{ + cpuSpenders: make(map[[20]byte]*cpuSpender), + vdrs: vdrs, + log: log, decayFactor: defaultDecayFactor, stakerCPU: stakerCPU, nonReservedCPU: nonReservedCPU, - - reservedStakerMessages: reservedStakerMessages, - nonReservedMsgs: nonReservedMsgs, - enforceIndividualCapThreshold: nonReservedMsgs / 2, // If the pool is half empty, begin to enforce the max message caps - maxNonStakerPendingMsgs: maxNonStakerPendingMsgs, } - // Add validators to spenders, so that they will be calculated correctly in + // Add validators to cpuSpenders, so that they will be calculated correctly in // EndInterval for _, vdr := range vdrs.List() { - throttler.spenders[vdr.ID().Key()] = &spender{} + throttler.cpuSpenders[vdr.ID().Key()] = &cpuSpender{} } // Call EndInterval to calculate initial period statistics and initial - // spender values for validators + // cpuSpender values for validators throttler.EndInterval() return throttler } -func (et *ewmaThrottler) AddMessage(validatorID ids.ShortID) { - et.lock.Lock() - defer et.lock.Unlock() - - sp := et.getSpender(validatorID) - sp.pendingMessages++ - - // If the spender has exceeded its message allotment, then the additional - // message is taken from the pool - if sp.pendingMessages > sp.msgAllotment { - sp.pendingPoolMessages++ - et.pendingNonReservedMsgs++ - } -} - -func (et *ewmaThrottler) RemoveMessage(validatorID ids.ShortID) { - et.lock.Lock() - defer et.lock.Unlock() - - sp := et.getSpender(validatorID) - sp.pendingMessages-- - - // If the spender has pending messages taken from the pool, - // they are the first messages to be removed. 
- if sp.pendingPoolMessages > 0 { - sp.pendingPoolMessages-- - et.pendingNonReservedMsgs-- - } -} - -func (et *ewmaThrottler) UtilizeCPU( +func (et *ewmaCPUTracker) UtilizeCPU( validatorID ids.ShortID, consumption time.Duration, ) { @@ -153,140 +94,86 @@ func (et *ewmaThrottler) UtilizeCPU( sp := et.getSpender(validatorID) sp.cpuEWMA += consumption - sp.lastSpend = et.currentPeriod et.cumulativeEWMA += consumption } -// Returns CPU GetUtilization metric as percentage of expected utilization and -// boolean specifying whether or not the validator has exceeded its message -// allotment. -func (et *ewmaThrottler) GetUtilization( - validatorID ids.ShortID, -) (float64, bool) { +// GetUtilization returns a percentage of expected CPU utilization of the peer +// corresponding to [validatorID] +func (et *ewmaCPUTracker) GetUtilization(validatorID ids.ShortID) float64 { et.lock.Lock() defer et.lock.Unlock() sharedUtilization := float64(et.cumulativeEWMA) / float64(et.nonReservedCPU) sp := et.getSpender(validatorID) if !sp.staking { - exceedsMessageAllotment := et.pendingNonReservedMsgs > et.nonReservedMsgs || // the shared message pool has been taken - (sp.pendingMessages > sp.maxMessages && // Spender has exceeded its individual cap - et.pendingNonReservedMsgs > et.enforceIndividualCapThreshold) // And the threshold before enforcing the cap has been reached - - if exceedsMessageAllotment { - et.log.Verbo("Throttling non-staker %s: %s. Pending pool messages: %d/%d.", - validatorID, - sp, - et.pendingNonReservedMsgs, - et.nonReservedMsgs) - } - return sharedUtilization, exceedsMessageAllotment - } - - // Staker should only be throttled if it has exceeded its message allotment - // and there are either no messages left in the shared pool or it has - // exceeded its own maximum message allocation. - exceedsMessageAllotment := sp.pendingMessages > sp.msgAllotment && // Throttle if the staker has exceeded its allotment - (et.pendingNonReservedMsgs > et.nonReservedMsgs || // And either the shared message pool is empty - (et.pendingNonReservedMsgs > et.enforceIndividualCapThreshold && // Or the threshold before enforcing the cap has been reached - sp.pendingMessages > sp.maxMessages)) // and this staker has exceeded its individual cap - - if exceedsMessageAllotment { - et.log.Debug("Throttling staker %s: %s. 
Pending pool messages: %d/%d.", - validatorID, - sp, - et.pendingNonReservedMsgs, - et.nonReservedMsgs) + return sharedUtilization } - return math.Min(float64(sp.cpuEWMA)/float64(sp.expectedCPU), sharedUtilization), exceedsMessageAllotment + return math.Min(float64(sp.cpuEWMA)/float64(sp.expectedCPU), sharedUtilization) } -func (et *ewmaThrottler) EndInterval() { +// EndInterval registers the end of a given CPU interval by discounting +// all cpuSpenders' cpuEWMA and removing outstanding spenders that have sufficiently +// low cpuEWMA stats +func (et *ewmaCPUTracker) EndInterval() { et.lock.Lock() defer et.lock.Unlock() - et.currentPeriod++ - et.cumulativeEWMA = time.Duration(float64(et.cumulativeEWMA) / et.decayFactor) stakingWeight := et.vdrs.Weight() - for key, spender := range et.spenders { - spender.cpuEWMA = time.Duration(float64(spender.cpuEWMA) / et.decayFactor) + removed := 0 + for key, cpuSpender := range et.cpuSpenders { + cpuSpender.cpuEWMA = time.Duration(float64(cpuSpender.cpuEWMA) / et.decayFactor) if vdr, exists := et.vdrs.Get(ids.NewShortID(key)); exists { stakerPortion := float64(vdr.Weight()) / float64(stakingWeight) // Calculate staker allotment here - spender.staking = true - spender.msgAllotment = uint32(float64(et.reservedStakerMessages) * stakerPortion) - spender.maxMessages = spender.msgAllotment + et.maxNonStakerPendingMsgs - spender.expectedCPU = time.Duration(float64(et.stakerCPU)*stakerPortion) + defaultMinimumCPUAllotment + cpuSpender.staking = true + cpuSpender.expectedCPU = time.Duration(float64(et.stakerCPU)*stakerPortion) + defaultMinimumCPUAllotment continue } - if spender.lastSpend+defaultIntervalsUntilPruning < et.currentPeriod && spender.pendingMessages == 0 { - et.log.Debug("Removing validator from throttler after not hearing from it for %d periods", - et.currentPeriod-spender.lastSpend) - delete(et.spenders, key) + if cpuSpender.cpuEWMA == 0 { + removed++ + delete(et.cpuSpenders, key) } - // If the validator is not a staker and was not deleted, set its spender + // If the validator is not a staker and was not deleted, set its cpuSpender // attributes - spender.staking = false - spender.msgAllotment = 0 - spender.maxMessages = et.maxNonStakerPendingMsgs - spender.expectedCPU = defaultMinimumCPUAllotment + cpuSpender.staking = false + cpuSpender.expectedCPU = defaultMinimumCPUAllotment } + et.log.Debug("Removed %d validators from CPU Tracker.", removed) } -// getSpender returns the [spender] corresponding to [validatorID] -func (et *ewmaThrottler) getSpender(validatorID ids.ShortID) *spender { +// getSpender returns the [cpuSpender] corresponding to [validatorID] +func (et *ewmaCPUTracker) getSpender(validatorID ids.ShortID) *cpuSpender { validatorKey := validatorID.Key() - if sp, exists := et.spenders[validatorKey]; exists { + if sp, exists := et.cpuSpenders[validatorKey]; exists { return sp } - // If this validator did not exist in spenders, create it and return - sp := &spender{ - maxMessages: et.maxNonStakerPendingMsgs, + // If this validator did not exist in cpuSpenders, create it and return + sp := &cpuSpender{ expectedCPU: defaultMinimumCPUAllotment, } - et.spenders[validatorKey] = sp + et.cpuSpenders[validatorKey] = sp return sp } -type spender struct { - // Last period that this spender utilized the CPU - lastSpend uint32 - - // Number of pending messages this spender has taken from the pool - pendingPoolMessages uint32 - - // Number of messages this spender currently has pending - pendingMessages uint32 - - // Number of messages allocated to 
this spender as a staker - msgAllotment uint32 - - // Max number of messages this spender can use even if the shared pool is - // non-empty - maxMessages uint32 - - // EWMA of this spender's CPU utilization +type cpuSpender struct { + // EWMA of this cpuSpender's CPU utilization cpuEWMA time.Duration // The expected CPU utilization of this peer expectedCPU time.Duration - // Flag to indicate if this spender is a staker + // Flag to indicate if this cpuSpender is a staker staking bool } -func (sp *spender) String() string { - return fmt.Sprintf("Spender(Messages: (%d+%d)/(%d+%d), CPU: %s/%s)", - sp.pendingPoolMessages, - sp.pendingMessages-sp.pendingPoolMessages, - sp.msgAllotment, - sp.maxMessages-sp.msgAllotment, +func (sp *cpuSpender) String() string { + return fmt.Sprintf("CPUTracker(CPU: %s/%s)", sp.cpuEWMA, sp.expectedCPU, ) diff --git a/snow/networking/throttler/message_throttler.go b/snow/networking/throttler/message_throttler.go new file mode 100644 index 000000000000..7eb3b4cd626e --- /dev/null +++ b/snow/networking/throttler/message_throttler.go @@ -0,0 +1,221 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package throttler + +import ( + "fmt" + "sync" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/logging" +) + +const ( + defaultIntervalsUntilPruning uint32 = 60 +) + +type messageThrottler struct { + lock sync.Mutex + log logging.Logger + + // Track peers + msgSpenders map[[20]byte]*msgSpender + vdrs validators.Set + + // Track pending messages + reservedStakerMessages uint32 // Number of messages reserved for stakers + nonReservedMsgs uint32 // Number of non-reserved messages left to a shared message pool + pendingNonReservedMsgs uint32 // Number of pending messages taken from the shared message pool + + // Cap on number of pending messages allowed to a non-staker + maxNonStakerPendingMsgs uint32 + + // Statistics adjusted at every interval + currentPeriod uint32 +} + +// NewMessageThrottler returns a MessageThrottler that throttles peers +// when they have too many pending messages outstanding. +// +// [maxMessages] is the maximum number of messages allotted to this chain +// [stakerMsgPortion] is the portion of messages to reserve exclusively for stakers +// should be in the range (0, 1] +func NewMessageThrottler( + vdrs validators.Set, + maxMessages, + maxNonStakerPendingMsgs uint32, + stakerMsgPortion float64, + log logging.Logger, +) CountingThrottler { + // Number of messages reserved for Stakers vs. 
Non-Stakers + reservedStakerMessages := uint32(stakerMsgPortion * float64(maxMessages)) + nonReservedMsgs := maxMessages - reservedStakerMessages + + throttler := &messageThrottler{ + msgSpenders: make(map[[20]byte]*msgSpender), + vdrs: vdrs, + log: log, + + reservedStakerMessages: reservedStakerMessages, + nonReservedMsgs: nonReservedMsgs, + maxNonStakerPendingMsgs: maxNonStakerPendingMsgs, + } + + // Add validators to msgSpenders, so that they will be calculated correctly in + // EndInterval + for _, vdr := range vdrs.List() { + throttler.msgSpenders[vdr.ID().Key()] = &msgSpender{} + } + + // Call EndInterval to calculate initial period statistics and initial + // msgSpender values for validators + throttler.EndInterval() + return throttler +} + +func (et *messageThrottler) Add(validatorID ids.ShortID) { + et.lock.Lock() + defer et.lock.Unlock() + + sp := et.getSpender(validatorID) + sp.pendingMessages++ + sp.lastSpend = et.currentPeriod + + // If the msgSpender has exceeded its message allotment, then the additional + // message is taken from the pool + if sp.pendingMessages > sp.msgAllotment { + sp.pendingPoolMessages++ + et.pendingNonReservedMsgs++ + } +} + +func (et *messageThrottler) Remove(validatorID ids.ShortID) { + et.lock.Lock() + defer et.lock.Unlock() + + sp := et.getSpender(validatorID) + sp.pendingMessages-- + + // If the msgSpender has pending messages taken from the pool, + // they are the first messages to be removed. + if sp.pendingPoolMessages > 0 { + sp.pendingPoolMessages-- + et.pendingNonReservedMsgs-- + } +} + +// Throttle returns true if messages from [validatorID] should be throttled due +// to having too many pending messages +func (et *messageThrottler) Throttle( + validatorID ids.ShortID, +) bool { + et.lock.Lock() + defer et.lock.Unlock() + + sp := et.getSpender(validatorID) + if !sp.staking { + exceedsMessageAllotment := et.pendingNonReservedMsgs > et.nonReservedMsgs || // the shared message pool has been taken + (sp.pendingMessages > sp.maxMessages) // Spender has exceeded its individual cap + + if exceedsMessageAllotment { + et.log.Verbo("Throttling non-staker %s: %s. Pending pool messages: %d/%d.", + validatorID, + sp, + et.pendingNonReservedMsgs, + et.nonReservedMsgs) + } + return exceedsMessageAllotment + } + + exceedsMessageAllotment := sp.pendingMessages > sp.msgAllotment && // Throttle if the staker has exceeded its allotment + (et.pendingNonReservedMsgs > et.nonReservedMsgs || // And either the shared message pool is empty + sp.pendingMessages > sp.maxMessages) // or this staker has exceeded its individual cap + + if exceedsMessageAllotment { + et.log.Debug("Throttling staker %s: %s. 
Pending pool messages: %d/%d.", + validatorID, + sp, + et.pendingNonReservedMsgs, + et.nonReservedMsgs) + } + return exceedsMessageAllotment +} + +func (et *messageThrottler) EndInterval() { + et.lock.Lock() + defer et.lock.Unlock() + + et.currentPeriod++ + stakingWeight := et.vdrs.Weight() + + for key, msgSpender := range et.msgSpenders { + if vdr, exists := et.vdrs.Get(ids.NewShortID(key)); exists { + stakerPortion := float64(vdr.Weight()) / float64(stakingWeight) + + // Calculate staker allotment here + msgSpender.staking = true + msgSpender.msgAllotment = uint32(float64(et.reservedStakerMessages) * stakerPortion) + msgSpender.maxMessages = msgSpender.msgAllotment + et.maxNonStakerPendingMsgs + continue + } + + if msgSpender.lastSpend+defaultIntervalsUntilPruning < et.currentPeriod && msgSpender.pendingMessages == 0 { + et.log.Debug("Removing validator from throttler after not hearing from it for %d periods", + et.currentPeriod-msgSpender.lastSpend) + delete(et.msgSpenders, key) + } + + // If the validator is not a staker and was not deleted, set its msgSpender + // attributes + msgSpender.staking = false + msgSpender.msgAllotment = 0 + msgSpender.maxMessages = et.maxNonStakerPendingMsgs + } +} + +// getSpender returns the [msgSpender] corresponding to [validatorID] +func (et *messageThrottler) getSpender(validatorID ids.ShortID) *msgSpender { + validatorKey := validatorID.Key() + if sp, exists := et.msgSpenders[validatorKey]; exists { + return sp + } + + // If this validator did not exist in msgSpenders, create it and return + sp := &msgSpender{ + maxMessages: et.maxNonStakerPendingMsgs, + } + et.msgSpenders[validatorKey] = sp + return sp +} + +type msgSpender struct { + // Last period that this msgSpender had a message added + lastSpend uint32 + + // Number of pending messages this msgSpender has taken from the pool + pendingPoolMessages uint32 + + // Number of messages this msgSpender currently has pending + pendingMessages uint32 + + // Number of messages allocated to this msgSpender as a staker + msgAllotment uint32 + + // Max number of messages this msgSpender can use even if the shared pool is + // non-empty + maxMessages uint32 + + // Flag to indicate if this msgSpender is a staker + staking bool +} + +func (sp *msgSpender) String() string { + return fmt.Sprintf("MsgSpender(Messages: (%d+%d)/(%d+%d))", + sp.pendingPoolMessages, + sp.pendingMessages-sp.pendingPoolMessages, + sp.msgAllotment, + sp.maxMessages-sp.msgAllotment, + ) +} diff --git a/snow/networking/throttler/no.go b/snow/networking/throttler/no.go index 02f6dc18ae79..f734336375b9 100644 --- a/snow/networking/throttler/no.go +++ b/snow/networking/throttler/no.go @@ -9,17 +9,27 @@ import ( "github.com/ava-labs/gecko/ids" ) -type noThrottler struct{} +type noCountThrottler struct{} -func (noThrottler) AddMessage(ids.ShortID) {} +func (noCountThrottler) Add(ids.ShortID) {} -func (noThrottler) RemoveMessage(ids.ShortID) {} +func (noCountThrottler) Remove(ids.ShortID) {} -func (noThrottler) UtilizeCPU(ids.ShortID, time.Duration) {} +func (noCountThrottler) Throttle(ids.ShortID) bool { return false } -func (noThrottler) GetUtilization(ids.ShortID) (float64, bool) { return 0, false } +func (noCountThrottler) EndInterval() {} -func (noThrottler) EndInterval() {} +// NewNoCountThrottler returns a CountingThrottler that will never throttle +func NewNoCountThrottler() CountingThrottler { return noCountThrottler{} } -// NewNoThrottler returns a throttler that will never throttle -func NewNoThrottler() Throttler { return noThrottler{} 
} +type noCPUTracker struct{} + +func (noCPUTracker) UtilizeCPU(ids.ShortID, time.Duration) {} + +func (noCPUTracker) GetUtilization(ids.ShortID) float64 { return 0 } + +func (noCPUTracker) EndInterval() {} + +// NewNoCPUTracker returns a CPUTracker that does not track CPU usage and +// always returns 0 for the utilization value +func NewNoCPUTracker() CPUTracker { return noCPUTracker{} } diff --git a/snow/networking/throttler/throttler.go b/snow/networking/throttler/throttler.go index 3c0ef9679bab..bf4411863d16 100644 --- a/snow/networking/throttler/throttler.go +++ b/snow/networking/throttler/throttler.go @@ -9,6 +9,11 @@ import ( "github.com/ava-labs/gecko/ids" ) +const ( + DefaultMaxNonStakerPendingMsgs uint32 = 3 + DefaultStakerPortion float64 = 0.2 +) + // Throttler provides an interface to register consumption // of resources and prioritize messages from nodes that have // used less CPU time. @@ -19,3 +24,19 @@ type Throttler interface { GetUtilization(ids.ShortID) (float64, bool) // Returns the CPU based priority and whether or not the peer has too many pending messages EndInterval() // Notify throttler that the current period has ended } + +// CPUTracker tracks the consumption of CPU time +type CPUTracker interface { + UtilizeCPU(ids.ShortID, time.Duration) + GetUtilization(ids.ShortID) float64 + EndInterval() +} + +// CountingThrottler tracks the usage of a discrete resource (ex. pending messages) by a peer +// and determines whether or not a peer should be throttled. +type CountingThrottler interface { + Add(ids.ShortID) + Remove(ids.ShortID) + Throttle(ids.ShortID) bool + EndInterval() +} diff --git a/snow/networking/throttler/throttler_test.go b/snow/networking/throttler/throttler_test.go index c7d3d2cc54ab..5c9c640d72bf 100644 --- a/snow/networking/throttler/throttler_test.go +++ b/snow/networking/throttler/throttler_test.go @@ -12,52 +12,80 @@ import ( "github.com/ava-labs/gecko/utils/logging" ) -const ( - defaultMaxNonStakerPendingMsgs uint32 = 3 -) - -func TestEWMAThrottler(t *testing.T) { +func TestEWMATrackerPrioritizes(t *testing.T) { vdrs := validators.NewSet() validator0 := validators.GenerateRandomValidator(1) validator1 := validators.GenerateRandomValidator(1) + nonStaker := ids.NewShortID([20]byte{1}) vdrs.Add(validator0) vdrs.Add(validator1) - maxMessages := uint32(16) - msgPortion := 0.25 cpuPortion := 0.25 period := time.Second - throttler := NewEWMAThrottler(vdrs, maxMessages, defaultMaxNonStakerPendingMsgs, msgPortion, cpuPortion, period, logging.NoLog{}) + throttler := NewEWMATracker(vdrs, cpuPortion, period, logging.NoLog{}) throttler.UtilizeCPU(validator0.ID(), 25*time.Millisecond) throttler.UtilizeCPU(validator1.ID(), 5*time.Second) - cpu0, throttle0 := throttler.GetUtilization(validator0.ID()) - cpu1, throttle1 := throttler.GetUtilization(validator1.ID()) - - if throttle0 { - t.Fatalf("Should not throttle validator0 with no pending messages") - } - if throttle1 { - t.Fatalf("Should not throttle validator1 with no pending messages") - } + cpu0 := throttler.GetUtilization(validator0.ID()) + cpu1 := throttler.GetUtilization(validator1.ID()) + cpuNonStaker := throttler.GetUtilization(nonStaker) if cpu1 <= cpu0 { t.Fatalf("CPU utilization for validator1: %f should be greater than that of validator0: %f", cpu1, cpu0) } - // Test that throttler prevents unknown validators from taking up half the message queue - for i := uint32(0); i < maxMessages; i++ { - throttler.AddMessage(ids.NewShortID([20]byte{byte(i)})) + if cpuNonStaker < cpu1 { + t.Fatalf("CPU 
Utilization for non-staker: %f should be greater than or equal to the CPU Utilization for the highest spending staker: %f", cpuNonStaker, cpu1) + } +} + +func TestEWMATrackerPrunesSpenders(t *testing.T) { + vdrs := validators.NewSet() + staker0 := validators.GenerateRandomValidator(1) + staker1 := validators.GenerateRandomValidator(1) + nonStaker0 := ids.NewShortID([20]byte{1}) + nonStaker1 := ids.NewShortID([20]byte{2}) + + vdrs.Add(staker0) + vdrs.Add(staker1) + + cpuPortion := 0.25 + period := time.Second + throttler := NewEWMATracker(vdrs, cpuPortion, period, logging.NoLog{}) + + throttler.UtilizeCPU(staker0.ID(), 1.0) + throttler.UtilizeCPU(nonStaker0, 1.0) + + // 3 Cases: + // Stakers should not be pruned + // Non-stakers with non-zero cpuEWMA should not be pruned + // Non-stakers with cpuEWMA of 0 should be pruned + + // After 64 intervals nonStaker0 should be removed because its cpuEWMA statistic should reach 0 + // while nonStaker1 utilizes the CPU in every interval, so it should not be removed. + for i := 0; i < 64; i++ { + throttler.UtilizeCPU(nonStaker1, 1.0) + throttler.EndInterval() } - _, throttle := throttler.GetUtilization(ids.NewShortID([20]byte{'s', 'y', 'b', 'i', 'l'})) - if !throttle { - t.Fatal("Throttler should have started throttling messages from unknown peers") + // Ensure that the validators and the non-staker heard from every interval were not pruned + ewmat := throttler.(*ewmaCPUTracker) + if _, ok := ewmat.cpuSpenders[staker0.ID().Key()]; !ok { + t.Fatal("Staker was pruned from the set of spenders") + } + if _, ok := ewmat.cpuSpenders[staker1.ID().Key()]; !ok { + t.Fatal("Staker was pruned from the set of spenders") + } + if _, ok := ewmat.cpuSpenders[nonStaker0.Key()]; ok { + t.Fatal("Non-staker, not heard from in 64 periods, should have been pruned from the set of spenders") + } + if _, ok := ewmat.cpuSpenders[nonStaker1.Key()]; ok { + t.Fatal("Non-staker heard from in every period, was pruned from the set of spenders") } } -func TestThrottlerPrunesSpenders(t *testing.T) { +func TestMessageThrottlerPrunesSpenders(t *testing.T) { vdrs := validators.NewSet() staker0 := validators.GenerateRandomValidator(1) staker1 := validators.GenerateRandomValidator(1) @@ -69,44 +97,50 @@ func TestThrottlerPrunesSpenders(t *testing.T) { vdrs.Add(staker1) maxMessages := uint32(1024) - cpuPortion := 0.25 msgPortion := 0.25 - period := time.Second - throttler := NewEWMAThrottler(vdrs, maxMessages, defaultMaxNonStakerPendingMsgs, msgPortion, cpuPortion, period, logging.NoLog{}) - throttler.AddMessage(nonStaker2) // nonStaker2 should not be removed with a pending message - throttler.UtilizeCPU(nonStaker0, 1.0) - throttler.UtilizeCPU(nonStaker1, 1.0) - intervalsUntilPruning := int(defaultIntervalsUntilPruning) - // Let two intervals pass with no activity to ensure that nonStaker1 can be pruned + throttler := NewMessageThrottler(vdrs, maxMessages, DefaultMaxNonStakerPendingMsgs, msgPortion, logging.NoLog{}) + + // 4 Cases: + // Stakers should not be pruned + // Non-stakers with pending messages should not be pruned + // Non-stakers heard from recently should not be pruned + // Non-stakers not heard from in [defaultIntervalsUntilPruning] should be pruned + + // Add pending messages for nonStaker1 and nonStaker2 + throttler.Add(nonStaker2) // Will not be removed, so it should not be pruned + throttler.Add(nonStaker1) + throttler.EndInterval() + throttler.Remove(nonStaker1) // The pending message was removed, so nonStaker1 should be pruned throttler.EndInterval() - 
throttler.UtilizeCPU(nonStaker0, 1.0) + intervalsUntilPruning := int(defaultIntervalsUntilPruning) // Let the required number of intervals elapse to allow nonStaker1 to be pruned for i := 0; i < intervalsUntilPruning; i++ { + throttler.Add(nonStaker0) // nonStaker0 is heard from in every interval, so it should not be pruned throttler.EndInterval() + throttler.Remove(nonStaker0) } - // Ensure that the validators and the non-staker heard from in the past [intervalsUntilPruning] were not pruned - ewmat := throttler.(*ewmaThrottler) - if _, ok := ewmat.spenders[staker0.ID().Key()]; !ok { + msgThrottler := throttler.(*messageThrottler) + if _, ok := msgThrottler.msgSpenders[staker0.ID().Key()]; !ok { t.Fatal("Staker was pruned from the set of spenders") } - if _, ok := ewmat.spenders[staker1.ID().Key()]; !ok { + if _, ok := msgThrottler.msgSpenders[staker1.ID().Key()]; !ok { t.Fatal("Staker was pruned from the set of spenders") } - if _, ok := ewmat.spenders[nonStaker0.Key()]; !ok { - t.Fatal("Non-staker heard from recently was pruned from the set of spenders") + if _, ok := msgThrottler.msgSpenders[nonStaker0.Key()]; !ok { + t.Fatal("Non-staker heard from within [intervalsUntilPruning] was removed from the set of spenders") } - if _, ok := ewmat.spenders[nonStaker1.Key()]; ok { - t.Fatal("Non-staker not heard from in a long time was not pruned from the set of spenders") + if _, ok := msgThrottler.msgSpenders[nonStaker1.Key()]; ok { + t.Fatal("Non-staker not heard from within [intervalsUntilPruning] was not removed from the set of spenders") } - if _, ok := ewmat.spenders[nonStaker2.Key()]; !ok { + if _, ok := msgThrottler.msgSpenders[nonStaker2.Key()]; !ok { t.Fatal("Non-staker with a pending message was pruned from the set of spenders") } } -func TestThrottleStaker(t *testing.T) { +func TestMessageThrottling(t *testing.T) { vdrs := validators.NewSet() staker0 := validators.GenerateRandomValidator(1) staker1 := validators.GenerateRandomValidator(1) @@ -116,62 +150,69 @@ func TestThrottleStaker(t *testing.T) { vdrs.Add(staker0) vdrs.Add(staker1) - maxMessages := uint32(9) + maxMessages := uint32(8) msgPortion := 0.25 - cpuPortion := 0.25 - period := time.Second - throttler := NewEWMAThrottler(vdrs, maxMessages, defaultMaxNonStakerPendingMsgs, msgPortion, cpuPortion, period, logging.NoLog{}) + throttler := NewMessageThrottler(vdrs, maxMessages, DefaultMaxNonStakerPendingMsgs, msgPortion, logging.NoLog{}) // Message Allotment: 0.5 * 0.25 * 8 = 1 - // Message Pool: 6 messages - // Max Messages: 1 + defaultMaxNonStakerPendingMsgs + // Message Pool: 8 * 0.75 = 6 messages + // Max Messages: 1 + DefaultMaxNonStakerPendingMsgs // Validator should be throttled if it has exceeded its max messages // or it has exceeded its message allotment and the shared message pool is empty. 
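// As a rough sanity check of the arithmetic above (an illustrative aside
// based on NewMessageThrottler, not one of the test's assertions): with
// maxMessages = 8 and msgPortion = 0.25, the throttler reserves
// uint32(0.25*8) = 2 messages for stakers and leaves a shared pool of
// 8 - 2 = 6; with two equally weighted stakers, each staker's allotment is
// uint32(0.5*2) = 1 and its hard cap is msgAllotment +
// DefaultMaxNonStakerPendingMsgs = 1 + 3 = 4 pending messages.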
// staker0 consumes its entire message allotment // Ensure that it is allowed to consume its entire max messages before being throttled - for i := 0; i < int(defaultMaxNonStakerPendingMsgs)+1; i++ { - throttler.AddMessage(staker0.ID()) - if _, throttle := throttler.GetUtilization(staker0.ID()); throttle { + for i := 0; i < int(DefaultMaxNonStakerPendingMsgs)+1; i++ { + throttler.Add(staker0.ID()) + if throttler.Throttle(staker0.ID()) { t.Fatal("Should not throttle message from staker until it has exceeded its own allotment") } } - throttler.AddMessage(staker0.ID()) - if _, throttle := throttler.GetUtilization(staker0.ID()); !throttle { - t.Fatal("Should have throttled message after exceeding message") + // Ensure staker is throttled after exceeding its own max messages cap + throttler.Add(staker0.ID()) + if !throttler.Throttle(staker0.ID()) { + t.Fatal("Should have throttled message after exceeding message cap") } - // Remove messages to reduce staker0 to have its normal message allotment pending - for i := 0; i < int(defaultMaxNonStakerPendingMsgs); i++ { - throttler.RemoveMessage(staker0.ID()) + // Remove messages to reduce staker0 to have its normal message allotment in pending + for i := 0; i < int(DefaultMaxNonStakerPendingMsgs)+1; i++ { + throttler.Remove(staker0.ID()) } // Consume the entire message pool among two non-stakers - for i := 0; i < int(defaultMaxNonStakerPendingMsgs); i++ { - throttler.AddMessage(nonStaker0) - throttler.AddMessage(nonStaker1) + for i := 0; i < int(DefaultMaxNonStakerPendingMsgs); i++ { + throttler.Add(nonStaker0) + throttler.Add(nonStaker1) // Neither should be throttled because they are only consuming up to their own message cap // and the shared pool has been emptied. - if _, throttle := throttler.GetUtilization(nonStaker0); throttle { + if throttler.Throttle(nonStaker0) { t.Fatalf("Should not have throttled message from nonStaker0 after %d messages", i) } - if _, throttle := throttler.GetUtilization(nonStaker1); throttle { + if throttler.Throttle(nonStaker1) { t.Fatalf("Should not have throttled message from nonStaker1 after %d messages", i) } } // An additional message from staker0 should now cause it to be throttled since the message pool // has been emptied. 
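// (For clarity, a restatement of the rule these assertions exercise,
// mirroring messageThrottler.Throttle above rather than adding behavior:
// a staker is throttled only when pendingMessages > msgAllotment and, in
// addition, either the shared pool is exhausted or pendingMessages >
// maxMessages; a non-staker is throttled as soon as the shared pool is
// exhausted or it exceeds its own maxMessages cap.)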
- if _, throttle := throttler.GetUtilization(staker0.ID()); throttle { + if throttler.Throttle(staker0.ID()) { t.Fatal("Should not have throttled message from staker until it had exceeded its message allotment.") } - throttler.AddMessage(staker0.ID()) - if _, throttle := throttler.GetUtilization(staker0.ID()); !throttle { + throttler.Add(staker0.ID()) + if !throttler.Throttle(staker0.ID()) { t.Fatal("Should have throttled message from staker0 after it exceeded its message allotment because the message pool was empty.") } + + if !throttler.Throttle(nonStaker0) { + t.Fatal("Should have throttled message from nonStaker0 after the message pool was emptied") + } + + if !throttler.Throttle(nonStaker1) { + t.Fatal("Should have throttled message from nonStaker1 after the message pool was emptied") + } } func TestCalculatesEWMA(t *testing.T) { @@ -181,11 +222,9 @@ func TestCalculatesEWMA(t *testing.T) { vdrs.Add(validator0) vdrs.Add(validator1) - maxMessages := uint32(16) - msgPortion := 0.25 stakerPortion := 0.25 period := time.Second - throttler := NewEWMAThrottler(vdrs, maxMessages, defaultMaxNonStakerPendingMsgs, msgPortion, stakerPortion, period, logging.NoLog{}) + throttler := NewEWMATracker(vdrs, stakerPortion, period, logging.NoLog{}) // Spend X CPU time in consecutive intervals and ensure that the throttler correctly calculates EWMA spends := []time.Duration{ @@ -206,7 +245,7 @@ func TestCalculatesEWMA(t *testing.T) { throttler.EndInterval() } - ewmat := throttler.(*ewmaThrottler) + ewmat := throttler.(*ewmaCPUTracker) sp := ewmat.getSpender(validator0.ID()) if sp.cpuEWMA != ewma { t.Fatalf("EWMA Throttler calculated EWMA incorrectly, expected: %s, but calculated: %s", ewma, sp.cpuEWMA) diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 6e8cfd87a968..a3ff946c91fc 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -26,6 +26,7 @@ import ( "github.com/ava-labs/gecko/snow/engine/snowman/bootstrap" "github.com/ava-labs/gecko/snow/networking/router" "github.com/ava-labs/gecko/snow/networking/sender" + "github.com/ava-labs/gecko/snow/networking/throttler" "github.com/ava-labs/gecko/snow/networking/timeout" "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/constants" @@ -1641,9 +1642,9 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { vdrs, msgChan, 1000, - router.DefaultMaxNonStakerPendingMsgs, - router.DefaultStakerPortion, - router.DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) diff --git a/vms/spchainvm/consensus_benchmark_test.go b/vms/spchainvm/consensus_benchmark_test.go index 0f49b89ca4ac..a4e5b0d34f4a 100644 --- a/vms/spchainvm/consensus_benchmark_test.go +++ b/vms/spchainvm/consensus_benchmark_test.go @@ -18,7 +18,9 @@ import ( "github.com/ava-labs/gecko/snow/engine/snowman/bootstrap" "github.com/ava-labs/gecko/snow/networking/router" "github.com/ava-labs/gecko/snow/networking/sender" + "github.com/ava-labs/gecko/snow/networking/throttler" "github.com/ava-labs/gecko/snow/networking/timeout" + "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/logging" @@ -111,9 +113,9 @@ func ConsensusLeader(numBlocks, numTxsPerBlock int, b *testing.B) { vdrs, msgChan, 1000, - router.DefaultMaxNonStakerPendingMsgs, - router.DefaultStakerPortion, - router.DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, 
"", prometheus.NewRegistry(), ) @@ -254,9 +256,9 @@ func ConsensusFollower(numBlocks, numTxsPerBlock int, b *testing.B) { vdrs, msgChan, 1000, - router.DefaultMaxNonStakerPendingMsgs, - router.DefaultStakerPortion, - router.DefaultStakerPortion, + throttler.DefaultMaxNonStakerPendingMsgs, + throttler.DefaultStakerPortion, + throttler.DefaultStakerPortion, "", prometheus.NewRegistry(), ) From 7531e761ad6cb2b117eee2a0174bd83761035c34 Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Sat, 29 Aug 2020 16:08:59 -0400 Subject: [PATCH 43/47] Rename command line parameter setting max number of peer's pending msgs --- main/params.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main/params.go b/main/params.go index 03997835749e..8c9d9aae8fab 100644 --- a/main/params.go +++ b/main/params.go @@ -202,7 +202,7 @@ func init() { fs.Uint64Var(&Config.DisabledStakingWeight, "staking-disabled-weight", 1, "Weight to provide to each peer when staking is disabled") // Throttling: - fs.UintVar(&Config.MaxNonStakerPendingMsgs, "max-non-staker-pending", 3, "Maximum number of messages a non-staker is allowed to have pending.") + fs.UintVar(&Config.MaxNonStakerPendingMsgs, "max-non-staker-pending-msgs", 3, "Maximum number of messages a non-staker is allowed to have pending.") fs.Float64Var(&Config.StakerMsgPortion, "staker-msg-reserved", 0.2, "Reserve a portion of the chain message queue's space for stakers.") fs.Float64Var(&Config.StakerCPUPortion, "staker-cpu-reserved", 0.2, "Reserve a portion of the chain's CPU time for stakers.") From 4ea7152df359059e22b5b2b72dd1c76ff4ecaa45 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 30 Aug 2020 04:27:31 -0400 Subject: [PATCH 44/47] fixed various bugs + fixeds tests --- chains/manager.go | 3 +- main/params.go | 2 +- node/node.go | 9 +- .../avalanche/bootstrap/bootstrapper.go | 2 +- .../avalanche/bootstrap/bootstrapper_test.go | 7 +- snow/engine/snowman/bootstrap/bootstrapper.go | 2 +- .../snowman/bootstrap/bootstrapper_test.go | 7 +- snow/engine/snowman/transitive_test.go | 162 +++++----- snow/networking/awaiting_connections.go | 46 --- snow/networking/router/handler_test.go | 4 +- snow/networking/throttler/throttler_test.go | 76 ++--- snow/validators/manager.go | 13 +- vms/platformvm/add_delegator_tx.go | 20 +- vms/platformvm/add_delegator_tx_test.go | 51 ++-- vms/platformvm/add_subnet_validator_tx.go | 39 ++- .../add_subnet_validator_tx_test.go | 60 ++-- vms/platformvm/add_validator_tx.go | 21 +- vms/platformvm/add_validator_tx_test.go | 14 +- vms/platformvm/advance_time_tx.go | 22 +- vms/platformvm/advance_time_tx_test.go | 43 +-- vms/platformvm/base_tx_test.go | 2 +- vms/platformvm/create_subnet_tx.go | 3 +- vms/platformvm/reward_validator_tx.go | 2 +- vms/platformvm/reward_validator_tx_test.go | 55 ++-- vms/platformvm/service.go | 215 ++++++------- vms/platformvm/spend.go | 1 - vms/platformvm/state.go | 164 +++++++--- vms/platformvm/vm.go | 288 +++++++++++------- vms/platformvm/vm_test.go | 213 ++++++------- vms/spchainvm/consensus_benchmark_test.go | 4 +- vms/spchainvm/vm_test.go | 6 +- 31 files changed, 821 insertions(+), 735 deletions(-) delete mode 100644 snow/networking/awaiting_connections.go diff --git a/chains/manager.go b/chains/manager.go index 31ca9e8c9f53..4a285e2b36c1 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -238,7 +238,8 @@ func (m *manager) ForceCreateChain(chainParams ChainParameters) { // Assert that there isn't already a chain with an alias in [chain].Aliases // (Recall that the string repr. 
of a chain's ID is also an alias for a chain) if alias, isRepeat := m.isChainWithAlias(chainParams.ID.String()); isRepeat { - m.log.Error("there is already a chain with alias '%s'. Chain not created.", alias) + m.log.Debug("there is already a chain with alias '%s'. Chain not created.", + alias) return } diff --git a/main/params.go b/main/params.go index de74f278fad9..8f477da7ac7b 100644 --- a/main/params.go +++ b/main/params.go @@ -32,7 +32,7 @@ import ( ) const ( - dbVersion = "v0.6.1" + dbVersion = "v0.6.5" ) // Results of parsing the CLI diff --git a/node/node.go b/node/node.go index af1c445e574d..960833a03e65 100644 --- a/node/node.go +++ b/node/node.go @@ -158,7 +158,9 @@ func (n *Node) initNetworking() error { // Initialize validator manager and primary network's validator set primaryNetworkValidators := validators.NewSet() n.vdrs = validators.NewManager() - n.vdrs.Set(constants.PrimaryNetworkID, primaryNetworkValidators) + if err := n.vdrs.Set(constants.PrimaryNetworkID, primaryNetworkValidators); err != nil { + return err + } n.Net = network.NewDefaultNetwork( n.Config.ConsensusParams.Metrics, @@ -443,12 +445,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { // Instead of updating node's validator manager, platform chain makes changes // to its own local validator manager (which isn't used for sampling) if !n.Config.EnableStaking { - primaryNetworkValidators := validators.NewSet() - if err := primaryNetworkValidators.AddWeight(n.ID, 1); err != nil { - return fmt.Errorf("couldn't add validator to primary network: %w", err) - } vdrs = validators.NewManager() - vdrs.Set(constants.PrimaryNetworkID, primaryNetworkValidators) } errs := wrappers.Errs{} diff --git a/snow/engine/avalanche/bootstrap/bootstrapper.go b/snow/engine/avalanche/bootstrap/bootstrapper.go index 05dcb23b6baf..9974e231122f 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -136,7 +136,7 @@ func (b *Bootstrapper) fetch(vtxIDs ...ids.ID) error { continue } - validators, err := b.Validators.Sample(1) // validator to send request to + validators, err := b.Beacons.Sample(1) // validator to send request to if err != nil { return fmt.Errorf("dropping request for %s as there are no validators", vtxID) } diff --git a/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/snow/engine/avalanche/bootstrap/bootstrapper_test.go index b4f995410d56..25b939fa3915 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -48,9 +48,8 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *vertex.T sender.CantGetAcceptedFrontier = false - peer := validators.GenerateRandomValidator(1) - peerID := peer.ID() - peers.Add(peer) + peer := ids.GenerateTestShortID() + peers.AddWeight(peer, 1) vtxBlocker, _ := queue.New(prefixdb.New([]byte("vtx"), db)) txBlocker, _ := queue.New(prefixdb.New([]byte("tx"), db)) @@ -68,7 +67,7 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *vertex.T TxBlocked: txBlocker, Manager: manager, VM: vm, - }, peerID, sender, manager, vm + }, peer, sender, manager, vm } // Three vertices in the accepted frontier. None have parents. 
No need to fetch anything diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go index 080ea3d1f03a..629b386a1813 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/snow/engine/snowman/bootstrap/bootstrapper.go @@ -131,7 +131,7 @@ func (b *Bootstrapper) fetch(blkID ids.ID) error { return nil } - validators, err := b.Validators.Sample(1) // validator to send request to + validators, err := b.Beacons.Sample(1) // validator to send request to if err != nil { return fmt.Errorf("dropping request for %s as there are no validators", blkID) } diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index 8e10b7208b07..1bdcef3183b6 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -42,9 +42,8 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *block.Te sender.CantGetAcceptedFrontier = false - peer := validators.GenerateRandomValidator(1) - peerID := peer.ID() - peers.Add(peer) + peer := ids.GenerateTestShortID() + peers.AddWeight(peer, 1) blocker, _ := queue.New(db) @@ -59,7 +58,7 @@ func newConfig(t *testing.T) (Config, ids.ShortID, *common.SenderTest, *block.Te Config: commonConfig, Blocked: blocker, VM: vm, - }, peerID, sender, vm + }, peer, sender, vm } // Single node in the accepted frontier; no need to fetch parent diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 23bdd6d6ecce..7c7ef8902c9f 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -26,15 +26,14 @@ var ( Genesis = ids.GenerateTestID() ) -func setup(t *testing.T) (validators.Validator, validators.Set, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { +func setup(t *testing.T) (ids.ShortID, validators.Set, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { config := DefaultConfig() - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -118,7 +117,7 @@ func TestEngineAdd(t *testing.T) { t.Fatalf("Asked multiple times") } *asked = true - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blkID.Equals(blk.Parent().ID()) { @@ -133,7 +132,7 @@ func TestEngineAdd(t *testing.T) { return blk, nil } - te.Put(vdr.ID(), 0, blk.ID(), blk.Bytes()) + te.Put(vdr, 0, blk.ID(), blk.Bytes()) vm.ParseBlockF = nil @@ -147,7 +146,7 @@ func TestEngineAdd(t *testing.T) { vm.ParseBlockF = func(b []byte) (snowman.Block, error) { return nil, errUnknownBytes } - te.Put(vdr.ID(), *reqID, blk.Parent().ID(), nil) + te.Put(vdr, *reqID, blk.Parent().ID(), nil) vm.ParseBlockF = nil @@ -189,7 +188,7 @@ func TestEngineQuery(t *testing.T) { } *asked = true *getRequestID = requestID - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blk.ID().Equals(blkID) { @@ -197,7 +196,7 @@ func TestEngineQuery(t *testing.T) { } } - te.PullQuery(vdr.ID(), 15, blk.ID()) + te.PullQuery(vdr, 15, blk.ID()) if !*blocked { t.Fatalf("Didn't request block") } @@ -214,7 +213,7 @@ func TestEngineQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong
validator for preference") } @@ -246,7 +245,7 @@ func TestEngineQuery(t *testing.T) { } return blk, nil } - te.Put(vdr.ID(), *getRequestID, blk.ID(), blk.Bytes()) + te.Put(vdr, *getRequestID, blk.ID(), blk.Bytes()) vm.ParseBlockF = nil if !*queried { @@ -284,7 +283,7 @@ func TestEngineQuery(t *testing.T) { } *asked = true *getRequestID = requestID - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blk1.ID().Equals(blkID) { @@ -293,7 +292,7 @@ func TestEngineQuery(t *testing.T) { } blkSet := ids.Set{} blkSet.Add(blk1.ID()) - te.Chits(vdr.ID(), *queryRequestID, blkSet) + te.Chits(vdr, *queryRequestID, blkSet) *queried = false sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { @@ -303,7 +302,7 @@ func TestEngineQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -330,7 +329,7 @@ func TestEngineQuery(t *testing.T) { return blk1, nil } - te.Put(vdr.ID(), *getRequestID, blk1.ID(), blk1.Bytes()) + te.Put(vdr, *getRequestID, blk1.ID(), blk1.Bytes()) vm.ParseBlockF = nil if blk1.Status() != choices.Accepted { @@ -342,7 +341,7 @@ func TestEngineQuery(t *testing.T) { _ = te.polls.String() // Shouldn't panic - te.QueryFailed(vdr.ID(), *queryRequestID) + te.QueryFailed(vdr, *queryRequestID) if len(te.blocked) != 0 { t.Fatalf("Should have finished blocking") } @@ -360,16 +359,16 @@ func TestEngineMultipleQuery(t *testing.T) { ConcurrentRepolls: 1, } - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) - vdr2 := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr0) - vals.Add(vdr1) - vals.Add(vdr2) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + vdr2 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) + vals.AddWeight(vdr2, 1) sender := &common.SenderTest{} sender.T = t @@ -428,7 +427,7 @@ func TestEngineMultipleQuery(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -470,7 +469,7 @@ func TestEngineMultipleQuery(t *testing.T) { } *asked = true *getRequestID = requestID - if !vdr0.ID().Equals(inVdr) { + if !vdr0.Equals(inVdr) { t.Fatalf("Asking wrong validator for block") } if !blk1.ID().Equals(blkID) { @@ -479,8 +478,8 @@ func TestEngineMultipleQuery(t *testing.T) { } blkSet := ids.Set{} blkSet.Add(blk1.ID()) - te.Chits(vdr0.ID(), *queryRequestID, blkSet) - te.Chits(vdr1.ID(), *queryRequestID, blkSet) + te.Chits(vdr0, *queryRequestID, blkSet) + te.Chits(vdr1, *queryRequestID, blkSet) vm.ParseBlockF = func(b []byte) (snowman.Block, error) { vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { @@ -506,7 +505,7 @@ func TestEngineMultipleQuery(t *testing.T) { *queried = true *secondQueryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -514,12 +513,12 @@ func TestEngineMultipleQuery(t *testing.T) { t.Fatalf("Asking for wrong block") } } - te.Put(vdr0.ID(), *getRequestID, blk1.ID(), blk1.Bytes()) + te.Put(vdr0, *getRequestID, blk1.ID(), 
blk1.Bytes()) // Should be dropped because the query was already filled blkSet = ids.Set{} blkSet.Add(blk0.ID()) - te.Chits(vdr2.ID(), *queryRequestID, blkSet) + te.Chits(vdr2, *queryRequestID, blkSet) if blk1.Status() != choices.Accepted { t.Fatalf("Should have executed block") @@ -579,7 +578,7 @@ func TestEngineAbandonResponse(t *testing.T) { } te.issue(blk) - te.QueryFailed(vdr.ID(), 1) + te.QueryFailed(vdr, 1) if len(te.blocked) != 0 { t.Fatalf("Should have removed blocking event") @@ -601,7 +600,7 @@ func TestEngineFetchBlock(t *testing.T) { added := new(bool) sender.PutF = func(inVdr ids.ShortID, requestID uint32, blkID ids.ID, blk []byte) { - if !vdr.ID().Equals(inVdr) { + if !vdr.Equals(inVdr) { t.Fatalf("Wrong validator") } if requestID != 123 { @@ -613,7 +612,7 @@ func TestEngineFetchBlock(t *testing.T) { *added = true } - te.Get(vdr.ID(), 123, gBlk.ID()) + te.Get(vdr, 123, gBlk.ID()) if !*added { t.Fatalf("Should have sent block to peer") @@ -656,7 +655,7 @@ func TestEnginePushQuery(t *testing.T) { t.Fatalf("Sent chit multiple times") } *chitted = true - if !inVdr.Equals(vdr.ID()) { + if !inVdr.Equals(vdr) { t.Fatalf("Asking wrong validator for preference") } if requestID != 20 { @@ -678,7 +677,7 @@ func TestEnginePushQuery(t *testing.T) { } *queried = true vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -687,7 +686,7 @@ func TestEnginePushQuery(t *testing.T) { } } - te.PushQuery(vdr.ID(), 20, blk.ID(), blk.Bytes()) + te.PushQuery(vdr, 20, blk.ID(), blk.Bytes()) if !*chitted { t.Fatalf("Should have sent a chit to the peer") @@ -719,7 +718,7 @@ func TestEngineBuildBlock(t *testing.T) { } *queried = true vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -745,7 +744,7 @@ func TestEngineRepoll(t *testing.T) { } *queried = true vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -770,16 +769,16 @@ func TestVoteCanceling(t *testing.T) { ConcurrentRepolls: 1, } - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) - vdr2 := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr0) - vals.Add(vdr1) - vals.Add(vdr2) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + vdr2 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) + vals.AddWeight(vdr2, 1) sender := &common.SenderTest{} sender.T = t @@ -838,7 +837,7 @@ func TestVoteCanceling(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -853,7 +852,7 @@ func TestVoteCanceling(t *testing.T) { t.Fatalf("Shouldn't have finished blocking issue") } - te.QueryFailed(vdr0.ID(), *queryRequestID) + te.QueryFailed(vdr0, *queryRequestID) if te.polls.Len() != 1 { t.Fatalf("Shouldn't have finished blocking issue") @@ -863,7 +862,7 @@ func TestVoteCanceling(t *testing.T) { sender.PullQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID) { *repolled = true } - te.QueryFailed(vdr1.ID(), *queryRequestID) + te.QueryFailed(vdr1, *queryRequestID) if !*repolled { t.Fatalf("Should have finished blocking issue and repolled 
the network") @@ -958,13 +957,13 @@ func TestEngineAbandonQuery(t *testing.T) { *reqID = requestID } - te.PullQuery(vdr.ID(), 0, blkID) + te.PullQuery(vdr, 0, blkID) if len(te.blocked) != 1 { t.Fatalf("Should have blocked on request") } - te.GetFailed(vdr.ID(), *reqID) + te.GetFailed(vdr, *reqID) if len(te.blocked) != 0 { t.Fatalf("Should have removed request") @@ -1008,13 +1007,13 @@ func TestEngineAbandonChit(t *testing.T) { fakeBlkIDSet := ids.Set{} fakeBlkIDSet.Add(fakeBlkID) - te.Chits(vdr.ID(), 0, fakeBlkIDSet) + te.Chits(vdr, 0, fakeBlkIDSet) if len(te.blocked) != 1 { t.Fatalf("Should have blocked on request") } - te.GetFailed(vdr.ID(), *reqID) + te.GetFailed(vdr, *reqID) if len(te.blocked) != 0 { t.Fatalf("Should have removed request") @@ -1075,7 +1074,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, blockingBlk.ID(), blockingBlk.Bytes()) + te.PushQuery(vdr, 0, blockingBlk.ID(), blockingBlk.Bytes()) if len(te.blocked) != 3 { t.Fatalf("Both inserts should be blocking in addition to the chit request") @@ -1131,7 +1130,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr.ID()) + vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -1153,7 +1152,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { } blockingBlkIDSet := ids.Set{} blockingBlkIDSet.Add(blockingBlk.ID()) - te.Chits(vdr.ID(), *queryRequestID, blockingBlkIDSet) + te.Chits(vdr, *queryRequestID, blockingBlkIDSet) if len(te.blocked) != 2 { t.Fatalf("The insert and the chit should be blocking") @@ -1188,12 +1187,12 @@ func TestEngineRetryFetch(t *testing.T) { *reqID = requestID } - te.PullQuery(vdr.ID(), 0, missingBlk.ID()) + te.PullQuery(vdr, 0, missingBlk.ID()) vm.CantGetBlock = true sender.GetF = nil - te.GetFailed(vdr.ID(), *reqID) + te.GetFailed(vdr, *reqID) vm.CantGetBlock = false @@ -1202,7 +1201,7 @@ func TestEngineRetryFetch(t *testing.T) { *called = true } - te.PullQuery(vdr.ID(), 0, missingBlk.ID()) + te.PullQuery(vdr, 0, missingBlk.ID()) vm.CantGetBlock = true sender.GetF = nil @@ -1263,7 +1262,7 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { votes := ids.Set{} votes.Add(invalidBlkID) - te.Chits(vdr.ID(), *reqID, votes) + te.Chits(vdr, *reqID, votes) vm.GetBlockF = nil @@ -1308,8 +1307,8 @@ func TestEngineGossip(t *testing.T) { func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { vdr, vdrs, sender, vm, te, gBlk := setup(t) - secondVdr := validators.GenerateRandomValidator(1) - vdrs.Add(secondVdr) + secondVdr := ids.GenerateTestShortID() + vdrs.AddWeight(secondVdr, 1) sender.Default(true) @@ -1357,7 +1356,7 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { reqID := new(uint32) sender.GetF = func(reqVdr ids.ShortID, requestID uint32, blkID ids.ID) { *reqID = requestID - if !reqVdr.Equals(vdr.ID()) { + if !reqVdr.Equals(vdr) { t.Fatalf("Wrong validator requested") } if !blkID.Equals(missingBlk.ID()) { @@ -1365,9 +1364,9 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes()) + te.PushQuery(vdr, 0, pendingBlk.ID(), pendingBlk.Bytes()) - te.Put(secondVdr.ID(), *reqID, missingBlk.ID(), []byte{3}) + te.Put(secondVdr, *reqID, missingBlk.ID(), []byte{3}) *parsed = false vm.ParseBlockF = func(b []byte) (snowman.Block, error) { @@ 
-1394,7 +1393,7 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { missingBlk.StatusV = choices.Processing - te.Put(vdr.ID(), *reqID, missingBlk.ID(), missingBlk.Bytes()) + te.Put(vdr, *reqID, missingBlk.ID(), missingBlk.Bytes()) pref := te.Consensus.Preference() if !pref.Equals(pendingBlk.ID()) { @@ -1453,7 +1452,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { reqID := new(uint32) sender.GetF = func(reqVdr ids.ShortID, requestID uint32, blkID ids.ID) { *reqID = requestID - if !reqVdr.Equals(vdr.ID()) { + if !reqVdr.Equals(vdr) { t.Fatalf("Wrong validator requested") } if !blkID.Equals(missingBlk.ID()) { @@ -1461,12 +1460,12 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } } - te.PushQuery(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes()) + te.PushQuery(vdr, 0, pendingBlk.ID(), pendingBlk.Bytes()) sender.GetF = nil sender.CantGet = false - te.PushQuery(vdr.ID(), *reqID, randomBlkID, []byte{3}) + te.PushQuery(vdr, *reqID, randomBlkID, []byte{3}) *parsed = false vm.ParseBlockF = func(b []byte) (snowman.Block, error) { @@ -1491,7 +1490,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { sender.CantPushQuery = false sender.CantChits = false - te.Put(vdr.ID(), *reqID, missingBlk.ID(), missingBlk.Bytes()) + te.Put(vdr, *reqID, missingBlk.ID(), missingBlk.Bytes()) pref := te.Consensus.Preference() if !pref.Equals(pendingBlk.ID()) { @@ -1504,12 +1503,11 @@ func TestEngineAggressivePolling(t *testing.T) { config.Params.ConcurrentRepolls = 2 - vdr := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr) + vdr := ids.GenerateTestShortID() + vals.AddWeight(vdr, 1) sender := &common.SenderTest{} sender.T = t @@ -1590,7 +1588,7 @@ func TestEngineAggressivePolling(t *testing.T) { numPulled := new(int) sender.PullQueryF = func(_ ids.ShortSet, _ uint32, _ ids.ID) { *numPulled++ } - te.Put(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes()) + te.Put(vdr, 0, pendingBlk.ID(), pendingBlk.Bytes()) if *numPushed != 1 { t.Fatalf("Should have initially sent a push query") @@ -1612,14 +1610,14 @@ func TestEngineDoubleChit(t *testing.T) { BetaRogue: 2, } - vdr0 := validators.GenerateRandomValidator(1) - vdr1 := validators.GenerateRandomValidator(1) - vals := validators.NewSet() config.Validators = vals - vals.Add(vdr0) - vals.Add(vdr1) + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + + vals.AddWeight(vdr0, 1) + vals.AddWeight(vdr1, 1) sender := &common.SenderTest{} sender.T = t @@ -1678,7 +1676,7 @@ func TestEngineDoubleChit(t *testing.T) { *queried = true *queryRequestID = requestID vdrSet := ids.ShortSet{} - vdrSet.Add(vdr0.ID(), vdr1.ID()) + vdrSet.Add(vdr0, vdr1) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } @@ -1707,19 +1705,19 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr0.ID(), *queryRequestID, blkSet) + te.Chits(vdr0, *queryRequestID, blkSet) if status := blk.Status(); status != choices.Processing { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr0.ID(), *queryRequestID, blkSet) + te.Chits(vdr0, *queryRequestID, blkSet) if status := blk.Status(); status != choices.Processing { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) } - te.Chits(vdr1.ID(), *queryRequestID, blkSet) + te.Chits(vdr1, *queryRequestID, blkSet) if status := blk.Status(); status != choices.Accepted { t.Fatalf("Wrong 
status: %s ; expected: %s", status, choices.Accepted) diff --git a/snow/networking/awaiting_connections.go b/snow/networking/awaiting_connections.go deleted file mode 100644 index 5887cea6a8fa..000000000000 --- a/snow/networking/awaiting_connections.go +++ /dev/null @@ -1,46 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package networking - -import ( - stdmath "math" - - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/snow/validators" - "github.com/ava-labs/gecko/utils/math" -) - -// AwaitingConnections ... -type AwaitingConnections struct { - Requested validators.Set - WeightRequired uint64 - Finish func() - - weight uint64 -} - -// Add ... -func (aw *AwaitingConnections) Add(conn ids.ShortID) { - vdr, ok := aw.Requested.Get(conn) - if !ok { - return - } - weight, err := math.Add64(vdr.Weight(), aw.weight) - if err != nil { - weight = stdmath.MaxUint64 - } - aw.weight = weight -} - -// Remove ... -func (aw *AwaitingConnections) Remove(conn ids.ShortID) { - vdr, ok := aw.Requested.Get(conn) - if !ok { - return - } - aw.weight -= vdr.Weight() -} - -// Ready ... -func (aw *AwaitingConnections) Ready() bool { return aw.weight >= aw.WeightRequired } diff --git a/snow/networking/router/handler_test.go b/snow/networking/router/handler_test.go index e7fdb1a3357c..cf904549cf00 100644 --- a/snow/networking/router/handler_test.go +++ b/snow/networking/router/handler_test.go @@ -34,8 +34,8 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { handler := &Handler{} vdrs := validators.NewSet() - vdr0 := validators.GenerateRandomValidator(1) - vdrs.Add(vdr0) + vdr0 := ids.GenerateTestShortID() + vdrs.AddWeight(vdr0, 1) handler.Initialize( &engine, vdrs, diff --git a/snow/networking/throttler/throttler_test.go b/snow/networking/throttler/throttler_test.go index 835385e87dc2..3b16d64a14fd 100644 --- a/snow/networking/throttler/throttler_test.go +++ b/snow/networking/throttler/throttler_test.go @@ -14,10 +14,12 @@ import ( func TestEWMAThrottler(t *testing.T) { vdrs := validators.NewSet() - validator0 := validators.GenerateRandomValidator(1) - validator1 := validators.GenerateRandomValidator(1) - vdrs.Add(validator0) - vdrs.Add(validator1) + + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + + vdrs.AddWeight(vdr0, 1) + vdrs.AddWeight(vdr1, 1) maxMessages := uint32(16) msgPortion := 0.25 @@ -25,21 +27,21 @@ func TestEWMAThrottler(t *testing.T) { period := time.Second throttler := NewEWMAThrottler(vdrs, maxMessages, msgPortion, cpuPortion, period, logging.NoLog{}) - throttler.UtilizeCPU(validator0.ID(), 25*time.Millisecond) - throttler.UtilizeCPU(validator1.ID(), 5*time.Second) + throttler.UtilizeCPU(vdr0, 25*time.Millisecond) + throttler.UtilizeCPU(vdr1, 5*time.Second) - cpu0, throttle0 := throttler.GetUtilization(validator0.ID()) - cpu1, throttle1 := throttler.GetUtilization(validator1.ID()) + cpu0, throttle0 := throttler.GetUtilization(vdr0) + cpu1, throttle1 := throttler.GetUtilization(vdr1) if throttle0 { - t.Fatalf("Should not throttle validator0 with no pending messages") + t.Fatalf("Should not throttle vdr0 with no pending messages") } if throttle1 { - t.Fatalf("Should not throttle validator1 with no pending messages") + t.Fatalf("Should not throttle vdr1 with no pending messages") } if cpu1 <= cpu0 { - t.Fatalf("CPU utilization for validator1: %f should be greater than that of validator0: %f", cpu1, cpu0) + t.Fatalf("CPU utilization for vdr1: %f should be greater than that of vdr0: %f", cpu1, 
cpu0) } // Test that throttler prevents unknown validators from taking up half the message queue @@ -55,14 +57,15 @@ func TestEWMAThrottler(t *testing.T) { func TestThrottlerPrunesSpenders(t *testing.T) { vdrs := validators.NewSet() - staker0 := validators.GenerateRandomValidator(1) - staker1 := validators.GenerateRandomValidator(1) - nonStaker0 := ids.NewShortID([20]byte{1}) - nonStaker1 := ids.NewShortID([20]byte{2}) - nonStaker2 := ids.NewShortID([20]byte{3}) - vdrs.Add(staker0) - vdrs.Add(staker1) + staker0 := ids.GenerateTestShortID() + staker1 := ids.GenerateTestShortID() + nonStaker0 := ids.GenerateTestShortID() + nonStaker1 := ids.GenerateTestShortID() + nonStaker2 := ids.GenerateTestShortID() + + vdrs.AddWeight(staker0, 1) + vdrs.AddWeight(staker1, 1) maxMessages := uint32(1024) cpuPortion := 0.25 @@ -85,10 +88,10 @@ func TestThrottlerPrunesSpenders(t *testing.T) { // Ensure that the validators and the non-staker heard from in the past [intervalsUntilPruning] were not pruned ewmat := throttler.(*ewmaThrottler) - if _, ok := ewmat.spenders[staker0.ID().Key()]; !ok { + if _, ok := ewmat.spenders[staker0.Key()]; !ok { t.Fatal("Staker was pruned from the set of spenders") } - if _, ok := ewmat.spenders[staker1.ID().Key()]; !ok { + if _, ok := ewmat.spenders[staker1.Key()]; !ok { t.Fatal("Staker was pruned from the set of spenders") } if _, ok := ewmat.spenders[nonStaker0.Key()]; !ok { @@ -104,12 +107,13 @@ func TestThrottlerPrunesSpenders(t *testing.T) { func TestThrottleStaker(t *testing.T) { vdrs := validators.NewSet() - staker0 := validators.GenerateRandomValidator(1) - staker1 := validators.GenerateRandomValidator(1) - nonStaker0 := ids.NewShortID([20]byte{1}) - vdrs.Add(staker0) - vdrs.Add(staker1) + staker0 := ids.GenerateTestShortID() + staker1 := ids.GenerateTestShortID() + nonStaker0 := ids.GenerateTestShortID() + + vdrs.AddWeight(staker0, 1) + vdrs.AddWeight(staker1, 1) maxMessages := uint32(16) msgPortion := 0.25 @@ -124,12 +128,12 @@ func TestThrottleStaker(t *testing.T) { // staker0 consumes its own allotment plus 10 messages from the shared pool for i := 0; i < 12; i++ { - throttler.AddMessage(staker0.ID()) + throttler.AddMessage(staker0) } for i := 0; i < 3; i++ { - throttler.AddMessage(staker1.ID()) - if _, throttle := throttler.GetUtilization(staker1.ID()); throttle { + throttler.AddMessage(staker1) + if _, throttle := throttler.GetUtilization(staker1); throttle { t.Fatal("Should not throttle message from staker until it has exceeded its own allotment") } } @@ -139,17 +143,19 @@ func TestThrottleStaker(t *testing.T) { throttler.AddMessage(nonStaker0) throttler.AddMessage(nonStaker0) - if _, throttle := throttler.GetUtilization(staker1.ID()); !throttle { + if _, throttle := throttler.GetUtilization(staker1); !throttle { t.Fatal("Should have throttled message from staker after it exceeded its own allotment and the shared pool was empty") } } func TestCalculatesEWMA(t *testing.T) { vdrs := validators.NewSet() - validator0 := validators.GenerateRandomValidator(1) - validator1 := validators.GenerateRandomValidator(1) - vdrs.Add(validator0) - vdrs.Add(validator1) + + vdr0 := ids.GenerateTestShortID() + vdr1 := ids.GenerateTestShortID() + + vdrs.AddWeight(vdr0, 1) + vdrs.AddWeight(vdr1, 1) maxMessages := uint32(16) msgPortion := 0.25 @@ -172,12 +178,12 @@ func TestCalculatesEWMA(t *testing.T) { ewma += spend ewma = time.Duration(float64(ewma) / decayFactor) - throttler.UtilizeCPU(validator0.ID(), spend) + throttler.UtilizeCPU(vdr0, spend) throttler.EndInterval() } ewmat := 
throttler.(*ewmaThrottler) - sp := ewmat.getSpender(validator0.ID()) + sp := ewmat.getSpender(vdr0) if sp.cpuEWMA != ewma { t.Fatalf("EWMA Throttler calculated EWMA incorrectly, expected: %s, but calculated: %s", ewma, sp.cpuEWMA) } diff --git a/snow/validators/manager.go b/snow/validators/manager.go index ba8eec611289..da83e21e0a6a 100644 --- a/snow/validators/manager.go +++ b/snow/validators/manager.go @@ -12,7 +12,7 @@ import ( // Manager holds the validator set of each subnet type Manager interface { // Set a subnet's validator set - Set(ids.ID, Set) + Set(ids.ID, Set) error // AddWeight adds weight to a given validator on the given subnet AddWeight(ids.ID, ids.ShortID, uint64) error @@ -40,11 +40,18 @@ type manager struct { subnetToVdrs map[[32]byte]Set } -func (m *manager) Set(subnetID ids.ID, vdrSet Set) { +func (m *manager) Set(subnetID ids.ID, newSet Set) error { m.lock.Lock() defer m.lock.Unlock() - m.subnetToVdrs[subnetID.Key()] = vdrSet + subnetKey := subnetID.Key() + + oldSet, exists := m.subnetToVdrs[subnetKey] + if !exists { + m.subnetToVdrs[subnetKey] = newSet + return nil + } + return oldSet.Set(newSet.List()) } // AddWeight implements the Manager interface. diff --git a/vms/platformvm/add_delegator_tx.go b/vms/platformvm/add_delegator_tx.go index af5cf55dbda6..1c1f08a16add 100644 --- a/vms/platformvm/add_delegator_tx.go +++ b/vms/platformvm/add_delegator_tx.go @@ -23,8 +23,9 @@ import ( ) var ( - errInvalidState = errors.New("generated output isn't valid state") - errInvalidAmount = errors.New("invalid amount") + errDelegatorSubset = errors.New("delegator's time range must be a subset of the validator's time range") + errInvalidState = errors.New("generated output isn't valid state") + errInvalidAmount = errors.New("invalid amount") _ UnsignedProposalTx = &UnsignedAddDelegatorTx{} _ TimedTx = &UnsignedAddDelegatorTx{} @@ -134,7 +135,18 @@ func (tx *UnsignedAddDelegatorTx) SemanticVerify( return nil, nil, nil, nil, tempError{err} } if isValidator && !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) { - return nil, nil, nil, nil, permError{errDSValidatorSubset} + return nil, nil, nil, nil, permError{errDelegatorSubset} + } + if !isValidator { + // Ensure that the period this delegator delegates is a subset of the + // time the validator will validate.
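+ // A minimal illustrative sketch of this containment check (the helper name
+ // below is hypothetical; it assumes BoundedBy(lower, upper) reports whether
+ // the delegator's interval sits inside [lower, upper]):
+ //
+ //   boundedBy := func(start, end, lower, upper time.Time) bool {
+ //       return !start.Before(lower) && !end.After(upper)
+ //   }
+ //
+ // i.e. a delegator over [start, end] is only accepted when the validator's
+ // [StartTime, EndTime] fully contains that interval.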
+ vdr, willBeValidator, err := vm.willBeValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) + if err != nil { + return nil, nil, nil, nil, tempError{err} + } + if !willBeValidator || !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) { + return nil, nil, nil, nil, permError{errDelegatorSubset} + } } outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.Stake)) @@ -160,7 +172,7 @@ func (tx *UnsignedAddDelegatorTx) SemanticVerify( } // If this proposal is committed, update the pending validator set to include the delegator - if err := vm.addStaker(onCommitDB, constants.PrimaryNetworkID, stx); err != nil { + if err := vm.enqueueStaker(onCommitDB, constants.PrimaryNetworkID, stx); err != nil { return nil, nil, nil, nil, tempError{err} } diff --git a/vms/platformvm/add_delegator_tx_test.go b/vms/platformvm/add_delegator_tx_test.go index 2e1322cabfbb..3ffd3e7b93fc 100644 --- a/vms/platformvm/add_delegator_tx_test.go +++ b/vms/platformvm/add_delegator_tx_test.go @@ -176,14 +176,7 @@ func TestAddDelegatorTxSemanticVerify(t *testing.T) { []*crypto.PrivateKeySECP256K1R{keys[0]}, // key ); err != nil { t.Fatal(err) - } else if err := vm.putPendingValidators( - db, - &EventHeap{ - SortByStartTime: true, - Txs: []*Tx{tx}, - }, - constants.PrimaryNetworkID, - ); err != nil { + } else if err := vm.addStaker(db, constants.PrimaryNetworkID, tx); err != nil { t.Fatal(err) } } @@ -302,25 +295,27 @@ func TestAddDelegatorTxSemanticVerify(t *testing.T) { } for _, tt := range tests { - vdb.Abort() - tx, err := vm.newAddDelegatorTx( - tt.stakeAmount, - tt.startTime, - tt.endTime, - tt.nodeID, - tt.rewardAddress, - tt.feeKeys, - ) - if err != nil { - t.Fatalf("couldn't build tx in test '%s': %s", tt.description, err) - } - if tt.setup != nil { - tt.setup(vdb) - } - if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vdb, tx); err != nil && !tt.shouldErr { - t.Fatalf("test '%s' shouldn't have errored but got %s", tt.description, err) - } else if err == nil && tt.shouldErr { - t.Fatalf("expected test '%s' to error but got none", tt.description) - } + t.Run(tt.description, func(t *testing.T) { + vdb.Abort() + tx, err := vm.newAddDelegatorTx( + tt.stakeAmount, + tt.startTime, + tt.endTime, + tt.nodeID, + tt.rewardAddress, + tt.feeKeys, + ) + if err != nil { + t.Fatalf("couldn't build tx: %s", err) + } + if tt.setup != nil { + tt.setup(vdb) + } + if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vdb, tx); err != nil && !tt.shouldErr { + t.Fatalf("shouldn't have errored but got %s", err) + } else if err == nil && tt.shouldErr { + t.Fatalf("expected test to error but got none") + } + }) } } diff --git a/vms/platformvm/add_subnet_validator_tx.go b/vms/platformvm/add_subnet_validator_tx.go index d23f40a15fba..2a995038b7b1 100644 --- a/vms/platformvm/add_subnet_validator_tx.go +++ b/vms/platformvm/add_subnet_validator_tx.go @@ -108,21 +108,38 @@ func (tx *UnsignedAddSubnetValidatorTx) SemanticVerify( return nil, nil, nil, nil, tempError{err} } if isValidator && !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) { - return nil, nil, nil, nil, - permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating primary network [%v, %v]", - tx.StartTime(), tx.EndTime(), - vdr.StartTime(), vdr.EndTime())} + return nil, nil, nil, nil, permError{errDSValidatorSubset} + } + if !isValidator { + // Ensure that the period this validator validates the specified subnet + // is a subset of the time they will validate the primary network.
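+ // The membership test here is two-phase, as sketched below: vm.isValidator
+ // covers the current validator set, while vm.willBeValidator covers the
+ // pending (not yet started) set. A rough sketch of the intent, using the
+ // helper names this patch introduces:
+ //
+ //   if currently validating:   subnet period must sit inside the current period
+ //   else if pending validator: subnet period must sit inside the pending period
+ //   else:                      reject with errDSValidatorSubset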
+ vdr, willBeValidator, err := vm.willBeValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) + if err != nil { + return nil, nil, nil, nil, tempError{err} + } + if !willBeValidator || !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) { + return nil, nil, nil, nil, permError{errDSValidatorSubset} + } } - // Ensure the proposed validator is not already a validator of the specified subnet - vdr, isValidator, err = vm.isValidator(db, tx.Validator.Subnet, tx.Validator.NodeID) + // Ensure the proposed validator is not already validating the specified subnet + _, isValidator, err = vm.isValidator(db, tx.Validator.Subnet, tx.Validator.NodeID) if err != nil { return nil, nil, nil, nil, tempError{err} } - if isValidator && !tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) { - return nil, nil, nil, nil, - permError{fmt.Errorf("already validating subnet between [%v, %v]", - vdr.StartTime(), vdr.EndTime())} + if isValidator { + return nil, nil, nil, nil, permError{fmt.Errorf("already validating the specified subnet")} + } + + // Ensure the proposed validator is not already slated to validate the + // specified subnet + _, willBeValidator, err := vm.willBeValidator(db, tx.Validator.Subnet, tx.Validator.NodeID) + if err != nil { + return nil, nil, nil, nil, tempError{err} + } + if willBeValidator { + return nil, nil, nil, nil, permError{fmt.Errorf("already slated to validate the specified subnet")} } baseTxCredsLen := len(stx.Creds) - 1 @@ -156,7 +173,7 @@ func (tx *UnsignedAddSubnetValidatorTx) SemanticVerify( return nil, nil, nil, nil, tempError{err} } // Add the validator to the set of pending validators - if err := vm.addStaker(onCommitDB, tx.Validator.Subnet, stx); err != nil { + if err := vm.enqueueStaker(onCommitDB, tx.Validator.Subnet, stx); err != nil { return nil, nil, nil, nil, tempError{err} } diff --git a/vms/platformvm/add_subnet_validator_tx_test.go b/vms/platformvm/add_subnet_validator_tx_test.go index e5cc8f84e432..39c3f1084075 100644 --- a/vms/platformvm/add_subnet_validator_tx_test.go +++ b/vms/platformvm/add_subnet_validator_tx_test.go @@ -261,14 +261,7 @@ func TestAddSubnetValidatorTxSemanticVerify(t *testing.T) { t.Fatal("should have failed because validator not in the current or pending validator sets of the primary network") } - if err := vm.putPendingValidators( - vm.DB, - &EventHeap{ - SortByStartTime: true, - Txs: []*Tx{addDSTx}, - }, - constants.PrimaryNetworkID, - ); err != nil { + if err := vm.addStaker(vm.DB, constants.PrimaryNetworkID, addDSTx); err != nil { t.Fatal(err) } // Node with ID key.PublicKey().Address() now a pending validator for primary network @@ -328,8 +321,8 @@ func TestAddSubnetValidatorTxSemanticVerify(t *testing.T) { } if tx, err := vm.newAddSubnetValidatorTx( - defaultWeight, // weight - uint64(newTimestamp.Unix()), // start time + defaultWeight, // weight + uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time nodeID, // node ID testSubnet1.ID(), // subnet ID @@ -351,18 +344,12 @@ func TestAddSubnetValidatorTxSemanticVerify(t *testing.T) { defaultWeight, // weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ); err != nil { t.Fatal(err) - }
else if err := vm.putCurrentValidators(vm.DB, - &EventHeap{ - SortByStartTime: false, - Txs: []*Tx{tx}, - }, - testSubnet1.ID(), - ); err != nil { + } else if err := vm.addStaker(vm.DB, testSubnet1.ID(), tx); err != nil { t.Fatal(err) } // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID @@ -371,26 +358,21 @@ func TestAddSubnetValidatorTxSemanticVerify(t *testing.T) { defaultWeight, // weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ); err != nil { t.Fatal(err) } else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { t.Fatal("should have failed verification because validator already validating the specified subnet") - } else if err := vm.putCurrentValidators(vm.DB, // reset validator heap - &EventHeap{ - SortByStartTime: false, - }, - testSubnet1.ID(), - ); err != nil { + } else if err := vm.removeStaker(vm.DB, testSubnet1.ID(), tx); err != nil { t.Fatal(err) } // Case: Too many signatures if tx, err := vm.newAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time nodeID, // node ID testSubnet1.ID(), // subnet ID @@ -403,8 +385,8 @@ func TestAddSubnetValidatorTxSemanticVerify(t *testing.T) { // Case: Too few signatures tx, err := vm.newAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time nodeID, // node ID testSubnet1.ID(), // subnet ID @@ -424,8 +406,8 @@ func TestAddSubnetValidatorTxSemanticVerify(t *testing.T) { // Case: Control Signature from invalid key (keys[3] is not a control key) tx, err = vm.newAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time nodeID, // node ID testSubnet1.ID(), // subnet ID @@ -447,21 +429,15 @@ func TestAddSubnetValidatorTxSemanticVerify(t *testing.T) { // Case: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet if tx, err := vm.newAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix())+1, // start time + defaultWeight, // weight + uint64(defaultGenesisTime.Unix())+1, // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time nodeID, // node ID testSubnet1.ID(), // subnet ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ); err != nil { t.Fatal(err) - } else if err = vm.putPendingValidators(vm.DB, // Node ID nodeIDKey.PublicKey().Address() now pending - &EventHeap{ // validator for subnet testSubnet1.ID - SortByStartTime: true, - Txs: []*Tx{tx}, - }, - testSubnet1.ID(), - ); err != nil { + } else if err = vm.addStaker(vm.DB, testSubnet1.ID(), tx); err != nil { t.Fatal(err) } else if _, _, _, _, err = tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); 
err == nil { t.Fatal("should have failed verification because validator already in pending validator set of the specified subnet") diff --git a/vms/platformvm/add_validator_tx.go b/vms/platformvm/add_validator_tx.go index 635fa209911e..4631123c7126 100644 --- a/vms/platformvm/add_validator_tx.go +++ b/vms/platformvm/add_validator_tx.go @@ -134,13 +134,24 @@ func (tx *UnsignedAddValidatorTx) SemanticVerify( startTime)} } - vdr, isValidator, err := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) + _, isValidator, err := vm.isValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) if err != nil { return nil, nil, nil, nil, tempError{err} } - if isValidator && tx.Validator.BoundedBy(vdr.StartTime(), vdr.EndTime()) { - return nil, nil, nil, nil, permError{fmt.Errorf("validator %s already is already a primary network validator from %s to %s", - tx.Validator.NodeID, vdr.StartTime(), vdr.EndTime())} + if isValidator { + return nil, nil, nil, nil, permError{fmt.Errorf("validator %s is already a primary network validator", + tx.Validator.NodeID)} + } + + // Ensure the proposed validator is not already slated to validate the + // primary network. + _, willBeValidator, err := vm.willBeValidator(db, constants.PrimaryNetworkID, tx.Validator.NodeID) + if err != nil { + return nil, nil, nil, nil, tempError{err} + } + if willBeValidator { + return nil, nil, nil, nil, permError{fmt.Errorf("validator %s is already a primary network validator", + tx.Validator.NodeID)} } outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.Stake)) @@ -166,7 +177,7 @@ func (tx *UnsignedAddValidatorTx) SemanticVerify( } // Add validator to set of pending validators - if err := vm.addStaker(onCommitDB, constants.PrimaryNetworkID, stx); err != nil { + if err := vm.enqueueStaker(onCommitDB, constants.PrimaryNetworkID, stx); err != nil { return nil, nil, nil, nil, tempError{err} } diff --git a/vms/platformvm/add_validator_tx_test.go b/vms/platformvm/add_validator_tx_test.go index 0e7469c38c44..64c795ce5875 100644 --- a/vms/platformvm/add_validator_tx_test.go +++ b/vms/platformvm/add_validator_tx_test.go @@ -301,23 +301,17 @@ func TestAddValidatorTxSemanticVerify(t *testing.T) { } startTime := defaultGenesisTime.Add(1 * time.Second) tx, err := vm.newAddValidatorTx( - vm.minStake, // stake amount - uint64(startTime.Unix()), // start time + vm.minStake, // stake amount + uint64(startTime.Unix()), // start time uint64(startTime.Add(MinimumStakingDuration).Unix()), // end time - nodeID, // node ID + nodeID, // node ID key2.PublicKey().Address(), // reward address NumberOfShares, // shares []*crypto.PrivateKeySECP256K1R{keys[0]}, // key ) if err != nil { t.Fatal(err) - } else if err := vm.putPendingValidators(vDB, // Put validator in pending validator set - &EventHeap{ - SortByStartTime: true, - Txs: []*Tx{tx}, - }, - constants.PrimaryNetworkID, - ); err != nil { + } else if err := vm.addStaker(vDB, constants.PrimaryNetworkID, tx); err != nil { t.Fatal(err) } else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vDB, tx); err == nil { t.Fatal("should have failed because validator in pending validator set") diff --git a/vms/platformvm/advance_time_tx.go b/vms/platformvm/advance_time_tx.go index ce87fff2d2a9..885d1063cc50 100644 --- a/vms/platformvm/advance_time_tx.go +++ b/vms/platformvm/advance_time_tx.go @@ -84,27 +84,9 @@ func (tx *UnsignedAdvanceTimeTx) SemanticVerify( // If this block is committed, update the
validator sets // onAbortDB or onCommitDB should commit (flush to vm.DB) before this is called onCommitFunc := func() error { + // For each Subnet, update the node's validator manager to reflect current Subnet membership - return vm.updateVdrMgr() - - // If this node started validating a Subnet, create the blockchains that the Subnet validates - // TODO this - // chains, err := vm.getChains(vm.DB) // all blockchains - // if err != nil { - // return err - // } - // for subnetID, validatorIDs := range startedValidating { - // if !validatorIDs.Contains(vm.Ctx.NodeID) { - // continue - // } - // for _, chain := range chains { - // unsignedChain := chain.UnsignedTx.(*UnsignedCreateChainTx) - // if bytes.Equal(subnetID[:], unsignedChain.SubnetID.Bytes()) { - // vm.createChain(chain) - // } - // } - // } - // return nil + return vm.updateVdrMgr(false) } // State doesn't change if this proposal is aborted diff --git a/vms/platformvm/advance_time_tx_test.go b/vms/platformvm/advance_time_tx_test.go index 00a7f8354f4d..aed48a128c4b 100644 --- a/vms/platformvm/advance_time_tx_test.go +++ b/vms/platformvm/advance_time_tx_test.go @@ -51,15 +51,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { t.Fatal(err) } - err = vm.putPendingValidators( - vm.DB, - &EventHeap{ - SortByStartTime: true, - Txs: []*Tx{addPendingValidatorTx}, - }, - constants.PrimaryNetworkID, - ) - if err != nil { + if err := vm.enqueueStaker(vm.DB, constants.PrimaryNetworkID, addPendingValidatorTx); err != nil { t.Fatal(err) } @@ -119,14 +111,7 @@ func TestAdvanceTimeTxUpdateValidators(t *testing.T) { t.Fatal(err) } - if err := vm.putPendingValidators( - vm.DB, - &EventHeap{ - SortByStartTime: true, - Txs: []*Tx{addPendingValidatorTx}, - }, - constants.PrimaryNetworkID, - ); err != nil { + if err := vm.enqueueStaker(vm.DB, constants.PrimaryNetworkID, addPendingValidatorTx); err != nil { t.Fatal(err) } @@ -139,28 +124,28 @@ func TestAdvanceTimeTxUpdateValidators(t *testing.T) { t.Fatal(err) } - if onCommitCurrentEvents, err := vm.getCurrentValidators(onCommit, constants.PrimaryNetworkID); err != nil { + if validatorTx, isValidator, err := vm.isValidator(onCommit, constants.PrimaryNetworkID, nodeID); err != nil { t.Fatal(err) - } else if onCommitCurrentEvents.Len() != len(keys)+1 { // Each key in [keys] is a validator to start with...then we added a validator + } else if !isValidator { t.Fatalf("Should have added the validator to the validator set") - } - - if onCommitPendingEvents, err := vm.getPendingValidators(onCommit, constants.PrimaryNetworkID); err != nil { + } else if !validatorTx.ID().Equals(addPendingValidatorTx.ID()) { + t.Fatalf("Added the wrong tx to the validator set") + } else if _, willBeValidator, err := vm.willBeValidator(onCommit, constants.PrimaryNetworkID, nodeID); err != nil { t.Fatal(err) - } else if onCommitPendingEvents.Len() != 0 { + } else if willBeValidator { t.Fatalf("Should have removed the validator from the pending validator set") } - if onAbortCurrentEvents, err := vm.getCurrentValidators(onAbort, constants.PrimaryNetworkID); err != nil { + if _, isValidator, err := vm.isValidator(onAbort, constants.PrimaryNetworkID, nodeID); err != nil { t.Fatal(err) - } else if onAbortCurrentEvents.Len() != len(keys) { + } else if isValidator { t.Fatalf("Shouldn't have added the validator to the validator set") - } - - if onAbortPendingEvents, err := vm.getPendingValidators(onAbort, constants.PrimaryNetworkID); err != nil { + } else if validatorTx, willBeValidator, err := vm.willBeValidator(onAbort, 
constants.PrimaryNetworkID, nodeID); err != nil { t.Fatal(err) - } else if onAbortPendingEvents.Len() != 1 { + } else if !willBeValidator { t.Fatalf("Shouldn't have removed the validator from the pending validator set") + } else if !validatorTx.ID().Equals(addPendingValidatorTx.ID()) { + t.Fatalf("Added the wrong tx to the pending validator set") } } diff --git a/vms/platformvm/base_tx_test.go b/vms/platformvm/base_tx_test.go index 53a76539487c..d740bb24c871 100644 --- a/vms/platformvm/base_tx_test.go +++ b/vms/platformvm/base_tx_test.go @@ -10,7 +10,7 @@ import ( ) func TestBaseTxMarshalJSON(t *testing.T) { - vm , _ := defaultVM() + vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { vm.Shutdown() diff --git a/vms/platformvm/create_subnet_tx.go b/vms/platformvm/create_subnet_tx.go index babf317f70c7..63ab11d7554c 100644 --- a/vms/platformvm/create_subnet_tx.go +++ b/vms/platformvm/create_subnet_tx.go @@ -95,8 +95,7 @@ func (tx *UnsignedCreateSubnetTx) SemanticVerify( } // Register new subnet in validator manager onAccept := func() error { - vm.vdrMgr.Set(tx.ID(), validators.NewSet()) - return nil + return vm.vdrMgr.Set(tx.ID(), validators.NewSet()) } return onAccept, nil } diff --git a/vms/platformvm/reward_validator_tx.go b/vms/platformvm/reward_validator_tx.go index d351bf29a469..d2659766e6c3 100644 --- a/vms/platformvm/reward_validator_tx.go +++ b/vms/platformvm/reward_validator_tx.go @@ -249,7 +249,7 @@ func (tx *UnsignedRewardValidatorTx) SemanticVerify( // validator set to remove the staker. onAbortDB or onCommitDB should commit // (flush to vm.DB) before this is called updateValidators := func() error { - return vm.updateVdrMgr() + return vm.updateVdrMgr(false) } return onCommitDB, onAbortDB, updateValidators, updateValidators, nil diff --git a/vms/platformvm/reward_validator_tx_test.go b/vms/platformvm/reward_validator_tx_test.go index 329e303695cf..64cf28ee887f 100644 --- a/vms/platformvm/reward_validator_tx_test.go +++ b/vms/platformvm/reward_validator_tx_test.go @@ -23,33 +23,33 @@ func TestUnsignedRewardValidatorTxSemanticVerify(t *testing.T) { vm.Ctx.Lock.Unlock() }() - currentValidators, err := vm.getCurrentValidators(vm.DB, constants.PrimaryNetworkID) + // ID of validator that should leave DS validator set next + toRemoveIntf, err := vm.nextStakerStop(vm.DB, constants.PrimaryNetworkID) if err != nil { t.Fatal(err) } - // ID of validator that should leave DS validator set next - nextToRemove := currentValidators.Peek().UnsignedTx.(*UnsignedAddValidatorTx) + toRemove := toRemoveIntf.UnsignedTx.(*UnsignedAddValidatorTx) // Case 1: Chain timestamp is wrong - if tx, err := vm.newRewardValidatorTx(nextToRemove.ID()); err != nil { + if tx, err := vm.newRewardValidatorTx(toRemove.ID()); err != nil { t.Fatal(err) - } else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { + } else if _, _, _, _, err := toRemove.SemanticVerify(vm, vm.DB, tx); err == nil { t.Fatalf("should have failed because validator end time doesn't match chain timestamp") } // Case 2: Wrong validator if tx, err := vm.newRewardValidatorTx(ids.GenerateTestID()); err != nil { t.Fatal(err) - } else if _, _, _, _, err := tx.UnsignedTx.(UnsignedProposalTx).SemanticVerify(vm, vm.DB, tx); err == nil { + } else if _, _, _, _, err := toRemove.SemanticVerify(vm, vm.DB, tx); err == nil { t.Fatalf("should have failed because validator ID is wrong") } // Case 3: Happy path // Advance chain timestamp to time that next validator leaves - if err := vm.putTimestamp(vm.DB, 
nextToRemove.EndTime()); err != nil { + if err := vm.putTimestamp(vm.DB, toRemove.EndTime()); err != nil { t.Fatal(err) } - tx, err := vm.newRewardValidatorTx(nextToRemove.ID()) + tx, err := vm.newRewardValidatorTx(toRemove.ID()) if err != nil { t.Fatal(err) } @@ -58,20 +58,16 @@ func TestUnsignedRewardValidatorTxSemanticVerify(t *testing.T) { t.Fatal(err) } - // Should be one less validator than before - oldNumValidators := len(currentValidators.Txs) - if currentValidators, err := vm.getCurrentValidators(onCommitDB, constants.PrimaryNetworkID); err != nil { - t.Fatal(err) - } else if numValidators := currentValidators.Len(); numValidators != oldNumValidators-1 { - t.Fatalf("Should be %d validators but are %d", oldNumValidators-1, numValidators) - } else if currentValidators, err = vm.getCurrentValidators(onAbortDB, constants.PrimaryNetworkID); err != nil { + // ID of validator that should leave DS validator set next + + if nextToRemove, err := vm.nextStakerStop(onCommitDB, constants.PrimaryNetworkID); err != nil { t.Fatal(err) - } else if numValidators := currentValidators.Len(); numValidators != oldNumValidators-1 { - t.Fatalf("Should be %d validators but there are %d", oldNumValidators-1, numValidators) + } else if toRemove.ID().Equals(nextToRemove.ID()) { + t.Fatalf("Should have removed the previous validator") } // check that stake/reward is given back - stakeOwners := nextToRemove.Stake[0].Out.(*secp256k1fx.TransferOutput).AddressesSet() + stakeOwners := toRemove.Stake[0].Out.(*secp256k1fx.TransferOutput).AddressesSet() // Get old balances, balances if tx abort, balances if tx committed for _, stakeOwner := range stakeOwners.List() { stakeOwnerSet := ids.ShortSet{} @@ -89,13 +85,13 @@ func TestUnsignedRewardValidatorTxSemanticVerify(t *testing.T) { if err != nil { t.Fatal(err) } - if onAbortBalance != oldBalance+nextToRemove.Validator.Weight() { + if onAbortBalance != oldBalance+toRemove.Validator.Weight() { t.Fatalf("on abort, should have got back staked amount") } - expectedReward := reward(nextToRemove.Validator.Duration(), nextToRemove.Validator.Weight(), InflationRate) - if onCommitBalance != oldBalance+expectedReward+nextToRemove.Validator.Weight() { + expectedReward := reward(toRemove.Validator.Duration(), toRemove.Validator.Weight(), InflationRate) + if onCommitBalance != oldBalance+expectedReward+toRemove.Validator.Weight() { t.Fatalf("on commit, should have old balance (%d) + staked amount (%d) + reward (%d) but have %d", - oldBalance, nextToRemove.Validator.Weight(), expectedReward, onCommitBalance) + oldBalance, toRemove.Validator.Weight(), expectedReward, onCommitBalance) } } } @@ -127,8 +123,8 @@ func TestRewardDelegatorTxSemanticVerify(t *testing.T) { t.Fatal(err) } - delStartTime := vdrStartTime + 1 - delEndTime := vdrEndTime - 1 + delStartTime := vdrStartTime + delEndTime := vdrEndTime delTx, err := vm.newAddDelegatorTx( vm.minStake, // stakeAmt delStartTime, @@ -142,16 +138,13 @@ func TestRewardDelegatorTxSemanticVerify(t *testing.T) { } unsignedDelTx := delTx.UnsignedTx.(*UnsignedAddDelegatorTx) - currentValidators, err := vm.getCurrentValidators(vm.DB, constants.PrimaryNetworkID) - if err != nil { + if err := vm.addStaker(vm.DB, constants.PrimaryNetworkID, vdrTx); err != nil { t.Fatal(err) } - currentValidators.Add(vdrTx) - currentValidators.Add(delTx) - if err := vm.putCurrentValidators(vm.DB, currentValidators, constants.PrimaryNetworkID); err != nil { + if err := vm.addStaker(vm.DB, constants.PrimaryNetworkID, delTx); err != nil { t.Fatal(err) - // Advance 
timestamp to when delegator should leave validator set - } else if err := vm.putTimestamp(vm.DB, time.Unix(int64(delEndTime), 0)); err != nil { + } + if err := vm.putTimestamp(vm.DB, time.Unix(int64(delEndTime), 0)); err != nil { t.Fatal(err) } diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index fbcc11198901..2046f0df33a4 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -11,6 +11,7 @@ import ( "time" "github.com/ava-labs/gecko/api" + "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/constants" "github.com/ava-labs/gecko/utils/crypto" @@ -507,7 +508,6 @@ func (service *Service) GetStakingAssetID(_ *http.Request, args *GetStakingAsset ****************************************************** */ -/* // GetCurrentValidatorsArgs are the arguments for calling GetCurrentValidators type GetCurrentValidatorsArgs struct { // Subnet we're listing the validators of @@ -527,113 +527,124 @@ func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentVa args.SubnetID = constants.PrimaryNetworkID } - validators, err := service.vm.getCurrentValidators(service.vm.DB, args.SubnetID) - if err != nil { - return fmt.Errorf("couldn't get validators of subnet with ID %s. Does it exist?", args.SubnetID) - } + stopPrefix := []byte(fmt.Sprintf("%s%s", args.SubnetID, stop)) + stopDB := prefixdb.NewNested(stopPrefix, service.vm.DB) + defer stopDB.Close() - reply.Validators = make([]FormattedAPIValidator, validators.Len()) - if args.SubnetID.Equals(constants.PrimaryNetworkID) { - for i, tx := range validators.Txs { - switch tx := tx.UnsignedTx.(type) { - case *UnsignedAddValidatorTx: - weight := json.Uint64(tx.Validator.Weight()) - reply.Validators[i] = FormattedAPIValidator{ - ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), - StartTime: json.Uint64(tx.StartTime().Unix()), - EndTime: json.Uint64(tx.EndTime().Unix()), - StakeAmount: &weight, - } - case *UnsignedAddDelegatorTx: - weight := json.Uint64(tx.Validator.Weight()) - reply.Validators[i] = FormattedAPIValidator{ - ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), - StartTime: json.Uint64(tx.StartTime().Unix()), - EndTime: json.Uint64(tx.EndTime().Unix()), - StakeAmount: &weight, - } - default: // Shouldn't happen - return fmt.Errorf("couldn't get the reward address of %s", tx.ID()) - } + stopIter := stopDB.NewIterator() + defer stopIter.Release() + + for stopIter.Next() { // Iterates in order of increasing stop time + txBytes := stopIter.Value() + + tx := Tx{} + if err := service.vm.codec.Unmarshal(txBytes, &tx); err != nil { + return fmt.Errorf("couldn't unmarshal validator tx: %w", err) } - } else { - for i, tx := range validators.Txs { - utx := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx) - weight := json.Uint64(utx.Validator.Weight()) - reply.Validators[i] = FormattedAPIValidator{ - ID: utx.Validator.ID().PrefixedString(constants.NodeIDPrefix), - StartTime: json.Uint64(utx.StartTime().Unix()), - EndTime: json.Uint64(utx.EndTime().Unix()), + if err := tx.Sign(service.vm.codec, nil); err != nil { + return err + } + + switch staker := tx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + weight := json.Uint64(staker.Validator.Weight()) + reply.Validators = append(reply.Validators, FormattedAPIValidator{ + ID: staker.Validator.ID().PrefixedString(constants.NodeIDPrefix), + StartTime: json.Uint64(staker.StartTime().Unix()), + EndTime: json.Uint64(staker.EndTime().Unix()), + StakeAmount: &weight, + }) + case
*UnsignedAddValidatorTx: + weight := json.Uint64(staker.Validator.Weight()) + reply.Validators = append(reply.Validators, FormattedAPIValidator{ + ID: staker.Validator.ID().PrefixedString(constants.NodeIDPrefix), + StartTime: json.Uint64(staker.StartTime().Unix()), + EndTime: json.Uint64(staker.EndTime().Unix()), + StakeAmount: &weight, + }) + case *UnsignedAddSubnetValidatorTx: + weight := json.Uint64(staker.Validator.Weight()) + reply.Validators = append(reply.Validators, FormattedAPIValidator{ + ID: staker.Validator.ID().PrefixedString(constants.NodeIDPrefix), + StartTime: json.Uint64(staker.StartTime().Unix()), + EndTime: json.Uint64(staker.EndTime().Unix()), Weight: &weight, - } + }) + default: + return fmt.Errorf("expected validator but got %T", tx.UnsignedTx) } } + return stopIter.Error() +} - return nil +// GetPendingValidatorsArgs are the arguments for calling GetPendingValidators +type GetPendingValidatorsArgs struct { + // Subnet we're getting the pending validators of + // If omitted, defaults to primary network + SubnetID ids.ID `json:"subnetID"` +} + +// GetPendingValidatorsReply are the results from calling GetPendingValidators +type GetPendingValidatorsReply struct { + Validators []FormattedAPIValidator `json:"validators"` +} + +// GetPendingValidators returns the list of pending validators +func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { + service.vm.Ctx.Log.Info("Platform: GetPendingValidators called") + if args.SubnetID.IsZero() { + args.SubnetID = constants.PrimaryNetworkID + } + + startPrefix := []byte(fmt.Sprintf("%s%s", args.SubnetID, start)) + startDB := prefixdb.NewNested(startPrefix, service.vm.DB) + defer startDB.Close() + + startIter := startDB.NewIterator() + defer startIter.Release() + + for startIter.Next() { // Iterates in order of increasing start time + txBytes := startIter.Value() + + tx := Tx{} + if err := service.vm.codec.Unmarshal(txBytes, &tx); err != nil { + return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + } + if err := tx.Sign(service.vm.codec, nil); err != nil { + return err + } + + switch staker := tx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + weight := json.Uint64(staker.Validator.Weight()) + reply.Validators = append(reply.Validators, FormattedAPIValidator{ + ID: staker.Validator.ID().PrefixedString(constants.NodeIDPrefix), + StartTime: json.Uint64(staker.StartTime().Unix()), + EndTime: json.Uint64(staker.EndTime().Unix()), + StakeAmount: &weight, + }) + case *UnsignedAddValidatorTx: + weight := json.Uint64(staker.Validator.Weight()) + reply.Validators = append(reply.Validators, FormattedAPIValidator{ + ID: staker.Validator.ID().PrefixedString(constants.NodeIDPrefix), + StartTime: json.Uint64(staker.StartTime().Unix()), + EndTime: json.Uint64(staker.EndTime().Unix()), + StakeAmount: &weight, + }) + case *UnsignedAddSubnetValidatorTx: + weight := json.Uint64(staker.Validator.Weight()) + reply.Validators = append(reply.Validators, FormattedAPIValidator{ + ID: staker.Validator.ID().PrefixedString(constants.NodeIDPrefix), + StartTime: json.Uint64(staker.StartTime().Unix()), + EndTime: json.Uint64(staker.EndTime().Unix()), + Weight: &weight, + }) + default: + return fmt.Errorf("expected validator but got %T", tx.UnsignedTx) + } + } + return startIter.Error() } -*/ - -// // GetPendingValidatorsArgs are the arguments for calling GetPendingValidators -// type GetPendingValidatorsArgs struct { -// // Subnet we're getting the pending 
validators of -// // If omitted, defaults to primary network -// SubnetID ids.ID `json:"subnetID"` -// } - -// // GetPendingValidatorsReply are the results from calling GetPendingValidators -// type GetPendingValidatorsReply struct { -// Validators []FormattedAPIValidator `json:"validators"` -// } - -// // GetPendingValidators returns the list of pending validators -// func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { -// service.vm.Ctx.Log.Info("Platform: GetPendingValidators called") -// if args.SubnetID.IsZero() { -// args.SubnetID = constants.PrimaryNetworkID -// } - -// validators, err := service.vm.getPendingValidators(service.vm.DB, args.SubnetID) -// if err != nil { -// return fmt.Errorf("couldn't get validators of subnet with ID %s. Does it exist?", args.SubnetID) -// } - -// reply.Validators = make([]FormattedAPIValidator, validators.Len()) -// for i, tx := range validators.Txs { -// if args.SubnetID.Equals(constants.PrimaryNetworkID) { -// switch tx := tx.UnsignedTx.(type) { -// case *UnsignedAddValidatorTx: -// weight := json.Uint64(tx.Validator.Weight()) -// reply.Validators[i] = FormattedAPIValidator{ -// ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), -// StartTime: json.Uint64(tx.StartTime().Unix()), -// EndTime: json.Uint64(tx.EndTime().Unix()), -// StakeAmount: &weight, -// } -// case *UnsignedAddDelegatorTx: -// weight := json.Uint64(tx.Validator.Weight()) -// reply.Validators[i] = FormattedAPIValidator{ -// ID: tx.Validator.ID().PrefixedString(constants.NodeIDPrefix), -// StartTime: json.Uint64(tx.StartTime().Unix()), -// EndTime: json.Uint64(tx.EndTime().Unix()), -// StakeAmount: &weight, -// } -// default: // Shouldn't happen -// return fmt.Errorf("couldn't get the reward address of %s", tx.ID()) -// } -// } else { -// utx := tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx) -// weight := json.Uint64(utx.Validator.Weight()) -// reply.Validators[i] = FormattedAPIValidator{ -// ID: utx.Validator.ID().PrefixedString(constants.NodeIDPrefix), -// StartTime: json.Uint64(utx.StartTime().Unix()), -// EndTime: json.Uint64(utx.EndTime().Unix()), -// Weight: &weight, -// } -// } -// } - -// return nil -// } // SampleValidatorsArgs are the arguments for calling SampleValidators type SampleValidatorsArgs struct { @@ -741,7 +752,7 @@ func (service *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, re nodeID, // Node ID rewardAddress, // Reward Address uint32(10000*args.DelegationFeeRate), // Shares - privKeys, // Private keys + privKeys, // Private keys ) if err != nil { return fmt.Errorf("couldn't create tx: %w", err) diff --git a/vms/platformvm/spend.go b/vms/platformvm/spend.go index dee84c8294e2..392e0b82b94e 100644 --- a/vms/platformvm/spend.go +++ b/vms/platformvm/spend.go @@ -536,7 +536,6 @@ func (vm *VM) consumeInputs( if err := vm.removeUTXO(db, utxoID); err != nil { return tempError{err} } - vm.Ctx.Log.Error("Consuming UTXOID %s", utxoID) } return nil } diff --git a/vms/platformvm/state.go b/vms/platformvm/state.go index a891feda7da8..b62124c1d593 100644 --- a/vms/platformvm/state.go +++ b/vms/platformvm/state.go @@ -9,14 +9,13 @@ import ( "fmt" "time" - "github.com/ava-labs/gecko/utils/hashing" - "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/consensus/snowman" "github.com/ava-labs/gecko/utils/constants" "github.com/ava-labs/gecko/utils/formatting" + 
"github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/components/avax" @@ -73,17 +72,23 @@ func (vm *VM) getStatus(db database.Database, ID ids.ID) (Status, error) { return Unknown, fmt.Errorf("expected status to be type Status but is type %T", statusIntf) } -// Add a staker to subnet [subnetID] -// A staker may be a validator or a delegator -func (vm *VM) addStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { - var staker TimedTx +// Add a staker to subnet [subnetID]'s pending validator queue. A staker may be +// a validator or a delegator +func (vm *VM) enqueueStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { + var ( + staker TimedTx + priority byte + ) switch unsignedTx := stakerTx.UnsignedTx.(type) { case *UnsignedAddDelegatorTx: staker = unsignedTx + priority = 1 case *UnsignedAddSubnetValidatorTx: staker = unsignedTx + priority = 0 case *UnsignedAddValidatorTx: staker = unsignedTx + priority = 2 default: return fmt.Errorf("staker is unexpected type %T", stakerTx) } @@ -93,82 +98,135 @@ func (vm *VM) addStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) err // Sorted by subnet ID then start time then tx ID prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) prefixStartDB := prefixdb.NewNested(prefixStart, db) - // Sorted by subnet ID then stop time then tx ID - prefixStop := []byte(fmt.Sprintf("%s%s", subnetID, stop)) - prefixStopDB := prefixdb.NewNested(prefixStop, db) - defer func() { - prefixStartDB.Close() - prefixStopDB.Close() - }() + defer prefixStartDB.Close() - p := wrappers.Packer{MaxSize: wrappers.LongLen + hashing.HashLen} + p := wrappers.Packer{MaxSize: wrappers.LongLen + wrappers.ByteLen + hashing.HashLen} p.PackLong(uint64(staker.StartTime().Unix())) + p.PackByte(priority) p.PackFixedBytes(stakerID) if p.Err != nil { return fmt.Errorf("couldn't serialize validator key: %w", p.Err) } startKey := p.Bytes - p = wrappers.Packer{MaxSize: wrappers.LongLen + hashing.HashLen} + return prefixStartDB.Put(startKey, txBytes) +} + +// Remove a staker from subnet [subnetID]'s pending validator queue. 
A staker +// may be a validator or a delegator +func (vm *VM) dequeueStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { + var ( + staker TimedTx + priority byte + ) + switch unsignedTx := stakerTx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + staker = unsignedTx + priority = 1 + case *UnsignedAddSubnetValidatorTx: + staker = unsignedTx + priority = 0 + case *UnsignedAddValidatorTx: + staker = unsignedTx + priority = 2 + default: + return fmt.Errorf("staker is unexpected type %T", stakerTx) + } + stakerID := staker.ID().Bytes() // Tx ID of this tx + + // Sorted by subnet ID then start time then ID + prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) + prefixStartDB := prefixdb.NewNested(prefixStart, db) + defer prefixStartDB.Close() + + p := wrappers.Packer{MaxSize: wrappers.LongLen + wrappers.ByteLen + hashing.HashLen} + p.PackLong(uint64(staker.StartTime().Unix())) + p.PackByte(priority) + p.PackFixedBytes(stakerID) + if p.Err != nil { + return fmt.Errorf("couldn't serialize validator key: %w", p.Err) + } + startKey := p.Bytes + + return prefixStartDB.Delete(startKey) +} + +// Add a staker to subnet [subnetID] +// A staker may be a validator or a delegator +func (vm *VM) addStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { + var ( + staker TimedTx + priority byte + ) + switch unsignedTx := stakerTx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + staker = unsignedTx + priority = 0 + case *UnsignedAddSubnetValidatorTx: + staker = unsignedTx + priority = 1 + case *UnsignedAddValidatorTx: + staker = unsignedTx + priority = 2 + default: + return fmt.Errorf("staker is unexpected type %T", stakerTx) + } + stakerID := staker.ID().Bytes() // Tx ID of this tx + txBytes := stakerTx.Bytes() + + // Sorted by subnet ID then stop time then tx ID + prefixStop := []byte(fmt.Sprintf("%s%s", subnetID, stop)) + prefixStopDB := prefixdb.NewNested(prefixStop, db) + defer prefixStopDB.Close() + + p := wrappers.Packer{MaxSize: wrappers.LongLen + wrappers.ByteLen + hashing.HashLen} p.PackLong(uint64(staker.EndTime().Unix())) + p.PackByte(priority) p.PackFixedBytes(stakerID) if p.Err != nil { return fmt.Errorf("couldn't serialize validator key: %w", p.Err) } stopKey := p.Bytes - if err := prefixStartDB.Put(startKey, txBytes); err != nil { - return err - } return prefixStopDB.Put(stopKey, txBytes) } // Remove a staker from subnet [subnetID] // A staker may be a validator or a delegator func (vm *VM) removeStaker(db database.Database, subnetID ids.ID, stakerTx *Tx) error { - var staker TimedTx + var ( + staker TimedTx + priority byte + ) switch unsignedTx := stakerTx.UnsignedTx.(type) { case *UnsignedAddDelegatorTx: staker = unsignedTx + priority = 0 case *UnsignedAddSubnetValidatorTx: staker = unsignedTx + priority = 1 case *UnsignedAddValidatorTx: staker = unsignedTx + priority = 2 default: return fmt.Errorf("staker is unexpected type %T", stakerTx) } stakerID := staker.ID().Bytes() // Tx ID of this tx - // Sorted by subnet ID then start time then ID - prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) - prefixStartDB := prefixdb.NewNested(prefixStart, db) // Sorted by subnet ID then stop time prefixStop := []byte(fmt.Sprintf("%s%s", subnetID, stop)) prefixStopDB := prefixdb.NewNested(prefixStop, db) - defer func() { - prefixStartDB.Close() - prefixStopDB.Close() - }() + defer prefixStopDB.Close() - p := wrappers.Packer{MaxSize: wrappers.LongLen + hashing.HashLen} - p.PackLong(uint64(staker.StartTime().Unix())) - p.PackFixedBytes(stakerID) - if 
p.Err != nil { - return fmt.Errorf("couldn't serialize validator key: %w", p.Err) - } - startKey := p.Bytes - - p = wrappers.Packer{MaxSize: wrappers.LongLen + hashing.HashLen} + p := wrappers.Packer{MaxSize: wrappers.LongLen + wrappers.ByteLen + hashing.HashLen} p.PackLong(uint64(staker.EndTime().Unix())) + p.PackByte(priority) p.PackFixedBytes(stakerID) if p.Err != nil { return fmt.Errorf("couldn't serialize validator key: %w", p.Err) } stopKey := p.Bytes - if err := prefixStartDB.Delete(startKey); err != nil { - return err - } return prefixStopDB.Delete(stopKey) } @@ -210,6 +268,36 @@ func (vm *VM) nextStakerStop(db database.Database, subnetID ids.ID) (*Tx, error) // Returns true if [nodeID] is a validator (not a delegator) of subnet [subnetID] func (vm *VM) isValidator(db database.Database, subnetID ids.ID, nodeID ids.ShortID) (TimedTx, bool, error) { + iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, stop)), db).NewIterator() + defer iter.Release() + + for iter.Next() { + txBytes := iter.Value() + tx := Tx{} + if err := Codec.Unmarshal(txBytes, &tx); err != nil { + return nil, false, err + } + if err := tx.Sign(vm.codec, nil); err != nil { + return nil, false, err + } + + switch vdr := tx.UnsignedTx.(type) { + case *UnsignedAddValidatorTx: + if subnetID.Equals(constants.PrimaryNetworkID) && vdr.Validator.NodeID.Equals(nodeID) { + return vdr, true, nil + } + case *UnsignedAddSubnetValidatorTx: + if subnetID.Equals(vdr.Validator.SubnetID()) && vdr.Validator.NodeID.Equals(nodeID) { + return vdr, true, nil + } + } + } + return nil, false, nil +} + +// Returns true if [nodeID] will be a validator (not a delegator) of subnet +// [subnetID] +func (vm *VM) willBeValidator(db database.Database, subnetID ids.ID, nodeID ids.ShortID) (TimedTx, bool, error) { iter := prefixdb.NewNested([]byte(fmt.Sprintf("%s%s", subnetID, start)), db).NewIterator() defer iter.Release() diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index b25493acc133..1b7da10d21d2 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -383,7 +383,7 @@ func (vm *VM) initSubnets() error { if err := vm.updateValidators(vm.DB); err != nil { return err } - return vm.updateVdrMgr() // TODO is this right? 
+ return vm.updateVdrMgr(true) } // Create the blockchain described in [tx], but only if this node is a member of @@ -423,7 +423,15 @@ func (vm *VM) createChain(tx *Tx) { func (vm *VM) Bootstrapping() error { vm.bootstrapped = false; return vm.fx.Bootstrapping() } // Bootstrapped marks this VM as bootstrapped -func (vm *VM) Bootstrapped() error { vm.bootstrapped = true; return vm.fx.Bootstrapped() } +func (vm *VM) Bootstrapped() error { + vm.bootstrapped = true + errs := wrappers.Errs{} + errs.Add( + vm.updateVdrMgr(false), + vm.fx.Bootstrapped(), + ) + return errs.Err +} // Shutdown this blockchain func (vm *VM) Shutdown() error { @@ -732,11 +740,6 @@ func (vm *VM) resetTimer() { // Returns the time when the next staker of any subnet starts/stops staking // after the current timestamp func (vm *VM) nextStakerChangeTime(db database.Database) (time.Time, error) { - timestamp, err := vm.getTimestamp(db) - if err != nil { - return time.Time{}, fmt.Errorf("couldn't get timestamp: %w", err) - } - subnets, err := vm.getSubnets(db) if err != nil { return time.Time{}, fmt.Errorf("couldn't get subnets: %w", err) @@ -751,14 +754,14 @@ func (vm *VM) nextStakerChangeTime(db database.Database) (time.Time, error) { for _, subnetID := range subnetIDs.List() { if tx, err := vm.nextStakerStart(db, subnetID); err == nil { if staker, ok := tx.UnsignedTx.(TimedTx); ok { - if startTime := staker.StartTime(); startTime.Before(earliest) && startTime.After(timestamp) { + if startTime := staker.StartTime(); startTime.Before(earliest) { earliest = startTime } } } if tx, err := vm.nextStakerStop(db, subnetID); err == nil { if staker, ok := tx.UnsignedTx.(TimedTx); ok { - if endTime := staker.EndTime(); endTime.Before(earliest) && endTime.After(timestamp) { + if endTime := staker.EndTime(); endTime.Before(earliest) { earliest = endTime } } @@ -769,12 +772,12 @@ func (vm *VM) nextStakerChangeTime(db database.Database) (time.Time, error) { // update validator set of [subnetID] based on the current chain timestamp func (vm *VM) updateValidators(db database.Database) error { - timestamp, err := vm.getTimestamp(vm.DB) + timestamp, err := vm.getTimestamp(db) if err != nil { return fmt.Errorf("can't get timestamp: %w", err) } - subnets, err := vm.getSubnets(vm.DB) + subnets, err := vm.getSubnets(db) if err != nil { return err } @@ -784,68 +787,144 @@ func (vm *VM) updateValidators(db database.Database) error { for _, subnet := range subnets { subnetIDs.Add(subnet.ID()) } + subnetIDList := subnetIDs.List() - for _, subnetID := range subnetIDs.List() { - prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) - iter := prefixdb.NewNested(prefixStart, db).NewIterator() - - var tx Tx - for iter.Next() { // Iterates in order of increasing start time - txBytes := iter.Value() - if err := vm.codec.Unmarshal(txBytes, &tx); err != nil { - return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + for _, subnetID := range subnetIDList { + if err := vm.updateSubnetValidators(db, subnetID, timestamp); err != nil { + return err + } + } + return nil +} +func (vm *VM) updateSubnetValidators(db database.Database, subnetID ids.ID, timestamp time.Time) error { + startPrefix := []byte(fmt.Sprintf("%s%s", subnetID, start)) + startDB := prefixdb.NewNested(startPrefix, db) + defer startDB.Close() + + startIter := startDB.NewIterator() + defer startIter.Release() + + for startIter.Next() { // Iterates in order of increasing start time + txBytes := startIter.Value() + + tx := Tx{} + if err := vm.codec.Unmarshal(txBytes, &tx); err != nil { 
+ return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + } + if err := tx.Sign(vm.codec, nil); err != nil { + return err + } + + switch staker := tx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + if !subnetID.Equals(constants.PrimaryNetworkID) { + return fmt.Errorf("AddDelegatorTx is invalid for subnet %s", + subnetID) } - if err := tx.Sign(vm.codec, nil); err != nil { - return err + if staker.StartTime().After(timestamp) { + return nil + } + if err := vm.dequeueStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't dequeue staker: %w", err) + } + if err := vm.addStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't add staker: %w", err) + } + case *UnsignedAddValidatorTx: + if !subnetID.Equals(constants.PrimaryNetworkID) { + return fmt.Errorf("AddValidatorTx is invalid for subnet %s", + subnetID) + } + if staker.StartTime().After(timestamp) { + return nil + } + if err := vm.dequeueStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't dequeue staker: %w", err) + } + if err := vm.addStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't add staker: %w", err) + } + case *UnsignedAddSubnetValidatorTx: + if txSubnetID := staker.Validator.SubnetID(); !subnetID.Equals(txSubnetID) { + return fmt.Errorf("AddSubnetValidatorTx references the incorrect subnet. Expected %s; Got %s", + subnetID, txSubnetID) + } + if staker.StartTime().After(timestamp) { + return nil + } + if err := vm.dequeueStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't dequeue staker: %w", err) } + if err := vm.addStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't add staker: %w", err) + } + default: + return fmt.Errorf("expected validator but got %T", tx.UnsignedTx) + } + } - switch staker := tx.UnsignedTx.(type) { - case *UnsignedAddDelegatorTx: - if !subnetID.Equals(constants.PrimaryNetworkID) { - continue - } - if staker.EndTime().Before(timestamp) { - if err := vm.removeStaker(db, subnetID, &tx); err != nil { - return fmt.Errorf("couldn't remove staker: %w", err) - } - } - case *UnsignedAddValidatorTx: - if !subnetID.Equals(constants.PrimaryNetworkID) { - continue - } - if staker.EndTime().Before(timestamp) { - if err := vm.removeStaker(db, subnetID, &tx); err != nil { - return fmt.Errorf("couldn't remove staker: %w", err) - } - } - case *UnsignedAddSubnetValidatorTx: - if !subnetID.Equals(staker.Validator.SubnetID()) { - continue - } - if staker.EndTime().Before(timestamp) { - if err := vm.removeStaker(db, subnetID, &tx); err != nil { - return fmt.Errorf("couldn't remove staker: %w", err) - } - } - case TimedTx: - if staker.EndTime().Before(timestamp) { - if err := vm.removeStaker(db, subnetID, &tx); err != nil { - return fmt.Errorf("couldn't remove staker: %w", err) - } - } - default: - vm.Ctx.Log.Warn(fmt.Sprintf("expected validator but got %T", tx.UnsignedTx)) + stopPrefix := []byte(fmt.Sprintf("%s%s", subnetID, stop)) + stopDB := prefixdb.NewNested(stopPrefix, db) + defer stopDB.Close() + + stopIter := stopDB.NewIterator() + defer stopIter.Release() + + for stopIter.Next() { // Iterates in order of increasing stop time + txBytes := stopIter.Value() + + tx := Tx{} + if err := vm.codec.Unmarshal(txBytes, &tx); err != nil { + return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + } + if err := tx.Sign(vm.codec, nil); err != nil { + return err + } + + switch staker := tx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + if !subnetID.Equals(constants.PrimaryNetworkID) { + return 
fmt.Errorf("AddDelegatorTx is invalid for subnet %s", + subnetID) + } + if staker.EndTime().After(timestamp) { + return nil + } + case *UnsignedAddValidatorTx: + if !subnetID.Equals(constants.PrimaryNetworkID) { + return fmt.Errorf("AddValidatorTx is invalid for subnet %s", + subnetID) + } + if staker.EndTime().After(timestamp) { + return nil + } + case *UnsignedAddSubnetValidatorTx: + if txSubnetID := staker.Validator.SubnetID(); !subnetID.Equals(txSubnetID) { + return fmt.Errorf("AddSubnetValidatorTx references the incorrect subnet. Expected %s; Got %s", + subnetID, txSubnetID) + } + if staker.EndTime().After(timestamp) { + return nil } + if err := vm.removeStaker(db, subnetID, &tx); err != nil { + return fmt.Errorf("couldn't remove staker: %w", err) + } + default: + return fmt.Errorf("expected validator but got %T", tx.UnsignedTx) } } - return nil + errs := wrappers.Errs{} + errs.Add( + startIter.Error(), + stopIter.Error(), + ) + return errs.Err } -func (vm *VM) updateVdrMgr() error { - timestamp, err := vm.getTimestamp(vm.DB) - if err != nil { - return fmt.Errorf("can't get timestamp: %w", err) +func (vm *VM) updateVdrMgr(force bool) error { + if !force && !vm.bootstrapped { + return nil } subnets, err := vm.getSubnets(vm.DB) @@ -860,50 +939,55 @@ func (vm *VM) updateVdrMgr() error { } for _, subnetID := range subnetIDs.List() { - vdrs, initialized := vm.vdrMgr.GetValidators(subnetID) - if !initialized { - vdrs = validators.NewSet() + if err := vm.updateVdrSet(subnetID); err != nil { + return err } + } + return vm.initBlockchains() +} - prefixStart := []byte(fmt.Sprintf("%s%s", subnetID, start)) - iter := prefixdb.NewNested(prefixStart, vm.DB).NewIterator() +func (vm *VM) updateVdrSet(subnetID ids.ID) error { + vdrs := validators.NewSet() - var tx Tx - for iter.Next() { // Iterates in order of increasing start time - txBytes := iter.Value() - if err := vm.codec.Unmarshal(txBytes, &tx); err != nil { - break // TODO what to do here? 
- // return fmt.Errorf("couldn't unmarshal validator tx: %w", err) - } - switch staker := tx.UnsignedTx.(type) { - case *UnsignedAddDelegatorTx: - if !subnetID.Equals(constants.PrimaryNetworkID) { - continue - } - if !staker.StartTime().After(timestamp) { // Staker is staking now - vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) - } - case *UnsignedAddValidatorTx: - if !subnetID.Equals(constants.PrimaryNetworkID) { - continue - } - if !staker.StartTime().After(timestamp) { // Staker is staking now - vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) - } - case *UnsignedAddSubnetValidatorTx: - if !subnetID.Equals(staker.Validator.SubnetID()) { - continue - } - if !staker.StartTime().After(timestamp) { // Staker is staking now - vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) - } - default: - vm.Ctx.Log.Warn(fmt.Sprintf("expected validator but got %T", tx.UnsignedTx)) - } + stopPrefix := []byte(fmt.Sprintf("%s%s", subnetID, stop)) + stopDB := prefixdb.NewNested(stopPrefix, vm.DB) + defer stopDB.Close() + stopIter := stopDB.NewIterator() + defer stopIter.Release() + + for stopIter.Next() { // Iterates in order of increasing stop time + txBytes := stopIter.Value() + + tx := Tx{} + if err := vm.codec.Unmarshal(txBytes, &tx); err != nil { + return fmt.Errorf("couldn't unmarshal validator tx: %w", err) + } + if err := tx.Sign(vm.codec, nil); err != nil { + return err + } + + var err error + switch staker := tx.UnsignedTx.(type) { + case *UnsignedAddDelegatorTx: + err = vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) + case *UnsignedAddValidatorTx: + err = vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) + case *UnsignedAddSubnetValidatorTx: + err = vdrs.AddWeight(staker.Validator.NodeID, staker.Validator.Weight()) + default: + err = fmt.Errorf("expected validator but got %T", tx.UnsignedTx) + } + if err != nil { + return err } - vm.vdrMgr.Set(subnetID, vdrs) } - return nil + + errs := wrappers.Errs{} + errs.Add( + vm.vdrMgr.Set(subnetID, vdrs), + stopIter.Error(), + ) + return errs.Err } // Codec ... diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 025b84d39393..2e63892455d3 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -10,6 +10,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/gecko/chains" @@ -50,7 +52,7 @@ var ( defaultTxFee = uint64(100) // chain timestamp at genesis - defaultGenesisTime = time.Now().Round(time.Second) + defaultGenesisTime = time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC) // time that genesis validators start validating defaultValidateStartTime = defaultGenesisTime @@ -185,9 +187,7 @@ func defaultVM() (*VM, database.Database) { chainDB := prefixdb.New([]byte{0}, baseDB) atomicDB := prefixdb.New([]byte{1}, baseDB) - primaryNetwork := validators.NewSet() // TODO do we need this? 
- vm.validators = validators.NewManager() - vm.validators.PutValidatorSet(constants.PrimaryNetworkID, primaryNetwork) + vm.vdrMgr = validators.NewManager() vm.clock.Set(defaultGenesisTime) msgChan := make(chan common.Message, 1) @@ -286,29 +286,20 @@ func TestGenesis(t *testing.T) { } // Ensure current validator set of primary network is correct - currentValidators, err := vm.getCurrentValidators(vm.DB, constants.PrimaryNetworkID) - if err != nil { - t.Fatal(err) - } else if len(currentValidators.Txs) != len(genesisState.Validators) { + vdrSet, ok := vm.vdrMgr.GetValidators(constants.PrimaryNetworkID) + if !ok { + t.Fatalf("Missing the primary network validator set") + } + currentValidators := vdrSet.List() + if len(currentValidators) != len(genesisState.Validators) { t.Fatal("vm's current validator set is wrong") - } else if currentValidators.SortByStartTime == true { - t.Fatal("vm's current validators should be sorted by end time") } - currentSampler := validators.NewSet() - currentSampler.Set(vm.getValidators(currentValidators)) for _, key := range keys { - if addr := key.PublicKey().Address(); !currentSampler.Contains(addr) { + if addr := key.PublicKey().Address(); !vdrSet.Contains(addr) { t.Fatalf("should have had validator with NodeID %s", addr) } } - // Ensure pending validator set is correct (empty) - if pendingValidators, err := vm.getPendingValidators(vm.DB, constants.PrimaryNetworkID); err != nil { - t.Fatal(err) - } else if pendingValidators.Len() != 0 { - t.Fatal("vm's pending validator set should be empty") - } - // Ensure genesis timestamp is correct if timestamp, err := vm.getTimestamp(vm.DB); err != nil { t.Fatal(err) @@ -323,7 +314,7 @@ func TestGenesis(t *testing.T) { } // accept proposal to add validator to primary network -func TestAddidatorCommit(t *testing.T) { +func TestAddValidatorCommit(t *testing.T) { vm, _ := defaultVM() vm.Ctx.Lock.Lock() defer func() { @@ -390,14 +381,12 @@ func TestAddidatorCommit(t *testing.T) { } // Verify that new validator now in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, constants.PrimaryNetworkID) + _, willBeValidator, err := vm.willBeValidator(vm.DB, constants.PrimaryNetworkID, ID) if err != nil { t.Fatal(err) } - pendingSampler := validators.NewSet() - pendingSampler.Set(vm.getValidators(pendingValidators)) - if !pendingSampler.Contains(ID) { - t.Fatalf("pending validator should have validator with ID %s", ID) + if !willBeValidator { + t.Fatalf("Should have added validator to the pending queue") } } @@ -514,14 +503,12 @@ func TestAddValidatorReject(t *testing.T) { } // Verify that new validator NOT in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, constants.PrimaryNetworkID) + _, willBeValidator, err := vm.willBeValidator(vm.DB, constants.PrimaryNetworkID, ID) if err != nil { t.Fatal(err) } - pendingSampler := validators.NewSet() - pendingSampler.Set(vm.getValidators(pendingValidators)) - if pendingSampler.Contains(ID) { - t.Fatalf("should not have added validator to pending validator set") + if willBeValidator { + t.Fatalf("Shouldn't have added validator to the pending queue") } } @@ -593,14 +580,12 @@ func TestAddSubnetValidatorAccept(t *testing.T) { } // Verify that new validator is in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.ID()) + _, willBeValidator, err := vm.willBeValidator(vm.DB, testSubnet1.ID(), keys[0].PublicKey().Address()) if err != nil { t.Fatal(err) } - pendingSampler := validators.NewSet() - 
pendingSampler.Set(vm.getValidators(pendingValidators)) - if !pendingSampler.Contains(keys[0].PublicKey().Address()) { - t.Fatalf("should have added validator to pending validator set") + if !willBeValidator { + t.Fatalf("Should have added validator to the pending queue") } } @@ -615,8 +600,7 @@ func TestAddSubnetValidatorReject(t *testing.T) { startTime := defaultValidateStartTime.Add(Delta).Add(1 * time.Second) endTime := startTime.Add(MinimumStakingDuration) - key, _ := vm.factory.NewPrivateKey() - ID := key.PublicKey().Address() + nodeID := keys[0].PublicKey().Address() // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] @@ -625,7 +609,7 @@ func TestAddSubnetValidatorReject(t *testing.T) { defaultWeight, uint64(startTime.Unix()), uint64(endTime.Unix()), - keys[0].PublicKey().Address(), + nodeID, testSubnet1.ID(), []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, ) @@ -674,14 +658,12 @@ func TestAddSubnetValidatorReject(t *testing.T) { } // Verify that new validator NOT in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.ID()) + _, willBeValidator, err := vm.willBeValidator(vm.DB, testSubnet1.ID(), nodeID) if err != nil { t.Fatal(err) } - pendingSampler := validators.NewSet() - pendingSampler.Set(vm.getValidators(pendingValidators)) - if pendingSampler.Contains(ID) { - t.Fatalf("should not have added validator to pending validator set") + if willBeValidator { + t.Fatalf("Shouldn't have added validator to the pending queue") } } @@ -773,10 +755,10 @@ func TestRewardValidatorAccept(t *testing.T) { t.Fatal(err) } else if status != Committed { t.Fatalf("status should be Committed but is %s", status) - } else if currentValidators, err := vm.getCurrentValidators(vm.DB, constants.PrimaryNetworkID); err != nil { + } else if _, isValidator, err := vm.isValidator(vm.DB, constants.PrimaryNetworkID, keys[1].PublicKey().Address()); err != nil { // Verify that genesis validator was rewarded and removed from current validator set t.Fatal(err) - } else if currentValidators.Len() != len(keys)-1 { + } else if isValidator { t.Fatal("should have removed a genesis validator") } } @@ -857,14 +839,10 @@ func TestRewardValidatorReject(t *testing.T) { t.Fatal(err) } else if status != Aborted { t.Fatalf("status should be Aborted but is %s", status) - } - - // Verify that genesis validator was removed from current validator set - currentValidators, err := vm.getCurrentValidators(vm.DB, constants.PrimaryNetworkID) - if err != nil { + } else if _, isValidator, err := vm.isValidator(vm.DB, constants.PrimaryNetworkID, keys[1].PublicKey().Address()); err != nil { + // Verify that genesis validator was removed from current validator set t.Fatal(err) - } - if currentValidators.Len() != len(keys)-1 { + } else if isValidator { t.Fatal("should have removed a genesis validator") } } @@ -1024,22 +1002,11 @@ func TestCreateSubnet(t *testing.T) { t.Fatal(err) } else if status != Committed { t.Fatalf("status should be Committed but is %s", status) - } - - // Verify validator is in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, createSubnetTx.ID()) - if err != nil { + } else if _, willBeValidator, err := vm.willBeValidator(vm.DB, createSubnetTx.ID(), nodeID); err != nil { + // Verify that validator was added to the pending validator set t.Fatal(err) - } - foundNewValidator := false - for _, tx := range pendingValidators.Txs { - if 
tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.ID().Equals(nodeID) { - foundNewValidator = true - break - } - } - if !foundNewValidator { - t.Fatal("didn't add validator to new subnet's pending validator set") + } else if !willBeValidator { + t.Fatal("should have added a pending validator") } // Advance time to when new validator should start validating @@ -1081,30 +1048,16 @@ func TestCreateSubnet(t *testing.T) { t.Fatal(err) } else if status != Committed { t.Fatalf("status should be Committed but is %s", status) - } - - // Verify validator no longer in pending validator set - // Verify validator is in pending validator set - if pendingValidators, err = vm.getPendingValidators(vm.DB, createSubnetTx.ID()); err != nil { + } else if _, willBeValidator, err := vm.willBeValidator(vm.DB, createSubnetTx.ID(), nodeID); err != nil { + // Verify that validator was removed from the pending validator set t.Fatal(err) - } else if pendingValidators.Len() != 0 { - t.Fatal("pending validator set should be empty") - } - - // Verify validator is in current validator set - currentValidators, err := vm.getCurrentValidators(vm.DB, createSubnetTx.ID()) - if err != nil { + } else if willBeValidator { + t.Fatal("should have removed the pending validator") + } else if _, isValidator, err := vm.isValidator(vm.DB, createSubnetTx.ID(), nodeID); err != nil { + // Verify that validator was added to the validator set t.Fatal(err) - } - foundNewValidator = false - for _, tx := range currentValidators.Txs { - if tx.UnsignedTx.(*UnsignedAddSubnetValidatorTx).Validator.ID().Equals(nodeID) { - foundNewValidator = true - break - } - } - if !foundNewValidator { - t.Fatal("didn't add validator to new subnet's current validator set") + } else if !isValidator { + t.Fatal("should have been added to the validator set") } // fast forward clock to time validator should stop validating @@ -1144,16 +1097,16 @@ func TestCreateSubnet(t *testing.T) { t.Fatal(err) } else if status != Committed { t.Fatalf("status should be Committed but is %s", status) - } - // pending validators and current validator should be empty - if pendingValidators, err = vm.getPendingValidators(vm.DB, createSubnetTx.ID()); err != nil { + } else if _, willBeValidator, err := vm.willBeValidator(vm.DB, createSubnetTx.ID(), nodeID); err != nil { + // Verify that validator was removed from the pending validator set t.Fatal(err) - } else if pendingValidators.Len() != 0 { - t.Fatal("pending validator set should be empty") - } else if currentValidators, err = vm.getCurrentValidators(vm.DB, createSubnetTx.ID()); err != nil { + } else if willBeValidator { + t.Fatal("should have removed the pending validator") + } else if _, isValidator, err := vm.isValidator(vm.DB, createSubnetTx.ID(), nodeID); err != nil { + // Verify that validator was removed from the validator set t.Fatal(err) - } else if currentValidators.Len() != 0 { - t.Fatal("pending validator set should be empty") + } else if isValidator { + t.Fatal("should have been removed from the validator set") } } @@ -1321,9 +1274,7 @@ func TestRestartPartiallyAccepted(t *testing.T) { SnowmanVM: &core.SnowmanVM{}, chainManager: chains.MockManager{}, } - firstPrimaryNetwork := validators.NewSet() - firstVM.validators = validators.NewManager() - firstVM.validators.PutValidatorSet(constants.PrimaryNetworkID, firstPrimaryNetwork) + firstVM.vdrMgr = validators.NewManager() firstVM.clock.Set(defaultGenesisTime) firstCtx := defaultContext() firstCtx.Lock.Lock() @@ -1394,9 +1345,7 @@ func TestRestartPartiallyAccepted(t *testing.T) { 
chainManager: chains.MockManager{}, } - secondPrimaryNetwork := validators.NewSet() - secondVM.validators = validators.NewManager() - secondVM.validators.PutValidatorSet(constants.PrimaryNetworkID, secondPrimaryNetwork) + secondVM.vdrMgr = validators.NewManager() secondVM.clock.Set(defaultGenesisTime) secondCtx := defaultContext() @@ -1427,9 +1376,7 @@ func TestRestartFullyAccepted(t *testing.T) { chainManager: chains.MockManager{}, } - firstPrimaryNetwork := validators.NewSet() - firstVM.validators = validators.NewManager() - firstVM.validators.PutValidatorSet(constants.PrimaryNetworkID, firstPrimaryNetwork) + firstVM.vdrMgr = validators.NewManager() firstVM.clock.Set(defaultGenesisTime) firstCtx := defaultContext() @@ -1515,9 +1462,7 @@ func TestRestartFullyAccepted(t *testing.T) { chainManager: chains.MockManager{}, } - secondPrimaryNetwork := validators.NewSet() - secondVM.validators = validators.NewManager() - secondVM.validators.PutValidatorSet(constants.PrimaryNetworkID, secondPrimaryNetwork) + secondVM.vdrMgr = validators.NewManager() secondVM.clock.Set(defaultGenesisTime) secondCtx := defaultContext() @@ -1553,9 +1498,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { chainManager: chains.MockManager{}, } - primaryNetwork := validators.NewSet() - vm.validators = validators.NewManager() - vm.validators.PutValidatorSet(constants.PrimaryNetworkID, primaryNetwork) + vm.vdrMgr = validators.NewManager() vm.clock.Set(defaultGenesisTime) ctx := defaultContext() @@ -1591,7 +1534,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { peerID := ids.NewShortID([20]byte{1, 2, 3, 4, 5, 4, 3, 2, 1}) vdrs := validators.NewSet() - vdrs.Add(validators.NewValidator(peerID, 1)) + vdrs.AddWeight(peerID, 1) beacons := vdrs timeoutManager := timeout.Manager{} @@ -1706,9 +1649,7 @@ func TestUnverifiedParent(t *testing.T) { chainManager: chains.MockManager{}, } - primaryNetwork := validators.NewSet() - vm.validators = validators.NewManager() - vm.validators.PutValidatorSet(constants.PrimaryNetworkID, primaryNetwork) + vm.vdrMgr = validators.NewManager() vm.clock.Set(defaultGenesisTime) ctx := defaultContext() @@ -1827,3 +1768,41 @@ func TestFormatAddress(t *testing.T) { }) } } + +func TestNextValidatorStartTime(t *testing.T) { + vm, _ := defaultVM() + vm.Ctx.Lock.Lock() + defer func() { + vm.Shutdown() + vm.Ctx.Lock.Unlock() + }() + + currentTime, err := vm.getTimestamp(vm.DB) + assert.NoError(t, err) + + startTime := currentTime.Add(time.Second) + endTime := startTime.Add(MinimumStakingDuration) + + tx, err := vm.newAddValidatorTx( + vm.minStake, // stake amount + uint64(startTime.Unix()), // start time + uint64(endTime.Unix()), // end time + vm.Ctx.NodeID, // node ID + ids.GenerateTestShortID(), // reward address + NumberOfShares, // shares + []*crypto.PrivateKeySECP256K1R{keys[0]}, // key + ) + assert.NoError(t, err) + + err = vm.enqueueStaker(vm.DB, constants.PrimaryNetworkID, tx) + assert.NoError(t, err) + + nextStaker, err := vm.nextStakerStart(vm.DB, constants.PrimaryNetworkID) + assert.NoError(t, err) + assert.Equal( + t, + tx.ID().Bytes(), + nextStaker.ID().Bytes(), + "should have marked the new tx as the next validator to be added", + ) +} diff --git a/vms/spchainvm/consensus_benchmark_test.go b/vms/spchainvm/consensus_benchmark_test.go index 019ffe0ab3eb..717fa5ccb1fe 100644 --- a/vms/spchainvm/consensus_benchmark_test.go +++ b/vms/spchainvm/consensus_benchmark_test.go @@ -54,7 +54,7 @@ func ConsensusLeader(numBlocks, numTxsPerBlock int, b *testing.B) { msgChan := make(chan 
common.Message, 1000) vdrs := validators.NewSet() - vdrs.Add(validators.NewValidator(ctx.NodeID, 1)) + vdrs.AddWeight(ctx.NodeID, 1) beacons := validators.NewSet() timeoutManager := timeout.Manager{} @@ -191,7 +191,7 @@ func ConsensusFollower(numBlocks, numTxsPerBlock int, b *testing.B) { msgChan := make(chan common.Message, 1000) vdrs := validators.NewSet() - vdrs.Add(validators.NewValidator(ctx.NodeID, 1)) + vdrs.AddWeight(ctx.NodeID, 1) beacons := validators.NewSet() timeoutManager := timeout.Manager{} diff --git a/vms/spchainvm/vm_test.go b/vms/spchainvm/vm_test.go index e26f4f424f80..20f951cba644 100644 --- a/vms/spchainvm/vm_test.go +++ b/vms/spchainvm/vm_test.go @@ -79,8 +79,8 @@ func TestPayments(t *testing.T) { sender.Default(true) vdrs := validators.NewSet() - vdr := validators.GenerateRandomValidator(1) - vdrs.Add(vdr) + vdr := ids.GenerateTestShortID() + vdrs.AddWeight(vdr, 1) ctx.Lock.Lock() consensus := smeng.Transitive{} @@ -141,7 +141,7 @@ func TestPayments(t *testing.T) { queriedVtxIDSet := ids.Set{} queriedVtxIDSet.Add(*queriedVtxID) - consensus.Chits(vdr.ID(), *queryRequestID, queriedVtxIDSet) + consensus.Chits(vdr, *queryRequestID, queriedVtxIDSet) if account := vm.GetAccount(vm.baseDB, keys[0].PublicKey().Address()); account.Balance() != 20*units.KiloAvax-200 { t.Fatalf("Wrong Balance") From 998f2b439d2288619b2cb25abd9ce9d5f8470995 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 30 Aug 2020 04:32:51 -0400 Subject: [PATCH 45/47] minor cleanup --- snow/networking/router/handler.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/snow/networking/router/handler.go b/snow/networking/router/handler.go index 0d17b8340835..3558110f6c6e 100644 --- a/snow/networking/router/handler.go +++ b/snow/networking/router/handler.go @@ -8,18 +8,19 @@ import ( "sync" "time" - "github.com/ava-labs/gecko/utils/constants" - "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/constants" "github.com/ava-labs/gecko/utils/timer" ) const ( + // DefaultStakerPortion defines the default percentage of resources to + // allocate to stakers. 
DefaultStakerPortion float64 = 0.2 ) From 6d885f26325f23e2f1c6f085c1e16f26b87375a0 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 30 Aug 2020 05:32:05 -0400 Subject: [PATCH 46/47] cleaned up import --- vms/spchainvm/consensus_benchmark_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/vms/spchainvm/consensus_benchmark_test.go b/vms/spchainvm/consensus_benchmark_test.go index 87afd9385278..1f73ed5fd105 100644 --- a/vms/spchainvm/consensus_benchmark_test.go +++ b/vms/spchainvm/consensus_benchmark_test.go @@ -20,7 +20,6 @@ import ( "github.com/ava-labs/gecko/snow/networking/sender" "github.com/ava-labs/gecko/snow/networking/throttler" "github.com/ava-labs/gecko/snow/networking/timeout" - "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/logging" From 9fb7cf7c3298d2fdd34e8ee09337c34b38a383ec Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 30 Aug 2020 12:37:53 -0400 Subject: [PATCH 47/47] updated version handling --- main/params.go | 2 +- network/peer.go | 9 ++- node/node.go | 2 +- snow/networking/router/handler.go | 7 +- snow/networking/router/message.go | 6 ++ snow/networking/throttler/ewma.go | 2 +- version/version.go | 4 +- version/version_test.go | 130 ++++++++++++++++++------------ 8 files changed, 101 insertions(+), 61 deletions(-) diff --git a/main/params.go b/main/params.go index 239037a9b626..599da77ed323 100644 --- a/main/params.go +++ b/main/params.go @@ -32,7 +32,7 @@ import ( ) const ( - dbVersion = "v0.6.5" + dbVersion = "v0.7.0" ) // Results of parsing the CLI diff --git a/network/peer.go b/network/peer.go index 08a10e37bc02..9e880814329d 100644 --- a/network/peer.go +++ b/network/peer.go @@ -469,8 +469,13 @@ func (p *peer) version(msg Msg) { if err := p.net.version.Compatible(peerVersion); err != nil { p.net.log.Debug("peer version not compatible due to %s", err) - p.discardIP() - return + if !p.net.beacons.Contains(p.id) { + p.discardIP() + return + } + p.net.log.Info("allowing beacon %s to connect with a lower version %s", + p.id, + peerVersion) } if p.ip.IsZero() { diff --git a/node/node.go b/node/node.go index 6c62b9add9b0..d758df5c6282 100644 --- a/node/node.go +++ b/node/node.go @@ -63,7 +63,7 @@ var ( genesisHashKey = []byte("genesisID") // Version is the version of this code - Version = version.NewDefaultVersion("avalanche", 0, 6, 5) + Version = version.NewDefaultVersion("avalanche", 0, 7, 0) versionParser = version.NewDefaultParser() ) diff --git a/snow/networking/router/handler.go b/snow/networking/router/handler.go index 2368e354d7a4..751cd6445d15 100644 --- a/snow/networking/router/handler.go +++ b/snow/networking/router/handler.go @@ -14,7 +14,6 @@ import ( "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/snow/validators" - "github.com/ava-labs/gecko/utils/constants" "github.com/ava-labs/gecko/utils/timer" ) @@ -234,10 +233,10 @@ func (h *Handler) dispatchMsg(msg message) { h.ctx.Lock.Lock() defer h.ctx.Lock.Unlock() - if msg.requestID != constants.GossipMsgRequestID { - h.ctx.Log.Debug("Forwarding message to consensus: %s", msg) - } else { + if msg.IsPeriodic() { h.ctx.Log.Verbo("Forwarding message to consensus: %s", msg) + } else { + h.ctx.Log.Debug("Forwarding message to consensus: %s", msg) } var ( diff --git a/snow/networking/router/message.go b/snow/networking/router/message.go index d88b71d74fc0..89d0742b1f85 100644 --- a/snow/networking/router/message.go +++ b/snow/networking/router/message.go @@ -10,6 +10,7 @@ import ( 
"github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/constants" ) type msgType int @@ -49,6 +50,11 @@ type message struct { deadline time.Time // Time this message must be responded to } +func (m message) IsPeriodic() bool { + return m.requestID == constants.GossipMsgRequestID || + m.messageType == gossipMsg +} + func (m message) String() string { sb := strings.Builder{} sb.WriteString(fmt.Sprintf("\n messageType: %s", m.messageType)) diff --git a/snow/networking/throttler/ewma.go b/snow/networking/throttler/ewma.go index 9cfc0169a201..87fab7298d33 100644 --- a/snow/networking/throttler/ewma.go +++ b/snow/networking/throttler/ewma.go @@ -143,7 +143,7 @@ func (et *ewmaCPUTracker) EndInterval() { cpuSpender.staking = false cpuSpender.expectedCPU = defaultMinimumCPUAllotment } - et.log.Debug("Removed %d validators from CPU Tracker.", removed) + et.log.Verbo("Removed %d validators from CPU Tracker.", removed) } // getSpender returns the [cpuSpender] corresponding to [validatorID] diff --git a/version/version.go b/version/version.go index 1bf2d639cf3f..7c9d62e86cc0 100644 --- a/version/version.go +++ b/version/version.go @@ -93,9 +93,9 @@ func (v *version) Compatible(o Version) error { switch { case v.App() != o.App(): return errDifferentApps - case v.Major() != o.Major(): + case v.Major() > o.Major(): return errDifferentMajor - case v.Minor() != o.Minor(): + case v.Major() == o.Major() && v.Minor() > o.Minor(): return errDifferentMinor default: return nil diff --git a/version/version_test.go b/version/version_test.go index 7ffbba4bc72f..7fa9a892ae96 100644 --- a/version/version_test.go +++ b/version/version_test.go @@ -4,6 +4,7 @@ package version import ( + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -35,54 +36,83 @@ func TestNewVersion(t *testing.T) { assert.False(t, v.Before(v)) } -func TestIncompatibleApps(t *testing.T) { - v0 := NewDefaultVersion("avalanche", 1, 2, 3) - v1 := NewDefaultVersion("notavalanche", 1, 2, 3) - - assert.NotNil(t, v0) - assert.NotNil(t, v1) - assert.Error(t, v0.Compatible(v1)) - assert.Error(t, v1.Compatible(v0)) - - assert.False(t, v0.Before(v1)) - assert.False(t, v1.Before(v0)) -} - -func TestIncompatibleMajor(t *testing.T) { - v0 := NewDefaultVersion("avalanche", 1, 2, 3) - v1 := NewDefaultVersion("avalanche", 2, 2, 3) - - assert.NotNil(t, v0) - assert.NotNil(t, v1) - assert.Error(t, v0.Compatible(v1)) - assert.Error(t, v1.Compatible(v0)) - - assert.True(t, v0.Before(v1)) - assert.False(t, v1.Before(v0)) -} - -func TestIncompatibleMinor(t *testing.T) { - v0 := NewDefaultVersion("avalanche", 1, 2, 3) - v1 := NewDefaultVersion("avalanche", 1, 3, 3) - - assert.NotNil(t, v0) - assert.NotNil(t, v1) - assert.Error(t, v0.Compatible(v1)) - assert.Error(t, v1.Compatible(v0)) - - assert.True(t, v0.Before(v1)) - assert.False(t, v1.Before(v0)) -} - -func TestCompatiblePatch(t *testing.T) { - v0 := NewDefaultVersion("avalanche", 1, 2, 3) - v1 := NewDefaultVersion("avalanche", 1, 2, 4) - - assert.NotNil(t, v0) - assert.NotNil(t, v1) - assert.NoError(t, v0.Compatible(v1)) - assert.NoError(t, v1.Compatible(v0)) - - assert.True(t, v0.Before(v1)) - assert.False(t, v1.Before(v0)) +func TestComparingVersions(t *testing.T) { + tests := []struct { + myVersion Version + peerVersion Version + compatible bool + before bool + }{ + { + myVersion: NewDefaultVersion("avalanche", 1, 2, 3), + peerVersion: NewDefaultVersion("avalanche", 1, 2, 3), + compatible: true, + before: false, + }, + { + myVersion: 
NewDefaultVersion("avalanche", 1, 2, 4), + peerVersion: NewDefaultVersion("avalanche", 1, 2, 3), + compatible: true, + before: false, + }, + { + myVersion: NewDefaultVersion("avalanche", 1, 2, 3), + peerVersion: NewDefaultVersion("avalanche", 1, 2, 4), + compatible: true, + before: true, + }, + { + myVersion: NewDefaultVersion("avalanche", 1, 3, 3), + peerVersion: NewDefaultVersion("avalanche", 1, 2, 3), + compatible: false, + before: false, + }, + { + myVersion: NewDefaultVersion("avalanche", 1, 2, 3), + peerVersion: NewDefaultVersion("avalanche", 1, 3, 3), + compatible: true, + before: true, + }, + { + myVersion: NewDefaultVersion("avalanche", 2, 2, 3), + peerVersion: NewDefaultVersion("avalanche", 1, 2, 3), + compatible: false, + before: false, + }, + { + myVersion: NewDefaultVersion("avalanche", 1, 2, 3), + peerVersion: NewDefaultVersion("avalanche", 2, 2, 3), + compatible: true, + before: true, + }, + { + myVersion: NewDefaultVersion("avax", 1, 2, 4), + peerVersion: NewDefaultVersion("avalanche", 1, 2, 3), + compatible: false, + before: false, + }, + { + myVersion: NewDefaultVersion("avalanche", 1, 2, 3), + peerVersion: NewDefaultVersion("avax", 1, 2, 3), + compatible: false, + before: false, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s %s", test.myVersion, test.peerVersion), func(t *testing.T) { + err := test.myVersion.Compatible(test.peerVersion) + if test.compatible && err != nil { + t.Fatalf("Expected version to be compatible but returned: %s", + err) + } else if !test.compatible && err == nil { + t.Fatalf("Expected version to be incompatible but returned no error") + } + before := test.myVersion.Before(test.peerVersion) + if test.before && !before { + t.Fatalf("Expected version to be before the peer version but wasn't") + } else if !test.before && before { + t.Fatalf("Expected version not to be before the peer version but was") + } + }) + } }