From 2586fec178783a4842c8f58fcf3774938307736e Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 13:39:07 +0300 Subject: [PATCH 01/25] Remove eviction phase 1 (not necessary). --- storage/txcache/config.go | 4 +- storage/txcache/eviction.go | 42 +------------- storage/txcache/eviction_test.go | 73 ++----------------------- storage/txcache/monitoring.go | 7 +-- storage/txcache/txCache_test.go | 4 -- storage/txcache/txListForSender.go | 25 --------- storage/txcache/txListForSender_test.go | 31 ----------- 7 files changed, 11 insertions(+), 175 deletions(-) diff --git a/storage/txcache/config.go b/storage/txcache/config.go index cdd29d7295e..b4b4638e2b3 100644 --- a/storage/txcache/config.go +++ b/storage/txcache/config.go @@ -6,9 +6,9 @@ type CacheConfig struct { NumChunksHint uint32 EvictionEnabled bool NumBytesThreshold uint32 + NumBytesPerSenderThreshold uint32 CountThreshold uint32 + CountPerSenderThreshold uint32 NumSendersToEvictInOneStep uint32 - LargeNumOfTxsForASender uint32 - NumTxsToEvictFromASender uint32 MinGasPriceMicroErd uint32 } diff --git a/storage/txcache/eviction.go b/storage/txcache/eviction.go index 2ddede3e290..cf7e861435b 100644 --- a/storage/txcache/eviction.go +++ b/storage/txcache/eviction.go @@ -30,15 +30,9 @@ func (cache *TxCache) doEviction() { stopWatch := cache.monitorEvictionStart() - if tooManyTxs { - cache.makeSnapshotOfSenders() - journal.passOneNumTxs, journal.passOneNumSenders = cache.evictHighNonceTransactions() - journal.evictionPerformed = true - } - if cache.shouldContinueEvictingSenders() { cache.makeSnapshotOfSenders() - journal.passTwoNumSteps, journal.passTwoNumTxs, journal.passTwoNumSenders = cache.evictSendersInLoop() + journal.passOneNumSteps, journal.passOneNumTxs, journal.passOneNumSenders = cache.evictSendersInLoop() journal.evictionPerformed = true } @@ -77,40 +71,6 @@ func (cache *TxCache) shouldContinueEvictingSenders() bool { return cache.areThereTooManyTxs() || cache.areThereTooManySenders() || cache.areThereTooManyBytes() } -// evictHighNonceTransactions removes transactions from the cache -// For senders with many transactions (> "LargeNumOfTxsForASender"), evict "NumTxsToEvictFromASender" transactions -// Also makes sure that there's no sender with 0 transactions -func (cache *TxCache) evictHighNonceTransactions() (uint32, uint32) { - threshold := cache.config.LargeNumOfTxsForASender - numTxsToEvict := cache.config.NumTxsToEvictFromASender - - // Heuristics: estimate that ~10% of senders have more transactions than the threshold - sendersToEvictInitialCapacity := len(cache.evictionSnapshotOfSenders)/10 + 1 - txsToEvictInitialCapacity := sendersToEvictInitialCapacity * int(numTxsToEvict) - - sendersToEvict := make([]string, 0, sendersToEvictInitialCapacity) - txsToEvict := make([][]byte, 0, txsToEvictInitialCapacity) - - for _, txList := range cache.evictionSnapshotOfSenders { - if txList.HasMoreThan(threshold) { - txsToEvictForSender := txList.RemoveHighNonceTxs(numTxsToEvict) - txsToEvict = append(txsToEvict, txsToEvictForSender...) - } - - if txList.IsEmpty() { - sendersToEvict = append(sendersToEvict, txList.sender) - } - } - - // Note that, at this very moment, high nonce transactions have been evicted from senders' lists, - // but not yet from the map of transactions. 
- // - // This may cause slight inconsistencies, such as: - // - if a tx previously (recently) removed from the sender's list ("RemoveHighNonceTxs") arrives again at the pool, - // before the execution of "doEvictItems", the tx will be ignored as it still exists (for a short time) in the map of transactions. - return cache.doEvictItems(txsToEvict, sendersToEvict) -} - // This is called concurrently by two goroutines: the eviction one and the sweeping one func (cache *TxCache) doEvictItems(txsToEvict [][]byte, sendersToEvict []string) (countTxs uint32, countSenders uint32) { countTxs = cache.txByHash.RemoveTxsBulk(txsToEvict) diff --git a/storage/txcache/eviction_test.go b/storage/txcache/eviction_test.go index 7f22906285a..f4acf5a3f21 100644 --- a/storage/txcache/eviction_test.go +++ b/storage/txcache/eviction_test.go @@ -9,61 +9,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestEviction_EvictHighNonceTransactions(t *testing.T) { - config := CacheConfig{ - NumChunksHint: 16, - CountThreshold: 400, - LargeNumOfTxsForASender: 50, - NumTxsToEvictFromASender: 25, - MinGasPriceMicroErd: 100, - } - - cache := NewTxCache(config) - - for index := 0; index < 200; index++ { - cache.AddTx(createTx([]byte{'a', byte(index)}, "alice", uint64(index))) - } - - for index := 0; index < 200; index++ { - cache.AddTx(createTx([]byte{'b', byte(index)}, "bob", uint64(index))) - } - - cache.AddTx(createTx([]byte("hash-carol"), "carol", uint64(1))) - - require.Equal(t, int64(3), cache.txListBySender.counter.Get()) - require.Equal(t, int64(401), cache.txByHash.counter.Get()) - - cache.makeSnapshotOfSenders() - nTxs, nSenders := cache.evictHighNonceTransactions() - - require.Equal(t, uint32(50), nTxs) - require.Equal(t, uint32(0), nSenders) - require.Equal(t, int64(3), cache.txListBySender.counter.Get()) - require.Equal(t, int64(351), cache.txByHash.counter.Get()) -} - -func TestEviction_EvictHighNonceTransactions_CoverEmptiedSenderList(t *testing.T) { - config := CacheConfig{ - NumChunksHint: 1, - CountThreshold: 0, - LargeNumOfTxsForASender: 0, - NumTxsToEvictFromASender: 1, - MinGasPriceMicroErd: 100, - } - - cache := NewTxCache(config) - cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) - require.Equal(t, int64(1), cache.CountSenders()) - - cache.makeSnapshotOfSenders() - - // Alice is also removed from the map of senders, since it has no transaction left - nTxs, nSenders := cache.evictHighNonceTransactions() - require.Equal(t, uint32(1), nTxs) - require.Equal(t, uint32(1), nSenders) - require.Equal(t, int64(0), cache.CountSenders()) -} - func TestEviction_EvictSendersWhileTooManyTxs(t *testing.T) { config := CacheConfig{ NumChunksHint: 16, @@ -141,11 +86,9 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfCount(t *testing.T) { cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 1000, 100000, 700*oneTrilion)) cache.doEviction() - require.Equal(t, uint32(0), cache.evictionJournal.passOneNumTxs) - require.Equal(t, uint32(0), cache.evictionJournal.passOneNumSenders) - require.Equal(t, uint32(2), cache.evictionJournal.passTwoNumTxs) - require.Equal(t, uint32(2), cache.evictionJournal.passTwoNumSenders) - require.Equal(t, uint32(1), cache.evictionJournal.passTwoNumSteps) + require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) + require.Equal(t, uint32(2), cache.evictionJournal.passOneNumSenders) + require.Equal(t, uint32(1), cache.evictionJournal.passOneNumSteps) // Alice and Bob evicted. Carol still there. 
_, ok := cache.GetByTxHash([]byte("hash-carol")) @@ -173,11 +116,9 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { require.InDelta(t, float64(100), cache.getRawScoreOfSender("carol"), delta) cache.doEviction() - require.Equal(t, uint32(0), cache.evictionJournal.passOneNumTxs) - require.Equal(t, uint32(0), cache.evictionJournal.passOneNumSenders) - require.Equal(t, uint32(2), cache.evictionJournal.passTwoNumTxs) - require.Equal(t, uint32(2), cache.evictionJournal.passTwoNumSenders) - require.Equal(t, uint32(1), cache.evictionJournal.passTwoNumSteps) + require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) + require.Equal(t, uint32(2), cache.evictionJournal.passOneNumSenders) + require.Equal(t, uint32(1), cache.evictionJournal.passOneNumSteps) // Alice and Bob evicted (lower score). Carol still there. _, ok := cache.GetByTxHash([]byte("hash-carol")) @@ -252,8 +193,6 @@ func Test_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { NumBytesThreshold: 1000000000, CountThreshold: 240000, NumSendersToEvictInOneStep: dataRetriever.TxPoolNumSendersToEvictInOneStep, - LargeNumOfTxsForASender: 1000, - NumTxsToEvictFromASender: 250, } numSenders := 25000 diff --git a/storage/txcache/monitoring.go b/storage/txcache/monitoring.go index 5dcac6617d6..95848b450ca 100644 --- a/storage/txcache/monitoring.go +++ b/storage/txcache/monitoring.go @@ -93,14 +93,11 @@ type evictionJournal struct { evictionPerformed bool passOneNumTxs uint32 passOneNumSenders uint32 - passTwoNumTxs uint32 - passTwoNumSenders uint32 - passTwoNumSteps uint32 + passOneNumSteps uint32 } func (journal *evictionJournal) display() { - log.Debug("Eviction.pass1:", "txs", journal.passOneNumTxs, "senders", journal.passOneNumSenders) - log.Debug("Eviction.pass2:", "txs", journal.passTwoNumTxs, "senders", journal.passTwoNumSenders, "steps", journal.passTwoNumSteps) + log.Debug("Eviction.pass1:", "txs", journal.passOneNumTxs, "senders", journal.passOneNumSenders, "steps", journal.passOneNumSteps) } func (cache *TxCache) diagnose() { diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index cecef150a0c..c163a54c18d 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -246,8 +246,6 @@ func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { NumBytesThreshold: math.MaxUint32, CountThreshold: 100, NumSendersToEvictInOneStep: 1, - LargeNumOfTxsForASender: math.MaxUint32, - NumTxsToEvictFromASender: 0, } // 11 * 10 @@ -261,8 +259,6 @@ func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { NumBytesThreshold: math.MaxUint32, CountThreshold: 250000, NumSendersToEvictInOneStep: 1, - LargeNumOfTxsForASender: math.MaxUint32, - NumTxsToEvictFromASender: 0, } // 100 * 1000 diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index fbb5d16072d..27bc512df6f 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -114,31 +114,6 @@ func (listForSender *txListForSender) onRemovedListElement(element *list.Element listForSender.onScoreChange(listForSender) } -// RemoveHighNonceTxs removes "count" transactions from the back of the list -func (listForSender *txListForSender) RemoveHighNonceTxs(count uint32) [][]byte { - listForSender.mutex.Lock() - defer listForSender.mutex.Unlock() - - removedTxHashes := make([][]byte, count) - - index := uint32(0) - var previous *list.Element - for element := listForSender.items.Back(); element != nil && count > index; 
element = previous { - // Remove node - previous = element.Prev() - listForSender.items.Remove(element) - listForSender.onRemovedListElement(element) - - // Keep track of removed transaction - value := element.Value.(*WrappedTransaction) - removedTxHashes[index] = value.TxHash - - index++ - } - - return removedTxHashes -} - // This function should only be used in critical section (listForSender.mutex) func (listForSender *txListForSender) findListElementWithTx(txToFind *WrappedTransaction) *list.Element { for element := listForSender.items.Front(); element != nil; element = element.Next() { diff --git a/storage/txcache/txListForSender_test.go b/storage/txcache/txListForSender_test.go index 144357e3f66..8632c034d08 100644 --- a/storage/txcache/txListForSender_test.go +++ b/storage/txcache/txListForSender_test.go @@ -97,37 +97,6 @@ func TestListForSender_RemoveTransaction_NoPanicWhenTxMissing(t *testing.T) { require.Equal(t, 0, list.items.Len()) } -func TestListForSender_RemoveHighNonceTransactions(t *testing.T) { - list := newListToTest() - - for index := 0; index < 100; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index))) - } - - list.RemoveHighNonceTxs(50) - require.Equal(t, 50, list.items.Len()) - - list.RemoveHighNonceTxs(20) - require.Equal(t, 30, list.items.Len()) - - list.RemoveHighNonceTxs(30) - require.Equal(t, 0, list.items.Len()) -} - -func TestListForSender_RemoveHighNonceTransactions_NoPanicWhenCornerCases(t *testing.T) { - list := newListToTest() - - for index := 0; index < 100; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index))) - } - - list.RemoveHighNonceTxs(0) - require.Equal(t, 100, list.items.Len()) - - list.RemoveHighNonceTxs(500) - require.Equal(t, 0, list.items.Len()) -} - func TestListForSender_SelectBatchTo(t *testing.T) { list := newListToTest() From a8642f2007183682515ad06604b735c6352cc3d1 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 14:33:47 +0300 Subject: [PATCH 02/25] Sketch config. 
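The per-sender limits are plumbed end to end: TxDataPool.SizePerSender and TxDataPool.SizeInBytesPerSender in config.toml map to the new fields of config.CacheConfig and storageUnit.CacheConfig, and the sharded tx pool forwards them to the tx cache as CountPerSenderThreshold and NumBytesPerSenderThreshold. Unlike the global thresholds, the per-sender values are not divided among the per-shard caches. The per-sender eviction constants (TxPoolLargeNumOfTxsForASender, TxPoolNumTxsToEvictFromASender) become obsolete and are removed.

For illustration only, a pool could be built as sketched below; the numbers mirror the values used in the tests of this patch, and buildPool is a made-up name, not part of the change:

    import (
        "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool"
        "github.com/ElrondNetwork/elrond-go/storage/storageUnit"
    )

    func buildPool() error {
        config := storageUnit.CacheConfig{
            Size:                 900000,    // global count limit, divided among the per-shard caches
            SizePerSender:        1000,      // new: count limit for a single sender
            SizeInBytes:          524288000, // global size limit, divided among the per-shard caches
            SizeInBytesPerSender: 614400,    // new: size limit for a single sender
            Shards:               16,
        }

        args := txpool.ArgShardedTxPool{
            Config:         config,
            MinGasPrice:    100000000000000,
            NumberOfShards: 5,
        }

        _, err := txpool.NewShardedTxPool(args)
        return err
    }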
--- cmd/node/config/config.toml | 2 ++ config/config.go | 10 +++--- dataRetriever/constants.go | 8 ----- .../factory/txpool/txPoolFactory_test.go | 2 +- dataRetriever/txpool/argShardedTxPool.go | 6 ++++ dataRetriever/txpool/shardedTxPool.go | 6 ++-- dataRetriever/txpool/shardedTxPool_test.go | 36 +++++++++++++------ storage/factory/common.go | 10 +++--- storage/storageUnit/storageunit.go | 12 ++++--- 9 files changed, 56 insertions(+), 36 deletions(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 8c442678629..d3316333894 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -220,7 +220,9 @@ [TxDataPool] Size = 900000 + SizePerSender = 1000 SizeInBytes = 524288000 + SizeInBytesPerSender = 614400 Type = "TxCache" Shards = 16 diff --git a/config/config.go b/config/config.go index 47713b6ac89..8fefa671ec8 100644 --- a/config/config.go +++ b/config/config.go @@ -2,10 +2,12 @@ package config // CacheConfig will map the json cache configuration type CacheConfig struct { - Type string `json:"type"` - Size uint32 `json:"size"` - SizeInBytes uint32 `json:"sizeInBytes"` - Shards uint32 `json:"shards"` + Type string `json:"type"` + Size uint32 `json:"size"` + SizePerSender uint32 `json:"sizePerSender"` + SizeInBytes uint32 `json:"sizeInBytes"` + SizeInBytesPerSender uint32 `json:"sizeInBytesPerSender"` + Shards uint32 `json:"shards"` } //HeadersPoolConfig will map the headers cache configuration diff --git a/dataRetriever/constants.go b/dataRetriever/constants.go index a3688510364..e171d666b78 100644 --- a/dataRetriever/constants.go +++ b/dataRetriever/constants.go @@ -3,13 +3,5 @@ package dataRetriever // TxPoolNumSendersToEvictInOneStep instructs tx pool eviction algorithm to remove this many senders when eviction takes place const TxPoolNumSendersToEvictInOneStep = uint32(100) -// TxPoolLargeNumOfTxsForASender instructs tx pool eviction algorithm to tag a sender with more transactions than this value -// as a "sender with a large number of transactions" -const TxPoolLargeNumOfTxsForASender = uint32(500) - -// TxPoolNumTxsToEvictFromASender instructs tx pool eviction algorithm to remove this many transactions -// for "a sender with a large number of transactions" when eviction takes place -const TxPoolNumTxsToEvictFromASender = uint32(100) - // TxPoolMinSizeInBytes is the lower limit of the tx cache / eviction parameter "sizeInBytes" const TxPoolMinSizeInBytes = uint32(40960) diff --git a/dataRetriever/factory/txpool/txPoolFactory_test.go b/dataRetriever/factory/txpool/txPoolFactory_test.go index 3c6c9745aad..5cd85f4c65e 100644 --- a/dataRetriever/factory/txpool/txPoolFactory_test.go +++ b/dataRetriever/factory/txpool/txPoolFactory_test.go @@ -24,7 +24,7 @@ func Test_CreateNewTxPool_ShardedData(t *testing.T) { } func Test_CreateNewTxPool_ShardedTxPool(t *testing.T) { - config := storageUnit.CacheConfig{Size: 100, SizeInBytes: 40960, Shards: 1} + config := storageUnit.CacheConfig{Size: 100, SizePerSender: 1, SizeInBytes: 40960, SizeInBytesPerSender: 40960, Shards: 1} args := txpool.ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 1} txPool, err := CreateTxPool(args) diff --git a/dataRetriever/txpool/argShardedTxPool.go b/dataRetriever/txpool/argShardedTxPool.go index 362c6f47cee..204a5c754cb 100644 --- a/dataRetriever/txpool/argShardedTxPool.go +++ b/dataRetriever/txpool/argShardedTxPool.go @@ -21,9 +21,15 @@ func (args *ArgShardedTxPool) verify() error { if config.SizeInBytes < dataRetriever.TxPoolMinSizeInBytes { 
return fmt.Errorf("%w: config.SizeInBytes is less than [dataRetriever.TxPoolMinSizeInBytes]", dataRetriever.ErrCacheConfigInvalidSizeInBytes) } + if config.SizeInBytesPerSender < dataRetriever.TxPoolMinSizeInBytes { + return fmt.Errorf("%w: config.SizeInBytesPerSender is less than [dataRetriever.TxPoolMinSizeInBytes]", dataRetriever.ErrCacheConfigInvalidSizeInBytes) + } if config.Size < 1 { return fmt.Errorf("%w: config.Size is less than 1", dataRetriever.ErrCacheConfigInvalidSize) } + if config.SizePerSender < 1 { + return fmt.Errorf("%w: config.SizePerSender is less than 1", dataRetriever.ErrCacheConfigInvalidSize) + } if config.Shards < 1 { return fmt.Errorf("%w: config.Shards (map chunks) is less than 1", dataRetriever.ErrCacheConfigInvalidShards) } diff --git a/dataRetriever/txpool/shardedTxPool.go b/dataRetriever/txpool/shardedTxPool.go index 18a7cd9c958..6c4bcfea46f 100644 --- a/dataRetriever/txpool/shardedTxPool.go +++ b/dataRetriever/txpool/shardedTxPool.go @@ -4,7 +4,7 @@ import ( "strconv" "sync" - "github.com/ElrondNetwork/elrond-go-logger" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/core/counting" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -51,10 +51,10 @@ func NewShardedTxPool(args ArgShardedTxPool) (dataRetriever.ShardedDataCacherNot NumChunksHint: args.Config.Shards, EvictionEnabled: true, NumBytesThreshold: args.Config.SizeInBytes / numCaches, + NumBytesPerSenderThreshold: args.Config.SizeInBytesPerSender, CountThreshold: args.Config.Size / numCaches, + CountPerSenderThreshold: args.Config.SizePerSender, NumSendersToEvictInOneStep: dataRetriever.TxPoolNumSendersToEvictInOneStep, - LargeNumOfTxsForASender: dataRetriever.TxPoolLargeNumOfTxsForASender, - NumTxsToEvictFromASender: dataRetriever.TxPoolNumTxsToEvictFromASender, MinGasPriceMicroErd: uint32(args.MinGasPrice / oneTrilion), } diff --git a/dataRetriever/txpool/shardedTxPool_test.go b/dataRetriever/txpool/shardedTxPool_test.go index a24de4aec61..7e1e91788d4 100644 --- a/dataRetriever/txpool/shardedTxPool_test.go +++ b/dataRetriever/txpool/shardedTxPool_test.go @@ -24,29 +24,43 @@ func Test_NewShardedTxPool(t *testing.T) { } func Test_NewShardedTxPool_WhenBadConfig(t *testing.T) { - goodArgs := ArgShardedTxPool{Config: storageUnit.CacheConfig{Size: 100, SizeInBytes: 40960, Shards: 16}, MinGasPrice: 100000000000000, NumberOfShards: 1} + goodArgs := ArgShardedTxPool{Config: storageUnit.CacheConfig{Size: 100, SizePerSender: 10, SizeInBytes: 409600, SizeInBytesPerSender: 40960, Shards: 16}, MinGasPrice: 100000000000000, NumberOfShards: 1} args := goodArgs - args.Config = storageUnit.CacheConfig{SizeInBytes: 1} + args.Config.SizeInBytes = 1 pool, err := NewShardedTxPool(args) require.Nil(t, pool) require.NotNil(t, err) require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidSizeInBytes.Error()) args = goodArgs - args.Config = storageUnit.CacheConfig{SizeInBytes: 40960, Size: 1} + args.Config.SizeInBytesPerSender = 1 pool, err = NewShardedTxPool(args) require.Nil(t, pool) require.NotNil(t, err) - require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidShards.Error()) + require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidSizeInBytes.Error()) + + args = goodArgs + args.Config.Size = 0 + pool, err = NewShardedTxPool(args) + require.Nil(t, pool) + require.NotNil(t, err) + require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidSize.Error()) args = goodArgs - args.Config = storageUnit.CacheConfig{SizeInBytes: 40960, 
Shards: 1} + args.Config.SizePerSender = 0 pool, err = NewShardedTxPool(args) require.Nil(t, pool) require.NotNil(t, err) require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidSize.Error()) + args = goodArgs + args.Config.Shards = 0 + pool, err = NewShardedTxPool(args) + require.Nil(t, pool) + require.NotNil(t, err) + require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidShards.Error()) + args = goodArgs args.MinGasPrice = 0 pool, err = NewShardedTxPool(args) @@ -63,7 +77,7 @@ func Test_NewShardedTxPool_WhenBadConfig(t *testing.T) { } func Test_NewShardedTxPool_ComputesCacheConfig(t *testing.T) { - config := storageUnit.CacheConfig{SizeInBytes: 524288000, Size: 900000, Shards: 1} + config := storageUnit.CacheConfig{SizeInBytes: 524288000, SizeInBytesPerSender: 614400, Size: 900000, SizePerSender: 1000, Shards: 1} args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 5} poolAsInterface, err := NewShardedTxPool(args) @@ -73,10 +87,10 @@ func Test_NewShardedTxPool_ComputesCacheConfig(t *testing.T) { require.Equal(t, true, pool.cacheConfigPrototype.EvictionEnabled) require.Equal(t, uint32(58254222), pool.cacheConfigPrototype.NumBytesThreshold) + require.Equal(t, uint32(614400), pool.cacheConfigPrototype.NumBytesPerSenderThreshold) require.Equal(t, uint32(100000), pool.cacheConfigPrototype.CountThreshold) + require.Equal(t, uint32(1000), pool.cacheConfigPrototype.CountPerSenderThreshold) require.Equal(t, uint32(100), pool.cacheConfigPrototype.NumSendersToEvictInOneStep) - require.Equal(t, uint32(500), pool.cacheConfigPrototype.LargeNumOfTxsForASender) - require.Equal(t, uint32(100), pool.cacheConfigPrototype.NumTxsToEvictFromASender) require.Equal(t, uint32(100), pool.cacheConfigPrototype.MinGasPriceMicroErd) require.Equal(t, uint32(291271110), pool.cacheConfigPrototypeForSelfShard.NumBytesThreshold) require.Equal(t, uint32(500000), pool.cacheConfigPrototypeForSelfShard.CountThreshold) @@ -304,7 +318,7 @@ func Test_NotImplementedFunctions(t *testing.T) { } func Test_routeToCacheUnions(t *testing.T) { - config := storageUnit.CacheConfig{Size: 100, SizeInBytes: 40960, Shards: 16} + config := storageUnit.CacheConfig{Size: 100, SizePerSender: 10, SizeInBytes: 409600, SizeInBytesPerSender: 40960, Shards: 16} args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 4, SelfShardID: 42} poolAsInterface, _ := NewShardedTxPool(args) pool := poolAsInterface.(*shardedTxPool) @@ -319,7 +333,7 @@ func Test_routeToCacheUnions(t *testing.T) { } func Test_getCacheConfig(t *testing.T) { - config := storageUnit.CacheConfig{Size: 150, SizeInBytes: 61440, Shards: 16} + config := storageUnit.CacheConfig{Size: 150, SizePerSender: 1, SizeInBytes: 61440, SizeInBytesPerSender: 40960, Shards: 16} args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 8, SelfShardID: 4} poolAsInterface, _ := NewShardedTxPool(args) pool := poolAsInterface.(*shardedTxPool) @@ -353,7 +367,7 @@ type thisIsNotATransaction struct { } func newTxPoolToTest() (dataRetriever.ShardedDataCacherNotifier, error) { - config := storageUnit.CacheConfig{Size: 100, SizeInBytes: 40960, Shards: 16} + config := storageUnit.CacheConfig{Size: 100, SizePerSender: 10, SizeInBytes: 409600, SizeInBytesPerSender: 40960, Shards: 16} args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 4} return NewShardedTxPool(args) } diff --git a/storage/factory/common.go b/storage/factory/common.go index fec9d8da1ce..d431acfc946 100644 --- 
a/storage/factory/common.go +++ b/storage/factory/common.go @@ -13,10 +13,12 @@ const allFiles = -1 // GetCacherFromConfig will return the cache config needed for storage unit from a config came from the toml file func GetCacherFromConfig(cfg config.CacheConfig) storageUnit.CacheConfig { return storageUnit.CacheConfig{ - Size: cfg.Size, - SizeInBytes: cfg.SizeInBytes, - Type: storageUnit.CacheType(cfg.Type), - Shards: cfg.Shards, + Size: cfg.Size, + SizePerSender: cfg.SizePerSender, + SizeInBytes: cfg.SizeInBytes, + SizeInBytesPerSender: cfg.SizeInBytesPerSender, + Type: storageUnit.CacheType(cfg.Type), + Shards: cfg.Shards, } } diff --git a/storage/storageUnit/storageunit.go b/storage/storageUnit/storageunit.go index cf2703cc960..eecce45a640 100644 --- a/storage/storageUnit/storageunit.go +++ b/storage/storageUnit/storageunit.go @@ -7,7 +7,7 @@ import ( "sync" "time" - "github.com/ElrondNetwork/elrond-go-logger" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/hashing" @@ -67,10 +67,12 @@ type UnitConfig struct { // CacheConfig holds the configurable elements of a cache type CacheConfig struct { - Type CacheType - SizeInBytes uint32 - Size uint32 - Shards uint32 + Type CacheType + SizeInBytes uint32 + SizeInBytesPerSender uint32 + Size uint32 + SizePerSender uint32 + Shards uint32 } // DBConfig holds the configurable elements of a database From 879add50e7e6914e62af48b6eff400c187bbcd3e Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 14:54:53 +0300 Subject: [PATCH 03/25] Fix initialization of mocks etc. --- epochStart/metachain/epochStartData_test.go | 8 +++++--- integrationTests/consensus/testInitializer.go | 8 +++++--- integrationTests/testInitializer.go | 8 +++++--- process/block/preprocess/transactions_test.go | 8 +++++--- process/block/shardblock_test.go | 8 +++++--- process/coordinator/process_test.go | 8 +++++--- process/mock/poolsHolderMock.go | 8 +++++--- update/mock/poolsHolderMock.go | 8 +++++--- 8 files changed, 40 insertions(+), 24 deletions(-) diff --git a/epochStart/metachain/epochStartData_test.go b/epochStart/metachain/epochStartData_test.go index d9b1ca3a913..5563c0fdaa7 100644 --- a/epochStart/metachain/epochStartData_test.go +++ b/epochStart/metachain/epochStartData_test.go @@ -102,9 +102,11 @@ func createTxPool(selfShardID uint32) (dataRetriever.ShardedDataCacherNotifier, return txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 16, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, MinGasPrice: 100000000000000, NumberOfShards: 1, diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index e291c4c1b87..e1249e03c58 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -164,9 +164,11 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { txPool, _ := txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 1, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, MinGasPrice: 100000000000000, NumberOfShards: 1, diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 
46b19daf117..d5f57e50d56 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -1901,9 +1901,11 @@ func createTxPool(selfShardID uint32) (dataRetriever.ShardedDataCacherNotifier, return txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 16, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, MinGasPrice: 100000000000000, NumberOfShards: 1, diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index f6c46390ed3..676bde4d612 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -1057,9 +1057,11 @@ func createTxPool() (dataRetriever.ShardedDataCacherNotifier, error) { return txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 1, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, MinGasPrice: 100000000000000, NumberOfShards: 1, diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 42c5be6feb9..2f2528804ab 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -49,9 +49,11 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { txPool, _ := txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 1, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, MinGasPrice: 100000000000000, NumberOfShards: 1, diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index eadcf6ce54e..87fd193fb1a 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -2548,9 +2548,11 @@ func createTxPool() (dataRetriever.ShardedDataCacherNotifier, error) { return txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 1, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, MinGasPrice: 100000000000000, NumberOfShards: 1, diff --git a/process/mock/poolsHolderMock.go b/process/mock/poolsHolderMock.go index 89f8ac9b3bb..9bd448e8c18 100644 --- a/process/mock/poolsHolderMock.go +++ b/process/mock/poolsHolderMock.go @@ -30,9 +30,11 @@ func NewPoolsHolderMock() *PoolsHolderMock { phf.transactions, _ = txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 10000, - SizeInBytes: 1000000000, - Shards: 16, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, MinGasPrice: 100000000000000, NumberOfShards: 1, diff --git a/update/mock/poolsHolderMock.go b/update/mock/poolsHolderMock.go index 54e593f9241..9d2680b65c7 100644 --- a/update/mock/poolsHolderMock.go +++ b/update/mock/poolsHolderMock.go @@ -29,9 +29,11 @@ func NewPoolsHolderMock() *PoolsHolderMock { phf.transactions, _ = txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 10000, - SizeInBytes: 1000000000, - Shards: 16, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, MinGasPrice: 100000000000000, 
NumberOfShards: 1, From 65b3c129653f1ecde6de3c7d4ade105effc963d9 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 16:55:37 +0300 Subject: [PATCH 04/25] Sketch application of limits. --- storage/txcache/txListForSender.go | 37 ++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index 27bc512df6f..5dfb691402a 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -45,7 +45,7 @@ func newTxListForSender(sender string, cacheConfig *CacheConfig, onScoreChange s // AddTx adds a transaction in sender's list // This is a "sorted" insert -func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) { +func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) [][]byte { // We don't allow concurrent interceptor goroutines to mutate a given sender's list listForSender.mutex.Lock() defer listForSender.mutex.Unlock() @@ -61,12 +61,45 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) { } listForSender.onAddedTransaction(tx) + evicted := listForSender.applyLimit() + listForSender.triggerScoreChange() + + return evicted +} + +// This function should only be used in critical section (listForSender.mutex) +func (listForSender *txListForSender) applyLimit() [][]byte { + evictedTxHashes := make([][]byte, 0) + + for element := listForSender.items.Back(); element != nil; element = element.Prev() { + if !listForSender.isLimitReached() { + break + } + + listForSender.items.Remove(element) + listForSender.onRemovedListElement(element) + + // Keep track of removed transaction + value := element.Value.(*WrappedTransaction) + evictedTxHashes = append(evictedTxHashes, value.TxHash) + } + + return evictedTxHashes +} + +func (listForSender *txListForSender) isLimitReached() { + tooManyBytes := listForSender.totalBytes.Get() > listForSender.cacheConfig.NumBytesPerSenderThreshold + tooManyTxs := listForSender.countTx() > listForSender.cacheConfig.CountPerSenderThreshold + return tooManyBytes || tooManyTxs } func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction) { listForSender.totalBytes.Add(int64(estimateTxSize(tx))) listForSender.totalGas.Add(int64(estimateTxGas(tx))) listForSender.totalFee.Add(int64(estimateTxFee(tx))) +} + +func (listForSender *txListForSender) triggerScoreChange() { listForSender.onScoreChange(listForSender) } @@ -100,6 +133,7 @@ func (listForSender *txListForSender) RemoveTx(tx *WrappedTransaction) bool { if isFound { listForSender.items.Remove(marker) listForSender.onRemovedListElement(marker) + listForSender.triggerScoreChange() } return isFound @@ -111,7 +145,6 @@ func (listForSender *txListForSender) onRemovedListElement(element *list.Element listForSender.totalBytes.Subtract(int64(estimateTxSize(value))) listForSender.totalGas.Subtract(int64(estimateTxGas(value))) listForSender.totalFee.Subtract(int64(estimateTxFee(value))) - listForSender.onScoreChange(listForSender) } // This function should only be used in critical section (listForSender.mutex) From b0b1d0c8ddfca02a79d3bb6a28f51fe8de870ede Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 16:58:39 +0300 Subject: [PATCH 05/25] Fix build. 
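The build fix: isLimitReached was sketched without a declared return type and compared the atomic counters (int64 and uint64) directly against the uint32 config thresholds, which Go rejects as mismatched types. It now returns bool and converts the thresholds before comparing. A standalone illustration of the type issue, with made-up names and values:

    func whyTheCastIsNeeded() bool {
        var totalBytes int64 = 700000 // what the atomic counter yields
        var threshold uint32 = 614400 // what the cache config stores
        // return totalBytes > threshold     // mismatched types: does not compile
        return totalBytes > int64(threshold) // fine: a uint32 value always fits in an int64
    }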
--- storage/txcache/txListForSender.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index 5dfb691402a..6b71f9bb87b 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -87,9 +87,9 @@ func (listForSender *txListForSender) applyLimit() [][]byte { return evictedTxHashes } -func (listForSender *txListForSender) isLimitReached() { - tooManyBytes := listForSender.totalBytes.Get() > listForSender.cacheConfig.NumBytesPerSenderThreshold - tooManyTxs := listForSender.countTx() > listForSender.cacheConfig.CountPerSenderThreshold +func (listForSender *txListForSender) isLimitReached() bool { + tooManyBytes := listForSender.totalBytes.Get() > int64(listForSender.cacheConfig.NumBytesPerSenderThreshold) + tooManyTxs := listForSender.countTx() > uint64(listForSender.cacheConfig.CountPerSenderThreshold) return tooManyBytes || tooManyTxs } From ae53ec5cc18bde3d57a4f041d316cdeac3594840 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 17:05:34 +0300 Subject: [PATCH 06/25] Temporarily bypass application of limits. --- storage/txcache/txListForSender.go | 7 +++++-- storage/txcache/txListForSender_test.go | 4 ++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index 6b71f9bb87b..bcbf18fbfed 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -88,8 +88,11 @@ func (listForSender *txListForSender) applyLimit() [][]byte { } func (listForSender *txListForSender) isLimitReached() bool { - tooManyBytes := listForSender.totalBytes.Get() > int64(listForSender.cacheConfig.NumBytesPerSenderThreshold) - tooManyTxs := listForSender.countTx() > uint64(listForSender.cacheConfig.CountPerSenderThreshold) + maxBytes := int64(listForSender.cacheConfig.NumBytesPerSenderThreshold) + maxNumTxs := uint64(listForSender.cacheConfig.CountPerSenderThreshold) + tooManyBytes := maxBytes > 0 && listForSender.totalBytes.Get() > maxBytes + tooManyTxs := maxNumTxs > 0 && listForSender.countTx() > maxNumTxs + return tooManyBytes || tooManyTxs } diff --git a/storage/txcache/txListForSender_test.go b/storage/txcache/txListForSender_test.go index 8632c034d08..61ec88bfc27 100644 --- a/storage/txcache/txListForSender_test.go +++ b/storage/txcache/txListForSender_test.go @@ -63,6 +63,10 @@ func TestListForSender_findTx(t *testing.T) { elementWithB := list.findListElementWithTx(txB) noElementWithD := list.findListElementWithTx(txD) + require.NotNil(t, elementWithA) + require.NotNil(t, elementWithANewer) + require.NotNil(t, elementWithB) + require.Equal(t, txA, elementWithA.Value.(*WrappedTransaction)) require.Equal(t, txANewer, elementWithANewer.Value.(*WrappedTransaction)) require.Equal(t, txB, elementWithB.Value.(*WrappedTransaction)) From dfdc4c4b7b35339f0604c40e3b9ea44a5cfa3c27 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 17:08:54 +0300 Subject: [PATCH 07/25] Add type alias. 
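txHashes is a type alias (note the '='), not a newly defined type, so values move between txHashes and [][]byte with no conversions, and method signatures declared with the alias still match interfaces written against [][]byte, such as the storage.Cacher assertion kept at the top of txCache.go. A minimal, self-contained sketch of why the alias form matters (keyser and c are invented names for the example):

    type txHashes = [][]byte // alias: txHashes and [][]byte are the same type

    type keyser interface{ Keys() [][]byte }

    type c struct{}

    func (c) Keys() txHashes { return nil }

    var _ keyser = c{} // compiles only because txHashes is an alias; a defined type would not satisfy keyser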
--- storage/txcache/eviction.go | 4 ++-- storage/txcache/txByHashMap.go | 6 +++--- storage/txcache/txCache.go | 4 +++- storage/txcache/txListForSender.go | 10 +++++----- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/storage/txcache/eviction.go b/storage/txcache/eviction.go index cf7e861435b..64394de2152 100644 --- a/storage/txcache/eviction.go +++ b/storage/txcache/eviction.go @@ -72,7 +72,7 @@ func (cache *TxCache) shouldContinueEvictingSenders() bool { } // This is called concurrently by two goroutines: the eviction one and the sweeping one -func (cache *TxCache) doEvictItems(txsToEvict [][]byte, sendersToEvict []string) (countTxs uint32, countSenders uint32) { +func (cache *TxCache) doEvictItems(txsToEvict txHashes, sendersToEvict []string) (countTxs uint32, countSenders uint32) { countTxs = cache.txByHash.RemoveTxsBulk(txsToEvict) countSenders = cache.txListBySender.RemoveSendersBulk(sendersToEvict) return @@ -116,7 +116,7 @@ func (cache *TxCache) evictSendersWhile(shouldContinue func() bool) (step uint32 // This is called concurrently by two goroutines: the eviction one and the sweeping one func (cache *TxCache) evictSendersAndTheirTxs(listsToEvict []*txListForSender) (uint32, uint32) { sendersToEvict := make([]string, 0, len(listsToEvict)) - txsToEvict := make([][]byte, 0, approximatelyCountTxInLists(listsToEvict)) + txsToEvict := make(txHashes, 0, approximatelyCountTxInLists(listsToEvict)) for _, txList := range listsToEvict { sendersToEvict = append(sendersToEvict, txList.sender) diff --git a/storage/txcache/txByHashMap.go b/storage/txcache/txByHashMap.go index 626bce1b8a4..bf0c23ace68 100644 --- a/storage/txcache/txByHashMap.go +++ b/storage/txcache/txByHashMap.go @@ -57,7 +57,7 @@ func (txMap *txByHashMap) getTx(txHash string) (*WrappedTransaction, bool) { } // RemoveTxsBulk removes transactions, in bulk -func (txMap *txByHashMap) RemoveTxsBulk(txHashes [][]byte) uint32 { +func (txMap *txByHashMap) RemoveTxsBulk(txHashes txHashes) uint32 { oldCount := uint32(txMap.counter.Get()) for _, txHash := range txHashes { @@ -85,9 +85,9 @@ func (txMap *txByHashMap) clear() { txMap.counter.Set(0) } -func (txMap *txByHashMap) keys() [][]byte { +func (txMap *txByHashMap) keys() txHashes { keys := txMap.backingMap.Keys() - keysAsBytes := make([][]byte, len(keys)) + keysAsBytes := make(txHashes, len(keys)) for i := 0; i < len(keys); i++ { keysAsBytes[i] = []byte(keys[i]) } diff --git a/storage/txcache/txCache.go b/storage/txcache/txCache.go index 680c76ea6da..70a3e4cb46d 100644 --- a/storage/txcache/txCache.go +++ b/storage/txcache/txCache.go @@ -10,6 +10,8 @@ import ( var _ storage.Cacher = (*TxCache)(nil) +type txHashes = [][]byte + // TxCache represents a cache-like structure (it has a fixed capacity and implements an eviction mechanism) for holding transactions type TxCache struct { name string @@ -234,7 +236,7 @@ func (cache *TxCache) RemoveOldest() { } // Keys returns the tx hashes in the cache -func (cache *TxCache) Keys() [][]byte { +func (cache *TxCache) Keys() txHashes { return cache.txByHash.keys() } diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index bcbf18fbfed..b7491ce5675 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -45,7 +45,7 @@ func newTxListForSender(sender string, cacheConfig *CacheConfig, onScoreChange s // AddTx adds a transaction in sender's list // This is a "sorted" insert -func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) [][]byte { +func 
(listForSender *txListForSender) AddTx(tx *WrappedTransaction) txHashes { // We don't allow concurrent interceptor goroutines to mutate a given sender's list listForSender.mutex.Lock() defer listForSender.mutex.Unlock() @@ -68,8 +68,8 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) [][]byte { } // This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) applyLimit() [][]byte { - evictedTxHashes := make([][]byte, 0) +func (listForSender *txListForSender) applyLimit() txHashes { + evictedTxHashes := make(txHashes, 0) for element := listForSender.items.Back(); element != nil; element = element.Prev() { if !listForSender.isLimitReached() { @@ -236,11 +236,11 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati } // getTxHashes returns the hashes of transactions in the list -func (listForSender *txListForSender) getTxHashes() [][]byte { +func (listForSender *txListForSender) getTxHashes() txHashes { listForSender.mutex.RLock() defer listForSender.mutex.RUnlock() - result := make([][]byte, 0, listForSender.countTx()) + result := make(txHashes, 0, listForSender.countTx()) for element := listForSender.items.Front(); element != nil; element = element.Next() { value := element.Value.(*WrappedTransaction) From 264c1eeb4217e049b3da5b0f70e1a459f649aa2e Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 18:12:07 +0300 Subject: [PATCH 08/25] Sketch deduplication. --- storage/txcache/txCache.go | 6 ++++-- storage/txcache/txListBySenderMap.go | 4 ++-- storage/txcache/txListForSender.go | 10 ++++++++-- storage/txcache/wrappedTransaction.go | 10 +++++++++- 4 files changed, 23 insertions(+), 7 deletions(-) diff --git a/storage/txcache/txCache.go b/storage/txcache/txCache.go index 70a3e4cb46d..29f600776b6 100644 --- a/storage/txcache/txCache.go +++ b/storage/txcache/txCache.go @@ -64,12 +64,14 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { } ok = true - added = cache.txByHash.addTx(tx) + added, evicted := cache.txListBySender.addTx(tx) if added { - cache.txListBySender.addTx(tx) + cache.txByHash.addTx(tx) cache.monitorTxAddition() } + cache.txByHash.RemoveTxsBulk(evicted) + return } diff --git a/storage/txcache/txListBySenderMap.go b/storage/txcache/txListBySenderMap.go index 5c41ad5e2a1..119d4e755b9 100644 --- a/storage/txcache/txListBySenderMap.go +++ b/storage/txcache/txListBySenderMap.go @@ -25,10 +25,10 @@ func newTxListBySenderMap(nChunksHint uint32, cacheConfig CacheConfig) txListByS } // addTx adds a transaction in the map, in the corresponding list (selected by its sender) -func (txMap *txListBySenderMap) addTx(tx *WrappedTransaction) { +func (txMap *txListBySenderMap) addTx(tx *WrappedTransaction) (bool, txHashes) { sender := string(tx.Tx.GetSndAddr()) listForSender := txMap.getOrAddListForSender(sender) - listForSender.AddTx(tx) + return listForSender.AddTx(tx) } func (txMap *txListBySenderMap) getOrAddListForSender(sender string) *txListForSender { diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index b7491ce5675..6f8e13f8650 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -45,7 +45,7 @@ func newTxListForSender(sender string, cacheConfig *CacheConfig, onScoreChange s // AddTx adds a transaction in sender's list // This is a "sorted" insert -func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) txHashes { +func (listForSender *txListForSender) 
AddTx(tx *WrappedTransaction) (bool, txHashes) { // We don't allow concurrent interceptor goroutines to mutate a given sender's list listForSender.mutex.Lock() defer listForSender.mutex.Unlock() @@ -56,7 +56,13 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) txHashes { if insertionPlace == nil { listForSender.items.PushFront(tx) + // TODO: fix, check duplicated here. } else { + duplicated := insertionPlace.Value.(*WrappedTransaction).sameAs(tx) + if duplicated { + return false, nil + } + listForSender.items.InsertAfter(tx, insertionPlace) } @@ -64,7 +70,7 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) txHashes { evicted := listForSender.applyLimit() listForSender.triggerScoreChange() - return evicted + return true, evicted } // This function should only be used in critical section (listForSender.mutex) diff --git a/storage/txcache/wrappedTransaction.go b/storage/txcache/wrappedTransaction.go index 84c08b5afbc..354fb058b69 100644 --- a/storage/txcache/wrappedTransaction.go +++ b/storage/txcache/wrappedTransaction.go @@ -1,6 +1,10 @@ package txcache -import "github.com/ElrondNetwork/elrond-go/data" +import ( + "bytes" + + "github.com/ElrondNetwork/elrond-go/data" +) // WrappedTransaction contains a transaction, its hash and extra information type WrappedTransaction struct { @@ -9,3 +13,7 @@ type WrappedTransaction struct { SenderShardID uint32 ReceiverShardID uint32 } + +func (wrappedTx *WrappedTransaction) sameAs(another *WrappedTransaction) bool { + return bytes.Equal(wrappedTx.TxHash, another.TxHash) +} From edef44f4b1774a1b5b5bf6ea3fd51d51643b64a5 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 18:33:56 +0300 Subject: [PATCH 09/25] Fix deduplication logic. --- storage/txcache/errors.go | 8 +++--- storage/txcache/txByHashMap.go | 1 + storage/txcache/txCache.go | 5 ++-- storage/txcache/txCache_test.go | 4 +-- storage/txcache/txListForSender.go | 39 ++++++++++++++++++------------ 5 files changed, 31 insertions(+), 26 deletions(-) diff --git a/storage/txcache/errors.go b/storage/txcache/errors.go index 26e9c62aade..ba52ba431ba 100644 --- a/storage/txcache/errors.go +++ b/storage/txcache/errors.go @@ -2,8 +2,6 @@ package txcache import "fmt" -// ErrTxNotFound signals that the transactions was not found in the cache -var ErrTxNotFound = fmt.Errorf("tx not found in cache") - -// ErrMapsSyncInconsistency signals that there's an inconsistency between the internal maps on which the cache relies -var ErrMapsSyncInconsistency = fmt.Errorf("maps sync inconsistency between 'txByHash' and 'txListBySender'") +var errTxNotFound = fmt.Errorf("tx not found in cache") +var errMapsSyncInconsistency = fmt.Errorf("maps sync inconsistency between 'txByHash' and 'txListBySender'") +var errTxDuplicated = fmt.Errorf("duplicated tx") diff --git a/storage/txcache/txByHashMap.go b/storage/txcache/txByHashMap.go index bf0c23ace68..28372087d8f 100644 --- a/storage/txcache/txByHashMap.go +++ b/storage/txcache/txByHashMap.go @@ -65,6 +65,7 @@ func (txMap *txByHashMap) RemoveTxsBulk(txHashes txHashes) uint32 { } newCount := uint32(txMap.counter.Get()) + // TODO: Check this for overflow as well? 
numRemoved := oldCount - newCount return numRemoved } diff --git a/storage/txcache/txCache.go b/storage/txcache/txCache.go index 29f600776b6..b05cdc490eb 100644 --- a/storage/txcache/txCache.go +++ b/storage/txcache/txCache.go @@ -71,7 +71,6 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { } cache.txByHash.RemoveTxsBulk(evicted) - return } @@ -146,7 +145,7 @@ func (cache *TxCache) doAfterSelection() { func (cache *TxCache) RemoveTxByHash(txHash []byte) error { tx, ok := cache.txByHash.removeTx(string(txHash)) if !ok { - return ErrTxNotFound + return errTxNotFound } cache.monitorTxRemoval() @@ -154,7 +153,7 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) error { found := cache.txListBySender.removeTx(tx) if !found { cache.onRemoveTxInconsistency(txHash) - return ErrMapsSyncInconsistency + return errMapsSyncInconsistency } return nil diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index c163a54c18d..f5684808a96 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -107,7 +107,7 @@ func Test_GetByTxHash_And_Peek_And_Get(t *testing.T) { func Test_RemoveByTxHash_Error_WhenMissing(t *testing.T) { cache := newCacheToTest() err := cache.RemoveTxByHash([]byte("missing")) - require.Equal(t, err, ErrTxNotFound) + require.Equal(t, err, errTxNotFound) } func Test_RemoveByTxHash_Error_WhenMapsInconsistency(t *testing.T) { @@ -121,7 +121,7 @@ func Test_RemoveByTxHash_Error_WhenMapsInconsistency(t *testing.T) { cache.txListBySender.removeTx(tx) err := cache.RemoveTxByHash(txHash) - require.Equal(t, err, ErrMapsSyncInconsistency) + require.Equal(t, err, errMapsSyncInconsistency) } func Test_Clear(t *testing.T) { diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index 6f8e13f8650..a111609dbb2 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -50,19 +50,14 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) (bool, txHas listForSender.mutex.Lock() defer listForSender.mutex.Unlock() - nonce := tx.Tx.GetNonce() - gasPrice := tx.Tx.GetGasPrice() - insertionPlace := listForSender.findInsertionPlace(nonce, gasPrice) + insertionPlace, err := listForSender.findInsertionPlace(tx) + if err != nil { + return false, nil + } if insertionPlace == nil { listForSender.items.PushFront(tx) - // TODO: fix, check duplicated here. 
} else { - duplicated := insertionPlace.Value.(*WrappedTransaction).sameAs(tx) - if duplicated { - return false, nil - } - listForSender.items.InsertAfter(tx, insertionPlace) } @@ -113,22 +108,34 @@ func (listForSender *txListForSender) triggerScoreChange() { } // This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) findInsertionPlace(incomingNonce uint64, incomingGasPrice uint64) *list.Element { +func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTransaction) (*list.Element, error) { + incomingNonce := incomingTx.Tx.GetNonce() + incomingGasPrice := incomingTx.Tx.GetGasPrice() + for element := listForSender.items.Back(); element != nil; element = element.Prev() { - tx := element.Value.(*WrappedTransaction).Tx - nonce := tx.GetNonce() - gasPrice := tx.GetGasPrice() + tx := element.Value.(*WrappedTransaction) + nonce := tx.Tx.GetNonce() + gasPrice := tx.Tx.GetGasPrice() + + if incomingTx.sameAs(tx) { + // The incoming transaction will be discarded + return nil, errTxDuplicated + } if nonce == incomingNonce && gasPrice > incomingGasPrice { - return element + // The incoming transaction will be placed right after the existing one (with higher price). + return element, nil } if nonce < incomingNonce { - return element + // We've found the first transaction with a lower nonce than the incoming one, + // thus the incoming transaction will be placed right after this one. + return element, nil } } - return nil + // The incoming transaction will be inserted at the head of the list. + return nil, nil } // RemoveTx removes a transaction from the sender's list From a2efbd588bd8de5f766a303ab6a0b0a1dbf8000c Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 18:52:27 +0300 Subject: [PATCH 10/25] Fix some tests, remove unused code. 
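With the hash-based duplicate check from the previous commit, tests that reused one hash for two different transactions of the same sender have to switch to distinct hashes ("a1", "a2"), otherwise the second AddTx is silently rejected as a duplicate even though its nonce differs. The now-unreferenced HasMoreThan helper is also dropped. The duplicate check only compares hashes; a small sketch of the behaviour, meant for a _test.go file in package txcache (fmt imported, hash values invented):

    func Example_sameAs() {
        a1 := &WrappedTransaction{TxHash: []byte("a1")}
        a2 := &WrappedTransaction{TxHash: []byte("a2")}
        fmt.Println(a1.sameAs(a2)) // false: different hashes, both transactions are kept
        fmt.Println(a1.sameAs(a1)) // true: an identical hash is treated as a duplicate by AddTx
        // Output:
        // false
        // true
    }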
--- storage/txcache/testutils_test.go | 6 +++++- storage/txcache/txListBySenderMap.go | 4 ++-- storage/txcache/txListBySenderMap_test.go | 8 ++++++-- storage/txcache/txListForSender.go | 5 ----- storage/txcache/txListForSender_test.go | 5 ++--- 5 files changed, 15 insertions(+), 13 deletions(-) diff --git a/storage/txcache/testutils_test.go b/storage/txcache/testutils_test.go index 7cac87d2f0c..c694f242426 100644 --- a/storage/txcache/testutils_test.go +++ b/storage/txcache/testutils_test.go @@ -23,7 +23,11 @@ func kBToBytes(kB float32) uint64 { } func (cache *TxCache) getListForSender(sender string) *txListForSender { - list, ok := cache.txListBySender.getListForSender(sender) + return cache.txListBySender.testGetListForSender(sender) +} + +func (sendersMap *txListBySenderMap) testGetListForSender(sender string) *txListForSender { + list, ok := sendersMap.getListForSender(sender) if !ok { panic("sender not in cache") } diff --git a/storage/txcache/txListBySenderMap.go b/storage/txcache/txListBySenderMap.go index 3972f20cf6b..1c6b7a2f14c 100644 --- a/storage/txcache/txListBySenderMap.go +++ b/storage/txcache/txListBySenderMap.go @@ -75,8 +75,8 @@ func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { } isFound := listForSender.RemoveTx(tx) - - if listForSender.IsEmpty() { + isEmpty := listForSender.IsEmpty() + if isEmpty { txMap.removeSender(sender) } diff --git a/storage/txcache/txListBySenderMap_test.go b/storage/txcache/txListBySenderMap_test.go index 36d740659e0..2962e96b67c 100644 --- a/storage/txcache/txListBySenderMap_test.go +++ b/storage/txcache/txListBySenderMap_test.go @@ -22,17 +22,21 @@ func TestSendersMap_AddTx_IncrementsCounter(t *testing.T) { func TestSendersMap_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T) { myMap := newSendersMapToTest() - txAlice1 := createTx([]byte("a"), "alice", uint64(1)) - txAlice2 := createTx([]byte("a"), "alice", uint64(2)) + txAlice1 := createTx([]byte("a1"), "alice", uint64(1)) + txAlice2 := createTx([]byte("a2"), "alice", uint64(2)) txBob := createTx([]byte("b"), "bob", uint64(1)) myMap.addTx(txAlice1) myMap.addTx(txAlice2) myMap.addTx(txBob) require.Equal(t, int64(2), myMap.counter.Get()) + require.Equal(t, uint64(2), myMap.testGetListForSender("alice").countTx()) + require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) myMap.removeTx(txAlice1) require.Equal(t, int64(2), myMap.counter.Get()) + require.Equal(t, uint64(1), myMap.testGetListForSender("alice").countTx()) + require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) myMap.removeTx(txAlice2) // All alice's transactions have been removed now diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index a111609dbb2..04833233ce6 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -181,11 +181,6 @@ func (listForSender *txListForSender) findListElementWithTx(txToFind *WrappedTra return nil } -// HasMoreThan checks whether the list has more items than specified -func (listForSender *txListForSender) HasMoreThan(count uint32) bool { - return uint32(listForSender.countTxWithLock()) > count -} - // IsEmpty checks whether the list is empty func (listForSender *txListForSender) IsEmpty() bool { return listForSender.countTxWithLock() == 0 diff --git a/storage/txcache/txListForSender_test.go b/storage/txcache/txListForSender_test.go index 61ec88bfc27..ec1e841b502 100644 --- a/storage/txcache/txListForSender_test.go +++ b/storage/txcache/txListForSender_test.go @@ 
-263,10 +263,10 @@ func TestListForSender_hasInitialGap(t *testing.T) { // No transaction, no gap require.False(t, list.hasInitialGap()) // One gap - list.AddTx(createTx([]byte("tx-44"), ".", 43)) + list.AddTx(createTx([]byte("tx-43"), ".", 43)) require.True(t, list.hasInitialGap()) // Resolve gap - list.AddTx(createTx([]byte("tx-44"), ".", 42)) + list.AddTx(createTx([]byte("tx-42"), ".", 42)) require.False(t, list.hasInitialGap()) } @@ -288,7 +288,6 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { go func() { // These are called concurrently with addition: during eviction, during removal etc. approximatelyCountTxInLists([]*txListForSender{list}) - list.HasMoreThan(42) list.IsEmpty() }() From 959d65b56bb5ede6cbaea2eb584aca3a18951a82 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 20:22:17 +0300 Subject: [PATCH 11/25] Verify cache config. Extract interface. Add disabled cache (for critical errors). --- dataRetriever/txpool/interface.go | 17 +++ dataRetriever/txpool/shardedTxPool.go | 19 ++- dataRetriever/txpool/shardedTxPool_test.go | 2 +- storage/txcache/config.go | 40 +++++++ storage/txcache/disabledCache.go | 127 +++++++++++++++++++++ storage/txcache/errors.go | 1 + storage/txcache/eviction_test.go | 38 ++++-- storage/txcache/monitoring_test.go | 8 +- storage/txcache/txCache.go | 10 +- storage/txcache/txCache_test.go | 17 ++- 10 files changed, 259 insertions(+), 20 deletions(-) create mode 100644 dataRetriever/txpool/interface.go create mode 100644 storage/txcache/disabledCache.go diff --git a/dataRetriever/txpool/interface.go b/dataRetriever/txpool/interface.go new file mode 100644 index 00000000000..5fcc747837d --- /dev/null +++ b/dataRetriever/txpool/interface.go @@ -0,0 +1,17 @@ +package txpool + +import ( + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/txcache" +) + +type txCache interface { + storage.Cacher + + AddTx(tx *txcache.WrappedTransaction) (ok bool, added bool) + GetByTxHash(txHash []byte) (*txcache.WrappedTransaction, bool) + RemoveTxByHash(txHash []byte) error + CountTx() int64 + ForEachTransaction(function txcache.ForEachTransaction) + SelectTransactions(numRequested int, batchSizePerSender int) []*txcache.WrappedTransaction +} diff --git a/dataRetriever/txpool/shardedTxPool.go b/dataRetriever/txpool/shardedTxPool.go index 6c4bcfea46f..66a966cb525 100644 --- a/dataRetriever/txpool/shardedTxPool.go +++ b/dataRetriever/txpool/shardedTxPool.go @@ -31,7 +31,7 @@ type shardedTxPool struct { type txPoolShard struct { CacheID string - Cache *txcache.TxCache + Cache txCache } // NewShardedTxPool creates a new sharded tx pool @@ -82,7 +82,7 @@ func (txPool *shardedTxPool) ShardDataStore(cacheID string) storage.Cacher { } // getTxCache returns the requested cache -func (txPool *shardedTxPool) getTxCache(cacheID string) *txcache.TxCache { +func (txPool *shardedTxPool) getTxCache(cacheID string) txCache { shard := txPool.getOrCreateShard(cacheID) return shard.Cache } @@ -108,8 +108,7 @@ func (txPool *shardedTxPool) createShard(cacheID string) *txPoolShard { shard, ok := txPool.backingMap[cacheID] if !ok { - cacheConfig := txPool.getCacheConfig(cacheID) - cache := txcache.NewTxCache(cacheConfig) + cache := txPool.createTxCache(cacheID) shard = &txPoolShard{ CacheID: cacheID, Cache: cache, @@ -121,6 +120,17 @@ func (txPool *shardedTxPool) createShard(cacheID string) *txPoolShard { return shard } +func (txPool *shardedTxPool) createTxCache(cacheID string) txCache { + cacheConfig := 
txPool.getCacheConfig(cacheID) + cache, err := txcache.NewTxCache(cacheConfig) + if err != nil { + log.Error("shardedTxPool.createTxCache()", "err", err) + return txcache.NewDisabledCache() + } + + return cache +} + func (txPool *shardedTxPool) getCacheConfig(cacheID string) txcache.CacheConfig { var cacheConfig txcache.CacheConfig @@ -144,6 +154,7 @@ func (txPool *shardedTxPool) AddData(key []byte, value interface{}, cacheID stri sourceShardID, destinationShardID, err := process.ParseShardCacherIdentifier(cacheID) if err != nil { + log.Error("shardedTxPool.AddData()", "err", err) return } diff --git a/dataRetriever/txpool/shardedTxPool_test.go b/dataRetriever/txpool/shardedTxPool_test.go index 7e1e91788d4..820451668e1 100644 --- a/dataRetriever/txpool/shardedTxPool_test.go +++ b/dataRetriever/txpool/shardedTxPool_test.go @@ -175,7 +175,7 @@ func Test_AddData_CallsOnAddedHandlers(t *testing.T) { // Second addition is ignored (txhash-based deduplication) pool.AddData([]byte("hash-1"), createTx("alice", 42), "1") - pool.AddData([]byte("hash-1"), createTx("whatever", 43), "1") + pool.AddData([]byte("hash-1"), createTx("alice", 42), "1") waitABit() require.Equal(t, uint32(1), atomic.LoadUint32(&numAdded)) diff --git a/storage/txcache/config.go b/storage/txcache/config.go index b4b4638e2b3..9c0c276232a 100644 --- a/storage/txcache/config.go +++ b/storage/txcache/config.go @@ -1,5 +1,7 @@ package txcache +import "fmt" + // CacheConfig holds cache configuration type CacheConfig struct { Name string @@ -12,3 +14,41 @@ type CacheConfig struct { NumSendersToEvictInOneStep uint32 MinGasPriceMicroErd uint32 } + +func (config *CacheConfig) verify() error { + if len(config.Name) == 0 { + return fmt.Errorf("%w: config.Name is invalid", errInvalidCacheConfig) + } + + if config.NumChunksHint == 0 { + return fmt.Errorf("%w: config.NumChunksHint is invalid", errInvalidCacheConfig) + } + + if config.NumBytesPerSenderThreshold == 0 { + return fmt.Errorf("%w: config.NumBytesPerSenderThreshold is invalid", errInvalidCacheConfig) + } + + if config.CountPerSenderThreshold == 0 { + return fmt.Errorf("%w: config.CountPerSenderThreshold is invalid", errInvalidCacheConfig) + } + + if config.MinGasPriceMicroErd == 0 { + return fmt.Errorf("%w: config.MinGasPriceMicroErd is invalid", errInvalidCacheConfig) + } + + if config.EvictionEnabled { + if config.NumBytesThreshold == 0 { + return fmt.Errorf("%w: config.NumBytesThreshold is invalid", errInvalidCacheConfig) + } + + if config.CountThreshold == 0 { + return fmt.Errorf("%w: config.CountThreshold is invalid", errInvalidCacheConfig) + } + + if config.NumSendersToEvictInOneStep == 0 { + return fmt.Errorf("%w: config.NumSendersToEvictInOneStep is invalid", errInvalidCacheConfig) + } + } + + return nil +} diff --git a/storage/txcache/disabledCache.go b/storage/txcache/disabledCache.go new file mode 100644 index 00000000000..a0a5ef6e7d5 --- /dev/null +++ b/storage/txcache/disabledCache.go @@ -0,0 +1,127 @@ +package txcache + +import ( + "github.com/ElrondNetwork/elrond-go/storage" +) + +var _ storage.Cacher = (*DisabledCache)(nil) + +// DisabledCache represents a disabled cache +type DisabledCache struct { +} + +// NewDisabledCache creates a new disabled cache +func NewDisabledCache() *DisabledCache { + return &DisabledCache{} +} + +// AddTx - +func (cache *DisabledCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { + log.Error("DisabledCache.AddTx()") + return false, false +} + +// GetByTxHash - +func (cache *DisabledCache) GetByTxHash(txHash []byte) 
(*WrappedTransaction, bool) { + log.Error("DisabledCache.GetByTxHash()") + return nil, false +} + +// SelectTransactions - +func (cache *DisabledCache) SelectTransactions(numRequested int, batchSizePerSender int) []*WrappedTransaction { + log.Error("DisabledCache.SelectTransactions()") + return make([]*WrappedTransaction, 0) +} + +// RemoveTxByHash - +func (cache *DisabledCache) RemoveTxByHash(txHash []byte) error { + log.Error("DisabledCache.RemoveTxByHash()") + return nil +} + +// CountTx - +func (cache *DisabledCache) CountTx() int64 { + log.Error("DisabledCache.CountTx()") + return 0 +} + +// Len - +func (cache *DisabledCache) Len() int { + log.Error("DisabledCache.Len()") + return 0 +} + +// ForEachTransaction - +func (cache *DisabledCache) ForEachTransaction(function ForEachTransaction) { + log.Error("DisabledCache.ForEachTransaction()") +} + +// Clear - +func (cache *DisabledCache) Clear() { + log.Error("DisabledCache.Clear()") +} + +// Put - +func (cache *DisabledCache) Put(key []byte, value interface{}) (evicted bool) { + log.Error("DisabledCache.Put()") + return false +} + +// Get - +func (cache *DisabledCache) Get(key []byte) (value interface{}, ok bool) { + tx, ok := cache.GetByTxHash(key) + if ok { + return tx.Tx, true + } + return nil, false +} + +// Has - +func (cache *DisabledCache) Has(key []byte) bool { + log.Error("DisabledCache.Has is not implemented") + return false +} + +// Peek - +func (cache *DisabledCache) Peek(key []byte) (value interface{}, ok bool) { + log.Error("DisabledCache.DisabledCache()") + return nil, false +} + +// HasOrAdd - +func (cache *DisabledCache) HasOrAdd(key []byte, value interface{}) (ok, evicted bool) { + log.Error("DisabledCache.HasOrAdd()") + return false, false +} + +// Remove - +func (cache *DisabledCache) Remove(key []byte) { + log.Error("DisabledCache.Remove()") +} + +// RemoveOldest - +func (cache *DisabledCache) RemoveOldest() { + log.Error("DisabledCache.RemoveOldest()") +} + +// Keys - +func (cache *DisabledCache) Keys() txHashes { + log.Error("DisabledCache.Keys()") + return make([][]byte, 0) +} + +// MaxSize - +func (cache *DisabledCache) MaxSize() int { + log.Error("DisabledCache.MaxSize()") + return 0 +} + +// RegisterHandler - +func (cache *DisabledCache) RegisterHandler(func(key []byte, value interface{})) { + log.Error("DisabledCache.RegisterHandler()") +} + +// IsInterfaceNil returns true if there is no value under the interface +func (cache *DisabledCache) IsInterfaceNil() bool { + return cache == nil +} diff --git a/storage/txcache/errors.go b/storage/txcache/errors.go index ba52ba431ba..e58333db214 100644 --- a/storage/txcache/errors.go +++ b/storage/txcache/errors.go @@ -5,3 +5,4 @@ import "fmt" var errTxNotFound = fmt.Errorf("tx not found in cache") var errMapsSyncInconsistency = fmt.Errorf("maps sync inconsistency between 'txByHash' and 'txListBySender'") var errTxDuplicated = fmt.Errorf("duplicated tx") +var errInvalidCacheConfig = fmt.Errorf("invalid cache config") diff --git a/storage/txcache/eviction_test.go b/storage/txcache/eviction_test.go index f4acf5a3f21..ff111b26694 100644 --- a/storage/txcache/eviction_test.go +++ b/storage/txcache/eviction_test.go @@ -18,7 +18,9 @@ func TestEviction_EvictSendersWhileTooManyTxs(t *testing.T) { MinGasPriceMicroErd: 100, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) // 200 senders, each with 1 transaction for index := 0; index < 200; index++ { @@ -50,7 +52,9 @@ func 
TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { MinGasPriceMicroErd: 100, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) // 200 senders, each with 1 transaction for index := 0; index < 200; index++ { @@ -80,7 +84,10 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfCount(t *testing.T) { MinGasPriceMicroErd: 100, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 1000, 100000, 100*oneTrilion)) cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 1000, 100000, 100*oneTrilion)) cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 1000, 100000, 700*oneTrilion)) @@ -106,7 +113,10 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { MinGasPriceMicroErd: 100, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 800, 100000, 100*oneTrilion)) cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 500, 100000, 100*oneTrilion)) cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 200, 100000, 700*oneTrilion)) @@ -134,7 +144,10 @@ func TestEviction_doEvictionDoesNothingWhenAlreadyInProgress(t *testing.T) { NumSendersToEvictInOneStep: 1, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) cache.isEvictionInProgress.Set() @@ -150,7 +163,10 @@ func TestEviction_evictSendersInLoop_CoverLoopBreak_WhenSmallBatch(t *testing.T) NumSendersToEvictInOneStep: 42, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) cache.makeSnapshotOfSenders() @@ -168,7 +184,10 @@ func TestEviction_evictSendersWhile_ShouldContinueBreak(t *testing.T) { NumSendersToEvictInOneStep: 1, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) cache.AddTx(createTx([]byte("hash-bob"), "bob", uint64(1))) @@ -198,7 +217,10 @@ func Test_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { numSenders := 25000 numTxsPerSender := 10 - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + addManyTransactionsWithUniformDistribution(cache, numSenders, numTxsPerSender) // Sometimes (due to map iteration non-determinism), more eviction happens - one more step of 100 senders. 
diff --git a/storage/txcache/monitoring_test.go b/storage/txcache/monitoring_test.go index 257e5cb073a..82154f5c5c5 100644 --- a/storage/txcache/monitoring_test.go +++ b/storage/txcache/monitoring_test.go @@ -15,7 +15,9 @@ func TestMonitoring_numTxAddedAndRemovedDuringEviction(t *testing.T) { NumSendersToEvictInOneStep: 1, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) cache.isEvictionInProgress.Set() @@ -41,7 +43,9 @@ func TestMonitoring_numTxAddedAndRemovedBetweenSelections(t *testing.T) { NumSendersToEvictInOneStep: 1, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) require.Equal(t, int64(0), cache.numTxAddedBetweenSelections.Get()) diff --git a/storage/txcache/txCache.go b/storage/txcache/txCache.go index b05cdc490eb..5628241a4a0 100644 --- a/storage/txcache/txCache.go +++ b/storage/txcache/txCache.go @@ -31,9 +31,15 @@ type TxCache struct { } // NewTxCache creates a new transaction cache -func NewTxCache(config CacheConfig) *TxCache { +func NewTxCache(config CacheConfig) (*TxCache, error) { log.Debug("NewTxCache", "config", config) + err := config.verify() + if err != nil { + log.Error("NewTxCache config.verify()", "err", err) + return nil, err + } + // Note: for simplicity, we use the same "numChunksHint" for both internal concurrent maps numChunksHint := config.NumChunksHint @@ -46,7 +52,7 @@ func NewTxCache(config CacheConfig) *TxCache { } txCache.initSweepable() - return txCache + return txCache, nil } // AddTx adds a transaction in the cache diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index f5684808a96..4c5c98c2d21 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -249,7 +249,10 @@ func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { } // 11 * 10 - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + addManyTransactionsWithUniformDistribution(cache, 11, 10) require.LessOrEqual(t, cache.CountTx(), int64(100)) @@ -262,7 +265,10 @@ func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { } // 100 * 1000 - cache = NewTxCache(config) + cache, err = NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + addManyTransactionsWithUniformDistribution(cache, 100, 1000) require.LessOrEqual(t, cache.CountTx(), int64(250000)) } @@ -336,5 +342,10 @@ func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { } func newCacheToTest() *TxCache { - return NewTxCache(CacheConfig{Name: "test", NumChunksHint: 16, MinGasPriceMicroErd: 100}) + cache, err := NewTxCache(CacheConfig{Name: "test", NumChunksHint: 16, MinGasPriceMicroErd: 100}) + if err != nil { + panic(fmt.Sprintf("newCacheToTest(): %s", err)) + } + + return cache } From e79c8ca1ca6364f296c4792d3d6e2274e1e0e506 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Mon, 11 May 2020 20:40:19 +0300 Subject: [PATCH 12/25] Fix tests. 
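Note: NewTxCache() now rejects incomplete configurations, so every test config below gains the per-sender thresholds and MinGasPriceMicroErd. As an illustrative sketch only (the field values are arbitrary test numbers, not recommendations), a minimal config that passes verify() with eviction disabled, as seen from a client package, would be:

    cache, err := txcache.NewTxCache(txcache.CacheConfig{
        Name:                       "example",
        NumChunksHint:              16,
        NumBytesPerSenderThreshold: 10485760, // ~10 MB per sender
        CountPerSenderThreshold:    1000,
        MinGasPriceMicroErd:        100,
    })
    if err != nil {
        // err wraps errInvalidCacheConfig and names the offending field
    }

With EvictionEnabled set, NumBytesThreshold, CountThreshold and NumSendersToEvictInOneStep must be non-zero as well.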
--- storage/txcache/eviction_test.go | 28 +++++++++++++++++++++++ storage/txcache/monitoring_test.go | 8 +++++++ storage/txcache/txCache_test.go | 16 ++++++++++++- storage/txcache/txListBySenderMap_test.go | 6 ++++- storage/txcache/txListForSender.go | 4 ++-- storage/txcache/txListForSender_test.go | 6 ++++- 6 files changed, 63 insertions(+), 5 deletions(-) diff --git a/storage/txcache/eviction_test.go b/storage/txcache/eviction_test.go index ff111b26694..57366378b99 100644 --- a/storage/txcache/eviction_test.go +++ b/storage/txcache/eviction_test.go @@ -11,10 +11,13 @@ import ( func TestEviction_EvictSendersWhileTooManyTxs(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, CountThreshold: 100, + CountPerSenderThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 20, NumBytesThreshold: math.MaxUint32, + NumBytesPerSenderThreshold: math.MaxUint32, MinGasPriceMicroErd: 100, } @@ -45,9 +48,12 @@ func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { numBytesPerTx := uint32(1000) config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, NumBytesThreshold: numBytesPerTx * 100, + NumBytesPerSenderThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 20, MinGasPriceMicroErd: 100, } @@ -77,9 +83,12 @@ func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { func TestEviction_DoEvictionDoneInPassTwo_BecauseOfCount(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, NumBytesThreshold: math.MaxUint32, + NumBytesPerSenderThreshold: math.MaxUint32, CountThreshold: 2, + CountPerSenderThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 2, MinGasPriceMicroErd: 100, } @@ -106,9 +115,12 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfCount(t *testing.T) { func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, NumBytesThreshold: 1000, + NumBytesPerSenderThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 2, MinGasPriceMicroErd: 100, } @@ -139,9 +151,13 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { func TestEviction_doEvictionDoesNothingWhenAlreadyInProgress(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 1, CountThreshold: 0, NumSendersToEvictInOneStep: 1, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, } cache, err := NewTxCache(config) @@ -158,9 +174,13 @@ func TestEviction_doEvictionDoesNothingWhenAlreadyInProgress(t *testing.T) { func TestEviction_evictSendersInLoop_CoverLoopBreak_WhenSmallBatch(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 1, CountThreshold: 0, NumSendersToEvictInOneStep: 42, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, } cache, err := NewTxCache(config) @@ -179,9 +199,13 @@ func TestEviction_evictSendersInLoop_CoverLoopBreak_WhenSmallBatch(t *testing.T) func TestEviction_evictSendersWhile_ShouldContinueBreak(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 1, CountThreshold: 0, NumSendersToEvictInOneStep: 1, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, } cache, err := NewTxCache(config) @@ -207,11 +231,15 @@ func 
TestEviction_evictSendersWhile_ShouldContinueBreak(t *testing.T) { // ~1 second on average laptop. func Test_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, EvictionEnabled: true, NumBytesThreshold: 1000000000, CountThreshold: 240000, NumSendersToEvictInOneStep: dataRetriever.TxPoolNumSendersToEvictInOneStep, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, } numSenders := 25000 diff --git a/storage/txcache/monitoring_test.go b/storage/txcache/monitoring_test.go index 82154f5c5c5..4e1988ccfad 100644 --- a/storage/txcache/monitoring_test.go +++ b/storage/txcache/monitoring_test.go @@ -9,10 +9,14 @@ import ( func TestMonitoring_numTxAddedAndRemovedDuringEviction(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, CountThreshold: math.MaxUint32, NumBytesThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 1, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, } cache, err := NewTxCache(config) @@ -37,10 +41,14 @@ func TestMonitoring_numTxAddedAndRemovedDuringEviction(t *testing.T) { func TestMonitoring_numTxAddedAndRemovedBetweenSelections(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, CountThreshold: math.MaxUint32, NumBytesThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 1, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, } cache, err := NewTxCache(config) diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index 4c5c98c2d21..91522a9d577 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -241,11 +241,15 @@ func Test_Keys(t *testing.T) { func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, EvictionEnabled: true, NumBytesThreshold: math.MaxUint32, CountThreshold: 100, NumSendersToEvictInOneStep: 1, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, } // 11 * 10 @@ -257,11 +261,15 @@ func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { require.LessOrEqual(t, cache.CountTx(), int64(100)) config = CacheConfig{ + Name: "untitled", NumChunksHint: 16, EvictionEnabled: true, NumBytesThreshold: math.MaxUint32, CountThreshold: 250000, NumSendersToEvictInOneStep: 1, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, } // 100 * 1000 @@ -342,7 +350,13 @@ func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { } func newCacheToTest() *TxCache { - cache, err := NewTxCache(CacheConfig{Name: "test", NumChunksHint: 16, MinGasPriceMicroErd: 100}) + cache, err := NewTxCache(CacheConfig{ + Name: "test", + NumChunksHint: 16, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, + }) if err != nil { panic(fmt.Sprintf("newCacheToTest(): %s", err)) } diff --git a/storage/txcache/txListBySenderMap_test.go b/storage/txcache/txListBySenderMap_test.go index 2962e96b67c..d97fff3c6ab 100644 --- a/storage/txcache/txListBySenderMap_test.go +++ b/storage/txcache/txListBySenderMap_test.go @@ -2,6 +2,7 @@ package txcache import ( "fmt" + "math" "sync" "testing" @@ -175,5 +176,8 @@ func createTxListBySenderMap(numSenders int) 
txListBySenderMap { } func newSendersMapToTest() txListBySenderMap { - return newTxListBySenderMap(4, CacheConfig{}) + return newTxListBySenderMap(4, CacheConfig{ + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + }) } diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index 04833233ce6..e20ff772bcd 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -91,8 +91,8 @@ func (listForSender *txListForSender) applyLimit() txHashes { func (listForSender *txListForSender) isLimitReached() bool { maxBytes := int64(listForSender.cacheConfig.NumBytesPerSenderThreshold) maxNumTxs := uint64(listForSender.cacheConfig.CountPerSenderThreshold) - tooManyBytes := maxBytes > 0 && listForSender.totalBytes.Get() > maxBytes - tooManyTxs := maxNumTxs > 0 && listForSender.countTx() > maxNumTxs + tooManyBytes := listForSender.totalBytes.Get() > maxBytes + tooManyTxs := listForSender.countTx() > maxNumTxs return tooManyBytes || tooManyTxs } diff --git a/storage/txcache/txListForSender_test.go b/storage/txcache/txListForSender_test.go index ec1e841b502..07176f95fa9 100644 --- a/storage/txcache/txListForSender_test.go +++ b/storage/txcache/txListForSender_test.go @@ -297,5 +297,9 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { } func newListToTest() *txListForSender { - return newTxListForSender(".", &CacheConfig{MinGasPriceMicroErd: 100}, func(value *txListForSender) {}) + return newTxListForSender(".", &CacheConfig{ + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, + }, func(value *txListForSender) {}) } From 10f74e8fe00d9115dfffe4c89a028c89040ca86d Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Tue, 12 May 2020 11:16:45 +0300 Subject: [PATCH 13/25] Fix remaining mock. --- genesis/mock/poolsHolderMock.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/genesis/mock/poolsHolderMock.go b/genesis/mock/poolsHolderMock.go index 89f8ac9b3bb..9bd448e8c18 100644 --- a/genesis/mock/poolsHolderMock.go +++ b/genesis/mock/poolsHolderMock.go @@ -30,9 +30,11 @@ func NewPoolsHolderMock() *PoolsHolderMock { phf.transactions, _ = txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 10000, - SizeInBytes: 1000000000, - Shards: 16, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, MinGasPrice: 100000000000000, NumberOfShards: 1, From 19e7eb1ee356c0e971de7aa82a7650ae3301d652 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Tue, 12 May 2020 13:03:20 +0300 Subject: [PATCH 14/25] Add some tests. 
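Note: the new tests below pin down applySizeConstraints() (formerly applyLimit()): when a sender exceeds CountPerSenderThreshold or NumBytesPerSenderThreshold, hashes are dropped starting from the back of the sender's sorted list, so the highest-nonce transactions are evicted first. A sketch of the expected behaviour, reusing this package's test helpers (newListToTest, createTx):

    list := newListToTest(math.MaxUint32, 3) // at most 3 transactions for this sender
    list.AddTx(createTx([]byte("tx1"), ".", 1))
    list.AddTx(createTx([]byte("tx2"), ".", 2))
    list.AddTx(createTx([]byte("tx4"), ".", 4))
    _, evicted := list.AddTx(createTx([]byte("tx3"), ".", 3))
    // "tx4" (highest nonce) is expected in "evicted"; the list keeps tx1, tx2, tx3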
--- storage/txcache/testutils_test.go | 15 +++ storage/txcache/txListForSender.go | 4 +- .../txListForSenderAsSortedMapItem_test.go | 4 +- storage/txcache/txListForSender_test.go | 118 +++++++++++++----- 4 files changed, 104 insertions(+), 37 deletions(-) diff --git a/storage/txcache/testutils_test.go b/storage/txcache/testutils_test.go index db29c01cbf3..46a4d56635e 100644 --- a/storage/txcache/testutils_test.go +++ b/storage/txcache/testutils_test.go @@ -53,6 +53,21 @@ func (cache *TxCache) isSenderSweepable(sender string) bool { return false } +func (listForSender *txListForSender) getTxHashesAsStrings() []string { + hashes := listForSender.getTxHashes() + return hashesAsStrings(hashes) +} + +func hashesAsStrings(hashes txHashes) []string { + result := make([]string, len(hashes)) + + for i := 0; i < len(hashes); i++ { + result[i] = string(hashes[i]) + } + + return result +} + func addManyTransactionsWithUniformDistribution(cache *TxCache, nSenders int, nTransactionsPerSender int) { for senderTag := 0; senderTag < nSenders; senderTag++ { sender := createFakeSenderAddress(senderTag) diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index e20ff772bcd..07e4370802b 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -62,14 +62,14 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) (bool, txHas } listForSender.onAddedTransaction(tx) - evicted := listForSender.applyLimit() + evicted := listForSender.applySizeConstraints() listForSender.triggerScoreChange() return true, evicted } // This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) applyLimit() txHashes { +func (listForSender *txListForSender) applySizeConstraints() txHashes { evictedTxHashes := make(txHashes, 0) for element := listForSender.items.Back(); element != nil; element = element.Prev() { diff --git a/storage/txcache/txListForSenderAsSortedMapItem_test.go b/storage/txcache/txListForSenderAsSortedMapItem_test.go index b94a055f759..28419546098 100644 --- a/storage/txcache/txListForSenderAsSortedMapItem_test.go +++ b/storage/txcache/txListForSenderAsSortedMapItem_test.go @@ -7,7 +7,7 @@ import ( ) func TestSenderAsBucketSortedMapItem_ComputeScore(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.AddTx(createTxWithParams([]byte("a"), ".", 1, 1000, 200000, 100*oneTrilion)) list.AddTx(createTxWithParams([]byte("b"), ".", 1, 500, 100000, 100*oneTrilion)) @@ -22,7 +22,7 @@ func TestSenderAsBucketSortedMapItem_ComputeScore(t *testing.T) { } func TestSenderAsBucketSortedMapItem_ScoreFluctuatesDeterministicallyWhenTransactionsAreAddedOrRemoved(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() A := createTxWithParams([]byte("A"), ".", 1, 1000, 200000, 100*oneTrilion) B := createTxWithParams([]byte("b"), ".", 1, 500, 100000, 100*oneTrilion) diff --git a/storage/txcache/txListForSender_test.go b/storage/txcache/txListForSender_test.go index 07176f95fa9..fac3ac87124 100644 --- a/storage/txcache/txListForSender_test.go +++ b/storage/txcache/txListForSender_test.go @@ -8,26 +8,18 @@ import ( ) func TestListForSender_AddTx_Sorts(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.AddTx(createTx([]byte("a"), ".", 1)) list.AddTx(createTx([]byte("c"), ".", 3)) list.AddTx(createTx([]byte("d"), ".", 4)) list.AddTx(createTx([]byte("b"), ".", 2)) - txHashes := list.getTxHashes() - - require.Equal(t, 4, 
list.items.Len()) - require.Equal(t, 4, len(txHashes)) - - require.Equal(t, []byte("a"), txHashes[0]) - require.Equal(t, []byte("b"), txHashes[1]) - require.Equal(t, []byte("c"), txHashes[2]) - require.Equal(t, []byte("d"), txHashes[3]) + require.ElementsMatch(t, []string{"a", "b", "c", "d"}, list.getTxHashesAsStrings()) } func TestListForSender_AddTx_GivesPriorityToHigherGas(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 42, 42)) list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 42, 100)) @@ -35,20 +27,72 @@ func TestListForSender_AddTx_GivesPriorityToHigherGas(t *testing.T) { list.AddTx(createTxWithParams([]byte("d"), ".", 2, 128, 42, 42)) list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 42, 101)) - txHashes := list.getTxHashes() + require.ElementsMatch(t, []string{"a", "d", "e", "b", "c"}, list.getTxHashesAsStrings()) +} + +func TestListForSender_AddTx_IgnoresDuplicates(t *testing.T) { + list := newUnconstrainedListToTest() + + added, _ := list.AddTx(createTx([]byte("tx1"), ".", 1)) + require.True(t, added) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2)) + require.True(t, added) + added, _ = list.AddTx(createTx([]byte("tx3"), ".", 3)) + require.True(t, added) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2)) + require.False(t, added) +} + +func TestListForSender_AddTx_AppliesSizeConstraints_Count(t *testing.T) { + list := newListToTest(math.MaxUint32, 3) + + list.AddTx(createTx([]byte("tx1"), ".", 1)) + list.AddTx(createTx([]byte("tx5"), ".", 5)) + list.AddTx(createTx([]byte("tx4"), ".", 4)) + list.AddTx(createTx([]byte("tx2"), ".", 2)) + require.ElementsMatch(t, []string{"tx1", "tx2", "tx4"}, list.getTxHashesAsStrings()) + + _, evicted := list.AddTx(createTx([]byte("tx3"), ".", 3)) + require.ElementsMatch(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) + require.ElementsMatch(t, []string{"tx4"}, hashesAsStrings(evicted)) - require.Equal(t, 5, list.items.Len()) - require.Equal(t, 5, len(txHashes)) + // Gives priority to higher gas - undesirably to some extent, "tx3" is evicted + _, evicted = list.AddTx(createTxWithParams([]byte("tx2++"), ".", 2, 128, 42, 42)) + require.ElementsMatch(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) + require.ElementsMatch(t, []string{"tx3"}, hashesAsStrings(evicted)) - require.Equal(t, []byte("a"), txHashes[0]) - require.Equal(t, []byte("d"), txHashes[1]) - require.Equal(t, []byte("e"), txHashes[2]) - require.Equal(t, []byte("b"), txHashes[3]) - require.Equal(t, []byte("c"), txHashes[4]) + // Undesirably to some extent, "tx3++"" is added, then evicted + _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 128, 42, 42)) + require.ElementsMatch(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) + require.ElementsMatch(t, []string{"tx3++"}, hashesAsStrings(evicted)) +} + +func TestListForSender_AddTx_AppliesSizeConstraints_SizeInBytes(t *testing.T) { + list := newListToTest(1024, math.MaxUint32) + + list.AddTx(createTxWithParams([]byte("tx1"), ".", 1, 128, 42, 42)) + list.AddTx(createTxWithParams([]byte("tx2"), ".", 2, 512, 42, 42)) + list.AddTx(createTxWithParams([]byte("tx3"), ".", 3, 256, 42, 42)) + _, evicted := list.AddTx(createTxWithParams([]byte("tx5"), ".", 4, 256, 42, 42)) + require.ElementsMatch(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) + require.ElementsMatch(t, []string{"tx5"}, hashesAsStrings(evicted)) + + _, evicted = 
list.AddTx(createTxWithParams([]byte("tx5--"), ".", 4, 128, 42, 42)) + require.ElementsMatch(t, []string{"tx1", "tx2", "tx3", "tx5--"}, list.getTxHashesAsStrings()) + require.ElementsMatch(t, []string{}, hashesAsStrings(evicted)) + + _, evicted = list.AddTx(createTxWithParams([]byte("tx4"), ".", 4, 128, 42, 42)) + require.ElementsMatch(t, []string{"tx1", "tx2", "tx3", "tx4"}, list.getTxHashesAsStrings()) + require.ElementsMatch(t, []string{"tx5--"}, hashesAsStrings(evicted)) + + // Gives priority to higher gas - undesirably to some extent, "tx4" is evicted + _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 4, 256, 42, 100)) + require.ElementsMatch(t, []string{"tx1", "tx2", "tx3++", "tx3"}, list.getTxHashesAsStrings()) + require.ElementsMatch(t, []string{"tx4"}, hashesAsStrings(evicted)) } func TestListForSender_findTx(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() txA := createTx([]byte("A"), ".", 41) txANewer := createTx([]byte("ANewer"), ".", 41) @@ -74,7 +118,7 @@ func TestListForSender_findTx(t *testing.T) { } func TestListForSender_findTx_CoverNonceComparisonOptimization(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.AddTx(createTx([]byte("A"), ".", 42)) // Find one with a lower nonce, not added to cache @@ -83,7 +127,7 @@ func TestListForSender_findTx_CoverNonceComparisonOptimization(t *testing.T) { } func TestListForSender_RemoveTransaction(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() tx := createTx([]byte("a"), ".", 1) list.AddTx(tx) @@ -94,7 +138,7 @@ func TestListForSender_RemoveTransaction(t *testing.T) { } func TestListForSender_RemoveTransaction_NoPanicWhenTxMissing(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() tx := createTx([]byte(""), ".", 1) list.RemoveTx(tx) @@ -102,7 +146,7 @@ func TestListForSender_RemoveTransaction_NoPanicWhenTxMissing(t *testing.T) { } func TestListForSender_SelectBatchTo(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() for index := 0; index < 100; index++ { list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index))) @@ -131,7 +175,7 @@ func TestListForSender_SelectBatchTo(t *testing.T) { } func TestListForSender_SelectBatchTo_NoPanicWhenCornerCases(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() for index := 0; index < 100; index++ { list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index))) @@ -149,7 +193,7 @@ func TestListForSender_SelectBatchTo_NoPanicWhenCornerCases(t *testing.T) { } func TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.notifyAccountNonce(1) @@ -180,7 +224,7 @@ func TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { } func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.notifyAccountNonce(1) @@ -213,7 +257,7 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) } func TestListForSender_SelectBatchTo_WhenGracePeriodWithNoGapResolve(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.notifyAccountNonce(1) @@ -245,7 +289,7 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithNoGapResolve(t *testing. 
} func TestListForSender_NotifyAccountNonce(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() require.Equal(t, uint64(0), list.accountNonce.Get()) require.False(t, list.accountNonceKnown.IsSet()) @@ -257,7 +301,7 @@ func TestListForSender_NotifyAccountNonce(t *testing.T) { } func TestListForSender_hasInitialGap(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.notifyAccountNonce(42) // No transaction, no gap @@ -271,7 +315,7 @@ func TestListForSender_hasInitialGap(t *testing.T) { } func TestListForSender_getTxHashes(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() require.Len(t, list.getTxHashes(), 0) list.AddTx(createTx([]byte("A"), ".", 1)) @@ -283,7 +327,7 @@ func TestListForSender_getTxHashes(t *testing.T) { } func TestListForSender_DetectRaceConditions(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() go func() { // These are called concurrently with addition: during eviction, during removal etc. @@ -296,10 +340,18 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { }() } -func newListToTest() *txListForSender { +func newUnconstrainedListToTest() *txListForSender { return newTxListForSender(".", &CacheConfig{ NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, MinGasPriceMicroErd: 100, }, func(value *txListForSender) {}) } + +func newListToTest(numBytesThreshold uint32, countThreshold uint32) *txListForSender { + return newTxListForSender(".", &CacheConfig{ + NumBytesPerSenderThreshold: numBytesThreshold, + CountPerSenderThreshold: countThreshold, + MinGasPriceMicroErd: 100, + }, func(value *txListForSender) {}) +} From d966ebd14f2e00837dbbae7c01b26f479fc7f88e Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Tue, 12 May 2020 14:25:59 +0300 Subject: [PATCH 15/25] Extra tests / coverage. 
--- storage/txcache/disabledCache_test.go | 37 ++++++++++++++++ storage/txcache/txCache_test.go | 64 +++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) create mode 100644 storage/txcache/disabledCache_test.go diff --git a/storage/txcache/disabledCache_test.go b/storage/txcache/disabledCache_test.go new file mode 100644 index 00000000000..0f571551a35 --- /dev/null +++ b/storage/txcache/disabledCache_test.go @@ -0,0 +1,37 @@ +package txcache + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDisabledCache_DoesNothing(t *testing.T) { + cache := NewDisabledCache() + + ok, added := cache.AddTx(nil) + require.False(t, ok) + require.False(t, added) + + tx, ok := cache.GetByTxHash([]byte{}) + require.Nil(t, tx) + require.False(t, ok) + + selection := cache.SelectTransactions(42, 42) + require.Equal(t, 0, len(selection)) + + err := cache.RemoveTxByHash([]byte{}) + require.Nil(t, err) + + count := cache.CountTx() + require.Equal(t, int64(0), count) + + length := cache.Len() + require.Equal(t, 0, length) + + require.NotPanics(t, func() { cache.ForEachTransaction(func(_ []byte, _ *WrappedTransaction) {}) }) + + cache.Clear() + evicted := cache.Put(nil, nil) + require.False(t, evicted) +} diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index 91522a9d577..d6d701c8776 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -13,6 +13,70 @@ import ( "github.com/stretchr/testify/require" ) +func Test_NewTxCache(t *testing.T) { + config := CacheConfig{ + Name: "test", + NumChunksHint: 16, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, + } + + evictionConfig := CacheConfig{ + Name: "test", + NumChunksHint: 16, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceMicroErd: 100, + EvictionEnabled: true, + NumBytesThreshold: math.MaxUint32, + CountThreshold: math.MaxUint32, + NumSendersToEvictInOneStep: 100, + } + + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + + badConfig := config + badConfig.Name = "" + requireErrorOnNewTxCache(t, badConfig, "config.Name") + + badConfig = config + badConfig.NumChunksHint = 0 + requireErrorOnNewTxCache(t, badConfig, "config.NumChunksHint") + + badConfig = config + badConfig.NumBytesPerSenderThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, "config.NumBytesPerSenderThreshold") + + badConfig = config + badConfig.CountPerSenderThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, "config.CountPerSenderThreshold") + + badConfig = config + badConfig.MinGasPriceMicroErd = 0 + requireErrorOnNewTxCache(t, badConfig, "config.MinGasPriceMicroErd") + + badConfig = evictionConfig + badConfig.NumBytesThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, "config.NumBytesThreshold") + + badConfig = evictionConfig + badConfig.CountThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, "config.CountThreshold") + + badConfig = evictionConfig + badConfig.NumSendersToEvictInOneStep = 0 + requireErrorOnNewTxCache(t, badConfig, "config.NumSendersToEvictInOneStep") +} + +func requireErrorOnNewTxCache(t *testing.T, config CacheConfig, errMessage string) { + cache, err := NewTxCache(config) + require.Contains(t, err.Error(), errMessage) + require.Nil(t, cache) +} + func Test_AddTx(t *testing.T) { cache := newCacheToTest() From e3d0dd49ede027a2757c585c11b9ede3f5239826 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Tue, 12 May 2020 
14:48:51 +0300 Subject: [PATCH 16/25] Cover disabled cache. --- storage/txcache/disabledCache.go | 4 ---- storage/txcache/disabledCache_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/storage/txcache/disabledCache.go b/storage/txcache/disabledCache.go index a0a5ef6e7d5..663f3392daa 100644 --- a/storage/txcache/disabledCache.go +++ b/storage/txcache/disabledCache.go @@ -69,10 +69,6 @@ func (cache *DisabledCache) Put(key []byte, value interface{}) (evicted bool) { // Get - func (cache *DisabledCache) Get(key []byte) (value interface{}, ok bool) { - tx, ok := cache.GetByTxHash(key) - if ok { - return tx.Tx, true - } return nil, false } diff --git a/storage/txcache/disabledCache_test.go b/storage/txcache/disabledCache_test.go index 0f571551a35..ae749c0c023 100644 --- a/storage/txcache/disabledCache_test.go +++ b/storage/txcache/disabledCache_test.go @@ -32,6 +32,33 @@ func TestDisabledCache_DoesNothing(t *testing.T) { require.NotPanics(t, func() { cache.ForEachTransaction(func(_ []byte, _ *WrappedTransaction) {}) }) cache.Clear() + evicted := cache.Put(nil, nil) require.False(t, evicted) + + value, ok := cache.Get([]byte{}) + require.Nil(t, value) + require.False(t, ok) + + value, ok = cache.Peek([]byte{}) + require.Nil(t, value) + require.False(t, ok) + + has := cache.Has([]byte{}) + require.False(t, has) + + has, evicted = cache.HasOrAdd([]byte{}, nil) + require.False(t, has) + require.False(t, evicted) + + cache.Remove([]byte{}) + cache.RemoveOldest() + + keys := cache.Keys() + require.Equal(t, 0, len(keys)) + + maxSize := cache.MaxSize() + require.Equal(t, 0, maxSize) + + require.NotPanics(t, func() { cache.RegisterHandler(func(_ []byte, _ interface{}) {}) }) } From f4ffcabcf11392d1eadd03b258864c72ac21e066 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Tue, 12 May 2020 15:12:30 +0300 Subject: [PATCH 17/25] Fix long test. --- integrationTests/testInitializer.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 488d10a9022..c5ce044bf80 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "fmt" "io/ioutil" + "math" "math/big" "strings" "sync" @@ -1914,9 +1915,9 @@ func createTxPool(selfShardID uint32) (dataRetriever.ShardedDataCacherNotifier, txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ Size: 100000, - SizePerSender: 1000, + SizePerSender: math.MaxUint32, SizeInBytes: 1000000000, - SizeInBytesPerSender: 10000000, + SizeInBytesPerSender: math.MaxUint32, Shards: 16, }, MinGasPrice: 100000000000000, From 6c3a4039aeadd3e029c6a29a6ca9b99f8a2db719 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Tue, 12 May 2020 17:26:07 +0300 Subject: [PATCH 18/25] Add extra tests. 
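Note: areInternalMapsConsistent() (added to testutils_test.go below) cross-checks the cache's two internal structures: every hash reachable through txListBySender must also be present in txByHash, and the two transaction counts must match. The per-sender constraint tests use it to check that hashes evicted from a sender's list are removed from the hash map as well. A sketch of the intended usage, mirroring the tests below:

    cache := newCacheToTest(math.MaxUint32, 3) // at most 3 transactions per sender
    cache.AddTx(createTx([]byte("tx-alice-1"), "alice", 1))
    cache.AddTx(createTx([]byte("tx-alice-2"), "alice", 2))
    cache.AddTx(createTx([]byte("tx-alice-4"), "alice", 4))
    cache.AddTx(createTx([]byte("tx-alice-3"), "alice", 3)) // expected to evict "tx-alice-4"
    require.True(t, cache.areInternalMapsConsistent())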
--- storage/txcache/eviction_test.go | 2 +- storage/txcache/sweeping_test.go | 6 +- storage/txcache/testutils_test.go | 30 +++++++++ storage/txcache/txCache_test.go | 87 ++++++++++++++++++++----- storage/txcache/txListForSender_test.go | 4 +- 5 files changed, 106 insertions(+), 23 deletions(-) diff --git a/storage/txcache/eviction_test.go b/storage/txcache/eviction_test.go index 57366378b99..fb08dfc4347 100644 --- a/storage/txcache/eviction_test.go +++ b/storage/txcache/eviction_test.go @@ -257,7 +257,7 @@ func Test_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { } func Test_EvictSendersAndTheirTxs_Concurrently(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() var wg sync.WaitGroup for i := 0; i < 10; i++ { diff --git a/storage/txcache/sweeping_test.go b/storage/txcache/sweeping_test.go index 2d4a9f5d6f3..9e5e5639525 100644 --- a/storage/txcache/sweeping_test.go +++ b/storage/txcache/sweeping_test.go @@ -7,7 +7,7 @@ import ( ) func TestSweeping_CollectSweepable(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) @@ -50,7 +50,7 @@ func TestSweeping_CollectSweepable(t *testing.T) { } func TestSweeping_WhenSendersEscapeCollection(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) @@ -95,7 +95,7 @@ func TestSweeping_WhenSendersEscapeCollection(t *testing.T) { } func TestSweeping_SweepSweepable(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) diff --git a/storage/txcache/testutils_test.go b/storage/txcache/testutils_test.go index 46a4d56635e..9b9ee673792 100644 --- a/storage/txcache/testutils_test.go +++ b/storage/txcache/testutils_test.go @@ -22,6 +22,36 @@ func kBToBytes(kB float32) uint64 { return uint64(kB * 1000) } +func (cache *TxCache) areInternalMapsConsistent() bool { + internalMapByHash := cache.txByHash + internalMapBySender := cache.txListBySender + + senders := internalMapBySender.getSnapshotAscending() + numTransactionsByHash := len(internalMapByHash.keys()) + numTransactionsBySender := 0 + + for _, sender := range senders { + numTransactionsBySender += int(sender.countTx()) + + for _, hash := range sender.getTxHashesAsStrings() { + _, ok := internalMapByHash.getTx(hash) + if !ok { + return false + } + } + } + + if numTransactionsBySender != numTransactionsByHash { + return false + } + + return true +} + +func (cache *TxCache) getHashesForSender(sender string) []string { + return cache.getListForSender(sender).getTxHashesAsStrings() +} + func (cache *TxCache) getListForSender(sender string) *txListForSender { return cache.txListBySender.testGetListForSender(sender) } diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index d6d701c8776..6d428a5f13f 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -78,7 +78,7 @@ func requireErrorOnNewTxCache(t *testing.T, config CacheConfig, errMessage strin } func Test_AddTx(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() tx := createTx([]byte("hash-1"), "alice", 1) @@ -97,7 +97,7 @@ func Test_AddTx(t *testing.T) { } func Test_AddNilTx_DoesNothing(t *testing.T) { - cache := 
newCacheToTest() + cache := newUnconstrainedCacheToTest() txHash := []byte("hash-1") @@ -110,8 +110,46 @@ func Test_AddNilTx_DoesNothing(t *testing.T) { require.Nil(t, foundTx) } +func Test_AddTx_AppliesSizeConstraintsPerSender_NumTransactions(t *testing.T) { + cache := newCacheToTest(math.MaxUint32, 3) + + cache.AddTx(createTx([]byte("tx-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("tx-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("tx-alice-4"), "alice", 4)) + cache.AddTx(createTx([]byte("tx-bob-1"), "bob", 1)) + cache.AddTx(createTx([]byte("tx-bob-2"), "bob", 2)) + require.ElementsMatch(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) + require.ElementsMatch(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) + + cache.AddTx(createTx([]byte("tx-alice-3"), "alice", 3)) + require.ElementsMatch(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) + require.ElementsMatch(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) +} + +func Test_AddTx_AppliesSizeConstraintsPerSender_NumBytes(t *testing.T) { + cache := newCacheToTest(1024, math.MaxUint32) + + cache.AddTx(createTxWithParams([]byte("tx-alice-1"), "alice", 1, 128, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-alice-2"), "alice", 2, 512, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-alice-4"), "alice", 3, 256, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-bob-1"), "bob", 1, 512, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 2, 513, 42, 42)) + + require.ElementsMatch(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) + require.ElementsMatch(t, []string{"tx-bob-1"}, cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) + + cache.AddTx(createTxWithParams([]byte("tx-alice-3"), "alice", 3, 256, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 3, 512, 42, 42)) + require.ElementsMatch(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) + require.ElementsMatch(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) +} + func Test_RemoveByTxHash(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-1"), "alice", 1)) cache.AddTx(createTx([]byte("hash-2"), "alice", 2)) @@ -130,7 +168,7 @@ func Test_RemoveByTxHash(t *testing.T) { } func Test_CountTx_And_Len(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-1"), "alice", 1)) cache.AddTx(createTx([]byte("hash-2"), "alice", 2)) @@ -141,7 +179,7 @@ func Test_CountTx_And_Len(t *testing.T) { } func Test_GetByTxHash_And_Peek_And_Get(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() txHash := []byte("hash-1") tx := createTx(txHash, "alice", 1) @@ -169,13 +207,13 @@ func Test_GetByTxHash_And_Peek_And_Get(t *testing.T) { } func Test_RemoveByTxHash_Error_WhenMissing(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() err := cache.RemoveTxByHash([]byte("missing")) require.Equal(t, err, errTxNotFound) } func Test_RemoveByTxHash_Error_WhenMapsInconsistency(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() txHash 
:= []byte("hash-1") tx := createTx(txHash, "alice", 1) @@ -189,7 +227,7 @@ func Test_RemoveByTxHash_Error_WhenMapsInconsistency(t *testing.T) { } func Test_Clear(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) @@ -201,7 +239,7 @@ func Test_Clear(t *testing.T) { } func Test_ForEachTransaction(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) @@ -214,7 +252,7 @@ func Test_ForEachTransaction(t *testing.T) { } func Test_SelectTransactions_Dummy(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) @@ -230,7 +268,7 @@ func Test_SelectTransactions_Dummy(t *testing.T) { } func Test_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) @@ -251,7 +289,7 @@ func Test_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { } func Test_SelectTransactions(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() // Add "nSenders" * "nTransactionsPerSender" transactions in the cache (in reversed nonce order) nSenders := 1000 @@ -288,7 +326,7 @@ func Test_SelectTransactions(t *testing.T) { } func Test_Keys(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) cache.AddTx(createTx([]byte("alice-y"), "alice", 43)) @@ -346,7 +384,7 @@ func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { } func Test_NotImplementedFunctions(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() evicted := cache.Put(nil, nil) require.False(t, evicted) @@ -364,7 +402,7 @@ func Test_NotImplementedFunctions(t *testing.T) { } func Test_IsInterfaceNil(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() require.False(t, check.IfNil(cache)) makeNil := func() storage.Cacher { @@ -376,7 +414,7 @@ func Test_IsInterfaceNil(t *testing.T) { } func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() // Alice will quickly move between two score buckets (chunks) cheapTransaction := createTxWithParams([]byte("alice-x-o"), "alice", 0, 128, 50000, 100*oneTrilion) @@ -413,7 +451,7 @@ func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { require.False(t, timedOut, "Timed out. 
Perhaps deadlock?") } -func newCacheToTest() *TxCache { +func newUnconstrainedCacheToTest() *TxCache { cache, err := NewTxCache(CacheConfig{ Name: "test", NumChunksHint: 16, @@ -421,6 +459,21 @@ func newCacheToTest() *TxCache { CountPerSenderThreshold: math.MaxUint32, MinGasPriceMicroErd: 100, }) + if err != nil { + panic(fmt.Sprintf("newUnconstrainedCacheToTest(): %s", err)) + } + + return cache +} + +func newCacheToTest(numBytesPerSenderThreshold uint32, countPerSenderThreshold uint32) *TxCache { + cache, err := NewTxCache(CacheConfig{ + Name: "test", + NumChunksHint: 16, + NumBytesPerSenderThreshold: numBytesPerSenderThreshold, + CountPerSenderThreshold: countPerSenderThreshold, + MinGasPriceMicroErd: 100, + }) if err != nil { panic(fmt.Sprintf("newCacheToTest(): %s", err)) } diff --git a/storage/txcache/txListForSender_test.go b/storage/txcache/txListForSender_test.go index fac3ac87124..30d10552678 100644 --- a/storage/txcache/txListForSender_test.go +++ b/storage/txcache/txListForSender_test.go @@ -43,7 +43,7 @@ func TestListForSender_AddTx_IgnoresDuplicates(t *testing.T) { require.False(t, added) } -func TestListForSender_AddTx_AppliesSizeConstraints_Count(t *testing.T) { +func TestListForSender_AddTx_AppliesSizeConstraints_NumTransactions(t *testing.T) { list := newListToTest(math.MaxUint32, 3) list.AddTx(createTx([]byte("tx1"), ".", 1)) @@ -67,7 +67,7 @@ func TestListForSender_AddTx_AppliesSizeConstraints_Count(t *testing.T) { require.ElementsMatch(t, []string{"tx3++"}, hashesAsStrings(evicted)) } -func TestListForSender_AddTx_AppliesSizeConstraints_SizeInBytes(t *testing.T) { +func TestListForSender_AddTx_AppliesSizeConstraints_NumBytes(t *testing.T) { list := newListToTest(1024, math.MaxUint32) list.AddTx(createTxWithParams([]byte("tx1"), ".", 1, 128, 42, 42)) From 636ad8b0a402a32601b81e5d01059d07f90d13bd Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Tue, 12 May 2020 18:53:37 +0300 Subject: [PATCH 19/25] Adjust order of magnitude for gas price. 
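Note: the pool derives MinGasPriceMicroErd by dividing the configured MinGasPrice; previously the divisor was the oneTrilion constant (10^12) applied to 100000000000000, yielding 100. With the gas price order of magnitude adjusted to 200000000000, the divisor becomes oneBillion (10^9), so the derived value is 200, as the updated shardedTxPool test asserts. The arithmetic, spelled out:

    const oneBillion = 1000000 * 1000 // 10^9
    // 200000000000 / oneBillion == 200
    minGasPriceMicroErd := uint32(200000000000 / oneBillion)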
--- dataRetriever/factory/txpool/txPoolFactory_test.go | 6 +++--- dataRetriever/txpool/shardedTxPool.go | 6 +++--- dataRetriever/txpool/shardedTxPool_test.go | 12 ++++++------ epochStart/metachain/epochStartData_test.go | 2 +- genesis/mock/poolsHolderMock.go | 2 +- integrationTests/consensus/testInitializer.go | 2 +- integrationTests/testInitializer.go | 2 +- process/block/preprocess/transactions_test.go | 2 +- process/block/shardblock_test.go | 2 +- process/coordinator/process_test.go | 2 +- process/mock/poolsHolderMock.go | 2 +- update/mock/poolsHolderMock.go | 2 +- 12 files changed, 21 insertions(+), 21 deletions(-) diff --git a/dataRetriever/factory/txpool/txPoolFactory_test.go b/dataRetriever/factory/txpool/txPoolFactory_test.go index 5cd85f4c65e..dd7c192820a 100644 --- a/dataRetriever/factory/txpool/txPoolFactory_test.go +++ b/dataRetriever/factory/txpool/txPoolFactory_test.go @@ -10,14 +10,14 @@ import ( func Test_CreateNewTxPool_ShardedData(t *testing.T) { config := storageUnit.CacheConfig{Type: storageUnit.FIFOShardedCache, Size: 100, SizeInBytes: 40960, Shards: 1} - args := txpool.ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 1} + args := txpool.ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 1} txPool, err := CreateTxPool(args) require.Nil(t, err) require.NotNil(t, txPool) config = storageUnit.CacheConfig{Type: storageUnit.LRUCache, Size: 100, SizeInBytes: 40960, Shards: 1} - args = txpool.ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 1} + args = txpool.ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 1} txPool, err = CreateTxPool(args) require.Nil(t, err) require.NotNil(t, txPool) @@ -25,7 +25,7 @@ func Test_CreateNewTxPool_ShardedData(t *testing.T) { func Test_CreateNewTxPool_ShardedTxPool(t *testing.T) { config := storageUnit.CacheConfig{Size: 100, SizePerSender: 1, SizeInBytes: 40960, SizeInBytesPerSender: 40960, Shards: 1} - args := txpool.ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 1} + args := txpool.ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 1} txPool, err := CreateTxPool(args) require.Nil(t, err) diff --git a/dataRetriever/txpool/shardedTxPool.go b/dataRetriever/txpool/shardedTxPool.go index 66a966cb525..5e8b7d7ee15 100644 --- a/dataRetriever/txpool/shardedTxPool.go +++ b/dataRetriever/txpool/shardedTxPool.go @@ -37,14 +37,14 @@ type txPoolShard struct { // NewShardedTxPool creates a new sharded tx pool // Implements "dataRetriever.TxPool" func NewShardedTxPool(args ArgShardedTxPool) (dataRetriever.ShardedDataCacherNotifier, error) { - log.Trace("NewShardedTxPool", "args", args) + log.Info("NewShardedTxPool", "args", args) err := args.verify() if err != nil { return nil, err } - const oneTrilion = 1000000 * 1000000 + const oneBillion = 1000000 * 1000 numCaches := 2*args.NumberOfShards - 1 cacheConfigPrototype := txcache.CacheConfig{ @@ -55,7 +55,7 @@ func NewShardedTxPool(args ArgShardedTxPool) (dataRetriever.ShardedDataCacherNot CountThreshold: args.Config.Size / numCaches, CountPerSenderThreshold: args.Config.SizePerSender, NumSendersToEvictInOneStep: dataRetriever.TxPoolNumSendersToEvictInOneStep, - MinGasPriceMicroErd: uint32(args.MinGasPrice / oneTrilion), + MinGasPriceMicroErd: uint32(args.MinGasPrice / oneBillion), } cacheConfigPrototypeForSelfShard := cacheConfigPrototype diff --git a/dataRetriever/txpool/shardedTxPool_test.go b/dataRetriever/txpool/shardedTxPool_test.go index 
820451668e1..6cfc3f4443c 100644 --- a/dataRetriever/txpool/shardedTxPool_test.go +++ b/dataRetriever/txpool/shardedTxPool_test.go @@ -24,7 +24,7 @@ func Test_NewShardedTxPool(t *testing.T) { } func Test_NewShardedTxPool_WhenBadConfig(t *testing.T) { - goodArgs := ArgShardedTxPool{Config: storageUnit.CacheConfig{Size: 100, SizePerSender: 10, SizeInBytes: 409600, SizeInBytesPerSender: 40960, Shards: 16}, MinGasPrice: 100000000000000, NumberOfShards: 1} + goodArgs := ArgShardedTxPool{Config: storageUnit.CacheConfig{Size: 100, SizePerSender: 10, SizeInBytes: 409600, SizeInBytesPerSender: 40960, Shards: 16}, MinGasPrice: 200000000000, NumberOfShards: 1} args := goodArgs args.Config.SizeInBytes = 1 @@ -78,7 +78,7 @@ func Test_NewShardedTxPool_WhenBadConfig(t *testing.T) { func Test_NewShardedTxPool_ComputesCacheConfig(t *testing.T) { config := storageUnit.CacheConfig{SizeInBytes: 524288000, SizeInBytesPerSender: 614400, Size: 900000, SizePerSender: 1000, Shards: 1} - args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 5} + args := ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 5} poolAsInterface, err := NewShardedTxPool(args) require.Nil(t, err) @@ -91,7 +91,7 @@ func Test_NewShardedTxPool_ComputesCacheConfig(t *testing.T) { require.Equal(t, uint32(100000), pool.cacheConfigPrototype.CountThreshold) require.Equal(t, uint32(1000), pool.cacheConfigPrototype.CountPerSenderThreshold) require.Equal(t, uint32(100), pool.cacheConfigPrototype.NumSendersToEvictInOneStep) - require.Equal(t, uint32(100), pool.cacheConfigPrototype.MinGasPriceMicroErd) + require.Equal(t, uint32(200), pool.cacheConfigPrototype.MinGasPriceMicroErd) require.Equal(t, uint32(291271110), pool.cacheConfigPrototypeForSelfShard.NumBytesThreshold) require.Equal(t, uint32(500000), pool.cacheConfigPrototypeForSelfShard.CountThreshold) } @@ -319,7 +319,7 @@ func Test_NotImplementedFunctions(t *testing.T) { func Test_routeToCacheUnions(t *testing.T) { config := storageUnit.CacheConfig{Size: 100, SizePerSender: 10, SizeInBytes: 409600, SizeInBytesPerSender: 40960, Shards: 16} - args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 4, SelfShardID: 42} + args := ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 4, SelfShardID: 42} poolAsInterface, _ := NewShardedTxPool(args) pool := poolAsInterface.(*shardedTxPool) @@ -334,7 +334,7 @@ func Test_routeToCacheUnions(t *testing.T) { func Test_getCacheConfig(t *testing.T) { config := storageUnit.CacheConfig{Size: 150, SizePerSender: 1, SizeInBytes: 61440, SizeInBytesPerSender: 40960, Shards: 16} - args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 8, SelfShardID: 4} + args := ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 8, SelfShardID: 4} poolAsInterface, _ := NewShardedTxPool(args) pool := poolAsInterface.(*shardedTxPool) @@ -368,6 +368,6 @@ type thisIsNotATransaction struct { func newTxPoolToTest() (dataRetriever.ShardedDataCacherNotifier, error) { config := storageUnit.CacheConfig{Size: 100, SizePerSender: 10, SizeInBytes: 409600, SizeInBytesPerSender: 40960, Shards: 16} - args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 4} + args := ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 4} return NewShardedTxPool(args) } diff --git a/epochStart/metachain/epochStartData_test.go b/epochStart/metachain/epochStartData_test.go index 
5563c0fdaa7..e18704570b1 100644 --- a/epochStart/metachain/epochStartData_test.go +++ b/epochStart/metachain/epochStartData_test.go @@ -108,7 +108,7 @@ func createTxPool(selfShardID uint32) (dataRetriever.ShardedDataCacherNotifier, SizeInBytesPerSender: 10000000, Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, SelfShardID: selfShardID, }, diff --git a/genesis/mock/poolsHolderMock.go b/genesis/mock/poolsHolderMock.go index 9bd448e8c18..40b22d2cfe5 100644 --- a/genesis/mock/poolsHolderMock.go +++ b/genesis/mock/poolsHolderMock.go @@ -36,7 +36,7 @@ func NewPoolsHolderMock() *PoolsHolderMock { SizeInBytesPerSender: 10000000, Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index e1249e03c58..470fcdbfcab 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -170,7 +170,7 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { SizeInBytesPerSender: 10000000, Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index c5ce044bf80..a6579eebe91 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -1920,7 +1920,7 @@ func createTxPool(selfShardID uint32) (dataRetriever.ShardedDataCacherNotifier, SizeInBytesPerSender: math.MaxUint32, Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, SelfShardID: selfShardID, }, diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index 676bde4d612..517d8b9b0fb 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -1063,7 +1063,7 @@ func createTxPool() (dataRetriever.ShardedDataCacherNotifier, error) { SizeInBytesPerSender: 10000000, Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 1bc6865282e..98f57684d0c 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -55,7 +55,7 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { SizeInBytesPerSender: 10000000, Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 3c5368d005c..245a7ee6528 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -2547,7 +2547,7 @@ func createTxPool() (dataRetriever.ShardedDataCacherNotifier, error) { SizeInBytesPerSender: 10000000, Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) diff --git a/process/mock/poolsHolderMock.go b/process/mock/poolsHolderMock.go index 9bd448e8c18..40b22d2cfe5 100644 --- a/process/mock/poolsHolderMock.go +++ b/process/mock/poolsHolderMock.go @@ -36,7 +36,7 @@ func NewPoolsHolderMock() *PoolsHolderMock { SizeInBytesPerSender: 10000000, Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) diff --git a/update/mock/poolsHolderMock.go b/update/mock/poolsHolderMock.go index 9d2680b65c7..975f0fa9480 100644 --- 
a/update/mock/poolsHolderMock.go +++ b/update/mock/poolsHolderMock.go @@ -35,7 +35,7 @@ func NewPoolsHolderMock() *PoolsHolderMock { SizeInBytesPerSender: 10000000, Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) From e9e47396577a97b7f7ddc5a5b25aead5f4fdc896 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Tue, 12 May 2020 19:51:27 +0300 Subject: [PATCH 20/25] Extra log. --- storage/txcache/txCache.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/storage/txcache/txCache.go b/storage/txcache/txCache.go index 5628241a4a0..88c8d9bd737 100644 --- a/storage/txcache/txCache.go +++ b/storage/txcache/txCache.go @@ -76,7 +76,11 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { cache.monitorTxAddition() } - cache.txByHash.RemoveTxsBulk(evicted) + if len(evicted) > 0 { + log.Trace("TxCache.AddTx()", "len(evicted)", len(evicted)) + cache.txByHash.RemoveTxsBulk(evicted) + } + return } From b3858ef49acbceecb5078476016f9c6a5e409aff Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Tue, 12 May 2020 21:32:34 +0300 Subject: [PATCH 21/25] Fix ERD scale for score. --- dataRetriever/txpool/shardedTxPool.go | 2 +- dataRetriever/txpool/shardedTxPool_test.go | 2 +- storage/txcache/config.go | 6 ++-- storage/txcache/eviction_test.go | 30 +++++++++---------- storage/txcache/monitoring_test.go | 4 +-- storage/txcache/testutils_test.go | 6 ++-- storage/txcache/txCache_test.go | 20 ++++++------- .../txcache/txListForSenderAsSortedMapItem.go | 10 +++---- .../txListForSenderAsSortedMapItem_test.go | 22 +++++++------- storage/txcache/txListForSender_test.go | 4 +-- storage/txcache/txMeasures.go | 15 +++++----- 11 files changed, 60 insertions(+), 61 deletions(-) diff --git a/dataRetriever/txpool/shardedTxPool.go b/dataRetriever/txpool/shardedTxPool.go index 5e8b7d7ee15..5961b7f5356 100644 --- a/dataRetriever/txpool/shardedTxPool.go +++ b/dataRetriever/txpool/shardedTxPool.go @@ -55,7 +55,7 @@ func NewShardedTxPool(args ArgShardedTxPool) (dataRetriever.ShardedDataCacherNot CountThreshold: args.Config.Size / numCaches, CountPerSenderThreshold: args.Config.SizePerSender, NumSendersToEvictInOneStep: dataRetriever.TxPoolNumSendersToEvictInOneStep, - MinGasPriceMicroErd: uint32(args.MinGasPrice / oneBillion), + MinGasPriceNanoErd: uint32(args.MinGasPrice / oneBillion), } cacheConfigPrototypeForSelfShard := cacheConfigPrototype diff --git a/dataRetriever/txpool/shardedTxPool_test.go b/dataRetriever/txpool/shardedTxPool_test.go index 6cfc3f4443c..a0b3a8b9204 100644 --- a/dataRetriever/txpool/shardedTxPool_test.go +++ b/dataRetriever/txpool/shardedTxPool_test.go @@ -91,7 +91,7 @@ func Test_NewShardedTxPool_ComputesCacheConfig(t *testing.T) { require.Equal(t, uint32(100000), pool.cacheConfigPrototype.CountThreshold) require.Equal(t, uint32(1000), pool.cacheConfigPrototype.CountPerSenderThreshold) require.Equal(t, uint32(100), pool.cacheConfigPrototype.NumSendersToEvictInOneStep) - require.Equal(t, uint32(200), pool.cacheConfigPrototype.MinGasPriceMicroErd) + require.Equal(t, uint32(200), pool.cacheConfigPrototype.MinGasPriceNanoErd) require.Equal(t, uint32(291271110), pool.cacheConfigPrototypeForSelfShard.NumBytesThreshold) require.Equal(t, uint32(500000), pool.cacheConfigPrototypeForSelfShard.CountThreshold) } diff --git a/storage/txcache/config.go b/storage/txcache/config.go index 9c0c276232a..62ee3a08b00 100644 --- a/storage/txcache/config.go +++ b/storage/txcache/config.go @@ -12,7 +12,7 @@ type CacheConfig struct { 
CountThreshold uint32 CountPerSenderThreshold uint32 NumSendersToEvictInOneStep uint32 - MinGasPriceMicroErd uint32 + MinGasPriceNanoErd uint32 } func (config *CacheConfig) verify() error { @@ -32,8 +32,8 @@ func (config *CacheConfig) verify() error { return fmt.Errorf("%w: config.CountPerSenderThreshold is invalid", errInvalidCacheConfig) } - if config.MinGasPriceMicroErd == 0 { - return fmt.Errorf("%w: config.MinGasPriceMicroErd is invalid", errInvalidCacheConfig) + if config.MinGasPriceNanoErd == 0 { + return fmt.Errorf("%w: config.MinGasPriceNanoErd is invalid", errInvalidCacheConfig) } if config.EvictionEnabled { diff --git a/storage/txcache/eviction_test.go b/storage/txcache/eviction_test.go index fb08dfc4347..30943d86f5a 100644 --- a/storage/txcache/eviction_test.go +++ b/storage/txcache/eviction_test.go @@ -18,7 +18,7 @@ func TestEviction_EvictSendersWhileTooManyTxs(t *testing.T) { NumSendersToEvictInOneStep: 20, NumBytesThreshold: math.MaxUint32, NumBytesPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } cache, err := NewTxCache(config) @@ -55,7 +55,7 @@ func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { NumBytesThreshold: numBytesPerTx * 100, NumBytesPerSenderThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 20, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } cache, err := NewTxCache(config) @@ -65,7 +65,7 @@ func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { // 200 senders, each with 1 transaction for index := 0; index < 200; index++ { sender := string(createFakeSenderAddress(index)) - cache.AddTx(createTxWithParams([]byte{byte(index)}, sender, uint64(1), uint64(numBytesPerTx), 10000, 100*oneTrilion)) + cache.AddTx(createTxWithParams([]byte{byte(index)}, sender, uint64(1), uint64(numBytesPerTx), 10000, 100*oneBillion)) } require.Equal(t, int64(200), cache.txListBySender.counter.Get()) @@ -90,16 +90,16 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfCount(t *testing.T) { CountThreshold: 2, CountPerSenderThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 2, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } cache, err := NewTxCache(config) require.Nil(t, err) require.NotNil(t, cache) - cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 1000, 100000, 100*oneTrilion)) - cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 1000, 100000, 100*oneTrilion)) - cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 1000, 100000, 700*oneTrilion)) + cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 1000, 100000, 100*oneBillion)) + cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 1000, 100000, 100*oneBillion)) + cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 1000, 100000, 700*oneBillion)) cache.doEviction() require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) @@ -122,16 +122,16 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { NumBytesThreshold: 1000, NumBytesPerSenderThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 2, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } cache, err := NewTxCache(config) require.Nil(t, err) require.NotNil(t, cache) - cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 800, 100000, 100*oneTrilion)) - cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 500, 100000, 100*oneTrilion)) - cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 200, 
100000, 700*oneTrilion)) + cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 800, 100000, 100*oneBillion)) + cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 500, 100000, 100*oneBillion)) + cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 200, 100000, 700*oneBillion)) require.InDelta(t, float64(19.50394606), cache.getRawScoreOfSender("alice"), delta) require.InDelta(t, float64(23.68494667), cache.getRawScoreOfSender("bob"), delta) @@ -157,7 +157,7 @@ func TestEviction_doEvictionDoesNothingWhenAlreadyInProgress(t *testing.T) { NumSendersToEvictInOneStep: 1, NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } cache, err := NewTxCache(config) @@ -180,7 +180,7 @@ func TestEviction_evictSendersInLoop_CoverLoopBreak_WhenSmallBatch(t *testing.T) NumSendersToEvictInOneStep: 42, NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } cache, err := NewTxCache(config) @@ -205,7 +205,7 @@ func TestEviction_evictSendersWhile_ShouldContinueBreak(t *testing.T) { NumSendersToEvictInOneStep: 1, NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } cache, err := NewTxCache(config) @@ -239,7 +239,7 @@ func Test_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { NumSendersToEvictInOneStep: dataRetriever.TxPoolNumSendersToEvictInOneStep, NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } numSenders := 25000 diff --git a/storage/txcache/monitoring_test.go b/storage/txcache/monitoring_test.go index 4e1988ccfad..23a27ca6d46 100644 --- a/storage/txcache/monitoring_test.go +++ b/storage/txcache/monitoring_test.go @@ -16,7 +16,7 @@ func TestMonitoring_numTxAddedAndRemovedDuringEviction(t *testing.T) { NumSendersToEvictInOneStep: 1, NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } cache, err := NewTxCache(config) @@ -48,7 +48,7 @@ func TestMonitoring_numTxAddedAndRemovedBetweenSelections(t *testing.T) { NumSendersToEvictInOneStep: 1, NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } cache, err := NewTxCache(config) diff --git a/storage/txcache/testutils_test.go b/storage/txcache/testutils_test.go index 9b9ee673792..a4511b3e298 100644 --- a/storage/txcache/testutils_test.go +++ b/storage/txcache/testutils_test.go @@ -11,11 +11,11 @@ import ( ) const oneMilion = 1000000 -const oneTrilion = oneMilion * oneMilion +const oneBillion = oneMilion * 1000 const delta = 0.00000001 -func toMicroERD(erd uint64) uint64 { - return erd * 1000000 +func toNanoERD(erd float64) uint64 { + return uint64(erd * float64(1000000000)) } func kBToBytes(kB float32) uint64 { diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index 6d428a5f13f..b2079814072 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -19,7 +19,7 @@ func Test_NewTxCache(t *testing.T) { NumChunksHint: 16, NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } evictionConfig := CacheConfig{ @@ -27,7 +27,7 @@ func 
Test_NewTxCache(t *testing.T) { NumChunksHint: 16, NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, EvictionEnabled: true, NumBytesThreshold: math.MaxUint32, CountThreshold: math.MaxUint32, @@ -55,8 +55,8 @@ func Test_NewTxCache(t *testing.T) { requireErrorOnNewTxCache(t, badConfig, "config.CountPerSenderThreshold") badConfig = config - badConfig.MinGasPriceMicroErd = 0 - requireErrorOnNewTxCache(t, badConfig, "config.MinGasPriceMicroErd") + badConfig.MinGasPriceNanoErd = 0 + requireErrorOnNewTxCache(t, badConfig, "config.MinGasPriceNanoErd") badConfig = evictionConfig badConfig.NumBytesThreshold = 0 @@ -351,7 +351,7 @@ func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { NumSendersToEvictInOneStep: 1, NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } // 11 * 10 @@ -371,7 +371,7 @@ func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { NumSendersToEvictInOneStep: 1, NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } // 100 * 1000 @@ -417,8 +417,8 @@ func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { cache := newUnconstrainedCacheToTest() // Alice will quickly move between two score buckets (chunks) - cheapTransaction := createTxWithParams([]byte("alice-x-o"), "alice", 0, 128, 50000, 100*oneTrilion) - expensiveTransaction := createTxWithParams([]byte("alice-x-1"), "alice", 1, 128, 50000, 300*oneTrilion) + cheapTransaction := createTxWithParams([]byte("alice-x-o"), "alice", 0, 128, 50000, 100*oneBillion) + expensiveTransaction := createTxWithParams([]byte("alice-x-1"), "alice", 1, 128, 50000, 300*oneBillion) cache.AddTx(cheapTransaction) cache.AddTx(expensiveTransaction) @@ -457,7 +457,7 @@ func newUnconstrainedCacheToTest() *TxCache { NumChunksHint: 16, NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, }) if err != nil { panic(fmt.Sprintf("newUnconstrainedCacheToTest(): %s", err)) @@ -472,7 +472,7 @@ func newCacheToTest(numBytesPerSenderThreshold uint32, countPerSenderThreshold u NumChunksHint: 16, NumBytesPerSenderThreshold: numBytesPerSenderThreshold, CountPerSenderThreshold: countPerSenderThreshold, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, }) if err != nil { panic(fmt.Sprintf("newCacheToTest(): %s", err)) diff --git a/storage/txcache/txListForSenderAsSortedMapItem.go b/storage/txcache/txListForSenderAsSortedMapItem.go index df567f44463..0fa62379e34 100644 --- a/storage/txcache/txListForSenderAsSortedMapItem.go +++ b/storage/txcache/txListForSenderAsSortedMapItem.go @@ -13,10 +13,10 @@ type senderScoreParams struct { count uint64 // Size is in bytes size uint64 - // Fee is in micro ERD + // Fee is in nano ERD fee uint64 gas uint64 - // Price is in micro ERD + // Price is in nano ERD minGasPrice uint32 } @@ -41,7 +41,7 @@ func (listForSender *txListForSender) computeRawScore() float64 { gas := listForSender.totalGas.GetUint64() size := listForSender.totalBytes.GetUint64() count := listForSender.countTx() - minGasPrice := listForSender.cacheConfig.MinGasPriceMicroErd + minGasPrice := listForSender.cacheConfig.MinGasPriceNanoErd return computeSenderScore(senderScoreParams{count: count, size: size, fee: fee, gas: gas, minGasPrice: minGasPrice}) } @@ -59,8 +59,8 @@ 
func (listForSender *txListForSender) computeRawScore() float64 { // For asymptoticScore, see (https://en.wikipedia.org/wiki/Logistic_function) // // Where: -// - PPUAvg: average gas points (fee) per processing unit, in micro ERD -// - PPUMin: minimum gas points (fee) per processing unit (given by economics.toml), in micro ERD +// - PPUAvg: average gas points (fee) per processing unit, in nano ERD +// - PPUMin: minimum gas points (fee) per processing unit (given by economics.toml), in nano ERD // - txCount: number of transactions // - txSize: size of transactions, in kB (1000 bytes) // diff --git a/storage/txcache/txListForSenderAsSortedMapItem_test.go b/storage/txcache/txListForSenderAsSortedMapItem_test.go index 28419546098..369671cd7d4 100644 --- a/storage/txcache/txListForSenderAsSortedMapItem_test.go +++ b/storage/txcache/txListForSenderAsSortedMapItem_test.go @@ -9,9 +9,9 @@ import ( func TestSenderAsBucketSortedMapItem_ComputeScore(t *testing.T) { list := newUnconstrainedListToTest() - list.AddTx(createTxWithParams([]byte("a"), ".", 1, 1000, 200000, 100*oneTrilion)) - list.AddTx(createTxWithParams([]byte("b"), ".", 1, 500, 100000, 100*oneTrilion)) - list.AddTx(createTxWithParams([]byte("c"), ".", 1, 500, 100000, 100*oneTrilion)) + list.AddTx(createTxWithParams([]byte("a"), ".", 1, 1000, 200000, 100*oneBillion)) + list.AddTx(createTxWithParams([]byte("b"), ".", 1, 500, 100000, 100*oneBillion)) + list.AddTx(createTxWithParams([]byte("c"), ".", 1, 500, 100000, 100*oneBillion)) require.Equal(t, uint64(3), list.countTx()) require.Equal(t, int64(2000), list.totalBytes.Get()) @@ -24,9 +24,9 @@ func TestSenderAsBucketSortedMapItem_ComputeScore(t *testing.T) { func TestSenderAsBucketSortedMapItem_ScoreFluctuatesDeterministicallyWhenTransactionsAreAddedOrRemoved(t *testing.T) { list := newUnconstrainedListToTest() - A := createTxWithParams([]byte("A"), ".", 1, 1000, 200000, 100*oneTrilion) - B := createTxWithParams([]byte("b"), ".", 1, 500, 100000, 100*oneTrilion) - C := createTxWithParams([]byte("c"), ".", 1, 500, 100000, 100*oneTrilion) + A := createTxWithParams([]byte("A"), ".", 1, 1000, 200000, 100*oneBillion) + B := createTxWithParams([]byte("b"), ".", 1, 500, 100000, 100*oneBillion) + C := createTxWithParams([]byte("c"), ".", 1, 500, 100000, 100*oneBillion) scoreNone := int(list.ComputeScore()) list.AddTx(A) @@ -55,23 +55,23 @@ func TestSenderAsBucketSortedMapItem_ScoreFluctuatesDeterministicallyWhenTransac } func Test_computeSenderScore(t *testing.T) { - score := computeSenderScore(senderScoreParams{count: 14000, size: kBToBytes(100000), fee: toMicroERD(300000), gas: 2500000000, minGasPrice: 100}) + score := computeSenderScore(senderScoreParams{count: 14000, size: kBToBytes(100000), fee: toNanoERD(300), gas: 2500000000, minGasPrice: 100}) require.InDelta(t, float64(0.1789683371), score, delta) - score = computeSenderScore(senderScoreParams{count: 19000, size: kBToBytes(3000), fee: toMicroERD(2300000), gas: 19000000000, minGasPrice: 100}) + score = computeSenderScore(senderScoreParams{count: 19000, size: kBToBytes(3000), fee: toNanoERD(2300), gas: 19000000000, minGasPrice: 100}) require.InDelta(t, float64(0.2517997181), score, delta) - score = computeSenderScore(senderScoreParams{count: 3, size: kBToBytes(2), fee: toMicroERD(40), gas: 400000, minGasPrice: 100}) + score = computeSenderScore(senderScoreParams{count: 3, size: kBToBytes(2), fee: toNanoERD(0.04), gas: 400000, minGasPrice: 100}) require.InDelta(t, float64(5.795382396), score, delta) - score = 
computeSenderScore(senderScoreParams{count: 1, size: kBToBytes(0.3), fee: toMicroERD(50), gas: 100000, minGasPrice: 100}) + score = computeSenderScore(senderScoreParams{count: 1, size: kBToBytes(0.3), fee: toNanoERD(0.05), gas: 100000, minGasPrice: 100}) require.InDelta(t, float64(100), score, delta) } func Benchmark_computeSenderScore(b *testing.B) { for i := 0; i < b.N; i++ { for j := uint64(0); j < 10000000; j++ { - computeSenderScore(senderScoreParams{count: j, size: (j + 1) * 500, fee: toMicroERD(11 * j), gas: 100000 * j}) + computeSenderScore(senderScoreParams{count: j, size: (j + 1) * 500, fee: toNanoERD(float64(0.011) * float64(j)), gas: 100000 * j}) } } } diff --git a/storage/txcache/txListForSender_test.go b/storage/txcache/txListForSender_test.go index 30d10552678..63693efefd2 100644 --- a/storage/txcache/txListForSender_test.go +++ b/storage/txcache/txListForSender_test.go @@ -344,7 +344,7 @@ func newUnconstrainedListToTest() *txListForSender { return newTxListForSender(".", &CacheConfig{ NumBytesPerSenderThreshold: math.MaxUint32, CountPerSenderThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, }, func(value *txListForSender) {}) } @@ -352,6 +352,6 @@ func newListToTest(numBytesThreshold uint32, countThreshold uint32) *txListForSe return newTxListForSender(".", &CacheConfig{ NumBytesPerSenderThreshold: numBytesThreshold, CountPerSenderThreshold: countThreshold, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, }, func(value *txListForSender) {}) } diff --git a/storage/txcache/txMeasures.go b/storage/txcache/txMeasures.go index 0dab0cada09..7b198faa8c1 100644 --- a/storage/txcache/txMeasures.go +++ b/storage/txcache/txMeasures.go @@ -14,15 +14,14 @@ func estimateTxGas(tx *WrappedTransaction) uint64 { return gasLimit } -// estimateTxFee returns an approximation for the cost of a transaction, in micro ERD (1/1000000 ERD) +// estimateTxFee returns an approximation for the cost of a transaction, in nano ERD // TODO: switch to integer operations (as opposed to float operations). // TODO: do not assume the order of magnitude of minGasPrice. func estimateTxFee(tx *WrappedTransaction) uint64 { - // In order to obtain the result as micro ERD, we have to divide by 10^12, one trillion (since ~1 ERD accounts for ~10^18 gas currency units at the ~minimum price) - // In order to have better precision, we divide each of the factors by 10^6, one million - const SquareRootOfOneTrillion = 1000000 - gasLimit := float32(tx.Tx.GetGasLimit()) / SquareRootOfOneTrillion - gasPrice := float32(tx.Tx.GetGasPrice()) / SquareRootOfOneTrillion - feeInMicroERD := gasLimit * gasPrice - return uint64(feeInMicroERD) + // In order to obtain the result as nano ERD (not as "atomic" 10^-18 ERD), we have to divide by 10^9 + // In order to have better precision, we divide the factors by 10^6, and 10^3 respectively + gasLimit := float32(tx.Tx.GetGasLimit()) / 1000000 + gasPrice := float32(tx.Tx.GetGasPrice()) / 1000 + feeInNanoERD := gasLimit * gasPrice + return uint64(feeInNanoERD) } From 9c169a754cf1ce022388efd359add02161f8e61a Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Tue, 12 May 2020 21:48:15 +0300 Subject: [PATCH 22/25] Remove log. 
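The nano-ERD fee estimation introduced in patch 21/25 just above boils down to the following self-contained sketch; the function name and the sample numbers are illustrative only, not part of this series:

    package main

    import "fmt"

    // With gas prices expressed in atomic units (10^-18 ERD), the fee in nano ERD
    // is gasLimit*gasPrice/10^9; the divisor is split into 10^6 and 10^3 so the
    // float32 intermediates stay small, mirroring estimateTxFee above.
    func estimateFeeNanoERD(gasLimit uint64, gasPrice uint64) uint64 {
        limit := float32(gasLimit) / 1000000
        price := float32(gasPrice) / 1000
        return uint64(limit * price)
    }

    func main() {
        // 100000 gas at a price of 200000000000 atomic units (the MinGasPrice used
        // in the tests above) costs roughly 20000000 nano ERD, i.e. about 0.02 ERD;
        // float32 rounding may shave the last digit.
        fmt.Println(estimateFeeNanoERD(100000, 200000000000))
    }
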
--- storage/txcache/txCache.go | 1 - 1 file changed, 1 deletion(-) diff --git a/storage/txcache/txCache.go b/storage/txcache/txCache.go index 88c8d9bd737..efef03b0d82 100644 --- a/storage/txcache/txCache.go +++ b/storage/txcache/txCache.go @@ -77,7 +77,6 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { } if len(evicted) > 0 { - log.Trace("TxCache.AddTx()", "len(evicted)", len(evicted)) cache.txByHash.RemoveTxsBulk(evicted) } From 7f50aecb4c135be969cc8824f1f09fa3a9d9172f Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Tue, 12 May 2020 22:35:22 +0300 Subject: [PATCH 23/25] Fix after self review. --- dataRetriever/txpool/interface.go | 1 - storage/txcache/eviction.go | 19 +++++++------------ storage/txcache/testutils_test.go | 8 ++++---- storage/txcache/txByHashMap.go | 2 +- storage/txcache/txCache.go | 1 - storage/txcache/txCache_test.go | 12 ++++++------ storage/txcache/txListForSender.go | 9 +++++---- storage/txcache/txListForSender_test.go | 10 +++++----- 8 files changed, 28 insertions(+), 34 deletions(-) diff --git a/dataRetriever/txpool/interface.go b/dataRetriever/txpool/interface.go index 5fcc747837d..bcf9c2fb306 100644 --- a/dataRetriever/txpool/interface.go +++ b/dataRetriever/txpool/interface.go @@ -13,5 +13,4 @@ type txCache interface { RemoveTxByHash(txHash []byte) error CountTx() int64 ForEachTransaction(function txcache.ForEachTransaction) - SelectTransactions(numRequested int, batchSizePerSender int) []*txcache.WrappedTransaction } diff --git a/storage/txcache/eviction.go b/storage/txcache/eviction.go index d014f9fb572..81c266568da 100644 --- a/storage/txcache/eviction.go +++ b/storage/txcache/eviction.go @@ -11,12 +11,7 @@ func (cache *TxCache) doEviction() { return } - tooManyBytes := cache.areThereTooManyBytes() - tooManyTxs := cache.areThereTooManyTxs() - tooManySenders := cache.areThereTooManySenders() - - isCapacityExceeded := tooManyBytes || tooManyTxs || tooManySenders - if !isCapacityExceeded { + if !cache.isCapacityExceeded() { return } @@ -30,7 +25,7 @@ func (cache *TxCache) doEviction() { stopWatch := cache.monitorEvictionStart() - if cache.shouldContinueEvictingSenders() { + if cache.isCapacityExceeded() { cache.makeSnapshotOfSenders() journal.passOneNumSteps, journal.passOneNumTxs, journal.passOneNumSenders = cache.evictSendersInLoop() journal.evictionPerformed = true @@ -49,6 +44,10 @@ func (cache *TxCache) destroySnapshotOfSenders() { cache.evictionSnapshotOfSenders = nil } +func (cache *TxCache) isCapacityExceeded() bool { + return cache.areThereTooManyBytes() || cache.areThereTooManySenders() || cache.areThereTooManyTxs() +} + func (cache *TxCache) areThereTooManyBytes() bool { numBytes := cache.NumBytes() tooManyBytes := numBytes > int64(cache.config.NumBytesThreshold) @@ -67,10 +66,6 @@ func (cache *TxCache) areThereTooManyTxs() bool { return tooManyTxs } -func (cache *TxCache) shouldContinueEvictingSenders() bool { - return cache.areThereTooManyTxs() || cache.areThereTooManySenders() || cache.areThereTooManyBytes() -} - // This is called concurrently by two goroutines: the eviction one and the sweeping one func (cache *TxCache) doEvictItems(txsToEvict txHashes, sendersToEvict []string) (countTxs uint32, countSenders uint32) { countTxs = cache.txByHash.RemoveTxsBulk(txsToEvict) @@ -79,7 +74,7 @@ func (cache *TxCache) doEvictItems(txsToEvict txHashes, sendersToEvict []string) } func (cache *TxCache) evictSendersInLoop() (uint32, uint32, uint32) { - return cache.evictSendersWhile(cache.shouldContinueEvictingSenders) + return 
cache.evictSendersWhile(cache.isCapacityExceeded) } // evictSendersWhileTooManyTxs removes transactions in a loop, as long as "shouldContinue" is true diff --git a/storage/txcache/testutils_test.go b/storage/txcache/testutils_test.go index a4511b3e298..83c1cee3cb9 100644 --- a/storage/txcache/testutils_test.go +++ b/storage/txcache/testutils_test.go @@ -27,11 +27,11 @@ func (cache *TxCache) areInternalMapsConsistent() bool { internalMapBySender := cache.txListBySender senders := internalMapBySender.getSnapshotAscending() - numTransactionsByHash := len(internalMapByHash.keys()) - numTransactionsBySender := 0 + numTransactionsInMapByHash := len(internalMapByHash.keys()) + numTransactionsInMapBySender := 0 for _, sender := range senders { - numTransactionsBySender += int(sender.countTx()) + numTransactionsInMapBySender += int(sender.countTx()) for _, hash := range sender.getTxHashesAsStrings() { _, ok := internalMapByHash.getTx(hash) @@ -41,7 +41,7 @@ func (cache *TxCache) areInternalMapsConsistent() bool { } } - if numTransactionsBySender != numTransactionsByHash { + if numTransactionsInMapBySender != numTransactionsInMapByHash { return false } diff --git a/storage/txcache/txByHashMap.go b/storage/txcache/txByHashMap.go index 28372087d8f..16f26a213e7 100644 --- a/storage/txcache/txByHashMap.go +++ b/storage/txcache/txByHashMap.go @@ -65,7 +65,7 @@ func (txMap *txByHashMap) RemoveTxsBulk(txHashes txHashes) uint32 { } newCount := uint32(txMap.counter.Get()) - // TODO: Check this for overflow as well? + // TODO: Check this for overflow as well, then fix in EN-6299 numRemoved := oldCount - newCount return numRemoved } diff --git a/storage/txcache/txCache.go b/storage/txcache/txCache.go index efef03b0d82..69d09a833b7 100644 --- a/storage/txcache/txCache.go +++ b/storage/txcache/txCache.go @@ -36,7 +36,6 @@ func NewTxCache(config CacheConfig) (*TxCache, error) { err := config.verify() if err != nil { - log.Error("NewTxCache config.verify()", "err", err) return nil, err } diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index b2079814072..b04a9cec2bf 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -22,7 +22,7 @@ func Test_NewTxCache(t *testing.T) { MinGasPriceNanoErd: 100, } - evictionConfig := CacheConfig{ + withEvictionConfig := CacheConfig{ Name: "test", NumChunksHint: 16, NumBytesPerSenderThreshold: math.MaxUint32, @@ -58,15 +58,15 @@ func Test_NewTxCache(t *testing.T) { badConfig.MinGasPriceNanoErd = 0 requireErrorOnNewTxCache(t, badConfig, "config.MinGasPriceNanoErd") - badConfig = evictionConfig + badConfig = withEvictionConfig badConfig.NumBytesThreshold = 0 requireErrorOnNewTxCache(t, badConfig, "config.NumBytesThreshold") - badConfig = evictionConfig + badConfig = withEvictionConfig badConfig.CountThreshold = 0 requireErrorOnNewTxCache(t, badConfig, "config.CountThreshold") - badConfig = evictionConfig + badConfig = withEvictionConfig badConfig.NumSendersToEvictInOneStep = 0 requireErrorOnNewTxCache(t, badConfig, "config.NumSendersToEvictInOneStep") } @@ -110,7 +110,7 @@ func Test_AddNilTx_DoesNothing(t *testing.T) { require.Nil(t, foundTx) } -func Test_AddTx_AppliesSizeConstraintsPerSender_NumTransactions(t *testing.T) { +func Test_AddTx_AppliesSizeConstraintsPerSenderForNumTransactions(t *testing.T) { cache := newCacheToTest(math.MaxUint32, 3) cache.AddTx(createTx([]byte("tx-alice-1"), "alice", 1)) @@ -128,7 +128,7 @@ func Test_AddTx_AppliesSizeConstraintsPerSender_NumTransactions(t *testing.T) { require.True(t, 
cache.areInternalMapsConsistent()) } -func Test_AddTx_AppliesSizeConstraintsPerSender_NumBytes(t *testing.T) { +func Test_AddTx_AppliesSizeConstraintsPerSenderForNumBytes(t *testing.T) { cache := newCacheToTest(1024, math.MaxUint32) cache.AddTx(createTxWithParams([]byte("tx-alice-1"), "alice", 1, 128, 42, 42)) diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index 07e4370802b..b6932b59aea 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -72,15 +72,16 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) (bool, txHas func (listForSender *txListForSender) applySizeConstraints() txHashes { evictedTxHashes := make(txHashes, 0) + // Iterate back to front for element := listForSender.items.Back(); element != nil; element = element.Prev() { - if !listForSender.isLimitReached() { + if !listForSender.isCapacityExceeded() { break } listForSender.items.Remove(element) listForSender.onRemovedListElement(element) - // Keep track of removed transaction + // Keep track of removed transactions value := element.Value.(*WrappedTransaction) evictedTxHashes = append(evictedTxHashes, value.TxHash) } @@ -88,7 +89,7 @@ func (listForSender *txListForSender) applySizeConstraints() txHashes { return evictedTxHashes } -func (listForSender *txListForSender) isLimitReached() bool { +func (listForSender *txListForSender) isCapacityExceeded() bool { maxBytes := int64(listForSender.cacheConfig.NumBytesPerSenderThreshold) maxNumTxs := uint64(listForSender.cacheConfig.CountPerSenderThreshold) tooManyBytes := listForSender.totalBytes.Get() > maxBytes @@ -123,7 +124,7 @@ func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTran } if nonce == incomingNonce && gasPrice > incomingGasPrice { - // The incoming transaction will be placed right after the existing one (with higher price). + // The incoming transaction will be placed right after the existing one, which has same nonce but higher price. 
return element, nil } diff --git a/storage/txcache/txListForSender_test.go b/storage/txcache/txListForSender_test.go index 63693efefd2..a842a94679c 100644 --- a/storage/txcache/txListForSender_test.go +++ b/storage/txcache/txListForSender_test.go @@ -43,7 +43,7 @@ func TestListForSender_AddTx_IgnoresDuplicates(t *testing.T) { require.False(t, added) } -func TestListForSender_AddTx_AppliesSizeConstraints_NumTransactions(t *testing.T) { +func TestListForSender_AddTx_AppliesSizeConstraintsForNumTransactions(t *testing.T) { list := newListToTest(math.MaxUint32, 3) list.AddTx(createTx([]byte("tx1"), ".", 1)) @@ -56,18 +56,18 @@ func TestListForSender_AddTx_AppliesSizeConstraints_NumTransactions(t *testing.T require.ElementsMatch(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) require.ElementsMatch(t, []string{"tx4"}, hashesAsStrings(evicted)) - // Gives priority to higher gas - undesirably to some extent, "tx3" is evicted + // Gives priority to higher gas - though undesirably to some extent, "tx3" is evicted _, evicted = list.AddTx(createTxWithParams([]byte("tx2++"), ".", 2, 128, 42, 42)) require.ElementsMatch(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.ElementsMatch(t, []string{"tx3"}, hashesAsStrings(evicted)) - // Undesirably to some extent, "tx3++"" is added, then evicted + // Though Undesirably to some extent, "tx3++"" is added, then evicted _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 128, 42, 42)) require.ElementsMatch(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.ElementsMatch(t, []string{"tx3++"}, hashesAsStrings(evicted)) } -func TestListForSender_AddTx_AppliesSizeConstraints_NumBytes(t *testing.T) { +func TestListForSender_AddTx_AppliesSizeConstraintsForNumBytes(t *testing.T) { list := newListToTest(1024, math.MaxUint32) list.AddTx(createTxWithParams([]byte("tx1"), ".", 1, 128, 42, 42)) @@ -85,7 +85,7 @@ func TestListForSender_AddTx_AppliesSizeConstraints_NumBytes(t *testing.T) { require.ElementsMatch(t, []string{"tx1", "tx2", "tx3", "tx4"}, list.getTxHashesAsStrings()) require.ElementsMatch(t, []string{"tx5--"}, hashesAsStrings(evicted)) - // Gives priority to higher gas - undesirably to some extent, "tx4" is evicted + // Gives priority to higher gas - though undesirably to some extent, "tx4" is evicted _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 4, 256, 42, 100)) require.ElementsMatch(t, []string{"tx1", "tx2", "tx3++", "tx3"}, list.getTxHashesAsStrings()) require.ElementsMatch(t, []string{"tx4"}, hashesAsStrings(evicted)) From 4d044bb0f49fba7b5f1177ce31ea857a8b765657 Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Thu, 14 May 2020 13:25:18 +0300 Subject: [PATCH 24/25] Fix after review. 
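The config checks below keep wrapping a sentinel error with %w, and the tests now assert on it with errors.Is. A minimal, self-contained sketch of that pattern (errInvalidConfig and config are placeholders, not the real types):

    package main

    import (
        "errors"
        "fmt"
    )

    // Sentinel error, standing in for a package-level error such as errInvalidCacheConfig.
    var errInvalidConfig = errors.New("invalid config")

    type config struct {
        Size uint32
    }

    // verify wraps the sentinel with %w and names the offending field,
    // in the same style as the checks in this patch.
    func (c *config) verify() error {
        if c.Size == 0 {
            return fmt.Errorf("%w: config.Size is not valid", errInvalidConfig)
        }
        return nil
    }

    func main() {
        err := (&config{}).verify()
        fmt.Println(errors.Is(err, errInvalidConfig)) // true: errors.Is unwraps the %w chain
        fmt.Println(err)                              // invalid config: config.Size is not valid
    }

Because the wrapped sentinel survives the %w chain, requireErrorOnNewTxCache in the diff below can assert both errors.Is(errReceived, errExpected) and the partial error message.
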
--- dataRetriever/constants.go | 3 -- dataRetriever/txpool/argShardedTxPool.go | 28 ++++++------ dataRetriever/txpool/shardedTxPool_test.go | 4 +- storage/txcache/config.go | 5 -- storage/txcache/disabledCache.go | 53 ++++++++-------------- storage/txcache/txCache_test.go | 24 +++++----- 6 files changed, 47 insertions(+), 70 deletions(-) diff --git a/dataRetriever/constants.go b/dataRetriever/constants.go index e171d666b78..98ebbc2b511 100644 --- a/dataRetriever/constants.go +++ b/dataRetriever/constants.go @@ -2,6 +2,3 @@ package dataRetriever // TxPoolNumSendersToEvictInOneStep instructs tx pool eviction algorithm to remove this many senders when eviction takes place const TxPoolNumSendersToEvictInOneStep = uint32(100) - -// TxPoolMinSizeInBytes is the lower limit of the tx cache / eviction parameter "sizeInBytes" -const TxPoolMinSizeInBytes = uint32(40960) diff --git a/dataRetriever/txpool/argShardedTxPool.go b/dataRetriever/txpool/argShardedTxPool.go index 204a5c754cb..a99600ab929 100644 --- a/dataRetriever/txpool/argShardedTxPool.go +++ b/dataRetriever/txpool/argShardedTxPool.go @@ -18,26 +18,26 @@ type ArgShardedTxPool struct { func (args *ArgShardedTxPool) verify() error { config := args.Config - if config.SizeInBytes < dataRetriever.TxPoolMinSizeInBytes { - return fmt.Errorf("%w: config.SizeInBytes is less than [dataRetriever.TxPoolMinSizeInBytes]", dataRetriever.ErrCacheConfigInvalidSizeInBytes) + if config.SizeInBytes == 0 { + return fmt.Errorf("%w: config.SizeInBytes is not valid", dataRetriever.ErrCacheConfigInvalidSizeInBytes) } - if config.SizeInBytesPerSender < dataRetriever.TxPoolMinSizeInBytes { - return fmt.Errorf("%w: config.SizeInBytesPerSender is less than [dataRetriever.TxPoolMinSizeInBytes]", dataRetriever.ErrCacheConfigInvalidSizeInBytes) + if config.SizeInBytesPerSender == 0 { + return fmt.Errorf("%w: config.SizeInBytesPerSender is not valid", dataRetriever.ErrCacheConfigInvalidSizeInBytes) } - if config.Size < 1 { - return fmt.Errorf("%w: config.Size is less than 1", dataRetriever.ErrCacheConfigInvalidSize) + if config.Size == 0 { + return fmt.Errorf("%w: config.Size is not valid", dataRetriever.ErrCacheConfigInvalidSize) } - if config.SizePerSender < 1 { - return fmt.Errorf("%w: config.SizePerSender is less than 1", dataRetriever.ErrCacheConfigInvalidSize) + if config.SizePerSender == 0 { + return fmt.Errorf("%w: config.SizePerSender is not valid", dataRetriever.ErrCacheConfigInvalidSize) } - if config.Shards < 1 { - return fmt.Errorf("%w: config.Shards (map chunks) is less than 1", dataRetriever.ErrCacheConfigInvalidShards) + if config.Shards == 0 { + return fmt.Errorf("%w: config.Shards (map chunks) is not valid", dataRetriever.ErrCacheConfigInvalidShards) } - if args.MinGasPrice < 1 { - return fmt.Errorf("%w: MinGasPrice is less than 1", dataRetriever.ErrCacheConfigInvalidEconomics) + if args.MinGasPrice == 0 { + return fmt.Errorf("%w: MinGasPrice is not valid", dataRetriever.ErrCacheConfigInvalidEconomics) } - if args.NumberOfShards < 1 { - return fmt.Errorf("%w: NumberOfShards is less than 1", dataRetriever.ErrCacheConfigInvalidSharding) + if args.NumberOfShards == 0 { + return fmt.Errorf("%w: NumberOfShards is not valid", dataRetriever.ErrCacheConfigInvalidSharding) } return nil diff --git a/dataRetriever/txpool/shardedTxPool_test.go b/dataRetriever/txpool/shardedTxPool_test.go index a0b3a8b9204..94aa8fa53d8 100644 --- a/dataRetriever/txpool/shardedTxPool_test.go +++ b/dataRetriever/txpool/shardedTxPool_test.go @@ -27,14 +27,14 @@ func 
Test_NewShardedTxPool_WhenBadConfig(t *testing.T) { goodArgs := ArgShardedTxPool{Config: storageUnit.CacheConfig{Size: 100, SizePerSender: 10, SizeInBytes: 409600, SizeInBytesPerSender: 40960, Shards: 16}, MinGasPrice: 200000000000, NumberOfShards: 1} args := goodArgs - args.Config.SizeInBytes = 1 + args.Config.SizeInBytes = 0 pool, err := NewShardedTxPool(args) require.Nil(t, pool) require.NotNil(t, err) require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidSizeInBytes.Error()) args = goodArgs - args.Config.SizeInBytesPerSender = 1 + args.Config.SizeInBytesPerSender = 0 pool, err = NewShardedTxPool(args) require.Nil(t, pool) require.NotNil(t, err) diff --git a/storage/txcache/config.go b/storage/txcache/config.go index 62ee3a08b00..1be512220a6 100644 --- a/storage/txcache/config.go +++ b/storage/txcache/config.go @@ -19,23 +19,18 @@ func (config *CacheConfig) verify() error { if len(config.Name) == 0 { return fmt.Errorf("%w: config.Name is invalid", errInvalidCacheConfig) } - if config.NumChunksHint == 0 { return fmt.Errorf("%w: config.NumChunksHint is invalid", errInvalidCacheConfig) } - if config.NumBytesPerSenderThreshold == 0 { return fmt.Errorf("%w: config.NumBytesPerSenderThreshold is invalid", errInvalidCacheConfig) } - if config.CountPerSenderThreshold == 0 { return fmt.Errorf("%w: config.CountPerSenderThreshold is invalid", errInvalidCacheConfig) } - if config.MinGasPriceNanoErd == 0 { return fmt.Errorf("%w: config.MinGasPriceNanoErd is invalid", errInvalidCacheConfig) } - if config.EvictionEnabled { if config.NumBytesThreshold == 0 { return fmt.Errorf("%w: config.NumBytesThreshold is invalid", errInvalidCacheConfig) diff --git a/storage/txcache/disabledCache.go b/storage/txcache/disabledCache.go index 663f3392daa..cf587439a55 100644 --- a/storage/txcache/disabledCache.go +++ b/storage/txcache/disabledCache.go @@ -15,106 +15,89 @@ func NewDisabledCache() *DisabledCache { return &DisabledCache{} } -// AddTx - +// AddTx does nothing func (cache *DisabledCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { - log.Error("DisabledCache.AddTx()") return false, false } -// GetByTxHash - +// GetByTxHash returns no transaction func (cache *DisabledCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { - log.Error("DisabledCache.GetByTxHash()") return nil, false } -// SelectTransactions - +// SelectTransactions returns an empty slice func (cache *DisabledCache) SelectTransactions(numRequested int, batchSizePerSender int) []*WrappedTransaction { - log.Error("DisabledCache.SelectTransactions()") return make([]*WrappedTransaction, 0) } -// RemoveTxByHash - +// RemoveTxByHash does nothing func (cache *DisabledCache) RemoveTxByHash(txHash []byte) error { - log.Error("DisabledCache.RemoveTxByHash()") return nil } -// CountTx - +// CountTx returns zero func (cache *DisabledCache) CountTx() int64 { - log.Error("DisabledCache.CountTx()") return 0 } -// Len - +// Len returns zero func (cache *DisabledCache) Len() int { - log.Error("DisabledCache.Len()") return 0 } -// ForEachTransaction - +// ForEachTransaction does nothing func (cache *DisabledCache) ForEachTransaction(function ForEachTransaction) { - log.Error("DisabledCache.ForEachTransaction()") } -// Clear - +// Clear does nothing func (cache *DisabledCache) Clear() { - log.Error("DisabledCache.Clear()") } -// Put - +// Put does nothing func (cache *DisabledCache) Put(key []byte, value interface{}) (evicted bool) { - log.Error("DisabledCache.Put()") return false } -// Get - +// Get returns no transaction func (cache 
*DisabledCache) Get(key []byte) (value interface{}, ok bool) { return nil, false } -// Has - +// Has returns false func (cache *DisabledCache) Has(key []byte) bool { - log.Error("DisabledCache.Has is not implemented") return false } -// Peek - +// Peek returns no transaction func (cache *DisabledCache) Peek(key []byte) (value interface{}, ok bool) { - log.Error("DisabledCache.DisabledCache()") return nil, false } -// HasOrAdd - +// HasOrAdd returns false, does nothing func (cache *DisabledCache) HasOrAdd(key []byte, value interface{}) (ok, evicted bool) { - log.Error("DisabledCache.HasOrAdd()") return false, false } -// Remove - +// Remove does nothing func (cache *DisabledCache) Remove(key []byte) { - log.Error("DisabledCache.Remove()") } -// RemoveOldest - +// RemoveOldest does nothing func (cache *DisabledCache) RemoveOldest() { - log.Error("DisabledCache.RemoveOldest()") } -// Keys - +// Keys returns an empty slice func (cache *DisabledCache) Keys() txHashes { - log.Error("DisabledCache.Keys()") return make([][]byte, 0) } -// MaxSize - +// MaxSize returns zero func (cache *DisabledCache) MaxSize() int { - log.Error("DisabledCache.MaxSize()") return 0 } -// RegisterHandler - +// RegisterHandler does nothing func (cache *DisabledCache) RegisterHandler(func(key []byte, value interface{})) { - log.Error("DisabledCache.RegisterHandler()") } // IsInterfaceNil returns true if there is no value under the interface diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index b04a9cec2bf..507db7e3757 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -1,6 +1,7 @@ package txcache import ( + "errors" "fmt" "math" "sync" @@ -40,41 +41,42 @@ func Test_NewTxCache(t *testing.T) { badConfig := config badConfig.Name = "" - requireErrorOnNewTxCache(t, badConfig, "config.Name") + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.Name") badConfig = config badConfig.NumChunksHint = 0 - requireErrorOnNewTxCache(t, badConfig, "config.NumChunksHint") + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.NumChunksHint") badConfig = config badConfig.NumBytesPerSenderThreshold = 0 - requireErrorOnNewTxCache(t, badConfig, "config.NumBytesPerSenderThreshold") + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.NumBytesPerSenderThreshold") badConfig = config badConfig.CountPerSenderThreshold = 0 - requireErrorOnNewTxCache(t, badConfig, "config.CountPerSenderThreshold") + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.CountPerSenderThreshold") badConfig = config badConfig.MinGasPriceNanoErd = 0 - requireErrorOnNewTxCache(t, badConfig, "config.MinGasPriceNanoErd") + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.MinGasPriceNanoErd") badConfig = withEvictionConfig badConfig.NumBytesThreshold = 0 - requireErrorOnNewTxCache(t, badConfig, "config.NumBytesThreshold") + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.NumBytesThreshold") badConfig = withEvictionConfig badConfig.CountThreshold = 0 - requireErrorOnNewTxCache(t, badConfig, "config.CountThreshold") + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.CountThreshold") badConfig = withEvictionConfig badConfig.NumSendersToEvictInOneStep = 0 - requireErrorOnNewTxCache(t, badConfig, "config.NumSendersToEvictInOneStep") + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.NumSendersToEvictInOneStep") } -func requireErrorOnNewTxCache(t *testing.T, 
config CacheConfig, errMessage string) { - cache, err := NewTxCache(config) - require.Contains(t, err.Error(), errMessage) +func requireErrorOnNewTxCache(t *testing.T, config CacheConfig, errExpected error, errPartialMessage string) { + cache, errReceived := NewTxCache(config) require.Nil(t, cache) + require.True(t, errors.Is(errReceived, errExpected)) + require.Contains(t, errReceived.Error(), errPartialMessage) } func Test_AddTx(t *testing.T) { From b488b4d9a033af21075f553f4411b974ecde6e6b Mon Sep 17 00:00:00 2001 From: Andrei Bancioiu Date: Thu, 14 May 2020 14:35:47 +0300 Subject: [PATCH 25/25] Fix after review, plus fix tests to use the correct Equals() function. --- storage/txcache/maps/bucketSortedMap_test.go | 8 +-- storage/txcache/txCache_test.go | 16 +++--- storage/txcache/txListForSender.go | 13 ++--- storage/txcache/txListForSender_test.go | 51 +++++++++++++------- 4 files changed, 52 insertions(+), 36 deletions(-) diff --git a/storage/txcache/maps/bucketSortedMap_test.go b/storage/txcache/maps/bucketSortedMap_test.go index 98cf23948cc..94c075a0c80 100644 --- a/storage/txcache/maps/bucketSortedMap_test.go +++ b/storage/txcache/maps/bucketSortedMap_test.go @@ -241,7 +241,7 @@ func TestBucketSortedMap_GetSnapshotAscending(t *testing.T) { myMap := NewBucketSortedMap(4, 100) snapshot := myMap.GetSnapshotAscending() - require.ElementsMatch(t, []BucketSortedMapItem{}, snapshot) + require.Equal(t, []BucketSortedMapItem{}, snapshot) a := newScoredDummyItem("a", 15) b := newScoredDummyItem("b", 101) @@ -256,14 +256,14 @@ func TestBucketSortedMap_GetSnapshotAscending(t *testing.T) { simulateMutationThatChangesScore(myMap, "c") snapshot = myMap.GetSnapshotAscending() - require.ElementsMatch(t, []BucketSortedMapItem{c, a, b}, snapshot) + require.Equal(t, []BucketSortedMapItem{c, a, b}, snapshot) } func TestBucketSortedMap_GetSnapshotDescending(t *testing.T) { myMap := NewBucketSortedMap(4, 100) snapshot := myMap.GetSnapshotDescending() - require.ElementsMatch(t, []BucketSortedMapItem{}, snapshot) + require.Equal(t, []BucketSortedMapItem{}, snapshot) a := newScoredDummyItem("a", 15) b := newScoredDummyItem("b", 101) @@ -278,7 +278,7 @@ func TestBucketSortedMap_GetSnapshotDescending(t *testing.T) { simulateMutationThatChangesScore(myMap, "c") snapshot = myMap.GetSnapshotDescending() - require.ElementsMatch(t, []BucketSortedMapItem{b, a, c}, snapshot) + require.Equal(t, []BucketSortedMapItem{b, a, c}, snapshot) } func TestBucketSortedMap_AddManyItems(t *testing.T) { diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index 507db7e3757..a093a5a3d3b 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -120,13 +120,13 @@ func Test_AddTx_AppliesSizeConstraintsPerSenderForNumTransactions(t *testing.T) cache.AddTx(createTx([]byte("tx-alice-4"), "alice", 4)) cache.AddTx(createTx([]byte("tx-bob-1"), "bob", 1)) cache.AddTx(createTx([]byte("tx-bob-2"), "bob", 2)) - require.ElementsMatch(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) - require.ElementsMatch(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) require.True(t, cache.areInternalMapsConsistent()) cache.AddTx(createTx([]byte("tx-alice-3"), "alice", 3)) - require.ElementsMatch(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, 
cache.getHashesForSender("alice")) - require.ElementsMatch(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) require.True(t, cache.areInternalMapsConsistent()) } @@ -139,14 +139,14 @@ func Test_AddTx_AppliesSizeConstraintsPerSenderForNumBytes(t *testing.T) { cache.AddTx(createTxWithParams([]byte("tx-bob-1"), "bob", 1, 512, 42, 42)) cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 2, 513, 42, 42)) - require.ElementsMatch(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) - require.ElementsMatch(t, []string{"tx-bob-1"}, cache.getHashesForSender("bob")) + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1"}, cache.getHashesForSender("bob")) require.True(t, cache.areInternalMapsConsistent()) cache.AddTx(createTxWithParams([]byte("tx-alice-3"), "alice", 3, 256, 42, 42)) cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 3, 512, 42, 42)) - require.ElementsMatch(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) - require.ElementsMatch(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) require.True(t, cache.areInternalMapsConsistent()) } diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index b6932b59aea..06152ce7dac 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -114,21 +114,22 @@ func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTran incomingGasPrice := incomingTx.Tx.GetGasPrice() for element := listForSender.items.Back(); element != nil; element = element.Prev() { - tx := element.Value.(*WrappedTransaction) - nonce := tx.Tx.GetNonce() - gasPrice := tx.Tx.GetGasPrice() + currentTx := element.Value.(*WrappedTransaction) + currentTxNonce := currentTx.Tx.GetNonce() + currentTxGasPrice := currentTx.Tx.GetGasPrice() - if incomingTx.sameAs(tx) { + if incomingTx.sameAs(currentTx) { // The incoming transaction will be discarded return nil, errTxDuplicated } - if nonce == incomingNonce && gasPrice > incomingGasPrice { + if currentTxNonce == incomingNonce && currentTxGasPrice > incomingGasPrice { // The incoming transaction will be placed right after the existing one, which has same nonce but higher price. + // If the nonces are the same, but the incoming gas price is higher or equal, the search loop continues. return element, nil } - if nonce < incomingNonce { + if currentTxNonce < incomingNonce { // We've found the first transaction with a lower nonce than the incoming one, // thus the incoming transaction will be placed right after this one. 
return element, nil diff --git a/storage/txcache/txListForSender_test.go b/storage/txcache/txListForSender_test.go index a842a94679c..76780ec02f4 100644 --- a/storage/txcache/txListForSender_test.go +++ b/storage/txcache/txListForSender_test.go @@ -15,7 +15,7 @@ func TestListForSender_AddTx_Sorts(t *testing.T) { list.AddTx(createTx([]byte("d"), ".", 4)) list.AddTx(createTx([]byte("b"), ".", 2)) - require.ElementsMatch(t, []string{"a", "b", "c", "d"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"a", "b", "c", "d"}, list.getTxHashesAsStrings()) } func TestListForSender_AddTx_GivesPriorityToHigherGas(t *testing.T) { @@ -27,7 +27,22 @@ func TestListForSender_AddTx_GivesPriorityToHigherGas(t *testing.T) { list.AddTx(createTxWithParams([]byte("d"), ".", 2, 128, 42, 42)) list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 42, 101)) - require.ElementsMatch(t, []string{"a", "d", "e", "b", "c"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"a", "d", "e", "b", "c"}, list.getTxHashesAsStrings()) +} + +func TestListForSender_AddTx_SortsCorrectlyWhenSameNonceSamePrice(t *testing.T) { + list := newUnconstrainedListToTest() + + list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 42, 42)) + list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 42, 100)) + list.AddTx(createTxWithParams([]byte("c"), ".", 3, 128, 42, 100)) + list.AddTx(createTxWithParams([]byte("d"), ".", 3, 128, 42, 98)) + list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 42, 101)) + list.AddTx(createTxWithParams([]byte("f"), ".", 2, 128, 42, 42)) + list.AddTx(createTxWithParams([]byte("g"), ".", 3, 128, 42, 99)) + + // In case of same-nonce, same-price transactions, the newer one has priority + require.Equal(t, []string{"a", "f", "e", "c", "b", "g", "d"}, list.getTxHashesAsStrings()) } func TestListForSender_AddTx_IgnoresDuplicates(t *testing.T) { @@ -50,21 +65,21 @@ func TestListForSender_AddTx_AppliesSizeConstraintsForNumTransactions(t *testing list.AddTx(createTx([]byte("tx5"), ".", 5)) list.AddTx(createTx([]byte("tx4"), ".", 4)) list.AddTx(createTx([]byte("tx2"), ".", 2)) - require.ElementsMatch(t, []string{"tx1", "tx2", "tx4"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx1", "tx2", "tx4"}, list.getTxHashesAsStrings()) _, evicted := list.AddTx(createTx([]byte("tx3"), ".", 3)) - require.ElementsMatch(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) - require.ElementsMatch(t, []string{"tx4"}, hashesAsStrings(evicted)) + require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) // Gives priority to higher gas - though undesirably to some extent, "tx3" is evicted _, evicted = list.AddTx(createTxWithParams([]byte("tx2++"), ".", 2, 128, 42, 42)) - require.ElementsMatch(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) - require.ElementsMatch(t, []string{"tx3"}, hashesAsStrings(evicted)) + require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx3"}, hashesAsStrings(evicted)) // Though Undesirably to some extent, "tx3++"" is added, then evicted _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 128, 42, 42)) - require.ElementsMatch(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) - require.ElementsMatch(t, []string{"tx3++"}, hashesAsStrings(evicted)) + require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx3++"}, hashesAsStrings(evicted)) } 
func TestListForSender_AddTx_AppliesSizeConstraintsForNumBytes(t *testing.T) { @@ -74,21 +89,21 @@ func TestListForSender_AddTx_AppliesSizeConstraintsForNumBytes(t *testing.T) { list.AddTx(createTxWithParams([]byte("tx2"), ".", 2, 512, 42, 42)) list.AddTx(createTxWithParams([]byte("tx3"), ".", 3, 256, 42, 42)) _, evicted := list.AddTx(createTxWithParams([]byte("tx5"), ".", 4, 256, 42, 42)) - require.ElementsMatch(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) - require.ElementsMatch(t, []string{"tx5"}, hashesAsStrings(evicted)) + require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx5"}, hashesAsStrings(evicted)) _, evicted = list.AddTx(createTxWithParams([]byte("tx5--"), ".", 4, 128, 42, 42)) - require.ElementsMatch(t, []string{"tx1", "tx2", "tx3", "tx5--"}, list.getTxHashesAsStrings()) - require.ElementsMatch(t, []string{}, hashesAsStrings(evicted)) + require.Equal(t, []string{"tx1", "tx2", "tx3", "tx5--"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{}, hashesAsStrings(evicted)) _, evicted = list.AddTx(createTxWithParams([]byte("tx4"), ".", 4, 128, 42, 42)) - require.ElementsMatch(t, []string{"tx1", "tx2", "tx3", "tx4"}, list.getTxHashesAsStrings()) - require.ElementsMatch(t, []string{"tx5--"}, hashesAsStrings(evicted)) + require.Equal(t, []string{"tx1", "tx2", "tx3", "tx4"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx5--"}, hashesAsStrings(evicted)) // Gives priority to higher gas - though undesirably to some extent, "tx4" is evicted - _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 4, 256, 42, 100)) - require.ElementsMatch(t, []string{"tx1", "tx2", "tx3++", "tx3"}, list.getTxHashesAsStrings()) - require.ElementsMatch(t, []string{"tx4"}, hashesAsStrings(evicted)) + _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 256, 42, 100)) + require.Equal(t, []string{"tx1", "tx2", "tx3++", "tx3"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) } func TestListForSender_findTx(t *testing.T) {
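
For context on the assertion change carried by this patch: testify's require.ElementsMatch only verifies that two slices contain the same elements, regardless of order, while require.Equal also verifies the order, which is presumably why the commit message calls Equal the correct assertion for these deterministic, order-sensitive results. A minimal, self-contained sketch of the difference (illustrative only, not part of the patch; the hash values are made up):

package txcache

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Illustrative test: ElementsMatch tolerates a shuffled result,
// Equal does not, hence the switch in the assertions above.
func TestIllustration_EqualIsOrderSensitive(t *testing.T) {
	expected := []string{"tx1", "tx2", "tx3"}
	shuffled := []string{"tx3", "tx1", "tx2"}

	// Passes: same elements, order ignored.
	require.ElementsMatch(t, expected, shuffled)

	// Equal compares element by element, so it rejects the shuffled slice
	// and passes only when the order is exactly the expected one.
	require.NotEqual(t, expected, shuffled)
	require.Equal(t, expected, []string{"tx1", "tx2", "tx3"})
}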
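
The variable renaming in findInsertionPlace also makes the ordering rule easier to state: scanning the sender's list from the back, the incoming transaction is inserted right after the first element that has a lower nonce, or the same nonce and a strictly higher gas price; on an equal nonce with an equal or lower gas price the scan continues towards the front, so the newest of several same-nonce, same-price transactions ends up first, exactly as TestListForSender_AddTx_SortsCorrectlyWhenSameNonceSamePrice expects. A standalone sketch of that rule, assuming a simplified transaction holding only a hash, nonce and gas price (the real code operates on *WrappedTransaction and additionally handles the duplicate case):

package main

import (
	"container/list"
	"fmt"
	"strings"
)

// simpleTx is a hypothetical stand-in for *WrappedTransaction.
type simpleTx struct {
	hash     string
	nonce    uint64
	gasPrice uint64
}

// insertTx applies the same back-to-front rule as findInsertionPlace:
// place the incoming tx right after the first element with a lower nonce,
// or with the same nonce but a strictly higher gas price; otherwise keep
// scanning, so newer same-nonce, same-price transactions win the tie.
func insertTx(items *list.List, tx simpleTx) {
	for element := items.Back(); element != nil; element = element.Prev() {
		current := element.Value.(simpleTx)
		lowerNonce := current.nonce < tx.nonce
		sameNonceHigherPrice := current.nonce == tx.nonce && current.gasPrice > tx.gasPrice
		if lowerNonce || sameNonceHigherPrice {
			items.InsertAfter(tx, element)
			return
		}
	}
	// No insertion place found: the incoming tx becomes the new head.
	items.PushFront(tx)
}

func main() {
	items := list.New()

	// Same nonces and gas prices as in the new unit test above.
	for _, tx := range []simpleTx{
		{"a", 1, 42}, {"b", 3, 100}, {"c", 3, 100}, {"d", 3, 98},
		{"e", 3, 101}, {"f", 2, 42}, {"g", 3, 99},
	} {
		insertTx(items, tx)
	}

	hashes := make([]string, 0, items.Len())
	for element := items.Front(); element != nil; element = element.Next() {
		hashes = append(hashes, element.Value.(simpleTx).hash)
	}
	fmt.Println(strings.Join(hashes, " ")) // a f e c b g d
}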