diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 3d9ebb49461..6950ac9b4cb 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -187,7 +187,7 @@ MaxOpenFiles = 10 [TrieStorageManagerConfig] - PruningBufferLen = 1000 + PruningBufferLen = 100000 SnapshotsBufferLen = 1000000 MaxSnapshots = 2 @@ -220,7 +220,9 @@ [TxDataPool] Size = 900000 + SizePerSender = 1000 SizeInBytes = 524288000 + SizeInBytesPerSender = 614400 Type = "TxCache" Shards = 16 @@ -397,6 +399,8 @@ CheckpointRoundsModulus = 100 AccountsStatePruningEnabled = true PeerStatePruningEnabled = true + MaxStateTrieLevelInMemory = 5 + MaxPeerTrieLevelInMemory = 5 [BlockSizeThrottleConfig] MinSizeInBytes = 104857 # 104857 is 10% from 1MB diff --git a/cmd/node/config/genesis.json b/cmd/node/config/genesis.json index b17f9b7f740..1a85c4d3065 100644 --- a/cmd/node/config/genesis.json +++ b/cmd/node/config/genesis.json @@ -1,98 +1,98 @@ [ { - "address": "erd1m87mx5x20lkmj3tcskv0yw0vc2m8d40mnlfyvep4lcqkyvnvlussjdkulc", - "supply": "1666791666666666666666666674", - "balance": "1666291666666666666666666674", - "stakingvalue": "500000000000000000000000", + "address": "erd1vfvqxa8geg42qsp3f6nhmu447j5n7ceaww62y4lkjmzv06jdt5ssaru9hx", + "supply": "1667291666666666666666666674", + "balance": "1664791666666666666666666674", + "stakingvalue": "0", "delegation": { - "address": "", - "value": "0" + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj", + "value": "2500000000000000000000000" } }, { - "address": "erd1rfyjyuxl25rsp3q8j6r6y2z5tdzauu463dzdtl2293fj5yc842ssx7pdam", - "supply": "1666791666666666666666666666", - "balance": "1666291666666666666666666666", - "stakingvalue": "500000000000000000000000", + "address": "erd1x46jdswqyfgr5h4l2lgkk7avydx8thl3thet6htx8pa3ngu50kfstgv5p5", + "supply": "1667291666666666666666666666", + "balance": "1664791666666666666666666666", + "stakingvalue": "0", "delegation": { - "address": "", - "value": "0" + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj", + "value": "2500000000000000000000000" } }, { - "address": "erd18cux4ftul77k478chttklwa94q8g205ftefam8g9z0ds253mwm8qw54xn3", - "supply": "1666791666666666666666666666", - "balance": "1666291666666666666666666666", - "stakingvalue": "500000000000000000000000", + "address": "erd1w4hc4q8j8twnq47cr838hkkzfzpfv7gfqkeh073vtgka8xzr9pps7y7qn4", + "supply": "1667291666666666666666666666", + "balance": "1664791666666666666666666666", + "stakingvalue": "0", "delegation": { - "address": "", - "value": "0" + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj", + "value": "2500000000000000000000000" } }, { - "address": "erd1r6m0x3p5lf635g8ce3nhh0mhzv4ufsctarm2uamexxplzf3t5t2q05aqzk", - "supply": "1666791666666666666666666666", - "balance": "1666291666666666666666666666", - "stakingvalue": "500000000000000000000000", + "address": "erd12g7px93mzj808dw0edf05zp7lxdq9zdafuj7q3atmdpje0v9clgs0nqj2q", + "supply": "1667291666666666666666666666", + "balance": "1664791666666666666666666666", + "stakingvalue": "0", "delegation": { - "address": "", - "value": "0" + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj", + "value": "2500000000000000000000000" } }, { - "address": "erd1u6fw4egq8pv8ufpk5g3nguyf42hnfknq7w7rfhvrta6p5dss5zdsmn69sz", - "supply": "1666791666666666666666666666", - "balance": "1666291666666666666666666666", - "stakingvalue": "500000000000000000000000", + "address": 
"erd17g5jmjpdlhq5fygg8qs8u85cmud7g6uc0f0yhz7k5734tcp64p0sdak5au", + "supply": "1667291666666666666666666666", + "balance": "1664791666666666666666666666", + "stakingvalue": "0", "delegation": { - "address": "", - "value": "0" + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj", + "value": "2500000000000000000000000" } }, { - "address": "erd1sw93x2nrtsnsa0n3vukjke2vaknwvyyp5e0gsfe5swwxnpf9dxrs6ek2xz", - "supply": "1666791666666666666666666666", - "balance": "1666291666666666666666666666", - "stakingvalue": "500000000000000000000000", + "address": "erd12epqk0m225ushnvrey9tz5pp3cftt6h85xtsqcz93x369mnckx3sj4jt2m", + "supply": "1667291666666666666666666666", + "balance": "1664791666666666666666666666", + "stakingvalue": "0", "delegation": { - "address": "", - "value": "0" + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj", + "value": "2500000000000000000000000" } }, { - "address": "erd1fh8xt5gw9rvextpdrkwrmwatszyl7sld4mcj0cy6nsmhxs8mpldqmwp4vz", - "supply": "1666791666666666666666666666", - "balance": "1666291666666666666666666666", - "stakingvalue": "500000000000000000000000", + "address": "erd1t8ztgmvu0hr0tu686kmxj4artk08njsjgpdz9plakcd09vk698cqgz2tvq", + "supply": "1667291666666666666666666666", + "balance": "1664791666666666666666666666", + "stakingvalue": "0", "delegation": { - "address": "", - "value": "0" + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj", + "value": "2500000000000000000000000" } }, { - "address": "erd1jmufwnqe06dvdvy5mse79k04qvm3h9yllhup884udknd4u9l0feq5xqatx", - "supply": "1666791666666666666666666666", - "balance": "1666291666666666666666666666", - "stakingvalue": "500000000000000000000000", + "address": "erd1qac8llthxn8965t2adtkctjyhqfnku9mgzdqulvkkpxmdmj83xkqfyqh48", + "supply": "1667291666666666666666666666", + "balance": "1664791666666666666666666666", + "stakingvalue": "0", "delegation": { - "address": "", - "value": "0" + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj", + "value": "2500000000000000000000000" } }, { - "address": "erd16h4f6u5n346kfp77k6lhnxu27c3y4sc0urwcye3q6kjgus28ywpqyfnuh8", - "supply": "1666791666666666666666666666", - "balance": "1666291666666666666666666666", - "stakingvalue": "500000000000000000000000", + "address": "erd1gxg3nnnlc7muq92auyku5un9e3a0x6q76ge6w4qtmt526sc28yfsa7shnn", + "supply": "1667291666666666666666666666", + "balance": "1664791666666666666666666666", + "stakingvalue": "0", "delegation": { - "address": "", - "value": "0" + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj", + "value": "2500000000000000000000000" } }, { - "address": "erd15khnvy0vpcfdvxd000622a3es0muct9jh9wm7klsm8nrwn3tzneqqnjhae", - "supply": "1666291666666666666666666666", - "balance": "1666291666666666666666666666", + "address": "erd1lly4c0qgl00lg2upkvjytstnge3r89385gkhlfaqyq0te6x3u2rqlz23fs", + "supply": "1664791666666666666666666666", + "balance": "1664791666666666666666666666", "stakingvalue": "0", "delegation": { "address": "", @@ -100,9 +100,9 @@ } }, { - "address": "erd183nlfwerse0nljvvlvu4d7r6d04p9hd40h5u0hxkkf378y66e3gqhw0n85", - "supply": "1666291666666666666666666666", - "balance": "1666291666666666666666666666", + "address": "erd1qwxpsuray9dyg6s7tywgp33wlm4af64h2ls5wmr8dytfr2prhtlsuq5j9a", + "supply": "1664791666666666666666666666", + "balance": "1664791666666666666666666666", "stakingvalue": "0", "delegation": { "address": "", @@ -110,13 +110,13 @@ } }, { - "address": 
"erd1pajyy0hn3kw5s9dn5pf8980fdy2d0evklrj03699uxtl4k0fyudqrzxf8j", - "supply": "1666291666666666666666666666", - "balance": "1666291666666666666666666666", + "address": "erd17lk50mlps7q39wktnvkg3xlzer3nst225twenjqj9880347q06jqt7t5re", + "supply": "1664791666666666666666666666", + "balance": "1664791666666666666666666666", "stakingvalue": "0", "delegation": { "address": "", "value": "0" } } -] +] \ No newline at end of file diff --git a/cmd/node/config/genesisContracts/delegation.wasm b/cmd/node/config/genesisContracts/delegation.wasm index bf8f22fc628..5bfed3fe064 100755 Binary files a/cmd/node/config/genesisContracts/delegation.wasm and b/cmd/node/config/genesisContracts/delegation.wasm differ diff --git a/cmd/node/config/genesisSmartContracts.json b/cmd/node/config/genesisSmartContracts.json index 8925746ff8f..c464c313bfc 100644 --- a/cmd/node/config/genesisSmartContracts.json +++ b/cmd/node/config/genesisSmartContracts.json @@ -1,9 +1,10 @@ [ { - "owner": "erd1pajyy0hn3kw5s9dn5pf8980fdy2d0evklrj03699uxtl4k0fyudqrzxf8j", - "filename": "./config/genesisContracts/answer.wasm", + "owner": "erd16grmckn46ry7fwyvass8e8pz88klazzpc0c5f0pnrv643td4797sgnvjkm", + "filename": "./config/genesisContracts/delegation.wasm", "vm-type": "0500", - "init-parameters": "", - "type": "test" + "init-parameters": "0BB8@%auction_sc_address%@0A61D0", + "type": "delegation", + "version": "0.2.*" } ] diff --git a/cmd/node/config/nodesSetup.json b/cmd/node/config/nodesSetup.json index e9daa2bf680..9b7d692377f 100644 --- a/cmd/node/config/nodesSetup.json +++ b/cmd/node/config/nodesSetup.json @@ -3,47 +3,47 @@ "roundDuration": 6000, "consensusGroupSize": 3, "minNodesPerShard": 3, + "chainID": "testnet", "metaChainConsensusGroupSize": 3, "metaChainMinNodes": 3, "hysteresis": 0.2, "adaptivity": false, - "chainID": "undefined", "initialNodes": [ { - "pubkey": "d5f3a29643ad04cf80645f95070cfc212f023e1088e3b5afc7de8085797a52ad13d46e6ea1f8b4c229e384b7a9e1fd183e3e4d5acdcc6db213e6237ef50d29e6fe1862b91cafb5137c8989982ced6cc84740e03b2d5a14568835663507364394", - "address": "erd1m87mx5x20lkmj3tcskv0yw0vc2m8d40mnlfyvep4lcqkyvnvlussjdkulc" + "pubkey": "ea6ad23ec8694dcc42d3f4c8adb81e2b72f08ca33241cb8910044ec1e9cb0da2635a64551319b8a744d2ef4dedf762178d1e57f8a40462378e1205e969018a94214862ec249d61cd9725ba729c1fe1acf7839de7a2ac852582e6e842dde60e13", + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj" }, { - "pubkey": "a4358d8219476921fc952eb6ae1b4e10d4783751abf0f4ae0d85612ed9b23938769e14691afa0573bd203b71ad08eb014ac31392bf67c89853f4b2406cccf3010624aeeb7f25e4df6a13ed724ef9c5087e8817e0323bbfb2fa7113dfdbc6f390", - "address": "erd1rfyjyuxl25rsp3q8j6r6y2z5tdzauu463dzdtl2293fj5yc842ssx7pdam" + "pubkey": "27c210a3d9942437ff2e8dfcbb601162caf2deba7f133015b2bdb8b63c5483eb1ec5747b2ed4e3ce4b13ed89888c120365a08003de5e6c76d7c7702ffd06123c096fac4720611e278687bcab717f83bc2dd6e4832ed67aff0546c7a0a69b7000", + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj" }, { - "pubkey": "5b460b8d018ddc51854632d6217f855df162cda320f4e19f6e1d80425fb123b53c324ef5765e9ea7ce017628b85a8c16e85355ab72c0718ba260f14635ed108ed06948db50a7cd852f85ecafcd6aa93cd3297c31924bbe03dd1e36eafd5b1412", - "address": "erd18cux4ftul77k478chttklwa94q8g205ftefam8g9z0ds253mwm8qw54xn3" + "pubkey": "1d834ece2215313a992dd8ef349908483addaa6d8019c72e1fbc24cdc47784ba881f12b9e4351bc42ebb78ecde5b4313edecfdce31ff29baedb00e0723871e0d02d17582e7a06e9725894267dcdfac930c86feb5d806d5c3917db6ff9eb9d98b", + "address": 
"erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj" }, { - "pubkey": "7f4c290290fb5e1fb523fd0fdc480c687f4bc8e7637b1b0d23625c58a993566ed9211bd00095b316e67d2efb5e5dcf15ce602fed89267d1f8631558dfd679e64a4feac09ccd86f55a84a6c6e47c87037da311a654c1b6303bdbb2cf27aba1416", - "address": "erd1r6m0x3p5lf635g8ce3nhh0mhzv4ufsctarm2uamexxplzf3t5t2q05aqzk" + "pubkey": "b6c9e341d68948b6259c31f64bfe3c6a5a6363db8be3eefd818c24b5bcba00e4a47227cda7c998a57d9ce60c5952b81275e342299e16129b267e4e39a4f4175ac57090060867554646c81d766279a1b6d8aea7b25a4128f81df977cb60f32589", + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj" }, { - "pubkey": "be3125478c06d8ad303e1c08ffb1a7643de44c15c613888f09319658c9027536b5524d75d37f8551e24b4eb0b6583b0bc85b28ed500627a01a2fae6a95a6e888952633025d6b6770b30355d4deffd13e62e44ca91d2dd1efeb7a6e11eb5cab8e", - "address": "erd1u6fw4egq8pv8ufpk5g3nguyf42hnfknq7w7rfhvrta6p5dss5zdsmn69sz" + "pubkey": "9bb55759790bd11971f8368f302d1a933516ea99a8381e3a93c03c753df8455942a85c41b929672474b7d63223cb6511747fc5ee3db4aac84c138f7799f1b19766e8b89b66ee296eda6b5ecac364ae54557338e03bf702ed179587602f693118", + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj" }, { - "pubkey": "a1fe183b6d3b18755d3e4fb809d903ac2bd814ba0a57eaae51e6810b2a5b798c686808681eed4e605f2cf6e39bac9a0fe3dd1825f3f8dc7149310c16afd112bbacda31762486d8d4597d65e6b4bd0f2f8d57d2c3fb1767b1c29edaf01c1c0b15", - "address": "erd1sw93x2nrtsnsa0n3vukjke2vaknwvyyp5e0gsfe5swwxnpf9dxrs6ek2xz" + "pubkey": "54bc783b7e1c409f95f51174983f819b7b35ec5f0250098fca14a34ecbeb0ba28c9d5b0ce4cecb7a6d1ce2579922ce139623734e4b3ccc731867224511df11233519130b9922bdbae100e456e9cba23d19ede37f59faac90585f635cde366b19", + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj" }, { - "pubkey": "7e8ee608fd43806fef1b883867b4250e3d268ff15c377887ec5af02937e5d426da29ce403a452f3d15a55fa3b2ffa51609a0296aaefe4d627955f9c5474c3a9114f507c5176dac90aa6321c71174a62011c805542203bafde753c77a6cc7c68d", - "address": "erd1fh8xt5gw9rvextpdrkwrmwatszyl7sld4mcj0cy6nsmhxs8mpldqmwp4vz" + "pubkey": "beffaa62eb14acc2ee7229432b76a978012631a341fb7cd415a613e0305b99830faaec773a9df990e938dc7704e21f076684d65449c09b5e4a52c97a1591deab90765aacc7e5356b5c5ac53e605fbd638a13e4bf55ee63d40d7ccd08c6fbdf16", + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj" }, { - "pubkey": "e8bc4e021386ac00f2c103798c280308edebae5deff00260d097d8e286e37940708f65643a79ec824278d014a6140a086b5a9e6247ee5b9680c141001edb6deebf82d440296355b178a19bb2a78caa65a94f849e80b015c905884dcd3e22bd15", - "address": "erd1jmufwnqe06dvdvy5mse79k04qvm3h9yllhup884udknd4u9l0feq5xqatx" + "pubkey": "91876f8a421cc64459116bed6fe3b676104aa4fc94a81626f6b19b4b69739c32093eea35ce1d0f7fbea7923cf5cb2e042cd214dea58143584c8504484921b1a1adba67000e2613e2895a6ebfa8d16ec8ac8d4185ebf58544809cf83d452a598a", + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj" }, { - "pubkey": "f5066141eb750c154b4fb048487c38947e2f5a3e591adf0e87af0c9c7858bf0e7b381eb5d1ccad1db2c5d7d255272a0220d562ea5e8c50d77f4c351fc16a1e292c97b025377d8330ebd1018d3171be96ac21d160b10464d7ab9003f936294f18", - "address": "erd16h4f6u5n346kfp77k6lhnxu27c3y4sc0urwcye3q6kjgus28ywpqyfnuh8" + "pubkey": "e34f508d647b6bb7747a0d4ce2140e34ae02ff553ec1f8804fdeddb9a6d2608d7cfdb6352d88b0771b8f41e66be36c05d67c585f0971977287d7f959e915f010b628c4b5db465f348a04084c14a3ffc4dd5f6f7eb1cd3709d46b0c0b9163dd8e", + "address": "erd1qqqqqqqqqqqqqpgqtvuqj62mlpzptgfzp69thcv49z3ruang797s8204fj" } ] } diff --git 
a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index dbeb987461c..dddadc33488 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -345,7 +345,7 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err return nil, err } - _, err = poolsCleaner.NewMiniBlocksPoolsCleaner( + mbsPoolsCleaner, err := poolsCleaner.NewMiniBlocksPoolsCleaner( args.data.Datapool.MiniBlocks(), args.rounder, args.shardCoordinator, @@ -354,7 +354,9 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err return nil, err } - _, err = poolsCleaner.NewTxsPoolsCleaner( + mbsPoolsCleaner.StartCleaning() + + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner( args.state.AddressPubkeyConverter, args.data.Datapool, args.rounder, @@ -364,6 +366,8 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err return nil, err } + txsPoolsCleaner.StartCleaning() + interceptorContainerFactory, blackListHandler, err := newInterceptorContainerFactory( args.shardCoordinator, args.nodesCoordinator, diff --git a/cmd/node/main.go b/cmd/node/main.go index f82d1f1802c..9a44e15a35f 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -603,7 +603,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.Debug("config", "file", ctx.GlobalString(nodesFile.Name)) syncer := ntp.NewSyncTime(generalConfig.NTPConfig, nil) - go syncer.StartSync() + syncer.StartSyncingTime() log.Debug("NTP average clock offset", "value", syncer.ClockOffset()) diff --git a/config/config.go b/config/config.go index ccde15a53d4..6e99fff0c3c 100644 --- a/config/config.go +++ b/config/config.go @@ -2,10 +2,12 @@ package config // CacheConfig will map the json cache configuration type CacheConfig struct { - Type string `json:"type"` - Size uint32 `json:"size"` - SizeInBytes uint32 `json:"sizeInBytes"` - Shards uint32 `json:"shards"` + Type string `json:"type"` + Size uint32 `json:"size"` + SizePerSender uint32 `json:"sizePerSender"` + SizeInBytes uint32 `json:"sizeInBytes"` + SizeInBytesPerSender uint32 `json:"sizeInBytesPerSender"` + Shards uint32 `json:"shards"` } //HeadersPoolConfig will map the headers cache configuration @@ -189,6 +191,8 @@ type StateTriesConfig struct { CheckpointRoundsModulus uint AccountsStatePruningEnabled bool PeerStatePruningEnabled bool + MaxStateTrieLevelInMemory uint + MaxPeerTrieLevelInMemory uint } // TrieStorageManagerConfig will hold config information about trie storage manager diff --git a/consensus/chronology/chronology.go b/consensus/chronology/chronology.go index 90645978c23..c5251d27448 100644 --- a/consensus/chronology/chronology.go +++ b/consensus/chronology/chronology.go @@ -1,6 +1,7 @@ package chronology import ( + "context" "fmt" "sync" "time" @@ -9,12 +10,14 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/core/close" "github.com/ElrondNetwork/elrond-go/display" "github.com/ElrondNetwork/elrond-go/ntp" "github.com/ElrondNetwork/elrond-go/statusHandler" ) var _ consensus.ChronologyHandler = (*chronology)(nil) +var _ close.Closer = (*chronology)(nil) var log = logger.GetOrCreate("consensus/chronology") @@ -34,6 +37,7 @@ type chronology struct { subroundHandlers []consensus.SubroundHandler mutSubrounds sync.RWMutex appStatusHandler core.AppStatusHandler + cancelFunc func() } // NewChronology creates a new chronology object @@ -114,8 +118,20 @@ func 
(chr *chronology) RemoveAllSubrounds() { // StartRounds actually starts the chronology and calls the DoWork() method of the subroundHandlers loaded func (chr *chronology) StartRounds() { + var ctx context.Context + ctx, chr.cancelFunc = context.WithCancel(context.Background()) + go chr.startRounds(ctx) +} + +func (chr *chronology) startRounds(ctx context.Context) { for { - time.Sleep(time.Millisecond) + select { + case <-ctx.Done(): + log.Debug("chronology's go routine is stopping...") + return + case <-time.After(time.Millisecond): + } + chr.startRound() } } @@ -199,6 +215,15 @@ func (chr *chronology) loadSubroundHandler(subroundId int) consensus.SubroundHan return chr.subroundHandlers[index] } +// Close will stop the endlessly running go routine +func (chr *chronology) Close() error { + if chr.cancelFunc != nil { + chr.cancelFunc() + } + + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (chr *chronology) IsInterfaceNil() bool { return chr == nil diff --git a/consensus/interface.go b/consensus/interface.go index fca25d47bd5..2f45f370626 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -45,6 +45,7 @@ type SubroundHandler interface { // ChronologyHandler defines the actions which should be handled by a chronology implementation type ChronologyHandler interface { + Close() error AddSubround(SubroundHandler) RemoveAllSubrounds() // StartRounds starts rounds in a sequential manner, one after the other diff --git a/consensus/mock/bootstrapMock.go b/consensus/mock/bootstrapMock.go index 29bd9a8d7d5..1f0a768b299 100644 --- a/consensus/mock/bootstrapMock.go +++ b/consensus/mock/bootstrapMock.go @@ -11,8 +11,7 @@ type BootstrapperMock struct { CreateAndCommitEmptyBlockCalled func(uint32) (data.BodyHandler, data.HeaderHandler, error) AddSyncStateListenerCalled func(func(bool)) GetNodeStateCalled func() core.NodeState - StartSyncCalled func() - StopSyncCalled func() + StartSyncingBlocksCalled func() SetStatusHandlerCalled func(handler core.AppStatusHandler) error } @@ -41,14 +40,9 @@ func (boot *BootstrapperMock) GetNodeState() core.NodeState { return core.NsSynchronized } -// StartSync - -func (boot *BootstrapperMock) StartSync() { - boot.StartSyncCalled() -} - -// StopSync - -func (boot *BootstrapperMock) StopSync() { - boot.StopSyncCalled() +// StartSyncingBlocks - +func (boot *BootstrapperMock) StartSyncingBlocks() { + boot.StartSyncingBlocksCalled() } // SetStatusHandler - @@ -56,6 +50,11 @@ func (boot *BootstrapperMock) SetStatusHandler(handler core.AppStatusHandler) er return boot.SetStatusHandlerCalled(handler) } +// Close - +func (boot *BootstrapperMock) Close() error { + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (boot *BootstrapperMock) IsInterfaceNil() bool { return boot == nil diff --git a/consensus/mock/chronologyHandlerMock.go b/consensus/mock/chronologyHandlerMock.go index 1933b2b94a1..3235ed23abe 100644 --- a/consensus/mock/chronologyHandlerMock.go +++ b/consensus/mock/chronologyHandlerMock.go @@ -41,6 +41,11 @@ func (chrm *ChronologyHandlerMock) StartRounds() { } } +// Close - +func (chrm *ChronologyHandlerMock) Close() error { + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (chrm *ChronologyHandlerMock) IsInterfaceNil() bool { return chrm == nil diff --git a/consensus/mock/sposWorkerMock.go b/consensus/mock/sposWorkerMock.go index c6e8c413d6f..e5bb8935a17 100644 --- a/consensus/mock/sposWorkerMock.go +++
b/consensus/mock/sposWorkerMock.go @@ -98,6 +98,15 @@ func (sposWorkerMock *SposWorkerMock) SetAppStatusHandler(ash core.AppStatusHand return nil } +// Close - +func (sposWorkerMock *SposWorkerMock) Close() error { + return nil +} + +// StartWorking - +func (sposWorkerMock *SposWorkerMock) StartWorking() { +} + // IsInterfaceNil returns true if there is no value under the interface func (sposWorkerMock *SposWorkerMock) IsInterfaceNil() bool { return sposWorkerMock == nil diff --git a/consensus/mock/syncTimerMock.go b/consensus/mock/syncTimerMock.go index baa3038ae21..2fa41d42341 100644 --- a/consensus/mock/syncTimerMock.go +++ b/consensus/mock/syncTimerMock.go @@ -10,8 +10,8 @@ type SyncTimerMock struct { CurrentTimeCalled func() time.Time } -// StartSync method does the time synchronization at every syncPeriod time elapsed. This should be started as a go routine -func (stm *SyncTimerMock) StartSync() { +// StartSyncingTime method does the time synchronization every time syncPeriod elapses. This should be started as a go routine +func (stm *SyncTimerMock) StartSyncingTime() { panic("implement me") } @@ -38,6 +38,11 @@ func (stm *SyncTimerMock) CurrentTime() time.Time { return time.Unix(0, 0) } +// Close - +func (stm *SyncTimerMock) Close() error { + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (stm *SyncTimerMock) IsInterfaceNil() bool { return stm == nil diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 6c0bf00e744..458b403c2e4 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -96,6 +96,8 @@ type SubroundsFactory interface { //WorkerHandler represents the interface for the SposWorker type WorkerHandler interface { + Close() error + StartWorking() //AddReceivedMessageCall adds a new handler function for a received message type AddReceivedMessageCall(messageType consensus.MessageType, receivedMessageCall func(cnsDta *consensus.Message) bool) //AddReceivedHeaderHandler adds a new handler function for a received header diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index a61c34a94da..d7ed1a1ffcc 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -1,6 +1,7 @@ package spos import ( + "context" "encoding/hex" "fmt" "sync" @@ -9,6 +10,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/core/close" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" @@ -21,6 +23,11 @@ import ( "github.com/ElrondNetwork/elrond-go/statusHandler" ) +var _ close.Closer = (*Worker)(nil) + +// sleepTime defines the time between each iteration made in the checkChannels method +const sleepTime = 5 * time.Millisecond + // Worker defines the data needed by spos to communicate between nodes which are in the validators group type Worker struct { consensusService ConsensusService @@ -64,6 +71,7 @@ type Worker struct { signatureSize int publicKeySize int publicKeyBitmapSize int + cancelFunc func() } // WorkerArgs holds the consensus worker arguments @@ -137,14 +145,19 @@ func NewWorker(args *WorkerArgs) (*Worker, error) { maxMessagesInARoundPerPeer := wrk.consensusService.GetMaxMessagesInARoundPerPeer() wrk.antifloodHandler.SetMaxMessagesForTopic(topic, maxMessagesInARoundPerPeer) - go wrk.checkChannels() - wrk.mapDisplayHashConsensusMessage =
make(map[string][]*consensus.Message) wrk.publicKeyBitmapSize = wrk.getPublicKeyBitmapSize() return &wrk, nil } +// StartWorking actually starts the consensus working mechanism +func (wrk *Worker) StartWorking() { + var ctx context.Context + ctx, wrk.cancelFunc = context.WithCancel(context.Background()) + go wrk.checkChannels(ctx) +} + func checkNewWorkerParams( args *WorkerArgs, ) error { @@ -540,9 +553,19 @@ func (wrk *Worker) executeMessage(cnsDtaList []*consensus.Message) { // checkChannels method is used to listen to the channels through which node receives and consumes, // during the round, different messages from the nodes which are in the validators group -func (wrk *Worker) checkChannels() { +func (wrk *Worker) checkChannels(ctx context.Context) { + var rcvDta *consensus.Message + for { - rcvDta := <-wrk.executeMessageChannel + select { + case <-ctx.Done(): + log.Debug("worker's go routine is stopping...") + return + case rcvDta = <-wrk.executeMessageChannel: + case <-time.After(sleepTime): + continue + } + msgType := consensus.MessageType(rcvDta.MsgType) if callReceivedMessage, exist := wrk.receivedMessagesCalls[msgType]; exist { if callReceivedMessage(rcvDta) { @@ -625,6 +648,15 @@ func (wrk *Worker) SetAppStatusHandler(ash core.AppStatusHandler) error { return nil } +// Close will stop the endlessly running go routine +func (wrk *Worker) Close() error { + if wrk.cancelFunc != nil { + wrk.cancelFunc() + } + + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (wrk *Worker) IsInterfaceNil() bool { return wrk == nil diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 643e905be52..0f4ac1b8a8e 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -1261,6 +1261,7 @@ func TestWorker_ExecuteSignatureMessagesShouldNotExecuteWhenBlockIsNotFinished(t func TestWorker_ExecuteMessagesShouldExecute(t *testing.T) { t.Parallel() wrk := *initWorker() + wrk.StartWorking() blk := &block.Body{} blkStr, _ := mock.MarshalizerMock{}.Marshal(blk) wrk.InitReceivedMessages() @@ -1286,11 +1287,14 @@ func TestWorker_ExecuteMessagesShouldExecute(t *testing.T) { wrk.ExecuteMessage(cnsDataList) assert.Nil(t, wrk.ReceivedMessages()[msgType][0]) + + wrk.Close() } func TestWorker_CheckChannelsShouldWork(t *testing.T) { t.Parallel() wrk := *initWorker() + wrk.StartWorking() wrk.SetReceivedMessagesCalls(bls.MtBlockHeader, func(cnsMsg *consensus.Message) bool { _ = wrk.ConsensusState().SetJobDone(wrk.ConsensusState().ConsensusGroup()[0], bls.SrBlock, true) return true }) @@ -1323,6 +1327,8 @@ assert.Nil(t, err) assert.True(t, isBlockJobDone) + + wrk.Close() } func TestWorker_ExtendShouldReturnWhenRoundIsCanceled(t *testing.T) { @@ -1429,6 +1435,7 @@ func TestWorker_ExtendShouldWork(t *testing.T) { func TestWorker_ExecuteStoredMessagesShouldWork(t *testing.T) { t.Parallel() wrk := *initWorker() + wrk.StartWorking() blk := &block.Body{} blkStr, _ := mock.MarshalizerMock{}.Marshal(blk) wrk.InitReceivedMessages() @@ -1459,6 +1466,8 @@ func TestWorker_ExecuteStoredMessagesShouldWork(t *testing.T) { rcvMsg = wrk.ReceivedMessages() assert.Equal(t, 0, len(rcvMsg[msgType])) + + wrk.Close() } func TestWorker_SetAppStatusHandlerNilShouldErr(t *testing.T) { diff --git a/core/close/interface.go b/core/close/interface.go new file mode 100644 index 00000000000..547c66c49df --- /dev/null +++ b/core/close/interface.go @@ -0,0 +1,6 @@ +package close + +// Closer defines an object that is
able to close and release its inner resources +type Closer interface { + Close() error +} diff --git a/core/errors.go b/core/errors.go index 7e37c61052e..1b9249a62d2 100644 --- a/core/errors.go +++ b/core/errors.go @@ -52,3 +52,15 @@ var ErrInvalidIdentifierForEpochStartBlockRequest = errors.New("invalid identifi // ErrNilEpochStartNotifier signals that nil epoch start notifier has been provided var ErrNilEpochStartNotifier = errors.New("nil epoch start notifier") + +// ErrVersionNumComponents signals that a wrong number of components was provided +var ErrVersionNumComponents = errors.New("invalid version while checking number of components") + +// ErrMajorVersionMismatch signals a major version mismatch +var ErrMajorVersionMismatch = errors.New("major version mismatch") + +// ErrMinorVersionMismatch signals a minor version mismatch +var ErrMinorVersionMismatch = errors.New("minor version mismatch") + +// ErrReleaseVersionMismatch signals a release version mismatch +var ErrReleaseVersionMismatch = errors.New("release version mismatch") diff --git a/core/indexer/common.go b/core/indexer/common.go index 0aa97bfd1ea..ebf286aed62 100644 --- a/core/indexer/common.go +++ b/core/indexer/common.go @@ -140,10 +140,6 @@ func (cm *commonProcessor) buildTransaction( header data.HeaderHandler, txStatus string, ) *Transaction { - gasPriceBig := big.NewInt(0).SetUint64(tx.GasPrice) - gasLimitBig := big.NewInt(0).SetUint64(tx.GasLimit) - gasUsed := big.NewInt(0).Mul(gasPriceBig, gasLimitBig).String() - return &Transaction{ Hash: hex.EncodeToString(txHash), MBHash: hex.EncodeToString(mbHash), @@ -160,7 +156,7 @@ func (cm *commonProcessor) buildTransaction( Signature: hex.EncodeToString(tx.Signature), Timestamp: time.Duration(header.GetTimeStamp()), Status: txStatus, - GasUsed: gasUsed, + GasUsed: tx.GasLimit, } } @@ -354,17 +350,14 @@ func prepareTxUpdate(tx *Transaction) ([]byte, []byte) { return nil, nil } - gasPriceBig := big.NewInt(0).SetUint64(tx.GasPrice) - gasLimitBig := big.NewInt(0).SetUint64(tx.GasLimit) - gas := big.NewInt(0).Mul(gasPriceBig, gasLimitBig).String() - if tx.GasUsed == gas { + if tx.GasUsed == tx.GasLimit { // do not update gasUsed because it is the same with gasUsed when transaction was saved first time in database serializedData = []byte(fmt.Sprintf(`{ "doc" : { "log" : %s, "scResults" : %s, "status": "%s", "timestamp": %s } }`, string(marshalizedLog), string(scResults), tx.Status, string(marshalizedTimestamp))) } else { // update gasUsed because was changed (is a smart contract operation) - serializedData = []byte(fmt.Sprintf(`{ "doc" : { "log" : %s, "scResults" : %s, "status": "%s", "timestamp": %s, "gasUsed" : "%s" } }`, - string(marshalizedLog), string(scResults), tx.Status, string(marshalizedTimestamp), tx.GasUsed)) + serializedData = []byte(fmt.Sprintf(`{ "doc" : { "log" : %s, "scResults" : %s, "status": "%s", "timestamp": %s, "gasUsed" : %d } }`, + string(marshalizedLog), string(scResults), tx.Status, string(marshalizedTimestamp), tx.GasUsed)) } return meta, serializedData diff --git a/core/indexer/data.go b/core/indexer/data.go index 3fcf093ce85..c18a2fc25a7 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -21,11 +21,11 @@ type Transaction struct { SenderShard uint32 `json:"senderShard"` GasPrice uint64 `json:"gasPrice"` GasLimit uint64 `json:"gasLimit"` + GasUsed uint64 `json:"gasUsed"` Data string `json:"data"` Signature string `json:"signature"` Timestamp time.Duration `json:"timestamp"` Status string `json:"status"` - GasUsed string
`json:"gasUsed"` SmartContractResults []ScResult `json:"scResults"` Log TxLog `json:"-"` } diff --git a/core/indexer/elasticsearchDatabase_test.go b/core/indexer/elasticsearchDatabase_test.go index 548bfb4f855..6038e23548e 100644 --- a/core/indexer/elasticsearchDatabase_test.go +++ b/core/indexer/elasticsearchDatabase_test.go @@ -20,6 +20,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/receipt" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/elastic/go-elasticsearch/v7/esapi" @@ -387,14 +388,14 @@ func TestUpdateTransaction(t *testing.T) { txHash1 := []byte("txHash1") tx1 := &transaction.Transaction{ - GasPrice: 1, + GasPrice: 10, GasLimit: 500, } txHash2 := []byte("txHash2") sndAddr := []byte("snd") tx2 := &transaction.Transaction{ - GasLimit: 1, - GasPrice: 500, + GasPrice: 10, + GasLimit: 500, SndAddr: sndAddr, } txHash3 := []byte("txHash3") @@ -420,6 +421,15 @@ func TestUpdateTransaction(t *testing.T) { Value: big.NewInt(150), } + rTx1Hash := []byte("rTxHash1") + rTx1 := &rewardTx.RewardTx{ + Round: 1113, + } + rTx2Hash := []byte("rTxHash2") + rTx2 := &rewardTx.RewardTx{ + Round: 1114, + } + body := &dataBlock.Body{ MiniBlocks: []*dataBlock.MiniBlock{ { @@ -430,6 +440,10 @@ func TestUpdateTransaction(t *testing.T) { TxHashes: [][]byte{txHash3}, Type: dataBlock.TxBlock, }, + { + Type: dataBlock.RewardsBlock, + TxHashes: [][]byte{rTx1Hash, rTx2Hash}, + }, { TxHashes: [][]byte{recHash1}, Type: dataBlock.ReceiptBlock, @@ -446,6 +460,8 @@ func TestUpdateTransaction(t *testing.T) { string(txHash2): tx2, string(txHash3): tx3, string(recHash1): rec1, + string(rTx1Hash): rTx1, + string(rTx2Hash): rTx2, } body.MiniBlocks[0].ReceiverShardID = 1 diff --git a/core/indexer/processTransactions.go b/core/indexer/processTransactions.go index 99816ac3092..57c2d03657b 100644 --- a/core/indexer/processTransactions.go +++ b/core/indexer/processTransactions.go @@ -69,8 +69,9 @@ func (tdp *txDatabaseProcessor) prepareTransactionsForDatabase( gasUsed := big.NewInt(0).SetUint64(tx.GasPrice) gasUsed.Mul(gasUsed, big.NewInt(0).SetUint64(tx.GasLimit)) gasUsed.Sub(gasUsed, rec.Value) + gasUsed.Div(gasUsed, big.NewInt(0).SetUint64(tx.GasPrice)) - tx.GasUsed = gasUsed.String() + tx.GasUsed = gasUsed.Uint64() } countScResults := make(map[string]int) @@ -116,10 +117,8 @@ func (tdp *txDatabaseProcessor) addScResultInfoInTx(scr *smartContractResult.Sma tx.SmartContractResults = append(tx.SmartContractResults, dbScResult) if dbScResult.GasLimit != 0 && dbScResult.Value != "0" { - gasUsed := big.NewInt(0).SetUint64(tx.GasPrice) - gasUsed.Mul(gasUsed, big.NewInt(0).SetUint64(tx.GasLimit)) - gasUsed.Sub(gasUsed, scr.Value) - tx.GasUsed = gasUsed.String() + gasUsed := tx.GasLimit - scr.GasLimit + tx.GasUsed = gasUsed } return tx diff --git a/core/indexer/processTransactions_test.go b/core/indexer/processTransactions_test.go index 803f0117f76..067a99d3382 100644 --- a/core/indexer/processTransactions_test.go +++ b/core/indexer/processTransactions_test.go @@ -19,9 +19,15 @@ func TestPrepareTransactionsForDatabase(t *testing.T) { t.Parallel() txHash1 := []byte("txHash1") - tx1 := &transaction.Transaction{} + tx1 := &transaction.Transaction{ + GasLimit: 100, + GasPrice: 100, + } txHash2 := []byte("txHash2") - tx2 := &transaction.Transaction{} + tx2 := &transaction.Transaction{ + GasLimit: 100, 
+ GasPrice: 100, + } txHash3 := []byte("txHash3") tx3 := &transaction.Transaction{} txHash4 := []byte("txHash4") diff --git a/core/versioning/versionComparator.go b/core/versioning/versionComparator.go new file mode 100644 index 00000000000..402272b3fb0 --- /dev/null +++ b/core/versioning/versionComparator.go @@ -0,0 +1,71 @@ +package versioning + +import ( + "fmt" + "strings" + + "github.com/ElrondNetwork/elrond-go/core" +) + +const numComponents = 3 + +type versionComparator struct { + version string + major string + minor string + release string +} + +// NewVersionComparator returns a new version comparator instance +func NewVersionComparator(providedVersion string) (*versionComparator, error) { + vc := &versionComparator{ + version: providedVersion, + } + + var err error + vc.major, vc.minor, vc.release, err = vc.splitVersionComponents(providedVersion) + if err != nil { + return nil, err + } + + return vc, nil +} + +func (vc *versionComparator) splitVersionComponents(version string) (string, string, string, error) { + components := strings.Split(version, ".") + if len(components) != numComponents { + return "", "", "", fmt.Errorf("%w, expected %d, got %d", + core.ErrVersionNumComponents, + numComponents, + len(components), + ) + } + + return components[0], components[1], components[2], nil +} + +// Check verifies whether the provided version is compatible with the stored one. The comparison is done on all 3 components: +// major, minor and release. A wildcard in any of the components will accept any string +func (vc *versionComparator) Check(version string) error { + major, minor, release, err := vc.splitVersionComponents(version) + if err != nil { + return err + } + + if major != vc.major && vc.major != "*" { + return fmt.Errorf("%w, expected version %s, got %s", core.ErrMajorVersionMismatch, vc.version, version) + } + if minor != vc.minor && vc.minor != "*" { + return fmt.Errorf("%w, expected version %s, got %s", core.ErrMinorVersionMismatch, vc.version, version) + } + if release != vc.release && vc.release != "*" { + return fmt.Errorf("%w, expected version %s, got %s", core.ErrReleaseVersionMismatch, vc.version, version) + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (vc *versionComparator) IsInterfaceNil() bool { + return vc == nil +} diff --git a/core/versioning/versionComparator_test.go b/core/versioning/versionComparator_test.go new file mode 100644 index 00000000000..ea699452d3b --- /dev/null +++ b/core/versioning/versionComparator_test.go @@ -0,0 +1,154 @@ +package versioning + +import ( + "errors" + "testing" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/stretchr/testify/assert" +) + +func TestNewVersionComparator_WrongVersionShouldErr(t *testing.T) { + t.Parallel() + + vc, err := NewVersionComparator("not a valid version") + + assert.True(t, errors.Is(err, core.ErrVersionNumComponents)) + assert.True(t, check.IfNil(vc)) +} + +func TestNewVersionComparator_GoodVersionShouldWork(t *testing.T) { + t.Parallel() + + major := "a" + minor := "b" + release := "*" + vc, err := NewVersionComparator(major + "." + minor + "."
+ release) + + assert.Nil(t, err) + assert.False(t, check.IfNil(vc)) + + assert.Equal(t, major, vc.major) + assert.Equal(t, minor, vc.minor) + assert.Equal(t, release, vc.release) +} + +//------- Check + +func TestNewVersionComparator_CheckNotAversionShouldErr(t *testing.T) { + t.Parallel() + + major := "a" + minor := "b" + release := "*" + vc, _ := NewVersionComparator(major + "." + minor + "." + release) + + err := vc.Check("not a version") + + assert.True(t, errors.Is(err, core.ErrVersionNumComponents)) +} + +func TestNewVersionComparator_MajorMismatchShouldErr(t *testing.T) { + t.Parallel() + + major := "a" + minor := "b" + release := "c" + vc, _ := NewVersionComparator(major + "." + minor + "." + release) + + err := vc.Check("aa.b.c") + + assert.True(t, errors.Is(err, core.ErrMajorVersionMismatch)) +} + +func TestNewVersionComparator_MinorMismatchShouldErr(t *testing.T) { + t.Parallel() + + major := "a" + minor := "b" + release := "c" + vc, _ := NewVersionComparator(major + "." + minor + "." + release) + + err := vc.Check("a.bb.c") + + assert.True(t, errors.Is(err, core.ErrMinorVersionMismatch)) +} + +func TestNewVersionComparator_ReleaseMismatchShouldErr(t *testing.T) { + t.Parallel() + + major := "a" + minor := "b" + release := "c" + vc, _ := NewVersionComparator(major + "." + minor + "." + release) + + err := vc.Check("a.b.cc") + + assert.True(t, errors.Is(err, core.ErrReleaseVersionMismatch)) +} + +func TestNewVersionComparator_ShouldWork(t *testing.T) { + t.Parallel() + + major := "a" + minor := "b" + release := "c" + vc, _ := NewVersionComparator(major + "." + minor + "." + release) + + err := vc.Check("a.b.c") + + assert.Nil(t, err) +} + +func TestNewVersionComparator_WildCardShouldWork(t *testing.T) { + t.Parallel() + + major := "*" + minor := "*" + release := "*" + vc, _ := NewVersionComparator(major + "." + minor + "." + release) + + err := vc.Check("a.b.c") + + assert.Nil(t, err) +} + +func TestNewVersionComparator_WildCardMajorShouldWork(t *testing.T) { + t.Parallel() + + major := "*" + minor := "b" + release := "c" + vc, _ := NewVersionComparator(major + "." + minor + "." + release) + + err := vc.Check("a.b.c") + + assert.Nil(t, err) +} + +func TestNewVersionComparator_WildCardMinorShouldWork(t *testing.T) { + t.Parallel() + + major := "a" + minor := "*" + release := "c" + vc, _ := NewVersionComparator(major + "." + minor + "." + release) + + err := vc.Check("a.b.c") + + assert.Nil(t, err) +} + +func TestNewVersionComparator_WildCardReleaseShouldWork(t *testing.T) { + t.Parallel() + + major := "a" + minor := "b" + release := "*" + vc, _ := NewVersionComparator(major + "." + minor + "." 
+ release) + + err := vc.Check("a.b.c") + + assert.Nil(t, err) +} diff --git a/data/interface.go b/data/interface.go index 61e80b4c82a..5b107b086c7 100644 --- a/data/interface.go +++ b/data/interface.go @@ -210,7 +210,7 @@ type StorageManager interface { // TrieFactory creates new tries type TrieFactory interface { - Create(config.StorageConfig, string, bool) (StorageManager, Trie, error) + Create(config.StorageConfig, string, bool, uint) (StorageManager, Trie, error) IsInterfaceNil() bool } diff --git a/data/state/accountsDB.go b/data/state/accountsDB.go index 045da0fcbea..4d304da82f5 100644 --- a/data/state/accountsDB.go +++ b/data/state/accountsDB.go @@ -227,6 +227,11 @@ func (adb *AccountsDB) saveDataTrie(accountHandler baseAccountHandler) error { accountHandler.SetRootHash(rootHash) trackableDataTrie.ClearDataCaches() + log.Trace("accountsDB.SaveDataTrie", + "address", hex.EncodeToString(accountHandler.AddressBytes()), + "new root hash", accountHandler.GetRootHash(), + ) + return nil } diff --git a/data/state/accountsDB_test.go b/data/state/accountsDB_test.go index 33379f4a153..fb95e8520e4 100644 --- a/data/state/accountsDB_test.go +++ b/data/state/accountsDB_test.go @@ -810,7 +810,8 @@ func TestAccountsDB_RevertToSnapshotShouldWork(t *testing.T) { hsh := mock.HasherMock{} accFactory := factory.NewAccountCreator() storageManager, _ := trie.NewTrieStorageManagerWithoutPruning(mock.NewMemDbMock()) - tr, _ := trie.NewTrie(storageManager, marsh, hsh) + maxTrieLevelInMemory := uint(5) + tr, _ := trie.NewTrie(storageManager, marsh, hsh, maxTrieLevelInMemory) adb, _ := state.NewAccountsDB(tr, hsh, marsh, accFactory) diff --git a/data/syncer/baseAccountsSyncer.go b/data/syncer/baseAccountsSyncer.go index 9549eb25430..14f83a244a6 100644 --- a/data/syncer/baseAccountsSyncer.go +++ b/data/syncer/baseAccountsSyncer.go @@ -16,29 +16,31 @@ import ( ) type baseAccountsSyncer struct { - hasher hashing.Hasher - marshalizer marshal.Marshalizer - trieSyncers map[string]data.TrieSyncer - dataTries map[string]data.Trie - mutex sync.Mutex - trieStorageManager data.StorageManager - requestHandler trie.RequestHandler - waitTime time.Duration - shardId uint32 - cacher storage.Cacher - rootHash []byte + hasher hashing.Hasher + marshalizer marshal.Marshalizer + trieSyncers map[string]data.TrieSyncer + dataTries map[string]data.Trie + mutex sync.Mutex + trieStorageManager data.StorageManager + requestHandler trie.RequestHandler + waitTime time.Duration + shardId uint32 + cacher storage.Cacher + rootHash []byte + maxTrieLevelInMemory uint } const minWaitTime = time.Second // ArgsNewBaseAccountsSyncer defines the arguments needed for the new account syncer type ArgsNewBaseAccountsSyncer struct { - Hasher hashing.Hasher - Marshalizer marshal.Marshalizer - TrieStorageManager data.StorageManager - RequestHandler trie.RequestHandler - WaitTime time.Duration - Cacher storage.Cacher + Hasher hashing.Hasher + Marshalizer marshal.Marshalizer + TrieStorageManager data.StorageManager + RequestHandler trie.RequestHandler + WaitTime time.Duration + Cacher storage.Cacher + MaxTrieLevelInMemory uint } func checkArgs(args ArgsNewBaseAccountsSyncer) error { @@ -67,7 +69,7 @@ func checkArgs(args ArgsNewBaseAccountsSyncer) error { func (b *baseAccountsSyncer) syncMainTrie(rootHash []byte, trieTopic string, ctx context.Context) error { b.rootHash = rootHash - dataTrie, err := trie.NewTrie(b.trieStorageManager, b.marshalizer, b.hasher) + dataTrie, err := trie.NewTrie(b.trieStorageManager, b.marshalizer, b.hasher, 
b.maxTrieLevelInMemory) if err != nil { return err } diff --git a/data/syncer/userAccountsSyncer.go b/data/syncer/userAccountsSyncer.go index 8b594195463..76580000222 100644 --- a/data/syncer/userAccountsSyncer.go +++ b/data/syncer/userAccountsSyncer.go @@ -81,7 +81,7 @@ func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) error { func (u *userAccountsSyncer) syncAccountDataTries(rootHashes [][]byte, ctx context.Context) error { for _, rootHash := range rootHashes { - dataTrie, err := trie.NewTrie(u.trieStorageManager, u.marshalizer, u.hasher) + dataTrie, err := trie.NewTrie(u.trieStorageManager, u.marshalizer, u.hasher, u.maxTrieLevelInMemory) if err != nil { return err } diff --git a/data/trie/branchNode.go b/data/trie/branchNode.go index c55af367d86..3249d12da5f 100644 --- a/data/trie/branchNode.go +++ b/data/trie/branchNode.go @@ -240,7 +240,7 @@ func (bn *branchNode) hashNode() ([]byte, error) { return encodeNodeAndGetHash(bn) } -func (bn *branchNode) commit(force bool, level byte, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error { +func (bn *branchNode) commit(force bool, level byte, maxTrieLevelInMemory uint, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error { level++ err := bn.isEmptyOrNil() if err != nil { @@ -264,7 +264,7 @@ func (bn *branchNode) commit(force bool, level byte, originDb data.DBWriteCacher continue } - err = bn.children[i].commit(force, level, originDb, targetDb) + err = bn.children[i].commit(force, level, maxTrieLevelInMemory, originDb, targetDb) if err != nil { return err } @@ -274,7 +274,7 @@ func (bn *branchNode) commit(force bool, level byte, originDb data.DBWriteCacher if err != nil { return err } - if level == maxTrieLevelAfterCommit { + if uint(level) == maxTrieLevelInMemory { var collapsed node collapsed, err = bn.getCollapsed() if err != nil { diff --git a/data/trie/branchNode_test.go b/data/trie/branchNode_test.go index d53debce30e..7a6bb8a5287 100644 --- a/data/trie/branchNode_test.go +++ b/data/trie/branchNode_test.go @@ -1,6 +1,7 @@ package trie import ( + "bytes" "encoding/hex" "errors" "fmt" @@ -187,9 +188,10 @@ func TestBranchNode_setRootHash(t *testing.T) { marsh, hsh := getTestMarshAndHasher() trieStorage1, _ := NewTrieStorageManager(db, marsh, hsh, cfg, &mock.EvictionWaitingList{}, config.TrieStorageManagerConfig{}) trieStorage2, _ := NewTrieStorageManager(db, marsh, hsh, cfg, &mock.EvictionWaitingList{}, config.TrieStorageManagerConfig{}) + maxTrieLevelInMemory := uint(5) - tr1, _ := NewTrie(trieStorage1, marsh, hsh) - tr2, _ := NewTrie(trieStorage2, marsh, hsh) + tr1, _ := NewTrie(trieStorage1, marsh, hsh, maxTrieLevelInMemory) + tr2, _ := NewTrie(trieStorage2, marsh, hsh, maxTrieLevelInMemory) maxIterations := 10000 for i := 0; i < maxIterations; i++ { @@ -348,7 +350,7 @@ func TestBranchNode_commit(t *testing.T) { hash, _ := encodeNodeAndGetHash(collapsedBn) _ = bn.setHash() - err := bn.commit(false, 0, db, db) + err := bn.commit(false, 0, 5, db, db) assert.Nil(t, err) encNode, _ := db.Get(hash) @@ -363,7 +365,7 @@ func TestBranchNode_commitEmptyNode(t *testing.T) { bn := emptyDirtyBranchNode() - err := bn.commit(false, 0, nil, nil) + err := bn.commit(false, 0, 5, nil, nil) assert.True(t, errors.Is(err, ErrEmptyBranchNode)) } @@ -372,7 +374,7 @@ func TestBranchNode_commitNilNode(t *testing.T) { var bn *branchNode - err := bn.commit(false, 0, nil, nil) + err := bn.commit(false, 0, 5, nil, nil) assert.True(t, errors.Is(err, ErrNilBranchNode)) } @@ -417,7 +419,7 @@ func TestBranchNode_resolveCollapsed(t 
*testing.T) { childPos := byte(2) _ = bn.setHash() - _ = bn.commit(false, 0, db, db) + _ = bn.commit(false, 0, 5, db, db) resolved, _ := newLeafNode([]byte("dog"), []byte("dog"), bn.marsh, bn.hasher) resolved.dirty = false resolved.hash = bn.EncodedChildren[childPos] @@ -518,7 +520,7 @@ func TestBranchNode_tryGetCollapsedNode(t *testing.T) { bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) _ = bn.setHash() - _ = bn.commit(false, 0, db, db) + _ = bn.commit(false, 0, 5, db, db) childPos := byte(2) key := append([]byte{childPos}, []byte("dog")...) @@ -644,7 +646,7 @@ func TestBranchNode_insertCollapsedNode(t *testing.T) { node, _ := newLeafNode(key, []byte("dogs"), bn.marsh, bn.hasher) _ = bn.setHash() - _ = bn.commit(false, 0, db, db) + _ = bn.commit(false, 0, 5, db, db) dirty, newBn, _, err := collapsedBn.insert(node, db) assert.True(t, dirty) @@ -663,7 +665,7 @@ func TestBranchNode_insertInStoredBnOnExistingPos(t *testing.T) { key := append([]byte{childPos}, []byte("dog")...) node, _ := newLeafNode(key, []byte("dogs"), bn.marsh, bn.hasher) - _ = bn.commit(false, 0, db, db) + _ = bn.commit(false, 0, 5, db, db) bnHash := bn.getHash() ln, _, _ := bn.getNext(key, db) lnHash := ln.getHash() @@ -684,7 +686,7 @@ func TestBranchNode_insertInStoredBnOnNilPos(t *testing.T) { key := append([]byte{nilChildPos}, []byte("dog")...) node, _ := newLeafNode(key, []byte("dogs"), bn.marsh, bn.hasher) - _ = bn.commit(false, 0, db, db) + _ = bn.commit(false, 0, 5, db, db) bnHash := bn.getHash() expectedHashes := [][]byte{bnHash} @@ -763,7 +765,7 @@ func TestBranchNode_deleteFromStoredBn(t *testing.T) { childPos := byte(2) lnKey := append([]byte{childPos}, []byte("dog")...) - _ = bn.commit(false, 0, db, db) + _ = bn.commit(false, 0, 5, db, db) bnHash := bn.getHash() ln, _, _ := bn.getNext(lnKey, db) lnHash := ln.getHash() @@ -845,7 +847,7 @@ func TestBranchNode_deleteCollapsedNode(t *testing.T) { db := mock.NewMemDbMock() bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) _ = bn.setHash() - _ = bn.commit(false, 0, db, db) + _ = bn.commit(false, 0, 5, db, db) childPos := byte(2) key := append([]byte{childPos}, []byte("dog")...) 
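The maxTrieLevelInMemory parameter that now flows through commit in the branchNode and extensionNode hunks above bounds how many trie levels survive a commit as live objects: once the recursion depth reaches the configured level, the node is swapped for its collapsed form, which keeps only the encoded child hashes. A minimal sketch of that mechanic, using a hypothetical sketchNode type rather than the repository's node interface:

package trie

// sketchNode is a simplified stand-in for a trie node: it keeps either live
// child pointers or, once collapsed, only the hashes that reference them.
type sketchNode struct {
	children    []*sketchNode
	childHashes [][]byte
	collapsed   bool
}

// commit persists the subtree bottom-up, then collapses every node whose depth
// (counted from the commit root, which sits at level 1) has reached
// maxTrieLevelInMemory, so at most that many levels stay in memory afterwards.
func (n *sketchNode) commit(level byte, maxTrieLevelInMemory uint, persist func(*sketchNode)) {
	level++
	for _, child := range n.children {
		if child != nil {
			child.commit(level, maxTrieLevelInMemory, persist)
		}
	}
	persist(n) // the serialized node reaches the DB before its children are dropped
	if uint(level) == maxTrieLevelInMemory {
		n.children = nil // keep only childHashes, mirroring getCollapsed()
		n.collapsed = true
	}
}

With maxTrieLevelInMemory set to 1, the root collapses immediately after the commit, which is what TestBranchNode_commitCollapsesTrieIfMaxTrieLevelInMemoryIsReached below asserts against the collapsed node.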
@@ -1017,7 +1019,7 @@ func TestBranchNode_getChildrenCollapsedBn(t *testing.T) { db := mock.NewMemDbMock() bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) - _ = bn.commit(true, 0, db, db) + _ = bn.commit(true, 0, 5, db, db) children, err := collapsedBn.getChildren(db) assert.Nil(t, err) @@ -1208,3 +1210,128 @@ func BenchmarkMarshallNodeJson(b *testing.B) { _, _ = marsh.Marshal(bn) } } + +func TestBranchNode_newBranchNodeNilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + bn, err := newBranchNode(nil, mock.HasherMock{}) + assert.Nil(t, bn) + assert.Equal(t, ErrNilMarshalizer, err) +} + +func TestBranchNode_newBranchNodeNilHasherShouldErr(t *testing.T) { + t.Parallel() + + bn, err := newBranchNode(&mock.MarshalizerMock{}, nil) + assert.Nil(t, bn) + assert.Equal(t, ErrNilHasher, err) +} + +func TestBranchNode_newBranchNodeOkVals(t *testing.T) { + t.Parallel() + + var children [nrOfChildren]node + marsh, hasher := getTestMarshAndHasher() + bn, err := newBranchNode(marsh, hasher) + + assert.Nil(t, err) + assert.Equal(t, make([][]byte, nrOfChildren), bn.EncodedChildren) + assert.Equal(t, children, bn.children) + assert.Equal(t, marsh, bn.marsh) + assert.Equal(t, hasher, bn.hasher) + assert.True(t, bn.dirty) +} + +func TestBranchNode_getMarshalizer(t *testing.T) { + t.Parallel() + + expectedMarsh := &mock.MarshalizerMock{} + bn := &branchNode{ + baseNode: &baseNode{ + marsh: expectedMarsh, + }, + } + + marsh := bn.getMarshalizer() + assert.Equal(t, expectedMarsh, marsh) +} + +func TestBranchNode_setRootHashCollapsedChildren(t *testing.T) { + t.Parallel() + + marsh, hasher := getTestMarshAndHasher() + bn := &branchNode{ + baseNode: &baseNode{ + marsh: marsh, + hasher: hasher, + }, + } + + _, collapsedBn := getBnAndCollapsedBn(marsh, hasher) + _, collapsedEn := getEnAndCollapsedEn() + collapsedLn := getLn(marsh, hasher) + + bn.children[0] = collapsedBn + bn.children[1] = collapsedEn + bn.children[2] = collapsedLn + + err := bn.setRootHash() + assert.Nil(t, err) +} + +func TestBranchNode_commitCollapsesTrieIfMaxTrieLevelInMemoryIsReached(t *testing.T) { + t.Parallel() + + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + _ = collapsedBn.setRootHash() + + err := bn.commit(true, 0, 1, mock.NewMemDbMock(), mock.NewMemDbMock()) + assert.Nil(t, err) + + assert.Equal(t, collapsedBn.EncodedChildren, bn.EncodedChildren) + assert.Equal(t, collapsedBn.children, bn.children) + assert.Equal(t, collapsedBn.hash, bn.hash) +} + +func TestBranchNode_reduceNodeBnChild(t *testing.T) { + t.Parallel() + + marsh, hasher := getTestMarshAndHasher() + en, _ := getEnAndCollapsedEn() + pos := 5 + expectedNode, _ := newExtensionNode([]byte{byte(pos)}, en.child, marsh, hasher) + + newNode, err := en.child.reduceNode(pos) + assert.Nil(t, err) + assert.Equal(t, expectedNode, newNode) +} + +func TestBranchNode_printShouldNotPanicEvenIfNodeIsCollapsed(t *testing.T) { + t.Parallel() + + bnWriter := bytes.NewBuffer(make([]byte, 0)) + collapsedBnWriter := bytes.NewBuffer(make([]byte, 0)) + + db := mock.NewMemDbMock() + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + _ = bn.commit(true, 0, 5, db, db) + _ = collapsedBn.commit(true, 0, 5, db, db) + + bn.print(bnWriter, 0, db) + collapsedBn.print(collapsedBnWriter, 0, db) + + assert.Equal(t, bnWriter.Bytes(), collapsedBnWriter.Bytes()) +} + +func TestBranchNode_getDirtyHashesFromCleanNode(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + _ = bn.commit(true, 0, 5, 
db, db) + dirtyHashes := make(data.ModifiedHashes) + + err := bn.getDirtyHashes(dirtyHashes) + assert.Nil(t, err) + assert.Equal(t, 0, len(dirtyHashes)) +} diff --git a/data/trie/extensionNode.go b/data/trie/extensionNode.go index 3b02d8b8034..5726892acdc 100644 --- a/data/trie/extensionNode.go +++ b/data/trie/extensionNode.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "fmt" "io" - "io/ioutil" "sync" "github.com/ElrondNetwork/elrond-go/core/check" @@ -16,25 +15,6 @@ import ( var _ = node(&extensionNode{}) -// Save saves the serialized data of an extension node into a stream through protobuf -func (en *extensionNode) Save(w io.Writer) error { - b, err := en.Marshal() - if err != nil { - return err - } - _, err = w.Write(b) - return err -} - -// Load loads the data from the stream into an extension node object through protobuf -func (en *extensionNode) Load(r io.Reader) error { - b, err := ioutil.ReadAll(r) - if err != nil { - return err - } - return en.Unmarshal(b) -} - func newExtensionNode(key []byte, child node, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*extensionNode, error) { if check.IfNil(marshalizer) { return nil, ErrNilMarshalizer @@ -175,7 +155,7 @@ func (en *extensionNode) hashNode() ([]byte, error) { return encodeNodeAndGetHash(en) } -func (en *extensionNode) commit(force bool, level byte, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error { +func (en *extensionNode) commit(force bool, level byte, maxTrieLevelInMemory uint, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error { level++ err := en.isEmptyOrNil() if err != nil { @@ -195,7 +175,7 @@ func (en *extensionNode) commit(force bool, level byte, originDb data.DBWriteCac } if en.child != nil { - err = en.child.commit(force, level, originDb, targetDb) + err = en.child.commit(force, level, maxTrieLevelInMemory, originDb, targetDb) if err != nil { return err } @@ -206,7 +186,7 @@ func (en *extensionNode) commit(force bool, level byte, originDb data.DBWriteCac if err != nil { return err } - if level == maxTrieLevelAfterCommit { + if uint(level) == maxTrieLevelInMemory { var collapsed node collapsed, err = en.getCollapsed() if err != nil { diff --git a/data/trie/extensionNode_test.go b/data/trie/extensionNode_test.go index 13f764331de..7d83dddc923 100644 --- a/data/trie/extensionNode_test.go +++ b/data/trie/extensionNode_test.go @@ -1,12 +1,14 @@ package trie import ( + "bytes" "encoding/hex" "errors" "fmt" "reflect" "testing" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/stretchr/testify/assert" @@ -235,7 +237,7 @@ func TestExtensionNode_commit(t *testing.T) { hash, _ := encodeNodeAndGetHash(collapsedEn) _ = en.setHash() - err := en.commit(false, 0, db, db) + err := en.commit(false, 0, 5, db, db) assert.Nil(t, err) encNode, _ := db.Get(hash) @@ -251,7 +253,7 @@ func TestExtensionNode_commitEmptyNode(t *testing.T) { en := &extensionNode{} - err := en.commit(false, 0, nil, nil) + err := en.commit(false, 0, 5, nil, nil) assert.True(t, errors.Is(err, ErrEmptyExtensionNode)) } @@ -260,7 +262,7 @@ func TestExtensionNode_commitNilNode(t *testing.T) { var en *extensionNode - err := en.commit(false, 0, nil, nil) + err := en.commit(false, 0, 5, nil, nil) assert.True(t, errors.Is(err, ErrNilExtensionNode)) } @@ -273,7 +275,7 @@ func TestExtensionNode_commitCollapsedNode(t *testing.T) { _ = collapsedEn.setHash() collapsedEn.dirty = true - err := collapsedEn.commit(false, 0, db, db) + err := 
collapsedEn.commit(false, 0, 5, db, db) assert.Nil(t, err) encNode, _ := db.Get(hash) @@ -323,7 +325,7 @@ func TestExtensionNode_resolveCollapsed(t *testing.T) { db := mock.NewMemDbMock() en, collapsedEn := getEnAndCollapsedEn() _ = en.setHash() - _ = en.commit(false, 0, db, db) + _ = en.commit(false, 0, 5, db, db) _, resolved := getBnAndCollapsedBn(en.marsh, en.hasher) err := collapsedEn.resolveCollapsed(0, db) @@ -409,7 +411,7 @@ func TestExtensionNode_tryGetCollapsedNode(t *testing.T) { db := mock.NewMemDbMock() en, collapsedEn := getEnAndCollapsedEn() _ = en.setHash() - _ = en.commit(false, 0, db, db) + _ = en.commit(false, 0, 5, db, db) enKey := []byte{100} bnKey := []byte{2} @@ -500,7 +502,7 @@ func TestExtensionNode_insertCollapsedNode(t *testing.T) { node, _ := newLeafNode(key, []byte("dogs"), en.marsh, en.hasher) _ = en.setHash() - _ = en.commit(false, 0, db, db) + _ = en.commit(false, 0, 5, db, db) dirty, newNode, _, err := collapsedEn.insert(node, db) assert.True(t, dirty) @@ -519,7 +521,7 @@ func TestExtensionNode_insertInStoredEnSameKey(t *testing.T) { key := append(enKey, []byte{11, 12}...) node, _ := newLeafNode(key, []byte("dogs"), en.marsh, en.hasher) - _ = en.commit(false, 0, db, db) + _ = en.commit(false, 0, 5, db, db) enHash := en.getHash() bn, _, _ := en.getNext(enKey, db) bnHash := bn.getHash() @@ -541,7 +543,7 @@ func TestExtensionNode_insertInStoredEnDifferentKey(t *testing.T) { nodeKey := []byte{11, 12} node, _ := newLeafNode(nodeKey, []byte("dogs"), bn.marsh, bn.hasher) - _ = en.commit(false, 0, db, db) + _ = en.commit(false, 0, 5, db, db) expectedHashes := [][]byte{en.getHash()} dirty, _, oldHashes, err := en.insert(node, db) @@ -623,7 +625,7 @@ func TestExtensionNode_deleteFromStoredEn(t *testing.T) { key = append(key, lnKey...) 
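// Editor's sketch (not part of the patch): the extra "5" threaded through
// every commit(...) call in these tests is the new maxTrieLevelInMemory
// argument. As the extensionNode.go hunk above shows, the hard-coded
// maxTrieLevelAfterCommit constant is replaced by this caller-supplied
// limit: once the recursion depth reaches it, the committed node is swapped
// for its collapsed form, so at most that many trie levels remain in memory.
// The recursive shape, simplified, with hypothetical helpers
// (loadedChildren, persistNode, replaceWithCollapsed):
//
//	func commitSketch(n node, level byte, maxTrieLevelInMemory uint, db data.DBWriteCacher) error {
//		level++
//		// commit children first so their hashes are already persisted
//		for _, child := range loadedChildren(n) {
//			if err := commitSketch(child, level, maxTrieLevelInMemory, db); err != nil {
//				return err
//			}
//		}
//		if err := persistNode(n, db); err != nil {
//			return err
//		}
//		// depth limit reached: keep only the collapsed representation
//		if uint(level) == maxTrieLevelInMemory {
//			collapsed, err := n.getCollapsed()
//			if err != nil {
//				return err
//			}
//			replaceWithCollapsed(n, collapsed)
//		}
//		return nil
//	}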
lnPathKey := key - _ = en.commit(false, 0, db, db) + _ = en.commit(false, 0, 5, db, db) bn, key, _ := en.getNext(key, db) ln, _, _ := bn.getNext(key, db) expectedHashes := [][]byte{ln.getHash(), bn.getHash(), en.getHash()} @@ -685,7 +687,7 @@ func TestExtensionNode_deleteCollapsedNode(t *testing.T) { db := mock.NewMemDbMock() en, collapsedEn := getEnAndCollapsedEn() _ = en.setHash() - _ = en.commit(false, 0, db, db) + _ = en.commit(false, 0, 5, db, db) enKey := []byte{100} bnKey := []byte{2} @@ -814,7 +816,7 @@ func TestExtensionNode_getChildrenCollapsedEn(t *testing.T) { db := mock.NewMemDbMock() en, collapsedEn := getEnAndCollapsedEn() - _ = en.commit(true, 0, db, db) + _ = en.commit(true, 0, 5, db, db) children, err := collapsedEn.getChildren(db) assert.Nil(t, err) @@ -934,3 +936,93 @@ func getExtensionNodeContents(en *extensionNode) string { return str } + +func TestExtensionNode_newExtensionNodeNilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + en, err := newExtensionNode([]byte("key"), &branchNode{}, nil, mock.HasherMock{}) + assert.Nil(t, en) + assert.Equal(t, ErrNilMarshalizer, err) +} + +func TestExtensionNode_newExtensionNodeNilHasherShouldErr(t *testing.T) { + t.Parallel() + + en, err := newExtensionNode([]byte("key"), &branchNode{}, &mock.MarshalizerMock{}, nil) + assert.Nil(t, en) + assert.Equal(t, ErrNilHasher, err) +} + +func TestExtensionNode_newExtensionNodeOkVals(t *testing.T) { + t.Parallel() + + marsh, hasher := getTestMarshAndHasher() + key := []byte("key") + child := &branchNode{} + en, err := newExtensionNode(key, child, marsh, hasher) + + assert.Nil(t, err) + assert.Equal(t, key, en.Key) + assert.Nil(t, en.EncodedChild) + assert.Equal(t, child, en.child) + assert.Equal(t, hasher, en.hasher) + assert.Equal(t, marsh, en.marsh) + assert.True(t, en.dirty) +} + +func TestExtensionNode_getMarshalizer(t *testing.T) { + t.Parallel() + + marsh, _ := getTestMarshAndHasher() + en := &extensionNode{ + baseNode: &baseNode{ + marsh: marsh, + }, + } + + assert.Equal(t, marsh, en.getMarshalizer()) +} + +func TestExtensionNode_commitCollapsesTrieIfMaxTrieLevelInMemoryIsReached(t *testing.T) { + t.Parallel() + + en, collapsedEn := getEnAndCollapsedEn() + _ = collapsedEn.setRootHash() + + err := en.commit(true, 0, 1, mock.NewMemDbMock(), mock.NewMemDbMock()) + assert.Nil(t, err) + + assert.Equal(t, collapsedEn.EncodedChild, en.EncodedChild) + assert.Equal(t, collapsedEn.child, en.child) + assert.Equal(t, collapsedEn.hash, en.hash) +} + +func TestExtensionNode_printShouldNotPanicEvenIfNodeIsCollapsed(t *testing.T) { + t.Parallel() + + enWriter := bytes.NewBuffer(make([]byte, 0)) + collapsedEnWriter := bytes.NewBuffer(make([]byte, 0)) + + db := mock.NewMemDbMock() + en, collapsedEn := getEnAndCollapsedEn() + _ = en.commit(true, 0, 5, db, db) + _ = collapsedEn.commit(true, 0, 5, db, db) + + en.print(enWriter, 0, db) + collapsedEn.print(collapsedEnWriter, 0, db) + + assert.Equal(t, enWriter.Bytes(), collapsedEnWriter.Bytes()) +} + +func TestExtensionNode_getDirtyHashesFromCleanNode(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + en, _ := getEnAndCollapsedEn() + _ = en.commit(true, 0, 5, db, db) + dirtyHashes := make(data.ModifiedHashes) + + err := en.getDirtyHashes(dirtyHashes) + assert.Nil(t, err) + assert.Equal(t, 0, len(dirtyHashes)) +} diff --git a/data/trie/factory/trieCreator.go b/data/trie/factory/trieCreator.go index dfd8b958fe4..5b13687156b 100644 --- a/data/trie/factory/trieCreator.go +++ b/data/trie/factory/trieCreator.go @@ -57,6 +57,7 @@ func (tc 
*trieCreator) Create( trieStorageCfg config.StorageConfig, shardID string, pruningEnabled bool, + maxTrieLevelInMem uint, ) (data.StorageManager, data.Trie, error) { trieStoragePath, mainDb := path.Split(tc.pathManager.PathForStatic(shardID, trieStorageCfg.DB.FilePath)) @@ -78,7 +79,7 @@ func (tc *trieCreator) Create( return nil, nil, errNewTrie } - newTrie, err := trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher) + newTrie, err := trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher, maxTrieLevelInMem) if err != nil { return nil, nil, err } @@ -123,7 +124,7 @@ func (tc *trieCreator) Create( return nil, nil, err } - newTrie, err := trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher) + newTrie, err := trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher, maxTrieLevelInMem) if err != nil { return nil, nil, err } diff --git a/data/trie/factory/trieCreator_test.go b/data/trie/factory/trieCreator_test.go index 0188ebad533..8894c37b336 100644 --- a/data/trie/factory/trieCreator_test.go +++ b/data/trie/factory/trieCreator_test.go @@ -80,7 +80,8 @@ func TestTrieFactory_CreateNotSupportedCacheType(t *testing.T) { tf, _ := NewTrieFactory(args) trieStorageCfg := config.StorageConfig{} - _, tr, err := tf.Create(trieStorageCfg, "0", false) + maxTrieLevelInMemory := uint(5) + _, tr, err := tf.Create(trieStorageCfg, "0", false, maxTrieLevelInMemory) require.Nil(t, tr) require.Equal(t, storage.ErrNotSupportedCacheType, err) } @@ -92,7 +93,8 @@ func TestTrieFactory_CreateWithoutPrunningWork(t *testing.T) { tf, _ := NewTrieFactory(args) trieStorageCfg := createTrieStorageCfg() - _, tr, err := tf.Create(trieStorageCfg, "0", false) + maxTrieLevelInMemory := uint(5) + _, tr, err := tf.Create(trieStorageCfg, "0", false, maxTrieLevelInMemory) require.NotNil(t, tr) require.Nil(t, err) } @@ -104,7 +106,8 @@ func TestTrieFactory_CreateWithPrunningWrongDbType(t *testing.T) { tf, _ := NewTrieFactory(args) trieStorageCfg := createTrieStorageCfg() - _, tr, err := tf.Create(trieStorageCfg, "0", true) + maxTrieLevelInMemory := uint(5) + _, tr, err := tf.Create(trieStorageCfg, "0", true, maxTrieLevelInMemory) require.Nil(t, tr) require.Equal(t, storage.ErrNotSupportedDBType, err) } @@ -119,7 +122,8 @@ func TestTrieFactory_CreateInvalidCacheSize(t *testing.T) { tf, _ := NewTrieFactory(args) trieStorageCfg := createTrieStorageCfg() - _, tr, err := tf.Create(trieStorageCfg, "0", true) + maxTrieLevelInMemory := uint(5) + _, tr, err := tf.Create(trieStorageCfg, "0", true, maxTrieLevelInMemory) require.Nil(t, tr) require.Equal(t, data.ErrInvalidCacheSize, err) } @@ -135,7 +139,8 @@ func TestTrieFactory_CreateWithPRunningShouldWork(t *testing.T) { tf, _ := NewTrieFactory(args) trieStorageCfg := createTrieStorageCfg() - _, tr, err := tf.Create(trieStorageCfg, "0", true) + maxTrieLevelInMemory := uint(5) + _, tr, err := tf.Create(trieStorageCfg, "0", true, maxTrieLevelInMemory) require.NotNil(t, tr) require.Nil(t, err) } diff --git a/data/trie/interceptedNode.go b/data/trie/interceptedNode.go index f7223b23737..afa42175fa5 100644 --- a/data/trie/interceptedNode.go +++ b/data/trie/interceptedNode.go @@ -129,11 +129,3 @@ func (inTn *InterceptedTrieNode) Fee() *big.Int { func (inTn *InterceptedTrieNode) Identifiers() [][]byte { return [][]byte{inTn.hash} } - -// CreateEndOfProcessingTriggerNode changes the hash of the current node by appending the hash to the current hash. 
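// Editor's note (not part of the patch): this sentinel mechanism, which
// doubled a node's hash to mark the end of processing for received trie
// data, is deleted here without a direct replacement in this file. Judging
// from the epochStart/bootstrap hunks further down (the dropped
// SignalEndOfProcessing and the new EpochStartMetaBlockInterceptorProcessor
// whose GetEpochStartMetaBlock takes a context.Context), completion appears
// to be signalled via context timeout/cancellation rather than via a marker
// node.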
-// This construction will be used to trigger the end of processing for all of the received data -func (inTn *InterceptedTrieNode) CreateEndOfProcessingTriggerNode() { - inTn.mutex.Lock() - inTn.hash = append(inTn.hash, inTn.hash...) - inTn.mutex.Unlock() -} diff --git a/data/trie/interceptedNode_test.go b/data/trie/interceptedNode_test.go index eee6239a724..2bf221802a9 100644 --- a/data/trie/interceptedNode_test.go +++ b/data/trie/interceptedNode_test.go @@ -1,8 +1,10 @@ package trie_test import ( + "math/big" "testing" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/data/trie" @@ -46,7 +48,7 @@ func TestNewInterceptedTrieNode_EmptyBufferShouldFail(t *testing.T) { _, marsh, hasher := getDefaultInterceptedTrieNodeParameters() interceptedNode, err := trie.NewInterceptedTrieNode([]byte{}, marsh, hasher) - assert.Nil(t, interceptedNode) + assert.True(t, check.IfNil(interceptedNode)) assert.Equal(t, trie.ErrValueTooShort, err) } @@ -55,7 +57,7 @@ func TestNewInterceptedTrieNode_NilMarshalizerShouldFail(t *testing.T) { buff, _, hasher := getDefaultInterceptedTrieNodeParameters() interceptedNode, err := trie.NewInterceptedTrieNode(buff, nil, hasher) - assert.Nil(t, interceptedNode) + assert.True(t, check.IfNil(interceptedNode)) assert.Equal(t, trie.ErrNilMarshalizer, err) } @@ -64,7 +66,7 @@ func TestNewInterceptedTrieNode_NilHasherShouldFail(t *testing.T) { buff, marsh, _ := getDefaultInterceptedTrieNodeParameters() interceptedNode, err := trie.NewInterceptedTrieNode(buff, marsh, nil) - assert.Nil(t, interceptedNode) + assert.True(t, check.IfNil(interceptedNode)) assert.Equal(t, trie.ErrNilHasher, err) } @@ -72,7 +74,7 @@ func TestNewInterceptedTrieNode_OkParametersShouldWork(t *testing.T) { t.Parallel() interceptedNode, err := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) - assert.NotNil(t, interceptedNode) + assert.False(t, check.IfNil(interceptedNode)) assert.Nil(t, err) } @@ -106,3 +108,66 @@ func TestInterceptedTrieNode_EncodedNode(t *testing.T) { encNode := interceptedNode.EncodedNode() assert.Equal(t, nodes[0], encNode) } + +func TestInterceptedTrieNode_IsForCurrentShard(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + assert.True(t, interceptedNode.IsForCurrentShard()) +} + +func TestInterceptedTrieNode_Type(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + assert.Equal(t, "intercepted trie node", interceptedNode.Type()) +} + +func TestInterceptedTrieNode_String(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + assert.NotEqual(t, 0, interceptedNode.String()) +} + +func TestInterceptedTrieNode_SenderShardId(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + assert.NotEqual(t, 0, interceptedNode.SenderShardId()) +} + +func TestInterceptedTrieNode_ReceiverShardId(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + assert.NotEqual(t, 0, interceptedNode.ReceiverShardId()) +} + +func TestInterceptedTrieNode_Nonce(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + 
assert.NotEqual(t, 0, interceptedNode.Nonce()) +} + +func TestInterceptedTrieNode_SenderAddress(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + assert.Nil(t, interceptedNode.SenderAddress()) +} + +func TestInterceptedTrieNode_Fee(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + assert.Equal(t, big.NewInt(0), interceptedNode.Fee()) +} + +func TestInterceptedTrieNode_Identifiers(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + assert.Equal(t, [][]byte{interceptedNode.Hash()}, interceptedNode.Identifiers()) +} diff --git a/data/trie/interface.go b/data/trie/interface.go index 4d73f1ab914..2daceff064a 100644 --- a/data/trie/interface.go +++ b/data/trie/interface.go @@ -21,7 +21,7 @@ type node interface { isPosCollapsed(pos int) bool isDirty() bool getEncodedNode() ([]byte, error) - commit(force bool, level byte, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error + commit(force bool, level byte, maxTrieLevelInMemory uint, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error resolveCollapsed(pos byte, db data.DBWriteCacher) error hashNode() ([]byte, error) hashChildren() error @@ -53,7 +53,7 @@ type atomicBuffer interface { } type snapshotNode interface { - commit(force bool, level byte, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error + commit(force bool, level byte, maxTrieLevelInMemory uint, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error } // RequestHandler defines the methods through which request to data can be made diff --git a/data/trie/leafNode.go b/data/trie/leafNode.go index de8fddf4333..aae2f9e371f 100644 --- a/data/trie/leafNode.go +++ b/data/trie/leafNode.go @@ -108,7 +108,7 @@ func (ln *leafNode) hashNode() ([]byte, error) { return encodeNodeAndGetHash(ln) } -func (ln *leafNode) commit(force bool, _ byte, _ data.DBWriteCacher, targetDb data.DBWriteCacher) error { +func (ln *leafNode) commit(force bool, _ byte, _ uint, _ data.DBWriteCacher, targetDb data.DBWriteCacher) error { err := ln.isEmptyOrNil() if err != nil { return fmt.Errorf("commit error %w", err) diff --git a/data/trie/leafNode_test.go b/data/trie/leafNode_test.go index a265fbf15a0..d7406bfcd21 100644 --- a/data/trie/leafNode_test.go +++ b/data/trie/leafNode_test.go @@ -154,7 +154,7 @@ func TestLeafNode_commit(t *testing.T) { hash, _ := encodeNodeAndGetHash(ln) _ = ln.setHash() - err := ln.commit(false, 0, db, db) + err := ln.commit(false, 0, 5, db, db) assert.Nil(t, err) encNode, _ := db.Get(hash) @@ -169,7 +169,7 @@ func TestLeafNode_commitEmptyNode(t *testing.T) { ln := &leafNode{} - err := ln.commit(false, 0, nil, nil) + err := ln.commit(false, 0, 5, nil, nil) assert.True(t, errors.Is(err, ErrEmptyLeafNode)) } @@ -178,7 +178,7 @@ func TestLeafNode_commitNilNode(t *testing.T) { var ln *leafNode - err := ln.commit(false, 0, nil, nil) + err := ln.commit(false, 0, 5, nil, nil) assert.True(t, errors.Is(err, ErrNilLeafNode)) } @@ -352,7 +352,7 @@ func TestLeafNode_insertInStoredLnAtSameKey(t *testing.T) { db := mock.NewMemDbMock() ln := getLn(getTestMarshAndHasher()) node, _ := newLeafNode([]byte("dog"), []byte("dogs"), ln.marsh, ln.hasher) - _ = ln.commit(false, 0, db, db) + _ = ln.commit(false, 0, 5, db, db) lnHash := ln.getHash() dirty, _, oldHashes, err := ln.insert(node, db) @@ -368,7 +368,7 @@ func 
TestLeafNode_insertInStoredLnAtDifferentKey(t *testing.T) { marsh, hasher := getTestMarshAndHasher() ln, _ := newLeafNode([]byte{1, 2, 3}, []byte("dog"), marsh, hasher) node, _ := newLeafNode([]byte{4, 5, 6}, []byte("dogs"), marsh, hasher) - _ = ln.commit(false, 0, db, db) + _ = ln.commit(false, 0, 5, db, db) lnHash := ln.getHash() dirty, _, oldHashes, err := ln.insert(node, db) @@ -429,7 +429,7 @@ func TestLeafNode_deleteFromStoredLnAtSameKey(t *testing.T) { db := mock.NewMemDbMock() ln := getLn(getTestMarshAndHasher()) - _ = ln.commit(false, 0, db, db) + _ = ln.commit(false, 0, 5, db, db) lnHash := ln.getHash() dirty, _, oldHashes, err := ln.delete([]byte("dog"), db) @@ -443,7 +443,7 @@ func TestLeafNode_deleteFromLnAtDifferentKey(t *testing.T) { db := mock.NewMemDbMock() ln := getLn(getTestMarshAndHasher()) - _ = ln.commit(false, 0, db, db) + _ = ln.commit(false, 0, 5, db, db) wrongKey := []byte{1, 2, 3} dirty, _, oldHashes, err := ln.delete(wrongKey, db) @@ -694,3 +694,48 @@ func TestLeafNode_deleteDifferentKeyShouldNotModifyTrie(t *testing.T) { assert.Equal(t, rootHash, tr.root.getHash()) assert.Equal(t, [][]byte{}, tr.oldHashes) } + +func TestLeafNode_newLeafNodeNilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + ln, err := newLeafNode([]byte("key"), []byte("val"), nil, mock.HasherMock{}) + assert.Nil(t, ln) + assert.Equal(t, ErrNilMarshalizer, err) +} + +func TestLeafNode_newLeafNodeNilHasherShouldErr(t *testing.T) { + t.Parallel() + + ln, err := newLeafNode([]byte("key"), []byte("val"), &mock.MarshalizerMock{}, nil) + assert.Nil(t, ln) + assert.Equal(t, ErrNilHasher, err) +} + +func TestLeafNode_newLeafNodeOkVals(t *testing.T) { + t.Parallel() + + marsh, hasher := getTestMarshAndHasher() + key := []byte("key") + val := []byte("val") + ln, err := newLeafNode(key, val, marsh, hasher) + + assert.Nil(t, err) + assert.Equal(t, key, ln.Key) + assert.Equal(t, val, ln.Value) + assert.Equal(t, hasher, ln.hasher) + assert.Equal(t, marsh, ln.marsh) + assert.True(t, ln.dirty) +} + +func TestLeafNode_getMarshalizer(t *testing.T) { + t.Parallel() + + marsh, _ := getTestMarshAndHasher() + ln := &leafNode{ + baseNode: &baseNode{ + marsh: marsh, + }, + } + + assert.Equal(t, marsh, ln.getMarshalizer()) +} diff --git a/data/trie/node.go b/data/trie/node.go index ae50e989d53..00ddce48b43 100644 --- a/data/trie/node.go +++ b/data/trie/node.go @@ -11,12 +11,11 @@ import ( ) const ( - nrOfChildren = 17 - firstByte = 0 - maxTrieLevelAfterCommit = 6 - hexTerminator = 16 - nibbleSize = 4 - nibbleMask = 0x0f + nrOfChildren = 17 + firstByte = 0 + hexTerminator = 16 + nibbleSize = 4 + nibbleMask = 0x0f ) type baseNode struct { diff --git a/data/trie/node_test.go b/data/trie/node_test.go index 55aed19b364..18c8a78d538 100644 --- a/data/trie/node_test.go +++ b/data/trie/node_test.go @@ -153,7 +153,7 @@ func TestNode_getNodeFromDBAndDecodeBranchNode(t *testing.T) { db := mock.NewMemDbMock() bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) - _ = bn.commit(false, 0, db, db) + _ = bn.commit(false, 0, 5, db, db) encNode, _ := bn.marsh.Marshal(collapsedBn) encNode = append(encNode, branch) @@ -172,7 +172,7 @@ func TestNode_getNodeFromDBAndDecodeExtensionNode(t *testing.T) { db := mock.NewMemDbMock() en, collapsedEn := getEnAndCollapsedEn() - _ = en.commit(false, 0, db, db) + _ = en.commit(false, 0, 5, db, db) encNode, _ := en.marsh.Marshal(collapsedEn) encNode = append(encNode, extension) @@ -191,7 +191,7 @@ func TestNode_getNodeFromDBAndDecodeLeafNode(t *testing.T) { db := mock.NewMemDbMock() ln 
:= getLn(getTestMarshAndHasher()) - _ = ln.commit(false, 0, db, db) + _ = ln.commit(false, 0, 5, db, db) encNode, _ := ln.marsh.Marshal(ln) encNode = append(encNode, leaf) @@ -211,7 +211,7 @@ func TestNode_resolveIfCollapsedBranchNode(t *testing.T) { db := mock.NewMemDbMock() bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) childPos := byte(2) - _ = bn.commit(false, 0, db, db) + _ = bn.commit(false, 0, 5, db, db) err := resolveIfCollapsed(collapsedBn, childPos, db) assert.Nil(t, err) @@ -223,7 +223,7 @@ func TestNode_resolveIfCollapsedExtensionNode(t *testing.T) { db := mock.NewMemDbMock() en, collapsedEn := getEnAndCollapsedEn() - _ = en.commit(false, 0, db, db) + _ = en.commit(false, 0, 5, db, db) err := resolveIfCollapsed(collapsedEn, 0, db) assert.Nil(t, err) @@ -235,7 +235,7 @@ func TestNode_resolveIfCollapsedLeafNode(t *testing.T) { db := mock.NewMemDbMock() ln := getLn(getTestMarshAndHasher()) - _ = ln.commit(false, 0, db, db) + _ = ln.commit(false, 0, 5, db, db) err := resolveIfCollapsed(ln, 0, db) assert.Nil(t, err) diff --git a/data/trie/patriciaMerkleTrie.go b/data/trie/patriciaMerkleTrie.go index 90931db1a83..6bfd0af8f3e 100644 --- a/data/trie/patriciaMerkleTrie.go +++ b/data/trie/patriciaMerkleTrie.go @@ -38,6 +38,8 @@ type patriciaMerkleTrie struct { oldHashes [][]byte oldRoot []byte newHashes data.ModifiedHashes + + maxTrieLevelInMemory uint } // NewTrie creates a new Patricia Merkle Trie @@ -45,6 +47,7 @@ func NewTrie( trieStorage data.StorageManager, msh marshal.Marshalizer, hsh hashing.Hasher, + maxTrieLevelInMemory uint, ) (*patriciaMerkleTrie, error) { if check.IfNil(trieStorage) { return nil, ErrNilTrieStorage @@ -55,14 +58,16 @@ func NewTrie( if check.IfNil(hsh) { return nil, ErrNilHasher } + log.Debug("created new trie", "max trie level in memory", maxTrieLevelInMemory) return &patriciaMerkleTrie{ - trieStorage: trieStorage, - marshalizer: msh, - hasher: hsh, - oldHashes: make([][]byte, 0), - oldRoot: make([]byte, 0), - newHashes: make(data.ModifiedHashes), + trieStorage: trieStorage, + marshalizer: msh, + hasher: hsh, + oldHashes: make([][]byte, 0), + oldRoot: make([]byte, 0), + newHashes: make(data.ModifiedHashes), + maxTrieLevelInMemory: maxTrieLevelInMemory, }, nil } @@ -209,7 +214,7 @@ func (tr *patriciaMerkleTrie) Commit() error { } } - err = tr.root.commit(false, 0, tr.trieStorage.Database(), tr.trieStorage.Database()) + err = tr.root.commit(false, 0, tr.maxTrieLevelInMemory, tr.trieStorage.Database(), tr.trieStorage.Database()) if err != nil { return err } @@ -278,6 +283,7 @@ func (tr *patriciaMerkleTrie) Recreate(root []byte) (data.Trie, error) { tr.trieStorage, tr.marshalizer, tr.hasher, + tr.maxTrieLevelInMemory, ) } @@ -424,6 +430,7 @@ func (tr *patriciaMerkleTrie) recreateFromDb(rootHash []byte) (data.Trie, error) tr.trieStorage, tr.marshalizer, tr.hasher, + tr.maxTrieLevelInMemory, ) if err != nil { return nil, err @@ -438,7 +445,7 @@ func (tr *patriciaMerkleTrie) recreateFromDb(rootHash []byte) (data.Trie, error) newTr.root = newRoot if db != tr.Database() { - err = newTr.root.commit(true, 0, db, tr.Database()) + err = newTr.root.commit(true, 0, tr.maxTrieLevelInMemory, db, tr.Database()) if err != nil { return nil, err } diff --git a/data/trie/patriciaMerkleTrie_test.go b/data/trie/patriciaMerkleTrie_test.go index 588d2236909..d4f462b31de 100644 --- a/data/trie/patriciaMerkleTrie_test.go +++ b/data/trie/patriciaMerkleTrie_test.go @@ -17,6 +17,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" 
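// Editor's note (not part of the patch): trie.NewTrie now takes
// maxTrieLevelInMemory as a fourth argument and stores it on the
// patriciaMerkleTrie, so Commit, Recreate and recreateFromDb all apply the
// same depth limit; getDefaultTrieParameters below gains a fourth return
// value to match. A typical construction after this change, using the
// tests' value of 5:
//
//	trieStorageManager, _ := trie.NewTrieStorageManager(db, marshalizer, hasher, cfg, evictionWaitingList, generalCfg)
//	tr, err := trie.NewTrie(trieStorageManager, marshalizer, hasher, uint(5))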
"github.com/ElrondNetwork/elrond-go/hashing/keccak" "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/stretchr/testify/assert" ) @@ -29,7 +30,7 @@ func emptyTrie() data.Trie { return tr } -func getDefaultTrieParameters() (data.StorageManager, marshal.Marshalizer, hashing.Hasher) { +func getDefaultTrieParameters() (data.StorageManager, marshal.Marshalizer, hashing.Hasher, uint) { db := mock.NewMemDbMock() marshalizer := &mock.ProtobufMarshalizerMock{} hasher := &mock.KeccakMock{} @@ -51,8 +52,9 @@ func getDefaultTrieParameters() (data.StorageManager, marshal.Marshalizer, hashi evictionWaitingList, _ := mock.NewEvictionWaitingList(100, mock.NewMemDbMock(), marshalizer) trieStorageManager, _ := trie.NewTrieStorageManager(db, marshalizer, hasher, cfg, evictionWaitingList, generalCfg) + maxTrieLevelInMemory := uint(5) - return trieStorageManager, marshalizer, hasher + return trieStorageManager, marshalizer, hasher, maxTrieLevelInMemory } func initTrieMultipleValues(nr int) (data.Trie, [][]byte) { @@ -81,8 +83,8 @@ func initTrie() data.Trie { func TestNewTrieWithNilTrieStorage(t *testing.T) { t.Parallel() - _, marshalizer, hasher := getDefaultTrieParameters() - tr, err := trie.NewTrie(nil, marshalizer, hasher) + _, marshalizer, hasher, maxTrieLevelInMemory := getDefaultTrieParameters() + tr, err := trie.NewTrie(nil, marshalizer, hasher, maxTrieLevelInMemory) assert.Nil(t, tr) assert.Equal(t, trie.ErrNilTrieStorage, err) @@ -91,8 +93,8 @@ func TestNewTrieWithNilTrieStorage(t *testing.T) { func TestNewTrieWithNilMarshalizer(t *testing.T) { t.Parallel() - trieStorage, _, hasher := getDefaultTrieParameters() - tr, err := trie.NewTrie(trieStorage, nil, hasher) + trieStorage, _, hasher, maxTrieLevelInMemory := getDefaultTrieParameters() + tr, err := trie.NewTrie(trieStorage, nil, hasher, maxTrieLevelInMemory) assert.Nil(t, tr) assert.Equal(t, trie.ErrNilMarshalizer, err) @@ -101,8 +103,8 @@ func TestNewTrieWithNilMarshalizer(t *testing.T) { func TestNewTrieWithNilHasher(t *testing.T) { t.Parallel() - trieStorage, marshalizer, _ := getDefaultTrieParameters() - tr, err := trie.NewTrie(trieStorage, marshalizer, nil) + trieStorage, marshalizer, _, maxTrieLevelInMemory := getDefaultTrieParameters() + tr, err := trie.NewTrie(trieStorage, marshalizer, nil, maxTrieLevelInMemory) assert.Nil(t, tr) assert.Equal(t, trie.ErrNilHasher, err) @@ -471,9 +473,55 @@ func TestPatriciaMerkleTrie_GetAllLeaves(t *testing.T) { assert.Nil(t, err) assert.Equal(t, 3, len(leaves)) - assert.Equal(t, []byte("reindeer"), leaves[string([]byte("doe"))]) - assert.Equal(t, []byte("puppy"), leaves[string([]byte("dog"))]) - assert.Equal(t, []byte("cat"), leaves[string([]byte("ddog"))]) + assert.Equal(t, []byte("reindeer"), leaves["doe"]) + assert.Equal(t, []byte("puppy"), leaves["dog"]) + assert.Equal(t, []byte("cat"), leaves["ddog"]) +} + +func TestPatriciaMerkleTrie_String(t *testing.T) { + t.Parallel() + + tr := initTrie() + str := tr.String() + assert.NotEqual(t, 0, len(str)) + + tr = emptyTrie() + str = tr.String() + assert.Equal(t, "*** EMPTY TRIE ***\n", str) +} + +func TestPatriciaMerkleTrie_ClosePersister(t *testing.T) { + t.Parallel() + + tempDir, _ := ioutil.TempDir("", strconv.Itoa(rand.Intn(100000))) + arg := storageUnit.ArgDB{ + DBType: storageUnit.LvlDBSerial, + Path: tempDir, + BatchDelaySeconds: 1, + MaxBatchSize: 1, + MaxOpenFiles: 10, + } + db, _ := storageUnit.NewDB(arg) + marshalizer := 
&mock.ProtobufMarshalizerMock{} + hasher := &mock.KeccakMock{} + + trieStorageManager, _ := trie.NewTrieStorageManager( + db, + marshalizer, + hasher, + config.DBConfig{}, + &mock.EvictionWaitingList{}, + config.TrieStorageManagerConfig{}, + ) + maxTrieLevelInMemory := uint(5) + tr, _ := trie.NewTrie(trieStorageManager, marshalizer, hasher, maxTrieLevelInMemory) + + err := tr.ClosePersister() + assert.Nil(t, err) + + key, err := tr.Database().Get([]byte("key")) + assert.Nil(t, key) + assert.Equal(t, storage.ErrSerialDBIsClosed, err) } func BenchmarkPatriciaMerkleTree_Insert(b *testing.B) { diff --git a/data/trie/pruningBuffer.go b/data/trie/pruningBuffer.go index b4097073ca2..61a8a685dda 100644 --- a/data/trie/pruningBuffer.go +++ b/data/trie/pruningBuffer.go @@ -22,7 +22,7 @@ func (pb *pruningBuffer) add(rootHash []byte) { defer pb.mutOp.Unlock() if uint32(len(pb.buffer)) == pb.size { - log.Trace("pruning buffer is full", "rootHash", rootHash) + log.Warn("pruning buffer is full", "rootHash", rootHash) return } diff --git a/data/trie/trieStorageManager.go b/data/trie/trieStorageManager.go index 8e4f8a52152..7a9609fd904 100644 --- a/data/trie/trieStorageManager.go +++ b/data/trie/trieStorageManager.go @@ -376,7 +376,8 @@ func (tsm *trieStorageManager) takeSnapshot(snapshot *snapshotsQueueEntry, msh m return } - err = newRoot.commit(true, 0, tsm.db, db) + maxTrieLevelInMemory := uint(5) + err = newRoot.commit(true, 0, maxTrieLevelInMemory, tsm.db, db) if err != nil { log.Error("trie storage manager: commit", "error", err.Error()) return diff --git a/data/trie/trieStorageManager_test.go b/data/trie/trieStorageManager_test.go index 3eb045de4b3..b775d1dd614 100644 --- a/data/trie/trieStorageManager_test.go +++ b/data/trie/trieStorageManager_test.go @@ -82,7 +82,8 @@ func TestNewTrieStorageManagerWithExistingSnapshot(t *testing.T) { size := uint(100) evictionWaitList, _ := mock.NewEvictionWaitingList(size, mock.NewMemDbMock(), msh) trieStorage, _ := NewTrieStorageManager(db, msh, hsh, cfg, evictionWaitList, generalCfg) - tr, _ := NewTrie(trieStorage, msh, hsh) + maxTrieLevelInMemory := uint(5) + tr, _ := NewTrie(trieStorage, msh, hsh, maxTrieLevelInMemory) _ = tr.Update([]byte("doe"), []byte("reindeer")) _ = tr.Update([]byte("dog"), []byte("puppy")) @@ -386,3 +387,132 @@ func TestTrieSnapshottingAndCheckpointConcurrently(t *testing.T) { assert.NotNil(t, val) assert.Nil(t, err) } + +func TestTriePruneAndCancelPruneWhileSnapshotInProgressAddsToPruningBuffer(t *testing.T) { + t.Parallel() + + tr, trieStorage, _ := newEmptyTrie() + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + _ = tr.Commit() + oldRootHash, _ := tr.Root() + + _ = tr.Update([]byte("dogglesworth"), []byte("catnip")) + _ = tr.Commit() + newRootHash, _ := tr.Root() + + tr.EnterSnapshotMode() + tr.Prune(oldRootHash, data.OldRoot) + tr.CancelPrune(newRootHash, data.NewRoot) + tr.ExitSnapshotMode() + + assert.Equal(t, 2, trieStorage.pruningBuffer.len()) +} + +func TestTriePruneOnRollbackWhileSnapshotInProgressCancelsPrune(t *testing.T) { + t.Parallel() + + tr, trieStorage, _ := newEmptyTrie() + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + _ = tr.Commit() + oldRootHash, _ := tr.Root() + + _ = tr.Update([]byte("dogglesworth"), []byte("catnip")) + _ = tr.Commit() + newRootHash, _ := tr.Root() + + tr.EnterSnapshotMode() + 
tr.CancelPrune(oldRootHash, data.OldRoot) + tr.Prune(newRootHash, data.NewRoot) + tr.ExitSnapshotMode() + + assert.Equal(t, 1, trieStorage.pruningBuffer.len()) +} + +func TestTriePruneAfterSnapshotIsDonePrunesBufferedHashes(t *testing.T) { + t.Parallel() + + tr, trieStorage, _ := newEmptyTrie() + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + newHashes, _ := tr.GetDirtyHashes() + tr.SetNewHashes(newHashes) + _ = tr.Commit() + oldRootHash, _ := tr.Root() + + _ = tr.Update([]byte("dogglesworth"), []byte("catnip")) + newHashes, _ = tr.GetDirtyHashes() + tr.SetNewHashes(newHashes) + _ = tr.Commit() + newRootHash, _ := tr.Root() + + tr.EnterSnapshotMode() + tr.Prune(oldRootHash, data.OldRoot) + tr.CancelPrune(newRootHash, data.NewRoot) + tr.ExitSnapshotMode() + + tr.Prune(oldRootHash, data.NewRoot) + + assert.Equal(t, 0, trieStorage.pruningBuffer.len()) +} + +func TestTrieCancelPruneAndPruningBufferNotEmptyAddsToPruningBuffer(t *testing.T) { + t.Parallel() + + tr, trieStorage, _ := newEmptyTrie() + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + _ = tr.Commit() + oldRootHash, _ := tr.Root() + + _ = tr.Update([]byte("dogglesworth"), []byte("catnip")) + _ = tr.Commit() + newRootHash, _ := tr.Root() + + tr.EnterSnapshotMode() + tr.Prune(oldRootHash, data.OldRoot) + tr.CancelPrune(newRootHash, data.NewRoot) + tr.ExitSnapshotMode() + + tr.CancelPrune(oldRootHash, data.NewRoot) + + assert.Equal(t, 3, trieStorage.pruningBuffer.len()) +} + +func TestTriePruneAndCancelPruneAddedToBufferInOrder(t *testing.T) { + t.Parallel() + + tr, trieStorage, _ := newEmptyTrie() + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + _ = tr.Commit() + oldRootHash, _ := tr.Root() + + _ = tr.Update([]byte("dogglesworth"), []byte("catnip")) + _ = tr.Commit() + newRootHash, _ := tr.Root() + + tr.EnterSnapshotMode() + tr.Prune(oldRootHash, data.OldRoot) + tr.CancelPrune(newRootHash, data.NewRoot) + tr.ExitSnapshotMode() + + tr.CancelPrune(oldRootHash, data.NewRoot) + + bufferedHashes := trieStorage.pruningBuffer.removeAll() + + expectedHash := append(oldRootHash, byte(data.OldRoot)) + assert.Equal(t, append(expectedHash, byte(prune)), bufferedHashes[0]) + + expectedHash = append(newRootHash, byte(data.NewRoot)) + assert.Equal(t, append(expectedHash, byte(cancelPrune)), bufferedHashes[1]) + + expectedHash = append(oldRootHash, byte(data.NewRoot)) + assert.Equal(t, append(expectedHash, byte(cancelPrune)), bufferedHashes[2]) +} diff --git a/dataRetriever/constants.go b/dataRetriever/constants.go index a3688510364..98ebbc2b511 100644 --- a/dataRetriever/constants.go +++ b/dataRetriever/constants.go @@ -2,14 +2,3 @@ package dataRetriever // TxPoolNumSendersToEvictInOneStep instructs tx pool eviction algorithm to remove this many senders when eviction takes place const TxPoolNumSendersToEvictInOneStep = uint32(100) - -// TxPoolLargeNumOfTxsForASender instructs tx pool eviction algorithm to tag a sender with more transactions than this value -// as a "sender with a large number of transactions" -const TxPoolLargeNumOfTxsForASender = uint32(500) - -// TxPoolNumTxsToEvictFromASender instructs tx pool eviction algorithm to remove this many transactions -// for "a sender with a large number of transactions" when eviction takes 
place -const TxPoolNumTxsToEvictFromASender = uint32(100) - -// TxPoolMinSizeInBytes is the lower limit of the tx cache / eviction parameter "sizeInBytes" -const TxPoolMinSizeInBytes = uint32(40960) diff --git a/dataRetriever/factory/txpool/txPoolFactory_test.go b/dataRetriever/factory/txpool/txPoolFactory_test.go index 3c6c9745aad..dd7c192820a 100644 --- a/dataRetriever/factory/txpool/txPoolFactory_test.go +++ b/dataRetriever/factory/txpool/txPoolFactory_test.go @@ -10,22 +10,22 @@ import ( func Test_CreateNewTxPool_ShardedData(t *testing.T) { config := storageUnit.CacheConfig{Type: storageUnit.FIFOShardedCache, Size: 100, SizeInBytes: 40960, Shards: 1} - args := txpool.ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 1} + args := txpool.ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 1} txPool, err := CreateTxPool(args) require.Nil(t, err) require.NotNil(t, txPool) config = storageUnit.CacheConfig{Type: storageUnit.LRUCache, Size: 100, SizeInBytes: 40960, Shards: 1} - args = txpool.ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 1} + args = txpool.ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 1} txPool, err = CreateTxPool(args) require.Nil(t, err) require.NotNil(t, txPool) } func Test_CreateNewTxPool_ShardedTxPool(t *testing.T) { - config := storageUnit.CacheConfig{Size: 100, SizeInBytes: 40960, Shards: 1} - args := txpool.ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 1} + config := storageUnit.CacheConfig{Size: 100, SizePerSender: 1, SizeInBytes: 40960, SizeInBytesPerSender: 40960, Shards: 1} + args := txpool.ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 1} txPool, err := CreateTxPool(args) require.Nil(t, err) diff --git a/dataRetriever/txpool/argShardedTxPool.go b/dataRetriever/txpool/argShardedTxPool.go index 362c6f47cee..a99600ab929 100644 --- a/dataRetriever/txpool/argShardedTxPool.go +++ b/dataRetriever/txpool/argShardedTxPool.go @@ -18,20 +18,26 @@ type ArgShardedTxPool struct { func (args *ArgShardedTxPool) verify() error { config := args.Config - if config.SizeInBytes < dataRetriever.TxPoolMinSizeInBytes { - return fmt.Errorf("%w: config.SizeInBytes is less than [dataRetriever.TxPoolMinSizeInBytes]", dataRetriever.ErrCacheConfigInvalidSizeInBytes) + if config.SizeInBytes == 0 { + return fmt.Errorf("%w: config.SizeInBytes is not valid", dataRetriever.ErrCacheConfigInvalidSizeInBytes) } - if config.Size < 1 { - return fmt.Errorf("%w: config.Size is less than 1", dataRetriever.ErrCacheConfigInvalidSize) + if config.SizeInBytesPerSender == 0 { + return fmt.Errorf("%w: config.SizeInBytesPerSender is not valid", dataRetriever.ErrCacheConfigInvalidSizeInBytes) } - if config.Shards < 1 { - return fmt.Errorf("%w: config.Shards (map chunks) is less than 1", dataRetriever.ErrCacheConfigInvalidShards) + if config.Size == 0 { + return fmt.Errorf("%w: config.Size is not valid", dataRetriever.ErrCacheConfigInvalidSize) } - if args.MinGasPrice < 1 { - return fmt.Errorf("%w: MinGasPrice is less than 1", dataRetriever.ErrCacheConfigInvalidEconomics) + if config.SizePerSender == 0 { + return fmt.Errorf("%w: config.SizePerSender is not valid", dataRetriever.ErrCacheConfigInvalidSize) } - if args.NumberOfShards < 1 { - return fmt.Errorf("%w: NumberOfShards is less than 1", dataRetriever.ErrCacheConfigInvalidSharding) + if config.Shards == 0 { + return fmt.Errorf("%w: config.Shards (map chunks) is not valid", 
dataRetriever.ErrCacheConfigInvalidShards) + } + if args.MinGasPrice == 0 { + return fmt.Errorf("%w: MinGasPrice is not valid", dataRetriever.ErrCacheConfigInvalidEconomics) + } + if args.NumberOfShards == 0 { + return fmt.Errorf("%w: NumberOfShards is not valid", dataRetriever.ErrCacheConfigInvalidSharding) } return nil diff --git a/dataRetriever/txpool/interface.go b/dataRetriever/txpool/interface.go new file mode 100644 index 00000000000..bcf9c2fb306 --- /dev/null +++ b/dataRetriever/txpool/interface.go @@ -0,0 +1,16 @@ +package txpool + +import ( + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/txcache" +) + +type txCache interface { + storage.Cacher + + AddTx(tx *txcache.WrappedTransaction) (ok bool, added bool) + GetByTxHash(txHash []byte) (*txcache.WrappedTransaction, bool) + RemoveTxByHash(txHash []byte) error + CountTx() int64 + ForEachTransaction(function txcache.ForEachTransaction) +} diff --git a/dataRetriever/txpool/shardedTxPool.go b/dataRetriever/txpool/shardedTxPool.go index 18a7cd9c958..5961b7f5356 100644 --- a/dataRetriever/txpool/shardedTxPool.go +++ b/dataRetriever/txpool/shardedTxPool.go @@ -4,7 +4,7 @@ import ( "strconv" "sync" - "github.com/ElrondNetwork/elrond-go-logger" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/core/counting" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -31,31 +31,31 @@ type shardedTxPool struct { type txPoolShard struct { CacheID string - Cache *txcache.TxCache + Cache txCache } // NewShardedTxPool creates a new sharded tx pool // Implements "dataRetriever.TxPool" func NewShardedTxPool(args ArgShardedTxPool) (dataRetriever.ShardedDataCacherNotifier, error) { - log.Trace("NewShardedTxPool", "args", args) + log.Info("NewShardedTxPool", "args", args) err := args.verify() if err != nil { return nil, err } - const oneTrilion = 1000000 * 1000000 + const oneBillion = 1000000 * 1000 numCaches := 2*args.NumberOfShards - 1 cacheConfigPrototype := txcache.CacheConfig{ NumChunksHint: args.Config.Shards, EvictionEnabled: true, NumBytesThreshold: args.Config.SizeInBytes / numCaches, + NumBytesPerSenderThreshold: args.Config.SizeInBytesPerSender, CountThreshold: args.Config.Size / numCaches, + CountPerSenderThreshold: args.Config.SizePerSender, NumSendersToEvictInOneStep: dataRetriever.TxPoolNumSendersToEvictInOneStep, - LargeNumOfTxsForASender: dataRetriever.TxPoolLargeNumOfTxsForASender, - NumTxsToEvictFromASender: dataRetriever.TxPoolNumTxsToEvictFromASender, - MinGasPriceMicroErd: uint32(args.MinGasPrice / oneTrilion), + MinGasPriceNanoErd: uint32(args.MinGasPrice / oneBillion), } cacheConfigPrototypeForSelfShard := cacheConfigPrototype @@ -82,7 +82,7 @@ func (txPool *shardedTxPool) ShardDataStore(cacheID string) storage.Cacher { } // getTxCache returns the requested cache -func (txPool *shardedTxPool) getTxCache(cacheID string) *txcache.TxCache { +func (txPool *shardedTxPool) getTxCache(cacheID string) txCache { shard := txPool.getOrCreateShard(cacheID) return shard.Cache } @@ -108,8 +108,7 @@ func (txPool *shardedTxPool) createShard(cacheID string) *txPoolShard { shard, ok := txPool.backingMap[cacheID] if !ok { - cacheConfig := txPool.getCacheConfig(cacheID) - cache := txcache.NewTxCache(cacheConfig) + cache := txPool.createTxCache(cacheID) shard = &txPoolShard{ CacheID: cacheID, Cache: cache, @@ -121,6 +120,17 @@ func (txPool *shardedTxPool) createShard(cacheID string) *txPoolShard { return shard } +func (txPool 
*shardedTxPool) createTxCache(cacheID string) txCache { + cacheConfig := txPool.getCacheConfig(cacheID) + cache, err := txcache.NewTxCache(cacheConfig) + if err != nil { + log.Error("shardedTxPool.createTxCache()", "err", err) + return txcache.NewDisabledCache() + } + + return cache +} + func (txPool *shardedTxPool) getCacheConfig(cacheID string) txcache.CacheConfig { var cacheConfig txcache.CacheConfig @@ -144,6 +154,7 @@ func (txPool *shardedTxPool) AddData(key []byte, value interface{}, cacheID stri sourceShardID, destinationShardID, err := process.ParseShardCacherIdentifier(cacheID) if err != nil { + log.Error("shardedTxPool.AddData()", "err", err) return } diff --git a/dataRetriever/txpool/shardedTxPool_test.go b/dataRetriever/txpool/shardedTxPool_test.go index a24de4aec61..94aa8fa53d8 100644 --- a/dataRetriever/txpool/shardedTxPool_test.go +++ b/dataRetriever/txpool/shardedTxPool_test.go @@ -24,29 +24,43 @@ func Test_NewShardedTxPool(t *testing.T) { } func Test_NewShardedTxPool_WhenBadConfig(t *testing.T) { - goodArgs := ArgShardedTxPool{Config: storageUnit.CacheConfig{Size: 100, SizeInBytes: 40960, Shards: 16}, MinGasPrice: 100000000000000, NumberOfShards: 1} + goodArgs := ArgShardedTxPool{Config: storageUnit.CacheConfig{Size: 100, SizePerSender: 10, SizeInBytes: 409600, SizeInBytesPerSender: 40960, Shards: 16}, MinGasPrice: 200000000000, NumberOfShards: 1} args := goodArgs - args.Config = storageUnit.CacheConfig{SizeInBytes: 1} + args.Config.SizeInBytes = 0 pool, err := NewShardedTxPool(args) require.Nil(t, pool) require.NotNil(t, err) require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidSizeInBytes.Error()) args = goodArgs - args.Config = storageUnit.CacheConfig{SizeInBytes: 40960, Size: 1} + args.Config.SizeInBytesPerSender = 0 pool, err = NewShardedTxPool(args) require.Nil(t, pool) require.NotNil(t, err) - require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidShards.Error()) + require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidSizeInBytes.Error()) + + args = goodArgs + args.Config.Size = 0 + pool, err = NewShardedTxPool(args) + require.Nil(t, pool) + require.NotNil(t, err) + require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidSize.Error()) args = goodArgs - args.Config = storageUnit.CacheConfig{SizeInBytes: 40960, Shards: 1} + args.Config.SizePerSender = 0 pool, err = NewShardedTxPool(args) require.Nil(t, pool) require.NotNil(t, err) require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidSize.Error()) + args = goodArgs + args.Config.Shards = 0 + pool, err = NewShardedTxPool(args) + require.Nil(t, pool) + require.NotNil(t, err) + require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidShards.Error()) + args = goodArgs args.MinGasPrice = 0 pool, err = NewShardedTxPool(args) @@ -63,8 +77,8 @@ func Test_NewShardedTxPool_WhenBadConfig(t *testing.T) { } func Test_NewShardedTxPool_ComputesCacheConfig(t *testing.T) { - config := storageUnit.CacheConfig{SizeInBytes: 524288000, Size: 900000, Shards: 1} - args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 5} + config := storageUnit.CacheConfig{SizeInBytes: 524288000, SizeInBytesPerSender: 614400, Size: 900000, SizePerSender: 1000, Shards: 1} + args := ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 5} poolAsInterface, err := NewShardedTxPool(args) require.Nil(t, err) @@ -73,11 +87,11 @@ func Test_NewShardedTxPool_ComputesCacheConfig(t *testing.T) { require.Equal(t, true, pool.cacheConfigPrototype.EvictionEnabled) require.Equal(t, uint32(58254222), 
pool.cacheConfigPrototype.NumBytesThreshold) + require.Equal(t, uint32(614400), pool.cacheConfigPrototype.NumBytesPerSenderThreshold) require.Equal(t, uint32(100000), pool.cacheConfigPrototype.CountThreshold) + require.Equal(t, uint32(1000), pool.cacheConfigPrototype.CountPerSenderThreshold) require.Equal(t, uint32(100), pool.cacheConfigPrototype.NumSendersToEvictInOneStep) - require.Equal(t, uint32(500), pool.cacheConfigPrototype.LargeNumOfTxsForASender) - require.Equal(t, uint32(100), pool.cacheConfigPrototype.NumTxsToEvictFromASender) - require.Equal(t, uint32(100), pool.cacheConfigPrototype.MinGasPriceMicroErd) + require.Equal(t, uint32(200), pool.cacheConfigPrototype.MinGasPriceNanoErd) require.Equal(t, uint32(291271110), pool.cacheConfigPrototypeForSelfShard.NumBytesThreshold) require.Equal(t, uint32(500000), pool.cacheConfigPrototypeForSelfShard.CountThreshold) } @@ -161,7 +175,7 @@ func Test_AddData_CallsOnAddedHandlers(t *testing.T) { // Second addition is ignored (txhash-based deduplication) pool.AddData([]byte("hash-1"), createTx("alice", 42), "1") - pool.AddData([]byte("hash-1"), createTx("whatever", 43), "1") + pool.AddData([]byte("hash-1"), createTx("alice", 42), "1") waitABit() require.Equal(t, uint32(1), atomic.LoadUint32(&numAdded)) @@ -304,8 +318,8 @@ func Test_NotImplementedFunctions(t *testing.T) { } func Test_routeToCacheUnions(t *testing.T) { - config := storageUnit.CacheConfig{Size: 100, SizeInBytes: 40960, Shards: 16} - args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 4, SelfShardID: 42} + config := storageUnit.CacheConfig{Size: 100, SizePerSender: 10, SizeInBytes: 409600, SizeInBytesPerSender: 40960, Shards: 16} + args := ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 4, SelfShardID: 42} poolAsInterface, _ := NewShardedTxPool(args) pool := poolAsInterface.(*shardedTxPool) @@ -319,8 +333,8 @@ func Test_routeToCacheUnions(t *testing.T) { } func Test_getCacheConfig(t *testing.T) { - config := storageUnit.CacheConfig{Size: 150, SizeInBytes: 61440, Shards: 16} - args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 8, SelfShardID: 4} + config := storageUnit.CacheConfig{Size: 150, SizePerSender: 1, SizeInBytes: 61440, SizeInBytesPerSender: 40960, Shards: 16} + args := ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 8, SelfShardID: 4} poolAsInterface, _ := NewShardedTxPool(args) pool := poolAsInterface.(*shardedTxPool) @@ -353,7 +367,7 @@ type thisIsNotATransaction struct { } func newTxPoolToTest() (dataRetriever.ShardedDataCacherNotifier, error) { - config := storageUnit.CacheConfig{Size: 100, SizeInBytes: 40960, Shards: 16} - args := ArgShardedTxPool{Config: config, MinGasPrice: 100000000000000, NumberOfShards: 4} + config := storageUnit.CacheConfig{Size: 100, SizePerSender: 10, SizeInBytes: 409600, SizeInBytesPerSender: 40960, Shards: 16} + args := ArgShardedTxPool{Config: config, MinGasPrice: 200000000000, NumberOfShards: 4} return NewShardedTxPool(args) } diff --git a/epochStart/bootstrap/epochStartMetaBlockProcessor.go b/epochStart/bootstrap/epochStartMetaBlockProcessor.go index 42034bba395..83234e3639c 100644 --- a/epochStart/bootstrap/epochStartMetaBlockProcessor.go +++ b/epochStart/bootstrap/epochStartMetaBlockProcessor.go @@ -263,10 +263,6 @@ func (e *epochStartMetaBlockProcessor) processEntry( return false } -// SignalEndOfProcessing won't do anything -func (e *epochStartMetaBlockProcessor) SignalEndOfProcessing(_ 
[]process.InterceptedData) { -} - // IsInterfaceNil returns true if there is no value under the interface func (e *epochStartMetaBlockProcessor) IsInterfaceNil() bool { return e == nil diff --git a/epochStart/bootstrap/export_test.go b/epochStart/bootstrap/export_test.go index 3b38ca7e515..48726026c55 100644 --- a/epochStart/bootstrap/export_test.go +++ b/epochStart/bootstrap/export_test.go @@ -6,6 +6,10 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" ) +func (e *epochStartMetaSyncer) SetEpochStartMetaBlockInterceptorProcessor(proc EpochStartMetaBlockInterceptorProcessor) { + e.metaBlockProcessor = proc +} + // TODO: We should remove this type of configs hidden in tests func getGeneralConfig() config.Config { return config.Config{ diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index 8a564cea3b0..f665ea17985 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -1,6 +1,8 @@ package bootstrap import ( + "context" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -15,10 +17,10 @@ type StartOfEpochNodesConfigHandler interface { IsInterfaceNil() bool } -// EpochStartInterceptor defines the methods to sync an epoch start metablock -type EpochStartInterceptor interface { - process.Interceptor - GetEpochStartMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) +// EpochStartMetaBlockInterceptorProcessor defines the methods to sync an epoch start metablock +type EpochStartMetaBlockInterceptorProcessor interface { + process.InterceptorProcessor + GetEpochStartMetaBlock(ctx context.Context) (*block.MetaBlock, error) } // StartInEpochNodesCoordinator defines the methods to process and save nodesCoordinator information to storage diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 32672e65d5c..5f580b3865a 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -718,12 +718,13 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error { func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { argsUserAccountsSyncer := syncer.ArgsNewUserAccountsSyncer{ ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{ - Hasher: e.hasher, - Marshalizer: e.marshalizer, - TrieStorageManager: e.trieStorageManagers[factory.UserAccountTrie], - RequestHandler: e.requestHandler, - WaitTime: trieSyncWaitTime, - Cacher: e.dataPool.TrieNodes(), + Hasher: e.hasher, + Marshalizer: e.marshalizer, + TrieStorageManager: e.trieStorageManagers[factory.UserAccountTrie], + RequestHandler: e.requestHandler, + WaitTime: trieSyncWaitTime, + Cacher: e.dataPool.TrieNodes(), + MaxTrieLevelInMemory: e.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, }, ShardId: e.shardCoordinator.SelfId(), } @@ -759,6 +760,7 @@ func (e *epochStartBootstrap) createTriesForNewShardId(shardId uint32) error { e.generalConfig.AccountsTrieStorage, core.GetShardIdString(shardId), e.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, + e.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, ) if err != nil { return err @@ -771,6 +773,7 @@ func (e *epochStartBootstrap) createTriesForNewShardId(shardId uint32) error { e.generalConfig.PeerAccountsTrieStorage, core.GetShardIdString(shardId), e.generalConfig.StateTriesConfig.PeerStatePruningEnabled, + e.generalConfig.StateTriesConfig.MaxPeerTrieLevelInMemory, ) if err != nil { return err @@ -785,12 +788,13 @@ 
func (e *epochStartBootstrap) createTriesForNewShardId(shardId uint32) error { func (e *epochStartBootstrap) syncPeerAccountsState(rootHash []byte) error { argsValidatorAccountsSyncer := syncer.ArgsNewValidatorAccountsSyncer{ ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{ - Hasher: e.hasher, - Marshalizer: e.marshalizer, - TrieStorageManager: e.trieStorageManagers[factory.PeerAccountTrie], - RequestHandler: e.requestHandler, - WaitTime: trieSyncWaitTime, - Cacher: e.dataPool.TrieNodes(), + Hasher: e.hasher, + Marshalizer: e.marshalizer, + TrieStorageManager: e.trieStorageManagers[factory.PeerAccountTrie], + RequestHandler: e.requestHandler, + WaitTime: trieSyncWaitTime, + Cacher: e.dataPool.TrieNodes(), + MaxTrieLevelInMemory: e.generalConfig.StateTriesConfig.MaxPeerTrieLevelInMemory, }, } accountsDBSyncer, err := syncer.NewValidatorAccountsSyncer(argsValidatorAccountsSyncer) diff --git a/epochStart/bootstrap/syncEpochStartMeta.go b/epochStart/bootstrap/syncEpochStartMeta.go index d709db544a1..925626d6bfa 100644 --- a/epochStart/bootstrap/syncEpochStartMeta.go +++ b/epochStart/bootstrap/syncEpochStartMeta.go @@ -30,7 +30,7 @@ type epochStartMetaSyncer struct { marshalizer marshal.Marshalizer hasher hashing.Hasher singleDataInterceptor process.Interceptor - metaBlockProcessor *epochStartMetaBlockProcessor + metaBlockProcessor EpochStartMetaBlockInterceptorProcessor } // ArgsNewEpochStartMetaSyncer - diff --git a/epochStart/bootstrap/syncEpochStartMeta_test.go b/epochStart/bootstrap/syncEpochStartMeta_test.go new file mode 100644 index 00000000000..d44967ece0d --- /dev/null +++ b/epochStart/bootstrap/syncEpochStartMeta_test.go @@ -0,0 +1,120 @@ +package bootstrap + +import ( + "errors" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/epochStart/mock" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process/economics" + "github.com/stretchr/testify/require" +) + +func TestNewEpochStartMetaSyncer_ShouldWork(t *testing.T) { + t.Parallel() + + args := getEpochStartSyncerArgs() + ess, err := NewEpochStartMetaSyncer(args) + require.NoError(t, err) + require.False(t, check.IfNil(ess)) +} + +func TestEpochStartMetaSyncer_SyncEpochStartMetaRegisterMessengerProcessorFailsShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + + args := getEpochStartSyncerArgs() + messenger := &mock.MessengerStub{ + RegisterMessageProcessorCalled: func(_ string, _ p2p.MessageProcessor) error { + return expectedErr + }, + } + args.Messenger = messenger + ess, _ := NewEpochStartMetaSyncer(args) + + mb, err := ess.SyncEpochStartMeta(time.Second) + require.Equal(t, expectedErr, err) + require.Nil(t, mb) +} + +func TestEpochStartMetaSyncer_SyncEpochStartMetaProcessorFailsShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + + args := getEpochStartSyncerArgs() + messenger := &mock.MessengerStub{ + ConnectedPeersCalled: func() []p2p.PeerID { + return []p2p.PeerID{"peer_0", "peer_1", "peer_2", "peer_3", "peer_4", "peer_5"} + }, + } + args.Messenger = messenger + ess, _ := NewEpochStartMetaSyncer(args) + + mbIntercProc := &mock.MetaBlockInterceptorProcessorStub{ + GetEpochStartMetaBlockCalled: func() (*block.MetaBlock, error) { + return nil, expectedErr + }, + } + ess.SetEpochStartMetaBlockInterceptorProcessor(mbIntercProc) + + mb, err := 
ess.SyncEpochStartMeta(time.Second) + require.Equal(t, expectedErr, err) + require.Nil(t, mb) +} + +func TestEpochStartMetaSyncer_SyncEpochStartMetaShouldWork(t *testing.T) { + t.Parallel() + + expectedMb := &block.MetaBlock{Nonce: 37} + + args := getEpochStartSyncerArgs() + messenger := &mock.MessengerStub{ + ConnectedPeersCalled: func() []p2p.PeerID { + return []p2p.PeerID{"peer_0", "peer_1", "peer_2", "peer_3", "peer_4", "peer_5"} + }, + } + args.Messenger = messenger + ess, _ := NewEpochStartMetaSyncer(args) + + mbIntercProc := &mock.MetaBlockInterceptorProcessorStub{ + GetEpochStartMetaBlockCalled: func() (*block.MetaBlock, error) { + return expectedMb, nil + }, + } + ess.SetEpochStartMetaBlockInterceptorProcessor(mbIntercProc) + + mb, err := ess.SyncEpochStartMeta(time.Second) + require.NoError(t, err) + require.Equal(t, expectedMb, mb) +} + +func getEpochStartSyncerArgs() ArgsNewEpochStartMetaSyncer { + return ArgsNewEpochStartMetaSyncer{ + RequestHandler: &mock.RequestHandlerStub{}, + Messenger: &mock.MessengerStub{}, + Marshalizer: &mock.MarshalizerMock{}, + TxSignMarshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), + KeyGen: &mock.KeyGenMock{}, + BlockKeyGen: &mock.KeyGenMock{}, + Hasher: &mock.HasherMock{}, + Signer: &mock.SignerStub{}, + BlockSigner: &mock.SignerStub{}, + ChainID: []byte("chain-ID"), + EconomicsData: &economics.EconomicsData{}, + WhitelistHandler: &mock.WhiteListHandlerStub{}, + AddressPubkeyConv: mock.NewPubkeyConverterMock(32), + NonceConverter: &mock.Uint64ByteSliceConverterMock{}, + StartInEpochConfig: config.EpochStartConfig{ + MinNumConnectedPeersToStart: 2, + MinNumOfPeersToConsiderBlockValid: 2, + }, + } + +} diff --git a/epochStart/bootstrap/syncValidatorStatus_test.go b/epochStart/bootstrap/syncValidatorStatus_test.go new file mode 100644 index 00000000000..373ae43841f --- /dev/null +++ b/epochStart/bootstrap/syncValidatorStatus_test.go @@ -0,0 +1,148 @@ +package bootstrap + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/epochStart/mock" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/require" +) + +func TestNewSyncValidatorStatus_ShouldWork(t *testing.T) { + t.Parallel() + + args := getSyncValidatorStatusArgs() + svs, err := NewSyncValidatorStatus(args) + require.NoError(t, err) + require.False(t, check.IfNil(svs)) +} + +func TestSyncValidatorStatus_NodesConfigFromMetaBlock(t *testing.T) { + t.Parallel() + + args := getSyncValidatorStatusArgs() + svs, _ := NewSyncValidatorStatus(args) + + currMb := &block.MetaBlock{ + Nonce: 37, + Epoch: 0, + MiniBlockHeaders: []block.MiniBlockHeader{ + { + Hash: []byte("mb0-hash"), + ReceiverShardID: 0, + SenderShardID: 0, + Type: block.TxBlock, + TxCount: 0, + }, + }, + EpochStart: block.EpochStart{ + LastFinalizedHeaders: []block.EpochStartShardData{ + { + ShardID: 0, + Epoch: 0, + Round: 0, + Nonce: 0, + HeaderHash: []byte("hash"), + RootHash: []byte("rootHash"), + FirstPendingMetaBlock: []byte("hash"), + LastFinishedMetaBlock: []byte("hash"), + PendingMiniBlockHeaders: nil, + }, + }, + }} + prevMb := &block.MetaBlock{ + Nonce: 36, + Epoch: 0, + MiniBlockHeaders: []block.MiniBlockHeader{ + { + Hash: []byte("mb0-hash"), + ReceiverShardID: 0, + SenderShardID: 0, + Type: block.TxBlock, + TxCount: 0, + }, + }, + EpochStart: block.EpochStart{ 
+ LastFinalizedHeaders: []block.EpochStartShardData{ + { + ShardID: 0, + Epoch: 0, + Round: 0, + Nonce: 0, + HeaderHash: []byte("hash"), + RootHash: []byte("rootHash"), + FirstPendingMetaBlock: []byte("hash"), + LastFinishedMetaBlock: []byte("hash"), + PendingMiniBlockHeaders: nil, + }, + }, + }, + } + + registry, _, err := svs.NodesConfigFromMetaBlock(currMb, prevMb) + require.NoError(t, err) + require.NotNil(t, registry) +} + +func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { + return ArgsNewSyncValidatorStatus{ + DataPool: &mock.PoolsHolderStub{ + MiniBlocksCalled: func() storage.Cacher { + return &mock.CacherStub{} + }, + }, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &mock.HasherMock{}, + RequestHandler: &mock.RequestHandlerStub{}, + ChanceComputer: &mock.NodesCoordinatorStub{}, + GenesisNodesConfig: &mock.NodesSetupStub{ + NumberOfShardsCalled: func() uint32 { + return 1 + }, + InitialNodesInfoForShardCalled: func(shardID uint32) ([]sharding.GenesisNodeInfoHandler, []sharding.GenesisNodeInfoHandler, error) { + if shardID == core.MetachainShardId { + return []sharding.GenesisNodeInfoHandler{ + mock.NewNodeInfo([]byte("addr0"), []byte("pubKey0"), core.MetachainShardId), + mock.NewNodeInfo([]byte("addr1"), []byte("pubKey1"), core.MetachainShardId), + }, + []sharding.GenesisNodeInfoHandler{&mock.NodeInfoMock{}}, + nil + } + return []sharding.GenesisNodeInfoHandler{ + mock.NewNodeInfo([]byte("addr0"), []byte("pubKey0"), 0), + mock.NewNodeInfo([]byte("addr1"), []byte("pubKey1"), 0), + }, + []sharding.GenesisNodeInfoHandler{&mock.NodeInfoMock{}}, + nil + }, + InitialNodesInfoCalled: func() (map[uint32][]sharding.GenesisNodeInfoHandler, map[uint32][]sharding.GenesisNodeInfoHandler) { + return map[uint32][]sharding.GenesisNodeInfoHandler{ + 0: { + mock.NewNodeInfo([]byte("addr0"), []byte("pubKey0"), 0), + mock.NewNodeInfo([]byte("addr1"), []byte("pubKey1"), 0), + }, + core.MetachainShardId: { + mock.NewNodeInfo([]byte("addr0"), []byte("pubKey0"), core.MetachainShardId), + mock.NewNodeInfo([]byte("addr1"), []byte("pubKey1"), core.MetachainShardId), + }, + }, map[uint32][]sharding.GenesisNodeInfoHandler{0: { + mock.NewNodeInfo([]byte("addr2"), []byte("pubKey2"), 0), + mock.NewNodeInfo([]byte("addr3"), []byte("pubKey3"), 0), + }} + }, + GetShardConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + GetMetaConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + }, + NodeShuffler: &mock.NodeShufflerMock{}, + PubKey: []byte("public key"), + ShardIdAsObserver: 0, + } +} diff --git a/epochStart/interface.go b/epochStart/interface.go index 452ac1a0995..7db41ca29fb 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -11,6 +11,7 @@ import ( // TriggerHandler defines the functionalities for an start of epoch trigger type TriggerHandler interface { + Close() error ForceEpochStart(round uint64) error IsEpochStart() bool Epoch() uint32 diff --git a/epochStart/metachain/economics_test.go b/epochStart/metachain/economics_test.go index 16c76c691b2..9d5e65e8b14 100644 --- a/epochStart/metachain/economics_test.go +++ b/epochStart/metachain/economics_test.go @@ -2,6 +2,7 @@ package metachain import ( "encoding/json" + "fmt" "math/big" "testing" "time" @@ -12,6 +13,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/epochStart/mock" + "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/storage" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" @@ -442,6 +444,185 @@ func TestEconomics_VerifyRewardsPerBlock_DifferentHitRates(t *testing.T) { } } +type testInput struct { + blockPerEpochOneShard uint64 + accumulatedFeesInEpoch *big.Int + devFeesInEpoch *big.Int +} + +func TestComputeEndOfEpochEconomics(t *testing.T) { + t.Parallel() + + totalSupply, _ := big.NewInt(0).SetString("20000000000000000000000000000", 10) // 20 Billions ERD + nodePrice, _ := big.NewInt(0).SetString("1000000000000000000000", 10) // 1000 ERD + roundDuration := 4 + + args := createArgsForComputeEndOfEpochEconomics(roundDuration, totalSupply, nodePrice) + ec, _ := NewEndOfEpochEconomicsDataCreator(args) + + epochDuration := numberOfSecondsInDay + roundsPerEpoch := uint64(epochDuration / roundDuration) + + testInputs := []testInput{ + {blockPerEpochOneShard: roundsPerEpoch, accumulatedFeesInEpoch: intToErd(1000), devFeesInEpoch: intToErd(1000)}, + {blockPerEpochOneShard: roundsPerEpoch / 2, accumulatedFeesInEpoch: intToErd(1000), devFeesInEpoch: intToErd(1000)}, + {blockPerEpochOneShard: roundsPerEpoch / 4, accumulatedFeesInEpoch: intToErd(1000), devFeesInEpoch: intToErd(1000)}, + {blockPerEpochOneShard: roundsPerEpoch / 8, accumulatedFeesInEpoch: intToErd(1000), devFeesInEpoch: intToErd(1000)}, + {blockPerEpochOneShard: roundsPerEpoch / 16, accumulatedFeesInEpoch: intToErd(1000), devFeesInEpoch: intToErd(1000)}, + {blockPerEpochOneShard: roundsPerEpoch / 32, accumulatedFeesInEpoch: intToErd(1000), devFeesInEpoch: intToErd(1000)}, + {blockPerEpochOneShard: roundsPerEpoch / 64, accumulatedFeesInEpoch: intToErd(100000000000000), devFeesInEpoch: intToErd(10000000)}, + {blockPerEpochOneShard: roundsPerEpoch, accumulatedFeesInEpoch: intToErd(100000000000000), devFeesInEpoch: intToErd(30000000000000)}, + } + + rewardsPerBlock, _ := big.NewInt(0).SetString("84559445290038908043", 10) // *based on 0.1 inflation + for _, input := range testInputs { + meta := &block.MetaBlock{ + AccumulatedFeesInEpoch: input.accumulatedFeesInEpoch, + DevFeesInEpoch: input.devFeesInEpoch, + Epoch: 1, + Round: roundsPerEpoch, + Nonce: input.blockPerEpochOneShard, + EpochStart: block.EpochStart{ + LastFinalizedHeaders: []block.EpochStartShardData{ + {ShardID: 0, Round: roundsPerEpoch, Nonce: input.blockPerEpochOneShard}, + {ShardID: 1, Round: roundsPerEpoch, Nonce: input.blockPerEpochOneShard}, + }, + }, + } + + economicsBlock, err := ec.ComputeEndOfEpochEconomics(meta) + assert.Nil(t, err) + + verifyEconomicsBlock(t, economicsBlock, input, rewardsPerBlock, nodePrice, totalSupply, roundsPerEpoch, args.RewardsHandler) + } + +} + +func createArgsForComputeEndOfEpochEconomics( + roundDuration int, + totalSupply *big.Int, + nodePrice *big.Int, +) ArgsNewEpochEconomics { + commAddress := "communityAddress" + + args := getArguments() + args.RewardsHandler = &mock.RewardsHandlerStub{ + MaxInflationRateCalled: func() float64 { + return 0.1 + }, + CommunityAddressCalled: func() string { + return commAddress + }, + CommunityPercentageCalled: func() float64 { + return 0.1 + }, + LeaderPercentageCalled: func() float64 { + return 0.1 + }, + } + args.RoundTime = &mock.RoundTimeDurationHandler{ + TimeDurationCalled: func() time.Duration { + return time.Duration(roundDuration) * time.Second + }, + } + hdrPrevEpochStart := block.MetaBlock{ + Round: 0, + Nonce: 0, + Epoch: 0, + EpochStart: block.EpochStart{ + Economics: block.Economics{ + TotalSupply: totalSupply, + TotalToDistribute: big.NewInt(10), + TotalNewlyMinted: big.NewInt(10), + RewardsPerBlockPerNode: 
big.NewInt(10), + NodePrice: nodePrice, + RewardsForCommunity: big.NewInt(10), + }, + LastFinalizedHeaders: []block.EpochStartShardData{ + {ShardID: 0, Nonce: 0}, + {ShardID: 1, Nonce: 0}, + }, + }, + } + args.Store = &mock.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + // this will be the previous epoch meta block. It has initial 0 values so it can be considered at genesis + return &mock.StorerStub{GetCalled: func(key []byte) ([]byte, error) { + hdrBytes, _ := json.Marshal(hdrPrevEpochStart) + return hdrBytes, nil + }} + }, + } + + return args +} + +func verifyEconomicsBlock( + t *testing.T, + economicsBlock *block.Economics, + input testInput, + rewardsPerBlock *big.Int, + nodePrice *big.Int, + totalSupply *big.Int, + roundsPerEpoch uint64, + rewardsHandler process.RewardsHandler, +) { + totalBlocksPerEpoch := int64(input.blockPerEpochOneShard * 3) + hitRate := float64(input.blockPerEpochOneShard) / float64(roundsPerEpoch) * 100 + printEconomicsData(economicsBlock, hitRate, totalBlocksPerEpoch) + + expectedTotalRewardsToBeDistributed := big.NewInt(0).Mul(rewardsPerBlock, big.NewInt(totalBlocksPerEpoch)) + expectedNewTokens := big.NewInt(0).Sub(expectedTotalRewardsToBeDistributed, input.accumulatedFeesInEpoch) + if expectedNewTokens.Cmp(big.NewInt(0)) < 0 { + expectedNewTokens = big.NewInt(0) + expectedTotalRewardsToBeDistributed = input.accumulatedFeesInEpoch + } + + adjustedRewardsPerBlock := big.NewInt(0).Div(expectedTotalRewardsToBeDistributed, big.NewInt(totalBlocksPerEpoch)) + + // subtract developer rewards per block + developerFeesPerBlock := big.NewInt(0).Div(input.devFeesInEpoch, big.NewInt(totalBlocksPerEpoch)) + adjustedRewardsPerBlock.Sub(adjustedRewardsPerBlock, developerFeesPerBlock) + // subtract leader percentage per block + rewardsForLeader := core.GetPercentageOfValue(input.accumulatedFeesInEpoch, rewardsHandler.LeaderPercentage()) + rewardsForLeaderPerBlock := big.NewInt(0).Div(rewardsForLeader, big.NewInt(totalBlocksPerEpoch)) + adjustedRewardsPerBlock.Sub(adjustedRewardsPerBlock, rewardsForLeaderPerBlock) + // communityPercentage + expectedCommunityRewards := core.GetPercentageOfValue(expectedTotalRewardsToBeDistributed, rewardsHandler.CommunityPercentage()) + // subtract community percentage per block + communityRewardsPerBlock := big.NewInt(0).Div(expectedCommunityRewards, big.NewInt(totalBlocksPerEpoch)) + adjustedRewardsPerBlock.Sub(adjustedRewardsPerBlock, communityRewardsPerBlock) + + assert.Equal(t, expectedNewTokens, economicsBlock.TotalNewlyMinted) + assert.Equal(t, big.NewInt(0).Add(totalSupply, expectedNewTokens), economicsBlock.TotalSupply) + assert.Equal(t, expectedTotalRewardsToBeDistributed, economicsBlock.TotalToDistribute) + assert.Equal(t, expectedCommunityRewards, economicsBlock.RewardsForCommunity) + assert.Equal(t, nodePrice, economicsBlock.NodePrice) + assert.Equal(t, adjustedRewardsPerBlock, economicsBlock.RewardsPerBlockPerNode) +} + +func printEconomicsData(eb *block.Economics, hitRate float64, numBlocksTotal int64) { + fmt.Printf("Hit rate per shard %.4f%%, Total block produced: %d \n", hitRate, numBlocksTotal) + fmt.Printf("Total supply: %vERD, TotalToDistribute %vERD, "+ + "TotalNewlyMinted %vERD, RewardsPerBlockPerNode %vERD, RewardsForCommunity %vERD, NodePrice: %vERD", + denomination(eb.TotalSupply), denomination(eb.TotalToDistribute), denomination(eb.TotalNewlyMinted), + denomination(eb.RewardsPerBlockPerNode), denomination(eb.RewardsForCommunity), denomination(eb.NodePrice)) + 
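
The assertions in verifyEconomicsBlock encode the end-of-epoch arithmetic: the total to distribute is clamped so that newly minted tokens never go negative, and the per-node, per-block reward is what remains after the developer fees, the leader's share of the accumulated fees, and the community share of the total distribution are each spread across every block produced in the epoch. A minimal, self-contained sketch of that adjustment follows; the function and parameter names are illustrative, not the production API.

    package main

    import (
        "fmt"
        "math/big"
    )

    // percentageOf computes value * percentage truncated to an integer, the
    // same role core.GetPercentageOfValue plays in the test above.
    func percentageOf(value *big.Int, percentage float64) *big.Int {
        f := new(big.Float).SetInt(value)
        f.Mul(f, big.NewFloat(percentage))
        result, _ := f.Int(nil)
        return result
    }

    // adjustedRewardPerBlock mirrors the adjustment asserted above: start from
    // the per-block share of the total distribution, then subtract the
    // per-block developer fees, leader cut and community cut.
    func adjustedRewardPerBlock(
        totalToDistribute, accumulatedFees, devFees *big.Int,
        numBlocks int64,
        leaderPct, communityPct float64,
    ) *big.Int {
        blocks := big.NewInt(numBlocks)

        perBlock := new(big.Int).Div(totalToDistribute, blocks)
        perBlock.Sub(perBlock, new(big.Int).Div(devFees, blocks))
        perBlock.Sub(perBlock, new(big.Int).Div(percentageOf(accumulatedFees, leaderPct), blocks))
        perBlock.Sub(perBlock, new(big.Int).Div(percentageOf(totalToDistribute, communityPct), blocks))

        return perBlock
    }

    func main() {
        // 1000000 to distribute over 100 blocks, 100000 accumulated fees,
        // 50000 dev fees, 10% leader and 10% community cuts -> 8400 per block
        fmt.Println(adjustedRewardPerBlock(
            big.NewInt(1000000), big.NewInt(100000), big.NewInt(50000),
            100, 0.1, 0.1))
    }
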
fmt.Println() +} + +func intToErd(value int) *big.Int { + denomination, _ := big.NewInt(0).SetString("1000000000000000000", 10) + + return big.NewInt(0).Mul(denomination, big.NewInt(int64(value))) +} + +func denomination(value *big.Int) string { + denomination, _ := big.NewInt(0).SetString("1000000000000000000", 10) + cpValue := big.NewInt(0).Set(value) + + return cpValue.Div(cpValue, denomination).String() +} + func getArguments() ArgsNewEpochEconomics { return ArgsNewEpochEconomics{ Marshalizer: &mock.MarshalizerMock{}, diff --git a/epochStart/metachain/epochStartData_test.go b/epochStart/metachain/epochStartData_test.go index d9b1ca3a913..e18704570b1 100644 --- a/epochStart/metachain/epochStartData_test.go +++ b/epochStart/metachain/epochStartData_test.go @@ -102,11 +102,13 @@ func createTxPool(selfShardID uint32) (dataRetriever.ShardedDataCacherNotifier, return txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 16, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, SelfShardID: selfShardID, }, diff --git a/epochStart/metachain/rewards.go b/epochStart/metachain/rewards.go index a79c8bb0aa6..5b4d86d9868 100644 --- a/epochStart/metachain/rewards.go +++ b/epochStart/metachain/rewards.go @@ -292,7 +292,11 @@ func (rc *rewardsCreator) VerifyRewardsMiniBlocks(metaBlock *block.MetaBlock, va } numReceivedRewardsMBs++ - createdMiniBlock := createdMiniBlocks[miniBlockHdr.ReceiverShardID] + createdMiniBlock := getMiniBlockWithReceiverShardID(miniBlockHdr.ReceiverShardID, createdMiniBlocks) + if createdMiniBlock == nil { + return epochStart.ErrRewardMiniBlockHashDoesNotMatch + } + createdMBHash, errComputeHash := core.CalculateHash(rc.marshalizer, rc.hasher, createdMiniBlock) if errComputeHash != nil { return errComputeHash @@ -311,6 +315,15 @@ func (rc *rewardsCreator) VerifyRewardsMiniBlocks(metaBlock *block.MetaBlock, va return nil } +func getMiniBlockWithReceiverShardID(shardId uint32, miniBlocks block.MiniBlockSlice) *block.MiniBlock { + for _, miniBlock := range miniBlocks { + if miniBlock.ReceiverShardID == shardId { + return miniBlock + } + } + return nil +} + // CreateMarshalizedData creates the marshalized data to be sent to shards func (rc *rewardsCreator) CreateMarshalizedData(body *block.Body) map[string][][]byte { if check.IfNil(body) { diff --git a/epochStart/metachain/rewards_test.go b/epochStart/metachain/rewards_test.go index 223a479143e..96e4b0633f0 100644 --- a/epochStart/metachain/rewards_test.go +++ b/epochStart/metachain/rewards_test.go @@ -296,6 +296,71 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksShouldWork(t *testing.T) { assert.Nil(t, err) } +func TestRewardsCreator_VerifyRewardsMiniBlocksShouldWorkEvenIfNotAllShardsHaveRewards(t *testing.T) { + t.Parallel() + + receivedShardID := uint32(5) + shardCoordinator := &mock.ShardCoordinatorStub{ + ComputeIdCalled: func(address []byte) uint32 { + return receivedShardID + }, + NumberOfShardsCalled: func() uint32 { + return receivedShardID + 1 + }} + args := getRewardsArguments() + args.ShardCoordinator = shardCoordinator + rwd, _ := NewEpochStartRewardsCreator(args) + rwdTx := rewardTx.RewardTx{ + Round: 0, + Value: big.NewInt(100), + RcvAddr: []byte{}, + Epoch: 0, + } + rwdTxHash, _ := core.CalculateHash(&marshal.JsonMarshalizer{}, &mock.HasherMock{}, rwdTx) + + communityRewardTx := 
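
The getMiniBlockWithReceiverShardID helper introduced above replaces a direct slice index with a linear scan. Indexing createdMiniBlocks[miniBlockHdr.ReceiverShardID] implicitly assumed one created mini block per shard, stored in shard order; once a shard produces no rewards, the slice is shorter than the shard count and the index either panics or lands on the wrong mini block. A small standalone illustration of the difference, with a stand-in type for block.MiniBlock:

    package main

    import "fmt"

    // MiniBlock stands in for block.MiniBlock; only the field the helper
    // inspects is reproduced here.
    type MiniBlock struct{ ReceiverShardID uint32 }

    // getByReceiver mirrors getMiniBlockWithReceiverShardID: a scan that
    // tolerates shards without a rewards mini block, unlike direct indexing.
    func getByReceiver(shardID uint32, mbs []*MiniBlock) *MiniBlock {
        for _, mb := range mbs {
            if mb.ReceiverShardID == shardID {
                return mb
            }
        }
        return nil // the caller maps this to ErrRewardMiniBlockHashDoesNotMatch
    }

    func main() {
        // only shard 5 produced rewards, so the slice holds a single element;
        // created[5] would panic, while the scan stays safe either way
        created := []*MiniBlock{{ReceiverShardID: 5}}
        fmt.Println(getByReceiver(5, created) != nil) // true
        fmt.Println(getByReceiver(0, created) != nil) // false
    }
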
rewardTx.RewardTx{ + Round: 0, + Value: big.NewInt(50), + RcvAddr: []byte{17}, + Epoch: 0, + } + commRwdTxHash, _ := core.CalculateHash(&marshal.JsonMarshalizer{}, &mock.HasherMock{}, communityRewardTx) + + bdy := block.MiniBlock{ + TxHashes: [][]byte{commRwdTxHash, rwdTxHash}, + ReceiverShardID: receivedShardID, + SenderShardID: core.MetachainShardId, + Type: block.RewardsBlock, + } + mbh := block.MiniBlockHeader{ + Hash: nil, + SenderShardID: core.MetachainShardId, + ReceiverShardID: receivedShardID, + TxCount: 2, + Type: block.RewardsBlock, + } + mbHash, _ := core.CalculateHash(&marshal.JsonMarshalizer{}, &mock.HasherMock{}, bdy) + mbh.Hash = mbHash + + mb := &block.MetaBlock{ + EpochStart: getDefaultEpochStart(), + MiniBlockHeaders: []block.MiniBlockHeader{ + mbh, + }, + } + valInfo := make(map[uint32][]*state.ValidatorInfo) + valInfo[0] = []*state.ValidatorInfo{ + { + PublicKey: []byte("pubkey"), + ShardId: receivedShardID, + AccumulatedFees: big.NewInt(100), + }, + } + + err := rwd.VerifyRewardsMiniBlocks(mb, valInfo) + assert.Nil(t, err) +} + func TestRewardsCreator_CreateMarshalizedData(t *testing.T) { t.Parallel() diff --git a/epochStart/metachain/trigger.go b/epochStart/metachain/trigger.go index e1fa1febdf7..dfe29df5fe1 100644 --- a/epochStart/metachain/trigger.go +++ b/epochStart/metachain/trigger.go @@ -10,6 +10,7 @@ import ( "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/core/close" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -28,6 +29,7 @@ var _ dataRetriever.EpochHandler = (*trigger)(nil) var _ epochStart.TriggerHandler = (*trigger)(nil) var _ process.EpochStartTriggerHandler = (*trigger)(nil) var _ process.EpochBootstrapper = (*trigger)(nil) +var _ close.Closer = (*trigger)(nil) const minimumNonceToStartEpoch = 4 @@ -423,6 +425,11 @@ func (t *trigger) SetCurrentEpochStartRound(round uint64) { t.mutTrigger.Unlock() } +// Close will close the endless running go routine +func (t *trigger) Close() error { + return nil +} + // IsInterfaceNil return true if underlying object is nil func (t *trigger) IsInterfaceNil() bool { return t == nil diff --git a/epochStart/mock/messengerStub.go b/epochStart/mock/messengerStub.go index 55b7ad54f48..f9a10f57f2d 100644 --- a/epochStart/mock/messengerStub.go +++ b/epochStart/mock/messengerStub.go @@ -6,7 +6,8 @@ import ( // MessengerStub - type MessengerStub struct { - ConnectedPeersCalled func() []p2p.PeerID + ConnectedPeersCalled func() []p2p.PeerID + RegisterMessageProcessorCalled func(topic string, handler p2p.MessageProcessor) error } // ConnectedPeersOnTopic - @@ -36,6 +37,10 @@ func (m *MessengerStub) CreateTopic(name string, createChannelForTopic bool) err // RegisterMessageProcessor - func (m *MessengerStub) RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error { + if m.RegisterMessageProcessorCalled != nil { + return m.RegisterMessageProcessorCalled(topic, handler) + } + return nil } diff --git a/epochStart/mock/metaBlockInterceptorProcessorStub.go b/epochStart/mock/metaBlockInterceptorProcessorStub.go new file mode 100644 index 00000000000..915d3332a51 --- /dev/null +++ b/epochStart/mock/metaBlockInterceptorProcessorStub.go @@ -0,0 +1,42 @@ +package mock + +import ( + "context" + + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/p2p" + 
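
The stubs touched in this diff all follow the same convention, visible in MessengerStub above: every method checks whether its optional ...Called field was injected and falls back to a harmless default otherwise, so a test overrides only the behaviour it cares about. A hypothetical stub written to the same convention:

    package mock

    // GreeterStub is a hypothetical example (not part of the repo) of the
    // optional-callback stub pattern used by MessengerStub and the other
    // stubs in this change.
    type GreeterStub struct {
        GreetCalled func(name string) (string, error)
    }

    // Greet runs the injected behaviour when provided, otherwise a benign default
    func (gs *GreeterStub) Greet(name string) (string, error) {
        if gs.GreetCalled != nil {
            return gs.GreetCalled(name)
        }
        return "", nil
    }

    // IsInterfaceNil returns true if there is no value under the interface
    func (gs *GreeterStub) IsInterfaceNil() bool {
        return gs == nil
    }
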
"github.com/ElrondNetwork/elrond-go/process" +) + +// MetaBlockInterceptorProcessorStub - +type MetaBlockInterceptorProcessorStub struct { + GetEpochStartMetaBlockCalled func() (*block.MetaBlock, error) +} + +// Validate - +func (m *MetaBlockInterceptorProcessorStub) Validate(data process.InterceptedData, fromConnectedPeer p2p.PeerID) error { + return nil +} + +// Save - +func (m *MetaBlockInterceptorProcessorStub) Save(data process.InterceptedData, fromConnectedPeer p2p.PeerID) error { + return nil +} + +// SignalEndOfProcessing - +func (m *MetaBlockInterceptorProcessorStub) SignalEndOfProcessing(data []process.InterceptedData) { +} + +// IsInterfaceNil - +func (m *MetaBlockInterceptorProcessorStub) IsInterfaceNil() bool { + return m == nil +} + +// GetEpochStartMetaBlock - +func (m *MetaBlockInterceptorProcessorStub) GetEpochStartMetaBlock(_ context.Context) (*block.MetaBlock, error) { + if m.GetEpochStartMetaBlockCalled != nil { + return m.GetEpochStartMetaBlockCalled() + } + + return &block.MetaBlock{}, nil +} diff --git a/epochStart/mock/syncTimerStub.go b/epochStart/mock/syncTimerStub.go index 16b59d020ca..8e9bb19c31e 100644 --- a/epochStart/mock/syncTimerStub.go +++ b/epochStart/mock/syncTimerStub.go @@ -6,33 +6,38 @@ import ( // SyncTimerStub is a mock implementation of SyncTimer interface type SyncTimerStub struct { - StartSyncCalled func() + StartSyncingTimeCalled func() ClockOffsetCalled func() time.Duration FormattedCurrentTimeCalled func() string CurrentTimeCalled func() time.Time } -// StartSync is a mock implementation for StartSync -func (stm *SyncTimerStub) StartSync() { - stm.StartSyncCalled() +// StartSyncingTime is a mock implementation for StartSyncingTime +func (sts *SyncTimerStub) StartSyncingTime() { + sts.StartSyncingTimeCalled() } // ClockOffset is a mock implementation for ClockOffset -func (stm *SyncTimerStub) ClockOffset() time.Duration { - return stm.ClockOffsetCalled() +func (sts *SyncTimerStub) ClockOffset() time.Duration { + return sts.ClockOffsetCalled() } // FormattedCurrentTime is a mock implementation for FormattedCurrentTime -func (stm *SyncTimerStub) FormattedCurrentTime() string { - return stm.FormattedCurrentTimeCalled() +func (sts *SyncTimerStub) FormattedCurrentTime() string { + return sts.FormattedCurrentTimeCalled() } // CurrentTime is a mock implementation for CurrentTime -func (stm *SyncTimerStub) CurrentTime() time.Time { - return stm.CurrentTimeCalled() +func (sts *SyncTimerStub) CurrentTime() time.Time { + return sts.CurrentTimeCalled() +} + +// Close - +func (sts *SyncTimerStub) Close() error { + return nil } // IsInterfaceNil returns true if there is no value under the interface -func (stm *SyncTimerStub) IsInterfaceNil() bool { - return stm == nil +func (sts *SyncTimerStub) IsInterfaceNil() bool { + return sts == nil } diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go index 6ddab642a28..6a09ddfa8e2 100644 --- a/epochStart/shardchain/trigger.go +++ b/epochStart/shardchain/trigger.go @@ -2,6 +2,7 @@ package shardchain import ( "bytes" + "context" "fmt" "math" "sort" @@ -11,6 +12,7 @@ import ( "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/core/close" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/typeConverters" @@ -30,6 +32,7 @@ var _ dataRetriever.EpochHandler = (*trigger)(nil) var _ 
epochStart.TriggerHandler = (*trigger)(nil) var _ process.EpochStartTriggerHandler = (*trigger)(nil) var _ process.EpochBootstrapper = (*trigger)(nil) +var _ close.Closer = (*trigger)(nil) // sleepTime defines the time in milliseconds between each iteration made in requestMissingMiniblocks method const sleepTime = 1 * time.Second @@ -97,6 +100,7 @@ type trigger struct { mapMissingMiniblocks map[string]uint32 mutMissingMiniblocks sync.RWMutex + cancelFunc func() } type metaInfo struct { @@ -227,7 +231,10 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) { } t.mapMissingMiniblocks = make(map[string]uint32) - go t.requestMissingMiniblocks() + + var ctx context.Context + ctx, t.cancelFunc = context.WithCancel(context.Background()) + go t.requestMissingMiniblocks(ctx) return t, nil } @@ -243,9 +250,14 @@ func (t *trigger) clearMissingMiniblocksMap(epoch uint32) { } } -func (t *trigger) requestMissingMiniblocks() { +func (t *trigger) requestMissingMiniblocks(ctx context.Context) { for { - time.Sleep(sleepTime) + select { + case <-ctx.Done(): + log.Debug("trigger's go routine is stopping...") + return + case <-time.After(sleepTime): + } t.mutMissingMiniblocks.RLock() if len(t.mapMissingMiniblocks) == 0 { @@ -261,7 +273,14 @@ func (t *trigger) requestMissingMiniblocks() { t.mutMissingMiniblocks.RUnlock() go t.requestHandler.RequestMiniBlocks(core.MetachainShardId, missingMiniblocks) - time.Sleep(waitTime) + + select { + case <-ctx.Done(): + log.Debug("trigger's go routine is stopping...") + return + case <-time.After(waitTime): + } + t.updateMissingMiniblocks() } } @@ -918,6 +937,15 @@ func (t *trigger) saveCurrentState(round uint64) { } } +// Close will close the endless running go routine +func (t *trigger) Close() error { + if t.cancelFunc != nil { + t.cancelFunc() + } + + return nil +} + // IsInterfaceNil returns true if underlying object is nil func (t *trigger) IsInterfaceNil() bool { return t == nil diff --git a/epochStart/shardchain/triggerRegistry_test.go b/epochStart/shardchain/triggerRegistry_test.go index ccd2ed339c7..d36c1df916f 100644 --- a/epochStart/shardchain/triggerRegistry_test.go +++ b/epochStart/shardchain/triggerRegistry_test.go @@ -73,6 +73,7 @@ func TestTrigger_LoadStateAfterSave(t *testing.T) { epochStartTrigger1.epochMetaBlockHash = []byte("meta block hash") epochStartTrigger1.isEpochStart = false epochStartTrigger1.epochFinalityAttestingRound = 680 + epochStartTrigger1.cancelFunc = nil err := epochStartTrigger1.saveState(key) assert.Nil(t, err) assert.NotEqual(t, epochStartTrigger1, epochStartTrigger2) diff --git a/facade/mock/syncTimerMock.go b/facade/mock/syncTimerMock.go index d68c83d6719..b440600b7d8 100644 --- a/facade/mock/syncTimerMock.go +++ b/facade/mock/syncTimerMock.go @@ -6,15 +6,15 @@ import ( // SyncTimerMock is a mock implementation of SyncTimer interface type SyncTimerMock struct { - StartSyncCalled func() + StartSyncingTimeCalled func() ClockOffsetCalled func() time.Duration FormattedCurrentTimeCalled func() string CurrentTimeCalled func() time.Time } -// StartSync is a mock implementation for StartSync -func (stm *SyncTimerMock) StartSync() { - stm.StartSyncCalled() +// StartSyncingTime is a mock implementation for StartSyncingTime +func (stm *SyncTimerMock) StartSyncingTime() { + stm.StartSyncingTimeCalled() } // ClockOffset is a mock implementation for ClockOffset @@ -32,6 +32,11 @@ func (stm *SyncTimerMock) CurrentTime() time.Time { return stm.CurrentTimeCalled() } +// Close - +func (stm *SyncTimerMock) Close() error { 
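
The shardchain trigger above swaps an uncancellable time.Sleep loop for a select on ctx.Done() and time.After, stores the cancel function on the struct, and exposes it through Close(); this is the standard Go recipe for a background loop that must stop when its owner shuts down. A minimal runnable version of the same pattern, with illustrative names:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    type worker struct {
        cancelFunc func()
    }

    // newWorker starts the polling loop and remembers how to stop it, the same
    // shape NewEpochStartTrigger gives requestMissingMiniblocks
    func newWorker() *worker {
        w := &worker{}
        var ctx context.Context
        ctx, w.cancelFunc = context.WithCancel(context.Background())
        go w.loop(ctx)
        return w
    }

    func (w *worker) loop(ctx context.Context) {
        for {
            select {
            case <-ctx.Done():
                fmt.Println("worker's go routine is stopping...")
                return
            case <-time.After(100 * time.Millisecond):
                // periodic work goes here, e.g. requesting missing mini blocks
            }
        }
    }

    // Close stops the background go routine; safe even if it never started
    func (w *worker) Close() error {
        if w.cancelFunc != nil {
            w.cancelFunc()
        }
        return nil
    }

    func main() {
        w := newWorker()
        time.Sleep(250 * time.Millisecond)
        _ = w.Close()
        time.Sleep(50 * time.Millisecond) // let the goroutine print its exit line
    }
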
+ return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (stm *SyncTimerMock) IsInterfaceNil() bool { return stm == nil diff --git a/factory/triesComponents.go b/factory/triesComponents.go index c759122d877..6a33609d2ed 100644 --- a/factory/triesComponents.go +++ b/factory/triesComponents.go @@ -79,6 +79,7 @@ func (tcf *triesComponentsFactory) Create() (*TriesComponents, error) { tcf.config.AccountsTrieStorage, shardIDString, tcf.config.StateTriesConfig.AccountsStatePruningEnabled, + tcf.config.StateTriesConfig.MaxStateTrieLevelInMemory, ) if err != nil { return nil, err @@ -90,6 +91,7 @@ func (tcf *triesComponentsFactory) Create() (*TriesComponents, error) { tcf.config.PeerAccountsTrieStorage, shardIDString, tcf.config.StateTriesConfig.PeerStatePruningEnabled, + tcf.config.StateTriesConfig.MaxPeerTrieLevelInMemory, ) if err != nil { return nil, err diff --git a/genesis/data/initialSmartContract.go b/genesis/data/initialSmartContract.go index 96a7861603d..eb5edf52c42 100644 --- a/genesis/data/initialSmartContract.go +++ b/genesis/data/initialSmartContract.go @@ -7,9 +7,11 @@ type InitialSmartContract struct { VmType string `json:"vm-type"` InitParameters string `json:"init-parameters"` Type string `json:"type"` + Version string `json:"version"` ownerBytes []byte vmTypeBytes []byte addressBytes []byte + address string } // OwnerBytes will return the owner's address as raw bytes @@ -67,6 +69,21 @@ func (isc *InitialSmartContract) AddressBytes() []byte { return isc.addressBytes } +// SetAddress sets the initial smart contract address as string +func (isc *InitialSmartContract) SetAddress(address string) { + isc.address = address +} + +// Address returns the smart contract address string +func (isc *InitialSmartContract) Address() string { + return isc.address +} + +// GetVersion returns the recorded version (if existing) of the SC +func (isc *InitialSmartContract) GetVersion() string { + return isc.Version +} + // IsInterfaceNil returns if underlying object is true func (isc *InitialSmartContract) IsInterfaceNil() bool { return isc == nil diff --git a/genesis/data/initialSmartContract_test.go b/genesis/data/initialSmartContract_test.go index e4c3ae7bf25..5142a1f579d 100644 --- a/genesis/data/initialSmartContract_test.go +++ b/genesis/data/initialSmartContract_test.go @@ -37,12 +37,14 @@ func TestInitialSmartContract_Getters(t *testing.T) { vmType := "vm type" initParams := "init parameters" scType := "type" + version := "version" isc := &InitialSmartContract{ Owner: owner, Filename: filename, VmType: vmType, Type: scType, InitParameters: initParams, + Version: version, } assert.False(t, check.IfNil(isc)) @@ -51,6 +53,7 @@ func TestInitialSmartContract_Getters(t *testing.T) { assert.Equal(t, vmType, isc.GetVmType()) assert.Equal(t, scType, isc.GetType()) assert.Equal(t, initParams, isc.GetInitParameters()) + assert.Equal(t, version, isc.GetVersion()) } func TestInitialSmartContract_AddressBytes(t *testing.T) { @@ -63,3 +66,14 @@ func TestInitialSmartContract_AddressBytes(t *testing.T) { assert.Equal(t, addrBytes, recoverdAddrBytes) } + +func TestInitialSmartContract_Address(t *testing.T) { + t.Parallel() + + ia := &InitialSmartContract{} + address := "address" + ia.SetAddress(address) + recoverdAddress := ia.Address() + + assert.Equal(t, address, recoverdAddress) +} diff --git a/genesis/errors.go b/genesis/errors.go index a8404f64e74..acf5fecba9d 100644 --- a/genesis/errors.go +++ b/genesis/errors.go @@ -113,8 +113,23 @@ var ErrNilNodesListSplitter = 
errors.New("nil nodes list splitter") // ErrNilNodesSetup signals that a nil nodes setup handler has been provided var ErrNilNodesSetup = errors.New("nil nodes setup") -// ErrNilDeployProcessor signals that a nil deploy processor has been provided -var ErrNilDeployProcessor = errors.New("nil deploy processor") +// ErrAccountAlreadyExists signals that an account already exists +var ErrAccountAlreadyExists = errors.New("account already exists") + +// ErrAccountNotCreated signals that an account could not have been created +var ErrAccountNotCreated = errors.New("account not created") // ErrNilTrieStorageManager signals that a nil trie storage manager has been provided var ErrNilTrieStorageManager = errors.New("nil trie storage manager") + +// ErrWhileVerifyingDelegation signals that a verification error occurred +var ErrWhileVerifyingDelegation = errors.New("error occurred while verifying delegation SC") + +// ErrNilQueryService signals that a nil query service has been provided +var ErrNilQueryService = errors.New("nil query service") + +// ErrMissingElement signals a missing element event +var ErrMissingElement = errors.New("missing element") + +// ErrGetVersionFromSC signals that a call to "version" function on a contract resulted in an unexpected result +var ErrGetVersionFromSC = errors.New("get version from contract returned an invalid response") diff --git a/genesis/interface.go b/genesis/interface.go index ca3f3838833..97d8387f579 100644 --- a/genesis/interface.go +++ b/genesis/interface.go @@ -62,6 +62,9 @@ type InitialSmartContractHandler interface { VmTypeBytes() []byte SetAddressBytes(addressBytes []byte) AddressBytes() []byte + SetAddress(address string) + Address() string + GetVersion() string IsInterfaceNil() bool } @@ -76,6 +79,7 @@ type InitialSmartContractParser interface { // TxExecutionProcessor represents a transaction builder and executor containing also related helper functions type TxExecutionProcessor interface { ExecuteTransaction(nonce uint64, sndAddr []byte, rcvAddress []byte, value *big.Int, data []byte) error + AccountExists(address []byte) bool GetNonce(senderBytes []byte) (uint64, error) AddBalance(senderBytes []byte, value *big.Int) error AddNonce(senderBytes []byte, nonce uint64) error @@ -92,6 +96,5 @@ type NodesListSplitter interface { // DeployProcessor is able to deploy a smart contract type DeployProcessor interface { Deploy(sc InitialSmartContractHandler) error - SetReplacePlaceholders(handler func(txData string, scResultingAddressBytes []byte) (string, error)) IsInterfaceNil() bool } diff --git a/genesis/mock/deployProcessorStub.go b/genesis/mock/deployProcessorStub.go index 44d1dfbf9ef..d9a1f0bc070 100644 --- a/genesis/mock/deployProcessorStub.go +++ b/genesis/mock/deployProcessorStub.go @@ -17,13 +17,6 @@ func (dps *DeployProcessorStub) Deploy(sc genesis.InitialSmartContractHandler) e return nil } -// SetReplacePlaceholders - -func (dps *DeployProcessorStub) SetReplacePlaceholders(handler func(txData string, scResultingAddressBytes []byte) (string, error)) { - if dps.SetReplacePlaceholdersCalled != nil { - dps.SetReplacePlaceholdersCalled(handler) - } -} - // IsInterfaceNil - func (dps *DeployProcessorStub) IsInterfaceNil() bool { return dps == nil diff --git a/genesis/mock/poolsHolderMock.go b/genesis/mock/poolsHolderMock.go index 89f8ac9b3bb..40b22d2cfe5 100644 --- a/genesis/mock/poolsHolderMock.go +++ b/genesis/mock/poolsHolderMock.go @@ -30,11 +30,13 @@ func NewPoolsHolderMock() *PoolsHolderMock { phf.transactions, _ = txpool.NewShardedTxPool( 
txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 10000, - SizeInBytes: 1000000000, - Shards: 16, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) diff --git a/genesis/mock/queryServiceStub.go b/genesis/mock/queryServiceStub.go new file mode 100644 index 00000000000..66a3a88b889 --- /dev/null +++ b/genesis/mock/queryServiceStub.go @@ -0,0 +1,36 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/process" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" +) + +// QueryServiceStub - +type QueryServiceStub struct { + ComputeScCallGasLimitCalled func(tx *transaction.Transaction) (uint64, error) + ExecuteQueryCalled func(query *process.SCQuery) (*vmcommon.VMOutput, error) +} + +// ComputeScCallGasLimit - +func (qss *QueryServiceStub) ComputeScCallGasLimit(tx *transaction.Transaction) (uint64, error) { + if qss.ComputeScCallGasLimitCalled != nil { + return qss.ComputeScCallGasLimitCalled(tx) + } + + return 0, nil +} + +// ExecuteQuery - +func (qss *QueryServiceStub) ExecuteQuery(query *process.SCQuery) (*vmcommon.VMOutput, error) { + if qss.ExecuteQueryCalled != nil { + return qss.ExecuteQueryCalled(query) + } + + return &vmcommon.VMOutput{}, nil +} + +// IsInterfaceNil - +func (qss *QueryServiceStub) IsInterfaceNil() bool { + return qss == nil +} diff --git a/genesis/mock/shardCoordinatorMock.go b/genesis/mock/shardCoordinatorMock.go index 77cd9928b9b..65681981ec6 100644 --- a/genesis/mock/shardCoordinatorMock.go +++ b/genesis/mock/shardCoordinatorMock.go @@ -61,7 +61,7 @@ func (scm *ShardCoordinatorMock) SameShard(address1, address2 []byte) bool { return false } - return address1[len(address1)-1] == address2[len(address2)-1] + return scm.ComputeId(address1) == scm.ComputeId(address2) } // CommunicationIdentifier - diff --git a/genesis/mock/txExecutionProcessorStub.go b/genesis/mock/txExecutionProcessorStub.go index 606c6bca6bc..be3bd1982c6 100644 --- a/genesis/mock/txExecutionProcessorStub.go +++ b/genesis/mock/txExecutionProcessorStub.go @@ -5,6 +5,7 @@ import "math/big" // TxExecutionProcessorStub - type TxExecutionProcessorStub struct { ExecuteTransactionCalled func(nonce uint64, sndAddr []byte, rcvAddress []byte, value *big.Int, data []byte) error + AccountExistsCalled func(address []byte) bool GetNonceCalled func(senderBytes []byte) (uint64, error) AddBalanceCalled func(senderBytes []byte, value *big.Int) error AddNonceCalled func(senderBytes []byte, nonce uint64) error @@ -19,6 +20,15 @@ func (teps *TxExecutionProcessorStub) ExecuteTransaction(nonce uint64, sndAddr [ return nil } +// AccountExists - +func (teps *TxExecutionProcessorStub) AccountExists(address []byte) bool { + if teps.AccountExistsCalled != nil { + return teps.AccountExistsCalled(address) + } + + return false +} + // GetNonce - func (teps *TxExecutionProcessorStub) GetNonce(senderBytes []byte) (uint64, error) { if teps.GetNonceCalled != nil { diff --git a/genesis/process/argGenesisBlockCreator.go b/genesis/process/argGenesisBlockCreator.go index 3b9430241b7..2f6165ddcd4 100644 --- a/genesis/process/argGenesisBlockCreator.go +++ b/genesis/process/argGenesisBlockCreator.go @@ -40,7 +40,6 @@ type ArgsGenesisBlockCreator struct { TrieStorageManagers map[string]data.StorageManager ChainID string SystemSCConfig config.SystemSmartContractsConfig - // created component needed only for 
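
The pool configuration changes repeated across these mocks add per-sender caps next to the global ones, so a single spammy sender can no longer fill the whole transaction cache. A sketch of building the pool with the new limits, using the field set and values shown in the updated mocks (the values are test fixtures, not tuning advice):

    package example

    import (
        "github.com/ElrondNetwork/elrond-go/dataRetriever"
        "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool"
        "github.com/ElrondNetwork/elrond-go/storage/storageUnit"
    )

    // newTxPoolWithPerSenderCaps builds a sharded tx pool with both global
    // and per-sender capacity limits
    func newTxPoolWithPerSenderCaps(selfShardID uint32) (dataRetriever.ShardedDataCacherNotifier, error) {
        return txpool.NewShardedTxPool(txpool.ArgShardedTxPool{
            Config: storageUnit.CacheConfig{
                Size:                 100000,     // global capacity, in transactions
                SizePerSender:        1000,       // cap for any single sender
                SizeInBytes:          1000000000, // global capacity, in bytes
                SizeInBytesPerSender: 10000000,   // byte budget for any single sender
                Shards:               16,
            },
            MinGasPrice:    200000000000,
            NumberOfShards: 1,
            SelfShardID:    selfShardID,
        })
    }
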
hardfork importHandler update.ImportHandler } diff --git a/genesis/process/baseGenesisProcessorsFactory.go b/genesis/process/baseGenesisProcessorsFactory.go index 501a087d919..82979839e1f 100644 --- a/genesis/process/baseGenesisProcessorsFactory.go +++ b/genesis/process/baseGenesisProcessorsFactory.go @@ -1,6 +1,7 @@ package process import ( + "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/vm" ) @@ -13,4 +14,5 @@ type genesisProcessors struct { scrProcessor process.SmartContractResultProcessor rwdProcessor process.RewardTransactionProcessor blockchainHook process.BlockChainHookHandler + queryService external.SCQueryService } diff --git a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go index 4ea15ec0209..b636b3aa416 100644 --- a/genesis/process/genesisBlockCreator.go +++ b/genesis/process/genesisBlockCreator.go @@ -169,6 +169,9 @@ func (gbc *genesisBlockCreator) CreateGenesisBlocks() (map[uint32]data.HeaderHan } for shardID := uint32(0); shardID < gbc.arg.ShardCoordinator.NumberOfShards(); shardID++ { + log.Debug("genesis block creator", + "shard ID", shardID, + ) newArgument, err = gbc.getNewArgForShard(shardID) if err != nil { return nil, fmt.Errorf("'%w' while creating new argument for shard %d", @@ -189,6 +192,10 @@ func (gbc *genesisBlockCreator) CreateGenesisBlocks() (map[uint32]data.HeaderHan } } + log.Debug("genesis block creator", + "shard ID", "meta", + ) + newArgument, err = gbc.getNewArgForShard(core.MetachainShardId) if err != nil { return nil, fmt.Errorf("'%w' while creating new argument for metachain", err) @@ -206,6 +213,8 @@ func (gbc *genesisBlockCreator) CreateGenesisBlocks() (map[uint32]data.HeaderHan return nil, fmt.Errorf("'%w' while saving genesis block for metachain", err) } + //TODO call here trie pruning on all roothashes not from current shard + return genesisBlocks, nil } diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go index 93f4be04306..3a7aeb0a76c 100644 --- a/genesis/process/genesisBlockCreator_test.go +++ b/genesis/process/genesisBlockCreator_test.go @@ -1,6 +1,9 @@ package process import ( + "bytes" + "encoding/hex" + "math" "math/big" "testing" @@ -8,23 +11,27 @@ import ( "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" + factoryState "github.com/ElrondNetwork/elrond-go/data/state/factory" + "github.com/ElrondNetwork/elrond-go/data/trie" "github.com/ElrondNetwork/elrond-go/data/trie/factory" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/genesis/mock" "github.com/ElrondNetwork/elrond-go/genesis/parsing" "github.com/ElrondNetwork/elrond-go/process/economics" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/vm/systemSmartContracts/defaults" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +var entireGenesisSupply = big.NewInt(22000) +var nodePrice = big.NewInt(5000) + //TODO improve code coverage of this package -func createMockArgument() ArgsGenesisBlockCreator { +func createMockArgument(t *testing.T) ArgsGenesisBlockCreator { memDBMock := mock.NewMemDbMock() - storageManager := &mock.StorageManagerStub{DatabaseCalled: func() data.DBWriteCacher { - return memDBMock - }} + storageManager, _ := trie.NewTrieStorageManagerWithoutPruning(memDBMock) 
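
createMockArgument now backs the genesis test with a real trie storage manager over an in-memory DB and, below, a real accounts adapter instead of an AccountsStub, so CreateGenesisBlocks exercises genuine state commits. The repo's createAccountAdapter helper does the wiring; a rough equivalent is sketched here, with the caveat that the exact constructor signatures are assumptions and may differ (for example, trie creation may also take the new max-trie-level-in-memory parameter):

    package example

    import (
        "github.com/ElrondNetwork/elrond-go/data"
        "github.com/ElrondNetwork/elrond-go/data/state"
        "github.com/ElrondNetwork/elrond-go/data/trie"
        "github.com/ElrondNetwork/elrond-go/hashing"
        "github.com/ElrondNetwork/elrond-go/marshal"
    )

    // newInMemoryAccountsAdapter builds a real AccountsDB over an in-memory
    // trie; a sketch of what a createAccountAdapter-style helper might do,
    // not the repo's implementation
    func newInMemoryAccountsAdapter(
        marshalizer marshal.Marshalizer,
        hasher hashing.Hasher,
        accountFactory state.AccountFactory,
        storageManager data.StorageManager,
    ) (state.AccountsAdapter, error) {
        tr, err := trie.NewTrie(storageManager, marshalizer, hasher) // signature assumed
        if err != nil {
            return nil, err
        }
        return state.NewAccountsDB(tr, hasher, marshalizer, accountFactory) // signature assumed
    }
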
trieStorageManagers := make(map[string]data.StorageManager) trieStorageManagers[factory.UserAccountTrie] = storageManager @@ -34,7 +41,6 @@ func createMockArgument() ArgsGenesisBlockCreator { GenesisTime: 0, StartEpochNum: 0, PubkeyConv: mock.NewPubkeyConverterMock(32), - InitialNodesSetup: &mock.InitialNodesSetupHandlerStub{}, Blkc: &mock.BlockChainStub{}, Marshalizer: &mock.MarshalizerMock{}, Hasher: &mock.HasherMock{}, @@ -57,20 +63,14 @@ func createMockArgument() ArgsGenesisBlockCreator { SelfShardId: 0, } - arg.Accounts = &mock.AccountsStub{ - RootHashCalled: func() ([]byte, error) { - return make([]byte, 0), nil - }, - CommitCalled: func() ([]byte, error) { - return make([]byte, 0), nil - }, - SaveAccountCalled: func(account state.AccountHandler) error { - return nil - }, - LoadAccountCalled: func(address []byte) (state.AccountHandler, error) { - return state.NewEmptyUserAccount(), nil - }, - } + var err error + arg.Accounts, err = createAccountAdapter( + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + factoryState.NewAccountCreator(), + trieStorageManagers[factory.UserAccountTrie], + ) + require.Nil(t, err) arg.ValidatorAccounts = &mock.AccountsStub{ RootHashCalled: func() ([]byte, error) { @@ -87,16 +87,17 @@ func createMockArgument() ArgsGenesisBlockCreator { }, } - arg.GasMap = arwenConfig.MakeGasMap(1) + arg.GasMap = arwenConfig.MakeGasMapForTests() defaults.FillGasMapInternal(arg.GasMap, 1) ted := &economics.TestEconomicsData{ EconomicsData: &economics.EconomicsData{}, } - ted.SetGenesisNodePrice(big.NewInt(100)) + ted.SetGenesisNodePrice(nodePrice) ted.SetMinStep(big.NewInt(1)) - ted.SetTotalSupply(big.NewInt(10000)) + ted.SetTotalSupply(entireGenesisSupply) ted.SetUnJailPrice(big.NewInt(1)) + ted.SetMaxGasLimitPerBlock(math.MaxUint64) arg.Economics = ted.EconomicsData arg.Store = &mock.ChainStorerMock{ @@ -105,16 +106,46 @@ func createMockArgument() ArgsGenesisBlockCreator { }, } - arg.AccountsParser, _ = parsing.NewAccountsParser( + arg.AccountsParser, err = parsing.NewAccountsParser( "testdata/genesis.json", arg.Economics.TotalSupply(), arg.PubkeyConv, ) + require.Nil(t, err) - arg.SmartContractParser, _ = parsing.NewSmartContractsParser( + arg.SmartContractParser, err = parsing.NewSmartContractsParser( "testdata/smartcontracts.json", arg.PubkeyConv, ) + require.Nil(t, err) + + scAddressBytes, _ := hex.DecodeString("00000000000000000500761b8c4a25d3979359223208b412285f635e71300102") + stakedAddr, _ := hex.DecodeString("b00102030405060708090001020304050607080900010203040506070809000b") + arg.InitialNodesSetup = &mock.InitialNodesHandlerStub{ + InitialNodesInfoCalled: func() (map[uint32][]sharding.GenesisNodeInfoHandler, map[uint32][]sharding.GenesisNodeInfoHandler) { + return map[uint32][]sharding.GenesisNodeInfoHandler{ + 0: { + &mock.GenesisNodeInfoHandlerMock{ + AddressBytesValue: scAddressBytes, + PubKeyBytesValue: bytes.Repeat([]byte{1}, 96), + }, + &mock.GenesisNodeInfoHandlerMock{ + AddressBytesValue: stakedAddr, + PubKeyBytesValue: bytes.Repeat([]byte{2}, 96), + }, + }, + 1: { + &mock.GenesisNodeInfoHandlerMock{ + AddressBytesValue: scAddressBytes, + PubKeyBytesValue: bytes.Repeat([]byte{3}, 96), + }, + }, + }, make(map[uint32][]sharding.GenesisNodeInfoHandler) + }, + MinNumberOfNodesCalled: func() uint32 { + return 1 + }, + } return arg } @@ -122,7 +153,7 @@ func createMockArgument() ArgsGenesisBlockCreator { func TestGenesisBlockCreator_CreateGenesisBlocksShouldWork(t *testing.T) { t.Parallel() - arg := createMockArgument() + arg := createMockArgument(t) gbc, err 
:= NewGenesisBlockCreator(arg) require.Nil(t, err) diff --git a/genesis/process/intermediate/delegationDeployProcessor.go b/genesis/process/intermediate/delegationDeployProcessor.go deleted file mode 100644 index 5eb143038d9..00000000000 --- a/genesis/process/intermediate/delegationDeployProcessor.go +++ /dev/null @@ -1,85 +0,0 @@ -package intermediate - -import ( - "fmt" - "math/big" - "strings" - - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/genesis" -) - -const stakedPlaceholder = "%sc_total_stake%" - -var zero = big.NewInt(0) - -type delegationDeployProcessor struct { - genesis.DeployProcessor - accountsParser genesis.AccountsParser - pubkeyConv state.PubkeyConverter - nodePrice *big.Int -} - -// NewDelegationDeployProcessor returns a new deploy processor specialized for deploying delegation SC -func NewDelegationDeployProcessor( - deployProcessor genesis.DeployProcessor, - accountsParser genesis.AccountsParser, - pubkeyConv state.PubkeyConverter, - nodePrice *big.Int, -) (*delegationDeployProcessor, error) { - if check.IfNil(deployProcessor) { - return nil, genesis.ErrNilDeployProcessor - } - if check.IfNil(accountsParser) { - return nil, genesis.ErrNilAccountsParser - } - if check.IfNil(pubkeyConv) { - return nil, genesis.ErrNilPubkeyConverter - } - if nodePrice == nil { - return nil, genesis.ErrNilInitialNodePrice - } - if nodePrice.Cmp(zero) < 1 { - return nil, genesis.ErrInvalidInitialNodePrice - } - - ddp := &delegationDeployProcessor{ - DeployProcessor: deployProcessor, - accountsParser: accountsParser, - pubkeyConv: pubkeyConv, - nodePrice: nodePrice, - } - ddp.SetReplacePlaceholders(ddp.replaceDelegationPlaceholders) - - return ddp, nil -} - -func (ddp *delegationDeployProcessor) replaceDelegationPlaceholders( - txData string, - scResultingAddressBytes []byte, -) (string, error) { - - scResultingAddress := ddp.pubkeyConv.Encode(scResultingAddressBytes) - val := ddp.accountsParser.GetTotalStakedForDelegationAddress(scResultingAddress) - if val.Cmp(zero) < 1 { - return "", fmt.Errorf("%w, 0 or negative delegated value for resulting address %s", - genesis.ErrInvalidDelegationValue, scResultingAddress) - } - - exactDiv := big.NewInt(0).Set(val) - exactDiv.Mod(exactDiv, ddp.nodePrice) - if exactDiv.Cmp(zero) != 0 { - return "", fmt.Errorf("%w, not a node price multiple value, for resulting address %s", - genesis.ErrInvalidDelegationValue, scResultingAddress) - } - - txData = strings.Replace(txData, stakedPlaceholder, val.Text(16), -1) - - return txData, nil -} - -// IsInterfaceNil returns if underlying object is true -func (ddp *delegationDeployProcessor) IsInterfaceNil() bool { - return ddp == nil || ddp.DeployProcessor == nil -} diff --git a/genesis/process/intermediate/delegationDeployProcessor_test.go b/genesis/process/intermediate/delegationDeployProcessor_test.go deleted file mode 100644 index 4971ef0e670..00000000000 --- a/genesis/process/intermediate/delegationDeployProcessor_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package intermediate - -import ( - "errors" - "math/big" - "testing" - - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/genesis" - "github.com/ElrondNetwork/elrond-go/genesis/mock" - "github.com/stretchr/testify/assert" -) - -func TestNewDelegationDeployProcessor_NilDeployProcessorShouldErr(t *testing.T) { - t.Parallel() - - ddp, err := NewDelegationDeployProcessor( - nil, - &mock.AccountsParserStub{}, - 
mock.NewPubkeyConverterMock(32), - big.NewInt(1), - ) - - assert.True(t, check.IfNil(ddp)) - assert.Equal(t, genesis.ErrNilDeployProcessor, err) -} - -func TestNewDelegationDeployProcessor_NilAccountsParserShouldErr(t *testing.T) { - t.Parallel() - - ddp, err := NewDelegationDeployProcessor( - &mock.DeployProcessorStub{}, - nil, - mock.NewPubkeyConverterMock(32), - big.NewInt(1), - ) - - assert.True(t, check.IfNil(ddp)) - assert.Equal(t, genesis.ErrNilAccountsParser, err) -} - -func TestNewDelegationDeployProcessor_NilPubkeyConverterShouldErr(t *testing.T) { - t.Parallel() - - ddp, err := NewDelegationDeployProcessor( - &mock.DeployProcessorStub{}, - &mock.AccountsParserStub{}, - nil, - big.NewInt(1), - ) - - assert.True(t, check.IfNil(ddp)) - assert.Equal(t, genesis.ErrNilPubkeyConverter, err) -} - -func TestNewDelegationDeployProcessor_NilInitialNodePriceShouldErr(t *testing.T) { - t.Parallel() - - ddp, err := NewDelegationDeployProcessor( - &mock.DeployProcessorStub{}, - &mock.AccountsParserStub{}, - mock.NewPubkeyConverterMock(32), - nil, - ) - - assert.True(t, check.IfNil(ddp)) - assert.Equal(t, genesis.ErrNilInitialNodePrice, err) -} - -func TestNewDelegationDeployProcessor_InvalidInitialNodePriceShouldErr(t *testing.T) { - t.Parallel() - - ddp, err := NewDelegationDeployProcessor( - &mock.DeployProcessorStub{}, - &mock.AccountsParserStub{}, - mock.NewPubkeyConverterMock(32), - big.NewInt(0), - ) - - assert.True(t, check.IfNil(ddp)) - assert.Equal(t, genesis.ErrInvalidInitialNodePrice, err) -} - -func TestNewDelegationDeployProcessor_ShouldWork(t *testing.T) { - t.Parallel() - - ddp, err := NewDelegationDeployProcessor( - &mock.DeployProcessorStub{}, - &mock.AccountsParserStub{}, - mock.NewPubkeyConverterMock(32), - big.NewInt(1), - ) - - assert.False(t, check.IfNil(ddp)) - assert.Nil(t, err) -} - -//------- replaceDelegationPlaceholders - -func TestDelegationDeployProcessor_ReplaceDelegationPlaceholdersNotStakedShouldErr(t *testing.T) { - t.Parallel() - - ddp, _ := NewDelegationDeployProcessor( - &mock.DeployProcessorStub{}, - &mock.AccountsParserStub{ - GetTotalStakedForDelegationAddressCalled: func(delegationAddress string) *big.Int { - return big.NewInt(0) - }, - }, - mock.NewPubkeyConverterMock(32), - big.NewInt(1), - ) - - str, err := ddp.replaceDelegationPlaceholders("data", []byte("sc address")) - - assert.Equal(t, "", str) - assert.True(t, errors.Is(err, genesis.ErrInvalidDelegationValue)) -} - -func TestDelegationDeployProcessor_ReplaceDelegationPlaceholdersNotAnExactValueShouldErr(t *testing.T) { - t.Parallel() - - ddp, _ := NewDelegationDeployProcessor( - &mock.DeployProcessorStub{}, - &mock.AccountsParserStub{ - GetTotalStakedForDelegationAddressCalled: func(delegationAddress string) *big.Int { - return big.NewInt(4) - }, - }, - mock.NewPubkeyConverterMock(32), - big.NewInt(3), - ) - - str, err := ddp.replaceDelegationPlaceholders("data", []byte("sc address")) - - assert.Equal(t, "", str) - assert.True(t, errors.Is(err, genesis.ErrInvalidDelegationValue)) -} - -func TestDelegationDeployProcessor_ReplaceDelegationPlaceholdersShouldWork(t *testing.T) { - t.Parallel() - - ddp, _ := NewDelegationDeployProcessor( - &mock.DeployProcessorStub{}, - &mock.AccountsParserStub{ - GetTotalStakedForDelegationAddressCalled: func(delegationAddress string) *big.Int { - return big.NewInt(6) - }, - }, - mock.NewPubkeyConverterMock(32), - big.NewInt(3), - ) - - str, err := ddp.replaceDelegationPlaceholders(stakedPlaceholder, []byte("sc address")) - - assert.Equal(t, "6", str) - 
assert.Nil(t, err) -} diff --git a/genesis/process/intermediate/delegationProcessor.go b/genesis/process/intermediate/delegationProcessor.go deleted file mode 100644 index ae643f8060c..00000000000 --- a/genesis/process/intermediate/delegationProcessor.go +++ /dev/null @@ -1,214 +0,0 @@ -package intermediate - -import ( - "fmt" - - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/genesis" - "github.com/ElrondNetwork/elrond-go/sharding" -) - -const stakeFunction = "stake" - -type delegationProcessor struct { - genesis.TxExecutionProcessor - shardCoordinator sharding.Coordinator - accuntsParser genesis.AccountsParser - smartContractsParser genesis.InitialSmartContractParser - nodesListSplitter genesis.NodesListSplitter -} - -// NewDelegationProcessor returns a new delegation processor instance -func NewDelegationProcessor( - executor genesis.TxExecutionProcessor, - shardCoordinator sharding.Coordinator, - accountsParser genesis.AccountsParser, - smartContractParser genesis.InitialSmartContractParser, - nodesListSplitter genesis.NodesListSplitter, -) (*delegationProcessor, error) { - if check.IfNil(executor) { - return nil, genesis.ErrNilTxExecutionProcessor - } - if check.IfNil(shardCoordinator) { - return nil, genesis.ErrNilShardCoordinator - } - if check.IfNil(accountsParser) { - return nil, genesis.ErrNilAccountsParser - } - if check.IfNil(smartContractParser) { - return nil, genesis.ErrNilSmartContractParser - } - if check.IfNil(nodesListSplitter) { - return nil, genesis.ErrNilNodesListSplitter - } - - return &delegationProcessor{ - TxExecutionProcessor: executor, - shardCoordinator: shardCoordinator, - accuntsParser: accountsParser, - smartContractsParser: smartContractParser, - nodesListSplitter: nodesListSplitter, - }, nil -} - -// ExecuteDelegation will execute stake, set bls keys and activate on all delegation contracts from this shard -func (dp *delegationProcessor) ExecuteDelegation() (genesis.DelegationResult, error) { - smartContracts, err := dp.getDelegationScOnCurrentShard() - if err != nil { - return genesis.DelegationResult{}, err - } - - if len(smartContracts) == 0 { - return genesis.DelegationResult{}, nil - } - - dr := genesis.DelegationResult{} - dr.NumTotalStaked, err = dp.executeStake(smartContracts) - if err != nil { - return genesis.DelegationResult{}, err - } - - dr.NumTotalDelegated, err = dp.activateBlsKeys(smartContracts) - return dr, err -} - -func (dp *delegationProcessor) getDelegationScOnCurrentShard() ([]genesis.InitialSmartContractHandler, error) { - allSmartContracts, err := dp.smartContractsParser.InitialSmartContractsSplitOnOwnersShards(dp.shardCoordinator) - if err != nil { - return nil, err - } - - smartContracts := make([]genesis.InitialSmartContractHandler, 0) - smartContractsForCurrentShard := allSmartContracts[dp.shardCoordinator.SelfId()] - for _, sc := range smartContractsForCurrentShard { - if sc.GetType() == genesis.DelegationType { - smartContracts = append(smartContracts, sc) - } - } - - return smartContracts, nil -} - -func (dp *delegationProcessor) executeStake(smartContracts []genesis.InitialSmartContractHandler) (int, error) { - stakedOnDelegation := 0 - - for _, sc := range smartContracts { - accounts := dp.accuntsParser.GetInitialAccountsForDelegated(sc.AddressBytes()) - for _, ac := range accounts { - err := dp.stake(ac, sc) - if err != nil { - return 0, fmt.Errorf("%w while calling stake function from account %s", err, ac.GetAddress()) - } - } - stakedOnDelegation += len(accounts) - } - - return 
stakedOnDelegation, nil -} - -func (dp *delegationProcessor) stake(ac genesis.InitialAccountHandler, sc genesis.InitialSmartContractHandler) error { - isIntraShardCall := dp.shardCoordinator.SameShard(ac.AddressBytes(), sc.AddressBytes()) - - dh := ac.GetDelegationHandler() - if check.IfNil(dh) { - return genesis.ErrNilDelegationHandler - } - - var err error - if isIntraShardCall { - //intra shard transaction, get current nonce, add to balance the delegation value - // in order to make the tx processor work - nonce, errGetNonce := dp.GetNonce(ac.AddressBytes()) - if errGetNonce != nil { - return errGetNonce - } - - err = dp.AddBalance(ac.AddressBytes(), dh.GetValue()) - if err != nil { - return err - } - - return dp.ExecuteTransaction( - nonce, - ac.AddressBytes(), - sc.AddressBytes(), - dh.GetValue(), - []byte(stakeFunction), - ) - } - - err = dp.ExecuteTransaction( - 0, - ac.AddressBytes(), - sc.AddressBytes(), - dh.GetValue(), - []byte(stakeFunction), - ) - if err != nil { - return err - } - - return nil -} - -func (dp *delegationProcessor) activateBlsKeys(smartContracts []genesis.InitialSmartContractHandler) (int, error) { - //mockSignature := "genesis" - - totalDelegated := 0 - for _, sc := range smartContracts { - delegatedNodes := dp.nodesListSplitter.GetDelegatedNodes(sc.AddressBytes()) - - lenDelegated := len(delegatedNodes) - if lenDelegated == 0 { - continue - } - totalDelegated += lenDelegated - - //TODO refactor this: use the new delegation contract version that will only activate the nodes internally - //setBlsKeys := make([]string, 0, lenDelegated) - //activateKeys := make([]string, 0, lenDelegated) - //for _, node := range delegatedNodes { - // setBlsKeys = append(setBlsKeys, hex.EncodeToString(node.PubKeyBytes())) - // activateKeys = append(activateKeys, mockSignature) - //} - // - //nonce, err := dp.GetNonce(sc.OwnerBytes()) - //if err != nil { - // return 0, err - //} - // - //setString := fmt.Sprintf("setBlsKeys@%d@%s", lenDelegated, strings.Join(setBlsKeys, "@")) - //err = dp.ExecuteTransaction( - // nonce, - // sc.OwnerBytes(), - // sc.AddressBytes(), - // big.NewInt(0), - // []byte(setString), - //) - //if err != nil { - // return 0, err - //} - // - //nonce++ - // - //hexLenDelegated := hex.EncodeToString(big.NewInt(int64(lenDelegated)).Bytes()) - //activateString := fmt.Sprintf("activate@%s@%s", hexLenDelegated, strings.Join(activateKeys, "@")) - //err = dp.ExecuteTransaction( - // nonce, - // sc.OwnerBytes(), - // sc.AddressBytes(), - // big.NewInt(0), - // []byte(activateString), - //) - //if err != nil { - // return 0, err - //} - } - - return totalDelegated, nil -} - -// IsInterfaceNil returns if underlying object is true -func (dp *delegationProcessor) IsInterfaceNil() bool { - return dp == nil || dp.TxExecutionProcessor == nil -} diff --git a/genesis/process/intermediate/delegationProcessor_test.go b/genesis/process/intermediate/delegationProcessor_test.go deleted file mode 100644 index 7b7b207f296..00000000000 --- a/genesis/process/intermediate/delegationProcessor_test.go +++ /dev/null @@ -1,237 +0,0 @@ -package intermediate - -import ( - "bytes" - "fmt" - "math/big" - "strings" - "testing" - - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/genesis" - "github.com/ElrondNetwork/elrond-go/genesis/data" - "github.com/ElrondNetwork/elrond-go/genesis/mock" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/stretchr/testify/assert" -) - -func TestNewDelegationProcessor_NilExecutorShouldErr(t *testing.T) { - 
t.Parallel() - - dp, err := NewDelegationProcessor( - nil, - &mock.ShardCoordinatorMock{}, - &mock.AccountsParserStub{}, - &mock.SmartContractParserStub{}, - &mock.NodesListSplitterStub{}, - ) - - assert.True(t, check.IfNil(dp)) - assert.Equal(t, genesis.ErrNilTxExecutionProcessor, err) -} - -func TestNewDelegationProcessor_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - - dp, err := NewDelegationProcessor( - &mock.TxExecutionProcessorStub{}, - nil, - &mock.AccountsParserStub{}, - &mock.SmartContractParserStub{}, - &mock.NodesListSplitterStub{}, - ) - - assert.True(t, check.IfNil(dp)) - assert.Equal(t, genesis.ErrNilShardCoordinator, err) -} - -func TestNewDelegationProcessor_NilAccountsParserShouldErr(t *testing.T) { - t.Parallel() - - dp, err := NewDelegationProcessor( - &mock.TxExecutionProcessorStub{}, - &mock.ShardCoordinatorMock{}, - nil, - &mock.SmartContractParserStub{}, - &mock.NodesListSplitterStub{}, - ) - - assert.True(t, check.IfNil(dp)) - assert.Equal(t, genesis.ErrNilAccountsParser, err) -} - -func TestNewDelegationProcessor_NilSmartContractParserShouldErr(t *testing.T) { - t.Parallel() - - dp, err := NewDelegationProcessor( - &mock.TxExecutionProcessorStub{}, - &mock.ShardCoordinatorMock{}, - &mock.AccountsParserStub{}, - nil, - &mock.NodesListSplitterStub{}, - ) - - assert.True(t, check.IfNil(dp)) - assert.Equal(t, genesis.ErrNilSmartContractParser, err) -} - -func TestNewDelegationProcessor_NilNodesSplitterShouldErr(t *testing.T) { - t.Parallel() - - dp, err := NewDelegationProcessor( - &mock.TxExecutionProcessorStub{}, - &mock.ShardCoordinatorMock{}, - &mock.AccountsParserStub{}, - &mock.SmartContractParserStub{}, - nil, - ) - - assert.True(t, check.IfNil(dp)) - assert.Equal(t, genesis.ErrNilNodesListSplitter, err) -} - -func TestNewDelegationProcessor_ShouldWork(t *testing.T) { - t.Parallel() - - dp, err := NewDelegationProcessor( - &mock.TxExecutionProcessorStub{}, - &mock.ShardCoordinatorMock{}, - &mock.AccountsParserStub{}, - &mock.SmartContractParserStub{}, - &mock.NodesListSplitterStub{}, - ) - - assert.False(t, check.IfNil(dp)) - assert.Nil(t, err) -} - -//------- ExecuteDelegation - -func TestDelegationProcessor_ExecuteDelegationSplitFailsShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := fmt.Errorf("expected error") - dp, _ := NewDelegationProcessor( - &mock.TxExecutionProcessorStub{ - ExecuteTransactionCalled: func(nonce uint64, sndAddr []byte, rcvAddress []byte, value *big.Int, data []byte) error { - assert.Fail(t, "should have not execute a transaction") - - return nil - }, - }, - &mock.ShardCoordinatorMock{}, - &mock.AccountsParserStub{}, - &mock.SmartContractParserStub{ - InitialSmartContractsSplitOnOwnersShardsCalled: func(shardCoordinator sharding.Coordinator) (map[uint32][]genesis.InitialSmartContractHandler, error) { - return nil, expectedErr - }, - }, - &mock.NodesListSplitterStub{}, - ) - - result, err := dp.ExecuteDelegation() - - assert.Equal(t, expectedErr, err) - assert.Equal(t, genesis.DelegationResult{}, result) -} - -func TestDelegationProcessor_ExecuteDelegationNoDelegationScShouldRetNil(t *testing.T) { - t.Parallel() - - dp, _ := NewDelegationProcessor( - &mock.TxExecutionProcessorStub{ - ExecuteTransactionCalled: func(nonce uint64, sndAddr []byte, rcvAddress []byte, value *big.Int, data []byte) error { - assert.Fail(t, "should have not execute a transaction") - - return nil - }, - }, - &mock.ShardCoordinatorMock{}, - &mock.AccountsParserStub{}, - &mock.SmartContractParserStub{ - 
InitialSmartContractsSplitOnOwnersShardsCalled: func(shardCoordinator sharding.Coordinator) (map[uint32][]genesis.InitialSmartContractHandler, error) { - return map[uint32][]genesis.InitialSmartContractHandler{ - 0: { - &data.InitialSmartContract{ - Type: "test", - }, - }, - }, nil - }, - }, - &mock.NodesListSplitterStub{}, - ) - - result, err := dp.ExecuteDelegation() - - assert.Nil(t, err) - assert.Equal(t, genesis.DelegationResult{}, result) -} - -func TestDelegationProcessor_ExecuteDelegationStakeShouldWork(t *testing.T) { - t.Parallel() - - staker1 := []byte("stakerB") - staker2 := []byte("stakerC") - delegationSc := []byte("delegation SC") - - dp, _ := NewDelegationProcessor( - &mock.TxExecutionProcessorStub{ - ExecuteTransactionCalled: func(nonce uint64, sndAddr []byte, rcvAddress []byte, value *big.Int, data []byte) error { - isStakeCall := strings.Contains(string(data), "stake") - isStaker := bytes.Equal(sndAddr, staker1) || bytes.Equal(sndAddr, staker2) - if isStakeCall && !isStaker { - assert.Fail(t, "stake should have been called by the one of the stakers") - } - - return nil - }, - }, - &mock.ShardCoordinatorMock{ - SelfShardId: 0, - NumOfShards: 2, - }, - &mock.AccountsParserStub{ - GetInitialAccountsForDelegatedCalled: func(addressBytes []byte) []genesis.InitialAccountHandler { - if bytes.Equal(addressBytes, delegationSc) { - ia1 := &data.InitialAccount{ - Delegation: &data.DelegationData{}, - } - ia1.SetAddressBytes(staker1) - - ia2 := &data.InitialAccount{ - Delegation: &data.DelegationData{}, - } - ia2.SetAddressBytes(staker2) - - return []genesis.InitialAccountHandler{ia1, ia2} - } - - return make([]genesis.InitialAccountHandler, 0) - }, - }, - &mock.SmartContractParserStub{ - InitialSmartContractsSplitOnOwnersShardsCalled: func(shardCoordinator sharding.Coordinator) (map[uint32][]genesis.InitialSmartContractHandler, error) { - sc := &data.InitialSmartContract{ - Type: genesis.DelegationType, - } - sc.SetAddressBytes(delegationSc) - - return map[uint32][]genesis.InitialSmartContractHandler{ - 0: {sc}, - }, nil - }, - }, - &mock.NodesListSplitterStub{}, - ) - - result, err := dp.ExecuteDelegation() - - expectedResult := genesis.DelegationResult{ - NumTotalDelegated: 0, - NumTotalStaked: 2, - } - - assert.Nil(t, err) - assert.Equal(t, expectedResult, result) -} diff --git a/genesis/process/intermediate/deployProcessor.go b/genesis/process/intermediate/deployProcessor.go index ab1106297ec..2f63ffcd98a 100644 --- a/genesis/process/intermediate/deployProcessor.go +++ b/genesis/process/intermediate/deployProcessor.go @@ -2,15 +2,17 @@ package intermediate import ( "encoding/hex" + "fmt" "io/ioutil" "math/big" "path/filepath" "strings" - "sync" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/core/versioning" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/genesis" + "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/process" vmFactory "github.com/ElrondNetwork/elrond-go/vm/factory" ) @@ -18,37 +20,45 @@ import ( // codeMetadataHexForInitialSC used for initial SC deployment, set to upgrade-able const codeMetadataHexForInitialSC = "0100" const auctionScAddressPlaceholder = "%auction_sc_address%" +const versionFunction = "version" + +// ArgDeployProcessor is the argument used to create a deployProcessor instance +type ArgDeployProcessor struct { + Executor genesis.TxExecutionProcessor + PubkeyConv state.PubkeyConverter + BlockchainHook process.BlockChainHookHandler + 
QueryService external.SCQueryService +} type deployProcessor struct { genesis.TxExecutionProcessor - pubkeyConv state.PubkeyConverter - mutReplacePlaceholders sync.RWMutex - replacePlaceholders func(txData string, scResultingAddressBytes []byte) (string, error) - getScCodeAsHex func(filename string) (string, error) - blockchainHook process.BlockChainHookHandler - emptyAddress []byte + pubkeyConv state.PubkeyConverter + getScCodeAsHex func(filename string) (string, error) + blockchainHook process.BlockChainHookHandler + scQueryService process.SCQueryService + emptyAddress []byte } // NewDeployProcessor returns a new instance of deploy processor able to deploy SC -func NewDeployProcessor( - executor genesis.TxExecutionProcessor, - pubkeyConv state.PubkeyConverter, - blockchainHook process.BlockChainHookHandler, -) (*deployProcessor, error) { - if check.IfNil(executor) { +func NewDeployProcessor(arg ArgDeployProcessor) (*deployProcessor, error) { + if check.IfNil(arg.Executor) { return nil, genesis.ErrNilTxExecutionProcessor } - if check.IfNil(pubkeyConv) { + if check.IfNil(arg.PubkeyConv) { return nil, genesis.ErrNilPubkeyConverter } - if check.IfNil(blockchainHook) { + if check.IfNil(arg.BlockchainHook) { return nil, process.ErrNilBlockChainHook } + if check.IfNil(arg.QueryService) { + return nil, genesis.ErrNilQueryService + } dp := &deployProcessor{ - TxExecutionProcessor: executor, - pubkeyConv: pubkeyConv, - blockchainHook: blockchainHook, + TxExecutionProcessor: arg.Executor, + pubkeyConv: arg.PubkeyConv, + blockchainHook: arg.BlockchainHook, + scQueryService: arg.QueryService, } dp.getScCodeAsHex = dp.getSCCodeAsHex dp.emptyAddress = make([]byte, dp.pubkeyConv.Len()) @@ -78,27 +88,56 @@ func (dp *deployProcessor) Deploy(sc genesis.InitialSmartContractHandler) error } sc.SetAddressBytes(scResultingAddressBytes) + sc.SetAddress(dp.pubkeyConv.Encode(scResultingAddressBytes)) vmType := sc.GetVmType() - deployTxData := strings.Join([]string{code, vmType, codeMetadataHexForInitialSC}, "@") - deployTxData = dp.applyCommonPlaceholders(deployTxData) + initParams := dp.applyCommonPlaceholders(sc.GetInitParameters()) + arguments := []string{code, vmType, codeMetadataHexForInitialSC} + if len(initParams) > 0 { + arguments = append(arguments, initParams) + } + deployTxData := strings.Join(arguments, "@") + + log.Trace("deploying genesis SC", + "SC owner", sc.GetOwner(), + "SC address", sc.Address(), + "type", sc.GetType(), + "VM type", sc.GetVmType(), + "init params", initParams, + ) - dp.mutReplacePlaceholders.RLock() - if dp.replacePlaceholders != nil { - deployTxData, err = dp.replacePlaceholders(deployTxData, scResultingAddressBytes) - if err != nil { - return err - } + accountExists := dp.AccountExists(scResultingAddressBytes) + if accountExists { + return fmt.Errorf("%w for SC address %s, owner %s with nonce %d", + genesis.ErrAccountAlreadyExists, + sc.Address(), + sc.GetOwner(), + nonce, + ) } - dp.mutReplacePlaceholders.RUnlock() - return dp.ExecuteTransaction( + err = dp.ExecuteTransaction( nonce, sc.OwnerBytes(), dp.emptyAddress, big.NewInt(0), []byte(deployTxData), ) + if err != nil { + return err + } + + accountExists = dp.AccountExists(scResultingAddressBytes) + if !accountExists { + return fmt.Errorf("%w for SC address %s, owner %s with nonce %d", + genesis.ErrAccountNotCreated, + sc.Address(), + sc.GetOwner(), + nonce, + ) + } + + return dp.checkVersion(sc, scResultingAddressBytes) } func (dp *deployProcessor) applyCommonPlaceholders(txData string) string { @@ -117,11 +156,41 @@ 
func (dp *deployProcessor) getSCCodeAsHex(filename string) (string, error) { return hex.EncodeToString(code), nil } -// SetReplacePlaceholders sets the replace placeholder custom handler -func (dp *deployProcessor) SetReplacePlaceholders(handler func(txData string, scResultingAddressBytes []byte) (string, error)) { - dp.mutReplacePlaceholders.Lock() - dp.replacePlaceholders = handler - dp.mutReplacePlaceholders.Unlock() +func (dp *deployProcessor) checkVersion(sc genesis.InitialSmartContractHandler, scResultingAddressBytes []byte) error { + if len(sc.GetVersion()) == 0 { + //no version info, assuming deployed contract is up-to-date (let contracts that do not provide "version" function + // to be deployed at genesis time) + return nil + } + + vc, err := versioning.NewVersionComparator(sc.GetVersion()) + if err != nil { + return err + } + + scQueryVersion := &process.SCQuery{ + ScAddress: scResultingAddressBytes, + FuncName: versionFunction, + Arguments: [][]byte{}, + } + + vmOutputVersion, err := dp.scQueryService.ExecuteQuery(scQueryVersion) + if err != nil { + return err + } + if len(vmOutputVersion.ReturnData) != 1 { + return genesis.ErrGetVersionFromSC + } + + version := string(vmOutputVersion.ReturnData[0]) + + log.Debug("SC version", + "SC address", sc.Address(), + "SC owner", sc.GetOwner(), + "version", version, + ) + + return vc.Check(version) } // IsInterfaceNil returns if underlying object is true diff --git a/genesis/process/intermediate/deployProcessor_test.go b/genesis/process/intermediate/deployProcessor_test.go index 7d7880b1b2f..253ae1a5e53 100644 --- a/genesis/process/intermediate/deployProcessor_test.go +++ b/genesis/process/intermediate/deployProcessor_test.go @@ -12,17 +12,25 @@ import ( "github.com/ElrondNetwork/elrond-go/genesis/data" "github.com/ElrondNetwork/elrond-go/genesis/mock" "github.com/ElrondNetwork/elrond-go/process" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/stretchr/testify/assert" ) +func createMockDeployArg() ArgDeployProcessor { + return ArgDeployProcessor{ + Executor: &mock.TxExecutionProcessorStub{}, + PubkeyConv: mock.NewPubkeyConverterMock(32), + BlockchainHook: &mock.BlockChainHookHandlerMock{}, + QueryService: &mock.QueryServiceStub{}, + } +} + func TestNewDeployProcessor_NilExecutorShouldErr(t *testing.T) { t.Parallel() - dp, err := NewDeployProcessor( - nil, - mock.NewPubkeyConverterMock(32), - &mock.BlockChainHookHandlerMock{}, - ) + arg := createMockDeployArg() + arg.Executor = nil + dp, err := NewDeployProcessor(arg) assert.True(t, check.IfNil(dp)) assert.Equal(t, genesis.ErrNilTxExecutionProcessor, err) @@ -31,11 +39,9 @@ func TestNewDeployProcessor_NilExecutorShouldErr(t *testing.T) { func TestNewDeployProcessor_NilPubkeyConverterShouldErr(t *testing.T) { t.Parallel() - dp, err := NewDeployProcessor( - &mock.TxExecutionProcessorStub{}, - nil, - &mock.BlockChainHookHandlerMock{}, - ) + arg := createMockDeployArg() + arg.PubkeyConv = nil + dp, err := NewDeployProcessor(arg) assert.True(t, check.IfNil(dp)) assert.Equal(t, genesis.ErrNilPubkeyConverter, err) @@ -44,24 +50,30 @@ func TestNewDeployProcessor_NilPubkeyConverterShouldErr(t *testing.T) { func TestNewDeployProcessor_NilBlockchainHookShouldErr(t *testing.T) { t.Parallel() - dp, err := NewDeployProcessor( - &mock.TxExecutionProcessorStub{}, - mock.NewPubkeyConverterMock(32), - nil, - ) + arg := createMockDeployArg() + arg.BlockchainHook = nil + dp, err := NewDeployProcessor(arg) assert.True(t, check.IfNil(dp)) assert.Equal(t, process.ErrNilBlockChainHook, err) } 
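The checkVersion step introduced above only runs when a genesis smart contract entry declares a version; the testdata later in this patch pins the delegation contract to "0.2.*", and the cross-shard delegation test asserts that the deployed contract reports "0.2.0". The exact matching rules live in core/versioning.NewVersionComparator, which this patch does not show; the sketch below is a hypothetical stand-in that treats "*" as a per-component wildcard, which is consistent with "0.2.*" accepting "0.2.0":

```go
package main

import (
	"fmt"
	"strings"
)

// wildcardVersionCheck is a hypothetical stand-in for
// versioning.NewVersionComparator(expected).Check(actual): each
// dot-separated component must match exactly unless it is "*".
func wildcardVersionCheck(expected string, actual string) error {
	expParts := strings.Split(expected, ".")
	actParts := strings.Split(actual, ".")
	if len(expParts) != len(actParts) {
		return fmt.Errorf("version mismatch: got %s, expected %s", actual, expected)
	}
	for i, exp := range expParts {
		if exp != "*" && exp != actParts[i] {
			return fmt.Errorf("version mismatch: got %s, expected %s", actual, expected)
		}
	}
	return nil
}

func main() {
	fmt.Println(wildcardVersionCheck("0.2.*", "0.2.0")) // <nil>
	fmt.Println(wildcardVersionCheck("0.2.*", "0.3.1")) // version mismatch
}
```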
+func TestNewDeployProcessor_NilQueryServiceShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockDeployArg() + arg.QueryService = nil + dp, err := NewDeployProcessor(arg) + + assert.True(t, check.IfNil(dp)) + assert.Equal(t, genesis.ErrNilQueryService, err) +} + func TestNewDeployProcessor_ShouldWork(t *testing.T) { t.Parallel() - dp, err := NewDeployProcessor( - &mock.TxExecutionProcessorStub{}, - mock.NewPubkeyConverterMock(32), - &mock.BlockChainHookHandlerMock{}, - ) + arg := createMockDeployArg() + dp, err := NewDeployProcessor(arg) assert.False(t, check.IfNil(dp)) assert.Nil(t, err) @@ -72,11 +84,8 @@ func TestNewDeployProcessor_ShouldWork(t *testing.T) { func TestDeployProcessor_DeployGetCodeFailsShouldErr(t *testing.T) { t.Parallel() - dp, _ := NewDeployProcessor( - &mock.TxExecutionProcessorStub{}, - mock.NewPubkeyConverterMock(0), - &mock.BlockChainHookHandlerMock{}, - ) + arg := createMockDeployArg() + dp, _ := NewDeployProcessor(arg) expectedErr := fmt.Errorf("expected error") dp.getScCodeAsHex = func(filename string) (string, error) { return "", expectedErr @@ -91,15 +100,13 @@ func TestDeployProcessor_DeployGetNonceFailsShouldErr(t *testing.T) { t.Parallel() expectedErr := fmt.Errorf("expected error") - dp, _ := NewDeployProcessor( - &mock.TxExecutionProcessorStub{ - GetNonceCalled: func(senderBytes []byte) (uint64, error) { - return 0, expectedErr - }, + arg := createMockDeployArg() + arg.Executor = &mock.TxExecutionProcessorStub{ + GetNonceCalled: func(senderBytes []byte) (uint64, error) { + return 0, expectedErr }, - mock.NewPubkeyConverterMock(0), - &mock.BlockChainHookHandlerMock{}, - ) + } + dp, _ := NewDeployProcessor(arg) dp.getScCodeAsHex = func(filename string) (string, error) { return "", nil } @@ -113,41 +120,16 @@ func TestDeployProcessor_DeployNewAddressFailsShouldErr(t *testing.T) { t.Parallel() expectedErr := fmt.Errorf("expected error") - dp, _ := NewDeployProcessor( - &mock.TxExecutionProcessorStub{}, - mock.NewPubkeyConverterMock(0), - &mock.BlockChainHookHandlerMock{ - NewAddressCalled: func(creatorAddress []byte, creatorNonce uint64, vmType []byte) ([]byte, error) { - return nil, expectedErr - }, + arg := createMockDeployArg() + arg.BlockchainHook = &mock.BlockChainHookHandlerMock{ + NewAddressCalled: func(creatorAddress []byte, creatorNonce uint64, vmType []byte) ([]byte, error) { + return nil, expectedErr }, - ) - dp.getScCodeAsHex = func(filename string) (string, error) { - return "", nil } - - err := dp.Deploy(&data.InitialSmartContract{}) - - assert.Equal(t, expectedErr, err) -} - -func TestDeployProcessor_DeployReplacePlaceholdersFailsShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := fmt.Errorf("expected error") - dp, _ := NewDeployProcessor( - &mock.TxExecutionProcessorStub{}, - mock.NewPubkeyConverterMock(0), - &mock.BlockChainHookHandlerMock{}, - ) + dp, _ := NewDeployProcessor(arg) dp.getScCodeAsHex = func(filename string) (string, error) { return "", nil } - dp.SetReplacePlaceholders( - func(txData string, scResultingAddressBytes []byte) (string, error) { - return "", expectedErr - }, - ) err := dp.Deploy(&data.InitialSmartContract{}) @@ -159,57 +141,71 @@ func TestDeployProcessor_DeployShouldWork(t *testing.T) { testNonce := uint64(4453) testSender := []byte("sender") - lenAddress := 32 executeCalled := false testCode := "code" vmType := "0500" - dp, _ := NewDeployProcessor( - &mock.TxExecutionProcessorStub{ - GetNonceCalled: func(senderBytes []byte) (uint64, error) { - if bytes.Equal(senderBytes, testSender) { - return 
testNonce, nil - } - assert.Fail(t, "wrong sender") - - return 0, nil - }, - ExecuteTransactionCalled: func(nonce uint64, sndAddr []byte, rcvAddress []byte, value *big.Int, data []byte) error { - if nonce != testNonce { - assert.Fail(t, "nonce mismatch") - } - if !bytes.Equal(sndAddr, testSender) { - assert.Fail(t, "sender mismatch") - } - if !bytes.Equal(rcvAddress, make([]byte, lenAddress)) { - assert.Fail(t, "receiver mismatch") - } - if value.Cmp(zero) != 0 { - assert.Fail(t, "value should have been 0") - } - expectedCode := fmt.Sprintf("%s@%s@0100", testCode, vmType) - if string(data) != expectedCode { - assert.Fail(t, "code mismatch") - } - - executeCalled = true - return nil - }, + version := "1.0.0" + accountExists := false + arg := createMockDeployArg() + arg.Executor = &mock.TxExecutionProcessorStub{ + GetNonceCalled: func(senderBytes []byte) (uint64, error) { + if bytes.Equal(senderBytes, testSender) { + return testNonce, nil + } + assert.Fail(t, "wrong sender") + + return 0, nil }, - mock.NewPubkeyConverterMock(lenAddress), - &mock.BlockChainHookHandlerMock{ - NewAddressCalled: func(creatorAddress []byte, creatorNonce uint64, vmType []byte) ([]byte, error) { - buff := fmt.Sprintf("%s_%d_%s", string(creatorAddress), creatorNonce, hex.EncodeToString(vmType)) + ExecuteTransactionCalled: func(nonce uint64, sndAddr []byte, rcvAddress []byte, value *big.Int, data []byte) error { + if nonce != testNonce { + assert.Fail(t, "nonce mismatch") + } + if !bytes.Equal(sndAddr, testSender) { + assert.Fail(t, "sender mismatch") + } + if !bytes.Equal(rcvAddress, make([]byte, arg.PubkeyConv.Len())) { + assert.Fail(t, "receiver mismatch") + } + if value.Cmp(zero) != 0 { + assert.Fail(t, "value should have been 0") + } + expectedCode := fmt.Sprintf("%s@%s@0100", testCode, vmType) + if string(data) != expectedCode { + assert.Fail(t, "code mismatch") + } + + executeCalled = true + return nil + }, + AccountExistsCalled: func(address []byte) bool { + result := accountExists + accountExists = true + + return result + }, + } + arg.BlockchainHook = &mock.BlockChainHookHandlerMock{ + NewAddressCalled: func(creatorAddress []byte, creatorNonce uint64, vmType []byte) ([]byte, error) { + buff := fmt.Sprintf("%s_%d_%s", string(creatorAddress), creatorNonce, hex.EncodeToString(vmType)) - return []byte(buff), nil - }, + return []byte(buff), nil }, - ) + } + arg.QueryService = &mock.QueryServiceStub{ + ExecuteQueryCalled: func(query *process.SCQuery) (*vmcommon.VMOutput, error) { + return &vmcommon.VMOutput{ + ReturnData: [][]byte{[]byte(version)}, + }, nil + }, + } + dp, _ := NewDeployProcessor(arg) dp.getScCodeAsHex = func(filename string) (string, error) { return testCode, nil } sc := &data.InitialSmartContract{ - VmType: vmType, + VmType: vmType, + Version: version, } sc.SetOwnerBytes(testSender) diff --git a/genesis/process/intermediate/standardDelegationProcessor.go b/genesis/process/intermediate/standardDelegationProcessor.go new file mode 100644 index 00000000000..c3e0482317e --- /dev/null +++ b/genesis/process/intermediate/standardDelegationProcessor.go @@ -0,0 +1,460 @@ +package intermediate + +import ( + "bytes" + "encoding/hex" + "fmt" + "math/big" + "sort" + "strings" + + logger "github.com/ElrondNetwork/elrond-go-logger" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/genesis" + "github.com/ElrondNetwork/elrond-go/node/external" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// 
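Deploy now assembles the transaction data directly instead of going through the removed SetReplacePlaceholders hook: the hex-encoded code, the VM type and the upgrade-able code metadata are joined with "@", and init parameters are appended only when present. The rewritten test above checks for exactly code@0500@0100 when no init parameters are set. A self-contained sketch of that layout, reusing the constant and the testdata init string from this patch:

```go
package main

import (
	"fmt"
	"strings"
)

// Genesis contracts are deployed as upgrade-able, mirroring the
// codeMetadataHexForInitialSC constant in deployProcessor.go.
const codeMetadataHexForInitialSC = "0100"

// buildDeployTxData mirrors the layout built in deployProcessor.Deploy.
// In the real flow, initParams has already passed through
// applyCommonPlaceholders, which substitutes %auction_sc_address%.
func buildDeployTxData(codeHex string, vmType string, initParams string) string {
	arguments := []string{codeHex, vmType, codeMetadataHexForInitialSC}
	if len(initParams) > 0 {
		arguments = append(arguments, initParams)
	}
	return strings.Join(arguments, "@")
}

func main() {
	// Matches the expectation in TestDeployProcessor_DeployShouldWork.
	fmt.Println(buildDeployTxData("code", "0500", "")) // code@0500@0100

	// The delegation entry in testdata/smartcontracts.json carries init
	// parameters, shown here before placeholder substitution.
	fmt.Println(buildDeployTxData("code", "0500", "0BB8@%auction_sc_address%@0A61D0"))
}
```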
ArgStandardDelegationProcessor is the argument used to construct a standard delegation processor +type ArgStandardDelegationProcessor struct { + Executor genesis.TxExecutionProcessor + ShardCoordinator sharding.Coordinator + AccountsParser genesis.AccountsParser + SmartContractParser genesis.InitialSmartContractParser + NodesListSplitter genesis.NodesListSplitter + QueryService external.SCQueryService + NodePrice *big.Int +} + +const stakeFunction = "stakeGenesis" +const setBlsKeysFunction = "setBlsKeys" +const activateBlsKeysFunction = "activate" +const setNumNodesFunction = "setNumNodes" +const setStakePerNodeFunction = "setStakePerNode" + +var log = logger.GetOrCreate("genesis/process/intermediate") +var zero = big.NewInt(0) + +type standardDelegationProcessor struct { + genesis.TxExecutionProcessor + shardCoordinator sharding.Coordinator + accuntsParser genesis.AccountsParser + smartContractsParser genesis.InitialSmartContractParser + nodesListSplitter genesis.NodesListSplitter + queryService external.SCQueryService + nodePrice *big.Int +} + +// NewStandardDelegationProcessor returns a new standard delegation processor instance +func NewStandardDelegationProcessor(arg ArgStandardDelegationProcessor) (*standardDelegationProcessor, error) { + if check.IfNil(arg.Executor) { + return nil, genesis.ErrNilTxExecutionProcessor + } + if check.IfNil(arg.ShardCoordinator) { + return nil, genesis.ErrNilShardCoordinator + } + if check.IfNil(arg.AccountsParser) { + return nil, genesis.ErrNilAccountsParser + } + if check.IfNil(arg.SmartContractParser) { + return nil, genesis.ErrNilSmartContractParser + } + if check.IfNil(arg.NodesListSplitter) { + return nil, genesis.ErrNilNodesListSplitter + } + if check.IfNil(arg.QueryService) { + return nil, genesis.ErrNilQueryService + } + if arg.NodePrice == nil { + return nil, genesis.ErrNilInitialNodePrice + } + if arg.NodePrice.Cmp(zero) <= 0 { + return nil, genesis.ErrInvalidInitialNodePrice + } + + return &standardDelegationProcessor{ + TxExecutionProcessor: arg.Executor, + shardCoordinator: arg.ShardCoordinator, + accuntsParser: arg.AccountsParser, + smartContractsParser: arg.SmartContractParser, + nodesListSplitter: arg.NodesListSplitter, + queryService: arg.QueryService, + nodePrice: arg.NodePrice, + }, nil +} + +// ExecuteDelegation will execute stake, set bls keys and activate on all delegation contracts from this shard +func (sdp *standardDelegationProcessor) ExecuteDelegation() (genesis.DelegationResult, error) { + smartContracts, err := sdp.getDelegationScOnCurrentShard() + if err != nil { + return genesis.DelegationResult{}, err + } + if len(smartContracts) == 0 { + return genesis.DelegationResult{}, nil + } + + err = sdp.setDelegationStartParameters(smartContracts) + if err != nil { + return genesis.DelegationResult{}, err + } + + _, err = sdp.executeManageBlsKeys(smartContracts, sdp.getBlsKey, setBlsKeysFunction) + if err != nil { + return genesis.DelegationResult{}, err + } + + dr := genesis.DelegationResult{} + dr.NumTotalStaked, err = sdp.executeStake(smartContracts) + if err != nil { + return genesis.DelegationResult{}, err + } + + dr.NumTotalDelegated, err = sdp.executeManageBlsKeys(smartContracts, sdp.getBlsKeySig, activateBlsKeysFunction) + if err != nil { + return genesis.DelegationResult{}, err + } + + err = sdp.executeVerify(smartContracts) + if err != nil { + return genesis.DelegationResult{}, err + } + + return dr, err +} + +func (sdp *standardDelegationProcessor) getDelegationScOnCurrentShard() ([]genesis.InitialSmartContractHandler, 
error) { + allSmartContracts, err := sdp.smartContractsParser.InitialSmartContractsSplitOnOwnersShards(sdp.shardCoordinator) + if err != nil { + return nil, err + } + + smartContracts := make([]genesis.InitialSmartContractHandler, 0) + smartContractsForCurrentShard := allSmartContracts[sdp.shardCoordinator.SelfId()] + for _, sc := range smartContractsForCurrentShard { + if sc.GetType() == genesis.DelegationType { + smartContracts = append(smartContracts, sc) + } + } + + log.Trace("getDelegationScOnCurrentShard", + "num delegation SC", len(smartContracts), + "shard ID", sdp.shardCoordinator.SelfId(), + ) + return smartContracts, nil +} + +func (sdp *standardDelegationProcessor) setDelegationStartParameters(smartContracts []genesis.InitialSmartContractHandler) error { + for _, sc := range smartContracts { + delegatedNodes := sdp.nodesListSplitter.GetDelegatedNodes(sc.AddressBytes()) + numNodes := len(delegatedNodes) + + log.Trace("setDelegationStartParameters", + "SC owner", sc.GetOwner(), + "SC address", sc.Address(), + "num delegated nodes", numNodes, + "node price", sdp.nodePrice.String(), + "shard ID", sdp.shardCoordinator.SelfId(), + ) + + err := sdp.executeSetNumNodes(numNodes, sc) + if err != nil { + return err + } + + err = sdp.executeSetNodePrice(sc) + if err != nil { + return err + } + } + + return nil +} + +func (sdp *standardDelegationProcessor) executeSetNumNodes(numNodes int, sc genesis.InitialSmartContractHandler) error { + setNumNodesTxData := fmt.Sprintf("%s@%x", setNumNodesFunction, numNodes) + + nonce, err := sdp.GetNonce(sc.OwnerBytes()) + if err != nil { + return err + } + + return sdp.ExecuteTransaction( + nonce, + sc.OwnerBytes(), + sc.AddressBytes(), + zero, + []byte(setNumNodesTxData), + ) +} + +func (sdp *standardDelegationProcessor) executeSetNodePrice(sc genesis.InitialSmartContractHandler) error { + setStakePerNodeTxData := fmt.Sprintf("%s@%x", setStakePerNodeFunction, sdp.nodePrice) + + nonce, err := sdp.GetNonce(sc.OwnerBytes()) + if err != nil { + return err + } + + return sdp.ExecuteTransaction( + nonce, + sc.OwnerBytes(), + sc.AddressBytes(), + zero, + []byte(setStakePerNodeTxData), + ) +} + +func (sdp *standardDelegationProcessor) executeStake(smartContracts []genesis.InitialSmartContractHandler) (int, error) { + stakedOnDelegation := 0 + + for _, sc := range smartContracts { + accounts := sdp.accuntsParser.GetInitialAccountsForDelegated(sc.AddressBytes()) + if len(accounts) == 0 { + log.Debug("genesis delegation SC was not delegated by any account", + "SC owner", sc.GetOwner(), + "SC address", sc.Address(), + ) + continue + } + + totalDelegated := big.NewInt(0) + for _, ac := range accounts { + err := sdp.stake(ac, sc) + if err != nil { + return 0, fmt.Errorf("%w while calling stake function from account %s", err, ac.GetAddress()) + } + + totalDelegated.Add(totalDelegated, ac.GetDelegationHandler().GetValue()) + } + + log.Trace("executeStake", + "SC owner", sc.GetOwner(), + "SC address", sc.Address(), + "num accounts", len(accounts), + "total delegated", totalDelegated, + ) + stakedOnDelegation += len(accounts) + } + + return stakedOnDelegation, nil +} + +func (sdp *standardDelegationProcessor) stake(ac genesis.InitialAccountHandler, sc genesis.InitialSmartContractHandler) error { + isIntraShardCall := sdp.shardCoordinator.SameShard(ac.AddressBytes(), sc.AddressBytes()) + + dh := ac.GetDelegationHandler() + if check.IfNil(dh) { + return genesis.ErrNilDelegationHandler + } + if dh.GetValue() == nil { + return genesis.ErrInvalidDelegationValue + } + + var 
err error + var nonce = uint64(0) + if isIntraShardCall { + //intra shard transaction, get current nonce in order to make the tx processor work + nonce, err = sdp.GetNonce(ac.AddressBytes()) + if err != nil { + return err + } + } + + stakeData := fmt.Sprintf("%s@%s", stakeFunction, dh.GetValue().Text(16)) + err = sdp.ExecuteTransaction( + nonce, + ac.AddressBytes(), + sc.AddressBytes(), + zero, + []byte(stakeData), + ) + if err != nil { + return err + } + + return nil +} + +func (sdp *standardDelegationProcessor) executeManageBlsKeys( + smartContracts []genesis.InitialSmartContractHandler, + handler func(node sharding.GenesisNodeInfoHandler) string, + function string, +) (int, error) { + + log.Trace("executeManageSetBlsKeys", + "num delegation SC", len(smartContracts), + "shard ID", sdp.shardCoordinator.SelfId(), + "function", function, + ) + + totalDelegated := 0 + for _, sc := range smartContracts { + delegatedNodes := sdp.nodesListSplitter.GetDelegatedNodes(sc.AddressBytes()) + + lenDelegated := len(delegatedNodes) + if lenDelegated == 0 { + log.Debug("genesis delegation SC does not have staked nodes", + "SC owner", sc.GetOwner(), + "SC address", sc.Address(), + "function", function, + ) + continue + } + totalDelegated += lenDelegated + + log.Trace("executeSetBlsKeys", + "SC owner", sc.GetOwner(), + "SC address", sc.Address(), + "num nodes", lenDelegated, + "shard ID", sdp.shardCoordinator.SelfId(), + "function", function, + ) + + arguments := make([]string, 0, len(delegatedNodes)+1) + arguments = append(arguments, function) + for _, node := range delegatedNodes { + arg := handler(node) + arguments = append(arguments, arg) + } + + nonce, err := sdp.GetNonce(sc.OwnerBytes()) + if err != nil { + return 0, err + } + + err = sdp.ExecuteTransaction( + nonce, + sc.OwnerBytes(), + sc.AddressBytes(), + big.NewInt(0), + []byte(strings.Join(arguments, "@")), + ) + if err != nil { + return 0, err + } + } + + return totalDelegated, nil +} + +func (sdp *standardDelegationProcessor) getBlsKey(node sharding.GenesisNodeInfoHandler) string { + return hex.EncodeToString(node.PubKeyBytes()) +} + +func (sdp *standardDelegationProcessor) getBlsKeySig(_ sharding.GenesisNodeInfoHandler) string { + mockSignature := []byte("genesis signature") + + return hex.EncodeToString(mockSignature) +} + +func (sdp *standardDelegationProcessor) executeVerify(smartContracts []genesis.InitialSmartContractHandler) error { + for _, sc := range smartContracts { + err := sdp.verify(sc) + if err != nil { + return fmt.Errorf("%w for contract %s, owner %s", err, sc.Address(), sc.GetOwner()) + } + } + + return nil +} + +func (sdp *standardDelegationProcessor) verify(sc genesis.InitialSmartContractHandler) error { + err := sdp.verifyStakedValue(sc) + if err != nil { + return fmt.Errorf("%w for verifyStakedValue", err) + } + + err = sdp.verifyRegisteredNodes(sc) + if err != nil { + return fmt.Errorf("%w for verifyRegisteredNodes", err) + } + + return nil +} + +func (sdp *standardDelegationProcessor) verifyStakedValue(sc genesis.InitialSmartContractHandler) error { + scQueryStakeValue := &process.SCQuery{ + ScAddress: sc.AddressBytes(), + FuncName: "getFilledStake", + Arguments: [][]byte{}, + } + vmOutputStakeValue, err := sdp.queryService.ExecuteQuery(scQueryStakeValue) + if err != nil { + return err + } + if len(vmOutputStakeValue.ReturnData) != 1 { + return fmt.Errorf("%w return data should have contained one element", genesis.ErrWhileVerifyingDelegation) + } + scStakedValue := 
big.NewInt(0).SetBytes(vmOutputStakeValue.ReturnData[0]) + providedStakedValue := big.NewInt(0) + providedDelegators := sdp.accuntsParser.GetInitialAccountsForDelegated(sc.AddressBytes()) + + for _, delegator := range providedDelegators { + if check.IfNil(delegator) { + continue + } + dh := delegator.GetDelegationHandler() + if check.IfNil(dh) { + continue + } + if dh.GetValue() == nil { + continue + } + providedStakedValue.Add(providedStakedValue, dh.GetValue()) + } + if scStakedValue.Cmp(providedStakedValue) != 0 { + return fmt.Errorf("%w staked data mismatch: from SC: %s, provided: %s", + genesis.ErrWhileVerifyingDelegation, scStakedValue.String(), providedStakedValue.String()) + } + + return nil +} + +func (sdp *standardDelegationProcessor) verifyRegisteredNodes(sc genesis.InitialSmartContractHandler) error { + scQueryBlsKeys := &process.SCQuery{ + ScAddress: sc.AddressBytes(), + FuncName: "getBlsKeys", + Arguments: [][]byte{}, + } + + vmOutputBlsKeys, err := sdp.queryService.ExecuteQuery(scQueryBlsKeys) + if err != nil { + return err + } + delegatedNodes := sdp.nodesListSplitter.GetDelegatedNodes(sc.AddressBytes()) + nodesAddresses := make([][]byte, 0, len(delegatedNodes)) + for _, node := range delegatedNodes { + nodesAddresses = append(nodesAddresses, node.PubKeyBytes()) + } + + return sdp.sameElements(vmOutputBlsKeys.ReturnData, nodesAddresses) +} + +func (sdp *standardDelegationProcessor) sameElements(scReturned [][]byte, loaded [][]byte) error { + if len(scReturned) != len(loaded) { + return fmt.Errorf("%w staked nodes mismatch: %d found in SC, %d provided", + genesis.ErrWhileVerifyingDelegation, len(scReturned), len(loaded)) + } + + sort.Slice(scReturned, func(i, j int) bool { + return bytes.Compare(scReturned[i], scReturned[j]) < 0 + }) + sort.Slice(loaded, func(i, j int) bool { + return bytes.Compare(loaded[i], loaded[j]) < 0 + }) + + for i := 0; i < len(loaded); i++ { + if !bytes.Equal(loaded[i], scReturned[i]) { + return fmt.Errorf("%w, found in sc: %s, provided: %s", + genesis.ErrMissingElement, hex.EncodeToString(scReturned[i]), hex.EncodeToString(loaded[i])) + } + } + + return nil +} + +// IsInterfaceNil returns if underlying object is true +func (sdp *standardDelegationProcessor) IsInterfaceNil() bool { + return sdp == nil || sdp.TxExecutionProcessor == nil +} diff --git a/genesis/process/intermediate/standardDelegationProcessor_test.go b/genesis/process/intermediate/standardDelegationProcessor_test.go new file mode 100644 index 00000000000..25a9d21bee5 --- /dev/null +++ b/genesis/process/intermediate/standardDelegationProcessor_test.go @@ -0,0 +1,347 @@ +package intermediate + +import ( + "bytes" + "errors" + "fmt" + "math/big" + "strings" + "testing" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/genesis" + "github.com/ElrondNetwork/elrond-go/genesis/data" + "github.com/ElrondNetwork/elrond-go/genesis/mock" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/stretchr/testify/assert" +) + +func createMockStandardDelegationProcessorArg() ArgStandardDelegationProcessor { + return ArgStandardDelegationProcessor{ + Executor: &mock.TxExecutionProcessorStub{}, + ShardCoordinator: &mock.ShardCoordinatorMock{}, + AccountsParser: &mock.AccountsParserStub{}, + SmartContractParser: &mock.SmartContractParserStub{}, + NodesListSplitter: &mock.NodesListSplitterStub{}, + QueryService: &mock.QueryServiceStub{}, + NodePrice: 
big.NewInt(10), + } +} + +func TestNewStandardDelegationProcessor_NilExecutorShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockStandardDelegationProcessorArg() + arg.Executor = nil + dp, err := NewStandardDelegationProcessor(arg) + + assert.True(t, check.IfNil(dp)) + assert.Equal(t, genesis.ErrNilTxExecutionProcessor, err) +} + +func TestNewStandardDelegationProcessor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockStandardDelegationProcessorArg() + arg.ShardCoordinator = nil + dp, err := NewStandardDelegationProcessor(arg) + + assert.True(t, check.IfNil(dp)) + assert.Equal(t, genesis.ErrNilShardCoordinator, err) +} + +func TestNewStandardDelegationProcessor_NilAccountsParserShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockStandardDelegationProcessorArg() + arg.AccountsParser = nil + dp, err := NewStandardDelegationProcessor(arg) + + assert.True(t, check.IfNil(dp)) + assert.Equal(t, genesis.ErrNilAccountsParser, err) +} + +func TestNewStandardDelegationProcessor_NilSmartContractParserShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockStandardDelegationProcessorArg() + arg.SmartContractParser = nil + dp, err := NewStandardDelegationProcessor(arg) + + assert.True(t, check.IfNil(dp)) + assert.Equal(t, genesis.ErrNilSmartContractParser, err) +} + +func TestNewStandardDelegationProcessor_NilNodesSplitterShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockStandardDelegationProcessorArg() + arg.NodesListSplitter = nil + dp, err := NewStandardDelegationProcessor(arg) + + assert.True(t, check.IfNil(dp)) + assert.Equal(t, genesis.ErrNilNodesListSplitter, err) +} + +func TestNewStandardDelegationProcessor_NilQueryServiceShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockStandardDelegationProcessorArg() + arg.QueryService = nil + dp, err := NewStandardDelegationProcessor(arg) + + assert.True(t, check.IfNil(dp)) + assert.Equal(t, genesis.ErrNilQueryService, err) +} + +func TestNewStandardDelegationProcessor_NilNodePriceShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockStandardDelegationProcessorArg() + arg.NodePrice = nil + dp, err := NewStandardDelegationProcessor(arg) + + assert.True(t, check.IfNil(dp)) + assert.Equal(t, genesis.ErrNilInitialNodePrice, err) +} + +func TestNewStandardDelegationProcessor_ZeroNodePriceShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockStandardDelegationProcessorArg() + arg.NodePrice = big.NewInt(0) + dp, err := NewStandardDelegationProcessor(arg) + + assert.True(t, check.IfNil(dp)) + assert.Equal(t, genesis.ErrInvalidInitialNodePrice, err) +} + +func TestNewStandardDelegationProcessor_ShouldWork(t *testing.T) { + t.Parallel() + + arg := createMockStandardDelegationProcessorArg() + dp, err := NewStandardDelegationProcessor(arg) + + assert.False(t, check.IfNil(dp)) + assert.Nil(t, err) +} + +//------- ExecuteDelegation + +func TestStandardDelegationProcessor_ExecuteDelegationSplitFailsShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := fmt.Errorf("expected error") + arg := createMockStandardDelegationProcessorArg() + arg.Executor = &mock.TxExecutionProcessorStub{ + ExecuteTransactionCalled: func(nonce uint64, sndAddr []byte, rcvAddress []byte, value *big.Int, data []byte) error { + assert.Fail(t, "should have not execute a transaction") + + return nil + }, + } + arg.SmartContractParser = &mock.SmartContractParserStub{ + InitialSmartContractsSplitOnOwnersShardsCalled: func(shardCoordinator sharding.Coordinator) 
(map[uint32][]genesis.InitialSmartContractHandler, error) { + return nil, expectedErr + }, + } + + dp, _ := NewStandardDelegationProcessor(arg) + + result, err := dp.ExecuteDelegation() + + assert.Equal(t, expectedErr, err) + assert.Equal(t, genesis.DelegationResult{}, result) +} + +func TestStandardDelegationProcessor_ExecuteDelegationNoDelegationScShouldRetNil(t *testing.T) { + t.Parallel() + + arg := createMockStandardDelegationProcessorArg() + arg.Executor = &mock.TxExecutionProcessorStub{ + ExecuteTransactionCalled: func(nonce uint64, sndAddr []byte, rcvAddress []byte, value *big.Int, data []byte) error { + assert.Fail(t, "should have not execute a transaction") + + return nil + }, + } + arg.SmartContractParser = &mock.SmartContractParserStub{ + InitialSmartContractsSplitOnOwnersShardsCalled: func(shardCoordinator sharding.Coordinator) (map[uint32][]genesis.InitialSmartContractHandler, error) { + return map[uint32][]genesis.InitialSmartContractHandler{ + 0: { + &data.InitialSmartContract{ + Type: "test", + }, + }, + }, nil + }, + } + dp, _ := NewStandardDelegationProcessor(arg) + + result, err := dp.ExecuteDelegation() + + assert.Nil(t, err) + assert.Equal(t, genesis.DelegationResult{}, result) +} + +func TestStandardDelegationProcessor_ExecuteDelegationStakeShouldWork(t *testing.T) { + t.Parallel() + + staker1 := []byte("stakerB") + staker2 := []byte("stakerC") + delegationSc := []byte("delegation SC") + pubkey1 := []byte("pubkey1") + pubkey2 := []byte("pubkey2") + pubkey3 := []byte("pubkey3") + + arg := createMockStandardDelegationProcessorArg() + arg.Executor = &mock.TxExecutionProcessorStub{ + ExecuteTransactionCalled: func(nonce uint64, sndAddr []byte, rcvAddress []byte, value *big.Int, data []byte) error { + isStakeCall := strings.Contains(string(data), "stake") + isStaker := bytes.Equal(sndAddr, staker1) || bytes.Equal(sndAddr, staker2) + if isStakeCall && !isStaker { + assert.Fail(t, "stake should have been called by the one of the stakers") + } + + return nil + }, + } + arg.ShardCoordinator = &mock.ShardCoordinatorMock{ + SelfShardId: 0, + NumOfShards: 2, + } + arg.AccountsParser = &mock.AccountsParserStub{ + GetInitialAccountsForDelegatedCalled: func(addressBytes []byte) []genesis.InitialAccountHandler { + if bytes.Equal(addressBytes, delegationSc) { + ia1 := &data.InitialAccount{ + Delegation: &data.DelegationData{ + Value: big.NewInt(2), + }, + } + ia1.SetAddressBytes(staker1) + ia1.Delegation.SetAddressBytes(delegationSc) + + ia2 := &data.InitialAccount{ + Delegation: &data.DelegationData{ + Value: big.NewInt(2), + }, + } + ia2.SetAddressBytes(staker2) + ia2.Delegation.SetAddressBytes(delegationSc) + + return []genesis.InitialAccountHandler{ia1, ia2} + } + + return make([]genesis.InitialAccountHandler, 0) + }, + } + arg.SmartContractParser = &mock.SmartContractParserStub{ + InitialSmartContractsSplitOnOwnersShardsCalled: func(shardCoordinator sharding.Coordinator) (map[uint32][]genesis.InitialSmartContractHandler, error) { + sc := &data.InitialSmartContract{ + Type: genesis.DelegationType, + } + sc.SetAddressBytes(delegationSc) + + return map[uint32][]genesis.InitialSmartContractHandler{ + 0: {sc}, + }, nil + }, + } + arg.QueryService = &mock.QueryServiceStub{ + ExecuteQueryCalled: func(query *process.SCQuery) (*vmcommon.VMOutput, error) { + if query.FuncName == "getFilledStake" { + return &vmcommon.VMOutput{ + ReturnData: [][]byte{big.NewInt(4).Bytes()}, + }, nil + } + if query.FuncName == "getBlsKeys" { + return &vmcommon.VMOutput{ + ReturnData: [][]byte{pubkey2, 
pubkey3, pubkey1}, //random order should work + }, nil + } + + return nil, fmt.Errorf("unexpected function") + }, + } + arg.NodesListSplitter = &mock.NodesListSplitterStub{ + GetDelegatedNodesCalled: func(delegationScAddress []byte) []sharding.GenesisNodeInfoHandler { + return []sharding.GenesisNodeInfoHandler{ + &mock.GenesisNodeInfoHandlerMock{ + AddressBytesValue: delegationSc, + PubKeyBytesValue: pubkey1, + }, + &mock.GenesisNodeInfoHandlerMock{ + AddressBytesValue: delegationSc, + PubKeyBytesValue: pubkey2, + }, + &mock.GenesisNodeInfoHandlerMock{ + AddressBytesValue: delegationSc, + PubKeyBytesValue: pubkey3, + }, + } + }, + } + dp, _ := NewStandardDelegationProcessor(arg) + + result, err := dp.ExecuteDelegation() + + expectedResult := genesis.DelegationResult{ + NumTotalDelegated: 3, + NumTotalStaked: 2, + } + + assert.Nil(t, err) + assert.Equal(t, expectedResult, result) +} + +//------- SameElements + +func TestSameElements_WrongNumberShouldErr(t *testing.T) { + t.Parallel() + + scReturned := [][]byte{[]byte("buf1"), []byte("buf2"), []byte("buf3")} + loaded := [][]byte{[]byte("buf1"), []byte("buf2")} + + dp := &standardDelegationProcessor{} + err := dp.sameElements(scReturned, loaded) + + assert.True(t, errors.Is(err, genesis.ErrWhileVerifyingDelegation)) +} + +func TestSameElements_MissingFromLoadedShouldErr(t *testing.T) { + t.Parallel() + + scReturned := [][]byte{[]byte("buf5"), []byte("buf2"), []byte("buf3")} + loaded := [][]byte{[]byte("buf1"), []byte("buf3"), []byte("buf2")} + + dp := &standardDelegationProcessor{} + err := dp.sameElements(scReturned, loaded) + + assert.True(t, errors.Is(err, genesis.ErrMissingElement)) +} + +func TestSameElements_DuplicateShouldErr(t *testing.T) { + t.Parallel() + + scReturned := [][]byte{[]byte("buf2"), []byte("buf2"), []byte("buf3")} + loaded := [][]byte{[]byte("buf2"), []byte("buf1"), []byte("buf1")} + + dp := &standardDelegationProcessor{} + err := dp.sameElements(scReturned, loaded) + + assert.True(t, errors.Is(err, genesis.ErrMissingElement)) +} + +func TestSameElements_ShouldWork(t *testing.T) { + t.Parallel() + + scReturned := [][]byte{[]byte("buf1"), []byte("buf2"), []byte("buf3")} + loaded := [][]byte{[]byte("buf2"), []byte("buf3"), []byte("buf1")} + + dp := &standardDelegationProcessor{} + err := dp.sameElements(scReturned, loaded) + + assert.Nil(t, err) +} diff --git a/genesis/process/intermediate/txExecutionProcessor.go b/genesis/process/intermediate/txExecutionProcessor.go index f6f86236d34..9dc46142d10 100644 --- a/genesis/process/intermediate/txExecutionProcessor.go +++ b/genesis/process/intermediate/txExecutionProcessor.go @@ -66,6 +66,12 @@ func (tep *txExecutionProcessor) GetNonce(senderBytes []byte) (uint64, error) { return accnt.GetNonce(), nil } +// AccountExists returns if an account exists in the accounts DB +func (tep *txExecutionProcessor) AccountExists(address []byte) bool { + _, err := tep.accounts.GetExistingAccount(address) + return err == nil +} + // AddBalance adds the provided value on the balance field func (tep *txExecutionProcessor) AddBalance(senderBytes []byte, value *big.Int) error { accnt, err := tep.accounts.LoadAccount(senderBytes) diff --git a/genesis/process/memoryComponents.go b/genesis/process/memoryComponents.go index 4dff6f8666b..c879c8de222 100644 --- a/genesis/process/memoryComponents.go +++ b/genesis/process/memoryComponents.go @@ -8,13 +8,15 @@ import ( "github.com/ElrondNetwork/elrond-go/marshal" ) +const maxTrieLevelInMemory = uint(5) + func createAccountAdapter( marshalizer 
marshal.Marshalizer, hasher hashing.Hasher, accountFactory state.AccountFactory, trieStorage data.StorageManager, ) (state.AccountsAdapter, error) { - tr, err := trie.NewTrie(trieStorage, marshalizer, hasher) + tr, err := trie.NewTrie(trieStorage, marshalizer, hasher, maxTrieLevelInMemory) if err != nil { return nil, err } diff --git a/genesis/process/metaGenesisBlockCreator.go b/genesis/process/metaGenesisBlockCreator.go index 20abbc902e7..86ed3ad78d7 100644 --- a/genesis/process/metaGenesisBlockCreator.go +++ b/genesis/process/metaGenesisBlockCreator.go @@ -48,11 +48,6 @@ func CreateMetaGenesisBlock(arg ArgsGenesisBlockCreator, nodesListSplitter genes return nil, err } - _, err = arg.Accounts.Commit() - if err != nil { - return nil, err - } - err = setStakedData(arg, processors.txProcessor, nodesListSplitter) if err != nil { return nil, err @@ -333,6 +328,11 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator) (*genesisP return nil, err } + queryService, err := smartContract.NewSCQueryService(vmContainer, arg.Economics) + if err != nil { + return nil, err + } + return &genesisProcessors{ txCoordinator: txCoordinator, systemSCs: virtualMachineFactory.SystemSmartContractContainer(), @@ -341,6 +341,7 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator) (*genesisP scProcessor: scProcessor, scrProcessor: scProcessor, rwdProcessor: nil, + queryService: queryService, }, nil } @@ -380,11 +381,6 @@ func deploySystemSmartContracts( } } - _, err := arg.Accounts.Commit() - if err != nil { - return err - } - return nil } diff --git a/genesis/process/shardGenesisBlockCreator.go b/genesis/process/shardGenesisBlockCreator.go index ffd845d1551..4150eb4ef93 100644 --- a/genesis/process/shardGenesisBlockCreator.go +++ b/genesis/process/shardGenesisBlockCreator.go @@ -393,6 +393,11 @@ func createProcessorsForShard(arg ArgsGenesisBlockCreator) (*genesisProcessors, return nil, err } + queryService, err := smartContract.NewSCQueryService(vmContainer, arg.Economics) + if err != nil { + return nil, err + } + return &genesisProcessors{ txCoordinator: txCoordinator, systemSCs: nil, @@ -401,6 +406,7 @@ func createProcessorsForShard(arg ArgsGenesisBlockCreator) (*genesisProcessors, scrProcessor: scProcessor, rwdProcessor: rewardsTxProcessor, blockchainHook: vmFactoryImpl.BlockChainHookImpl(), + queryService: queryService, }, nil } @@ -438,28 +444,19 @@ func deployInitialSmartContract( return err } - var deployProc genesis.DeployProcessor - deployProc, err = intermediate.NewDeployProcessor( - txExecutor, - arg.PubkeyConv, - processors.blockchainHook, - ) + argDeploy := intermediate.ArgDeployProcessor{ + Executor: txExecutor, + PubkeyConv: arg.PubkeyConv, + BlockchainHook: processors.blockchainHook, + QueryService: processors.queryService, + } + deployProc, err := intermediate.NewDeployProcessor(argDeploy) if err != nil { return err } switch sc.GetType() { case genesis.DelegationType: - deployProc, err = intermediate.NewDelegationDeployProcessor( - deployProc, - arg.AccountsParser, - arg.PubkeyConv, - arg.Economics.GenesisNodePrice(), - ) - if err != nil { - return err - } - deployMetrics.numDelegation++ default: deployMetrics.numOtherTypes++ @@ -509,13 +506,17 @@ func executeDelegation( return genesis.DelegationResult{}, err } - delegationProcessor, err := intermediate.NewDelegationProcessor( - txExecutor, - arg.ShardCoordinator, - arg.AccountsParser, - arg.SmartContractParser, - nodesListSplitter, - ) + argDP := intermediate.ArgStandardDelegationProcessor{ + Executor: 
txExecutor, + ShardCoordinator: arg.ShardCoordinator, + AccountsParser: arg.AccountsParser, + SmartContractParser: arg.SmartContractParser, + NodesListSplitter: nodesListSplitter, + QueryService: processors.queryService, + NodePrice: arg.Economics.GenesisNodePrice(), + } + + delegationProcessor, err := intermediate.NewStandardDelegationProcessor(argDP) if err != nil { return genesis.DelegationResult{}, err } diff --git a/genesis/process/testdata/delegation.wasm b/genesis/process/testdata/delegation.wasm new file mode 100755 index 00000000000..5bfed3fe064 Binary files /dev/null and b/genesis/process/testdata/delegation.wasm differ diff --git a/genesis/process/testdata/genesis.json b/genesis/process/testdata/genesis.json index 106c768c21c..a535a79f804 100644 --- a/genesis/process/testdata/genesis.json +++ b/genesis/process/testdata/genesis.json @@ -1,6 +1,6 @@ [ { - "address": "0001", + "address": "a00102030405060708090001020304050607080900010203040506070809000a", "supply": "5000", "balance": "5000", "stakingvalue": "0", @@ -10,13 +10,23 @@ } }, { - "address": "0002", - "supply": "5000", + "address": "b00102030405060708090001020304050607080900010203040506070809000b", + "supply": "7000", "balance": "2000", - "stakingvalue": "3000", + "stakingvalue": "5000", "delegation": { "address": "", "value": "0" } + }, + { + "address": "c00102030405060708090001020304050607080900010203040506070809000c", + "supply": "10000", + "balance": "0", + "stakingvalue": "0", + "delegation": { + "address": "00000000000000000500761b8c4a25d3979359223208b412285f635e71300102", + "value": "10000" + } } ] diff --git a/genesis/process/testdata/smartcontracts.json b/genesis/process/testdata/smartcontracts.json index 5672251414e..18057e6099c 100644 --- a/genesis/process/testdata/smartcontracts.json +++ b/genesis/process/testdata/smartcontracts.json @@ -2,6 +2,16 @@ { "owner": "0102030405060708090001020304050607080900010203040506070809000102", "filename": "testdata/answer.wasm", - "vm-type": "0501" + "vm-type": "0500", + "init-parameters": "", + "type": "test" + }, + { + "owner": "0102030405060708090001020304050607080900010203040506070809000102", + "filename": "testdata/delegation.wasm", + "vm-type": "0500", + "init-parameters": "0BB8@%auction_sc_address%@0A61D0", + "type": "delegation", + "version": "0.2.*" } ] diff --git a/go.mod b/go.mod index 6dc3c9cbf5d..74ee88cda56 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ElrondNetwork/elrond-go go 1.13 require ( - github.com/ElrondNetwork/arwen-wasm-vm v0.3.16 + github.com/ElrondNetwork/arwen-wasm-vm v0.3.18 github.com/ElrondNetwork/concurrent-map v0.1.2 github.com/ElrondNetwork/elrond-go-logger v1.0.3 github.com/ElrondNetwork/elrond-vm v0.0.25 diff --git a/go.sum b/go.sum index 4f52f7a3d43..dfea1229a43 100644 --- a/go.sum +++ b/go.sum @@ -3,8 +3,8 @@ github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOv github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/ElrondNetwork/arwen-wasm-vm v0.3.16 h1:JZurmIDNlKK3W8kCClQ1DNAl7j4sLfGgc9yniVjisGE= -github.com/ElrondNetwork/arwen-wasm-vm v0.3.16/go.mod h1:Ny/uDh27JLWdFaCTp+Ek4hw5Ory/sTsjA3QWmP//mpU= +github.com/ElrondNetwork/arwen-wasm-vm v0.3.18 h1:uXo0P/V+MPYnr6f210Gt9fOTO1NhLcYbqwvlcfUf9Fs= +github.com/ElrondNetwork/arwen-wasm-vm v0.3.18/go.mod 
h1:cyBSzwbWSGqXJz5excz9PhF4FOhWvS3EFggiFReHsdU= github.com/ElrondNetwork/big-int-util v0.0.5 h1:e/9kK++9ZH/SdIYqLSUPRFYrDZmDWDgff3/7SCydq5I= github.com/ElrondNetwork/big-int-util v0.0.5/go.mod h1:96viBvoTXLjZOhEvE0D+QnAwg1IJLPAK6GVHMbC7Aw4= github.com/ElrondNetwork/concurrent-map v0.1.2 h1:mr2sVF2IPDsJO8DNGzCUiNQOJcadHuIRVZn+QFnCBlE= @@ -20,7 +20,7 @@ github.com/ElrondNetwork/elrond-vm-common v0.1.9/go.mod h1:ZakxPST/Wt8umnRtA9gob github.com/ElrondNetwork/elrond-vm-common v0.1.19 h1:mRO768HtMyXY23pvG18DonKVEIlNvXxoyKP94S9fb2A= github.com/ElrondNetwork/elrond-vm-common v0.1.19/go.mod h1:ZakxPST/Wt8umnRtA9gobcy3Dw2bywxwkC54P5VhO9g= github.com/ElrondNetwork/elrond-vm-util v0.1.1/go.mod h1:02LPKFh/Z5rbejgW2dazwjWGnsniuLOhRM2JjaOA3Mg= -github.com/ElrondNetwork/elrond-vm-util v0.2.3/go.mod h1:tLiuKpnQB5IrcMtZvwAJqo2WwUMzXX024SsEsTlmNL4= +github.com/ElrondNetwork/elrond-vm-util v0.2.5/go.mod h1:tLiuKpnQB5IrcMtZvwAJqo2WwUMzXX024SsEsTlmNL4= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/StackExchange/wmi v0.0.0-20170410192909-ea383cf3ba6e/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index 49ce75918c3..f2909384005 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -163,11 +163,13 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { txPool, _ := txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 1, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) @@ -225,7 +227,8 @@ func createAccountsDB(marshalizer marshal.Marshalizer) state.AccountsAdapter { } trieStorage, _ := trie.NewTrieStorageManager(store, marshalizer, hasher, cfg, ewl, generalCfg) - tr, _ := trie.NewTrie(trieStorage, marsh, hasher) + maxTrieLevelInMemory := uint(5) + tr, _ := trie.NewTrie(trieStorage, marsh, hasher, maxTrieLevelInMemory) adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, marshalizer, &mock.AccountsFactoryStub{ CreateAccountCalled: func(address []byte) (wrapper state.AccountHandler, e error) { return state.NewUserAccount(address) @@ -353,7 +356,7 @@ func createConsensusOnlyNode( singleBlsSigner := &mclsinglesig.BlsSingleSigner{} syncer := ntp.NewSyncTime(ntp.NewNTPGoogleConfig(), nil) - go syncer.StartSync() + syncer.StartSyncingTime() rounder, _ := round.NewRound( time.Unix(startTime, 0), diff --git a/integrationTests/longTests/executingSCTransactions/executingSCTransactions_test.go b/integrationTests/longTests/executingSCTransactions/executingSCTransactions_test.go index 92409415f19..d969472387e 100644 --- a/integrationTests/longTests/executingSCTransactions/executingSCTransactions_test.go +++ b/integrationTests/longTests/executingSCTransactions/executingSCTransactions_test.go @@ -77,7 +77,7 @@ func TestProcessesJoinGameTheSamePlayerMultipleTimesRewardAndEndgameInMultipleRo integrationTests.MintAllNodes(nodes, initialVal) integrationTests.MintAllPlayers(nodes, players, initialVal) - integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine) + integrationTests.DeployScTx(nodes, idxProposer, string(scCode), 
factory.IELEVirtualMachine, "") time.Sleep(stepDelay) integrationTests.ProposeBlock(nodes, []int{idxProposer}, round, nonce) integrationTests.SyncBlock(t, nodes, []int{idxProposer}, round) @@ -158,7 +158,7 @@ func TestProcessesJoinGame100PlayersMultipleTimesRewardAndEndgameInMultipleRound integrationTests.MintAllNodes(nodes, initialVal) integrationTests.MintAllPlayers(nodes, players, initialVal) - integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine) + integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine, "") time.Sleep(stepDelay) integrationTests.ProposeBlock(nodes, []int{idxProposer}, round, nonce) integrationTests.SyncBlock(t, nodes, []int{idxProposer}, round) @@ -249,7 +249,7 @@ func TestProcessesJoinGame100PlayersMultipleTimesRewardAndEndgameInMultipleRound nrRoundsToPropagateMultiShard = 1 } - integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine) + integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine, "") time.Sleep(stepDelay) for i := 0; i < nrRoundsToPropagateMultiShard; i++ { integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) @@ -345,7 +345,7 @@ func TestProcessesJoinGame100PlayersMultipleTimesRewardAndEndgameInMultipleRound idxProposers[1] = 2 idxProposers[2] = 4 - integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine) + integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine, "") time.Sleep(stepDelay) for i := 0; i < nrRoundsToPropagateMultiShard; i++ { integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) diff --git a/integrationTests/longTests/storage/storage_test.go b/integrationTests/longTests/storage/storage_test.go index 618f51f8486..d3663843488 100644 --- a/integrationTests/longTests/storage/storage_test.go +++ b/integrationTests/longTests/storage/storage_test.go @@ -107,7 +107,8 @@ func TestWriteContinuouslyInTree(t *testing.T) { trieStorage, _ := trie.NewTrieStorageManagerWithoutPruning(store) - tr, _ := trie.NewTrie(trieStorage, &marshal.JsonMarshalizer{}, &blake2b.Blake2b{}) + maxTrieLevelInMemory := uint(5) + tr, _ := trie.NewTrie(trieStorage, &marshal.JsonMarshalizer{}, &blake2b.Blake2b{}, maxTrieLevelInMemory) defer func() { _ = store.DestroyUnit() diff --git a/integrationTests/mock/endOfEpochTriggerStub.go b/integrationTests/mock/endOfEpochTriggerStub.go index f496e55561c..6b8a8065356 100644 --- a/integrationTests/mock/endOfEpochTriggerStub.go +++ b/integrationTests/mock/endOfEpochTriggerStub.go @@ -142,6 +142,11 @@ func (e *EpochStartTriggerStub) ReceivedHeader(header data.HeaderHandler) { func (e *EpochStartTriggerStub) SetRoundsPerEpoch(_ uint64) { } +// Close - +func (e *EpochStartTriggerStub) Close() error { + return nil +} + // IsInterfaceNil - func (e *EpochStartTriggerStub) IsInterfaceNil() bool { return e == nil diff --git a/integrationTests/mock/syncTimerMock.go b/integrationTests/mock/syncTimerMock.go index baa3038ae21..2fa41d42341 100644 --- a/integrationTests/mock/syncTimerMock.go +++ b/integrationTests/mock/syncTimerMock.go @@ -10,8 +10,8 @@ type SyncTimerMock struct { CurrentTimeCalled func() time.Time } -// StartSync method does the time synchronization at every syncPeriod time elapsed. This should be started as a go routine -func (stm *SyncTimerMock) StartSync() { +// StartSyncingTime method does the time synchronization at every syncPeriod time elapsed. 
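The consensus test initializer earlier in this patch drops the explicit go statement when calling the renamed method (go syncer.StartSync() becomes syncer.StartSyncingTime()), which suggests the timer now spawns its own goroutine; together with the Close methods added to the stubs in this patch, that gives the sync timer a start/stop lifecycle. A minimal sketch of that assumed lifecycle (the period, the loop body and all names besides StartSyncingTime and Close are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// syncTimer sketches the lifecycle implied by StartSyncingTime/Close.
type syncTimer struct {
	syncPeriod time.Duration
	stopChan   chan struct{}
}

func newSyncTimer(period time.Duration) *syncTimer {
	return &syncTimer{syncPeriod: period, stopChan: make(chan struct{})}
}

// StartSyncingTime launches the periodic sync loop on its own goroutine,
// so callers no longer need to prefix the call with go.
func (st *syncTimer) StartSyncingTime() {
	go func() {
		ticker := time.NewTicker(st.syncPeriod)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				fmt.Println("synchronizing time offset") // stands in for the NTP query
			case <-st.stopChan:
				return
			}
		}
	}()
}

// Close stops the sync loop, mirroring the Close added to the mocks.
func (st *syncTimer) Close() error {
	close(st.stopChan)
	return nil
}

func main() {
	st := newSyncTimer(50 * time.Millisecond)
	st.StartSyncingTime()
	time.Sleep(120 * time.Millisecond)
	_ = st.Close()
}
```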
This should be started as a go routine +func (stm *SyncTimerMock) StartSyncingTime() { panic("implement me") } @@ -38,6 +38,11 @@ func (stm *SyncTimerMock) CurrentTime() time.Time { return time.Unix(0, 0) } +// Close - +func (stm *SyncTimerMock) Close() error { + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (stm *SyncTimerMock) IsInterfaceNil() bool { return stm == nil diff --git a/integrationTests/multiShard/block/executingMiniblocksSc/executingMiniblocksSc_test.go b/integrationTests/multiShard/block/executingMiniblocksSc/executingMiniblocksSc_test.go index 4c3813d5de2..a191b61ef80 100644 --- a/integrationTests/multiShard/block/executingMiniblocksSc/executingMiniblocksSc_test.go +++ b/integrationTests/multiShard/block/executingMiniblocksSc/executingMiniblocksSc_test.go @@ -88,7 +88,7 @@ func TestProcessWithScTxsTopUpAndWithdrawOnlyProposers(t *testing.T) { nodes[idxNodeShard1].OwnAccount.Nonce, factory.IELEVirtualMachine, ) - integrationTests.DeployScTx(nodes, idxNodeShard1, string(scCode), factory.IELEVirtualMachine) + integrationTests.DeployScTx(nodes, idxNodeShard1, string(scCode), factory.IELEVirtualMachine, "") integrationTests.UpdateRound(nodes, round) integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) @@ -231,12 +231,12 @@ func TestProcessWithScTxsJoinAndRewardTwoNodesInShard(t *testing.T) { round = integrationTests.IncrementAndPrintRound(round) nonce++ - initialVal := big.NewInt(10000000) + initialVal := big.NewInt(100000000) topUpValue := big.NewInt(500) rewardValue := big.NewInt(10) integrationTests.MintAllNodes(nodes, initialVal) - integrationTests.DeployScTx(nodes, idxProposerShard1, string(scCode), factory.IELEVirtualMachine) + integrationTests.DeployScTx(nodes, idxProposerShard1, string(scCode), factory.IELEVirtualMachine, "") round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) @@ -352,11 +352,11 @@ func TestShouldProcessWithScTxsJoinNoCommitShouldProcessedByValidators(t *testin round = integrationTests.IncrementAndPrintRound(round) nonce++ - initialVal := big.NewInt(10000000) + initialVal := big.NewInt(100000000) topUpValue := big.NewInt(500) integrationTests.MintAllNodes(nodes, initialVal) - integrationTests.DeployScTx(nodes, idxProposerShard1, string(scCode), factory.IELEVirtualMachine) + integrationTests.DeployScTx(nodes, idxProposerShard1, string(scCode), factory.IELEVirtualMachine, "") round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) integrationTests.PlayerJoinsGame( diff --git a/integrationTests/multiShard/endOfEpoch/epochChangeWithNodesShufflingAndRater/epochChangeWithNodesShufflingAndRater_test.go b/integrationTests/multiShard/endOfEpoch/epochChangeWithNodesShufflingAndRater/epochChangeWithNodesShufflingAndRater_test.go index ee154293e30..eb192676780 100644 --- a/integrationTests/multiShard/endOfEpoch/epochChangeWithNodesShufflingAndRater/epochChangeWithNodesShufflingAndRater_test.go +++ b/integrationTests/multiShard/endOfEpoch/epochChangeWithNodesShufflingAndRater/epochChangeWithNodesShufflingAndRater_test.go @@ -30,7 +30,6 @@ func TestEpochChangeWithNodesShufflingAndRater(t *testing.T) { seedAddress := integrationTests.GetConnectableAddress(advertiser) rater, _ := rating.NewBlockSigningRater(integrationTests.CreateRatingsData()) - coordinatorFactory := &integrationTests.IndexHashedNodesCoordinatorWithRaterFactory{ PeerAccountListAndRatingHandler: rater, } diff --git 
a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 77f313bedf1..7e1a717399a 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -306,6 +306,7 @@ func createTries( config.AccountsTrieStorage, core.GetShardIdString(shardId), config.StateTriesConfig.AccountsStatePruningEnabled, + config.StateTriesConfig.MaxStateTrieLevelInMemory, ) if err != nil { return nil, nil, err @@ -317,6 +318,7 @@ func createTries( config.PeerAccountsTrieStorage, core.GetShardIdString(shardId), config.StateTriesConfig.PeerStatePruningEnabled, + config.StateTriesConfig.MaxPeerTrieLevelInMemory, ) if err != nil { return nil, nil, err @@ -400,6 +402,8 @@ func getGeneralConfig() config.Config { CheckpointRoundsModulus: 100, AccountsStatePruningEnabled: false, PeerStatePruningEnabled: false, + MaxStateTrieLevelInMemory: 5, + MaxPeerTrieLevelInMemory: 5, }, TrieStorageManagerConfig: config.TrieStorageManagerConfig{ PruningBufferLen: 1000, diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 6c1b8b6197a..988bcabb31b 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -212,7 +212,7 @@ func hardForkImport( importStorageConfigs []*config.StorageConfig, ) { for id, node := range nodes { - gasSchedule := arwenConfig.MakeGasMap(1) + gasSchedule := arwenConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) log.Warn("started import process") @@ -319,6 +319,7 @@ func createHardForkExporter( }, }, ExportStateStorageConfig: exportConfig, + MaxTrieLevelInMemory: uint(5), WhiteListHandler: node.WhiteListHandler, WhiteListerVerifiedTxs: node.WhiteListerVerifiedTxs, InterceptorsContainer: node.InterceptorsContainer, diff --git a/integrationTests/multiShard/smartContract/scCallingSC_test.go b/integrationTests/multiShard/smartContract/scCallingSC_test.go index e28cab3c6b8..0109209d255 100644 --- a/integrationTests/multiShard/smartContract/scCallingSC_test.go +++ b/integrationTests/multiShard/smartContract/scCallingSC_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -24,6 +25,8 @@ import ( "github.com/stretchr/testify/require" ) +var log = logger.GetOrCreate("integrationtests/multishard/smartcontract") + func TestSCCallingIntraShard(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -148,7 +151,7 @@ func TestScDeployAndChangeScOwner(t *testing.T) { firstSCOwner := nodes[0].OwnAccount.Address // deploy the smart contracts - firstSCAddress := putDeploySCToDataPool("./testdata/counter.wasm", firstSCOwner, 0, big.NewInt(50), "", nodes) + firstSCAddress := putDeploySCToDataPool("../../vm/arwen/testdata/counter/counter.wasm", firstSCOwner, 0, big.NewInt(50), "", nodes) round := uint64(0) nonce := uint64(0) @@ -249,7 +252,7 @@ func TestScDeployAndClaimSmartContractDeveloperRewards(t *testing.T) { firstSCOwner := nodes[0].OwnAccount.Address // deploy the smart contracts - firstSCAddress := putDeploySCToDataPool("./testdata/counter.wasm", firstSCOwner, 0, big.NewInt(50), "", nodes) + firstSCAddress := 
putDeploySCToDataPool("../../vm/arwen/testdata/counter/counter.wasm", firstSCOwner, 0, big.NewInt(50), "", nodes) round := uint64(0) nonce := uint64(0) @@ -564,22 +567,48 @@ func TestSCCallingInCrossShardDelegation(t *testing.T) { // mint smart contract holders shardNode := findAnyShardNode(nodes) delegateSCOwner := shardNode.OwnAccount.Address - totalStake := shardNode.EconomicsData.GenesisNodePrice() + stakePerNode := shardNode.EconomicsData.GenesisNodePrice() + totalStake := stakePerNode // 1 node only in this test nodeSharePer10000 := 3000 - stakerBLSKey, _ := hex.DecodeString(strings.Repeat("a", 128*2)) + time_before_force_unstake := 680400 + stakerBLSKey, _ := hex.DecodeString(strings.Repeat("a", 96*2)) stakerBLSSignature, _ := hex.DecodeString(strings.Repeat("c", 32*2)) // deploy the delegation smart contract delegateSCAddress := putDeploySCToDataPool( "./testdata/delegate/delegation.wasm", delegateSCOwner, 0, big.NewInt(0), - fmt.Sprintf("@%x@%x@%s", totalStake, nodeSharePer10000, hex.EncodeToString(factory2.AuctionSCAddress)), + fmt.Sprintf("@%x@%s@%x", nodeSharePer10000, hex.EncodeToString(factory2.AuctionSCAddress), time_before_force_unstake), nodes) shardNode.OwnAccount.Nonce++ nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 1, nonce, round, idxProposers) + // check that the version is the expected one + scQueryVersion := &process.SCQuery{ + ScAddress: delegateSCAddress, + FuncName: "version", + Arguments: [][]byte{}, + } + vmOutputVersion, _ := shardNode.SCQueryService.ExecuteQuery(scQueryVersion) + assert.NotNil(t, vmOutputVersion) + assert.Equal(t, len(vmOutputVersion.ReturnData), 1) + assert.True(t, bytes.Equal([]byte("0.2.0"), vmOutputVersion.ReturnData[0])) + log.Info("SC deployed", "version", string(vmOutputVersion.ReturnData[0])) + + // set number of nodes + setNumNodesTxData := "setNumNodes@1" + integrationTests.CreateAndSendTransaction(shardNode, big.NewInt(0), delegateSCAddress, setNumNodesTxData) + + nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 1, nonce, round, idxProposers) + + // set stake per node + setStakePerNodeTxData := fmt.Sprintf("setStakePerNode@%x", stakePerNode) + integrationTests.CreateAndSendTransaction(shardNode, big.NewInt(0), delegateSCAddress, setStakePerNodeTxData) + + nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 1, nonce, round, idxProposers) + // set BLS keys in the contract - setBlsTxData := "setBlsKeys@1@" + hex.EncodeToString(stakerBLSKey) + setBlsTxData := "setBlsKeys@" + hex.EncodeToString(stakerBLSKey) integrationTests.CreateAndSendTransaction(shardNode, big.NewInt(0), delegateSCAddress, setBlsTxData) nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 1, nonce, round, idxProposers) @@ -607,7 +636,7 @@ func TestSCCallingInCrossShardDelegation(t *testing.T) { // check that delegation contract was correctly initialized by querying for total stake scQuery1 := &process.SCQuery{ ScAddress: delegateSCAddress, - FuncName: "getTotalStake", + FuncName: "getExpectedStake", Arguments: [][]byte{}, } vmOutput1, _ := shardNode.SCQueryService.ExecuteQuery(scQuery1) @@ -629,13 +658,13 @@ func TestSCCallingInCrossShardDelegation(t *testing.T) { // check that the staking transaction worked scQuery3 := &process.SCQuery{ ScAddress: delegateSCAddress, - FuncName: "getUnfilledStake", + FuncName: "getFilledStake", Arguments: [][]byte{}, } vmOutput3, _ := shardNode.SCQueryService.ExecuteQuery(scQuery3) assert.NotNil(t, vmOutput3) assert.Equal(t, len(vmOutput3.ReturnData), 1) - assert.True(t, 
len(vmOutput3.ReturnData[0]) == 0) // unfilled stake == 0 + assert.True(t, totalStake.Cmp(big.NewInt(0).SetBytes(vmOutput3.ReturnData[0])) == 0) // filled stake == total stake // check that the staking system smart contract has the value for _, node := range nodes { @@ -674,7 +703,11 @@ func putDeploySCToDataPool( initArgs string, nodes []*integrationTests.TestProcessorNode, ) []byte { - scCode, _ := ioutil.ReadFile(fileName) + scCode, err := ioutil.ReadFile(fileName) + if err != nil { + panic(fmt.Sprintf("putDeploySCToDataPool(): %s", err)) + } + scCodeString := hex.EncodeToString(scCode) scCodeMetadataString := "0000" diff --git a/integrationTests/multiShard/smartContract/testdata/counter.wasm b/integrationTests/multiShard/smartContract/testdata/counter.wasm deleted file mode 100755 index f676e4d9a54..00000000000 Binary files a/integrationTests/multiShard/smartContract/testdata/counter.wasm and /dev/null differ diff --git a/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.c b/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.c index df88ee1aa01..20da068dcab 100644 --- a/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.c +++ b/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.c @@ -4,8 +4,8 @@ typedef unsigned long long i64; typedef unsigned int bigInt; -int int64storageStore(byte *key, i64 value); -i64 int64storageLoad(byte *key); +int int64storageStore(byte *key, int keyLength, long long value); +long long int64storageLoad(byte *key, int keyLength); i64 int64getArgument(int argumentIndex); int getCallValue(byte *result); void asyncCall(byte *destination, byte *value, byte *data, int length); @@ -20,9 +20,9 @@ byte data[270] = "stake@01@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa void delegate() { i64 stake = int64getArgument(0); - i64 totalStake = int64storageLoad(totalStakeKey); + i64 totalStake = int64storageLoad(totalStakeKey, 32); totalStake += stake; - int64storageStore(totalStakeKey, totalStake); + int64storageStore(totalStakeKey, 32, totalStake); } void sendToStaking() diff --git a/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.wasm b/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.wasm index a4ffa676379..41107849d26 100755 Binary files a/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.wasm and b/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.wasm differ diff --git a/integrationTests/multiShard/smartContract/testdata/delegate-mock/output/delegate.wasm b/integrationTests/multiShard/smartContract/testdata/delegate-mock/output/delegate.wasm new file mode 100755 index 00000000000..41107849d26 Binary files /dev/null and b/integrationTests/multiShard/smartContract/testdata/delegate-mock/output/delegate.wasm differ diff --git a/integrationTests/multiShard/smartContract/testdata/delegate/delegation.wasm b/integrationTests/multiShard/smartContract/testdata/delegate/delegation.wasm index bf8f22fc628..5bfed3fe064 100755 Binary files a/integrationTests/multiShard/smartContract/testdata/delegate/delegation.wasm and b/integrationTests/multiShard/smartContract/testdata/delegate/delegation.wasm differ diff --git a/integrationTests/multiShard/smartContract/testdata/first/first.c b/integrationTests/multiShard/smartContract/testdata/first/first.c index 95c7f5edf3e..831adda7e72 100644 --- a/integrationTests/multiShard/smartContract/testdata/first/first.c +++ 
b/integrationTests/multiShard/smartContract/testdata/first/first.c @@ -2,15 +2,15 @@ typedef unsigned char byte; typedef unsigned int i32; typedef unsigned long long i64; -int int64storageStore(byte *key, i64 value); -i64 int64storageLoad(byte *key); +int int64storageStore(byte *key, int keyLength, long long value); +long long int64storageLoad(byte *key, int keyLength); void int64finish(i64 value); byte counterKey[32] = {42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42}; void init() { - int64storageStore(counterKey, 0); + int64storageStore(counterKey, 32, 0); } void callBack() { @@ -18,14 +18,14 @@ void callBack() { void callMe() { - i64 counter = int64storageLoad(counterKey); + i64 counter = int64storageLoad(counterKey, 32); counter++; - int64storageStore(counterKey, counter); + int64storageStore(counterKey, 32, counter); } void numCalled() { - i64 counter = int64storageLoad(counterKey); + i64 counter = int64storageLoad(counterKey, 32); int64finish(counter); } diff --git a/integrationTests/multiShard/smartContract/testdata/first/first.wasm b/integrationTests/multiShard/smartContract/testdata/first/first.wasm index 9a197efadbf..69a78b964e8 100755 Binary files a/integrationTests/multiShard/smartContract/testdata/first/first.wasm and b/integrationTests/multiShard/smartContract/testdata/first/first.wasm differ diff --git a/integrationTests/multiShard/smartContract/testdata/first/output/first.wasm b/integrationTests/multiShard/smartContract/testdata/first/output/first.wasm new file mode 100755 index 00000000000..69a78b964e8 Binary files /dev/null and b/integrationTests/multiShard/smartContract/testdata/first/output/first.wasm differ diff --git a/integrationTests/singleShard/block/executingMiniblocksSc/executingMiniblocksSc_test.go b/integrationTests/singleShard/block/executingMiniblocksSc/executingMiniblocksSc_test.go index aad7c843454..49d90059d8a 100644 --- a/integrationTests/singleShard/block/executingMiniblocksSc/executingMiniblocksSc_test.go +++ b/integrationTests/singleShard/block/executingMiniblocksSc/executingMiniblocksSc_test.go @@ -81,7 +81,7 @@ func TestShouldProcessWithScTxsJoinAndRewardOneRound(t *testing.T) { integrationTests.MintAllNodes(nodes, initialVal) integrationTests.MintAllPlayers(nodes, players, initialVal) - integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine) + integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine, "") time.Sleep(block.StepDelay) integrationTests.ProposeBlock(nodes, []int{idxProposer}, round, nonce) time.Sleep(block.StepDelay) @@ -171,7 +171,7 @@ func TestShouldProcessMultipleERC20ContractsInSingleShard(t *testing.T) { t.Skip("this is not a short test") } - scCode, err := ioutil.ReadFile("./wrc20_arwen_01.wasm") + scCode, err := ioutil.ReadFile("../../../vm/arwen/testdata/erc20-c-03/wrc20_arwen.wasm") assert.Nil(t, err) maxShards := uint32(1) @@ -224,7 +224,7 @@ func TestShouldProcessMultipleERC20ContractsInSingleShard(t *testing.T) { integrationTests.MintAllNodes(nodes, initialVal) integrationTests.MintAllPlayers(nodes, players, initialVal) - integrationTests.DeployScTx(nodes, idxProposer, hex.EncodeToString(scCode), factory.ArwenVirtualMachine) + integrationTests.DeployScTx(nodes, idxProposer, hex.EncodeToString(scCode), factory.ArwenVirtualMachine, "001000000000") time.Sleep(block.StepDelay) round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, []int{idxProposer}, round, nonce) diff --git 
a/integrationTests/singleShard/block/executingMiniblocksSc/wrc20_arwen_01.wasm b/integrationTests/singleShard/block/executingMiniblocksSc/wrc20_arwen_01.wasm deleted file mode 100755 index 5790ea43f40..00000000000 Binary files a/integrationTests/singleShard/block/executingMiniblocksSc/wrc20_arwen_01.wasm and /dev/null differ diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go index 1965ea940b1..5eb90e8cafb 100644 --- a/integrationTests/state/stateTrie/stateTrie_test.go +++ b/integrationTests/state/stateTrie/stateTrie_test.go @@ -252,7 +252,8 @@ func TestTrieDB_RecreateFromStorageShouldWork(t *testing.T) { ewl, _ := evictionWaitingList.NewEvictionWaitingList(evictionWaitListSize, memorydb.New(), integrationTests.TestMarshalizer) trieStorage, _ := trie.NewTrieStorageManager(store, integrationTests.TestMarshalizer, hasher, config.DBConfig{}, ewl, config.TrieStorageManagerConfig{}) - tr1, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, hasher) + maxTrieLevelInMemory := uint(5) + tr1, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, hasher, maxTrieLevelInMemory) key := hasher.Compute("key") value := hasher.Compute("value") @@ -313,7 +314,8 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te ewl, _ := evictionWaitingList.NewEvictionWaitingList(100, memorydb.New(), integrationTests.TestMarshalizer) trieStorage, _ := trie.NewTrieStorageManager(mu, integrationTests.TestMarshalizer, integrationTests.TestHasher, config.DBConfig{}, ewl, config.TrieStorageManagerConfig{}) - tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher) + maxTrieLevelInMemory := uint(5) + tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher, maxTrieLevelInMemory) adb, _ = state.NewAccountsDB(tr, integrationTests.TestHasher, integrationTests.TestMarshalizer, factory.NewAccountCreator()) //reloading a new trie to test if data is inside @@ -1029,7 +1031,8 @@ func createAccounts( ewl, _ := evictionWaitingList.NewEvictionWaitingList(evictionWaitListSize, memorydb.New(), integrationTests.TestMarshalizer) trieStorage, _ := trie.NewTrieStorageManager(store, integrationTests.TestMarshalizer, integrationTests.TestHasher, config.DBConfig{}, ewl, config.TrieStorageManagerConfig{}) - tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher) + maxTrieLevelInMemory := uint(5) + tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher, maxTrieLevelInMemory) adb, _ := state.NewAccountsDB(tr, integrationTests.TestHasher, integrationTests.TestMarshalizer, factory.NewAccountCreator()) addr := make([][]byte, nrOfAccounts) @@ -1109,7 +1112,8 @@ func TestTrieDbPruning_GetAccountAfterPruning(t *testing.T) { evictionWaitListSize := uint(100) ewl, _ := evictionWaitingList.NewEvictionWaitingList(evictionWaitListSize, memorydb.New(), integrationTests.TestMarshalizer) trieStorage, _ := trie.NewTrieStorageManager(memorydb.New(), integrationTests.TestMarshalizer, integrationTests.TestHasher, config.DBConfig{}, ewl, generalCfg) - tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher) + maxTrieLevelInMemory := uint(5) + tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher, maxTrieLevelInMemory) adb, _ := state.NewAccountsDB(tr, integrationTests.TestHasher, 
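
Every trie built in these tests now passes the new in-memory depth argument explicitly. A condensed sketch of the updated constructor call, following the call sites in this diff; the helper name is ours, and the storage manager, marshalizer and hasher are assumed to come from the surrounding test setup (data.StorageManager and data.Trie as used by createTries earlier):

    // newTestTrie mirrors the updated trie.NewTrie call sites in this diff
    func newTestTrie(sm data.StorageManager, m marshal.Marshalizer, h hashing.Hasher) (data.Trie, error) {
        // presumably bounds how many trie levels stay in memory before
        // nodes are committed to the storage manager
        maxTrieLevelInMemory := uint(5)
        return trie.NewTrie(sm, m, h, maxTrieLevelInMemory)
    }
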
integrationTests.TestMarshalizer, factory.NewAccountCreator()) hexPubkeyConverter, _ := pubkeyConverter.NewHexPubkeyConverter(32) @@ -1150,7 +1154,8 @@ func TestAccountsDB_RecreateTrieInvalidatesDataTriesCache(t *testing.T) { evictionWaitListSize := uint(100) ewl, _ := evictionWaitingList.NewEvictionWaitingList(evictionWaitListSize, memorydb.New(), integrationTests.TestMarshalizer) trieStorage, _ := trie.NewTrieStorageManager(memorydb.New(), integrationTests.TestMarshalizer, integrationTests.TestHasher, config.DBConfig{}, ewl, generalCfg) - tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher) + maxTrieLevelInMemory := uint(5) + tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher, maxTrieLevelInMemory) adb, _ := state.NewAccountsDB(tr, integrationTests.TestHasher, integrationTests.TestMarshalizer, factory.NewAccountCreator()) hexAddressPubkeyConverter, _ := pubkeyConverter.NewHexPubkeyConverter(32) @@ -1203,7 +1208,8 @@ func TestTrieDbPruning_GetDataTrieTrackerAfterPruning(t *testing.T) { evictionWaitListSize := uint(100) ewl, _ := evictionWaitingList.NewEvictionWaitingList(evictionWaitListSize, memorydb.New(), integrationTests.TestMarshalizer) trieStorage, _ := trie.NewTrieStorageManager(memorydb.New(), integrationTests.TestMarshalizer, integrationTests.TestHasher, config.DBConfig{}, ewl, generalCfg) - tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher) + maxTrieLevelInMemory := uint(5) + tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher, maxTrieLevelInMemory) adb, _ := state.NewAccountsDB(tr, integrationTests.TestHasher, integrationTests.TestMarshalizer, factory.NewAccountCreator()) hexAddressPubkeyConverter, _ := pubkeyConverter.NewHexPubkeyConverter(32) diff --git a/integrationTests/testGameHelperFunctions.go b/integrationTests/testGameHelperFunctions.go index 5bfea8df0b6..84979b68430 100644 --- a/integrationTests/testGameHelperFunctions.go +++ b/integrationTests/testGameHelperFunctions.go @@ -50,10 +50,14 @@ func ScCallTxWithParams( } // DeployScTx creates and sends a SC tx -func DeployScTx(nodes []*TestProcessorNode, senderIdx int, scCode string, vmType []byte) { +func DeployScTx(nodes []*TestProcessorNode, senderIdx int, scCode string, vmType []byte, initArguments string) { fmt.Println("Deploying SC...") scCodeMetadataString := "0000" data := scCode + "@" + hex.EncodeToString(vmType) + "@" + scCodeMetadataString + if len(initArguments) > 0 { + data += "@" + initArguments + } + txDeploy := generateTx( nodes[senderIdx].OwnAccount.SkTxSign, nodes[senderIdx].OwnAccount.SingleSigner, diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index fab47004f1d..28c50a3a2b6 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "fmt" "io/ioutil" + "math" "math/big" "strings" "sync" @@ -75,6 +76,7 @@ const ( shuffleBetweenShards = false adaptivity = false hysteresis = float32(0.2) + maxTrieLevelInMemory = uint(5) ) // Type defines account types to save in accounts trie @@ -390,7 +392,7 @@ func CreateAccountsDB( accountType Type, trieStorageManager data.StorageManager, ) (*state.AccountsDB, data.Trie) { - tr, _ := trie.NewTrie(trieStorageManager, TestMarshalizer, TestHasher) + tr, _ := trie.NewTrie(trieStorageManager, TestMarshalizer, TestHasher, maxTrieLevelInMemory) accountFactory := 
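
With the new initArguments parameter, the deploy payload is code@vmType@codeMetadata, with constructor arguments appended only when non-empty. A small sketch of the assembly; buildDeployTxData is our name, and "001000000000" echoes the value the ERC20 test above passes (presumably the hex-encoded initial token supply):

    // buildDeployTxData mirrors the payload assembly inside DeployScTx
    func buildDeployTxData(scCode string, vmType []byte, initArguments string) string {
        scCodeMetadataString := "0000"
        data := scCode + "@" + hex.EncodeToString(vmType) + "@" + scCodeMetadataString
        if len(initArguments) > 0 {
            data += "@" + initArguments
        }
        return data
    }

    // e.g. the ERC20 deployment earlier in this diff:
    // buildDeployTxData(hex.EncodeToString(scCode), factory.ArwenVirtualMachine, "001000000000")
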
getAccountFactory(accountType) adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, TestMarshalizer, accountFactory) @@ -778,7 +780,8 @@ func CreateNewDefaultTrie() data.Trie { MaxSnapshots: 2, } trieStorage, _ := trie.NewTrieStorageManager(CreateMemUnit(), TestMarshalizer, TestHasher, config.DBConfig{}, ewl, generalCfg) - tr, _ := trie.NewTrie(trieStorage, TestMarshalizer, TestHasher) + + tr, _ := trie.NewTrie(trieStorage, TestMarshalizer, TestHasher, maxTrieLevelInMemory) return tr } @@ -1154,7 +1157,7 @@ func CreateAndSendTransaction( RcvAddr: rcvAddress, Data: []byte(txData), GasPrice: MinTxGasPrice, - GasLimit: MinTxGasLimit*100 + uint64(len(txData)), + GasLimit: MinTxGasLimit*1000 + uint64(len(txData)), } txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer) @@ -1738,35 +1741,71 @@ func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string, _ uint32) map[uint return validatorsMap } +// GenValidatorsFromPubKeysAndTxPubKeys generates a map of validators per shard out of the BLS and transaction public key maps +func GenValidatorsFromPubKeysAndTxPubKeys( + blsPubKeysMap map[uint32][]string, + txPubKeysMap map[uint32][]string, +) map[uint32][]sharding.GenesisNodeInfoHandler { + validatorsMap := make(map[uint32][]sharding.GenesisNodeInfoHandler) + + for shardId, shardNodesPks := range blsPubKeysMap { + var shardValidators []sharding.GenesisNodeInfoHandler + for i := 0; i < len(shardNodesPks); i++ { + v := mock.NewNodeInfo([]byte(txPubKeysMap[shardId][i]), []byte(shardNodesPks[i]), shardId) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + // CreateCryptoParams generates the crypto parameters (key pairs, key generator and suite) for multiple nodes func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *CryptoParams { + txSuite := ed25519.NewEd25519() + txKeyGen := signing.NewKeyGenerator(txSuite) suite := mcl.NewSuiteBLS12() singleSigner := &ed25519SingleSig.Ed25519Signer{} keyGen := signing.NewKeyGenerator(suite) + txKeysMap := make(map[uint32][]*TestKeyPair) keysMap := make(map[uint32][]*TestKeyPair) for shardId := uint32(0); shardId < nbShards; shardId++ { + txKeyPairs := make([]*TestKeyPair, nodesPerShard) keyPairs := make([]*TestKeyPair, nodesPerShard) for n := 0; n < nodesPerShard; n++ { kp := &TestKeyPair{} kp.Sk, kp.Pk = keyGen.GeneratePair() keyPairs[n] = kp + + txKp := &TestKeyPair{} + txKp.Sk, txKp.Pk = txKeyGen.GeneratePair() + txKeyPairs[n] = txKp } keysMap[shardId] = keyPairs + txKeysMap[shardId] = txKeyPairs } + txKeyPairs := make([]*TestKeyPair, nbMetaNodes) keyPairs := make([]*TestKeyPair, nbMetaNodes) for n := 0; n < nbMetaNodes; n++ { kp := &TestKeyPair{} kp.Sk, kp.Pk = keyGen.GeneratePair() keyPairs[n] = kp + + txKp := &TestKeyPair{} txKp.Sk, txKp.Pk = txKeyGen.GeneratePair() + txKeyPairs[n] = txKp } keysMap[core.MetachainShardId] = keyPairs + txKeysMap[core.MetachainShardId] = txKeyPairs params := &CryptoParams{ Keys: keysMap, KeyGen: keyGen, SingleSigner: singleSigner, + TxKeyGen: txKeyGen, + TxKeys: txKeysMap, } return params @@ -1949,11 +1988,13 @@ func createTxPool(selfShardID uint32) (dataRetriever.ShardedDataCacherNotifier, return txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 16, + Size: 100000, + SizePerSender: math.MaxUint32, + SizeInBytes: 1000000000, + SizeInBytesPerSender: math.MaxUint32, + Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000,
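
The test pool now fills in the two new per-sender fields but pins them to math.MaxUint32, which effectively disables per-sender eviction in the integration tests; the intent of those limits is that a single busy sender cannot crowd everyone else's transactions out of the cache. An illustrative literal with the caps actually engaged (the per-sender numbers are ours, not from this diff):

    // illustrative only: bounding the tx cache per sender as well as globally
    var illustrativeCfg = storageUnit.CacheConfig{
        Size:                 100000,
        SizePerSender:        500,     // per-sender item cap (illustrative)
        SizeInBytes:          1000000000,
        SizeInBytesPerSender: 1 << 20, // per-sender byte cap (illustrative)
        Shards:               16,
    }
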
NumberOfShards: 1, SelfShardID: selfShardID, }, diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 255f586d7a2..bdaf4f81c06 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -113,10 +113,10 @@ var TestBalanceComputationHandler, _ = preprocess.NewBalanceComputation() var MinTxGasPrice = uint64(10) // MinTxGasLimit defines minimum gas limit required by a transaction -var MinTxGasLimit = uint64(1000) +var MinTxGasLimit = uint64(1_000) // MaxGasLimitPerBlock defines maximum gas limit allowed per one block -const MaxGasLimitPerBlock = uint64(300000) +const MaxGasLimitPerBlock = uint64(3_000_000) const maxTxNonceDeltaAllowed = 8000 const minConnectedPeers = 0 @@ -155,6 +155,8 @@ type CryptoParams struct { KeyGen crypto.KeyGenerator Keys map[uint32][]*TestKeyPair SingleSigner crypto.SingleSigner + TxKeyGen crypto.KeyGenerator + TxKeys map[uint32][]*TestKeyPair } // TestProcessorNode represents a container type of class used in integration tests @@ -837,7 +839,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.ShardCoordinator, ) - gasSchedule := arwenConfig.MakeGasMap(1) + gasSchedule := arwenConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasMap: gasSchedule, @@ -1167,9 +1169,8 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { argumentsBase.EpochStartTrigger = tpn.EpochStartTrigger argumentsBase.TxCoordinator = tpn.TxCoordinator - blsKeyedPubkeyConverter, _ := pubkeyConverter.NewHexPubkeyConverter(128) argsStakingToPeer := scToProtocol.ArgStakingToPeer{ - PubkeyConv: blsKeyedPubkeyConverter, + PubkeyConv: TestValidatorPubkeyConverter, Hasher: TestHasher, ProtoMarshalizer: TestMarshalizer, VmMarshalizer: TestVmMarshalizer, @@ -1409,7 +1410,7 @@ func (tpn *TestProcessorNode) StartSync() error { return errors.New("no bootstrapper available") } - tpn.Bootstrapper.StartSync() + tpn.Bootstrapper.StartSyncingBlocks() return nil } diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index be719c3c484..cc0c51cfaf5 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -2,6 +2,7 @@ package integrationTests import ( "bytes" + "encoding/hex" "fmt" "strconv" "testing" @@ -112,6 +113,170 @@ func CreateNodesWithNodesCoordinatorWithCacher( } +// CreateNodesWithNodesCoordinatorAndTxKeys - +func CreateNodesWithNodesCoordinatorAndTxKeys( + nodesPerShard int, + nbMetaNodes int, + nbShards int, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + seedAddress string, +) map[uint32][]*TestProcessorNode { + rater, _ := rating.NewBlockSigningRater(CreateRatingsData()) + coordinatorFactory := &IndexHashedNodesCoordinatorWithRaterFactory{ + PeerAccountListAndRatingHandler: rater, + } + cp := CreateCryptoParams(nodesPerShard, nbMetaNodes, uint32(nbShards)) + blsPubKeys := PubKeysMapFromKeysMap(cp.Keys) + txPubKeys := PubKeysMapFromKeysMap(cp.TxKeys) + validatorsMap := GenValidatorsFromPubKeysAndTxPubKeys(blsPubKeys, txPubKeys) + validatorsMapForNodesCoordinator, _ := sharding.NodesInfoToValidators(validatorsMap) + + waitingMap := make(map[uint32][]sharding.GenesisNodeInfoHandler) + for i := 0; i < nbShards; i++ { + waitingMap[uint32(i)] = make([]sharding.GenesisNodeInfoHandler, 0) + } + waitingMap[core.MetachainShardId] = 
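
The two gas bumps in testProcessorNode.go are linked to the CreateAndSendTransaction change above: each test transaction now requests MinTxGasLimit*1000 + len(txData) gas, roughly one million, which could never fit under the old 300,000 per-block ceiling. A back-of-the-envelope check:

    // rough check that one of the new test transactions fits in a block
    func gasFits(txData string) bool {
        const minTxGasLimit = uint64(1_000)
        const maxGasLimitPerBlock = uint64(3_000_000)
        txGas := minTxGasLimit*1000 + uint64(len(txData))
        return txGas <= maxGasLimitPerBlock // e.g. 1_000_023 <= 3_000_000 for a 23-byte payload
    }
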
make([]sharding.GenesisNodeInfoHandler, 0) + + waitingMapForNodesCoordinator := make(map[uint32][]sharding.Validator) + for i := 0; i < nbShards; i++ { + waitingMapForNodesCoordinator[uint32(i)] = make([]sharding.Validator, 0) + } + waitingMapForNodesCoordinator[core.MetachainShardId] = make([]sharding.Validator, 0) + + nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]sharding.GenesisNodeInfoHandler, m2 map[uint32][]sharding.GenesisNodeInfoHandler) { + return validatorsMap, waitingMap + }} + + nodesMap := make(map[uint32][]*TestProcessorNode) + + for shardId, validatorList := range validatorsMap { + nodesList := make([]*TestProcessorNode, len(validatorList)) + + for i := range validatorList { + dataCache, _ := lrucache.NewCache(10000) + nodesList[i] = CreateNodeWithBLSAndTxKeys( + nodesPerShard, + nbMetaNodes, + shardConsensusGroupSize, + metaConsensusGroupSize, + shardId, + nbShards, + validatorsMapForNodesCoordinator, + waitingMapForNodesCoordinator, + i, + seedAddress, + cp, + dataCache, + coordinatorFactory, + nodesSetup, + nil, + ) + } + + nodesMap[shardId] = append(nodesMap[shardId], nodesList...) + } + + return nodesMap +} + +// CreateNodeWithBLSAndTxKeys - +func CreateNodeWithBLSAndTxKeys( + nodesPerShard int, + nbMetaNodes int, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + shardId uint32, + nbShards int, + validatorsMap map[uint32][]sharding.Validator, + waitingMap map[uint32][]sharding.Validator, + keyIndex int, + seedAddress string, + cp *CryptoParams, + cache sharding.Cacher, + coordinatorFactory NodesCoordinatorFactory, + nodesSetup sharding.GenesisNodesSetupHandler, + ratingsData *rating.RatingsData, +) *TestProcessorNode { + + epochStartSubscriber := &mock.EpochStartNotifierStub{} + bootStorer := CreateMemUnit() + argFactory := ArgIndexHashedNodesCoordinatorFactory{ + nodesPerShard, + nbMetaNodes, + shardConsensusGroupSize, + metaConsensusGroupSize, + shardId, + nbShards, + validatorsMap, + waitingMap, + keyIndex, + cp, + epochStartSubscriber, + TestHasher, + cache, + bootStorer, + } + nodesCoordinator := coordinatorFactory.CreateNodesCoordinator(argFactory) + + shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(nbShards), shardId) + + messenger := CreateMessengerWithKadDht(seedAddress) + tpn := &TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, + ChainID: ChainID, + NodesSetup: nodesSetup, + RatingsData: ratingsData, + } + + tpn.NodeKeys = cp.Keys[shardId][keyIndex] + blsHasher := &blake2b.Blake2b{HashSize: hashing.BlsHashSize} + llsig := &mclmultisig.BlsMultiSigner{Hasher: blsHasher} + + pubKeysMap := PubKeysMapFromKeysMap(cp.Keys) + + tpn.MultiSigner, _ = multisig.NewBLSMultisig( + llsig, + pubKeysMap[shardId], + tpn.NodeKeys.Sk, + cp.KeyGen, + 0, + ) + if tpn.MultiSigner == nil { + fmt.Println("Error generating multisigner") + } + twa := &TestWalletAccount{} + twa.SingleSigner = cp.SingleSigner + twa.BlockSingleSigner = &mock.SignerMock{ + VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { + return nil + }, + } + sk := cp.TxKeys[shardId][keyIndex].Sk + pk := cp.TxKeys[shardId][keyIndex].Pk + keyGen := cp.TxKeyGen + + pkBuff, _ := pk.ToByteArray() + fmt.Printf("Found pk: %s in shard %d\n", hex.EncodeToString(pkBuff), shardId) + + twa.SkTxSign = sk + twa.PkTxSign = pk + twa.PkTxSignBytes, _ = pk.ToByteArray() + twa.KeygenTxSign = keyGen + twa.KeygenBlockSign = 
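
CreateNodeWithBLSAndTxKeys wires two distinct key pairs into each test node: the BLS pair (mcl suite) signs blocks and consensus messages, while the ed25519 pair signs transactions and its public key bytes double as the account address. A condensed sketch of the generation step from CreateCryptoParams; TestKeyPair and the signing, mcl and ed25519 packages are assumed to be the ones already imported by testInitializer.go:

    // condensed from CreateCryptoParams: one BLS pair for consensus and
    // one ed25519 pair for transactions, generated side by side
    func generateNodeKeys() (blockKeys, txKeys *TestKeyPair) {
        blsKeyGen := signing.NewKeyGenerator(mcl.NewSuiteBLS12())
        txKeyGen := signing.NewKeyGenerator(ed25519.NewEd25519())

        blockKeys = &TestKeyPair{}
        blockKeys.Sk, blockKeys.Pk = blsKeyGen.GeneratePair() // consensus signatures

        txKeys = &TestKeyPair{}
        txKeys.Sk, txKeys.Pk = txKeyGen.GeneratePair() // tx signatures; pk bytes become the address

        return blockKeys, txKeys
    }
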
&mock.KeyGenMock{} + twa.Address = twa.PkTxSignBytes + tpn.OwnAccount = twa + + tpn.EpochStartNotifier = epochStartSubscriber + tpn.initDataPools() + tpn.initTestNode() + + return tpn +} + // CreateNodesWithNodesCoordinatorFactory returns a map with nodes per shard each using a real nodes coordinator func CreateNodesWithNodesCoordinatorFactory( nodesPerShard int, @@ -190,6 +355,7 @@ func CreateNodesWithNodesCoordinatorFactory( return nodesMap } +// CreateNode - func CreateNode( nodesPerShard int, nbMetaNodes int, diff --git a/integrationTests/vm/arwen/testdata/bad-misc/bad.c b/integrationTests/vm/arwen/testdata/bad-misc/bad.c index 50f3c1acb02..1425d540d23 100644 --- a/integrationTests/vm/arwen/testdata/bad-misc/bad.c +++ b/integrationTests/vm/arwen/testdata/bad-misc/bad.c @@ -86,7 +86,7 @@ void badWriteLog4() void badBigIntStorageStore1() { bigInt number = bigIntNew(100); - bigIntStorageStoreUnsigned("test", number + 42); + bigIntStorageStoreUnsigned("test", 32, number + 42); } i64 doStackoverflow(i64 a) { diff --git a/integrationTests/vm/arwen/testdata/bad-misc/bad.wasm b/integrationTests/vm/arwen/testdata/bad-misc/bad.wasm index e21b972ddb6..d3cb8d7a504 100755 Binary files a/integrationTests/vm/arwen/testdata/bad-misc/bad.wasm and b/integrationTests/vm/arwen/testdata/bad-misc/bad.wasm differ diff --git a/integrationTests/vm/arwen/testdata/bad-misc/elrond/bigInt.h b/integrationTests/vm/arwen/testdata/bad-misc/elrond/bigInt.h index 2083e2ea126..922faabb0b8 100644 --- a/integrationTests/vm/arwen/testdata/bad-misc/elrond/bigInt.h +++ b/integrationTests/vm/arwen/testdata/bad-misc/elrond/bigInt.h @@ -1,3 +1,6 @@ +#ifndef _BIGINT_H_ +#define _BIGINT_H_ + #include "types.h" typedef unsigned int bigInt; @@ -7,8 +10,8 @@ bigInt bigIntNew(long long value); void bigIntGetUnsignedArgument(int argumentIndex, bigInt argument); void bigIntGetSignedArgument(int argumentIndex, bigInt argument); -int bigIntStorageLoadUnsigned(byte *key, bigInt value); -int bigIntStorageStoreUnsigned(byte *key, bigInt value); +int bigIntStorageLoadUnsigned(byte *key, int keyLength, bigInt value); +int bigIntStorageStoreUnsigned(byte *key, int keyLength, bigInt value); void bigIntAdd(bigInt destination, bigInt op1, bigInt op2); void bigIntSub(bigInt destination, bigInt op1, bigInt op2); @@ -20,9 +23,14 @@ long long bigIntGetInt64(bigInt reference); void bigIntSetInt64(bigInt destination, long long value); void bigIntFinishUnsigned(bigInt reference); +void bigIntFinishSigned(bigInt reference); void bigIntGetCallValue(bigInt destination); void bigIntGetExternalBalance(byte *address, bigInt result); int bigIntByteLength(bigInt reference); int bigIntGetUnsignedBytes(bigInt reference, byte *byte); +int bigIntGetSignedBytes(bigInt reference, byte *byte); void bigIntSetUnsignedBytes(bigInt destination, byte *byte, int byteLength); +void bigIntSetSignedBytes(bigInt destination, byte *byte, int byteLength); + +#endif diff --git a/integrationTests/vm/arwen/testdata/bad-misc/elrond/context.h b/integrationTests/vm/arwen/testdata/bad-misc/elrond/context.h index 1c2e1b34fa5..8a982b7e170 100644 --- a/integrationTests/vm/arwen/testdata/bad-misc/elrond/context.h +++ b/integrationTests/vm/arwen/testdata/bad-misc/elrond/context.h @@ -1,3 +1,6 @@ +#ifndef _CONTEXT_H_ +#define _CONTEXT_H_ + #include "types.h" void getOwner(byte *ownerAddress); @@ -10,7 +13,11 @@ long long getGasLeft(); void finish(byte *data, int length); void int64finish(long long value); void writeLog(byte *pointer, int length, byte *topicPtr, int numTopics); -void 
signalError(byte *messagePointer, int messageLength); +void asyncCall(byte *destination, byte *value, byte *data, int length); +void signalError(byte *message, int length); + +int executeOnSameContext(long long gas, byte *address, byte *value, byte *function, int functionLength, int numArguments, byte *argumentsLengths, byte *arguments); +int executeOnDestContext(long long gas, byte *address, byte *value, byte *function, int functionLength, int numArguments, byte *argumentsLengths, byte *arguments); // Blockchain-related functions long long getBlockTimestamp(); @@ -20,13 +27,17 @@ int getBlockHash(long long nonce, byte *hash); int getNumArguments(); int getArgument(int argumentIndex, byte *argument); long long int64getArgument(int argumentIndex); +int getArgumentLength(int argumentIndex); // Account-related functions void getExternalBalance(byte *address, byte *balance); -int transfer(long long gasLimit, byte *destination, byte *sender, byte *value, byte *data, int length); +int transferValue(byte *destination, byte *value, byte *data, int length); // Storage-related functions -int storageStore(byte *key, byte *data, int dataLength); -int storageLoad(byte *key, byte *data); -int int64storageStore(byte *key, long long value); -long long int64storageLoad(byte *key); +int storageLoadLength(byte *key, int keyLength); +int storageStore(byte *key, int keyLength, byte *data, int dataLength); +int storageLoad(byte *key, int keyLength, byte *data); +int int64storageStore(byte *key, int keyLength, long long value); +long long int64storageLoad(byte *key, int keyLength); + +#endif diff --git a/integrationTests/vm/arwen/testdata/bad-misc/output/bad.wasm b/integrationTests/vm/arwen/testdata/bad-misc/output/bad.wasm new file mode 100755 index 00000000000..d3cb8d7a504 Binary files /dev/null and b/integrationTests/vm/arwen/testdata/bad-misc/output/bad.wasm differ diff --git a/integrationTests/vm/arwen/testdata/counter/counter.c b/integrationTests/vm/arwen/testdata/counter/counter.c new file mode 100644 index 00000000000..079f1a77cdb --- /dev/null +++ b/integrationTests/vm/arwen/testdata/counter/counter.c @@ -0,0 +1,26 @@ +#include "../elrond/context.h" + +byte counterKey[32] = {'m','y','c','o','u','n','t','e','r',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + +void init() { + int64storageStore(counterKey, 32, 1); +} + +void increment() { + i64 counter = int64storageLoad(counterKey, 32); + counter++; + int64storageStore(counterKey, 32, counter); + int64finish(counter); +} + +void decrement() { + i64 counter = int64storageLoad(counterKey, 32); + counter--; + int64storageStore(counterKey, 32, counter); + int64finish(counter); +} + +void get() { + i64 counter = int64storageLoad(counterKey, 32); + int64finish(counter); +} diff --git a/integrationTests/vm/arwen/testdata/counter/counter.export b/integrationTests/vm/arwen/testdata/counter/counter.export new file mode 100644 index 00000000000..91723bf1bc3 --- /dev/null +++ b/integrationTests/vm/arwen/testdata/counter/counter.export @@ -0,0 +1,4 @@ +init +increment +decrement +get diff --git a/integrationTests/vm/arwen/testdata/counter/counter.wasm b/integrationTests/vm/arwen/testdata/counter/counter.wasm new file mode 100755 index 00000000000..1dfd188f777 Binary files /dev/null and b/integrationTests/vm/arwen/testdata/counter/counter.wasm differ diff --git a/integrationTests/vm/arwen/testdata/counter/output/counter.wasm b/integrationTests/vm/arwen/testdata/counter/output/counter.wasm new file mode 100755 index 00000000000..1dfd188f777 Binary files /dev/null and 
b/integrationTests/vm/arwen/testdata/counter/output/counter.wasm differ diff --git a/integrationTests/vm/arwen/testdata/erc20-c-03/elrond/bigInt.h b/integrationTests/vm/arwen/testdata/erc20-c-03/elrond/bigInt.h index 2083e2ea126..922faabb0b8 100644 --- a/integrationTests/vm/arwen/testdata/erc20-c-03/elrond/bigInt.h +++ b/integrationTests/vm/arwen/testdata/erc20-c-03/elrond/bigInt.h @@ -1,3 +1,6 @@ +#ifndef _BIGINT_H_ +#define _BIGINT_H_ + #include "types.h" typedef unsigned int bigInt; @@ -7,8 +10,8 @@ bigInt bigIntNew(long long value); void bigIntGetUnsignedArgument(int argumentIndex, bigInt argument); void bigIntGetSignedArgument(int argumentIndex, bigInt argument); -int bigIntStorageLoadUnsigned(byte *key, bigInt value); -int bigIntStorageStoreUnsigned(byte *key, bigInt value); +int bigIntStorageLoadUnsigned(byte *key, int keyLength, bigInt value); +int bigIntStorageStoreUnsigned(byte *key, int keyLength, bigInt value); void bigIntAdd(bigInt destination, bigInt op1, bigInt op2); void bigIntSub(bigInt destination, bigInt op1, bigInt op2); @@ -20,9 +23,14 @@ long long bigIntGetInt64(bigInt reference); void bigIntSetInt64(bigInt destination, long long value); void bigIntFinishUnsigned(bigInt reference); +void bigIntFinishSigned(bigInt reference); void bigIntGetCallValue(bigInt destination); void bigIntGetExternalBalance(byte *address, bigInt result); int bigIntByteLength(bigInt reference); int bigIntGetUnsignedBytes(bigInt reference, byte *byte); +int bigIntGetSignedBytes(bigInt reference, byte *byte); void bigIntSetUnsignedBytes(bigInt destination, byte *byte, int byteLength); +void bigIntSetSignedBytes(bigInt destination, byte *byte, int byteLength); + +#endif diff --git a/integrationTests/vm/arwen/testdata/erc20-c-03/elrond/context.h b/integrationTests/vm/arwen/testdata/erc20-c-03/elrond/context.h index 1c2e1b34fa5..8a982b7e170 100644 --- a/integrationTests/vm/arwen/testdata/erc20-c-03/elrond/context.h +++ b/integrationTests/vm/arwen/testdata/erc20-c-03/elrond/context.h @@ -1,3 +1,6 @@ +#ifndef _CONTEXT_H_ +#define _CONTEXT_H_ + #include "types.h" void getOwner(byte *ownerAddress); @@ -10,7 +13,11 @@ long long getGasLeft(); void finish(byte *data, int length); void int64finish(long long value); void writeLog(byte *pointer, int length, byte *topicPtr, int numTopics); -void signalError(byte *messagePointer, int messageLength); +void asyncCall(byte *destination, byte *value, byte *data, int length); +void signalError(byte *message, int length); + +int executeOnSameContext(long long gas, byte *address, byte *value, byte *function, int functionLength, int numArguments, byte *argumentsLengths, byte *arguments); +int executeOnDestContext(long long gas, byte *address, byte *value, byte *function, int functionLength, int numArguments, byte *argumentsLengths, byte *arguments); // Blockchain-related functions long long getBlockTimestamp(); @@ -20,13 +27,17 @@ int getBlockHash(long long nonce, byte *hash); int getNumArguments(); int getArgument(int argumentIndex, byte *argument); long long int64getArgument(int argumentIndex); +int getArgumentLength(int argumentIndex); // Account-related functions void getExternalBalance(byte *address, byte *balance); -int transfer(long long gasLimit, byte *destination, byte *sender, byte *value, byte *data, int length); +int transferValue(byte *destination, byte *value, byte *data, int length); // Storage-related functions -int storageStore(byte *key, byte *data, int dataLength); -int storageLoad(byte *key, byte *data); -int int64storageStore(byte *key, long long 
value); -long long int64storageLoad(byte *key); +int storageLoadLength(byte *key, int keyLength); +int storageStore(byte *key, int keyLength, byte *data, int dataLength); +int storageLoad(byte *key, int keyLength, byte *data); +int int64storageStore(byte *key, int keyLength, long long value); +long long int64storageLoad(byte *key, int keyLength); + +#endif diff --git a/integrationTests/vm/arwen/testdata/erc20-c-03/output/wrc20_arwen.wasm b/integrationTests/vm/arwen/testdata/erc20-c-03/output/wrc20_arwen.wasm new file mode 100755 index 00000000000..f32cba19069 Binary files /dev/null and b/integrationTests/vm/arwen/testdata/erc20-c-03/output/wrc20_arwen.wasm differ diff --git a/integrationTests/vm/arwen/testdata/erc20-c-03/wrc20_arwen.c b/integrationTests/vm/arwen/testdata/erc20-c-03/wrc20_arwen.c index d3660a98ff9..910cbae6903 100644 --- a/integrationTests/vm/arwen/testdata/erc20-c-03/wrc20_arwen.c +++ b/integrationTests/vm/arwen/testdata/erc20-c-03/wrc20_arwen.c @@ -92,11 +92,11 @@ void init() { // set total supply computeTotalSupplyKey(currentKey); - bigIntStorageStoreUnsigned(currentKey, totalSupply); + bigIntStorageStoreUnsigned(currentKey, 32, totalSupply); // sender balance <- total supply computeBalanceKey(currentKey, sender); - bigIntStorageStoreUnsigned(currentKey, totalSupply); + bigIntStorageStoreUnsigned(currentKey, 32, totalSupply); } // getter function: retrieves total token supply @@ -107,7 +107,7 @@ void totalSupply() { // load total supply from storage computeTotalSupplyKey(currentKey); bigInt totalSupply = bigIntNew(0); - bigIntStorageLoadUnsigned(currentKey, totalSupply); + bigIntStorageLoadUnsigned(currentKey, 32, totalSupply); // return total supply as big int bigIntFinishUnsigned(totalSupply); @@ -124,7 +124,7 @@ void balanceOf() { // load balance computeBalanceKey(currentKey, caller); bigInt balance = bigIntNew(0); - bigIntStorageLoadUnsigned(currentKey, balance); + bigIntStorageLoadUnsigned(currentKey, 32, balance); // return balance as big int bigIntFinishUnsigned(balance); @@ -144,7 +144,7 @@ void allowance() { // get allowance computeAllowanceKey(currentKey, sender, recipient); bigInt allowance = bigIntNew(0); - bigIntStorageLoadUnsigned(currentKey, allowance); + bigIntStorageLoadUnsigned(currentKey, 32, allowance); // return allowance as big int bigIntFinishUnsigned(allowance); @@ -172,7 +172,7 @@ void transferToken() { // load sender balance computeBalanceKey(currentKey, sender); bigInt senderBalance = bigIntNew(0); - bigIntStorageLoadUnsigned(currentKey, senderBalance); + bigIntStorageLoadUnsigned(currentKey, 32, senderBalance); // check if enough funds if (bigIntCmp(amount, senderBalance) > 0) { @@ -182,14 +182,14 @@ void transferToken() { // update sender balance bigIntSub(senderBalance, senderBalance, amount); - bigIntStorageStoreUnsigned(currentKey, senderBalance); + bigIntStorageStoreUnsigned(currentKey, 32, senderBalance); // load & update receiver balance computeBalanceKey(currentKey, recipient); bigInt receiverBalance = bigIntNew(0); - bigIntStorageLoadUnsigned(currentKey, receiverBalance); + bigIntStorageLoadUnsigned(currentKey, 32, receiverBalance); bigIntAdd(receiverBalance, receiverBalance, amount); - bigIntStorageStoreUnsigned(currentKey, receiverBalance); + bigIntStorageStoreUnsigned(currentKey, 32, receiverBalance); // log operation saveLogWith3Topics(transferEvent, sender, recipient, amount); @@ -217,7 +217,7 @@ void approve() { // store allowance computeAllowanceKey(currentKey, sender, recipient); - bigIntStorageStoreUnsigned(currentKey, 
amount); + bigIntStorageStoreUnsigned(currentKey, 32, amount); // log operation saveLogWith3Topics(approveEvent, sender, recipient, amount); @@ -249,7 +249,7 @@ void transferFrom() { // load allowance computeAllowanceKey(currentKey, sender, caller); bigInt allowance = bigIntNew(0); - bigIntStorageLoadUnsigned(currentKey, allowance); + bigIntStorageLoadUnsigned(currentKey, 32, allowance); // amount should not exceed allowance if (bigIntCmp(amount, allowance) > 0) { @@ -259,12 +259,12 @@ void transferFrom() { // update allowance bigIntSub(allowance, allowance, amount); - bigIntStorageStoreUnsigned(currentKey, allowance); + bigIntStorageStoreUnsigned(currentKey, 32, allowance); // load sender balance computeBalanceKey(currentKey, sender); bigInt senderBalance = bigIntNew(0); - bigIntStorageLoadUnsigned(currentKey, senderBalance); + bigIntStorageLoadUnsigned(currentKey, 32, senderBalance); // check if enough funds if (bigIntCmp(amount, senderBalance) > 0) { @@ -274,14 +274,14 @@ void transferFrom() { // update sender balance bigIntSub(senderBalance, senderBalance, amount); - bigIntStorageStoreUnsigned(currentKey, senderBalance); + bigIntStorageStoreUnsigned(currentKey, 32, senderBalance); // load & update receiver balance computeBalanceKey(currentKey, recipient); bigInt receiverBalance = bigIntNew(0); - bigIntStorageLoadUnsigned(currentKey, receiverBalance); + bigIntStorageLoadUnsigned(currentKey, 32, receiverBalance); bigIntAdd(receiverBalance, receiverBalance, amount); - bigIntStorageStoreUnsigned(currentKey, receiverBalance); + bigIntStorageStoreUnsigned(currentKey, 32, receiverBalance); // log operation saveLogWith3Topics(transferEvent, sender, recipient, amount); diff --git a/integrationTests/vm/arwen/testdata/erc20-c-03/wrc20_arwen.wasm b/integrationTests/vm/arwen/testdata/erc20-c-03/wrc20_arwen.wasm index 1331b5435d9..f32cba19069 100755 Binary files a/integrationTests/vm/arwen/testdata/erc20-c-03/wrc20_arwen.wasm and b/integrationTests/vm/arwen/testdata/erc20-c-03/wrc20_arwen.wasm differ diff --git a/integrationTests/vm/arwen/testdata/upgrades-parent/output/parent.wasm b/integrationTests/vm/arwen/testdata/upgrades-parent/output/parent.wasm new file mode 100755 index 00000000000..5bb0d92a58c Binary files /dev/null and b/integrationTests/vm/arwen/testdata/upgrades-parent/output/parent.wasm differ diff --git a/integrationTests/vm/arwen/testdata/upgrades-parent/parent.cpp b/integrationTests/vm/arwen/testdata/upgrades-parent/parent.cpp index f2b6a47b424..33f7d63a955 100644 --- a/integrationTests/vm/arwen/testdata/upgrades-parent/parent.cpp +++ b/integrationTests/vm/arwen/testdata/upgrades-parent/parent.cpp @@ -10,8 +10,8 @@ extern "C" int getNumArguments(); int getArgument(int argumentIndex, byte *argument); int getArgumentLength(int argumentIndex); - int storageStore(byte *key, byte *data, int dataLength); - int storageLoad(byte *key, byte *data); + int storageStore(byte *key, int keyLength, byte *data, int dataLength); + int storageLoad(byte *key, int keyLength, byte *data); void signalError(byte *message, int length); void asyncCall(byte *destination, byte *value, byte *data, int length); } @@ -47,7 +47,7 @@ extern "C" void getUltimateAnswer() extern "C" void getChildAddress() { byte childAddress[32]; - storageLoad((byte *)childContractAddressKey, childAddress); + storageLoad((byte *)childContractAddressKey, 32, childAddress); finish(childAddress, 32); } @@ -58,7 +58,7 @@ extern "C" void createChild() getArgument(0, code); byte childAddress[32]; createContract(nullptr, code, codeLength, 
childAddress, 0, nullptr, nullptr); - storageStore((byte *)childContractAddressKey, childAddress, 32); + storageStore((byte *)childContractAddressKey, 32, childAddress, 32); } extern "C" void upgradeChild() @@ -68,7 +68,7 @@ extern "C" void upgradeChild() getArgument(0, code); byte childAddress[32]; - storageLoad((byte *)childContractAddressKey, childAddress); + storageLoad((byte *)childContractAddressKey, 32, childAddress); // "upgradeContract@code@0100" int dataLength = 15 + 1 + codeLength + 1 + 4; diff --git a/integrationTests/vm/arwen/testdata/upgrades-parent/parent.wasm b/integrationTests/vm/arwen/testdata/upgrades-parent/parent.wasm index a8c326488a6..5bb0d92a58c 100755 Binary files a/integrationTests/vm/arwen/testdata/upgrades-parent/parent.wasm and b/integrationTests/vm/arwen/testdata/upgrades-parent/parent.wasm differ diff --git a/integrationTests/vm/systemVM/stakingSC_test.go b/integrationTests/vm/systemVM/stakingSC_test.go index 928c455c50d..55faa9d49a5 100644 --- a/integrationTests/vm/systemVM/stakingSC_test.go +++ b/integrationTests/vm/systemVM/stakingSC_test.go @@ -10,10 +10,11 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/integrationTests/multiShard/endOfEpoch" "github.com/ElrondNetwork/elrond-go/vm/factory" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestStakingUnstakingAndUnboundingOnMultiShardEnvironment(t *testing.T) { @@ -138,7 +139,7 @@ func TestStakingUnstakingAndUnboundingOnMultiShardEnvironmentWithValidatorStatis for _, nds := range nodesMap { idx, err := getNodeIndex(nodes, nds[0]) - assert.Nil(t, err) + require.Nil(t, err) idxProposers = append(idxProposers, idx) } @@ -159,13 +160,6 @@ func TestStakingUnstakingAndUnboundingOnMultiShardEnvironmentWithValidatorStatis initialVal := big.NewInt(10000000000) integrationTests.MintAllNodes(nodes, initialVal) - minNumNodes := nodes[0].NodesSetup.MinNumberOfNodes() - validators := make([]*integrationTests.TestWalletAccount, minNumNodes) - for i := 0; i < int(minNumNodes); i++ { - validators[i] = integrationTests.CreateTestWalletAccount(nodes[0].ShardCoordinator, 0) - } - integrationTests.MintAllPlayers(nodes, validators, initialVal) - verifyInitialBalance(t, nodes, initialVal) round := uint64(0) @@ -183,13 +177,6 @@ func TestStakingUnstakingAndUnboundingOnMultiShardEnvironmentWithValidatorStatis integrationTests.CreateAndSendTransaction(node, nodePrice, factory.AuctionSCAddress, txData) } - // need to add enough stakers in order to make it possible to call unstake and unbond - for index, validator := range validators { - pubKey := generateUniqueKey(index + len(nodes) + 1) - txData = "stake" + "@" + oneEncoded + "@" + pubKey + "@" + hex.EncodeToString([]byte("msg")) - createAndSendTx(nodes[0], validator, nodePrice, factory.AuctionSCAddress, []byte(txData)) - } - time.Sleep(time.Second) nrRoundsToPropagateMultiShard := 10 @@ -236,6 +223,108 @@ func TestStakingUnstakingAndUnboundingOnMultiShardEnvironmentWithValidatorStatis verifyUnbound(t, nodes) } +func TestStakeWithRewardsAddressAndValidatorStatistics(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 2 + nodesPerShard := 2 + numMetachainNodes := 2 + shardConsensusGroupSize := 1 + metaConsensusGroupSize := 1 + + advertiser := integrationTests.CreateMessengerWithKadDht("") + _ = advertiser.Bootstrap() + + nodesMap := 
integrationTests.CreateNodesWithNodesCoordinatorAndTxKeys( + nodesPerShard, + numMetachainNodes, + numOfShards, + shardConsensusGroupSize, + metaConsensusGroupSize, + integrationTests.GetConnectableAddress(advertiser), + ) + + nodes := make([]*integrationTests.TestProcessorNode, 0) + idxProposers := make([]int, numOfShards+1) + + for _, nds := range nodesMap { + nodes = append(nodes, nds...) + } + + for _, nds := range nodesMap { + idx, err := getNodeIndex(nodes, nds[0]) + assert.Nil(t, err) + + idxProposers = append(idxProposers, idx) + } + integrationTests.DisplayAndStartNodes(nodes) + + roundsPerEpoch := uint64(5) + for _, node := range nodes { + node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) + } + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Messenger.Close() + } + }() + + for _, node := range nodesMap { + fmt.Println(integrationTests.MakeDisplayTable(node)) + } + + initialVal := big.NewInt(10000000000) + integrationTests.MintAllNodes(nodes, initialVal) + + rewardAccount := integrationTests.CreateTestWalletAccount(nodes[0].ShardCoordinator, 0) + + verifyInitialBalance(t, nodes, initialVal) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + var txData string + for _, node := range nodes { + txData = "changeRewardAddress" + "@" + hex.EncodeToString(rewardAccount.Address) + integrationTests.CreateAndSendTransaction(node, big.NewInt(0), factory.AuctionSCAddress, txData) + } + + round = uint64(1) + nonce = uint64(1) + nbBlocksToProduce := roundsPerEpoch * 3 + var consensusNodes map[uint32][]*integrationTests.TestProcessorNode + + for i := uint64(0); i < nbBlocksToProduce; i++ { + for _, nodesSlice := range nodesMap { + integrationTests.UpdateRound(nodesSlice, round) + } + + _, _, consensusNodes = integrationTests.AllShardsProposeBlock(round, nonce, nodesMap) + indexesProposers := endOfEpoch.GetBlockProposersIndexes(consensusNodes, nodesMap) + integrationTests.SyncAllShardsWithRoundBlock(t, nodesMap, indexesProposers, round) + round++ + nonce++ + + time.Sleep(1 * time.Second) + } + + rewardShardID := nodes[0].ShardCoordinator.ComputeId(rewardAccount.Address) + for _, node := range nodes { + if node.ShardCoordinator.SelfId() != rewardShardID { + continue + } + + rwdAccount := getAccountFromAddrBytes(node.AccntState, rewardAccount.Address) + assert.True(t, rwdAccount.GetBalance().Cmp(big.NewInt(0)) > 0) + } +} + func getNodeIndex(nodeList []*integrationTests.TestProcessorNode, node *integrationTests.TestProcessorNode) (int, error) { for i := range nodeList { if node == nodeList[i] { @@ -247,14 +336,14 @@ func getNodeIndex(nodeList []*integrationTests.TestProcessorNode, node *integrat } func verifyUnbound(t *testing.T, nodes []*integrationTests.TestProcessorNode) { - expectedValue := big.NewInt(0).SetUint64(9999961980) + expectedValue := big.NewInt(0).SetUint64(9999963900) for _, node := range nodes { accShardId := node.ShardCoordinator.ComputeId(node.OwnAccount.Address) for _, helperNode := range nodes { if helperNode.ShardCoordinator.SelfId() == accShardId { sndAcc := getAccountFromAddrBytes(helperNode.AccntState, node.OwnAccount.Address) - assert.True(t, sndAcc.GetBalance().Cmp(expectedValue) == 0) + require.True(t, sndAcc.GetBalance().Cmp(expectedValue) == 0) break } } @@ -262,14 +351,15 @@ func verifyUnbound(t *testing.T, nodes []*integrationTests.TestProcessorNode) { } func checkAccountsAfterStaking(t *testing.T, nodes []*integrationTests.TestProcessorNode) { - expectedValue := 
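
Two of the fixes in this diff agree on the same constant: a BLS public key is 96 bytes, so its hex encoding is 192 characters; the delegation test's stakerBLSKey switched from 128*2 to 96*2 above, and generateUniqueKey below now pads to 192 instead of 256. A sketch of the padded-key helper under that assumption (the helper name is ours):

    // a BLS public key is 96 bytes, i.e. 192 hex characters
    const blsKeyHexLen = 96 * 2

    // uniqueBLSKeyHex mirrors generateUniqueKey: zero-pad a numeric id
    // out to a full-length hex-encoded key
    func uniqueBLSKeyHex(id int) string {
        s := fmt.Sprintf("%d", id)
        return strings.Repeat("0", blsKeyHexLen-len(s)) + s
    }
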
big.NewInt(0).SetUint64(9499987270) + expectedValue := big.NewInt(0).SetUint64(9499987910) for _, node := range nodes { accShardId := node.ShardCoordinator.ComputeId(node.OwnAccount.Address) for _, helperNode := range nodes { if helperNode.ShardCoordinator.SelfId() == accShardId { + sndAcc := getAccountFromAddrBytes(helperNode.AccntState, node.OwnAccount.Address) - assert.True(t, sndAcc.GetBalance().Cmp(expectedValue) == 0) + require.True(t, sndAcc.GetBalance().Cmp(expectedValue) == 0) break } } @@ -283,7 +373,7 @@ func verifyInitialBalance(t *testing.T, nodes []*integrationTests.TestProcessorN for _, helperNode := range nodes { if helperNode.ShardCoordinator.SelfId() == accShardId { sndAcc := getAccountFromAddrBytes(helperNode.AccntState, node.OwnAccount.Address) - assert.Equal(t, initialVal, sndAcc.GetBalance()) + require.Equal(t, initialVal, sndAcc.GetBalance()) break } } @@ -299,31 +389,7 @@ func getAccountFromAddrBytes(accState state.AccountsAdapter, address []byte) sta } func generateUniqueKey(identifier int) string { - neededLength := 256 + neededLength := 192 uniqueIdentifier := fmt.Sprintf("%d", identifier) return strings.Repeat("0", neededLength-len(uniqueIdentifier)) + uniqueIdentifier } - -func createAndSendTx( - node *integrationTests.TestProcessorNode, - player *integrationTests.TestWalletAccount, - txValue *big.Int, - rcvAddress []byte, - txData []byte, -) { - tx := &transaction.Transaction{ - Nonce: player.Nonce, - Value: txValue, - SndAddr: player.Address, - RcvAddr: rcvAddress, - Data: txData, - GasPrice: node.EconomicsData.GetMinGasPrice(), - GasLimit: node.EconomicsData.GetMinGasLimit()*uint64(100) + uint64(len(txData)), - } - - txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer) - tx.Signature, _ = player.SingleSigner.Sign(player.SkTxSign, txBuff) - - _, _ = node.SendTransaction(tx) - player.Nonce++ -} diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 90394f8c1fd..b67fbc4e7d8 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -46,6 +46,8 @@ var pubkeyConv, _ = pubkeyConverter.NewHexPubkeyConverter(32) var log = logger.GetOrCreate("integrationtests") +const maxTrieLevelInMemory = uint(5) + // VMTestContext - type VMTestContext struct { TxProcessor process.TransactionProcessor @@ -118,7 +120,7 @@ func CreateInMemoryShardAccountsDB() *state.AccountsDB { generalCfg, ) - tr, _ := trie.NewTrie(trieStorage, marsh, testHasher) + tr, _ := trie.NewTrie(trieStorage, marsh, testHasher, maxTrieLevelInMemory) adb, _ := state.NewAccountsDB(tr, testHasher, marsh, &accountFactory{}) return adb @@ -256,7 +258,7 @@ func CreateVMAndBlockchainHook( ) (process.VirtualMachinesContainer, *hooks.BlockChainHookImpl) { actualGasSchedule := gasSchedule if gasSchedule == nil { - actualGasSchedule = arwenConfig.MakeGasMap(1) + actualGasSchedule = arwenConfig.MakeGasMapForTests() defaults.FillGasMapInternal(actualGasSchedule, 1) } diff --git a/node/mock/endOfEpochTriggerStub.go b/node/mock/endOfEpochTriggerStub.go index f32afc7514f..80d548ac30d 100644 --- a/node/mock/endOfEpochTriggerStub.go +++ b/node/mock/endOfEpochTriggerStub.go @@ -121,6 +121,11 @@ func (e *EpochStartTriggerStub) MetaEpoch() uint32 { return 0 } +// Close - +func (e *EpochStartTriggerStub) Close() error { + return nil +} + // IsInterfaceNil - func (e *EpochStartTriggerStub) IsInterfaceNil() bool { return e == nil diff --git a/node/mock/syncStub.go 
b/node/mock/syncStub.go deleted file mode 100644 index cf79009bcbf..00000000000 --- a/node/mock/syncStub.go +++ /dev/null @@ -1,33 +0,0 @@ -package mock - -import ( - "time" -) - -// SyncStub - -type SyncStub struct { -} - -// StartSync - -func (ss *SyncStub) StartSync() { -} - -// ClockOffset - -func (ss *SyncStub) ClockOffset() time.Duration { - return time.Second -} - -// FormattedCurrentTime - -func (ss *SyncStub) FormattedCurrentTime() string { - return "" -} - -// CurrentTime - -func (ss *SyncStub) CurrentTime() time.Time { - return time.Now() -} - -// IsInterfaceNil returns true if there is no value under the interface -func (ss *SyncStub) IsInterfaceNil() bool { - return ss == nil -} diff --git a/node/mock/syncTimerStub.go b/node/mock/syncTimerStub.go new file mode 100644 index 00000000000..b3d60be3061 --- /dev/null +++ b/node/mock/syncTimerStub.go @@ -0,0 +1,38 @@ +package mock + +import ( + "time" +) + +// SyncTimerStub - +type SyncTimerStub struct { +} + +// StartSyncingTime - +func (sts *SyncTimerStub) StartSyncingTime() { +} + +// ClockOffset - +func (sts *SyncTimerStub) ClockOffset() time.Duration { + return time.Second +} + +// FormattedCurrentTime - +func (sts *SyncTimerStub) FormattedCurrentTime() string { + return "" +} + +// CurrentTime - +func (sts *SyncTimerStub) CurrentTime() time.Time { + return time.Now() +} + +// Close - +func (sts *SyncTimerStub) Close() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sts *SyncTimerStub) IsInterfaceNil() bool { + return sts == nil +} diff --git a/node/node.go b/node/node.go index b2c0179edc4..f623c7ffd3e 100644 --- a/node/node.go +++ b/node/node.go @@ -240,7 +240,8 @@ func (n *Node) StartConsensus() error { log.Debug("cannot set app status handler for shard bootstrapper") } - bootstrapper.StartSync() + bootstrapper.StartSyncingBlocks() + epoch := uint32(0) crtBlockHeader := n.blkc.GetCurrentBlockHeader() if !check.IfNil(crtBlockHeader) { @@ -305,6 +306,8 @@ func (n *Node) StartConsensus() error { return err } + worker.StartWorking() + n.dataPool.Headers().RegisterHandler(worker.ReceivedHeader) err = n.createConsensusTopic(worker) @@ -356,7 +359,7 @@ func (n *Node) StartConsensus() error { return err } - go chronologyHandler.StartRounds() + chronologyHandler.StartRounds() return nil } diff --git a/node/node_test.go b/node/node_test.go index 223eb9b7cd1..34a499ce74e 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -940,7 +940,7 @@ func TestStartConsensus_ShardBootstrapperNilAccounts(t *testing.T) { node.WithBlockChain(chainHandler), node.WithRounder(&mock.RounderMock{}), node.WithGenesisTime(time.Now().Local()), - node.WithSyncer(&mock.SyncStub{}), + node.WithSyncer(&mock.SyncTimerStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), node.WithResolversFinder(rf), node.WithDataStore(store), @@ -1004,7 +1004,7 @@ func TestStartConsensus_ShardBootstrapperNilPoolHolder(t *testing.T) { node.WithBlockChain(chainHandler), node.WithRounder(&mock.RounderMock{}), node.WithGenesisTime(time.Now().Local()), - node.WithSyncer(&mock.SyncStub{}), + node.WithSyncer(&mock.SyncTimerStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), node.WithAccountsAdapter(accountDb), node.WithResolversFinder(rf), @@ -1046,7 +1046,7 @@ func TestStartConsensus_MetaBootstrapperNilPoolHolder(t *testing.T) { node.WithBlockChain(chainHandler), node.WithRounder(&mock.RounderMock{}), node.WithGenesisTime(time.Now().Local()), - node.WithSyncer(&mock.SyncStub{}), + 
node.WithSyncer(&mock.SyncTimerStub{}), node.WithShardCoordinator(shardingCoordinator), node.WithDataStore(store), node.WithResolversFinder(&mock.ResolversFinderStub{ @@ -1087,7 +1087,7 @@ func TestStartConsensus_MetaBootstrapperWrongNumberShards(t *testing.T) { node.WithBlockChain(chainHandler), node.WithRounder(&mock.RounderMock{}), node.WithGenesisTime(time.Now().Local()), - node.WithSyncer(&mock.SyncStub{}), + node.WithSyncer(&mock.SyncTimerStub{}), node.WithShardCoordinator(shardingCoordinator), node.WithDataStore(&mock.ChainStorerMock{}), node.WithDataPool(&mock.PoolsHolderStub{}), @@ -1140,7 +1140,7 @@ func TestStartConsensus_ShardBootstrapperPubKeyToByteArrayError(t *testing.T) { node.WithBlockChain(chainHandler), node.WithRounder(&mock.RounderMock{}), node.WithGenesisTime(time.Now().Local()), - node.WithSyncer(&mock.SyncStub{}), + node.WithSyncer(&mock.SyncTimerStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), node.WithAccountsAdapter(accountDb), node.WithResolversFinder(rf), @@ -1223,7 +1223,7 @@ func TestStartConsensus_ShardBootstrapperInvalidConsensusType(t *testing.T) { node.WithBlockChain(chainHandler), node.WithRounder(&mock.RounderMock{}), node.WithGenesisTime(time.Now().Local()), - node.WithSyncer(&mock.SyncStub{}), + node.WithSyncer(&mock.SyncTimerStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), node.WithAccountsAdapter(accountDb), node.WithResolversFinder(rf), @@ -1305,7 +1305,7 @@ func TestStartConsensus_ShardBootstrapper(t *testing.T) { node.WithBlockChain(chainHandler), node.WithRounder(&mock.RounderMock{}), node.WithGenesisTime(time.Now().Local()), - node.WithSyncer(&mock.SyncStub{}), + node.WithSyncer(&mock.SyncTimerStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), node.WithAccountsAdapter(accountDb), node.WithResolversFinder(rf), diff --git a/node/options_test.go b/node/options_test.go index 62bb79a703d..f3ea714eb91 100644 --- a/node/options_test.go +++ b/node/options_test.go @@ -426,7 +426,7 @@ func TestWithSyncer_ShouldWork(t *testing.T) { node, _ := NewNode() - sync := &mock.SyncStub{} + sync := &mock.SyncTimerStub{} opt := WithSyncer(sync) err := opt(node) diff --git a/ntp/interface.go b/ntp/interface.go index 9d20f83a6ea..60b840c1d4b 100644 --- a/ntp/interface.go +++ b/ntp/interface.go @@ -6,7 +6,8 @@ import ( // SyncTimer defines an interface for time synchronization type SyncTimer interface { - StartSync() + Close() error + StartSyncingTime() ClockOffset() time.Duration FormattedCurrentTime() string CurrentTime() time.Time diff --git a/ntp/syncTime.go b/ntp/syncTime.go index 5bf9cc1750e..a32ea484d70 100644 --- a/ntp/syncTime.go +++ b/ntp/syncTime.go @@ -1,6 +1,7 @@ package ntp import ( + "context" "crypto/rand" "fmt" "math" @@ -12,10 +13,12 @@ import ( "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/close" "github.com/beevik/ntp" ) var _ SyncTimer = (*syncTime)(nil) +var _ close.Closer = (*syncTime)(nil) var log = logger.GetOrCreate("ntp") @@ -95,6 +98,7 @@ type syncTime struct { syncPeriod time.Duration ntpOptions NTPOptions query func(options NTPOptions, hostIndex int) (*ntp.Response, error) + cancelFunc func() } // NewSyncTime creates a syncTime object. The customQueryFunc argument allows the caller to set a different NTP-querying @@ -118,12 +122,24 @@ func NewSyncTime( return &s } -// StartSync method does the time synchronization at every syncPeriod time elapsed. 
This method should be started on go +// StartSyncingTime method does the time synchronization at every syncPeriod time elapsed. This method should be started on go // routine -func (s *syncTime) StartSync() { +func (s *syncTime) StartSyncingTime() { + var ctx context.Context + ctx, s.cancelFunc = context.WithCancel(context.Background()) + go s.startSync(ctx) +} + +func (s *syncTime) startSync(ctx context.Context) { for { s.sync() - time.Sleep(s.getSleepTime()) + + select { + case <-ctx.Done(): + log.Debug("syncTime's go routine is stopping...") + return + case <-time.After(s.getSleepTime()): + } } } @@ -258,6 +274,15 @@ func (s *syncTime) CurrentTime() time.Time { return currentTime } +// Close will close the endless running go routine +func (s *syncTime) Close() error { + if s.cancelFunc != nil { + s.cancelFunc() + } + + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (s *syncTime) IsInterfaceNil() bool { return s == nil diff --git a/ntp/syncTime_test.go b/ntp/syncTime_test.go index 8116dc634f0..c6ca49fab4d 100644 --- a/ntp/syncTime_test.go +++ b/ntp/syncTime_test.go @@ -130,7 +130,7 @@ func TestGetOffset(t *testing.T) { func TestCallQuery(t *testing.T) { st := ntp2.NewSyncTime(config.NTPConfig{Hosts: []string{""}, SyncPeriodSeconds: 1}, queryMock4) - go st.StartSync() + st.StartSyncingTime() assert.NotNil(t, st.Query()) assert.Equal(t, time.Second, st.SyncPeriod()) @@ -144,6 +144,8 @@ func TestCallQuery(t *testing.T) { assert.NotEqual(t, qmc, 0) fmt.Printf("Current time: %v\n", st.FormattedCurrentTime()) + + st.Close() } func TestCallQueryShouldErrIndexOutOfBounds(t *testing.T) { diff --git a/process/block/metablock.go b/process/block/metablock.go index 6fd005ff5a4..d7663968716 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -1003,6 +1003,9 @@ func (mp *metaProcessor) CommitBlock( return err } + // must be called before commitEpochStart + rewardsTxs := mp.getRewardsTxs(header, body) + mp.commitEpochStart(header, body) headerHash := mp.hasher.Compute(string(marshalizedHeader)) mp.saveMetaHeader(header, headerHash, marshalizedHeader) @@ -1013,8 +1016,6 @@ func (mp *metaProcessor) CommitBlock( return err } - rewardsTxs := mp.getRewardsTxs(header, body) - mp.validatorStatisticsProcessor.DisplayRatings(header.GetEpoch()) err = mp.saveLastNotarizedHeader(header) diff --git a/process/block/poolsCleaner/miniBlocksPoolsCleaner.go b/process/block/poolsCleaner/miniBlocksPoolsCleaner.go index db3481ddd0e..01c0be1236f 100644 --- a/process/block/poolsCleaner/miniBlocksPoolsCleaner.go +++ b/process/block/poolsCleaner/miniBlocksPoolsCleaner.go @@ -1,11 +1,13 @@ package poolsCleaner import ( + "context" "sync" "time" "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/core/close" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" @@ -14,6 +16,8 @@ import ( var log = logger.GetOrCreate("process/block/poolsCleaner") +var _ close.Closer = (*miniBlocksPoolsCleaner)(nil) + type mbInfo struct { round int64 senderShardID uint32 @@ -29,6 +33,7 @@ type miniBlocksPoolsCleaner struct { mutMapMiniBlocksRounds sync.RWMutex mapMiniBlocksRounds map[string]*mbInfo + cancelFunc func() } // NewMiniBlocksPoolsCleaner will return a new miniblocks pools cleaner @@ -57,14 +62,25 @@ func NewMiniBlocksPoolsCleaner( mbpc.mapMiniBlocksRounds = make(map[string]*mbInfo) 
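A minimal, self-contained sketch of the start/close pattern this patch applies uniformly (ntp.syncTime, both pools cleaners and the bootstrappers all replace a constructor-spawned sleep loop with an explicit Start method plus a context-cancelling Close); the periodicWorker type and the one-second period below are illustrative only, not part of the patch:

package main

import (
	"context"
	"fmt"
	"time"
)

// periodicWorker owns a background goroutine that is started on demand.
type periodicWorker struct {
	cancelFunc func()
}

// Start creates a cancellable context and launches the processing loop.
func (pw *periodicWorker) Start() {
	var ctx context.Context
	ctx, pw.cancelFunc = context.WithCancel(context.Background())
	go pw.processLoop(ctx)
}

func (pw *periodicWorker) processLoop(ctx context.Context) {
	for {
		// time.After inside a select lets cancellation interrupt the wait,
		// which a bare time.Sleep could not
		select {
		case <-ctx.Done():
			fmt.Println("periodicWorker's go routine is stopping...")
			return
		case <-time.After(time.Second):
		}

		fmt.Println("one round of periodic work")
	}
}

// Close stops the goroutine; it is safe to call even if Start never ran.
func (pw *periodicWorker) Close() error {
	if pw.cancelFunc != nil {
		pw.cancelFunc()
	}

	return nil
}

func main() {
	pw := &periodicWorker{}
	pw.Start()
	time.Sleep(2500 * time.Millisecond)
	_ = pw.Close()
}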
mbpc.miniblocksPool.RegisterHandler(mbpc.receivedMiniBlock) - go mbpc.cleanMiniblocksPools() - return &mbpc, nil } -func (mbpc *miniBlocksPoolsCleaner) cleanMiniblocksPools() { +// StartCleaning actually starts the pools cleaning mechanism +func (mbpc *miniBlocksPoolsCleaner) StartCleaning() { + var ctx context.Context + ctx, mbpc.cancelFunc = context.WithCancel(context.Background()) + go mbpc.cleanMiniblocksPools(ctx) +} + +func (mbpc *miniBlocksPoolsCleaner) cleanMiniblocksPools(ctx context.Context) { for { - time.Sleep(sleepTime) + select { + case <-ctx.Done(): + log.Debug("miniBlocksPoolsCleaner's go routine is stopping...") + return + case <-time.After(sleepTime): + } + numMiniblocksInMap := mbpc.cleanMiniblocksPoolsIfNeeded() log.Debug("miniBlocksPoolsCleaner.cleanMiniblocksPools", "num miniblocks in map", numMiniblocksInMap) } @@ -156,3 +172,17 @@ func (mbpc *miniBlocksPoolsCleaner) cleanMiniblocksPoolsIfNeeded() int { return len(mbpc.mapMiniBlocksRounds) } + +// Close will close the endless running go routine +func (mbpc *miniBlocksPoolsCleaner) Close() error { + if mbpc.cancelFunc != nil { + mbpc.cancelFunc() + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (mbpc *miniBlocksPoolsCleaner) IsInterfaceNil() bool { + return mbpc == nil +} diff --git a/process/block/poolsCleaner/txsPoolsCleaner.go b/process/block/poolsCleaner/txsPoolsCleaner.go index bb254a42861..c4fa104d1f1 100644 --- a/process/block/poolsCleaner/txsPoolsCleaner.go +++ b/process/block/poolsCleaner/txsPoolsCleaner.go @@ -2,11 +2,13 @@ package poolsCleaner import ( "bytes" + "context" "sync" "time" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/core/close" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -16,6 +18,8 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/txcache" ) +var _ close.Closer = (*txsPoolsCleaner)(nil) + // sleepTime defines the time between each iteration made in clean...Pools methods const sleepTime = time.Minute @@ -45,6 +49,7 @@ type txsPoolsCleaner struct { mutMapTxsRounds sync.RWMutex mapTxsRounds map[string]*txInfo emptyAddress []byte + cancelFunc func() } // NewTxsPoolsCleaner will return a new txs pools cleaner @@ -94,14 +99,25 @@ func NewTxsPoolsCleaner( tpc.emptyAddress = make([]byte, tpc.addressPubkeyConverter.Len()) - go tpc.cleanTxsPools() - return &tpc, nil } -func (tpc *txsPoolsCleaner) cleanTxsPools() { +// StartCleaning actually starts the pools cleaning mechanism +func (tpc *txsPoolsCleaner) StartCleaning() { + var ctx context.Context + ctx, tpc.cancelFunc = context.WithCancel(context.Background()) + go tpc.cleanTxsPools(ctx) +} + +func (tpc *txsPoolsCleaner) cleanTxsPools(ctx context.Context) { for { - time.Sleep(sleepTime) + select { + case <-ctx.Done(): + log.Debug("txsPoolsCleaner's go routine is stopping...") + return + case <-time.After(sleepTime): + } + numTxsInMap := tpc.cleanTxsPoolsIfNeeded() log.Debug("txsPoolsCleaner.cleanTxsPools", "num txs in map", numTxsInMap) } @@ -296,3 +312,17 @@ func (tpc *txsPoolsCleaner) getShardFromAddress(address []byte) (uint32, error) return tpc.shardCoordinator.ComputeId(address), nil } + +// Close will close the endless running go routine +func (tpc *txsPoolsCleaner) Close() error { + if tpc.cancelFunc != nil { + tpc.cancelFunc() + } + + return nil +} + +// IsInterfaceNil returns true if there is no value 
under the interface +func (tpc *txsPoolsCleaner) IsInterfaceNil() bool { + return tpc == nil +} diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index f6c46390ed3..517d8b9b0fb 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -1057,11 +1057,13 @@ func createTxPool() (dataRetriever.ShardedDataCacherNotifier, error) { return txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 1, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index e1ff818c839..98f57684d0c 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -49,11 +49,13 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { txPool, _ := txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 1, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index b9a5f1b42fe..245a7ee6528 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -2541,11 +2541,13 @@ func createTxPool() (dataRetriever.ShardedDataCacherNotifier, error) { return txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 100000, - SizeInBytes: 1000000000, - Shards: 1, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) diff --git a/process/factory/shard/vmContainerFactory_test.go b/process/factory/shard/vmContainerFactory_test.go index fda81785a05..2e242192ec9 100644 --- a/process/factory/shard/vmContainerFactory_test.go +++ b/process/factory/shard/vmContainerFactory_test.go @@ -53,7 +53,7 @@ func TestNewVMContainerFactory_OkValues(t *testing.T) { vmf, err := NewVMContainerFactory( config.VirtualMachineConfig{}, 10000, - arwenConfig.MakeGasMap(1), + arwenConfig.MakeGasMapForTests(), createMockVMAccountsArguments(), ) @@ -68,7 +68,7 @@ func TestVmContainerFactory_Create(t *testing.T) { vmf, err := NewVMContainerFactory( config.VirtualMachineConfig{}, 10000, - arwenConfig.MakeGasMap(1), + arwenConfig.MakeGasMapForTests(), createMockVMAccountsArguments(), ) assert.NotNil(t, vmf) diff --git a/process/interceptors/multiDataInterceptor.go b/process/interceptors/multiDataInterceptor.go index 6a60b3456f6..c2a47649c94 100644 --- a/process/interceptors/multiDataInterceptor.go +++ b/process/interceptors/multiDataInterceptor.go @@ -99,14 +99,12 @@ func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, return err } - interceptedMultiData := make([]process.InterceptedData, 0) lastErrEncountered := error(nil) wgProcess := &sync.WaitGroup{} wgProcess.Add(len(multiDataBuff)) go func() { wgProcess.Wait() - mdi.processor.SignalEndOfProcessing(interceptedMultiData) mdi.throttler.EndProcessing() }() @@ -119,8 +117,6 @@ func (mdi *MultiDataInterceptor) 
ProcessReceivedMessage(message p2p.MessageP2P, continue } - interceptedMultiData = append(interceptedMultiData, interceptedData) - isForCurrentShard := interceptedData.IsForCurrentShard() isWhiteListed := mdi.whiteListRequest.IsWhiteListed(interceptedData) shouldProcess := isForCurrentShard || isWhiteListed diff --git a/process/interceptors/processor/hdrInterceptorProcessor.go b/process/interceptors/processor/hdrInterceptorProcessor.go index db52f9e28a6..d0184e5a563 100644 --- a/process/interceptors/processor/hdrInterceptorProcessor.go +++ b/process/interceptors/processor/hdrInterceptorProcessor.go @@ -68,10 +68,6 @@ func (hip *HdrInterceptorProcessor) Save(data process.InterceptedData, _ p2p.Pee return nil } -// SignalEndOfProcessing signals the end of processing -func (hip *HdrInterceptorProcessor) SignalEndOfProcessing(data []process.InterceptedData) { -} - // IsInterfaceNil returns true if there is no value under the interface func (hip *HdrInterceptorProcessor) IsInterfaceNil() bool { return hip == nil diff --git a/process/interceptors/processor/miniblockInterceptorProcessor.go b/process/interceptors/processor/miniblockInterceptorProcessor.go index 4e03533ee5f..a2b0b69f74e 100644 --- a/process/interceptors/processor/miniblockInterceptorProcessor.go +++ b/process/interceptors/processor/miniblockInterceptorProcessor.go @@ -100,10 +100,6 @@ func (mip *MiniblockInterceptorProcessor) isMbCrossShard(miniblock *block.MiniBl return miniblock.SenderShardID != mip.shardCoordinator.SelfId() } -// SignalEndOfProcessing signals the end of processing -func (mip *MiniblockInterceptorProcessor) SignalEndOfProcessing(_ []process.InterceptedData) { -} - // IsInterfaceNil returns true if there is no value under the interface func (mip *MiniblockInterceptorProcessor) IsInterfaceNil() bool { return mip == nil diff --git a/process/interceptors/processor/trieNodeInterceptorProcessor.go b/process/interceptors/processor/trieNodeInterceptorProcessor.go index f90ccee478e..51aac28b67b 100644 --- a/process/interceptors/processor/trieNodeInterceptorProcessor.go +++ b/process/interceptors/processor/trieNodeInterceptorProcessor.go @@ -42,23 +42,6 @@ func (tnip *TrieNodeInterceptorProcessor) Save(data process.InterceptedData, _ p return nil } -// SignalEndOfProcessing signals the end of processing -func (tnip *TrieNodeInterceptorProcessor) SignalEndOfProcessing(data []process.InterceptedData) { - nodeData, ok := data[0].(*trie.InterceptedTrieNode) - if !ok { - log.Debug("intercepted data is not a trie node") - return - } - - // TODO instead of using a node to trigger the end of processing, use a dedicated channel - // between interceptor and sync - nodeData.CreateEndOfProcessingTriggerNode() - err := tnip.Save(nodeData, "") - if err != nil { - log.Debug(err.Error()) - } -} - // IsInterfaceNil returns true if there is no value under the interface func (tnip *TrieNodeInterceptorProcessor) IsInterfaceNil() bool { return tnip == nil diff --git a/process/interceptors/processor/trieNodeInterceptorProcessor_test.go b/process/interceptors/processor/trieNodeInterceptorProcessor_test.go index 38751e6fd92..8398fbf201e 100644 --- a/process/interceptors/processor/trieNodeInterceptorProcessor_test.go +++ b/process/interceptors/processor/trieNodeInterceptorProcessor_test.go @@ -6,7 +6,6 @@ import ( "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/trie" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block/interceptedBlocks" 
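With SignalEndOfProcessing removed from process.InterceptorProcessor (the trimmed interface appears further below), a conforming processor only needs Validate, Save and IsInterfaceNil; a hypothetical no-op implementation is enough to satisfy it:

package processor

import (
	"github.com/ElrondNetwork/elrond-go/p2p"
	"github.com/ElrondNetwork/elrond-go/process"
)

// compile-time check against the trimmed interface
var _ process.InterceptorProcessor = (*nopInterceptorProcessor)(nil)

// nopInterceptorProcessor is an illustrative processor that accepts and drops everything.
type nopInterceptorProcessor struct{}

// Validate accepts any intercepted data
func (nip *nopInterceptorProcessor) Validate(_ process.InterceptedData, _ p2p.PeerID) error {
	return nil
}

// Save intentionally discards the intercepted data
func (nip *nopInterceptorProcessor) Save(_ process.InterceptedData, _ p2p.PeerID) error {
	return nil
}

// IsInterfaceNil returns true if there is no value under the interface
func (nip *nopInterceptorProcessor) IsInterfaceNil() bool {
	return nip == nil
}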
"github.com/ElrondNetwork/elrond-go/process/interceptors/processor" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" @@ -66,43 +65,6 @@ func TestTrieNodesInterceptorProcessor_SaveShouldPutInCacher(t *testing.T) { assert.True(t, putCalled) } -func TestTrieNodeInterceptorProcessor_SignalEndOfProcessingWrongTypeShouldNotPutInCache(t *testing.T) { - t.Parallel() - - cacheMock := &mock.CacherStub{ - PutCalled: func(key []byte, value interface{}) bool { - assert.Fail(t, "should have not arrived here") - return false - }, - } - tnip, _ := processor.NewTrieNodesInterceptorProcessor(cacheMock) - - intData := interceptedBlocks.InterceptedHeader{} - slc := make([]process.InterceptedData, 0) - slc = append(slc, &intData) - tnip.SignalEndOfProcessing(slc) -} - -func TestTrieNodeInterceptorProcessor_SignalEndOfProcessingShouldWork(t *testing.T) { - t.Parallel() - - putWasCalled := false - cacheMock := &mock.CacherStub{ - PutCalled: func(key []byte, value interface{}) bool { - putWasCalled = true - return true - }, - } - tnip, _ := processor.NewTrieNodesInterceptorProcessor(cacheMock) - - intData := trie.InterceptedTrieNode{} - slc := make([]process.InterceptedData, 0) - slc = append(slc, &intData) - tnip.SignalEndOfProcessing(slc) - - assert.True(t, putWasCalled) -} - //------- IsInterfaceNil func TestTrieNodesInterceptorProcessor_IsInterfaceNil(t *testing.T) { diff --git a/process/interceptors/processor/txInterceptorProcessor.go b/process/interceptors/processor/txInterceptorProcessor.go index 98e26d6ad69..29431093cd9 100644 --- a/process/interceptors/processor/txInterceptorProcessor.go +++ b/process/interceptors/processor/txInterceptorProcessor.go @@ -60,10 +60,6 @@ func (txip *TxInterceptorProcessor) Save(data process.InterceptedData, _ p2p.Pee return nil } -// SignalEndOfProcessing signals the end of processing -func (txip *TxInterceptorProcessor) SignalEndOfProcessing(_ []process.InterceptedData) { -} - // IsInterfaceNil returns true if there is no value under the interface func (txip *TxInterceptorProcessor) IsInterfaceNil() bool { return txip == nil diff --git a/process/interface.go b/process/interface.go index f91dcc69d64..7833373d055 100644 --- a/process/interface.go +++ b/process/interface.go @@ -16,7 +16,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" ) // TransactionProcessor is the main interface for transaction execution engine @@ -97,7 +97,6 @@ type InterceptedData interface { type InterceptorProcessor interface { Validate(data InterceptedData, fromConnectedPeer p2p.PeerID) error Save(data InterceptedData, fromConnectedPeer p2p.PeerID) error - SignalEndOfProcessing(data []InterceptedData) IsInterfaceNil() bool } @@ -290,10 +289,10 @@ type HashAccesser interface { // Bootstrapper is an interface that defines the behaviour of a struct that is able // to synchronize the node type Bootstrapper interface { + Close() error AddSyncStateListener(func(isSyncing bool)) GetNodeState() core.NodeState - StopSync() - StartSync() + StartSyncingBlocks() SetStatusHandler(handler core.AppStatusHandler) error IsInterfaceNil() bool } @@ -835,3 +834,10 @@ type MiniblockAndHash struct { Miniblock *block.MiniBlock Hash []byte } + +// PoolsCleaner defines the functionality to clean pools for old records +type PoolsCleaner interface { + Close() error + 
StartCleaning() + IsInterfaceNil() bool +} diff --git a/process/mock/endOfEpochTriggerStub.go b/process/mock/endOfEpochTriggerStub.go index a5e5a124be3..63a7433c4cd 100644 --- a/process/mock/endOfEpochTriggerStub.go +++ b/process/mock/endOfEpochTriggerStub.go @@ -129,6 +129,11 @@ func (e *EpochStartTriggerStub) MetaEpoch() uint32 { return 0 } +// Close - +func (e *EpochStartTriggerStub) Close() error { + return nil +} + // IsInterfaceNil - func (e *EpochStartTriggerStub) IsInterfaceNil() bool { return e == nil diff --git a/process/mock/interceptorProcessorStub.go b/process/mock/interceptorProcessorStub.go index 0ccbff7befc..72b3deec90c 100644 --- a/process/mock/interceptorProcessorStub.go +++ b/process/mock/interceptorProcessorStub.go @@ -21,10 +21,6 @@ func (ips *InterceptorProcessorStub) Save(data process.InterceptedData, _ p2p.Pe return ips.SaveCalled(data) } -// SignalEndOfProcessing - -func (ips *InterceptorProcessorStub) SignalEndOfProcessing(_ []process.InterceptedData) { -} - // IsInterfaceNil - func (ips *InterceptorProcessorStub) IsInterfaceNil() bool { return ips == nil diff --git a/process/mock/poolsHolderMock.go b/process/mock/poolsHolderMock.go index 89f8ac9b3bb..40b22d2cfe5 100644 --- a/process/mock/poolsHolderMock.go +++ b/process/mock/poolsHolderMock.go @@ -30,11 +30,13 @@ func NewPoolsHolderMock() *PoolsHolderMock { phf.transactions, _ = txpool.NewShardedTxPool( txpool.ArgShardedTxPool{ Config: storageUnit.CacheConfig{ - Size: 10000, - SizeInBytes: 1000000000, - Shards: 16, + Size: 100000, + SizePerSender: 1000, + SizeInBytes: 1000000000, + SizeInBytesPerSender: 10000000, + Shards: 16, }, - MinGasPrice: 100000000000000, + MinGasPrice: 200000000000, NumberOfShards: 1, }, ) diff --git a/process/mock/syncTimerMock.go b/process/mock/syncTimerMock.go index 54e01b0e14c..778b9fe3fae 100644 --- a/process/mock/syncTimerMock.go +++ b/process/mock/syncTimerMock.go @@ -10,8 +10,8 @@ type SyncTimerMock struct { CurrentTimeCalled func() time.Time } -// StartSync method does the time synchronization at every syncPeriod time elapsed. This should be started as a go routine -func (stm SyncTimerMock) StartSync() { +// StartSyncingTime method does the time synchronization at every syncPeriod time elapsed. 
This should be started as a go routine +func (stm SyncTimerMock) StartSyncingTime() { panic("implement me") } @@ -38,6 +38,11 @@ func (stm SyncTimerMock) CurrentTime() time.Time { return time.Unix(0, 0) } +// Close - +func (stm *SyncTimerMock) Close() error { + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (stm *SyncTimerMock) IsInterfaceNil() bool { return stm == nil diff --git a/process/rewardTransaction/export_test.go b/process/rewardTransaction/export_test.go index 8084206194d..2bd90b8874d 100644 --- a/process/rewardTransaction/export_test.go +++ b/process/rewardTransaction/export_test.go @@ -4,6 +4,9 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" ) +// RewardKey - +const RewardKey = rewardKey + // Hasher will return the hasher of InterceptedRewardTransaction for using in test files func (inRTx *InterceptedRewardTransaction) Hasher() hashing.Hasher { return inRTx.hasher diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index a2f91f790d1..5137a6b18af 100644 --- a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -1,6 +1,9 @@ package rewardTransaction import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/state" @@ -10,6 +13,8 @@ import ( var _ process.RewardTransactionProcessor = (*rewardTxProcessor)(nil) +const rewardKey = "reward" + type rewardTxProcessor struct { accounts state.AccountsAdapter pubkeyConv state.PubkeyConverter @@ -90,9 +95,30 @@ func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) e return err } + rtp.saveAccumulatedRewards(rTx, accHandler) + return rtp.accounts.SaveAccount(accHandler) } +func (rtp *rewardTxProcessor) saveAccumulatedRewards( + rtx *rewardTx.RewardTx, + userAccount state.UserAccountHandler, +) { + if !core.IsSmartContractAddress(rtx.RcvAddr) { + return + } + + existingReward := big.NewInt(0) + fullRewardKey := core.ElrondProtectedKeyPrefix + rewardKey + val, err := userAccount.DataTrieTracker().RetrieveValue([]byte(fullRewardKey)) + if err == nil { + existingReward.SetBytes(val) + } + + existingReward.Add(existingReward, rtx.Value) + userAccount.DataTrieTracker().SaveKeyValue([]byte(fullRewardKey), existingReward.Bytes()) +} + // IsInterfaceNil returns true if there is no value under the interface func (rtp *rewardTxProcessor) IsInterfaceNil() bool { return rtp == nil diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go index e3f59d1b418..4710c2744cc 100644 --- a/process/rewardTransaction/process_test.go +++ b/process/rewardTransaction/process_test.go @@ -5,6 +5,7 @@ import ( "math/big" "testing" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/process" @@ -208,3 +209,49 @@ func TestRewardTxProcessor_ProcessRewardTransactionShouldWork(t *testing.T) { assert.Nil(t, err) assert.True(t, saveAccountWasCalled) } + +func TestRewardTxProcessor_ProcessRewardTransactionToASmartContractShouldWork(t *testing.T) { + t.Parallel() + + saveAccountWasCalled := false + + address := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6} + userAccount, _ := state.NewUserAccount(address) + accountsDb := &mock.AccountsStub{ + LoadAccountCalled: func(address []byte) (state.AccountHandler, error) { + 
return userAccount, nil + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountWasCalled = true + return nil + }, + } + + rtp, _ := rewardTransaction.NewRewardTxProcessor( + accountsDb, + createMockPubkeyConverter(), + mock.NewMultiShardsCoordinatorMock(3), + ) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(100), + RcvAddr: address, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Nil(t, err) + assert.True(t, saveAccountWasCalled) + val, err := userAccount.DataTrieTracker().RetrieveValue([]byte(core.ElrondProtectedKeyPrefix + rewardTransaction.RewardKey)) + assert.Nil(t, err) + assert.True(t, rwdTx.Value.Cmp(big.NewInt(0).SetBytes(val)) == 0) + + err = rtp.ProcessRewardTransaction(&rwdTx) + assert.Nil(t, err) + assert.True(t, saveAccountWasCalled) + val, err = userAccount.DataTrieTracker().RetrieveValue([]byte(core.ElrondProtectedKeyPrefix + rewardTransaction.RewardKey)) + assert.Nil(t, err) + rwdTx.Value.Add(rwdTx.Value, rwdTx.Value) + assert.True(t, rwdTx.Value.Cmp(big.NewInt(0).SetBytes(val)) == 0) +} diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index e0ea6134216..46d82d2902a 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -2,6 +2,7 @@ package sync import ( "bytes" + "context" "math" "sync" "time" @@ -10,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/core/close" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" @@ -25,6 +27,8 @@ import ( var log = logger.GetOrCreate("process/sync") +var _ close.Closer = (*baseBootstrap)(nil) + // sleepTime defines the time in milliseconds between each iteration made in syncBlocks method const sleepTime = 5 * time.Millisecond @@ -105,6 +109,7 @@ type baseBootstrap struct { miniBlocksProvider process.MiniBlockProvider poolsHolder dataRetriever.PoolsHolder mutRequestHeaders sync.Mutex + cancelFunc func() } // setRequestedHeaderNonce method sets the header nonce requested by the sync mechanism @@ -455,28 +460,23 @@ func (boot *baseBootstrap) requestHeadersFromNonceIfMissing(fromNonce uint64) { boot.requestHeaders(fromNonce, toNonce) } -// StopSync method will stop SyncBlocks -func (boot *baseBootstrap) StopSync() { - boot.chStopSync <- true -} - // syncBlocks method calls repeatedly synchronization method SyncBlock -func (boot *baseBootstrap) syncBlocks() { +func (boot *baseBootstrap) syncBlocks(ctx context.Context) { for { - time.Sleep(sleepTime) + select { + case <-ctx.Done(): + log.Debug("bootstrap's go routine is stopping...") + return + case <-time.After(sleepTime): + } if !boot.networkWatcher.IsConnectedToTheNetwork() { continue } - select { - case <-boot.chStopSync: - return - default: - err := boot.syncStarter.SyncBlock() - if err != nil { - log.Debug("SyncBlock", "error", err.Error()) - } + err := boot.syncStarter.SyncBlock() + if err != nil { + log.Debug("SyncBlock", "error", err.Error()) } } } @@ -956,8 +956,6 @@ func (boot *baseBootstrap) init() { boot.poolsHolder.MiniBlocks().RegisterHandler(boot.receivedMiniblock) boot.headers.RegisterHandler(boot.processReceivedHeader) - boot.chStopSync = make(chan bool) - boot.statusHandler = statusHandler.NewNilStatusHandler() boot.syncStateListeners = make([]func(bool), 0) @@ -1000,3 +998,17 @@ func (boot *baseBootstrap) GetNodeState() core.NodeState { 
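The reworked bootstrap tests below own the context themselves: start the loop, let it spin, cancel, then assert. The same pattern works for any loop introduced in this patch; countingLoop and the timings here are illustrative stand-ins for syncBlocks:

package sync

import (
	"context"
	"sync/atomic"
	"testing"
	"time"
)

// countingLoop stands in for syncBlocks: one unit of work per tick.
func countingLoop(ctx context.Context, numCalls *uint32) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(5 * time.Millisecond):
		}

		atomic.AddUint32(numCalls, 1)
	}
}

func TestCountingLoop_StopsWhenCancelled(t *testing.T) {
	var numCalls uint32

	ctx, cancelFunc := context.WithCancel(context.Background())
	go countingLoop(ctx, &numCalls)

	// let the goroutine run a few cycles, then stop it
	time.Sleep(100 * time.Millisecond)
	cancelFunc()

	// give the loop time to observe the cancellation
	time.Sleep(50 * time.Millisecond)
	callsSoFar := atomic.LoadUint32(&numCalls)
	if callsSoFar == 0 {
		t.Fatal("expected the loop to have run at least once")
	}

	// after cancellation, the counter must not move any more
	time.Sleep(50 * time.Millisecond)
	if atomic.LoadUint32(&numCalls) != callsSoFar {
		t.Fatal("loop kept running after cancellation")
	}
}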
return core.NsNotSynchronized } + +// Close will close the endless running go routine +func (boot *baseBootstrap) Close() error { + if boot.cancelFunc != nil { + boot.cancelFunc() + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (boot *baseBootstrap) IsInterfaceNil() bool { + return boot == nil +} diff --git a/process/sync/baseSync_test.go b/process/sync/baseSync_test.go index e111976222f..e0562c79f9d 100644 --- a/process/sync/baseSync_test.go +++ b/process/sync/baseSync_test.go @@ -1,6 +1,7 @@ package sync import ( + "context" "sync/atomic" "testing" "time" @@ -31,9 +32,12 @@ func TestBaseBootstrap_SyncBlocksShouldNotCallSyncIfNotConnectedToTheNetwork(t * }, } - go boot.syncBlocks() + ctx, cancelFunc := context.WithCancel(context.Background()) + go boot.syncBlocks(ctx) + //make sure go routine started and waited a few cycles of boot.syncBlocks time.Sleep(time.Second + sleepTime*10) + cancelFunc() assert.Equal(t, uint32(0), atomic.LoadUint32(&numCalls)) } @@ -57,10 +61,12 @@ func TestBaseBootstrap_SyncBlocksShouldCallSyncIfConnectedToTheNetwork(t *testin }, } - go boot.syncBlocks() + ctx, cancelFunc := context.WithCancel(context.Background()) + go boot.syncBlocks(ctx) //make sure go routine started and waited a few cycles of boot.syncBlocks time.Sleep(time.Second + sleepTime*10) + cancelFunc() assert.True(t, atomic.LoadUint32(&numCalls) > 0) } diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 88a7c88375f..33411267146 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -1,6 +1,8 @@ package sync import ( + "context" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" @@ -102,8 +104,8 @@ func (boot *MetaBootstrap) getBlockBody(headerHandler data.HeaderHandler) (data. 
return &block.Body{MiniBlocks: miniBlocks}, nil } -// StartSync method will start SyncBlocks as a go routine -func (boot *MetaBootstrap) StartSync() { +// StartSyncingBlocks method will start syncing blocks as a go routine +func (boot *MetaBootstrap) StartSyncingBlocks() { // when a node starts it first tries to bootstrap from storage, if there already exist a database saved errNotCritical := boot.storageBootstrapper.LoadFromStorage() if errNotCritical != nil { @@ -115,7 +117,9 @@ func (boot *MetaBootstrap) StartSync() { boot.setLastEpochStartRound() } - go boot.syncBlocks() + var ctx context.Context + ctx, boot.cancelFunc = context.WithCancel(context.Background()) + go boot.syncBlocks(ctx) } func (boot *MetaBootstrap) setLastEpochStartRound() { @@ -249,11 +253,6 @@ func (boot *MetaBootstrap) getCurrHeader() (data.HeaderHandler, error) { return header, nil } -// IsInterfaceNil returns true if there is no value under the interface -func (boot *MetaBootstrap) IsInterfaceNil() bool { - return boot == nil -} - func (boot *MetaBootstrap) haveHeaderInPoolWithNonce(nonce uint64) bool { _, _, err := process.GetMetaHeaderFromPoolWithNonce( nonce, diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index d25aba76415..a8a490a7c56 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -371,9 +371,9 @@ func TestMetaBootstrap_ShouldNotNeedToSync(t *testing.T) { bs, _ := sync.NewMetaBootstrap(args) - bs.StartSync() + bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) - bs.StopSync() + bs.Close() } func TestMetaBootstrap_SyncShouldSyncOneBlock(t *testing.T) { @@ -442,7 +442,7 @@ func TestMetaBootstrap_SyncShouldSyncOneBlock(t *testing.T) { ) bs, _ := sync.NewMetaBootstrap(args) - bs.StartSync() + bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) @@ -452,7 +452,7 @@ func TestMetaBootstrap_SyncShouldSyncOneBlock(t *testing.T) { time.Sleep(500 * time.Millisecond) - bs.StopSync() + bs.Close() } func TestMetaBootstrap_ShouldReturnNilErr(t *testing.T) { diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index b49d11490a4..54222716f83 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -1,6 +1,7 @@ package sync import ( + "context" "math" "github.com/ElrondNetwork/elrond-go/core" @@ -100,8 +101,8 @@ func (boot *ShardBootstrap) getBlockBody(headerHandler data.HeaderHandler) (data return &block.Body{MiniBlocks: miniBlocks}, nil } -// StartSync method will start SyncBlocks as a go routine -func (boot *ShardBootstrap) StartSync() { +// StartSyncingBlocks method will start syncing blocks as a go routine +func (boot *ShardBootstrap) StartSyncingBlocks() { errNotCritical := boot.storageBootstrapper.LoadFromStorage() if errNotCritical != nil { log.Debug("boot.syncFromStorer", @@ -112,7 +113,9 @@ func (boot *ShardBootstrap) StartSync() { boot.blockProcessor.SetNumProcessedObj(numTxs) } - go boot.syncBlocks() + var ctx context.Context + ctx, boot.cancelFunc = context.WithCancel(context.Background()) + go boot.syncBlocks(ctx) } // SyncBlock method actually does the synchronization. 
It requests the next block header from the pool @@ -227,11 +230,6 @@ func (boot *ShardBootstrap) getCurrHeader() (data.HeaderHandler, error) { return header, nil } -// IsInterfaceNil returns true if there is no value under the interface -func (boot *ShardBootstrap) IsInterfaceNil() bool { - return boot == nil -} - func (boot *ShardBootstrap) haveHeaderInPoolWithNonce(nonce uint64) bool { _, _, err := process.GetShardHeaderFromPoolWithNonce( nonce, diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 26d240dcc81..0c5379c83a1 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -585,9 +585,9 @@ func TestBootstrap_ShouldNotNeedToSync(t *testing.T) { bs, _ := sync.NewShardBootstrap(args) - bs.StartSync() + bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) - bs.StopSync() + bs.Close() } func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { @@ -675,7 +675,7 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { ) bs, _ := sync.NewShardBootstrap(args) - bs.StartSync() + bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) @@ -685,7 +685,7 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { time.Sleep(500 * time.Millisecond) - bs.StopSync() + bs.Close() } func TestBootstrap_ShouldReturnNilErr(t *testing.T) { diff --git a/scripts/testnet/include/config.sh b/scripts/testnet/include/config.sh index d2d7adbad9d..e5b31592c95 100644 --- a/scripts/testnet/include/config.sh +++ b/scripts/testnet/include/config.sh @@ -21,8 +21,7 @@ copyConfig() { cp ./filegen/genesis.json ./node/config cp ./filegen/nodesSetup.json ./node/config - cp ./filegen/validatorKey.pem ./node/config - cp ./filegen/walletKey.pem ./node/config + cp ./filegen/*.pem ./node/config #there might be more .pem files there cp ./filegen/genesisSmartContracts.json ./node/config echo "Configuration files copied from the configuration generator to the working directories of the executables." 
popd diff --git a/storage/factory/common.go b/storage/factory/common.go index fec9d8da1ce..d431acfc946 100644 --- a/storage/factory/common.go +++ b/storage/factory/common.go @@ -13,10 +13,12 @@ const allFiles = -1 // GetCacherFromConfig will return the cache config needed for storage unit from a config came from the toml file func GetCacherFromConfig(cfg config.CacheConfig) storageUnit.CacheConfig { return storageUnit.CacheConfig{ - Size: cfg.Size, - SizeInBytes: cfg.SizeInBytes, - Type: storageUnit.CacheType(cfg.Type), - Shards: cfg.Shards, + Size: cfg.Size, + SizePerSender: cfg.SizePerSender, + SizeInBytes: cfg.SizeInBytes, + SizeInBytesPerSender: cfg.SizeInBytesPerSender, + Type: storageUnit.CacheType(cfg.Type), + Shards: cfg.Shards, } } diff --git a/storage/storageUnit/storageunit.go b/storage/storageUnit/storageunit.go index cf2703cc960..eecce45a640 100644 --- a/storage/storageUnit/storageunit.go +++ b/storage/storageUnit/storageunit.go @@ -7,7 +7,7 @@ import ( "sync" "time" - "github.com/ElrondNetwork/elrond-go-logger" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/hashing" @@ -67,10 +67,12 @@ type UnitConfig struct { // CacheConfig holds the configurable elements of a cache type CacheConfig struct { - Type CacheType - SizeInBytes uint32 - Size uint32 - Shards uint32 + Type CacheType + SizeInBytes uint32 + SizeInBytesPerSender uint32 + Size uint32 + SizePerSender uint32 + Shards uint32 } // DBConfig holds the configurable elements of a database diff --git a/storage/txcache/config.go b/storage/txcache/config.go index cdd29d7295e..1be512220a6 100644 --- a/storage/txcache/config.go +++ b/storage/txcache/config.go @@ -1,14 +1,49 @@ package txcache +import "fmt" + // CacheConfig holds cache configuration type CacheConfig struct { Name string NumChunksHint uint32 EvictionEnabled bool NumBytesThreshold uint32 + NumBytesPerSenderThreshold uint32 CountThreshold uint32 + CountPerSenderThreshold uint32 NumSendersToEvictInOneStep uint32 - LargeNumOfTxsForASender uint32 - NumTxsToEvictFromASender uint32 - MinGasPriceMicroErd uint32 + MinGasPriceNanoErd uint32 +} + +func (config *CacheConfig) verify() error { + if len(config.Name) == 0 { + return fmt.Errorf("%w: config.Name is invalid", errInvalidCacheConfig) + } + if config.NumChunksHint == 0 { + return fmt.Errorf("%w: config.NumChunksHint is invalid", errInvalidCacheConfig) + } + if config.NumBytesPerSenderThreshold == 0 { + return fmt.Errorf("%w: config.NumBytesPerSenderThreshold is invalid", errInvalidCacheConfig) + } + if config.CountPerSenderThreshold == 0 { + return fmt.Errorf("%w: config.CountPerSenderThreshold is invalid", errInvalidCacheConfig) + } + if config.MinGasPriceNanoErd == 0 { + return fmt.Errorf("%w: config.MinGasPriceNanoErd is invalid", errInvalidCacheConfig) + } + if config.EvictionEnabled { + if config.NumBytesThreshold == 0 { + return fmt.Errorf("%w: config.NumBytesThreshold is invalid", errInvalidCacheConfig) + } + + if config.CountThreshold == 0 { + return fmt.Errorf("%w: config.CountThreshold is invalid", errInvalidCacheConfig) + } + + if config.NumSendersToEvictInOneStep == 0 { + return fmt.Errorf("%w: config.NumSendersToEvictInOneStep is invalid", errInvalidCacheConfig) + } + } + + return nil } diff --git a/storage/txcache/disabledCache.go b/storage/txcache/disabledCache.go new file mode 100644 index 00000000000..cf587439a55 --- /dev/null +++ b/storage/txcache/disabledCache.go @@ -0,0 
+1,106 @@ +package txcache + +import ( + "github.com/ElrondNetwork/elrond-go/storage" +) + +var _ storage.Cacher = (*DisabledCache)(nil) + +// DisabledCache represents a disabled cache +type DisabledCache struct { +} + +// NewDisabledCache creates a new disabled cache +func NewDisabledCache() *DisabledCache { + return &DisabledCache{} +} + +// AddTx does nothing +func (cache *DisabledCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { + return false, false +} + +// GetByTxHash returns no transaction +func (cache *DisabledCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { + return nil, false +} + +// SelectTransactions returns an empty slice +func (cache *DisabledCache) SelectTransactions(numRequested int, batchSizePerSender int) []*WrappedTransaction { + return make([]*WrappedTransaction, 0) +} + +// RemoveTxByHash does nothing +func (cache *DisabledCache) RemoveTxByHash(txHash []byte) error { + return nil +} + +// CountTx returns zero +func (cache *DisabledCache) CountTx() int64 { + return 0 +} + +// Len returns zero +func (cache *DisabledCache) Len() int { + return 0 +} + +// ForEachTransaction does nothing +func (cache *DisabledCache) ForEachTransaction(function ForEachTransaction) { +} + +// Clear does nothing +func (cache *DisabledCache) Clear() { +} + +// Put does nothing +func (cache *DisabledCache) Put(key []byte, value interface{}) (evicted bool) { + return false +} + +// Get returns no transaction +func (cache *DisabledCache) Get(key []byte) (value interface{}, ok bool) { + return nil, false +} + +// Has returns false +func (cache *DisabledCache) Has(key []byte) bool { + return false +} + +// Peek returns no transaction +func (cache *DisabledCache) Peek(key []byte) (value interface{}, ok bool) { + return nil, false +} + +// HasOrAdd returns false, does nothing +func (cache *DisabledCache) HasOrAdd(key []byte, value interface{}) (ok, evicted bool) { + return false, false +} + +// Remove does nothing +func (cache *DisabledCache) Remove(key []byte) { +} + +// RemoveOldest does nothing +func (cache *DisabledCache) RemoveOldest() { +} + +// Keys returns an empty slice +func (cache *DisabledCache) Keys() txHashes { + return make([][]byte, 0) +} + +// MaxSize returns zero +func (cache *DisabledCache) MaxSize() int { + return 0 +} + +// RegisterHandler does nothing +func (cache *DisabledCache) RegisterHandler(func(key []byte, value interface{})) { +} + +// IsInterfaceNil returns true if there is no value under the interface +func (cache *DisabledCache) IsInterfaceNil() bool { + return cache == nil +} diff --git a/storage/txcache/disabledCache_test.go b/storage/txcache/disabledCache_test.go new file mode 100644 index 00000000000..ae749c0c023 --- /dev/null +++ b/storage/txcache/disabledCache_test.go @@ -0,0 +1,64 @@ +package txcache + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDisabledCache_DoesNothing(t *testing.T) { + cache := NewDisabledCache() + + ok, added := cache.AddTx(nil) + require.False(t, ok) + require.False(t, added) + + tx, ok := cache.GetByTxHash([]byte{}) + require.Nil(t, tx) + require.False(t, ok) + + selection := cache.SelectTransactions(42, 42) + require.Equal(t, 0, len(selection)) + + err := cache.RemoveTxByHash([]byte{}) + require.Nil(t, err) + + count := cache.CountTx() + require.Equal(t, int64(0), count) + + length := cache.Len() + require.Equal(t, 0, length) + + require.NotPanics(t, func() { cache.ForEachTransaction(func(_ []byte, _ *WrappedTransaction) {}) }) + + cache.Clear() + + evicted := cache.Put(nil, 
nil) + require.False(t, evicted) + + value, ok := cache.Get([]byte{}) + require.Nil(t, value) + require.False(t, ok) + + value, ok = cache.Peek([]byte{}) + require.Nil(t, value) + require.False(t, ok) + + has := cache.Has([]byte{}) + require.False(t, has) + + has, evicted = cache.HasOrAdd([]byte{}, nil) + require.False(t, has) + require.False(t, evicted) + + cache.Remove([]byte{}) + cache.RemoveOldest() + + keys := cache.Keys() + require.Equal(t, 0, len(keys)) + + maxSize := cache.MaxSize() + require.Equal(t, 0, maxSize) + + require.NotPanics(t, func() { cache.RegisterHandler(func(_ []byte, _ interface{}) {}) }) +} diff --git a/storage/txcache/errors.go b/storage/txcache/errors.go index 26e9c62aade..e58333db214 100644 --- a/storage/txcache/errors.go +++ b/storage/txcache/errors.go @@ -2,8 +2,7 @@ package txcache import "fmt" -// ErrTxNotFound signals that the transactions was not found in the cache -var ErrTxNotFound = fmt.Errorf("tx not found in cache") - -// ErrMapsSyncInconsistency signals that there's an inconsistency between the internal maps on which the cache relies -var ErrMapsSyncInconsistency = fmt.Errorf("maps sync inconsistency between 'txByHash' and 'txListBySender'") +var errTxNotFound = fmt.Errorf("tx not found in cache") +var errMapsSyncInconsistency = fmt.Errorf("maps sync inconsistency between 'txByHash' and 'txListBySender'") +var errTxDuplicated = fmt.Errorf("duplicated tx") +var errInvalidCacheConfig = fmt.Errorf("invalid cache config") diff --git a/storage/txcache/eviction.go b/storage/txcache/eviction.go index 774a915058b..81c266568da 100644 --- a/storage/txcache/eviction.go +++ b/storage/txcache/eviction.go @@ -11,12 +11,7 @@ func (cache *TxCache) doEviction() { return } - tooManyBytes := cache.areThereTooManyBytes() - tooManyTxs := cache.areThereTooManyTxs() - tooManySenders := cache.areThereTooManySenders() - - isCapacityExceeded := tooManyBytes || tooManyTxs || tooManySenders - if !isCapacityExceeded { + if !cache.isCapacityExceeded() { return } @@ -30,15 +25,9 @@ func (cache *TxCache) doEviction() { stopWatch := cache.monitorEvictionStart() - if tooManyTxs { - cache.makeSnapshotOfSenders() - journal.passOneNumTxs, journal.passOneNumSenders = cache.evictHighNonceTransactions() - journal.evictionPerformed = true - } - - if cache.shouldContinueEvictingSenders() { + if cache.isCapacityExceeded() { cache.makeSnapshotOfSenders() - journal.passTwoNumSteps, journal.passTwoNumTxs, journal.passTwoNumSenders = cache.evictSendersInLoop() + journal.passOneNumSteps, journal.passOneNumTxs, journal.passOneNumSenders = cache.evictSendersInLoop() journal.evictionPerformed = true } @@ -55,6 +44,10 @@ func (cache *TxCache) destroySnapshotOfSenders() { cache.evictionSnapshotOfSenders = nil } +func (cache *TxCache) isCapacityExceeded() bool { + return cache.areThereTooManyBytes() || cache.areThereTooManySenders() || cache.areThereTooManyTxs() +} + func (cache *TxCache) areThereTooManyBytes() bool { numBytes := cache.NumBytes() tooManyBytes := numBytes > int64(cache.config.NumBytesThreshold) @@ -73,53 +66,15 @@ func (cache *TxCache) areThereTooManyTxs() bool { return tooManyTxs } -func (cache *TxCache) shouldContinueEvictingSenders() bool { - return cache.areThereTooManyTxs() || cache.areThereTooManySenders() || cache.areThereTooManyBytes() -} - -// evictHighNonceTransactions removes transactions from the cache -// For senders with many transactions (> "LargeNumOfTxsForASender"), evict "NumTxsToEvictFromASender" transactions -// Also makes sure that there's no sender with 0 
transactions -func (cache *TxCache) evictHighNonceTransactions() (uint32, uint32) { - threshold := cache.config.LargeNumOfTxsForASender - numTxsToEvict := cache.config.NumTxsToEvictFromASender - - // Heuristics: estimate that ~10% of senders have more transactions than the threshold - sendersToEvictInitialCapacity := len(cache.evictionSnapshotOfSenders)/10 + 1 - txsToEvictInitialCapacity := sendersToEvictInitialCapacity * int(numTxsToEvict) - - sendersToEvict := make([]string, 0, sendersToEvictInitialCapacity) - txsToEvict := make([][]byte, 0, txsToEvictInitialCapacity) - - for _, txList := range cache.evictionSnapshotOfSenders { - if txList.HasMoreThan(threshold) { - txsToEvictForSender := txList.RemoveHighNonceTxs(numTxsToEvict) - txsToEvict = append(txsToEvict, txsToEvictForSender...) - } - - if txList.IsEmpty() { - sendersToEvict = append(sendersToEvict, txList.sender) - } - } - - // Note that, at this very moment, high nonce transactions have been evicted from senders' lists, - // but not yet from the map of transactions. - // - // This may cause slight inconsistencies, such as: - // - if a tx previously (recently) removed from the sender's list ("RemoveHighNonceTxs") arrives again at the pool, - // before the execution of "doEvictItems", the tx will be ignored as it still exists (for a short time) in the map of transactions. - return cache.doEvictItems(txsToEvict, sendersToEvict) -} - // This is called concurrently by two goroutines: the eviction one and the sweeping one -func (cache *TxCache) doEvictItems(txsToEvict [][]byte, sendersToEvict []string) (countTxs uint32, countSenders uint32) { +func (cache *TxCache) doEvictItems(txsToEvict txHashes, sendersToEvict []string) (countTxs uint32, countSenders uint32) { countTxs = cache.txByHash.RemoveTxsBulk(txsToEvict) countSenders = cache.txListBySender.RemoveSendersBulk(sendersToEvict) return } func (cache *TxCache) evictSendersInLoop() (uint32, uint32, uint32) { - return cache.evictSendersWhile(cache.shouldContinueEvictingSenders) + return cache.evictSendersWhile(cache.isCapacityExceeded) } // evictSendersWhileTooManyTxs removes transactions in a loop, as long as "shouldContinue" is true @@ -161,7 +116,7 @@ func (cache *TxCache) evictSendersWhile(shouldContinue func() bool) (step uint32 // This is called concurrently by two goroutines: the eviction one and the sweeping one func (cache *TxCache) evictSendersAndTheirTxs(listsToEvict []*txListForSender) (uint32, uint32) { sendersToEvict := make([]string, 0, len(listsToEvict)) - txsToEvict := make([][]byte, 0, approximatelyCountTxInLists(listsToEvict)) + txsToEvict := make(txHashes, 0, approximatelyCountTxInLists(listsToEvict)) for _, txList := range listsToEvict { sendersToEvict = append(sendersToEvict, txList.sender) diff --git a/storage/txcache/eviction_test.go b/storage/txcache/eviction_test.go index 7f22906285a..30943d86f5a 100644 --- a/storage/txcache/eviction_test.go +++ b/storage/txcache/eviction_test.go @@ -9,71 +9,21 @@ import ( "github.com/stretchr/testify/require" ) -func TestEviction_EvictHighNonceTransactions(t *testing.T) { - config := CacheConfig{ - NumChunksHint: 16, - CountThreshold: 400, - LargeNumOfTxsForASender: 50, - NumTxsToEvictFromASender: 25, - MinGasPriceMicroErd: 100, - } - - cache := NewTxCache(config) - - for index := 0; index < 200; index++ { - cache.AddTx(createTx([]byte{'a', byte(index)}, "alice", uint64(index))) - } - - for index := 0; index < 200; index++ { - cache.AddTx(createTx([]byte{'b', byte(index)}, "bob", uint64(index))) - } - - 
cache.AddTx(createTx([]byte("hash-carol"), "carol", uint64(1))) - - require.Equal(t, int64(3), cache.txListBySender.counter.Get()) - require.Equal(t, int64(401), cache.txByHash.counter.Get()) - - cache.makeSnapshotOfSenders() - nTxs, nSenders := cache.evictHighNonceTransactions() - - require.Equal(t, uint32(50), nTxs) - require.Equal(t, uint32(0), nSenders) - require.Equal(t, int64(3), cache.txListBySender.counter.Get()) - require.Equal(t, int64(351), cache.txByHash.counter.Get()) -} - -func TestEviction_EvictHighNonceTransactions_CoverEmptiedSenderList(t *testing.T) { - config := CacheConfig{ - NumChunksHint: 1, - CountThreshold: 0, - LargeNumOfTxsForASender: 0, - NumTxsToEvictFromASender: 1, - MinGasPriceMicroErd: 100, - } - - cache := NewTxCache(config) - cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) - require.Equal(t, int64(1), cache.CountSenders()) - - cache.makeSnapshotOfSenders() - - // Alice is also removed from the map of senders, since it has no transaction left - nTxs, nSenders := cache.evictHighNonceTransactions() - require.Equal(t, uint32(1), nTxs) - require.Equal(t, uint32(1), nSenders) - require.Equal(t, int64(0), cache.CountSenders()) -} - func TestEviction_EvictSendersWhileTooManyTxs(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, CountThreshold: 100, + CountPerSenderThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 20, NumBytesThreshold: math.MaxUint32, - MinGasPriceMicroErd: 100, + NumBytesPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) // 200 senders, each with 1 transaction for index := 0; index < 200; index++ { @@ -98,19 +48,24 @@ func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { numBytesPerTx := uint32(1000) config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, NumBytesThreshold: numBytesPerTx * 100, + NumBytesPerSenderThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 20, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) // 200 senders, each with 1 transaction for index := 0; index < 200; index++ { sender := string(createFakeSenderAddress(index)) - cache.AddTx(createTxWithParams([]byte{byte(index)}, sender, uint64(1), uint64(numBytesPerTx), 10000, 100*oneTrilion)) + cache.AddTx(createTxWithParams([]byte{byte(index)}, sender, uint64(1), uint64(numBytesPerTx), 10000, 100*oneBillion)) } require.Equal(t, int64(200), cache.txListBySender.counter.Get()) @@ -128,24 +83,28 @@ func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { func TestEviction_DoEvictionDoneInPassTwo_BecauseOfCount(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, NumBytesThreshold: math.MaxUint32, + NumBytesPerSenderThreshold: math.MaxUint32, CountThreshold: 2, + CountPerSenderThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 2, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } - cache := NewTxCache(config) - cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 1000, 100000, 100*oneTrilion)) - cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 1000, 100000, 100*oneTrilion)) - cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 1000, 100000, 700*oneTrilion)) + cache, err := NewTxCache(config) + 
require.Nil(t, err) + require.NotNil(t, cache) + + cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 1000, 100000, 100*oneBillion)) + cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 1000, 100000, 100*oneBillion)) + cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 1000, 100000, 700*oneBillion)) cache.doEviction() - require.Equal(t, uint32(0), cache.evictionJournal.passOneNumTxs) - require.Equal(t, uint32(0), cache.evictionJournal.passOneNumSenders) - require.Equal(t, uint32(2), cache.evictionJournal.passTwoNumTxs) - require.Equal(t, uint32(2), cache.evictionJournal.passTwoNumSenders) - require.Equal(t, uint32(1), cache.evictionJournal.passTwoNumSteps) + require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) + require.Equal(t, uint32(2), cache.evictionJournal.passOneNumSenders) + require.Equal(t, uint32(1), cache.evictionJournal.passOneNumSteps) // Alice and Bob evicted. Carol still there. _, ok := cache.GetByTxHash([]byte("hash-carol")) @@ -156,28 +115,32 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfCount(t *testing.T) { func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, NumBytesThreshold: 1000, + NumBytesPerSenderThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 2, - MinGasPriceMicroErd: 100, + MinGasPriceNanoErd: 100, } - cache := NewTxCache(config) - cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 800, 100000, 100*oneTrilion)) - cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 500, 100000, 100*oneTrilion)) - cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 200, 100000, 700*oneTrilion)) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + + cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 800, 100000, 100*oneBillion)) + cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 500, 100000, 100*oneBillion)) + cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 200, 100000, 700*oneBillion)) require.InDelta(t, float64(19.50394606), cache.getRawScoreOfSender("alice"), delta) require.InDelta(t, float64(23.68494667), cache.getRawScoreOfSender("bob"), delta) require.InDelta(t, float64(100), cache.getRawScoreOfSender("carol"), delta) cache.doEviction() - require.Equal(t, uint32(0), cache.evictionJournal.passOneNumTxs) - require.Equal(t, uint32(0), cache.evictionJournal.passOneNumSenders) - require.Equal(t, uint32(2), cache.evictionJournal.passTwoNumTxs) - require.Equal(t, uint32(2), cache.evictionJournal.passTwoNumSenders) - require.Equal(t, uint32(1), cache.evictionJournal.passTwoNumSteps) + require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) + require.Equal(t, uint32(2), cache.evictionJournal.passOneNumSenders) + require.Equal(t, uint32(1), cache.evictionJournal.passOneNumSteps) // Alice and Bob evicted (lower score). Carol still there. 
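+ // (Illustrative note, not part of this patch: with NumBytesThreshold = 1000 and 1500 bytes
+ // held in the cache, a single eviction step of NumSendersToEvictInOneStep = 2 removes the
+ // two lowest-score senders, alice and bob, leaving carol's 200 bytes under the threshold.)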
_, ok := cache.GetByTxHash([]byte("hash-carol")) @@ -188,12 +151,19 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { func TestEviction_doEvictionDoesNothingWhenAlreadyInProgress(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 1, CountThreshold: 0, NumSendersToEvictInOneStep: 1, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) cache.isEvictionInProgress.Set() @@ -204,12 +174,19 @@ func TestEviction_doEvictionDoesNothingWhenAlreadyInProgress(t *testing.T) { func TestEviction_evictSendersInLoop_CoverLoopBreak_WhenSmallBatch(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 1, CountThreshold: 0, NumSendersToEvictInOneStep: 42, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) cache.makeSnapshotOfSenders() @@ -222,12 +199,19 @@ func TestEviction_evictSendersInLoop_CoverLoopBreak_WhenSmallBatch(t *testing.T) func TestEviction_evictSendersWhile_ShouldContinueBreak(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 1, CountThreshold: 0, NumSendersToEvictInOneStep: 1, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) cache.AddTx(createTx([]byte("hash-bob"), "bob", uint64(1))) @@ -247,19 +231,24 @@ func TestEviction_evictSendersWhile_ShouldContinueBreak(t *testing.T) { // ~1 second on average laptop. func Test_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, EvictionEnabled: true, NumBytesThreshold: 1000000000, CountThreshold: 240000, NumSendersToEvictInOneStep: dataRetriever.TxPoolNumSendersToEvictInOneStep, - LargeNumOfTxsForASender: 1000, - NumTxsToEvictFromASender: 250, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, } numSenders := 25000 numTxsPerSender := 10 - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + addManyTransactionsWithUniformDistribution(cache, numSenders, numTxsPerSender) // Sometimes (due to map iteration non-determinism), more eviction happens - one more step of 100 senders. 
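	// (Back-of-the-envelope check, assuming TxPoolNumSendersToEvictInOneStep is 100, as the
	// comment above suggests: 25000 senders x 10 txs = 250000 txs; reaching CountThreshold =
	// 240000 takes 10 steps of 100 senders x 10 txs each, and the occasional extra step leaves 239000.)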
@@ -268,7 +257,7 @@ func Test_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { } func Test_EvictSendersAndTheirTxs_Concurrently(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() var wg sync.WaitGroup for i := 0; i < 10; i++ { diff --git a/storage/txcache/maps/bucketSortedMap_test.go b/storage/txcache/maps/bucketSortedMap_test.go index 98cf23948cc..94c075a0c80 100644 --- a/storage/txcache/maps/bucketSortedMap_test.go +++ b/storage/txcache/maps/bucketSortedMap_test.go @@ -241,7 +241,7 @@ func TestBucketSortedMap_GetSnapshotAscending(t *testing.T) { myMap := NewBucketSortedMap(4, 100) snapshot := myMap.GetSnapshotAscending() - require.ElementsMatch(t, []BucketSortedMapItem{}, snapshot) + require.Equal(t, []BucketSortedMapItem{}, snapshot) a := newScoredDummyItem("a", 15) b := newScoredDummyItem("b", 101) @@ -256,14 +256,14 @@ func TestBucketSortedMap_GetSnapshotAscending(t *testing.T) { simulateMutationThatChangesScore(myMap, "c") snapshot = myMap.GetSnapshotAscending() - require.ElementsMatch(t, []BucketSortedMapItem{c, a, b}, snapshot) + require.Equal(t, []BucketSortedMapItem{c, a, b}, snapshot) } func TestBucketSortedMap_GetSnapshotDescending(t *testing.T) { myMap := NewBucketSortedMap(4, 100) snapshot := myMap.GetSnapshotDescending() - require.ElementsMatch(t, []BucketSortedMapItem{}, snapshot) + require.Equal(t, []BucketSortedMapItem{}, snapshot) a := newScoredDummyItem("a", 15) b := newScoredDummyItem("b", 101) @@ -278,7 +278,7 @@ func TestBucketSortedMap_GetSnapshotDescending(t *testing.T) { simulateMutationThatChangesScore(myMap, "c") snapshot = myMap.GetSnapshotDescending() - require.ElementsMatch(t, []BucketSortedMapItem{b, a, c}, snapshot) + require.Equal(t, []BucketSortedMapItem{b, a, c}, snapshot) } func TestBucketSortedMap_AddManyItems(t *testing.T) { diff --git a/storage/txcache/monitoring.go b/storage/txcache/monitoring.go index 5dcac6617d6..95848b450ca 100644 --- a/storage/txcache/monitoring.go +++ b/storage/txcache/monitoring.go @@ -93,14 +93,11 @@ type evictionJournal struct { evictionPerformed bool passOneNumTxs uint32 passOneNumSenders uint32 - passTwoNumTxs uint32 - passTwoNumSenders uint32 - passTwoNumSteps uint32 + passOneNumSteps uint32 } func (journal *evictionJournal) display() { - log.Debug("Eviction.pass1:", "txs", journal.passOneNumTxs, "senders", journal.passOneNumSenders) - log.Debug("Eviction.pass2:", "txs", journal.passTwoNumTxs, "senders", journal.passTwoNumSenders, "steps", journal.passTwoNumSteps) + log.Debug("Eviction.pass1:", "txs", journal.passOneNumTxs, "senders", journal.passOneNumSenders, "steps", journal.passOneNumSteps) } func (cache *TxCache) diagnose() { diff --git a/storage/txcache/monitoring_test.go b/storage/txcache/monitoring_test.go index 257e5cb073a..23a27ca6d46 100644 --- a/storage/txcache/monitoring_test.go +++ b/storage/txcache/monitoring_test.go @@ -9,13 +9,19 @@ import ( func TestMonitoring_numTxAddedAndRemovedDuringEviction(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, CountThreshold: math.MaxUint32, NumBytesThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 1, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) cache.isEvictionInProgress.Set() @@ -35,13 +41,19 @@ func TestMonitoring_numTxAddedAndRemovedDuringEviction(t *testing.T) { func 
TestMonitoring_numTxAddedAndRemovedBetweenSelections(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, CountThreshold: math.MaxUint32, NumBytesThreshold: math.MaxUint32, NumSendersToEvictInOneStep: 1, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, } - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) require.Equal(t, int64(0), cache.numTxAddedBetweenSelections.Get()) diff --git a/storage/txcache/sweeping_test.go b/storage/txcache/sweeping_test.go index 2d4a9f5d6f3..9e5e5639525 100644 --- a/storage/txcache/sweeping_test.go +++ b/storage/txcache/sweeping_test.go @@ -7,7 +7,7 @@ import ( ) func TestSweeping_CollectSweepable(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) @@ -50,7 +50,7 @@ func TestSweeping_CollectSweepable(t *testing.T) { } func TestSweeping_WhenSendersEscapeCollection(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) @@ -95,7 +95,7 @@ func TestSweeping_WhenSendersEscapeCollection(t *testing.T) { } func TestSweeping_SweepSweepable(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) diff --git a/storage/txcache/testutils_test.go b/storage/txcache/testutils_test.go index 32bf54ddb04..83c1cee3cb9 100644 --- a/storage/txcache/testutils_test.go +++ b/storage/txcache/testutils_test.go @@ -11,19 +11,53 @@ import ( ) const oneMilion = 1000000 -const oneTrilion = oneMilion * oneMilion +const oneBillion = oneMilion * 1000 const delta = 0.00000001 -func toMicroERD(erd uint64) uint64 { - return erd * 1000000 +func toNanoERD(erd float64) uint64 { + return uint64(erd * float64(1000000000)) } func kBToBytes(kB float32) uint64 { return uint64(kB * 1000) } +func (cache *TxCache) areInternalMapsConsistent() bool { + internalMapByHash := cache.txByHash + internalMapBySender := cache.txListBySender + + senders := internalMapBySender.getSnapshotAscending() + numTransactionsInMapByHash := len(internalMapByHash.keys()) + numTransactionsInMapBySender := 0 + + for _, sender := range senders { + numTransactionsInMapBySender += int(sender.countTx()) + + for _, hash := range sender.getTxHashesAsStrings() { + _, ok := internalMapByHash.getTx(hash) + if !ok { + return false + } + } + } + + if numTransactionsInMapBySender != numTransactionsInMapByHash { + return false + } + + return true +} + +func (cache *TxCache) getHashesForSender(sender string) []string { + return cache.getListForSender(sender).getTxHashesAsStrings() +} + func (cache *TxCache) getListForSender(sender string) *txListForSender { - list, ok := cache.txListBySender.getListForSender(sender) + return cache.txListBySender.testGetListForSender(sender) +} + +func (sendersMap *txListBySenderMap) testGetListForSender(sender string) *txListForSender { + list, ok := sendersMap.getListForSender(sender) if !ok { panic("sender not in cache") } @@ -49,6 +83,21 @@ func (cache *TxCache) isSenderSweepable(sender string) bool { return false } +func (listForSender *txListForSender) getTxHashesAsStrings() []string { + hashes := listForSender.getTxHashes() + return hashesAsStrings(hashes) 
+} + +func hashesAsStrings(hashes txHashes) []string { + result := make([]string, len(hashes)) + + for i := 0; i < len(hashes); i++ { + result[i] = string(hashes[i]) + } + + return result +} + func addManyTransactionsWithUniformDistribution(cache *TxCache, nSenders int, nTransactionsPerSender int) { for senderTag := 0; senderTag < nSenders; senderTag++ { sender := createFakeSenderAddress(senderTag) diff --git a/storage/txcache/txByHashMap.go b/storage/txcache/txByHashMap.go index 626bce1b8a4..16f26a213e7 100644 --- a/storage/txcache/txByHashMap.go +++ b/storage/txcache/txByHashMap.go @@ -57,7 +57,7 @@ func (txMap *txByHashMap) getTx(txHash string) (*WrappedTransaction, bool) { } // RemoveTxsBulk removes transactions, in bulk -func (txMap *txByHashMap) RemoveTxsBulk(txHashes [][]byte) uint32 { +func (txMap *txByHashMap) RemoveTxsBulk(txHashes txHashes) uint32 { oldCount := uint32(txMap.counter.Get()) for _, txHash := range txHashes { @@ -65,6 +65,7 @@ func (txMap *txByHashMap) RemoveTxsBulk(txHashes [][]byte) uint32 { } newCount := uint32(txMap.counter.Get()) + // TODO: Check this for overflow as well, then fix in EN-6299 numRemoved := oldCount - newCount return numRemoved } @@ -85,9 +86,9 @@ func (txMap *txByHashMap) clear() { txMap.counter.Set(0) } -func (txMap *txByHashMap) keys() [][]byte { +func (txMap *txByHashMap) keys() txHashes { keys := txMap.backingMap.Keys() - keysAsBytes := make([][]byte, len(keys)) + keysAsBytes := make(txHashes, len(keys)) for i := 0; i < len(keys); i++ { keysAsBytes[i] = []byte(keys[i]) } diff --git a/storage/txcache/txCache.go b/storage/txcache/txCache.go index 680c76ea6da..69d09a833b7 100644 --- a/storage/txcache/txCache.go +++ b/storage/txcache/txCache.go @@ -10,6 +10,8 @@ import ( var _ storage.Cacher = (*TxCache)(nil) +type txHashes = [][]byte + // TxCache represents a cache-like structure (it has a fixed capacity and implements an eviction mechanism) for holding transactions type TxCache struct { name string @@ -29,9 +31,14 @@ type TxCache struct { } // NewTxCache creates a new transaction cache -func NewTxCache(config CacheConfig) *TxCache { +func NewTxCache(config CacheConfig) (*TxCache, error) { log.Debug("NewTxCache", "config", config) + err := config.verify() + if err != nil { + return nil, err + } + // Note: for simplicity, we use the same "numChunksHint" for both internal concurrent maps numChunksHint := config.NumChunksHint @@ -44,7 +51,7 @@ func NewTxCache(config CacheConfig) *TxCache { } txCache.initSweepable() - return txCache + return txCache, nil } // AddTx adds a transaction in the cache @@ -62,12 +69,16 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { } ok = true - added = cache.txByHash.addTx(tx) + added, evicted := cache.txListBySender.addTx(tx) if added { - cache.txListBySender.addTx(tx) + cache.txByHash.addTx(tx) cache.monitorTxAddition() } + if len(evicted) > 0 { + cache.txByHash.RemoveTxsBulk(evicted) + } + return } @@ -142,7 +153,7 @@ func (cache *TxCache) doAfterSelection() { func (cache *TxCache) RemoveTxByHash(txHash []byte) error { tx, ok := cache.txByHash.removeTx(string(txHash)) if !ok { - return ErrTxNotFound + return errTxNotFound } cache.monitorTxRemoval() @@ -150,7 +161,7 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) error { found := cache.txListBySender.removeTx(tx) if !found { cache.onRemoveTxInconsistency(txHash) - return ErrMapsSyncInconsistency + return errMapsSyncInconsistency } return nil @@ -234,7 +245,7 @@ func (cache *TxCache) RemoveOldest() { } // Keys returns the tx 
hashes in the cache -func (cache *TxCache) Keys() [][]byte { +func (cache *TxCache) Keys() txHashes { return cache.txByHash.keys() } diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index cecef150a0c..a093a5a3d3b 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -1,6 +1,7 @@ package txcache import ( + "errors" "fmt" "math" "sync" @@ -13,8 +14,73 @@ import ( "github.com/stretchr/testify/require" ) +func Test_NewTxCache(t *testing.T) { + config := CacheConfig{ + Name: "test", + NumChunksHint: 16, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, + } + + withEvictionConfig := CacheConfig{ + Name: "test", + NumChunksHint: 16, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, + EvictionEnabled: true, + NumBytesThreshold: math.MaxUint32, + CountThreshold: math.MaxUint32, + NumSendersToEvictInOneStep: 100, + } + + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + + badConfig := config + badConfig.Name = "" + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.Name") + + badConfig = config + badConfig.NumChunksHint = 0 + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.NumChunksHint") + + badConfig = config + badConfig.NumBytesPerSenderThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.NumBytesPerSenderThreshold") + + badConfig = config + badConfig.CountPerSenderThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.CountPerSenderThreshold") + + badConfig = config + badConfig.MinGasPriceNanoErd = 0 + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.MinGasPriceNanoErd") + + badConfig = withEvictionConfig + badConfig.NumBytesThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.NumBytesThreshold") + + badConfig = withEvictionConfig + badConfig.CountThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.CountThreshold") + + badConfig = withEvictionConfig + badConfig.NumSendersToEvictInOneStep = 0 + requireErrorOnNewTxCache(t, badConfig, errInvalidCacheConfig, "config.NumSendersToEvictInOneStep") +} + +func requireErrorOnNewTxCache(t *testing.T, config CacheConfig, errExpected error, errPartialMessage string) { + cache, errReceived := NewTxCache(config) + require.Nil(t, cache) + require.True(t, errors.Is(errReceived, errExpected)) + require.Contains(t, errReceived.Error(), errPartialMessage) +} + func Test_AddTx(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() tx := createTx([]byte("hash-1"), "alice", 1) @@ -33,7 +99,7 @@ func Test_AddTx(t *testing.T) { } func Test_AddNilTx_DoesNothing(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() txHash := []byte("hash-1") @@ -46,8 +112,46 @@ func Test_AddNilTx_DoesNothing(t *testing.T) { require.Nil(t, foundTx) } +func Test_AddTx_AppliesSizeConstraintsPerSenderForNumTransactions(t *testing.T) { + cache := newCacheToTest(math.MaxUint32, 3) + + cache.AddTx(createTx([]byte("tx-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("tx-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("tx-alice-4"), "alice", 4)) + cache.AddTx(createTx([]byte("tx-bob-1"), "bob", 1)) + cache.AddTx(createTx([]byte("tx-bob-2"), "bob", 2)) + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", 
"tx-alice-4"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) + + cache.AddTx(createTx([]byte("tx-alice-3"), "alice", 3)) + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) +} + +func Test_AddTx_AppliesSizeConstraintsPerSenderForNumBytes(t *testing.T) { + cache := newCacheToTest(1024, math.MaxUint32) + + cache.AddTx(createTxWithParams([]byte("tx-alice-1"), "alice", 1, 128, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-alice-2"), "alice", 2, 512, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-alice-4"), "alice", 3, 256, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-bob-1"), "bob", 1, 512, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 2, 513, 42, 42)) + + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1"}, cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) + + cache.AddTx(createTxWithParams([]byte("tx-alice-3"), "alice", 3, 256, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 3, 512, 42, 42)) + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) +} + func Test_RemoveByTxHash(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-1"), "alice", 1)) cache.AddTx(createTx([]byte("hash-2"), "alice", 2)) @@ -66,7 +170,7 @@ func Test_RemoveByTxHash(t *testing.T) { } func Test_CountTx_And_Len(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-1"), "alice", 1)) cache.AddTx(createTx([]byte("hash-2"), "alice", 2)) @@ -77,7 +181,7 @@ func Test_CountTx_And_Len(t *testing.T) { } func Test_GetByTxHash_And_Peek_And_Get(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() txHash := []byte("hash-1") tx := createTx(txHash, "alice", 1) @@ -105,13 +209,13 @@ func Test_GetByTxHash_And_Peek_And_Get(t *testing.T) { } func Test_RemoveByTxHash_Error_WhenMissing(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() err := cache.RemoveTxByHash([]byte("missing")) - require.Equal(t, err, ErrTxNotFound) + require.Equal(t, err, errTxNotFound) } func Test_RemoveByTxHash_Error_WhenMapsInconsistency(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() txHash := []byte("hash-1") tx := createTx(txHash, "alice", 1) @@ -121,11 +225,11 @@ func Test_RemoveByTxHash_Error_WhenMapsInconsistency(t *testing.T) { cache.txListBySender.removeTx(tx) err := cache.RemoveTxByHash(txHash) - require.Equal(t, err, ErrMapsSyncInconsistency) + require.Equal(t, err, errMapsSyncInconsistency) } func Test_Clear(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) @@ -137,7 +241,7 @@ func Test_Clear(t *testing.T) { } func Test_ForEachTransaction(t *testing.T) { - cache := newCacheToTest() + cache := 
newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) @@ -150,7 +254,7 @@ func Test_ForEachTransaction(t *testing.T) { } func Test_SelectTransactions_Dummy(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) @@ -166,7 +270,7 @@ func Test_SelectTransactions_Dummy(t *testing.T) { } func Test_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) @@ -187,7 +291,7 @@ func Test_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { } func Test_SelectTransactions(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() // Add "nSenders" * "nTransactionsPerSender" transactions in the cache (in reversed nonce order) nSenders := 1000 @@ -224,7 +328,7 @@ func Test_SelectTransactions(t *testing.T) { } func Test_Keys(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) cache.AddTx(createTx([]byte("alice-y"), "alice", 43)) @@ -241,38 +345,48 @@ func Test_Keys(t *testing.T) { func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { config := CacheConfig{ + Name: "untitled", NumChunksHint: 16, EvictionEnabled: true, NumBytesThreshold: math.MaxUint32, CountThreshold: 100, NumSendersToEvictInOneStep: 1, - LargeNumOfTxsForASender: math.MaxUint32, - NumTxsToEvictFromASender: 0, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, } // 11 * 10 - cache := NewTxCache(config) + cache, err := NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + addManyTransactionsWithUniformDistribution(cache, 11, 10) require.LessOrEqual(t, cache.CountTx(), int64(100)) config = CacheConfig{ + Name: "untitled", NumChunksHint: 16, EvictionEnabled: true, NumBytesThreshold: math.MaxUint32, CountThreshold: 250000, NumSendersToEvictInOneStep: 1, - LargeNumOfTxsForASender: math.MaxUint32, - NumTxsToEvictFromASender: 0, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, } // 100 * 1000 - cache = NewTxCache(config) + cache, err = NewTxCache(config) + require.Nil(t, err) + require.NotNil(t, cache) + addManyTransactionsWithUniformDistribution(cache, 100, 1000) require.LessOrEqual(t, cache.CountTx(), int64(250000)) } func Test_NotImplementedFunctions(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() evicted := cache.Put(nil, nil) require.False(t, evicted) @@ -290,7 +404,7 @@ func Test_NotImplementedFunctions(t *testing.T) { } func Test_IsInterfaceNil(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() require.False(t, check.IfNil(cache)) makeNil := func() storage.Cacher { @@ -302,11 +416,11 @@ func Test_IsInterfaceNil(t *testing.T) { } func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { - cache := newCacheToTest() + cache := newUnconstrainedCacheToTest() // Alice will quickly move between two score buckets (chunks) - cheapTransaction := createTxWithParams([]byte("alice-x-o"), "alice", 0, 128, 50000, 100*oneTrilion) - expensiveTransaction := createTxWithParams([]byte("alice-x-1"), 
"alice", 1, 128, 50000, 300*oneTrilion) + cheapTransaction := createTxWithParams([]byte("alice-x-o"), "alice", 0, 128, 50000, 100*oneBillion) + expensiveTransaction := createTxWithParams([]byte("alice-x-1"), "alice", 1, 128, 50000, 300*oneBillion) cache.AddTx(cheapTransaction) cache.AddTx(expensiveTransaction) @@ -339,6 +453,32 @@ func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { require.False(t, timedOut, "Timed out. Perhaps deadlock?") } -func newCacheToTest() *TxCache { - return NewTxCache(CacheConfig{Name: "test", NumChunksHint: 16, MinGasPriceMicroErd: 100}) +func newUnconstrainedCacheToTest() *TxCache { + cache, err := NewTxCache(CacheConfig{ + Name: "test", + NumChunksHint: 16, + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, + }) + if err != nil { + panic(fmt.Sprintf("newUnconstrainedCacheToTest(): %s", err)) + } + + return cache +} + +func newCacheToTest(numBytesPerSenderThreshold uint32, countPerSenderThreshold uint32) *TxCache { + cache, err := NewTxCache(CacheConfig{ + Name: "test", + NumChunksHint: 16, + NumBytesPerSenderThreshold: numBytesPerSenderThreshold, + CountPerSenderThreshold: countPerSenderThreshold, + MinGasPriceNanoErd: 100, + }) + if err != nil { + panic(fmt.Sprintf("newCacheToTest(): %s", err)) + } + + return cache } diff --git a/storage/txcache/txListBySenderMap.go b/storage/txcache/txListBySenderMap.go index 0b45b9b6817..1c6b7a2f14c 100644 --- a/storage/txcache/txListBySenderMap.go +++ b/storage/txcache/txListBySenderMap.go @@ -25,10 +25,10 @@ func newTxListBySenderMap(nChunksHint uint32, cacheConfig CacheConfig) txListByS } // addTx adds a transaction in the map, in the corresponding list (selected by its sender) -func (txMap *txListBySenderMap) addTx(tx *WrappedTransaction) { +func (txMap *txListBySenderMap) addTx(tx *WrappedTransaction) (bool, txHashes) { sender := string(tx.Tx.GetSndAddr()) listForSender := txMap.getOrAddListForSender(sender) - listForSender.AddTx(tx) + return listForSender.AddTx(tx) } func (txMap *txListBySenderMap) getOrAddListForSender(sender string) *txListForSender { @@ -75,8 +75,8 @@ func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { } isFound := listForSender.RemoveTx(tx) - - if listForSender.IsEmpty() { + isEmpty := listForSender.IsEmpty() + if isEmpty { txMap.removeSender(sender) } diff --git a/storage/txcache/txListBySenderMap_test.go b/storage/txcache/txListBySenderMap_test.go index 36d740659e0..d97fff3c6ab 100644 --- a/storage/txcache/txListBySenderMap_test.go +++ b/storage/txcache/txListBySenderMap_test.go @@ -2,6 +2,7 @@ package txcache import ( "fmt" + "math" "sync" "testing" @@ -22,17 +23,21 @@ func TestSendersMap_AddTx_IncrementsCounter(t *testing.T) { func TestSendersMap_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T) { myMap := newSendersMapToTest() - txAlice1 := createTx([]byte("a"), "alice", uint64(1)) - txAlice2 := createTx([]byte("a"), "alice", uint64(2)) + txAlice1 := createTx([]byte("a1"), "alice", uint64(1)) + txAlice2 := createTx([]byte("a2"), "alice", uint64(2)) txBob := createTx([]byte("b"), "bob", uint64(1)) myMap.addTx(txAlice1) myMap.addTx(txAlice2) myMap.addTx(txBob) require.Equal(t, int64(2), myMap.counter.Get()) + require.Equal(t, uint64(2), myMap.testGetListForSender("alice").countTx()) + require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) myMap.removeTx(txAlice1) require.Equal(t, int64(2), myMap.counter.Get()) + require.Equal(t, uint64(1), 
myMap.testGetListForSender("alice").countTx()) + require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) myMap.removeTx(txAlice2) // All alice's transactions have been removed now @@ -171,5 +176,8 @@ func createTxListBySenderMap(numSenders int) txListBySenderMap { } func newSendersMapToTest() txListBySenderMap { - return newTxListBySenderMap(4, CacheConfig{}) + return newTxListBySenderMap(4, CacheConfig{ + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + }) } diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go index fbb5d16072d..06152ce7dac 100644 --- a/storage/txcache/txListForSender.go +++ b/storage/txcache/txListForSender.go @@ -45,14 +45,15 @@ func newTxListForSender(sender string, cacheConfig *CacheConfig, onScoreChange s // AddTx adds a transaction in sender's list // This is a "sorted" insert -func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) { +func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) (bool, txHashes) { // We don't allow concurrent interceptor goroutines to mutate a given sender's list listForSender.mutex.Lock() defer listForSender.mutex.Unlock() - nonce := tx.Tx.GetNonce() - gasPrice := tx.Tx.GetGasPrice() - insertionPlace := listForSender.findInsertionPlace(nonce, gasPrice) + insertionPlace, err := listForSender.findInsertionPlace(tx) + if err != nil { + return false, nil + } if insertionPlace == nil { listForSender.items.PushFront(tx) @@ -61,32 +62,82 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) { } listForSender.onAddedTransaction(tx) + evicted := listForSender.applySizeConstraints() + listForSender.triggerScoreChange() + + return true, evicted +} + +// This function should only be used in critical section (listForSender.mutex) +func (listForSender *txListForSender) applySizeConstraints() txHashes { + evictedTxHashes := make(txHashes, 0) + + // Iterate back to front + for element := listForSender.items.Back(); element != nil; element = element.Prev() { + if !listForSender.isCapacityExceeded() { + break + } + + listForSender.items.Remove(element) + listForSender.onRemovedListElement(element) + + // Keep track of removed transactions + value := element.Value.(*WrappedTransaction) + evictedTxHashes = append(evictedTxHashes, value.TxHash) + } + + return evictedTxHashes +} + +func (listForSender *txListForSender) isCapacityExceeded() bool { + maxBytes := int64(listForSender.cacheConfig.NumBytesPerSenderThreshold) + maxNumTxs := uint64(listForSender.cacheConfig.CountPerSenderThreshold) + tooManyBytes := listForSender.totalBytes.Get() > maxBytes + tooManyTxs := listForSender.countTx() > maxNumTxs + + return tooManyBytes || tooManyTxs } func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction) { listForSender.totalBytes.Add(int64(estimateTxSize(tx))) listForSender.totalGas.Add(int64(estimateTxGas(tx))) listForSender.totalFee.Add(int64(estimateTxFee(tx))) +} + +func (listForSender *txListForSender) triggerScoreChange() { listForSender.onScoreChange(listForSender) } // This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) findInsertionPlace(incomingNonce uint64, incomingGasPrice uint64) *list.Element { +func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTransaction) (*list.Element, error) { + incomingNonce := incomingTx.Tx.GetNonce() + incomingGasPrice := incomingTx.Tx.GetGasPrice() + for element := 
listForSender.items.Back(); element != nil; element = element.Prev() { - tx := element.Value.(*WrappedTransaction).Tx - nonce := tx.GetNonce() - gasPrice := tx.GetGasPrice() + currentTx := element.Value.(*WrappedTransaction) + currentTxNonce := currentTx.Tx.GetNonce() + currentTxGasPrice := currentTx.Tx.GetGasPrice() - if nonce == incomingNonce && gasPrice > incomingGasPrice { - return element + if incomingTx.sameAs(currentTx) { + // The incoming transaction will be discarded + return nil, errTxDuplicated } - if nonce < incomingNonce { - return element + if currentTxNonce == incomingNonce && currentTxGasPrice > incomingGasPrice { + // The incoming transaction will be placed right after the existing one, which has same nonce but higher price. + // If the nonces are the same, but the incoming gas price is higher or equal, the search loop continues. + return element, nil + } + + if currentTxNonce < incomingNonce { + // We've found the first transaction with a lower nonce than the incoming one, + // thus the incoming transaction will be placed right after this one. + return element, nil } } - return nil + // The incoming transaction will be inserted at the head of the list. + return nil, nil } // RemoveTx removes a transaction from the sender's list @@ -100,6 +151,7 @@ func (listForSender *txListForSender) RemoveTx(tx *WrappedTransaction) bool { if isFound { listForSender.items.Remove(marker) listForSender.onRemovedListElement(marker) + listForSender.triggerScoreChange() } return isFound @@ -111,32 +163,6 @@ func (listForSender *txListForSender) onRemovedListElement(element *list.Element listForSender.totalBytes.Subtract(int64(estimateTxSize(value))) listForSender.totalGas.Subtract(int64(estimateTxGas(value))) listForSender.totalFee.Subtract(int64(estimateTxFee(value))) - listForSender.onScoreChange(listForSender) -} - -// RemoveHighNonceTxs removes "count" transactions from the back of the list -func (listForSender *txListForSender) RemoveHighNonceTxs(count uint32) [][]byte { - listForSender.mutex.Lock() - defer listForSender.mutex.Unlock() - - removedTxHashes := make([][]byte, count) - - index := uint32(0) - var previous *list.Element - for element := listForSender.items.Back(); element != nil && count > index; element = previous { - // Remove node - previous = element.Prev() - listForSender.items.Remove(element) - listForSender.onRemovedListElement(element) - - // Keep track of removed transaction - value := element.Value.(*WrappedTransaction) - removedTxHashes[index] = value.TxHash - - index++ - } - - return removedTxHashes } // This function should only be used in critical section (listForSender.mutex) @@ -157,11 +183,6 @@ func (listForSender *txListForSender) findListElementWithTx(txToFind *WrappedTra return nil } -// HasMoreThan checks whether the list has more items than specified -func (listForSender *txListForSender) HasMoreThan(count uint32) bool { - return uint32(listForSender.countTxWithLock()) > count -} - // IsEmpty checks whether the list is empty func (listForSender *txListForSender) IsEmpty() bool { return listForSender.countTxWithLock() == 0 @@ -225,11 +246,11 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati } // getTxHashes returns the hashes of transactions in the list -func (listForSender *txListForSender) getTxHashes() [][]byte { +func (listForSender *txListForSender) getTxHashes() txHashes { listForSender.mutex.RLock() defer listForSender.mutex.RUnlock() - result := make([][]byte, 0, listForSender.countTx()) + result := make(txHashes, 
0, listForSender.countTx()) for element := listForSender.items.Front(); element != nil; element = element.Next() { value := element.Value.(*WrappedTransaction) diff --git a/storage/txcache/txListForSenderAsSortedMapItem.go b/storage/txcache/txListForSenderAsSortedMapItem.go index df567f44463..0fa62379e34 100644 --- a/storage/txcache/txListForSenderAsSortedMapItem.go +++ b/storage/txcache/txListForSenderAsSortedMapItem.go @@ -13,10 +13,10 @@ type senderScoreParams struct { count uint64 // Size is in bytes size uint64 - // Fee is in micro ERD + // Fee is in nano ERD fee uint64 gas uint64 - // Price is in micro ERD + // Price is in nano ERD minGasPrice uint32 } @@ -41,7 +41,7 @@ func (listForSender *txListForSender) computeRawScore() float64 { gas := listForSender.totalGas.GetUint64() size := listForSender.totalBytes.GetUint64() count := listForSender.countTx() - minGasPrice := listForSender.cacheConfig.MinGasPriceMicroErd + minGasPrice := listForSender.cacheConfig.MinGasPriceNanoErd return computeSenderScore(senderScoreParams{count: count, size: size, fee: fee, gas: gas, minGasPrice: minGasPrice}) } @@ -59,8 +59,8 @@ func (listForSender *txListForSender) computeRawScore() float64 { // For asymptoticScore, see (https://en.wikipedia.org/wiki/Logistic_function) // // Where: -// - PPUAvg: average gas points (fee) per processing unit, in micro ERD -// - PPUMin: minimum gas points (fee) per processing unit (given by economics.toml), in micro ERD +// - PPUAvg: average gas points (fee) per processing unit, in nano ERD +// - PPUMin: minimum gas points (fee) per processing unit (given by economics.toml), in nano ERD // - txCount: number of transactions // - txSize: size of transactions, in kB (1000 bytes) // diff --git a/storage/txcache/txListForSenderAsSortedMapItem_test.go b/storage/txcache/txListForSenderAsSortedMapItem_test.go index b94a055f759..369671cd7d4 100644 --- a/storage/txcache/txListForSenderAsSortedMapItem_test.go +++ b/storage/txcache/txListForSenderAsSortedMapItem_test.go @@ -7,11 +7,11 @@ import ( ) func TestSenderAsBucketSortedMapItem_ComputeScore(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() - list.AddTx(createTxWithParams([]byte("a"), ".", 1, 1000, 200000, 100*oneTrilion)) - list.AddTx(createTxWithParams([]byte("b"), ".", 1, 500, 100000, 100*oneTrilion)) - list.AddTx(createTxWithParams([]byte("c"), ".", 1, 500, 100000, 100*oneTrilion)) + list.AddTx(createTxWithParams([]byte("a"), ".", 1, 1000, 200000, 100*oneBillion)) + list.AddTx(createTxWithParams([]byte("b"), ".", 1, 500, 100000, 100*oneBillion)) + list.AddTx(createTxWithParams([]byte("c"), ".", 1, 500, 100000, 100*oneBillion)) require.Equal(t, uint64(3), list.countTx()) require.Equal(t, int64(2000), list.totalBytes.Get()) @@ -22,11 +22,11 @@ func TestSenderAsBucketSortedMapItem_ComputeScore(t *testing.T) { } func TestSenderAsBucketSortedMapItem_ScoreFluctuatesDeterministicallyWhenTransactionsAreAddedOrRemoved(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() - A := createTxWithParams([]byte("A"), ".", 1, 1000, 200000, 100*oneTrilion) - B := createTxWithParams([]byte("b"), ".", 1, 500, 100000, 100*oneTrilion) - C := createTxWithParams([]byte("c"), ".", 1, 500, 100000, 100*oneTrilion) + A := createTxWithParams([]byte("A"), ".", 1, 1000, 200000, 100*oneBillion) + B := createTxWithParams([]byte("b"), ".", 1, 500, 100000, 100*oneBillion) + C := createTxWithParams([]byte("c"), ".", 1, 500, 100000, 100*oneBillion) scoreNone := int(list.ComputeScore()) list.AddTx(A) @@ 
-55,23 +55,23 @@ func TestSenderAsBucketSortedMapItem_ScoreFluctuatesDeterministicallyWhenTransac } func Test_computeSenderScore(t *testing.T) { - score := computeSenderScore(senderScoreParams{count: 14000, size: kBToBytes(100000), fee: toMicroERD(300000), gas: 2500000000, minGasPrice: 100}) + score := computeSenderScore(senderScoreParams{count: 14000, size: kBToBytes(100000), fee: toNanoERD(300), gas: 2500000000, minGasPrice: 100}) require.InDelta(t, float64(0.1789683371), score, delta) - score = computeSenderScore(senderScoreParams{count: 19000, size: kBToBytes(3000), fee: toMicroERD(2300000), gas: 19000000000, minGasPrice: 100}) + score = computeSenderScore(senderScoreParams{count: 19000, size: kBToBytes(3000), fee: toNanoERD(2300), gas: 19000000000, minGasPrice: 100}) require.InDelta(t, float64(0.2517997181), score, delta) - score = computeSenderScore(senderScoreParams{count: 3, size: kBToBytes(2), fee: toMicroERD(40), gas: 400000, minGasPrice: 100}) + score = computeSenderScore(senderScoreParams{count: 3, size: kBToBytes(2), fee: toNanoERD(0.04), gas: 400000, minGasPrice: 100}) require.InDelta(t, float64(5.795382396), score, delta) - score = computeSenderScore(senderScoreParams{count: 1, size: kBToBytes(0.3), fee: toMicroERD(50), gas: 100000, minGasPrice: 100}) + score = computeSenderScore(senderScoreParams{count: 1, size: kBToBytes(0.3), fee: toNanoERD(0.05), gas: 100000, minGasPrice: 100}) require.InDelta(t, float64(100), score, delta) } func Benchmark_computeSenderScore(b *testing.B) { for i := 0; i < b.N; i++ { for j := uint64(0); j < 10000000; j++ { - computeSenderScore(senderScoreParams{count: j, size: (j + 1) * 500, fee: toMicroERD(11 * j), gas: 100000 * j}) + computeSenderScore(senderScoreParams{count: j, size: (j + 1) * 500, fee: toNanoERD(float64(0.011) * float64(j)), gas: 100000 * j}) } } } diff --git a/storage/txcache/txListForSender_test.go b/storage/txcache/txListForSender_test.go index 144357e3f66..76780ec02f4 100644 --- a/storage/txcache/txListForSender_test.go +++ b/storage/txcache/txListForSender_test.go @@ -8,26 +8,18 @@ import ( ) func TestListForSender_AddTx_Sorts(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.AddTx(createTx([]byte("a"), ".", 1)) list.AddTx(createTx([]byte("c"), ".", 3)) list.AddTx(createTx([]byte("d"), ".", 4)) list.AddTx(createTx([]byte("b"), ".", 2)) - txHashes := list.getTxHashes() - - require.Equal(t, 4, list.items.Len()) - require.Equal(t, 4, len(txHashes)) - - require.Equal(t, []byte("a"), txHashes[0]) - require.Equal(t, []byte("b"), txHashes[1]) - require.Equal(t, []byte("c"), txHashes[2]) - require.Equal(t, []byte("d"), txHashes[3]) + require.Equal(t, []string{"a", "b", "c", "d"}, list.getTxHashesAsStrings()) } func TestListForSender_AddTx_GivesPriorityToHigherGas(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 42, 42)) list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 42, 100)) @@ -35,20 +27,87 @@ func TestListForSender_AddTx_GivesPriorityToHigherGas(t *testing.T) { list.AddTx(createTxWithParams([]byte("d"), ".", 2, 128, 42, 42)) list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 42, 101)) - txHashes := list.getTxHashes() + require.Equal(t, []string{"a", "d", "e", "b", "c"}, list.getTxHashesAsStrings()) +} + +func TestListForSender_AddTx_SortsCorrectlyWhenSameNonceSamePrice(t *testing.T) { + list := newUnconstrainedListToTest() + + list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 42, 42)) + 
list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 42, 100)) + list.AddTx(createTxWithParams([]byte("c"), ".", 3, 128, 42, 100)) + list.AddTx(createTxWithParams([]byte("d"), ".", 3, 128, 42, 98)) + list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 42, 101)) + list.AddTx(createTxWithParams([]byte("f"), ".", 2, 128, 42, 42)) + list.AddTx(createTxWithParams([]byte("g"), ".", 3, 128, 42, 99)) + + // In case of same-nonce, same-price transactions, the newer one has priority + require.Equal(t, []string{"a", "f", "e", "c", "b", "g", "d"}, list.getTxHashesAsStrings()) +} + +func TestListForSender_AddTx_IgnoresDuplicates(t *testing.T) { + list := newUnconstrainedListToTest() + + added, _ := list.AddTx(createTx([]byte("tx1"), ".", 1)) + require.True(t, added) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2)) + require.True(t, added) + added, _ = list.AddTx(createTx([]byte("tx3"), ".", 3)) + require.True(t, added) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2)) + require.False(t, added) +} + +func TestListForSender_AddTx_AppliesSizeConstraintsForNumTransactions(t *testing.T) { + list := newListToTest(math.MaxUint32, 3) + + list.AddTx(createTx([]byte("tx1"), ".", 1)) + list.AddTx(createTx([]byte("tx5"), ".", 5)) + list.AddTx(createTx([]byte("tx4"), ".", 4)) + list.AddTx(createTx([]byte("tx2"), ".", 2)) + require.Equal(t, []string{"tx1", "tx2", "tx4"}, list.getTxHashesAsStrings()) - require.Equal(t, 5, list.items.Len()) - require.Equal(t, 5, len(txHashes)) + _, evicted := list.AddTx(createTx([]byte("tx3"), ".", 3)) + require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) - require.Equal(t, []byte("a"), txHashes[0]) - require.Equal(t, []byte("d"), txHashes[1]) - require.Equal(t, []byte("e"), txHashes[2]) - require.Equal(t, []byte("b"), txHashes[3]) - require.Equal(t, []byte("c"), txHashes[4]) + // Gives priority to higher gas - though undesirably to some extent, "tx3" is evicted + _, evicted = list.AddTx(createTxWithParams([]byte("tx2++"), ".", 2, 128, 42, 42)) + require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx3"}, hashesAsStrings(evicted)) + + // Though undesirably to some extent, "tx3++" is added, then evicted + _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 128, 42, 42)) + require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx3++"}, hashesAsStrings(evicted)) +} + +func TestListForSender_AddTx_AppliesSizeConstraintsForNumBytes(t *testing.T) { + list := newListToTest(1024, math.MaxUint32) + + list.AddTx(createTxWithParams([]byte("tx1"), ".", 1, 128, 42, 42)) + list.AddTx(createTxWithParams([]byte("tx2"), ".", 2, 512, 42, 42)) + list.AddTx(createTxWithParams([]byte("tx3"), ".", 3, 256, 42, 42)) + _, evicted := list.AddTx(createTxWithParams([]byte("tx5"), ".", 4, 256, 42, 42)) + require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx5"}, hashesAsStrings(evicted)) + + _, evicted = list.AddTx(createTxWithParams([]byte("tx5--"), ".", 4, 128, 42, 42)) + require.Equal(t, []string{"tx1", "tx2", "tx3", "tx5--"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{}, hashesAsStrings(evicted)) + + _, evicted = list.AddTx(createTxWithParams([]byte("tx4"), ".", 4, 128, 42, 42)) + require.Equal(t, []string{"tx1", "tx2", "tx3", "tx4"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx5--"}, 
hashesAsStrings(evicted)) + + // Gives priority to higher gas - though undesirably to some extent, "tx4" is evicted + _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 256, 42, 100)) + require.Equal(t, []string{"tx1", "tx2", "tx3++", "tx3"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) } func TestListForSender_findTx(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() txA := createTx([]byte("A"), ".", 41) txANewer := createTx([]byte("ANewer"), ".", 41) @@ -63,6 +122,10 @@ func TestListForSender_findTx(t *testing.T) { elementWithB := list.findListElementWithTx(txB) noElementWithD := list.findListElementWithTx(txD) + require.NotNil(t, elementWithA) + require.NotNil(t, elementWithANewer) + require.NotNil(t, elementWithB) + require.Equal(t, txA, elementWithA.Value.(*WrappedTransaction)) require.Equal(t, txANewer, elementWithANewer.Value.(*WrappedTransaction)) require.Equal(t, txB, elementWithB.Value.(*WrappedTransaction)) @@ -70,7 +133,7 @@ func TestListForSender_findTx(t *testing.T) { } func TestListForSender_findTx_CoverNonceComparisonOptimization(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.AddTx(createTx([]byte("A"), ".", 42)) // Find one with a lower nonce, not added to cache @@ -79,7 +142,7 @@ func TestListForSender_findTx_CoverNonceComparisonOptimization(t *testing.T) { } func TestListForSender_RemoveTransaction(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() tx := createTx([]byte("a"), ".", 1) list.AddTx(tx) @@ -90,46 +153,15 @@ func TestListForSender_RemoveTransaction(t *testing.T) { } func TestListForSender_RemoveTransaction_NoPanicWhenTxMissing(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() tx := createTx([]byte(""), ".", 1) list.RemoveTx(tx) require.Equal(t, 0, list.items.Len()) } -func TestListForSender_RemoveHighNonceTransactions(t *testing.T) { - list := newListToTest() - - for index := 0; index < 100; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index))) - } - - list.RemoveHighNonceTxs(50) - require.Equal(t, 50, list.items.Len()) - - list.RemoveHighNonceTxs(20) - require.Equal(t, 30, list.items.Len()) - - list.RemoveHighNonceTxs(30) - require.Equal(t, 0, list.items.Len()) -} - -func TestListForSender_RemoveHighNonceTransactions_NoPanicWhenCornerCases(t *testing.T) { - list := newListToTest() - - for index := 0; index < 100; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index))) - } - - list.RemoveHighNonceTxs(0) - require.Equal(t, 100, list.items.Len()) - - list.RemoveHighNonceTxs(500) - require.Equal(t, 0, list.items.Len()) -} - func TestListForSender_SelectBatchTo(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() for index := 0; index < 100; index++ { list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index))) @@ -158,7 +190,7 @@ func TestListForSender_SelectBatchTo(t *testing.T) { } func TestListForSender_SelectBatchTo_NoPanicWhenCornerCases(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() for index := 0; index < 100; index++ { list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index))) @@ -176,7 +208,7 @@ func TestListForSender_SelectBatchTo_NoPanicWhenCornerCases(t *testing.T) { } func TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.notifyAccountNonce(1) @@ -207,7 +239,7 @@ func 
TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { } func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.notifyAccountNonce(1) @@ -240,7 +272,7 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) } func TestListForSender_SelectBatchTo_WhenGracePeriodWithNoGapResolve(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.notifyAccountNonce(1) @@ -272,7 +304,7 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithNoGapResolve(t *testing. } func TestListForSender_NotifyAccountNonce(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() require.Equal(t, uint64(0), list.accountNonce.Get()) require.False(t, list.accountNonceKnown.IsSet()) @@ -284,21 +316,21 @@ func TestListForSender_NotifyAccountNonce(t *testing.T) { } func TestListForSender_hasInitialGap(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() list.notifyAccountNonce(42) // No transaction, no gap require.False(t, list.hasInitialGap()) // One gap - list.AddTx(createTx([]byte("tx-44"), ".", 43)) + list.AddTx(createTx([]byte("tx-43"), ".", 43)) require.True(t, list.hasInitialGap()) // Resolve gap - list.AddTx(createTx([]byte("tx-44"), ".", 42)) + list.AddTx(createTx([]byte("tx-42"), ".", 42)) require.False(t, list.hasInitialGap()) } func TestListForSender_getTxHashes(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() require.Len(t, list.getTxHashes(), 0) list.AddTx(createTx([]byte("A"), ".", 1)) @@ -310,12 +342,11 @@ func TestListForSender_getTxHashes(t *testing.T) { } func TestListForSender_DetectRaceConditions(t *testing.T) { - list := newListToTest() + list := newUnconstrainedListToTest() go func() { // These are called concurrently with addition: during eviction, during removal etc. approximatelyCountTxInLists([]*txListForSender{list}) - list.HasMoreThan(42) list.IsEmpty() }() @@ -324,6 +355,18 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { }() } -func newListToTest() *txListForSender { - return newTxListForSender(".", &CacheConfig{MinGasPriceMicroErd: 100}, func(value *txListForSender) {}) +func newUnconstrainedListToTest() *txListForSender { + return newTxListForSender(".", &CacheConfig{ + NumBytesPerSenderThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + MinGasPriceNanoErd: 100, + }, func(value *txListForSender) {}) +} + +func newListToTest(numBytesThreshold uint32, countThreshold uint32) *txListForSender { + return newTxListForSender(".", &CacheConfig{ + NumBytesPerSenderThreshold: numBytesThreshold, + CountPerSenderThreshold: countThreshold, + MinGasPriceNanoErd: 100, + }, func(value *txListForSender) {}) } diff --git a/storage/txcache/txMeasures.go b/storage/txcache/txMeasures.go index 0dab0cada09..7b198faa8c1 100644 --- a/storage/txcache/txMeasures.go +++ b/storage/txcache/txMeasures.go @@ -14,15 +14,14 @@ func estimateTxGas(tx *WrappedTransaction) uint64 { return gasLimit } -// estimateTxFee returns an approximation for the cost of a transaction, in micro ERD (1/1000000 ERD) +// estimateTxFee returns an approximation for the cost of a transaction, in nano ERD // TODO: switch to integer operations (as opposed to float operations). // TODO: do not assume the order of magnitude of minGasPrice. 
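// Worked example for the formula below (illustrative figures, not from this patch):
// gasLimit = 100000 and gasPrice = 100000000000 atomic units (100 nano ERD per gas unit)
// give a fee of (100000 / 10^6) * (100000000000 / 10^3) = 0.1 * 10^8 = 10^7 nano ERD, i.e. 0.01 ERD.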
func estimateTxFee(tx *WrappedTransaction) uint64 { - // In order to obtain the result as micro ERD, we have to divide by 10^12, one trillion (since ~1 ERD accounts for ~10^18 gas currency units at the ~minimum price) - // In order to have better precision, we divide each of the factors by 10^6, one million - const SquareRootOfOneTrillion = 1000000 - gasLimit := float32(tx.Tx.GetGasLimit()) / SquareRootOfOneTrillion - gasPrice := float32(tx.Tx.GetGasPrice()) / SquareRootOfOneTrillion - feeInMicroERD := gasLimit * gasPrice - return uint64(feeInMicroERD) + // In order to obtain the result as nano ERD (not as "atomic" 10^-18 ERD), we have to divide by 10^9 + // In order to have better precision, we divide the factors by 10^6 and by 10^3, respectively + gasLimit := float32(tx.Tx.GetGasLimit()) / 1000000 + gasPrice := float32(tx.Tx.GetGasPrice()) / 1000 + feeInNanoERD := gasLimit * gasPrice + return uint64(feeInNanoERD) } diff --git a/storage/txcache/wrappedTransaction.go b/storage/txcache/wrappedTransaction.go index 84c08b5afbc..354fb058b69 100644 --- a/storage/txcache/wrappedTransaction.go +++ b/storage/txcache/wrappedTransaction.go @@ -1,6 +1,10 @@ package txcache -import "github.com/ElrondNetwork/elrond-go/data" +import ( + "bytes" + + "github.com/ElrondNetwork/elrond-go/data" +) // WrappedTransaction contains a transaction, its hash and extra information type WrappedTransaction struct { @@ -9,3 +13,7 @@ type WrappedTransaction struct { SenderShardID uint32 ReceiverShardID uint32 } + +func (wrappedTx *WrappedTransaction) sameAs(another *WrappedTransaction) bool { + return bytes.Equal(wrappedTx.TxHash, another.TxHash) +} diff --git a/update/container/accountDBSyncers_test.go b/update/container/accountDBSyncers_test.go new file mode 100644 index 00000000000..dc307885a54 --- /dev/null +++ b/update/container/accountDBSyncers_test.go @@ -0,0 +1,102 @@ +package containers + +import ( + "errors" + "testing" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/update" + "github.com/ElrondNetwork/elrond-go/update/mock" + "github.com/stretchr/testify/require" +) + +func TestNewAccountsDBSyncersContainer(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + require.False(t, check.IfNil(adsc)) +} + +func TestAccountDBSyncers_AddGetShouldWork(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + testKey := "key" + testVal := &mock.AccountsDBSyncerStub{} + err := adsc.Add(testKey, testVal) + require.NoError(t, err) + + res, err := adsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, testVal, res) +} + +func TestAccountDBSyncers_AddMultipleShouldWork(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + testKey0 := "key0" + testVal0 := &mock.AccountsDBSyncerStub{} + testKey1 := "key1" + testVal1 := &mock.AccountsDBSyncerStub{} + + err := adsc.AddMultiple([]string{testKey0, testKey1}, []update.AccountsDBSyncer{testVal0, testVal1}) + require.NoError(t, err) + + res0, err := adsc.Get(testKey0) + require.NoError(t, err) + require.Equal(t, testVal0, res0) + + res1, err := adsc.Get(testKey1) + require.NoError(t, err) + require.Equal(t, testVal1, res1) + + require.Equal(t, 2, adsc.Len()) +} + +func TestAccountDBSyncers_ReplaceShouldWork(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + testKey := "key" + testVal := &mock.AccountsDBSyncerStub{} + err := adsc.Add(testKey, testVal) + require.NoError(t, err) + + res, err := adsc.Get(testKey) + require.NoError(t, err) + 
+	require.Equal(t, testVal, res)
+
+	// update
+	newTestVal := &mock.AccountsDBSyncerStub{
+		SyncAccountsCalled: func(_ []byte) error {
+			return errors.New("local error")
+		},
+	}
+	err = adsc.Replace(testKey, newTestVal)
+	require.NoError(t, err)
+
+	res, err = adsc.Get(testKey)
+	require.NoError(t, err)
+	require.Equal(t, newTestVal, res)
+}
+
+func TestAccountDBSyncers_DeleteShouldWork(t *testing.T) {
+	t.Parallel()
+
+	adsc := NewAccountsDBSyncersContainer()
+	testKey := "key"
+	testVal := &mock.AccountsDBSyncerStub{}
+	err := adsc.Add(testKey, testVal)
+	require.NoError(t, err)
+
+	res, err := adsc.Get(testKey)
+	require.NoError(t, err)
+	require.Equal(t, testVal, res)
+
+	adsc.Remove(testKey)
+
+	res, err = adsc.Get(testKey)
+	require.Nil(t, res)
+	require.Equal(t, update.ErrInvalidContainerKey, err)
+}
diff --git a/update/container/trieSyncers_test.go b/update/container/trieSyncers_test.go
new file mode 100644
index 00000000000..f1d8a8baf9a
--- /dev/null
+++ b/update/container/trieSyncers_test.go
@@ -0,0 +1,103 @@
+package containers
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/ElrondNetwork/elrond-go/core/check"
+	"github.com/ElrondNetwork/elrond-go/update"
+	"github.com/ElrondNetwork/elrond-go/update/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewTrieSyncersContainer(t *testing.T) {
+	t.Parallel()
+
+	tsc := NewTrieSyncersContainer()
+	require.False(t, check.IfNil(tsc))
+}
+
+func TestTrieSyncers_AddGetShouldWork(t *testing.T) {
+	t.Parallel()
+
+	tsc := NewTrieSyncersContainer()
+	testKey := "key"
+	testVal := &mock.TrieSyncersStub{}
+	err := tsc.Add(testKey, testVal)
+	require.NoError(t, err)
+
+	res, err := tsc.Get(testKey)
+	require.NoError(t, err)
+	require.Equal(t, testVal, res)
+}
+
+func TestTrieSyncers_AddMultipleShouldWork(t *testing.T) {
+	t.Parallel()
+
+	tsc := NewTrieSyncersContainer()
+	testKey0 := "key0"
+	testVal0 := &mock.TrieSyncersStub{}
+	testKey1 := "key1"
+	testVal1 := &mock.TrieSyncersStub{}
+
+	err := tsc.AddMultiple([]string{testKey0, testKey1}, []update.TrieSyncer{testVal0, testVal1})
+	require.NoError(t, err)
+
+	res0, err := tsc.Get(testKey0)
+	require.NoError(t, err)
+	require.Equal(t, testVal0, res0)
+
+	res1, err := tsc.Get(testKey1)
+	require.NoError(t, err)
+	require.Equal(t, testVal1, res1)
+
+	require.Equal(t, 2, tsc.Len())
+}
+
+func TestTrieSyncers_ReplaceShouldWork(t *testing.T) {
+	t.Parallel()
+
+	tsc := NewTrieSyncersContainer()
+	testKey := "key"
+	testVal := &mock.TrieSyncersStub{}
+	err := tsc.Add(testKey, testVal)
+	require.NoError(t, err)
+
+	res, err := tsc.Get(testKey)
+	require.NoError(t, err)
+	require.Equal(t, testVal, res)
+
+	// update
+	newTestVal := &mock.TrieSyncersStub{
+		StartSyncingCalled: func(_ []byte, _ context.Context) error {
+			return errors.New("local err")
+		},
+	}
+	err = tsc.Replace(testKey, newTestVal)
+	require.NoError(t, err)
+
+	res, err = tsc.Get(testKey)
+	require.NoError(t, err)
+	require.Equal(t, newTestVal, res)
+}
+
+func TestTrieSyncers_DeleteShouldWork(t *testing.T) {
+	t.Parallel()
+
+	tsc := NewTrieSyncersContainer()
+	testKey := "key"
+	testVal := &mock.TrieSyncersStub{}
+	err := tsc.Add(testKey, testVal)
+	require.NoError(t, err)
+
+	res, err := tsc.Get(testKey)
+	require.NoError(t, err)
+	require.Equal(t, testVal, res)
+
+	tsc.Remove(testKey)
+
+	res, err = tsc.Get(testKey)
+	require.Nil(t, res)
+	require.Equal(t, update.ErrInvalidContainerKey, err)
+}
diff --git a/update/factory/accountDBSyncerContainerFactory.go b/update/factory/accountDBSyncerContainerFactory.go
index cc6c2112bb2..8b16b0de5f3 100644
--- a/update/factory/accountDBSyncerContainerFactory.go
+++ b/update/factory/accountDBSyncerContainerFactory.go
@@ -19,24 +19,26 @@ import (
 
 // ArgsNewAccountsDBSyncersContainerFactory defines the arguments needed to create accounts DB syncers container
 type ArgsNewAccountsDBSyncersContainerFactory struct {
-	TrieCacher         storage.Cacher
-	RequestHandler     update.RequestHandler
-	ShardCoordinator   sharding.Coordinator
-	Hasher             hashing.Hasher
-	Marshalizer        marshal.Marshalizer
-	TrieStorageManager data.StorageManager
-	WaitTime           time.Duration
+	TrieCacher           storage.Cacher
+	RequestHandler       update.RequestHandler
+	ShardCoordinator     sharding.Coordinator
+	Hasher               hashing.Hasher
+	Marshalizer          marshal.Marshalizer
+	TrieStorageManager   data.StorageManager
+	WaitTime             time.Duration
+	MaxTrieLevelInMemory uint
 }
 
 type accountDBSyncersContainerFactory struct {
-	trieCacher         storage.Cacher
-	requestHandler     update.RequestHandler
-	container          update.AccountsDBSyncContainer
-	shardCoordinator   sharding.Coordinator
-	hasher             hashing.Hasher
-	marshalizer        marshal.Marshalizer
-	waitTime           time.Duration
-	trieStorageManager data.StorageManager
+	trieCacher           storage.Cacher
+	requestHandler       update.RequestHandler
+	container            update.AccountsDBSyncContainer
+	shardCoordinator     sharding.Coordinator
+	hasher               hashing.Hasher
+	marshalizer          marshal.Marshalizer
+	waitTime             time.Duration
+	trieStorageManager   data.StorageManager
+	maxTrieLevelinMemory uint
 }
 
 const minWaitTime = time.Second
@@ -105,12 +107,13 @@ func (a *accountDBSyncersContainerFactory) Create() (update.AccountsDBSyncContai
 func (a *accountDBSyncersContainerFactory) createUserAccountsSyncer(shardId uint32) error {
 	args := syncer.ArgsNewUserAccountsSyncer{
 		ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{
-			Hasher:             a.hasher,
-			Marshalizer:        a.marshalizer,
-			TrieStorageManager: a.trieStorageManager,
-			RequestHandler:     a.requestHandler,
-			WaitTime:           a.waitTime,
-			Cacher:             a.trieCacher,
+			Hasher:               a.hasher,
+			Marshalizer:          a.marshalizer,
+			TrieStorageManager:   a.trieStorageManager,
+			RequestHandler:       a.requestHandler,
+			WaitTime:             a.waitTime,
+			Cacher:               a.trieCacher,
+			MaxTrieLevelInMemory: a.maxTrieLevelinMemory,
 		},
 		ShardId: shardId,
 	}
@@ -126,12 +129,13 @@ func (a *accountDBSyncersContainerFactory) createUserAccountsSyncer(shardId uint
 func (a *accountDBSyncersContainerFactory) createValidatorAccountsSyncer(shardId uint32) error {
 	args := syncer.ArgsNewValidatorAccountsSyncer{
 		ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{
-			Hasher:             a.hasher,
-			Marshalizer:        a.marshalizer,
-			TrieStorageManager: a.trieStorageManager,
-			RequestHandler:     a.requestHandler,
-			WaitTime:           a.waitTime,
-			Cacher:             a.trieCacher,
+			Hasher:               a.hasher,
+			Marshalizer:          a.marshalizer,
+			TrieStorageManager:   a.trieStorageManager,
+			RequestHandler:       a.requestHandler,
+			WaitTime:             a.waitTime,
+			Cacher:               a.trieCacher,
+			MaxTrieLevelInMemory: a.maxTrieLevelinMemory,
 		},
 	}
 	accountSyncer, err := syncer.NewValidatorAccountsSyncer(args)
diff --git a/update/factory/dataTrieFactory.go b/update/factory/dataTrieFactory.go
index 8006eeabe35..c9eaa7f47a4 100644
--- a/update/factory/dataTrieFactory.go
+++ b/update/factory/dataTrieFactory.go
@@ -20,18 +20,20 @@ import (
 
 // ArgsNewDataTrieFactory is the argument structure for the new data trie factory
 type ArgsNewDataTrieFactory struct {
-	StorageConfig    config.StorageConfig
-	SyncFolder       string
-	Marshalizer      marshal.Marshalizer
-	Hasher           hashing.Hasher
-	ShardCoordinator sharding.Coordinator
+	StorageConfig        config.StorageConfig
+	SyncFolder           string
+	Marshalizer          marshal.Marshalizer
+	Hasher               hashing.Hasher
+	ShardCoordinator     sharding.Coordinator
+	MaxTrieLevelInMemory uint
 }
 
 type dataTrieFactory struct {
-	shardCoordinator sharding.Coordinator
-	trieStorage      data.StorageManager
-	marshalizer      marshal.Marshalizer
-	hasher           hashing.Hasher
+	shardCoordinator     sharding.Coordinator
+	trieStorage          data.StorageManager
+	marshalizer          marshal.Marshalizer
+	hasher               hashing.Hasher
+	maxTrieLevelInMemory uint
 }
 
 // NewDataTrieFactory creates a data trie factory
@@ -105,7 +107,7 @@ func (d *dataTrieFactory) Create() (state.TriesHolder, error) {
 }
 
 func (d *dataTrieFactory) createAndAddOneTrie(shId uint32, accType genesis.Type, container state.TriesHolder) error {
-	dataTrie, err := trie.NewTrie(d.trieStorage, d.marshalizer, d.hasher)
+	dataTrie, err := trie.NewTrie(d.trieStorage, d.marshalizer, d.hasher, d.maxTrieLevelInMemory)
 	if err != nil {
 		return err
 	}
diff --git a/update/factory/exportHandlerFactory.go b/update/factory/exportHandlerFactory.go
index 60c92dff843..32cbc3d5836 100644
--- a/update/factory/exportHandlerFactory.go
+++ b/update/factory/exportHandlerFactory.go
@@ -47,6 +47,7 @@ type ArgsExporter struct {
 	ExportFolder             string
 	ExportTriesStorageConfig config.StorageConfig
 	ExportStateStorageConfig config.StorageConfig
+	MaxTrieLevelInMemory     uint
 	WhiteListHandler         process.WhiteListHandler
 	WhiteListerVerifiedTxs   process.WhiteListHandler
 	InterceptorsContainer    process.InterceptorsContainer
@@ -79,6 +80,7 @@ type exportHandlerFactory struct {
 	exportFolder             string
 	exportTriesStorageConfig config.StorageConfig
 	exportStateStorageConfig config.StorageConfig
+	maxTrieLevelInMemory     uint
 	whiteListHandler         process.WhiteListHandler
 	whiteListerVerifiedTxs   process.WhiteListHandler
 	interceptorsContainer    process.InterceptorsContainer
@@ -248,11 +250,12 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) {
 	}
 
 	argsDataTrieFactory := ArgsNewDataTrieFactory{
-		StorageConfig:    e.exportTriesStorageConfig,
-		SyncFolder:       e.exportFolder,
-		Marshalizer:      e.marshalizer,
-		Hasher:           e.hasher,
-		ShardCoordinator: e.shardCoordinator,
+		StorageConfig:        e.exportTriesStorageConfig,
+		SyncFolder:           e.exportFolder,
+		Marshalizer:          e.marshalizer,
+		Hasher:               e.hasher,
+		ShardCoordinator:     e.shardCoordinator,
+		MaxTrieLevelInMemory: e.maxTrieLevelInMemory,
 	}
 	dataTriesContainerFactory, err := NewDataTrieFactory(argsDataTrieFactory)
 	if err != nil {
@@ -283,13 +286,14 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) {
 	}
 
 	argsAccountsSyncers := ArgsNewAccountsDBSyncersContainerFactory{
-		TrieCacher:         e.dataPool.TrieNodes(),
-		RequestHandler:     e.requestHandler,
-		ShardCoordinator:   e.shardCoordinator,
-		Hasher:             e.hasher,
-		Marshalizer:        e.marshalizer,
-		TrieStorageManager: dataTriesContainerFactory.TrieStorageManager(),
-		WaitTime:           time.Minute,
+		TrieCacher:           e.dataPool.TrieNodes(),
+		RequestHandler:       e.requestHandler,
+		ShardCoordinator:     e.shardCoordinator,
+		Hasher:               e.hasher,
+		Marshalizer:          e.marshalizer,
+		TrieStorageManager:   dataTriesContainerFactory.TrieStorageManager(),
+		WaitTime:             time.Minute,
+		MaxTrieLevelInMemory: e.maxTrieLevelInMemory,
 	}
 	accountsDBSyncerFactory, err := NewAccountsDBSContainerFactory(argsAccountsSyncers)
 	if err != nil {
diff --git a/update/genesis/import.go b/update/genesis/import.go
index 06cffdf44a0..17a6f0498a0 100644
--- a/update/genesis/import.go
+++ b/update/genesis/import.go
@@ -22,6 +22,8 @@ import (
 
 var _ update.ImportHandler = (*stateImport)(nil)
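+// maxTrieLevelInMemory bounds how many levels of a rebuilt trie are kept in memory during import; deeper nodes are committed to storage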
+const maxTrieLevelInMemory = uint(5)
+
 // ArgsNewStateImport is the arguments structure to create a new state importer
 type ArgsNewStateImport struct {
 	Reader update.MultiFileReader
@@ -238,7 +240,7 @@ func (si *stateImport) getTrie(shardID uint32, accType Type) (data.Trie, error)
 		trieStorageManager = si.trieStorageManagers[triesFactory.PeerAccountTrie]
 	}
 
-	trieForShard, err := trie.NewTrie(trieStorageManager, si.marshalizer, si.hasher)
+	trieForShard, err := trie.NewTrie(trieStorageManager, si.marshalizer, si.hasher, maxTrieLevelInMemory)
 	if err != nil {
 		return nil, err
 	}
@@ -269,7 +271,7 @@ func (si *stateImport) importDataTrie(fileName string) error {
 		return fmt.Errorf("%w wanted a roothash", update.ErrWrongTypeAssertion)
 	}
 
-	dataTrie, err := trie.NewTrie(si.trieStorageManagers[triesFactory.UserAccountTrie], si.marshalizer, si.hasher)
+	dataTrie, err := trie.NewTrie(si.trieStorageManagers[triesFactory.UserAccountTrie], si.marshalizer, si.hasher, maxTrieLevelInMemory)
 	if err != nil {
 		return err
 	}
diff --git a/update/mock/poolsHolderMock.go b/update/mock/poolsHolderMock.go
index 54e593f9241..975f0fa9480 100644
--- a/update/mock/poolsHolderMock.go
+++ b/update/mock/poolsHolderMock.go
@@ -29,11 +29,13 @@ func NewPoolsHolderMock() *PoolsHolderMock {
 	phf.transactions, _ = txpool.NewShardedTxPool(
 		txpool.ArgShardedTxPool{
 			Config: storageUnit.CacheConfig{
-				Size:        10000,
-				SizeInBytes: 1000000000,
-				Shards:      16,
+				Size:                 100000,
+				SizePerSender:        1000,
+				SizeInBytes:          1000000000,
+				SizeInBytesPerSender: 10000000,
+				Shards:               16,
 			},
-			MinGasPrice:    100000000000000,
+			MinGasPrice:    200000000000,
 			NumberOfShards: 1,
 		},
 	)
diff --git a/update/sync/base_test.go b/update/sync/base_test.go
new file mode 100644
index 00000000000..53bdfd7aacc
--- /dev/null
+++ b/update/sync/base_test.go
@@ -0,0 +1,69 @@
+package sync
+
+import (
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/ElrondNetwork/elrond-go/update"
+	"github.com/ElrondNetwork/elrond-go/update/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestGetDataFromStorage_NilStorageShouldErr(t *testing.T) {
+	t.Parallel()
+
+	res, err := GetDataFromStorage([]byte("test"), nil)
+	require.Equal(t, update.ErrNilStorage, err)
+	require.Nil(t, res)
+}
+
+func TestGetDataFromStorage_NotFoundShouldErr(t *testing.T) {
+	t.Parallel()
+
+	localErr := errors.New("not found")
+	storer := &mock.StorerStub{
+		GetCalled: func(_ []byte) ([]byte, error) {
+			return nil, localErr
+		},
+	}
+
+	res, err := GetDataFromStorage([]byte("test"), storer)
+	require.Equal(t, localErr, err)
+	require.Nil(t, res)
+}
+
+func TestGetDataFromStorage_FoundShouldWork(t *testing.T) {
+	t.Parallel()
+
+	expRes := []byte("result")
+	storer := &mock.StorerStub{
+		GetCalled: func(_ []byte) ([]byte, error) {
+			return expRes, nil
+		},
+	}
+
+	res, err := GetDataFromStorage([]byte("test"), storer)
+	require.NoError(t, err)
+	require.Equal(t, expRes, res)
+}
+
+func TestWaitFor_ShouldTimeout(t *testing.T) {
+	t.Parallel()
+
+	chanToUse := make(chan bool, 1)
+	err := WaitFor(chanToUse, 10*time.Millisecond)
+	require.Equal(t, update.ErrTimeIsOut, err)
+}
+
+func TestWaitFor_ShouldWorkAfterTheChannelIsWrittenIn(t *testing.T) {
+	t.Parallel()
+
+	chanToUse := make(chan bool, 1)
+	go func() {
+		time.Sleep(10 * time.Millisecond)
+		chanToUse <- true
+	}()
+	err := WaitFor(chanToUse, 100*time.Millisecond)
+	require.NoError(t, err)
+}
diff --git a/update/sync/concurrentTriesMap_test.go b/update/sync/concurrentTriesMap_test.go
new file mode 100644
index 00000000000..9340581bd64
--- /dev/null
+++ b/update/sync/concurrentTriesMap_test.go
@@ -0,0 +1,61 @@
+package sync
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/ElrondNetwork/elrond-go/core/random"
+	"github.com/ElrondNetwork/elrond-go/update/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewConcurrentTriesMap(t *testing.T) {
+	t.Parallel()
+
+	ctm := newConcurrentTriesMap()
+	require.NotNil(t, ctm)
+}
+
+func TestConcurrentTriesMap_ConcurrentAccesses(t *testing.T) {
+	t.Parallel()
+
+	// when running with -race, no data race should be reported; if the mutex protection
+	// inside the struct were removed, this test would fail
+	testDuration := 50 * time.Millisecond
+	rnd := random.ConcurrentSafeIntRandomizer{}
+	ctx, cancel := context.WithTimeout(context.Background(), testDuration)
+	ctm := newConcurrentTriesMap()
+	go func(ctx context.Context) {
+		for {
+			select {
+			case <-ctx.Done():
+			default:
+				randomID := rnd.Intn(100)
+				ctm.setTrie(fmt.Sprintf("%d", randomID), &mock.TrieStub{})
+			}
+		}
+	}(ctx)
+	go func(ctx context.Context) {
+		for {
+			select {
+			case <-ctx.Done():
+			default:
+				randomID := rnd.Intn(100)
+				ctm.getTrie(fmt.Sprintf("%d", randomID))
+			}
+		}
+	}(ctx)
+	go func(ctx context.Context) {
+		for {
+			select {
+			case <-ctx.Done():
+			default:
+				_ = ctm.getTries()
+			}
+		}
+	}(ctx)
+	time.Sleep(testDuration)
+	cancel()
+}
diff --git a/update/sync/syncHeadersByHash.go b/update/sync/syncHeadersByHash.go
index de7db584b12..7f99f740699 100644
--- a/update/sync/syncHeadersByHash.go
+++ b/update/sync/syncHeadersByHash.go
@@ -176,7 +176,7 @@ func (m *syncHeadersByHash) getHeaderFromPoolOrStorage(hash []byte) (data.Header
 	}
 
 	var hdr block.Header
-	err = m.marshalizer.Unmarshal(hdr, hdrData)
+	err = m.marshalizer.Unmarshal(&hdr, hdrData)
 	if err != nil {
 		return nil, false
 	}
diff --git a/update/sync/syncHeadersByHash_test.go b/update/sync/syncHeadersByHash_test.go
new file mode 100644
index 00000000000..eb5be748828
--- /dev/null
+++ b/update/sync/syncHeadersByHash_test.go
@@ -0,0 +1,179 @@
+package sync
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/ElrondNetwork/elrond-go/core/check"
+	"github.com/ElrondNetwork/elrond-go/data"
+	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/dataRetriever"
+	"github.com/ElrondNetwork/elrond-go/update"
+	"github.com/ElrondNetwork/elrond-go/update/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewMissingheadersByHashSyncer_NilParamsShouldErr(t *testing.T) {
+	t.Parallel()
+
+	okArgs := getMisingHeadersByHashSyncerArgs()
+
+	testInput := make(map[ArgsNewMissingHeadersByHashSyncer]error)
+
+	nilStorerArgs := okArgs
+	nilStorerArgs.Storage = nil
+	testInput[nilStorerArgs] = dataRetriever.ErrNilHeadersStorage
+
+	nilCacheArgs := okArgs
+	nilCacheArgs.Cache = nil
+	testInput[nilCacheArgs] = update.ErrNilCacher
+
+	nilMarshalizerArgs := okArgs
+	nilMarshalizerArgs.Marshalizer = nil
+	testInput[nilMarshalizerArgs] = dataRetriever.ErrNilMarshalizer
+
+	nilRequestHandlerArgs := okArgs
+	nilRequestHandlerArgs.RequestHandler = nil
+	testInput[nilRequestHandlerArgs] = update.ErrNilRequestHandler
+
+	for args, expectedErr := range testInput {
+		mhhs, err := NewMissingheadersByHashSyncer(args)
+		require.True(t, check.IfNil(mhhs))
+		require.Equal(t, expectedErr, err)
+	}
+}
+
+func TestNewMissingheadersByHashSyncer_OkValsShouldWork(t *testing.T) {
+	t.Parallel()
+
+	args := getMisingHeadersByHashSyncerArgs()
+	mhhs, err := NewMissingheadersByHashSyncer(args)
+	require.NoError(t, err)
+	require.NotNil(t, mhhs)
+}
+
+func TestSyncHeadersByHash_SyncMissingHeadersByHashHeaderFoundInCacheShouldWork(t *testing.T) {
+	t.Parallel()
+
+	args := getMisingHeadersByHashSyncerArgs()
+	args.Cache = &mock.HeadersCacherStub{
+		GetHeaderByHashCalled: func(_ []byte) (data.HeaderHandler, error) {
+			return &block.MetaBlock{Nonce: 37}, nil
+		},
+	}
+	mhhs, _ := NewMissingheadersByHashSyncer(args)
+
+	err := mhhs.SyncMissingHeadersByHash([]uint32{0, 1}, [][]byte{[]byte("hash234")}, context.Background())
+	require.NoError(t, err)
+}
+
+func TestSyncHeadersByHash_SyncMissingHeadersByHashHeaderFoundInStorageShouldWork(t *testing.T) {
+	t.Parallel()
+
+	args := getMisingHeadersByHashSyncerArgs()
+	args.Cache = &mock.HeadersCacherStub{
+		GetHeaderByHashCalled: func(_ []byte) (data.HeaderHandler, error) {
+			return nil, errors.New("not found")
+		},
+	}
+	args.Storage = &mock.StorerStub{
+		GetCalled: func(key []byte) ([]byte, error) {
+			mb := &block.MetaBlock{Nonce: 37}
+			mbBytes, _ := args.Marshalizer.Marshal(mb)
+			return mbBytes, nil
+		},
+	}
+	mhhs, _ := NewMissingheadersByHashSyncer(args)
+
+	err := mhhs.SyncMissingHeadersByHash([]uint32{0, 1}, [][]byte{[]byte("hash234")}, context.Background())
+	require.NoError(t, err)
+}
+
+func TestSyncHeadersByHash_SyncMissingHeadersByHashHeaderNotFoundShouldTimeout(t *testing.T) {
+	t.Parallel()
+
+	var errNotFound = errors.New("not found")
+	args := getMisingHeadersByHashSyncerArgs()
+	args.Cache = &mock.HeadersCacherStub{
+		GetHeaderByHashCalled: func(_ []byte) (data.HeaderHandler, error) {
+			return nil, errNotFound
+		},
+	}
+	args.Storage = &mock.StorerStub{
+		GetCalled: func(_ []byte) ([]byte, error) {
+			return nil, errNotFound
+		},
+	}
+	mhhs, _ := NewMissingheadersByHashSyncer(args)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+	err := mhhs.SyncMissingHeadersByHash([]uint32{0, 1}, [][]byte{[]byte("hash234")}, ctx)
+	cancel()
+
+	require.Equal(t, update.ErrTimeIsOut, err)
+}
+
+func TestSyncHeadersByHash_GetHeadersNotSyncedShouldErr(t *testing.T) {
+	t.Parallel()
+
+	args := getMisingHeadersByHashSyncerArgs()
+	mhhs, _ := NewMissingheadersByHashSyncer(args)
+	require.NotNil(t, mhhs)
+
+	res, err := mhhs.GetHeaders()
+	require.Nil(t, res)
+	require.Equal(t, update.ErrNotSynced, err)
+}
+
+func TestSyncHeadersByHash_GetHeadersShouldReceiveAndReturnOkMb(t *testing.T) {
+	t.Parallel()
+
+	var handlerToNotify func(header data.HeaderHandler, shardHeaderHash []byte)
+	var errNotFound = errors.New("not found")
+	args := getMisingHeadersByHashSyncerArgs()
+	args.Storage = &mock.StorerStub{
+		GetCalled: func(_ []byte) ([]byte, error) {
+			return nil, errNotFound
+		},
+	}
+	args.Cache = &mock.HeadersCacherStub{
+		GetHeaderByHashCalled: func(_ []byte) (data.HeaderHandler, error) {
+			return nil, errNotFound
+		},
+		RegisterHandlerCalled: func(handler func(header data.HeaderHandler, shardHeaderHash []byte)) {
+			handlerToNotify = handler
+		},
+	}
+	mhhs, _ := NewMissingheadersByHashSyncer(args)
+	require.NotNil(t, mhhs)
+
+	expectedHash := []byte("hash")
+	expectedMB := &block.MetaBlock{Nonce: 37}
+	go func() {
+		time.Sleep(10 * time.Millisecond)
+		handlerToNotify(expectedMB, expectedHash)
+	}()
+	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+	err := mhhs.SyncMissingHeadersByHash([]uint32{0}, [][]byte{[]byte("hash")}, ctx)
+	require.NoError(t, err)
+	cancel()
+
+	res, err := mhhs.GetHeaders()
+	require.NoError(t, err)
+	require.NotNil(t, res)
+
+	actualMb, ok := res[string(expectedHash)]
+	require.True(t, ok)
+	require.Equal(t, expectedMB, actualMb)
+}
+
+func getMisingHeadersByHashSyncerArgs() ArgsNewMissingHeadersByHashSyncer {
+	return ArgsNewMissingHeadersByHashSyncer{
+		Storage:        &mock.StorerMock{},
+		Cache:          &mock.HeadersCacherStub{},
+		Marshalizer:    &mock.MarshalizerMock{},
+		RequestHandler: &mock.RequestHandlerStub{},
+	}
+}
diff --git a/update/sync/syncMiniBlocks_test.go b/update/sync/syncMiniBlocks_test.go
index 24dbe7e4917..2d742db0e4e 100644
--- a/update/sync/syncMiniBlocks_test.go
+++ b/update/sync/syncMiniBlocks_test.go
@@ -225,3 +225,109 @@ func TestSyncPendingMiniBlocksFromMeta_MiniBlocksInPoolReceive(t *testing.T) {
 	cancel()
 	require.Nil(t, err)
 }
+
+func TestSyncPendingMiniBlocksFromMeta_MiniBlocksInStorageReceive(t *testing.T) {
+	t.Parallel()
+
+	mbHash := []byte("mbHash")
+	mb := &block.MiniBlock{}
+	marshalizer := &mock.MarshalizerMock{}
+	args := ArgsNewPendingMiniBlocksSyncer{
+		Storage: &mock.StorerStub{
+			GetCalled: func(key []byte) (bytes []byte, err error) {
+				mbBytes, _ := marshalizer.Marshal(mb)
+				return mbBytes, nil
+			},
+		},
+		Cache: &mock.CacherStub{
+			RegisterHandlerCalled: func(_ func(_ []byte, _ interface{})) {},
+			PeekCalled: func(key []byte) (interface{}, bool) {
+				return nil, false
+			},
+		},
+		Marshalizer:    &mock.MarshalizerFake{},
+		RequestHandler: &mock.RequestHandlerStub{},
+	}
+
+	pendingMiniBlocksSyncer, err := NewPendingMiniBlocksSyncer(args)
+	require.Nil(t, err)
+
+	metaBlock := &block.MetaBlock{
+		Nonce: 1, Epoch: 1, RootHash: []byte("metaRootHash"),
+		EpochStart: block.EpochStart{
+			LastFinalizedHeaders: []block.EpochStartShardData{
+				{
+					ShardID:                 0,
+					RootHash:                []byte("shardDataRootHash"),
+					PendingMiniBlockHeaders: []block.MiniBlockHeader{{Hash: mbHash}},
+					FirstPendingMetaBlock:   []byte("firstPending"),
+				},
+			},
+		},
+	}
+	unFinished := make(map[string]*block.MetaBlock)
+	unFinished["firstPending"] = metaBlock
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, ctx)
+	cancel()
+	require.Nil(t, err)
+}
+
+func TestSyncPendingMiniBlocksFromMeta_GetMiniBlocksShouldWork(t *testing.T) {
+	t.Parallel()
+
+	mbHash := []byte("mbHash")
+	mb := &block.MiniBlock{
+		TxHashes: [][]byte{[]byte("txHash")},
+	}
+	localErr := errors.New("not found")
+	marshalizer := &mock.MarshalizerMock{}
+	args := ArgsNewPendingMiniBlocksSyncer{
+		Storage: &mock.StorerStub{
+			GetCalled: func(key []byte) (bytes []byte, err error) {
+				mbBytes, _ := marshalizer.Marshal(mb)
+				return mbBytes, nil
+			},
+			GetFromEpochCalled: func(key []byte, epoch uint32) (bytes []byte, err error) {
+				return nil, localErr
+			},
+		},
+		Cache: &mock.CacherStub{
+			RegisterHandlerCalled: func(_ func(_ []byte, _ interface{})) {},
+			PeekCalled: func(key []byte) (interface{}, bool) {
+				return nil, false
+			},
+		},
+		Marshalizer:    &mock.MarshalizerFake{},
+		RequestHandler: &mock.RequestHandlerStub{},
+	}
+
+	pendingMiniBlocksSyncer, err := NewPendingMiniBlocksSyncer(args)
+	require.Nil(t, err)
+
+	metaBlock := &block.MetaBlock{
+		Nonce: 1, Epoch: 1, RootHash: []byte("metaRootHash"),
+		EpochStart: block.EpochStart{
+			LastFinalizedHeaders: []block.EpochStartShardData{
+				{
+					ShardID:                 0,
+					RootHash:                []byte("shardDataRootHash"),
+					PendingMiniBlockHeaders: []block.MiniBlockHeader{{Hash: mbHash}},
+					FirstPendingMetaBlock:   []byte("firstPending"),
+				},
+			},
+		},
+	}
+	unFinished := make(map[string]*block.MetaBlock)
+	unFinished["firstPending"] = metaBlock
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, ctx)
+	cancel()
+	require.Nil(t, err)
+
+	res, err := pendingMiniBlocksSyncer.GetMiniBlocks()
+	require.NoError(t, err)
+	require.Equal(t, mb, res[string(mbHash)])
+}
diff --git a/vm/systemSmartContracts/auction.go b/vm/systemSmartContracts/auction.go
index 01fb0641908..f8095d5193d 100644
--- a/vm/systemSmartContracts/auction.go
+++ b/vm/systemSmartContracts/auction.go
@@ -183,13 +183,15 @@ func (s *stakingAuctionSC) changeRewardAddress(args *vmcommon.ContractCallInput)
 		return vmcommon.UserError
 	}
 
+	txData := "changeRewardAddress@" + hex.EncodeToString(registrationData.RewardAddress)
 	for _, blsKey := range registrationData.BlsPubKeys {
-		vmOutput, err := s.executeOnStakingSC([]byte("changeRewardAddress@" + hex.EncodeToString(blsKey) + "@" + hex.EncodeToString(registrationData.RewardAddress)))
-		isError := err != nil || vmOutput.ReturnCode != vmcommon.Ok
-		if isError {
-			log.LogIfError(err)
-			return vmcommon.UserError
-		}
+		txData += "@" + hex.EncodeToString(blsKey)
+	}
+	vmOutput, err := s.executeOnStakingSC([]byte(txData))
+	isError := err != nil || vmOutput.ReturnCode != vmcommon.Ok
+	if isError {
+		log.LogIfError(err)
+		return vmcommon.UserError
 	}
 
 	return vmcommon.Ok
@@ -818,7 +820,7 @@ func (s *stakingAuctionSC) claim(args *vmcommon.ContractCallInput) vmcommon.Retu
 	zero := big.NewInt(0)
 	claimable := big.NewInt(0).Sub(registrationData.TotalStakeValue, registrationData.LockedStake)
 	if claimable.Cmp(zero) <= 0 {
-		return vmcommon.UserError
+		return vmcommon.Ok
 	}
 
 	registrationData.TotalStakeValue.Set(registrationData.LockedStake)
diff --git a/vm/systemSmartContracts/auction_test.go b/vm/systemSmartContracts/auction_test.go
index b822fef6056..547686e5c4d 100644
--- a/vm/systemSmartContracts/auction_test.go
+++ b/vm/systemSmartContracts/auction_test.go
@@ -1436,8 +1436,8 @@ func TestAuctionStakingSC_Claim(t *testing.T) {
 	//do stake
 	stake(t, sc, args.ValidatorSettings.GenesisNodePrice(), receiverAddr, stakerAddress, stakerPubKey, nodesToRunBytes)
 
-	//do claim all stake is locked should return UserError
-	doClaim(t, sc, stakerAddress, receiverAddr, vmcommon.UserError)
+	//do claim; all the stake is locked, so it should return Ok
+	doClaim(t, sc, stakerAddress, receiverAddr, vmcommon.Ok)
 
 	// do stake to add more money but not lock the stake
 	nonce = 0
diff --git a/vm/systemSmartContracts/staking.go b/vm/systemSmartContracts/staking.go
index bed9a7128ec..acb742d0b5e 100644
--- a/vm/systemSmartContracts/staking.go
+++ b/vm/systemSmartContracts/staking.go
@@ -276,8 +276,7 @@ func (r *stakingSC) changeRewardAddress(args *vmcommon.ContractCallInput) vmcomm
 		return vmcommon.UserError
 	}
 
-	for i := 1; i < len(args.Arguments); i++ {
-		blsKey := args.Arguments[i]
+	for _, blsKey := range args.Arguments[1:] {
 		stakedData, err := r.getOrCreateRegisteredData(blsKey)
 		if err != nil {
 			return vmcommon.UserError