From a42f45a52ae6eb45eaac3ba2cec7bf72da5ca859 Mon Sep 17 00:00:00 2001 From: yzang2019 Date: Wed, 15 Apr 2026 05:03:56 -0700 Subject: [PATCH 1/7] FlatKV refactor for state sync import + export --- sei-db/common/{evm => keys}/keys.go | 28 +- sei-db/common/{evm => keys}/keys_test.go | 23 +- sei-db/config/sc_config.go | 6 +- .../bench/cryptosim/cryptosim_config.go | 6 +- .../bench/cryptosim/data_generator.go | 20 +- .../state_db/bench/cryptosim/receipt_test.go | 50 +- sei-db/state_db/bench/cryptosim/util.go | 8 +- sei-db/state_db/bench/helper.go | 2 +- .../bench/wrappers/db_implementations.go | 9 +- .../state_db/bench/wrappers/flatkv_wrapper.go | 4 +- .../wrappers/state_store_wrapper_test.go | 6 +- sei-db/state_db/sc/composite/exporter.go | 39 +- sei-db/state_db/sc/composite/importer.go | 44 +- sei-db/state_db/sc/composite/store.go | 78 +- sei-db/state_db/sc/composite/store_test.go | 113 +- sei-db/state_db/sc/flatkv/api.go | 15 +- .../state_db/sc/flatkv/{ => config}/config.go | 33 +- .../sc/flatkv/{ => config}/config_test.go | 45 +- .../flatkv/{ => config}/flatkv_test_config.go | 4 +- .../state_db/sc/flatkv/crash_recovery_test.go | 478 ------- sei-db/state_db/sc/flatkv/exporter.go | 234 +--- ...exporter_test.go => import_export_test.go} | 256 ++-- sei-db/state_db/sc/flatkv/importer.go | 229 +++- sei-db/state_db/sc/flatkv/iterator.go | 307 ----- sei-db/state_db/sc/flatkv/keys_test.go | 18 - sei-db/state_db/sc/flatkv/ktype/ktype.go | 26 +- .../sc/flatkv/{keys.go => ktype/meta.go} | 14 +- .../state_db/sc/flatkv/ktype/metakey_test.go | 17 + .../sc/flatkv/lthash_correctness_test.go | 196 +-- .../state_db/sc/flatkv/perdb_lthash_test.go | 60 +- sei-db/state_db/sc/flatkv/snapshot.go | 3 +- sei-db/state_db/sc/flatkv/snapshot_test.go | 308 ++--- sei-db/state_db/sc/flatkv/store.go | 100 +- sei-db/state_db/sc/flatkv/store_apply.go | 34 +- sei-db/state_db/sc/flatkv/store_iterator.go | 144 +++ sei-db/state_db/sc/flatkv/store_lifecycle.go | 3 +- 
sei-db/state_db/sc/flatkv/store_meta.go | 23 +- sei-db/state_db/sc/flatkv/store_meta_test.go | 15 +- sei-db/state_db/sc/flatkv/store_read.go | 111 +- sei-db/state_db/sc/flatkv/store_read_test.go | 1105 ++--------------- sei-db/state_db/sc/flatkv/store_test.go | 753 ++++++++--- sei-db/state_db/sc/flatkv/store_write.go | 68 +- sei-db/state_db/sc/flatkv/store_write_test.go | 249 ++-- sei-db/state_db/sc/flatkv/test_helper.go | 186 +++ sei-db/state_db/sc/memiavl/multitree.go | 2 +- sei-db/state_db/ss/composite/store.go | 127 +- sei-db/state_db/ss/composite/store_test.go | 126 +- sei-db/state_db/ss/evm/db_test.go | 2 +- sei-db/state_db/ss/evm/store.go | 2 +- sei-db/state_db/ss/evm/types.go | 2 +- .../tools/cmd/seidb/operations/state_size.go | 2 +- 51 files changed, 2378 insertions(+), 3355 deletions(-) rename sei-db/common/{evm => keys}/keys.go (81%) rename sei-db/common/{evm => keys}/keys_test.go (88%) rename sei-db/state_db/sc/flatkv/{ => config}/config.go (85%) rename sei-db/state_db/sc/flatkv/{ => config}/config_test.go (73%) rename sei-db/state_db/sc/flatkv/{ => config}/flatkv_test_config.go (94%) delete mode 100644 sei-db/state_db/sc/flatkv/crash_recovery_test.go rename sei-db/state_db/sc/flatkv/{exporter_test.go => import_export_test.go} (74%) delete mode 100644 sei-db/state_db/sc/flatkv/iterator.go delete mode 100644 sei-db/state_db/sc/flatkv/keys_test.go rename sei-db/state_db/sc/flatkv/{keys.go => ktype/meta.go} (74%) create mode 100644 sei-db/state_db/sc/flatkv/ktype/metakey_test.go create mode 100644 sei-db/state_db/sc/flatkv/store_iterator.go create mode 100644 sei-db/state_db/sc/flatkv/test_helper.go diff --git a/sei-db/common/evm/keys.go b/sei-db/common/keys/keys.go similarity index 81% rename from sei-db/common/evm/keys.go rename to sei-db/common/keys/keys.go index 5fa6a57007..3a4d86260b 100644 --- a/sei-db/common/evm/keys.go +++ b/sei-db/common/keys/keys.go @@ -1,8 +1,7 @@ -package evm +package keys import ( "bytes" - "errors" ) const ( @@ -13,10 +12,10 
@@ const ( // EVMStoreKey is the cosmos store/module name for EVM state. const EVMStoreKey = "evm" -// EVMFlatKVStoreKey is the module name used when exporting/importing FlatKV -// EVM data as a separate module in state-sync snapshots. Both the SC and SS -// layers need to recognise this name and treat it as EVM data. -const EVMFlatKVStoreKey = "evm_flatkv" +// FlatKVStoreKey is the module name used when exporting/importing data from +// the FlatKV backend. Treated as a separate module in state-sync snapshots +// so that import routes data exclusively to FlatKV. +const FlatKVStoreKey = "flatkv" // EVM key prefixes — mirrored from x/evm/types/keys.go. // These are immutable on-disk format markers; changing them would break @@ -26,7 +25,6 @@ var ( stateKeyPrefix = []byte{0x03} codeKeyPrefix = []byte{0x07} codeHashKeyPrefix = []byte{0x08} - codeSizeKeyPrefix = []byte{0x09} nonceKeyPrefix = []byte{0x0a} ) @@ -34,11 +32,6 @@ var ( // Exported for callers that need the raw prefix (e.g. iterator bounds). func StateKeyPrefix() []byte { return stateKeyPrefix } -var ( - // ErrMalformedEVMKey indicates invalid EVM key encoding. - ErrMalformedEVMKey = errors.New("sei-db: malformed evm key") -) - // EVMKeyKind identifies an EVM key family. type EVMKeyKind uint8 @@ -48,14 +41,9 @@ const ( EVMKeyCodeHash // Stripped key: 20-byte address EVMKeyCode // Stripped key: 20-byte address EVMKeyStorage // Stripped key: addr||slot (20+32 bytes) - // TODO: rename this to MiscKey, it's a catch-all that gets potentially non-evm data - EVMKeyLegacy // Full original key preserved (address mappings, codesize, etc.) + EVMKeyLegacy // Full original key preserved (address mappings, codesize, etc.) ) -// EVMKeyUnknown is an alias for EVMKeyEmpty, used by FlatKV to test for -// unrecognised/empty keys. -const EVMKeyUnknown = EVMKeyEmpty - // ParseEVMKey parses an EVM key from the x/evm store keyspace. // // For optimized keys (nonce, code, codehash, storage), keyBytes is the stripped key. 
@@ -113,13 +101,13 @@ func EVMKeyPrefixByte(kind EVMKeyKind) (byte, bool) { } } -// BuildMemIAVLEVMKey builds a memiavl key from internal bytes. +// BuildEVMKey builds a memiavl key from internal bytes. // This is the reverse of ParseEVMKey for optimized key types. // // NOTE: This is primarily used for tests and temporary compatibility. // FlatKV stores data in internal format; this function converts back to // memiavl format for Iterator/Exporter output. -func BuildMemIAVLEVMKey(kind EVMKeyKind, keyBytes []byte) []byte { +func BuildEVMKey(kind EVMKeyKind, keyBytes []byte) []byte { prefix, ok := EVMKeyPrefixByte(kind) if !ok { return nil diff --git a/sei-db/common/evm/keys_test.go b/sei-db/common/keys/keys_test.go similarity index 88% rename from sei-db/common/evm/keys_test.go rename to sei-db/common/keys/keys_test.go index c60659e7ff..67cb3d3eff 100644 --- a/sei-db/common/evm/keys_test.go +++ b/sei-db/common/keys/keys_test.go @@ -1,4 +1,4 @@ -package evm +package keys import ( "testing" @@ -52,12 +52,6 @@ func TestParseEVMKey(t *testing.T) { wantKind: EVMKeyCodeHash, wantBytes: addr, }, - { - name: "CodeSize goes to Legacy", - key: concat(codeSizeKeyPrefix, addr), - wantKind: EVMKeyLegacy, - wantBytes: concat(codeSizeKeyPrefix, addr), // Full key preserved - }, { name: "Code", key: concat(codeKeyPrefix, addr), @@ -183,17 +177,11 @@ func TestBuildMemIAVLEVMKey(t *testing.T) { keyBytes: concat(addr, slot), want: concat(stateKeyPrefix, concat(addr, slot)), }, - { - name: "Unknown", - kind: EVMKeyUnknown, - keyBytes: addr, - want: nil, - }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - got := BuildMemIAVLEVMKey(tc.kind, tc.keyBytes) + got := BuildEVMKey(tc.kind, tc.keyBytes) require.Equal(t, tc.want, got) }) } @@ -204,11 +192,4 @@ func TestInternalKeyLen(t *testing.T) { require.Equal(t, addressLen, InternalKeyLen(EVMKeyNonce)) require.Equal(t, addressLen, InternalKeyLen(EVMKeyCodeHash)) require.Equal(t, addressLen, InternalKeyLen(EVMKeyCode)) - 
require.Equal(t, 0, InternalKeyLen(EVMKeyUnknown)) -} - -func TestEVMKeyUnknownAlias(t *testing.T) { - // Verify EVMKeyUnknown == EVMKeyEmpty so FlatKV's "skip unknown" checks - // still work correctly after introducing EVMKeyLegacy. - require.Equal(t, EVMKeyEmpty, EVMKeyUnknown) } diff --git a/sei-db/config/sc_config.go b/sei-db/config/sc_config.go index 2315697b15..49f9f6d221 100644 --- a/sei-db/config/sc_config.go +++ b/sei-db/config/sc_config.go @@ -2,8 +2,8 @@ package config import ( "fmt" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/memiavl" ) @@ -45,7 +45,7 @@ type StateCommitConfig struct { MemIAVLConfig memiavl.Config // FlatKVConfig is the configuration for the FlatKV (EVM) backend - FlatKVConfig flatkv.Config + FlatKVConfig config.Config // Max concurrent historical proof queries (RPC /store path). HistoricalProofMaxInFlight int `mapstructure:"historical-proof-max-inflight"` @@ -66,7 +66,7 @@ func DefaultStateCommitConfig() StateCommitConfig { ReadMode: CosmosOnlyRead, EnableLatticeHash: false, MemIAVLConfig: memiavl.DefaultConfig(), - FlatKVConfig: *flatkv.DefaultConfig(), + FlatKVConfig: *config.DefaultConfig(), HistoricalProofMaxInFlight: DefaultSCHistoricalProofMaxInFlight, HistoricalProofRateLimit: DefaultSCHistoricalProofRateLimit, HistoricalProofBurst: DefaultSCHistoricalProofBurst, diff --git a/sei-db/state_db/bench/cryptosim/cryptosim_config.go b/sei-db/state_db/bench/cryptosim/cryptosim_config.go index d2180db2df..d8c8e4af7a 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim_config.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim_config.go @@ -9,7 +9,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/bench/wrappers" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv" + flatkvConfig 
"github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" ) const ( @@ -163,7 +163,7 @@ type CryptoSimConfig struct { DeleteLogDirOnShutdown bool // Configures the FlatKV database. Ignored if Backend is not "FlatKV". - FlatKVConfig *flatkv.Config + FlatKVConfig *flatkvConfig.Config // The capacity of the channel that holds blocks awaiting execution. BlockChannelCapacity int @@ -287,7 +287,7 @@ func DefaultCryptoSimConfig() *CryptoSimConfig { DeleteLogDirOnStartup: false, DeleteDataDirOnShutdown: false, DeleteLogDirOnShutdown: false, - FlatKVConfig: flatkv.DefaultConfig(), + FlatKVConfig: flatkvConfig.DefaultConfig(), BlockChannelCapacity: 8, GenerateReceipts: false, RecieptChannelCapacity: 32, diff --git a/sei-db/state_db/bench/cryptosim/data_generator.go b/sei-db/state_db/bench/cryptosim/data_generator.go index 09ead76273..c7b2357437 100644 --- a/sei-db/state_db/bench/cryptosim/data_generator.go +++ b/sei-db/state_db/bench/cryptosim/data_generator.go @@ -4,7 +4,7 @@ import ( "encoding/binary" "fmt" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" ) const ( @@ -18,7 +18,7 @@ const ( // Use the code hash as a proxy. There is currently no mechanism to force FlatKV to update the account balance // field, and code hash keys will cause the account DB to get updated, which is the important part for this // simulation. - accountKeyPrefix = evm.EVMKeyCodeHash + accountKeyPrefix = keys.EVMKeyCodeHash ) // Generates random data for the benchmark. This is not a thread safe utility. 
@@ -107,7 +107,7 @@ func NewDataGenerator( fmt.Printf("Next block number: %s.\n", int64Commas(int64(nextBlockNumber))) //nolint:gosec - feeCollectionAddress := evm.BuildMemIAVLEVMKey( + feeCollectionAddress := keys.BuildEVMKey( accountKeyPrefix, rand.Address(accountPrefix, 0, AddressLen), ) @@ -166,7 +166,7 @@ func (d *DataGenerator) CreateNewAccount( d.nextAccountID++ addr := d.rand.Address(accountPrefix, accountID, AddressLen) - address = evm.BuildMemIAVLEVMKey(accountKeyPrefix, addr) + address = keys.BuildEVMKey(accountKeyPrefix, addr) isCold = d.rand.Float64() >= d.config.NewAccountDormancyProbability @@ -211,7 +211,7 @@ func (d *DataGenerator) CreateNewErc20Contract( d.nextErc20ContractID++ erc20Address := d.rand.Address(contractPrefix, erc20ContractID, AddressLen) - address = evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, erc20Address) + address = keys.BuildEVMKey(keys.EVMKeyCode, erc20Address) if !write { return erc20ContractID, address, nil @@ -240,7 +240,7 @@ func (d *DataGenerator) RandomAccount() (id int64, address []byte, isNew bool, e lastHotAccountID := d.config.NumberOfHotAccounts accountID := d.rand.Int64Range(int64(firstHotAccountID), int64(lastHotAccountID+1)) addr := d.rand.Address(accountPrefix, accountID, AddressLen) - return accountID, evm.BuildMemIAVLEVMKey(accountKeyPrefix, addr), false, nil + return accountID, keys.BuildEVMKey(accountKeyPrefix, addr), false, nil } else { new := d.rand.Float64() < d.config.NewAccountProbability @@ -260,7 +260,7 @@ func (d *DataGenerator) RandomAccount() (id int64, address []byte, isNew bool, e accountID := d.rand.Int64Range(firstLegalColdAccountID, lastLegalColdAccountID) addr := d.rand.Address(accountPrefix, accountID, AddressLen) - return accountID, evm.BuildMemIAVLEVMKey(accountKeyPrefix, addr), false, nil + return accountID, keys.BuildEVMKey(accountKeyPrefix, addr), false, nil } } @@ -271,7 +271,7 @@ func (d *DataGenerator) randomAccountSlot(accountID int64) ([]byte, error) { slotID := 
accountID*int64(d.config.Erc20InteractionsPerAccount) + slotNumber storageKeyBytes := d.rand.Address(ethStoragePrefix, slotID, StorageKeyLen) - return evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, storageKeyBytes), nil + return keys.BuildEVMKey(keys.EVMKeyStorage, storageKeyBytes), nil } // Selects a random ERC20 contract for a transaction. @@ -289,7 +289,7 @@ func (d *DataGenerator) randomErc20Contract() ([]byte, error) { } erc20ContractID := d.rand.Int64Range(0, hotMax) addr := d.rand.Address(contractPrefix, erc20ContractID, AddressLen) - return evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr), nil + return keys.BuildEVMKey(keys.EVMKeyCode, addr), nil } // Otherwise, select a cold ERC20 contract at random. @@ -301,7 +301,7 @@ func (d *DataGenerator) randomErc20Contract() ([]byte, error) { int64(d.config.HotErc20ContractSetSize), d.nextErc20ContractID) addr := d.rand.Address(contractPrefix, erc20ContractID, AddressLen) - return evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr), nil + return keys.BuildEVMKey(keys.EVMKeyCode, addr), nil } // Close the data generator and release any resources. 
diff --git a/sei-db/state_db/bench/cryptosim/receipt_test.go b/sei-db/state_db/bench/cryptosim/receipt_test.go index 367264c094..343987fd66 100644 --- a/sei-db/state_db/bench/cryptosim/receipt_test.go +++ b/sei-db/state_db/bench/cryptosim/receipt_test.go @@ -4,30 +4,30 @@ import ( "testing" ethtypes "github.com/ethereum/go-ethereum/core/types" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" ) func makeTestKeys(t *testing.T) (feeAccount, srcAccount, dstAccount, senderSlot, receiverSlot, erc20Contract []byte) { t.Helper() keyRand := NewCannedRandom(4096, 1) - feeAccount = evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) + feeAccount = keys.BuildEVMKey(keys.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) srcAddr := keyRand.Address(accountPrefix, 1, AddressLen) - srcAccount = evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, srcAddr) + srcAccount = keys.BuildEVMKey(keys.EVMKeyCodeHash, srcAddr) dstAddr := keyRand.Address(accountPrefix, 2, AddressLen) - dstAccount = evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, dstAddr) + dstAccount = keys.BuildEVMKey(keys.EVMKeyCodeHash, dstAddr) senderSlotBytes := make([]byte, StorageKeyLen) copy(senderSlotBytes[:AddressLen], srcAddr) copy(senderSlotBytes[AddressLen:], keyRand.SeededBytes(SlotLen, 11)) - senderSlot = evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, senderSlotBytes) + senderSlot = keys.BuildEVMKey(keys.EVMKeyStorage, senderSlotBytes) receiverSlotBytes := make([]byte, StorageKeyLen) copy(receiverSlotBytes[:AddressLen], dstAddr) copy(receiverSlotBytes[AddressLen:], keyRand.SeededBytes(SlotLen, 12)) - receiverSlot = evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, receiverSlotBytes) + receiverSlot = keys.BuildEVMKey(keys.EVMKeyStorage, receiverSlotBytes) - erc20Contract = evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, keyRand.Address(contractPrefix, 0, AddressLen)) + erc20Contract = keys.BuildEVMKey(keys.EVMKeyCode, 
keyRand.Address(contractPrefix, 0, AddressLen)) return } @@ -128,22 +128,22 @@ func TestBuildERC20TransferReceipt_EVMKeyCodeAccounts(t *testing.T) { crand := NewCannedRandom(1<<20, 42) keyRand := NewCannedRandom(4096, 1) - feeAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, keyRand.Address(accountPrefix, 0, AddressLen)) + feeAccount := keys.BuildEVMKey(keys.EVMKeyCode, keyRand.Address(accountPrefix, 0, AddressLen)) srcAddr := keyRand.Address(accountPrefix, 1, AddressLen) - srcAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, srcAddr) - dstAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, keyRand.Address(accountPrefix, 2, AddressLen)) + srcAccount := keys.BuildEVMKey(keys.EVMKeyCode, srcAddr) + dstAccount := keys.BuildEVMKey(keys.EVMKeyCode, keyRand.Address(accountPrefix, 2, AddressLen)) senderSlotBytes := make([]byte, StorageKeyLen) copy(senderSlotBytes[:AddressLen], srcAddr) copy(senderSlotBytes[AddressLen:], keyRand.SeededBytes(SlotLen, 11)) - senderSlot := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, senderSlotBytes) + senderSlot := keys.BuildEVMKey(keys.EVMKeyStorage, senderSlotBytes) receiverSlotBytes := make([]byte, StorageKeyLen) copy(receiverSlotBytes[:AddressLen], keyRand.Address(accountPrefix, 2, AddressLen)) copy(receiverSlotBytes[AddressLen:], keyRand.SeededBytes(SlotLen, 12)) - receiverSlot := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, receiverSlotBytes) + receiverSlot := keys.BuildEVMKey(keys.EVMKeyStorage, receiverSlotBytes) - erc20Contract := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, keyRand.Address(contractPrefix, 0, AddressLen)) + erc20Contract := keys.BuildEVMKey(keys.EVMKeyCode, keyRand.Address(contractPrefix, 0, AddressLen)) _, err := BuildERC20TransferReceipt(crand, feeAccount, srcAccount, dstAccount, senderSlot, receiverSlot, erc20Contract, 1_000_000, 0) if err != nil { @@ -157,13 +157,13 @@ func TestBuildERC20TransferReceipt_DataGeneratorKeyFormats(t *testing.T) { crand := NewCannedRandom(1<<20, 42) keyRand := NewCannedRandom(4096, 1) - feeAccount 
:= evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) - srcAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 1, AddressLen)) - dstAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 2, AddressLen)) + feeAccount := keys.BuildEVMKey(keys.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) + srcAccount := keys.BuildEVMKey(keys.EVMKeyCodeHash, keyRand.Address(accountPrefix, 1, AddressLen)) + dstAccount := keys.BuildEVMKey(keys.EVMKeyCodeHash, keyRand.Address(accountPrefix, 2, AddressLen)) - senderSlot := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, keyRand.Address(ethStoragePrefix, 10, StorageKeyLen)) - receiverSlot := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, keyRand.Address(ethStoragePrefix, 20, StorageKeyLen)) - erc20Contract := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, keyRand.Address(contractPrefix, 0, AddressLen)) + senderSlot := keys.BuildEVMKey(keys.EVMKeyStorage, keyRand.Address(ethStoragePrefix, 10, StorageKeyLen)) + receiverSlot := keys.BuildEVMKey(keys.EVMKeyStorage, keyRand.Address(ethStoragePrefix, 20, StorageKeyLen)) + erc20Contract := keys.BuildEVMKey(keys.EVMKeyCode, keyRand.Address(contractPrefix, 0, AddressLen)) receipt, err := BuildERC20TransferReceipt(crand, feeAccount, srcAccount, dstAccount, senderSlot, receiverSlot, erc20Contract, 1_000_000, 0) if err != nil { @@ -178,23 +178,23 @@ func BenchmarkBuildERC20TransferReceipt(b *testing.B) { keyRand := NewCannedRandom(4096, 1) receiptRand := NewCannedRandom(1<<20, 2) - feeAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) + feeAccount := keys.BuildEVMKey(keys.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) srcAddr := keyRand.Address(accountPrefix, 1, AddressLen) - srcAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, srcAddr) + srcAccount := keys.BuildEVMKey(keys.EVMKeyCodeHash, srcAddr) dstAddr := 
keyRand.Address(accountPrefix, 2, AddressLen) - dstAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, dstAddr) + dstAccount := keys.BuildEVMKey(keys.EVMKeyCodeHash, dstAddr) senderSlotBytes := make([]byte, StorageKeyLen) copy(senderSlotBytes[:AddressLen], srcAddr) copy(senderSlotBytes[AddressLen:], keyRand.SeededBytes(SlotLen, 11)) - senderSlot := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, senderSlotBytes) + senderSlot := keys.BuildEVMKey(keys.EVMKeyStorage, senderSlotBytes) receiverSlotBytes := make([]byte, StorageKeyLen) copy(receiverSlotBytes[:AddressLen], dstAddr) copy(receiverSlotBytes[AddressLen:], keyRand.SeededBytes(SlotLen, 12)) - receiverSlot := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, receiverSlotBytes) + receiverSlot := keys.BuildEVMKey(keys.EVMKeyStorage, receiverSlotBytes) - erc20Contract := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, keyRand.Address(contractPrefix, 0, AddressLen)) + erc20Contract := keys.BuildEVMKey(keys.EVMKeyCode, keyRand.Address(contractPrefix, 0, AddressLen)) b.ReportAllocs() b.ResetTimer() diff --git a/sei-db/state_db/bench/cryptosim/util.go b/sei-db/state_db/bench/cryptosim/util.go index 508408d61e..74e3ce7c60 100644 --- a/sei-db/state_db/bench/cryptosim/util.go +++ b/sei-db/state_db/bench/cryptosim/util.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" ) // BytesToHex returns a lowercase hex string with 0x prefix, suitable for printing binary keys or addresses. @@ -22,17 +22,17 @@ func BytesToHex(b []byte) string { // Uses EVMKeyCode with padded keyBytes; EVMKeyNonce requires 20-byte addresses and // non-standard lengths are routed to EVMKeyLegacy which FlatKV ignores. 
func AccountIDCounterKey() []byte { - return evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, paddedCounterKey(accountIdCounterKey)) + return keys.BuildEVMKey(keys.EVMKeyCode, paddedCounterKey(accountIdCounterKey)) } // Get the key for the ERC20 contract ID counter in the database. func Erc20IDCounterKey() []byte { - return evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, paddedCounterKey(erc20IdCounterKey)) + return keys.BuildEVMKey(keys.EVMKeyCode, paddedCounterKey(erc20IdCounterKey)) } // Get the key for the block number counter in the database. func BlockNumberCounterKey() []byte { - return evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, paddedCounterKey(blockNumberCounterKey)) + return keys.BuildEVMKey(keys.EVMKeyCode, paddedCounterKey(blockNumberCounterKey)) } // paddedCounterKey pads the string to AddressLen bytes for use with EVM key builders. diff --git a/sei-db/state_db/bench/helper.go b/sei-db/state_db/bench/helper.go index 97be3f1863..d3eb48bfaf 100644 --- a/sei-db/state_db/bench/helper.go +++ b/sei-db/state_db/bench/helper.go @@ -19,7 +19,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-cosmos/snapshots" snapshottypes "github.com/sei-protocol/sei-chain/sei-cosmos/snapshots/types" - commonevm "github.com/sei-protocol/sei-chain/sei-db/common/evm" + commonevm "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/state_db/bench/wrappers" sctypes "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" diff --git a/sei-db/state_db/bench/wrappers/db_implementations.go b/sei-db/state_db/bench/wrappers/db_implementations.go index 1c246b7f20..6b40344070 100644 --- a/sei-db/state_db/bench/wrappers/db_implementations.go +++ b/sei-db/state_db/bench/wrappers/db_implementations.go @@ -5,10 +5,11 @@ import ( "fmt" "path/filepath" - commonevm "github.com/sei-protocol/sei-chain/sei-db/common/evm" + commonevm "github.com/sei-protocol/sei-chain/sei-db/common/keys" 
"github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/composite" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv" + flatkvConfig "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/memiavl" ssComposite "github.com/sei-protocol/sei-chain/sei-db/state_db/ss/composite" ) @@ -56,9 +57,9 @@ func newMemIAVLCommitStore(dbDir string) (DBWrapper, error) { return NewMemIAVLWrapper(cs), nil } -func newFlatKVCommitStore(ctx context.Context, dbDir string, config *flatkv.Config) (DBWrapper, error) { +func newFlatKVCommitStore(ctx context.Context, dbDir string, config *flatkvConfig.Config) (DBWrapper, error) { if config == nil { - config = flatkv.DefaultConfig() + config = flatkvConfig.DefaultConfig() } config.DataDir = dbDir @@ -142,7 +143,7 @@ func NewDBImpl(ctx context.Context, dbType DBType, dataDir string, dbConfig any) case MemIAVL: return newMemIAVLCommitStore(dataDir) case FlatKV: - return newFlatKVCommitStore(ctx, dataDir, dbConfig.(*flatkv.Config)) + return newFlatKVCommitStore(ctx, dataDir, dbConfig.(*flatkvConfig.Config)) case CompositeDual: return newCompositeCommitStore(ctx, dataDir, config.DualWrite) case CompositeSplit: diff --git a/sei-db/state_db/bench/wrappers/flatkv_wrapper.go b/sei-db/state_db/bench/wrappers/flatkv_wrapper.go index 7c3e2cb620..e0bdcaad87 100644 --- a/sei-db/state_db/bench/wrappers/flatkv_wrapper.go +++ b/sei-db/state_db/bench/wrappers/flatkv_wrapper.go @@ -1,7 +1,7 @@ package wrappers import ( - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv" @@ -62,7 +62,7 @@ func (f *flatKVWrapper) Close() error { } func (f *flatKVWrapper) Read(key []byte) (data []byte, found bool, err error) { - 
val, ok := f.base.Get(evm.EVMStoreKey, key) + val, ok := f.base.Get(keys.EVMStoreKey, key) return val, ok, nil } diff --git a/sei-db/state_db/bench/wrappers/state_store_wrapper_test.go b/sei-db/state_db/bench/wrappers/state_store_wrapper_test.go index 8093374ce8..a2596131d6 100644 --- a/sei-db/state_db/bench/wrappers/state_store_wrapper_test.go +++ b/sei-db/state_db/bench/wrappers/state_store_wrapper_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - commonevm "github.com/sei-protocol/sei-chain/sei-db/common/evm" + commonevm "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/stretchr/testify/require" ) @@ -17,8 +17,8 @@ func TestStateStoreWrapperApplyChangesetsAsyncPreservesHistoricalState(t *testin wrapper := NewStateStoreWrapper(store) - keyV1AndV2 := commonevm.BuildMemIAVLEVMKey(commonevm.EVMKeyNonce, bytes.Repeat([]byte{0x11}, 20)) - keyV2Only := commonevm.BuildMemIAVLEVMKey(commonevm.EVMKeyCodeHash, bytes.Repeat([]byte{0x22}, 20)) + keyV1AndV2 := commonevm.BuildEVMKey(commonevm.EVMKeyNonce, bytes.Repeat([]byte{0x11}, 20)) + keyV2Only := commonevm.BuildEVMKey(commonevm.EVMKeyCodeHash, bytes.Repeat([]byte{0x22}, 20)) require.NoError(t, wrapper.ApplyChangeSets(changelogEntry(1, []*proto.NamedChangeSet{ { diff --git a/sei-db/state_db/sc/composite/exporter.go b/sei-db/state_db/sc/composite/exporter.go index 888cb8c1aa..9b5502b629 100644 --- a/sei-db/state_db/sc/composite/exporter.go +++ b/sei-db/state_db/sc/composite/exporter.go @@ -5,6 +5,7 @@ import ( "fmt" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" ) @@ -24,25 +25,29 @@ const ( // - string: a module name header that starts a new module section // - *types.SnapshotNode: a leaf key/value belonging to the current module // -// FlatKV data is exported as a separate "evm_flatkv" module appended after all +// 
FlatKV data is exported as a separate "flatkv" module appended after all // cosmos modules complete. This keeps the two backends fully independent in the // snapshot stream. type SnapshotExporter struct { cosmosExporter types.Exporter - evmExporter types.Exporter + flatkvExporter types.Exporter phase exportPhase } // NewExporter creates a composite exporter. cosmosExporter must not be nil. -// evmExporter may be nil when FlatKV is not active. -func NewExporter(cosmosExporter types.Exporter, evmExporter types.Exporter) (*SnapshotExporter, error) { +// flatkvExporter may be nil when FlatKV is not active. +func NewExporter(cosmosExporter types.Exporter, flatkvExporter types.Exporter) (*SnapshotExporter, error) { + if cosmosExporter == nil && flatkvExporter == nil { + return nil, fmt.Errorf("either cosmosExporter or flatkvExporter must not be nil") + } + var startingPhase = phaseCosmos if cosmosExporter == nil { - return nil, fmt.Errorf("cosmosExporter must not be nil") + startingPhase = phaseFlatKV } return &SnapshotExporter{ cosmosExporter: cosmosExporter, - evmExporter: evmExporter, - phase: phaseCosmos, + flatkvExporter: flatkvExporter, + phase: startingPhase, }, nil } @@ -51,7 +56,7 @@ func NewExporter(cosmosExporter types.Exporter, evmExporter types.Exporter) (*Sn // The stream is split into two sequential phases: // 1. phaseCosmos — drains all items from the cosmos (memiavl) exporter. // When the cosmos exporter is exhausted, if a FlatKV exporter is present, -// the phase transitions to phaseFlatKV and emits the EVMFlatKVStoreName +// the phase transitions to phaseFlatKV and emits the keys.FlatKVStoreKey // module header as the first item. // 2. phaseFlatKV — drains all items from the FlatKV exporter. // @@ -67,7 +72,7 @@ func (s *SnapshotExporter) Next() (interface{}, error) { } } -// nextFromCosmos pulls items from the cosmos exporter. On exhaustion it +// nextFromCosmos pulls items from the cosmos exporter. 
On exhaustion, it // transitions to phaseFlatKV (emitting the module header) or phaseDone. func (s *SnapshotExporter) nextFromCosmos() (interface{}, error) { item, err := s.cosmosExporter.Next() @@ -77,9 +82,9 @@ func (s *SnapshotExporter) nextFromCosmos() (interface{}, error) { } // Cosmos done. Append flatKV as a separate module. - if s.evmExporter != nil { + if s.flatkvExporter != nil { s.phase = phaseFlatKV - return EVMFlatKVStoreName, nil + return keys.FlatKVStoreKey, nil } s.phase = phaseDone @@ -88,10 +93,10 @@ func (s *SnapshotExporter) nextFromCosmos() (interface{}, error) { return item, nil } -// nextFromFlatKV pulls items from the FlatKV exporter. On exhaustion it +// nextFromFlatKV pulls items from the FlatKV exporter. On exhaustion, it // transitions to phaseDone. func (s *SnapshotExporter) nextFromFlatKV() (interface{}, error) { - item, err := s.evmExporter.Next() + item, err := s.flatkvExporter.Next() if err != nil { if !errors.Is(err, errorutils.ErrorExportDone) { return nil, err @@ -103,12 +108,12 @@ func (s *SnapshotExporter) nextFromFlatKV() (interface{}, error) { } func (s *SnapshotExporter) Close() error { - var errCosmos, errEVM error + var errCosmos, errFlatKV error if s.cosmosExporter != nil { errCosmos = s.cosmosExporter.Close() } - if s.evmExporter != nil { - errEVM = s.evmExporter.Close() + if s.flatkvExporter != nil { + errFlatKV = s.flatkvExporter.Close() } - return errors.Join(errCosmos, errEVM) + return errors.Join(errCosmos, errFlatKV) } diff --git a/sei-db/state_db/sc/composite/importer.go b/sei-db/state_db/sc/composite/importer.go index 3b0b358c2d..5000356b44 100644 --- a/sei-db/state_db/sc/composite/importer.go +++ b/sei-db/state_db/sc/composite/importer.go @@ -3,6 +3,7 @@ package composite import ( "errors" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" ) @@ -10,46 +11,34 @@ var _ types.Importer = (*SnapshotImporter)(nil) type SnapshotImporter struct { 
cosmosImporter types.Importer - evmImporter types.Importer + flatkvImporter types.Importer currentModule string } -func NewImporter(cosmosImporter types.Importer, evmImporter types.Importer) *SnapshotImporter { +func NewImporter(cosmosImporter types.Importer, flatkvImporter types.Importer) *SnapshotImporter { return &SnapshotImporter{ cosmosImporter: cosmosImporter, - evmImporter: evmImporter, + flatkvImporter: flatkvImporter, } } -func (si *SnapshotImporter) Close() error { - var errCosmos, errEVM error - if si.cosmosImporter != nil { - errCosmos = si.cosmosImporter.Close() - } - if si.evmImporter != nil { - errEVM = si.evmImporter.Close() - } - return errors.Join(errCosmos, errEVM) -} - func (si *SnapshotImporter) AddModule(name string) error { si.currentModule = name - if name == EVMFlatKVStoreName { - if si.evmImporter != nil { - return si.evmImporter.AddModule(name) + if name == keys.FlatKVStoreKey { + if si.flatkvImporter != nil { + return si.flatkvImporter.AddModule(name) } return nil - } - if si.cosmosImporter != nil { + } else if si.cosmosImporter != nil { return si.cosmosImporter.AddModule(name) } return nil } func (si *SnapshotImporter) AddNode(node *types.SnapshotNode) { - if si.currentModule == EVMFlatKVStoreName { - if si.evmImporter != nil { - si.evmImporter.AddNode(node) + if si.currentModule == keys.FlatKVStoreKey { + if si.flatkvImporter != nil { + si.flatkvImporter.AddNode(node) } return } @@ -57,3 +46,14 @@ func (si *SnapshotImporter) AddNode(node *types.SnapshotNode) { si.cosmosImporter.AddNode(node) } } + +func (si *SnapshotImporter) Close() error { + var errCosmos, errFlatKV error + if si.cosmosImporter != nil { + errCosmos = si.cosmosImporter.Close() + } + if si.flatkvImporter != nil { + errFlatKV = si.flatkvImporter.Close() + } + return errors.Join(errCosmos, errFlatKV) +} diff --git a/sei-db/state_db/sc/composite/store.go b/sei-db/state_db/sc/composite/store.go index 9edb242db2..3caf333a4c 100644 --- a/sei-db/state_db/sc/composite/store.go 
+++ b/sei-db/state_db/sc/composite/store.go @@ -9,7 +9,7 @@ import ( "path/filepath" commonerrors "github.com/sei-protocol/sei-chain/sei-db/common/errors" - commonevm "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv" @@ -20,14 +20,6 @@ import ( var logger = seilog.NewLogger("db", "state-db", "sc", "composite") -// EVMStoreName is the module name for the EVM store in memiavl. -const EVMStoreName = commonevm.EVMStoreKey - -// EVMFlatKVStoreName is the module name used when exporting/importing -// EVM data from the FlatKV backend. Treated as a separate module in -// state-sync snapshots so that import routes data exclusively to FlatKV. -const EVMFlatKVStoreName = commonevm.EVMFlatKVStoreKey - // For backward compatibility purpose reuse current interface var _ types.Committer = (*CompositeCommitStore)(nil) @@ -38,8 +30,8 @@ type CompositeCommitStore struct { // cosmosCommitter is the Cosmos (memiavl) backend - always initialized cosmosCommitter *memiavl.CommitStore - // evmCommitter is the FlatKV backend - may be nil if not enabled - evmCommitter flatkv.Store + // flatkvCommitter is the FlatKV backend - may be nil if not enabled + flatkvCommitter flatkv.Store // homeDir is the base directory for the store homeDir string @@ -74,7 +66,7 @@ func NewCompositeCommitStore( if cfg.WriteMode == config.DualWrite || cfg.WriteMode == config.SplitWrite { cfg.FlatKVConfig.DataDir = filepath.Join(homeDir, "data", "flatkv") var err error - store.evmCommitter, err = flatkv.NewCommitStore(ctx, &cfg.FlatKVConfig) + store.flatkvCommitter, err = flatkv.NewCommitStore(ctx, &cfg.FlatKVConfig) if err != nil { panic(fmt.Errorf("failed to create FlatKV commit store: %w", err)) } @@ -94,7 +86,7 @@ func (cs *CompositeCommitStore) Initialize(initialStores []string) { // are 
created. Any writer lock acquired during cleanup is retained for // the subsequent LoadVersion(..., false) call. func (cs *CompositeCommitStore) CleanupCrashArtifacts() error { - if fkv, ok := cs.evmCommitter.(*flatkv.CommitStore); ok { + if fkv, ok := cs.flatkvCommitter.(*flatkv.CommitStore); ok { if err := fkv.CleanupOrphanedReadOnlyDirs(); err != nil { return err } @@ -126,21 +118,21 @@ func (cs *CompositeCommitStore) LoadVersion(targetVersion int64, readOnly bool) homeDir: cs.homeDir, config: cs.config, } - if cs.evmCommitter != nil { - evmStore, err := cs.evmCommitter.LoadVersion(targetVersion, true) + if cs.flatkvCommitter != nil { + evmStore, err := cs.flatkvCommitter.LoadVersion(targetVersion, true) if err != nil { logger.Error("FlatKV unavailable for readonly load, EVM data will not be served", "version", targetVersion, "err", err) } else { - newStore.evmCommitter = evmStore + newStore.flatkvCommitter = evmStore } } return newStore, nil } cs.cosmosCommitter = cosmosCommitter - if cs.evmCommitter != nil { - _, err := cs.evmCommitter.LoadVersion(targetVersion, false) + if cs.flatkvCommitter != nil { + _, err := cs.flatkvCommitter.LoadVersion(targetVersion, false) if err != nil { return nil, fmt.Errorf("failed to load FlatKV version: %w", err) } @@ -170,7 +162,7 @@ func (cs *CompositeCommitStore) ApplyChangeSets(changesets []*proto.NamedChangeS var cosmosChangeset []*proto.NamedChangeSet for _, changeset := range changesets { - if changeset.Name == EVMStoreName { + if changeset.Name == keys.EVMStoreKey { evmChangeset = append(evmChangeset, changeset) } else { cosmosChangeset = append(cosmosChangeset, changeset) @@ -198,8 +190,8 @@ func (cs *CompositeCommitStore) ApplyChangeSets(changesets []*proto.NamedChangeS } } - if cs.evmCommitter != nil && len(evmChangeset) > 0 { - if err := cs.evmCommitter.ApplyChangeSets(evmChangeset); err != nil { + if cs.flatkvCommitter != nil && len(evmChangeset) > 0 { + if err := cs.flatkvCommitter.ApplyChangeSets(evmChangeset); 
err != nil { return fmt.Errorf("failed to apply EVM changesets: %w", err) } } @@ -221,8 +213,8 @@ func (cs *CompositeCommitStore) Commit() (int64, error) { } // Commit to FlatKV as well if enabled - if cs.evmCommitter != nil { - evmVersion, err := cs.evmCommitter.Commit() + if cs.flatkvCommitter != nil { + evmVersion, err := cs.flatkvCommitter.Commit() if err != nil { return 0, fmt.Errorf("failed to commit to EVM store: %w", err) } @@ -242,7 +234,7 @@ func (cs *CompositeCommitStore) Commit() (int64, error) { // so the correction survives subsequent restarts. func (cs *CompositeCommitStore) reconcileVersions() error { cosmosVer := cs.cosmosCommitter.Version() - evmVer := cs.evmCommitter.Version() + evmVer := cs.flatkvCommitter.Version() if cosmosVer == evmVer { return nil } @@ -267,7 +259,7 @@ func (cs *CompositeCommitStore) reconcileVersions() error { } } if evmVer > minVer { - if err := cs.evmCommitter.Rollback(minVer); err != nil { + if err := cs.flatkvCommitter.Rollback(minVer); err != nil { return fmt.Errorf("failed to rollback EVM to reconciled version %d: %w", minVer, err) } } @@ -279,8 +271,8 @@ func (cs *CompositeCommitStore) reconcileVersions() error { func (cs *CompositeCommitStore) Version() int64 { if cs.cosmosCommitter != nil { return cs.cosmosCommitter.Version() - } else if cs.evmCommitter != nil { - return cs.evmCommitter.Version() + } else if cs.flatkvCommitter != nil { + return cs.flatkvCommitter.Version() } return 0 } @@ -322,8 +314,8 @@ func (cs *CompositeCommitStore) appendEvmLatticeHash(ci *proto.CommitInfo, evmHa // WorkingCommitInfo returns the working commit info func (cs *CompositeCommitStore) WorkingCommitInfo() *proto.CommitInfo { ci := cs.cosmosCommitter.WorkingCommitInfo() - if cs.evmCommitter != nil { - return cs.appendEvmLatticeHash(ci, cs.evmCommitter.RootHash()) + if cs.flatkvCommitter != nil { + return cs.appendEvmLatticeHash(ci, cs.flatkvCommitter.RootHash()) } return ci } @@ -331,8 +323,8 @@ func (cs *CompositeCommitStore) 
WorkingCommitInfo() *proto.CommitInfo { // LastCommitInfo returns the last commit info func (cs *CompositeCommitStore) LastCommitInfo() *proto.CommitInfo { ci := cs.cosmosCommitter.LastCommitInfo() - if cs.evmCommitter != nil { - return cs.appendEvmLatticeHash(ci, cs.evmCommitter.CommittedRootHash()) + if cs.flatkvCommitter != nil { + return cs.appendEvmLatticeHash(ci, cs.flatkvCommitter.CommittedRootHash()) } return ci } @@ -349,8 +341,8 @@ func (cs *CompositeCommitStore) Rollback(targetVersion int64) error { return fmt.Errorf("failed to rollback cosmos commit store: %w", err) } - if cs.evmCommitter != nil { - if err := cs.evmCommitter.Rollback(targetVersion); err != nil { + if cs.flatkvCommitter != nil { + if err := cs.flatkvCommitter.Rollback(targetVersion); err != nil { return fmt.Errorf("failed to rollback evm commit store: %w", err) } } @@ -369,16 +361,16 @@ func (cs *CompositeCommitStore) Exporter(version int64) (types.Exporter, error) return nil, fmt.Errorf("failed to create cosmos exporter: %w", err) } - var evmExporter types.Exporter - if cs.evmCommitter != nil && (cs.config.WriteMode == config.SplitWrite || cs.config.WriteMode == config.DualWrite) { - evmExporter, err = cs.evmCommitter.Exporter(version) + var flatkvExporter types.Exporter + if cs.flatkvCommitter != nil && (cs.config.WriteMode == config.SplitWrite || cs.config.WriteMode == config.DualWrite) { + flatkvExporter, err = cs.flatkvCommitter.Exporter(version) if err != nil { _ = cosmosExporter.Close() - return nil, fmt.Errorf("failed to create evm exporter: %w", err) + return nil, fmt.Errorf("failed to create flatkv exporter: %w", err) } } - return NewExporter(cosmosExporter, evmExporter) + return NewExporter(cosmosExporter, flatkvExporter) } // Importer returns an importer for state sync @@ -388,11 +380,11 @@ func (cs *CompositeCommitStore) Importer(version int64) (types.Importer, error) return nil, err } var evmImporter types.Importer - if cs.evmCommitter != nil { - evmImporter, err = 
cs.evmCommitter.Importer(version) + if cs.flatkvCommitter != nil { + evmImporter, err = cs.flatkvCommitter.Importer(version) if err != nil { _ = cosmosImporter.Close() - return nil, fmt.Errorf("failed to create evm importer: %w", err) + return nil, fmt.Errorf("failed to create flatkv importer: %w", err) } } compositeImporter := NewImporter(cosmosImporter, evmImporter) @@ -409,8 +401,8 @@ func (cs *CompositeCommitStore) Close() error { } } - if cs.evmCommitter != nil { - if err := cs.evmCommitter.Close(); err != nil { + if cs.flatkvCommitter != nil { + if err := cs.flatkvCommitter.Close(); err != nil { errs = append(errs, fmt.Errorf("failed to close FlatKV: %w", err)) } } diff --git a/sei-db/state_db/sc/composite/store_test.go b/sei-db/state_db/sc/composite/store_test.go index b28c0ea209..4d7e851cc0 100644 --- a/sei-db/state_db/sc/composite/store_test.go +++ b/sei-db/state_db/sc/composite/store_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -31,18 +31,17 @@ func (f *failingEVMStore) Get(string, []byte) ([]byte, bool) { retur func (f *failingEVMStore) GetBlockHeightModified(string, []byte) (int64, bool, error) { return -1, false, nil } -func (f *failingEVMStore) Has(string, []byte) bool { return false } -func (f *failingEVMStore) Iterator(_, _ []byte) flatkv.Iterator { return nil } -func (f *failingEVMStore) IteratorByPrefix([]byte) flatkv.Iterator { return nil } -func (f *failingEVMStore) RootHash() []byte { return nil } -func (f *failingEVMStore) Version() int64 { return 0 } -func (f *failingEVMStore) WriteSnapshot(string) error { return nil } -func (f *failingEVMStore) Rollback(int64) error { return nil } 
-func (f *failingEVMStore) Exporter(int64) (types.Exporter, error) { return nil, nil } -func (f *failingEVMStore) Importer(int64) (types.Importer, error) { return nil, nil } -func (f *failingEVMStore) GetPhaseTimer() *metrics.PhaseTimer { return nil } -func (f *failingEVMStore) CommittedRootHash() []byte { return nil } -func (f *failingEVMStore) Close() error { return nil } +func (f *failingEVMStore) Has(string, []byte) bool { return false } +func (f *failingEVMStore) RawGlobalIterator() flatkv.Iterator { return nil } +func (f *failingEVMStore) RootHash() []byte { return nil } +func (f *failingEVMStore) Version() int64 { return 0 } +func (f *failingEVMStore) WriteSnapshot(string) error { return nil } +func (f *failingEVMStore) Rollback(int64) error { return nil } +func (f *failingEVMStore) Exporter(int64) (types.Exporter, error) { return nil, nil } +func (f *failingEVMStore) Importer(int64) (types.Importer, error) { return nil, nil } +func (f *failingEVMStore) GetPhaseTimer() *metrics.PhaseTimer { return nil } +func (f *failingEVMStore) CommittedRootHash() []byte { return nil } +func (f *failingEVMStore) Close() error { return nil } func padLeft32(val ...byte) []byte { var b [32]byte @@ -55,7 +54,7 @@ func TestCompositeStoreBasicOperations(t *testing.T) { cfg := config.DefaultStateCommitConfig() cs := NewCompositeCommitStore(t.Context(), dir, cfg) - cs.Initialize([]string{"test", EVMStoreName}) + cs.Initialize([]string{"test", keys.EVMStoreKey}) _, err := cs.LoadVersion(0, false) require.NoError(t, err) @@ -76,7 +75,7 @@ func TestCompositeStoreBasicOperations(t *testing.T) { }, }, { - Name: EVMStoreName, + Name: keys.EVMStoreKey, Changeset: proto.ChangeSet{ Pairs: []*proto.KVPair{ {Key: []byte("evm_key1"), Value: []byte("evm_value1")}, @@ -95,7 +94,7 @@ func TestCompositeStoreBasicOperations(t *testing.T) { testStore := cs.GetChildStoreByName("test") require.NotNil(t, testStore) - evmStore := cs.GetChildStoreByName(EVMStoreName) + evmStore := 
cs.GetChildStoreByName(keys.EVMStoreKey) require.NotNil(t, evmStore) } @@ -195,7 +194,7 @@ func TestWorkingAndLastCommitInfo(t *testing.T) { func TestLatticeHashCommitInfo(t *testing.T) { addr := [20]byte{0xAA} slot := [32]byte{0xBB} - evmStorageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, append(addr[:], slot[:]...)) + evmStorageKey := keys.BuildEVMKey(keys.EVMKeyStorage, append(addr[:], slot[:]...)) makeChangesets := func(round byte) []*proto.NamedChangeSet { return []*proto.NamedChangeSet{ @@ -208,7 +207,7 @@ func TestLatticeHashCommitInfo(t *testing.T) { }, }, { - Name: EVMStoreName, + Name: keys.EVMStoreKey, Changeset: proto.ChangeSet{ Pairs: []*proto.KVPair{ {Key: evmStorageKey, Value: padLeft32(round)}, @@ -239,7 +238,7 @@ func TestLatticeHashCommitInfo(t *testing.T) { cfg.EnableLatticeHash = tt.enableLattice cs := NewCompositeCommitStore(t.Context(), dir, cfg) - cs.Initialize([]string{"test", EVMStoreName}) + cs.Initialize([]string{"test", keys.EVMStoreKey}) _, err := cs.LoadVersion(0, false) require.NoError(t, err) defer cs.Close() @@ -253,7 +252,7 @@ func TestLatticeHashCommitInfo(t *testing.T) { expectedCosmos := cs.cosmosCommitter.WorkingCommitInfo() var expectedEvmHash []byte if tt.expectLattice { - expectedEvmHash = cs.evmCommitter.RootHash() + expectedEvmHash = cs.flatkvCommitter.RootHash() } workingInfo := cs.WorkingCommitInfo() @@ -291,7 +290,7 @@ func TestLatticeHashCommitInfo(t *testing.T) { expectedCosmosLast := cs.cosmosCommitter.LastCommitInfo() var expectedEvmCommitted []byte if tt.expectLattice { - expectedEvmCommitted = cs.evmCommitter.CommittedRootHash() + expectedEvmCommitted = cs.flatkvCommitter.CommittedRootHash() require.Equal(t, expectedEvmHash, expectedEvmCommitted) } @@ -432,7 +431,7 @@ func TestReadOnlyLoadVersionSoftFailsWhenFlatKVUnavailable(t *testing.T) { // Inject a failing EVM committer to simulate FlatKV being unavailable // for historical versions (different retention, late enablement, etc). 
- cs.evmCommitter = &failingEVMStore{} + cs.flatkvCommitter = &failingEVMStore{} readOnly, err := cs.LoadVersion(0, true) require.NoError(t, err, "readonly LoadVersion should succeed even when FlatKV fails") @@ -440,7 +439,7 @@ func TestReadOnlyLoadVersionSoftFailsWhenFlatKVUnavailable(t *testing.T) { compositeRO, ok := readOnly.(*CompositeCommitStore) require.True(t, ok) - require.Nil(t, compositeRO.evmCommitter, "evmCommitter should be nil when FlatKV failed") + require.Nil(t, compositeRO.flatkvCommitter, "flatkvCommitter should be nil when FlatKV failed") // Cosmos data should still be accessible store := compositeRO.GetChildStoreByName("test") @@ -511,24 +510,24 @@ func TestExportImportSplitWrite(t *testing.T) { // --- Source store: write cosmos + EVM data --- srcDir := t.TempDir() src := NewCompositeCommitStore(t.Context(), srcDir, cfg) - src.Initialize([]string{"bank", EVMStoreName}) + src.Initialize([]string{"bank", keys.EVMStoreKey}) _, err := src.LoadVersion(0, false) require.NoError(t, err) addr := ktype.Address{0xAA} slot := ktype.Slot{0xBB} - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) storageVal := padLeft32(0x42) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) nonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 10} err = src.ApplyChangeSets([]*proto.NamedChangeSet{ {Name: "bank", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ {Key: []byte("balance_alice"), Value: []byte("100")}, }}}, - {Name: EVMStoreName, Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ + {Name: keys.EVMStoreKey, Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ {Key: storageKey, Value: storageVal}, {Key: nonceKey, Value: nonceVal}, }}}, @@ -552,14 +551,14 @@ func TestExportImportSplitWrite(t *testing.T) { } } require.Contains(t, moduleNames, "bank") - require.Contains(t, moduleNames, EVMFlatKVStoreName) + 
require.Contains(t, moduleNames, keys.FlatKVStoreKey) // evm_flatkv should be the last module - require.Equal(t, EVMFlatKVStoreName, moduleNames[len(moduleNames)-1]) + require.Equal(t, keys.FlatKVStoreKey, moduleNames[len(moduleNames)-1]) // --- Destination store: import --- dstDir := t.TempDir() dst := NewCompositeCommitStore(t.Context(), dstDir, cfg) - dst.Initialize([]string{"bank", EVMStoreName}) + dst.Initialize([]string{"bank", keys.EVMStoreKey}) _, err = dst.LoadVersion(0, false) require.NoError(t, err) require.NoError(t, dst.Close()) @@ -580,12 +579,12 @@ func TestExportImportSplitWrite(t *testing.T) { require.Equal(t, []byte("100"), bankStore.Get([]byte("balance_alice"))) // Verify FlatKV data - require.NotNil(t, dst.evmCommitter) - got, found := dst.evmCommitter.Get(evm.EVMStoreKey, storageKey) + require.NotNil(t, dst.flatkvCommitter) + got, found := dst.flatkvCommitter.Get(keys.EVMStoreKey, storageKey) require.True(t, found, "storage key should exist in FlatKV after import") require.Equal(t, storageVal, got) - got, found = dst.evmCommitter.Get(evm.EVMStoreKey, nonceKey) + got, found = dst.flatkvCommitter.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "nonce key should exist in FlatKV after import") require.Equal(t, nonceVal, got) } @@ -619,7 +618,7 @@ func TestExportCosmosOnlyHasNoFlatKVModule(t *testing.T) { // In cosmos_only mode, evm_flatkv should NOT appear for _, it := range items { - require.NotEqual(t, EVMFlatKVStoreName, it.moduleName, + require.NotEqual(t, keys.FlatKVStoreKey, it.moduleName, "evm_flatkv should not appear in cosmos_only export") } } @@ -644,7 +643,7 @@ func TestCompositeImporterRouting(t *testing.T) { require.NoError(t, imp.AddModule("bank")) imp.AddNode(&types.SnapshotNode{Key: []byte("k1"), Value: []byte("v1")}) - require.NoError(t, imp.AddModule(EVMFlatKVStoreName)) + require.NoError(t, imp.AddModule(keys.FlatKVStoreKey)) imp.AddNode(&types.SnapshotNode{Key: []byte("k2"), Value: []byte("v2")}) require.NoError(t, 
imp.AddModule("staking")) @@ -657,7 +656,7 @@ func TestCompositeImporterRouting(t *testing.T) { require.Equal(t, []byte("k3"), cosmosNodes[1].Key) // evm_flatkv → evm only - require.Equal(t, []string{EVMFlatKVStoreName}, evmModules) + require.Equal(t, []string{keys.FlatKVStoreKey}, evmModules) require.Len(t, evmNodes, 1) require.Equal(t, []byte("k2"), evmNodes[0].Key) @@ -684,14 +683,14 @@ func (ti *trackingImporter) Close() error { return nil } func TestReconcileVersionsAfterCrash(t *testing.T) { addr := [20]byte{0xAA} slot := [32]byte{0xBB} - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) cfg := splitWriteConfig() dir := t.TempDir() cs := NewCompositeCommitStore(t.Context(), dir, cfg) - cs.Initialize([]string{"test", EVMStoreName}) + cs.Initialize([]string{"test", keys.EVMStoreKey}) _, err := cs.LoadVersion(0, false) require.NoError(t, err) @@ -706,7 +705,7 @@ func TestReconcileVersionsAfterCrash(t *testing.T) { }, }, { - Name: EVMStoreName, + Name: keys.EVMStoreKey, Changeset: proto.ChangeSet{ Pairs: []*proto.KVPair{ {Key: storageKey, Value: padLeft32(i)}, @@ -719,7 +718,7 @@ func TestReconcileVersionsAfterCrash(t *testing.T) { require.NoError(t, err) } require.Equal(t, int64(3), cs.cosmosCommitter.Version()) - require.Equal(t, int64(3), cs.evmCommitter.Version()) + require.Equal(t, int64(3), cs.flatkvCommitter.Version()) require.NoError(t, cs.Close()) // Simulate crash: rollback FlatKV to version 2 independently, leaving @@ -740,13 +739,13 @@ func TestReconcileVersionsAfterCrash(t *testing.T) { // Reopen the composite store — LoadVersion(0) should detect the // mismatch and reconcile both backends to version 2. 
cs2 := NewCompositeCommitStore(t.Context(), dir, cfg) - cs2.Initialize([]string{"test", EVMStoreName}) + cs2.Initialize([]string{"test", keys.EVMStoreKey}) _, err = cs2.LoadVersion(0, false) require.NoError(t, err) defer cs2.Close() require.Equal(t, int64(2), cs2.cosmosCommitter.Version(), "cosmos should be rolled back to EVM version") - require.Equal(t, int64(2), cs2.evmCommitter.Version(), "EVM should remain at version 2") + require.Equal(t, int64(2), cs2.flatkvCommitter.Version(), "EVM should remain at version 2") require.Equal(t, int64(2), cs2.Version()) // Verify cosmos data is at version 2 (value = 0x02, not 0x03) @@ -758,14 +757,14 @@ func TestReconcileVersionsAfterCrash(t *testing.T) { func TestReconcileVersionsThenContinueCommitting(t *testing.T) { addr := [20]byte{0xEE} slot := [32]byte{0xFF} - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) cfg := splitWriteConfig() dir := t.TempDir() cs := NewCompositeCommitStore(t.Context(), dir, cfg) - cs.Initialize([]string{"bank", EVMStoreName}) + cs.Initialize([]string{"bank", keys.EVMStoreKey}) _, err := cs.LoadVersion(0, false) require.NoError(t, err) @@ -775,7 +774,7 @@ func TestReconcileVersionsThenContinueCommitting(t *testing.T) { {Name: "bank", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ {Key: []byte("bal"), Value: []byte{i}}, }}}, - {Name: EVMStoreName, Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ + {Name: keys.EVMStoreKey, Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ {Key: storageKey, Value: padLeft32(i)}, }}}, })) @@ -796,12 +795,12 @@ func TestReconcileVersionsThenContinueCommitting(t *testing.T) { // Reopen — reconciliation should bring both to version 2. 
cs2 := NewCompositeCommitStore(t.Context(), dir, cfg) - cs2.Initialize([]string{"bank", EVMStoreName}) + cs2.Initialize([]string{"bank", keys.EVMStoreKey}) _, err = cs2.LoadVersion(0, false) require.NoError(t, err) require.Equal(t, int64(2), cs2.cosmosCommitter.Version()) - require.Equal(t, int64(2), cs2.evmCommitter.Version()) + require.Equal(t, int64(2), cs2.flatkvCommitter.Version()) // Continue committing new blocks on top of the reconciled state. // Version 3 is re-created with new data (0xA3 instead of 0x03). @@ -811,7 +810,7 @@ func TestReconcileVersionsThenContinueCommitting(t *testing.T) { {Name: "bank", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ {Key: []byte("bal"), Value: []byte{v}}, }}}, - {Name: EVMStoreName, Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ + {Name: keys.EVMStoreKey, Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ {Key: storageKey, Value: padLeft32(v)}, }}}, })) @@ -819,25 +818,25 @@ func TestReconcileVersionsThenContinueCommitting(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(3+i), ver, "commit should produce sequential versions") require.Equal(t, ver, cs2.cosmosCommitter.Version()) - require.Equal(t, ver, cs2.evmCommitter.Version()) + require.Equal(t, ver, cs2.flatkvCommitter.Version()) } require.NoError(t, cs2.Close()) // Reopen a third time to verify the post-reconciliation commits are durable // and both backends agree on version 5. 
cs3 := NewCompositeCommitStore(t.Context(), dir, cfg) - cs3.Initialize([]string{"bank", EVMStoreName}) + cs3.Initialize([]string{"bank", keys.EVMStoreKey}) _, err = cs3.LoadVersion(0, false) require.NoError(t, err) defer cs3.Close() require.Equal(t, int64(5), cs3.cosmosCommitter.Version()) - require.Equal(t, int64(5), cs3.evmCommitter.Version()) + require.Equal(t, int64(5), cs3.flatkvCommitter.Version()) bankStore := cs3.GetChildStoreByName("bank") require.Equal(t, []byte{0xA5}, bankStore.Get([]byte("bal"))) - got, found := cs3.evmCommitter.Get(evm.EVMStoreKey, storageKey) + got, found := cs3.flatkvCommitter.Get(keys.EVMStoreKey, storageKey) require.True(t, found) require.Equal(t, padLeft32(0xA5), got) } @@ -845,14 +844,14 @@ func TestReconcileVersionsThenContinueCommitting(t *testing.T) { func TestReconcileVersionsCosmosAheadByMultiple(t *testing.T) { addr := [20]byte{0xCC} slot := [32]byte{0xDD} - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) cfg := splitWriteConfig() dir := t.TempDir() cs := NewCompositeCommitStore(t.Context(), dir, cfg) - cs.Initialize([]string{"bank", EVMStoreName}) + cs.Initialize([]string{"bank", keys.EVMStoreKey}) _, err := cs.LoadVersion(0, false) require.NoError(t, err) @@ -867,7 +866,7 @@ func TestReconcileVersionsCosmosAheadByMultiple(t *testing.T) { }, }, { - Name: EVMStoreName, + Name: keys.EVMStoreKey, Changeset: proto.ChangeSet{ Pairs: []*proto.KVPair{ {Key: storageKey, Value: padLeft32(i)}, @@ -893,13 +892,13 @@ func TestReconcileVersionsCosmosAheadByMultiple(t *testing.T) { require.NoError(t, evmStore.Close()) cs2 := NewCompositeCommitStore(t.Context(), dir, cfg) - cs2.Initialize([]string{"bank", EVMStoreName}) + cs2.Initialize([]string{"bank", keys.EVMStoreKey}) _, err = cs2.LoadVersion(0, false) require.NoError(t, err) defer cs2.Close() require.Equal(t, int64(3), cs2.cosmosCommitter.Version()) - require.Equal(t, int64(3), 
cs2.evmCommitter.Version()) + require.Equal(t, int64(3), cs2.flatkvCommitter.Version()) bankStore := cs2.GetChildStoreByName("bank") require.Equal(t, []byte{3}, bankStore.Get([]byte("bal"))) diff --git a/sei-db/state_db/sc/flatkv/api.go b/sei-db/state_db/sc/flatkv/api.go index 60c618e570..5a0f1ce591 100644 --- a/sei-db/state_db/sc/flatkv/api.go +++ b/sei-db/state_db/sc/flatkv/api.go @@ -49,19 +49,8 @@ type Store interface { // Has reports whether the key exists within the given module. Has(moduleName string, key []byte) bool - // Iterator returns an iterator over [start, end) in memiavl key order. - // Pass nil for unbounded. - // - // EXPERIMENTAL: not used in production; only storage keys supported. - // Interface may change when Exporter/state-sync is implemented. - Iterator(start, end []byte) Iterator - - // IteratorByPrefix iterates all keys with the given prefix (more efficient than Iterator). - // Currently only supports: StateKeyPrefix||addr (storage iteration). - // - // EXPERIMENTAL: not used in production; only storage keys supported. - // Interface may change when Exporter/state-sync is implemented. - IteratorByPrefix(prefix []byte) Iterator + // RawGlobalIterator returns an iterator for all keys across all underlying DBs + RawGlobalIterator() Iterator // RootHash returns the 32-byte checksum of the working LtHash. 
// Note: This is the Blake3-256 digest of the underlying 2048-byte diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config/config.go similarity index 85% rename from sei-db/state_db/sc/flatkv/config.go rename to sei-db/state_db/sc/flatkv/config/config.go index 1da9f1b6e0..2ca254cff4 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config/config.go @@ -1,8 +1,7 @@ -package flatkv +package config import ( "fmt" - "path/filepath" "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" @@ -135,28 +134,6 @@ func (c *Config) Copy() *Config { return &cp } -// InitializeDataDirectories sets the DataDir for each nested PebbleDB config -// that does not already have one, using DataDir as the base path. The DBs live -// under the working directory: /working/. -func (c *Config) InitializeDataDirectories() { - workDir := filepath.Join(c.DataDir, workingDirName) - if c.AccountDBConfig.DataDir == "" { - c.AccountDBConfig.DataDir = filepath.Join(workDir, accountDBDir) - } - if c.CodeDBConfig.DataDir == "" { - c.CodeDBConfig.DataDir = filepath.Join(workDir, codeDBDir) - } - if c.StorageDBConfig.DataDir == "" { - c.StorageDBConfig.DataDir = filepath.Join(workDir, storageDBDir) - } - if c.LegacyDBConfig.DataDir == "" { - c.LegacyDBConfig.DataDir = filepath.Join(workDir, legacyDBDir) - } - if c.MetadataDBConfig.DataDir == "" { - c.MetadataDBConfig.DataDir = filepath.Join(workDir, metadataDir) - } -} - // Validate checks that the configuration is sane and returns an error if it is not. 
func (c *Config) Validate() error { if err := c.AccountCacheConfig.Validate(); err != nil { @@ -197,16 +174,16 @@ func (c *Config) Validate() error { return fmt.Errorf("reader threads per core must be greater than 0") } if c.ReaderConstantThreadCount < 0 { - return fmt.Errorf("reader constant thread count must be greater than 0") + return fmt.Errorf("reader constant thread count must not be negative") } if c.ReaderPoolQueueSize < 0 { - return fmt.Errorf("reader pool queue size must be greater than 0") + return fmt.Errorf("reader pool queue size must not be negative") } if c.MiscPoolThreadsPerCore < 0 { - return fmt.Errorf("misc threads per core must be greater than 0") + return fmt.Errorf("misc threads per core must not be negative") } if c.MiscConstantThreadCount < 0 { - return fmt.Errorf("misc constant thread count must be greater than 0") + return fmt.Errorf("misc constant thread count must not be negative") } return nil diff --git a/sei-db/state_db/sc/flatkv/config_test.go b/sei-db/state_db/sc/flatkv/config/config_test.go similarity index 73% rename from sei-db/state_db/sc/flatkv/config_test.go rename to sei-db/state_db/sc/flatkv/config/config_test.go index 9606423227..5664ef6e13 100644 --- a/sei-db/state_db/sc/flatkv/config_test.go +++ b/sei-db/state_db/sc/flatkv/config/config_test.go @@ -1,4 +1,4 @@ -package flatkv +package config import ( "testing" @@ -11,7 +11,11 @@ import ( func validBaseConfig() *Config { cfg := DefaultConfig() cfg.DataDir = "/tmp/test" - cfg.InitializeDataDirectories() + cfg.AccountDBConfig.DataDir = "/tmp/test/account" + cfg.CodeDBConfig.DataDir = "/tmp/test/code" + cfg.StorageDBConfig.DataDir = "/tmp/test/storage" + cfg.LegacyDBConfig.DataDir = "/tmp/test/legacy" + cfg.MetadataDBConfig.DataDir = "/tmp/test/metadata" return cfg } @@ -84,7 +88,11 @@ func TestDefaultConfigValidExceptDataDir(t *testing.T) { require.Error(t, err) cfg.DataDir = "/tmp/test" - cfg.InitializeDataDirectories() + cfg.AccountDBConfig.DataDir = "/tmp/test/account" 
+ cfg.CodeDBConfig.DataDir = "/tmp/test/code" + cfg.StorageDBConfig.DataDir = "/tmp/test/storage" + cfg.LegacyDBConfig.DataDir = "/tmp/test/legacy" + cfg.MetadataDBConfig.DataDir = "/tmp/test/metadata" require.NoError(t, cfg.Validate()) } @@ -103,37 +111,6 @@ func TestConfigCopyDeep(t *testing.T) { require.Equal(t, uint32(999), cp.SnapshotInterval) } -func TestInitializeDataDirectories(t *testing.T) { - cfg := DefaultConfig() - cfg.DataDir = "/base/flatkv" - cfg.AccountDBConfig.DataDir = "" - cfg.CodeDBConfig.DataDir = "" - cfg.StorageDBConfig.DataDir = "" - cfg.LegacyDBConfig.DataDir = "" - cfg.MetadataDBConfig.DataDir = "" - - cfg.InitializeDataDirectories() - - require.Equal(t, "/base/flatkv/working/account", cfg.AccountDBConfig.DataDir) - require.Equal(t, "/base/flatkv/working/code", cfg.CodeDBConfig.DataDir) - require.Equal(t, "/base/flatkv/working/storage", cfg.StorageDBConfig.DataDir) - require.Equal(t, "/base/flatkv/working/legacy", cfg.LegacyDBConfig.DataDir) - require.Equal(t, "/base/flatkv/working/metadata", cfg.MetadataDBConfig.DataDir) -} - -func TestInitializeDataDirectoriesPreservesExisting(t *testing.T) { - cfg := DefaultConfig() - cfg.DataDir = "/base/flatkv" - cfg.AccountDBConfig.DataDir = "/custom/account" - - cfg.InitializeDataDirectories() - - require.Equal(t, "/custom/account", cfg.AccountDBConfig.DataDir, - "existing DataDir should not be overwritten") - require.Equal(t, "/base/flatkv/working/code", cfg.CodeDBConfig.DataDir, - "empty DataDir should be populated") -} - func TestValidateNestedPebbleDBConfigError(t *testing.T) { cfg := validBaseConfig() cfg.AccountDBConfig.EnableMetrics = true diff --git a/sei-db/state_db/sc/flatkv/flatkv_test_config.go b/sei-db/state_db/sc/flatkv/config/flatkv_test_config.go similarity index 94% rename from sei-db/state_db/sc/flatkv/flatkv_test_config.go rename to sei-db/state_db/sc/flatkv/config/flatkv_test_config.go index 4ab1b71bfa..b2ff541d46 100644 --- a/sei-db/state_db/sc/flatkv/flatkv_test_config.go +++ 
b/sei-db/state_db/sc/flatkv/config/flatkv_test_config.go @@ -1,4 +1,4 @@ -package flatkv +package config import ( "path/filepath" @@ -27,7 +27,7 @@ func smallTestCacheConfig() dbcache.CacheConfig { func DefaultTestConfig(t *testing.T) *Config { t.Helper() return &Config{ - DataDir: filepath.Join(t.TempDir(), flatkvRootDir), + DataDir: filepath.Join(t.TempDir(), "flatkv"), SnapshotInterval: DefaultSnapshotInterval, SnapshotKeepRecent: DefaultSnapshotKeepRecent, AccountDBConfig: smallTestPebbleConfig(), diff --git a/sei-db/state_db/sc/flatkv/crash_recovery_test.go b/sei-db/state_db/sc/flatkv/crash_recovery_test.go deleted file mode 100644 index 67cd001174..0000000000 --- a/sei-db/state_db/sc/flatkv/crash_recovery_test.go +++ /dev/null @@ -1,478 +0,0 @@ -package flatkv - -import ( - "path/filepath" - "testing" - - "github.com/sei-protocol/sei-chain/sei-db/common/evm" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" - "github.com/sei-protocol/sei-chain/sei-db/proto" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" - "github.com/stretchr/testify/require" -) - -// verifyLtHashConsistency checks that the in-memory workingLtHash matches a -// fresh full-scan of all data DBs. Used after any recovery path. 
-func verifyLtHashConsistency(t *testing.T, s *CommitStore) { - t.Helper() - expected := fullScanLtHash(t, s) - require.Equal(t, expected.Checksum(), s.workingLtHash.Checksum(), - "workingLtHash should match fullScanLtHash after recovery") -} - -func TestCrashRecoverySkewedPerDBVersions(t *testing.T) { - dir := t.TempDir() - cfg := DefaultTestConfig(t) - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - cfg.SnapshotInterval = 3 - - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - - addr := addrN(0x01) - for i := 1; i <= 6; i++ { - cs := &proto.NamedChangeSet{ - Name: "evm", - Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - noncePair(addr, uint64(i*10)), - }}, - } - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - _, err := s.Commit() - require.NoError(t, err) - } - require.Equal(t, int64(6), s.Version()) - - // Save the correct per-DB LtHash for accountDB before skewing version. - savedAccountLtHash := s.perDBWorkingLtHash[accountDBDir].Clone() - - // Skew accountDB's local meta version to 4 while keeping the correct - // LtHash. This simulates a crash where the version watermark wasn't - // persisted but the actual data and hash are intact. - batch := s.accountDB.NewBatch() - require.NoError(t, writeLocalMetaToBatch(batch, 4, savedAccountLtHash)) - require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) - _ = batch.Close() - - require.NoError(t, s.Close()) - - // Reopen: loadGlobalMetadata detects version skew and catchup replays. - s2, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s2.LoadVersion(0, false) - require.NoError(t, err) - defer s2.Close() - - require.Equal(t, int64(6), s2.Version()) - verifyLtHashConsistency(t, s2) - - // Data should be correct and store should accept new writes. 
- cs := &proto.NamedChangeSet{ - Name: "evm", - Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - noncePair(addr, 999), - }}, - } - require.NoError(t, s2.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - v, err := s2.Commit() - require.NoError(t, err) - require.Equal(t, int64(7), v) -} - -func TestCrashRecoveryGlobalMetadataAheadOfDataDBs(t *testing.T) { - dir := t.TempDir() - cfg := DefaultTestConfig(t) - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - cfg.SnapshotInterval = 3 - - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - - addr := addrN(0x02) - for i := 1; i <= 5; i++ { - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) - cs := makeChangeSet(key, padLeft32(byte(i*11)), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - _, err := s.Commit() - require.NoError(t, err) - } - - // Save the correct storageDB per-DB LtHash before skewing. - savedStorageLtHash := s.perDBWorkingLtHash[storageDBDir].Clone() - - // Simulate crash: storageDB only flushed v3 (version watermark behind). 
- batch := s.storageDB.NewBatch() - require.NoError(t, writeLocalMetaToBatch(batch, 3, savedStorageLtHash)) - require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) - _ = batch.Close() - - require.NoError(t, s.Close()) - - s2, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s2.LoadVersion(0, false) - require.NoError(t, err) - defer s2.Close() - - require.Equal(t, int64(5), s2.Version()) - verifyLtHashConsistency(t, s2) - - for i := 1; i <= 5; i++ { - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) - val, found := s2.Get(evm.EVMStoreKey, key) - require.True(t, found, "slot %d should exist after recovery", i) - require.Equal(t, padLeft32(byte(i*11)), val) - } -} - -func TestCrashRecoveryWALReplayLargeGap(t *testing.T) { - dir := t.TempDir() - cfg := DefaultTestConfig(t) - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - cfg.SnapshotInterval = 5 - - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - - addr := addrN(0x03) - for i := 1; i <= 20; i++ { - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) - cs := makeChangeSet(key, padLeft32(byte(i)), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - _, err := s.Commit() - require.NoError(t, err) - } - expectedHash := s.RootHash() - require.NoError(t, s.Close()) - - // Reopen normally -- large WAL gap between snapshot and HEAD. - s2, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s2.LoadVersion(0, false) - require.NoError(t, err) - defer s2.Close() - - require.Equal(t, int64(20), s2.Version()) - require.Equal(t, expectedHash, s2.RootHash()) - verifyLtHashConsistency(t, s2) - - // All 20 storage slots should be readable. 
- for i := 1; i <= 20; i++ { - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) - val, found := s2.Get(evm.EVMStoreKey, key) - require.True(t, found, "slot %d should exist", i) - require.Equal(t, padLeft32(byte(i)), val) - } -} - -func TestCrashRecoveryEmptyWALAfterSnapshot(t *testing.T) { - dir := t.TempDir() - cfg := DefaultTestConfig(t) - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - - addr := addrN(0x04) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) - cs := makeChangeSet(key, padLeft32(0xAA), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - _, err = s.Commit() - require.NoError(t, err) - - require.NoError(t, s.WriteSnapshot("")) - expectedHash := s.RootHash() - expectedVersion := s.Version() - - // Clear the WAL entirely (simulate WAL lost after snapshot). - require.NoError(t, s.clearChangelog()) - require.NoError(t, s.Close()) - - // Reopen: should work from snapshot alone. - s2, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s2.LoadVersion(0, false) - require.NoError(t, err) - defer s2.Close() - - require.Equal(t, expectedVersion, s2.Version()) - require.Equal(t, expectedHash, s2.RootHash()) - - val, found := s2.Get(evm.EVMStoreKey, key) - require.True(t, found) - require.Equal(t, padLeft32(0xAA), val) - - // Can continue committing after recovery from snapshot-only state. 
- cs2 := makeChangeSet(key, padLeft32(0xBB), false) - require.NoError(t, s2.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) - v, err := s2.Commit() - require.NoError(t, err) - require.Equal(t, expectedVersion+1, v) -} - -func TestCrashRecoveryCorruptedAccountValueInDB(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0x05) - cs := &proto.NamedChangeSet{ - Name: "evm", - Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - noncePair(addr, 42), - }}, - } - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - _, err := s.Commit() - require.NoError(t, err) - - // Corrupt the account value in the DB with invalid-length data. - batch := s.accountDB.NewBatch() - require.NoError(t, batch.Set(accountPhysKey(addr), []byte{0xDE, 0xAD})) - require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) - _ = batch.Close() - - // Next ApplyChangeSets touching this account should detect the corruption - // during batchReadOldValues. - cs2 := &proto.NamedChangeSet{ - Name: "evm", - Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - noncePair(addr, 99), - }}, - } - err = s.ApplyChangeSets([]*proto.NamedChangeSet{cs2}) - require.Error(t, err, "should fail on corrupted AccountValue") - require.Contains(t, err.Error(), "unsupported serialization version") -} - -func TestCrashRecoveryCrashAfterWALBeforeDBCommit(t *testing.T) { - dir := t.TempDir() - cfg := DefaultTestConfig(t) - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - cfg.SnapshotInterval = 1 - - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - - addr := addrN(0x06) - slot := slotN(0x01) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) - cs := makeChangeSet(key, padLeft32(0x11), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - _, err = s.Commit() - require.NoError(t, err) - hashAfterV1 := s.RootHash() - - // Now simulate writing v2 to WAL 
but "crashing" before DB commit. - cs2 := makeChangeSet(key, padLeft32(0x22), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) - - // Write v2 to WAL manually (like Commit step 1). - changelogEntry := proto.ChangelogEntry{ - Version: 2, - Changesets: s.pendingChangeSets, - } - require.NoError(t, s.changelog.Write(changelogEntry)) - - // Do NOT call commitBatches or update global metadata. - // Reset in-memory state to v1 to simulate crash. - s.clearPendingWrites() - s.committedVersion = 1 - require.NoError(t, s.Close()) - - // Reopen: catchup should replay v2 from WAL. - s2, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s2.LoadVersion(0, false) - require.NoError(t, err) - defer s2.Close() - - require.Equal(t, int64(2), s2.Version()) - require.NotEqual(t, hashAfterV1, s2.RootHash(), "hash should differ after v2 replay") - - val, found := s2.Get(evm.EVMStoreKey, key) - require.True(t, found) - require.Equal(t, padLeft32(0x22), val, "v2 value should be present after catchup") - verifyLtHashConsistency(t, s2) -} - -func TestCrashRecoveryLtHashConsistencyAfterAllPaths(t *testing.T) { - dir := t.TempDir() - cfg := DefaultTestConfig(t) - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - cfg.SnapshotInterval = 3 - - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - - addr := addrN(0x07) - for i := 1; i <= 10; i++ { - pairs := []*proto.KVPair{ - noncePair(addr, uint64(i)), - { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))), - Value: padLeft32(byte(i)), - }, - } - cs := &proto.NamedChangeSet{ - Name: "evm", - Changeset: proto.ChangeSet{Pairs: pairs}, - } - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - _, err := s.Commit() - require.NoError(t, err) - } - verifyLtHashConsistency(t, s) - require.NoError(t, s.Close()) - - // Path 1: Normal reopen - s2, err := 
NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s2.LoadVersion(0, false) - require.NoError(t, err) - verifyLtHashConsistency(t, s2) - - // Path 2: Rollback to v6 - require.NoError(t, s2.Rollback(6)) - require.Equal(t, int64(6), s2.Version()) - verifyLtHashConsistency(t, s2) - - // Path 3: Continue writing after rollback - cs := &proto.NamedChangeSet{ - Name: "evm", - Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - noncePair(addr, 999), - }}, - } - require.NoError(t, s2.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - _, err = s2.Commit() - require.NoError(t, err) - verifyLtHashConsistency(t, s2) - require.NoError(t, s2.Close()) - - // Path 4: Reopen after rollback + new commit - s3, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s3.LoadVersion(0, false) - require.NoError(t, err) - defer s3.Close() - verifyLtHashConsistency(t, s3) -} - -func TestCrashRecoveryCorruptLtHashBlobInMetadata(t *testing.T) { - dir := t.TempDir() - cfg := DefaultTestConfig(t) - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - - cs := makeChangeSet( - evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), - padLeft32(0x11), false, - ) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - _, err = s.Commit() - require.NoError(t, err) - - // Write garbage to the global _meta/hash key in metadataDB. - batch := s.metadataDB.NewBatch() - require.NoError(t, batch.Set(metaLtHashKey, []byte{0xDE, 0xAD, 0xBE, 0xEF})) - require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) - _ = batch.Close() - - require.NoError(t, s.Close()) - - // Reopen should fail with an LtHash unmarshal error. 
- s2, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - defer s2.Close() - _, err = s2.LoadVersion(0, false) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid LtHash size") -} - -func TestCrashRecoveryCorruptLtHashBlobInPerDBMeta(t *testing.T) { - dir := t.TempDir() - cfg := DefaultTestConfig(t) - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - - cs := makeChangeSet( - evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x02), slotN(0x01))), - padLeft32(0x22), false, - ) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - _, err = s.Commit() - require.NoError(t, err) - - // Write garbage to accountDB's _meta/hash key. - batch := s.accountDB.NewBatch() - require.NoError(t, batch.Set(metaLtHashKey, []byte{0x01, 0x02, 0x03})) - require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) - _ = batch.Close() - - require.NoError(t, s.Close()) - - // Reopen should fail with an LtHash unmarshal error from per-DB meta. - s2, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - defer s2.Close() - _, err = s2.LoadVersion(0, false) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid LtHash size") -} - -func TestCrashRecoveryGlobalVersionOverflow(t *testing.T) { - dir := t.TempDir() - cfg := DefaultTestConfig(t) - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - - cs := makeChangeSet( - evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x03), slotN(0x01))), - padLeft32(0x33), false, - ) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - _, err = s.Commit() - require.NoError(t, err) - - // Write a version value that exceeds math.MaxInt64 to the global metadata. 
- overflowBytes := make([]byte, 8) - overflowBytes[0] = 0xFF // 0xFF00000000000000 > MaxInt64 - batch := s.metadataDB.NewBatch() - require.NoError(t, batch.Set(metaVersionKey, overflowBytes)) - require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) - _ = batch.Close() - - require.NoError(t, s.Close()) - - // Reopen should fail with an overflow error. - s2, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - defer s2.Close() - _, err = s2.LoadVersion(0, false) - require.Error(t, err) - require.Contains(t, err.Error(), "global version overflow") -} diff --git a/sei-db/state_db/sc/flatkv/exporter.go b/sei-db/state_db/sc/flatkv/exporter.go index dcde5eedab..d827b07c84 100644 --- a/sei-db/state_db/sc/flatkv/exporter.go +++ b/sei-db/state_db/sc/flatkv/exporter.go @@ -2,47 +2,26 @@ package flatkv import ( "bytes" - "encoding/binary" "fmt" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" - dbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" ) var _ types.Exporter = (*KVExporter)(nil) -type exportDBKind int - -const ( - exportDBAccount exportDBKind = iota - exportDBCode - exportDBStorage - exportDBLegacy - exportDBDone -) - -// KVExporter exports all committed EVM data from a read-only FlatKV store -// as SnapshotNode items. Keys are emitted in memiavl EVM format so the -// importer can feed them through ApplyChangeSets unchanged. +// KVExporter exports all committed data from a read-only FlatKV store as raw +// physical key/value pairs. It uses RawGlobalIterator to walk every data DB +// (account → code → storage → legacy) and emits each row as a single +// SnapshotNode without any parsing or conversion. 
// // All emitted SnapshotNodes carry the export version and Height=0 (leaf). -// This intentionally flattens version history: state sync only transfers the -// latest state at a given height, not the full edit history. // // The caller must Close the exporter when done. type KVExporter struct { store *CommitStore version int64 - - currentDB exportDBKind - currentIter dbtypes.KeyValueDBIterator - - // accountDB entries decompose into multiple snapshot nodes (nonce + codehash). - pendingNodes []*types.SnapshotNode + iter Iterator } func NewKVExporter(store *CommitStore, version int64) *KVExporter { @@ -53,73 +32,37 @@ func NewKVExporter(store *CommitStore, version int64) *KVExporter { } func (e *KVExporter) Next() (interface{}, error) { - if len(e.pendingNodes) > 0 { - node := e.pendingNodes[0] - e.pendingNodes = e.pendingNodes[1:] - return node, nil - } - - for e.currentDB < exportDBDone { - if e.currentIter == nil { - iter, err := e.openIterForDB(e.currentDB) - if err != nil { - return nil, fmt.Errorf("open iterator for db %d: %w", e.currentDB, err) - } - if iter == nil { - e.currentDB++ - continue + if e.iter == nil { + e.iter = e.store.RawGlobalIterator() + if !e.iter.First() { + if err := e.iter.Error(); err != nil { + return nil, fmt.Errorf("iterator seek error: %w", err) } - if !iter.First() { - err := iter.Error() - _ = iter.Close() - if err != nil { - return nil, fmt.Errorf("iterator seek error for db %d: %w", e.currentDB, err) - } - e.currentDB++ - continue - } - e.currentIter = iter - } - - if !e.currentIter.Valid() { - if err := e.currentIter.Error(); err != nil { - return nil, fmt.Errorf("iterator error: %w", err) - } - _ = e.currentIter.Close() - e.currentIter = nil - e.currentDB++ - continue - } - - if isMetaKey(e.currentIter.Key()) { - e.currentIter.Next() - continue - } - key := bytes.Clone(e.currentIter.Key()) - value := bytes.Clone(e.currentIter.Value()) - e.currentIter.Next() - - nodes, err := e.convertToNodes(e.currentDB, key, value) - if err != 
nil { - return nil, err - } - if len(nodes) == 0 { - continue + return nil, errorutils.ErrorExportDone } + } - if len(nodes) > 1 { - e.pendingNodes = nodes[1:] + if !e.iter.Valid() { + if err := e.iter.Error(); err != nil { + return nil, fmt.Errorf("iterator error: %w", err) } - return nodes[0], nil + return nil, errorutils.ErrorExportDone } - return nil, errorutils.ErrorExportDone + node := &types.SnapshotNode{ + Key: bytes.Clone(e.iter.Key()), + Value: bytes.Clone(e.iter.Value()), + Version: e.version, + Height: 0, + } + e.iter.Next() + return node, nil } func (e *KVExporter) Close() error { - if e.currentIter != nil { - _ = e.currentIter.Close() - e.currentIter = nil + if e.iter != nil { + _ = e.iter.Close() + e.iter = nil } if e.store != nil { err := e.store.Close() @@ -128,126 +71,3 @@ func (e *KVExporter) Close() error { } return nil } - -// openIterForDB returns an iterator over all user data in the given DB. -// Metadata keys are filtered out by isMetaKey() in the iteration loop. 
-func (e *KVExporter) openIterForDB(db exportDBKind) (dbtypes.KeyValueDBIterator, error) { - var kvDB dbtypes.KeyValueDB - switch db { - case exportDBAccount: - kvDB = e.store.accountDB - case exportDBCode: - kvDB = e.store.codeDB - case exportDBStorage: - kvDB = e.store.storageDB - case exportDBLegacy: - kvDB = e.store.legacyDB - default: - return nil, nil - } - if kvDB == nil { - return nil, nil - } - return kvDB.NewIter(&dbtypes.IterOptions{}) -} - -func (e *KVExporter) convertToNodes(db exportDBKind, key, value []byte) ([]*types.SnapshotNode, error) { - switch db { - case exportDBAccount: - return e.accountToNodes(key, value) - case exportDBCode: - return e.codeToNodes(key, value) - case exportDBStorage: - return e.storageToNodes(key, value) - case exportDBLegacy: - return e.legacyToNodes(key, value) - default: - return nil, nil - } -} - -func (e *KVExporter) node(key, value []byte) *types.SnapshotNode { - return &types.SnapshotNode{ - Key: key, - Value: value, - Version: e.version, - Height: 0, - } -} - -func (e *KVExporter) accountToNodes(key, value []byte) ([]*types.SnapshotNode, error) { - _, addr, err := ktype.StripEVMPhysicalKey(key) - if err != nil { - return nil, fmt.Errorf("corrupt account physical key: %w", err) - } - - ad, err := vtype.DeserializeAccountData(value) - if err != nil { - return nil, fmt.Errorf("corrupt account entry key=%x: %w", key, err) - } - - nodes := make([]*types.SnapshotNode, 0, 2) - - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr) - nonceValue := make([]byte, vtype.NonceLen) - binary.BigEndian.PutUint64(nonceValue, ad.GetNonce()) - nodes = append(nodes, e.node(nonceKey, nonceValue)) - - codeHash := ad.GetCodeHash() - var zeroHash vtype.CodeHash - if codeHash != nil && *codeHash != zeroHash { - codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr) - codeHashValue := make([]byte, vtype.CodeHashLen) - copy(codeHashValue, codeHash[:]) - nodes = append(nodes, e.node(codeHashKey, codeHashValue)) - } - - return 
nodes, nil -} - -func (e *KVExporter) codeToNodes(key, value []byte) ([]*types.SnapshotNode, error) { - _, addr, err := ktype.StripEVMPhysicalKey(key) - if err != nil { - return nil, fmt.Errorf("corrupt code physical key: %w", err) - } - - codeData, err := vtype.DeserializeCodeData(value) - if err != nil { - return nil, fmt.Errorf("corrupt code entry key=%x: %w", key, err) - } - memiavlKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr) - return []*types.SnapshotNode{e.node(memiavlKey, codeData.GetBytecode())}, nil -} - -func (e *KVExporter) storageToNodes(key, value []byte) ([]*types.SnapshotNode, error) { - _, strippedKey, err := ktype.StripEVMPhysicalKey(key) - if err != nil { - return nil, fmt.Errorf("corrupt storage physical key: %w", err) - } - - storageData, err := vtype.DeserializeStorageData(value) - if err != nil { - return nil, fmt.Errorf("corrupt storage entry key=%x: %w", key, err) - } - memiavlKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, strippedKey) - return []*types.SnapshotNode{e.node(memiavlKey, storageData.GetValue()[:])}, nil -} - -func (e *KVExporter) legacyToNodes(key, value []byte) ([]*types.SnapshotNode, error) { - moduleName, originalKey, err := ktype.StripModulePrefix(key) - if err != nil { - return nil, fmt.Errorf("legacy key missing module prefix: %w", err) - } - - // exporter are broken now, add this for passing tests - if moduleName != evm.EVMStoreKey { - return nil, nil - } - - legacyData, err := vtype.DeserializeLegacyData(value) - if err != nil { - return nil, fmt.Errorf("corrupt legacy entry module=%s key=%x: %w", moduleName, originalKey, err) - } - - return []*types.SnapshotNode{e.node(originalKey, legacyData.GetValue())}, nil -} diff --git a/sei-db/state_db/sc/flatkv/exporter_test.go b/sei-db/state_db/sc/flatkv/import_export_test.go similarity index 74% rename from sei-db/state_db/sc/flatkv/exporter_test.go rename to sei-db/state_db/sc/flatkv/import_export_test.go index 5a6a4f086e..6473dd0be6 100644 --- 
a/sei-db/state_db/sc/flatkv/exporter_test.go +++ b/sei-db/state_db/sc/flatkv/import_export_test.go @@ -9,9 +9,10 @@ import ( "github.com/stretchr/testify/require" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" dbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" @@ -56,8 +57,8 @@ func TestExporterStorageKeys(t *testing.T) { val1 := padLeft32(0x11) val2 := padLeft32(0x22) - key1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot1)) - key2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot2)) + key1 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot1)) + key2 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot2)) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ {Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ @@ -76,8 +77,9 @@ func TestExporterStorageKeys(t *testing.T) { for _, n := range nodes { require.Equal(t, int64(1), n.Version) require.Equal(t, int8(0), n.Height) - kind, _ := evm.ParseEVMKey(n.Key) - require.Equal(t, evm.EVMKeyStorage, kind) + kind, _, err := ktype.StripEVMPhysicalKey(n.Key) + require.NoError(t, err) + require.Equal(t, keys.EVMKeyStorage, kind) } } @@ -86,10 +88,10 @@ func TestExporterAccountKeys(t *testing.T) { defer s.Close() addr := ktype.Address{0xBB} - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) nonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 42} - codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) 
+ codeHashKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) codeHashVal := make([]byte, vtype.CodeHashLen) codeHashVal[0] = 0xDE @@ -106,20 +108,18 @@ func TestExporterAccountKeys(t *testing.T) { nodes := drainExporter(t, exp) require.NoError(t, exp.Close()) - // accountDB produces nonce + codehash nodes per account - require.Len(t, nodes, 2) - - kindMap := map[evm.EVMKeyKind]*types.SnapshotNode{} - for _, n := range nodes { - kind, _ := evm.ParseEVMKey(n.Key) - kindMap[kind] = n - } + // nonce + codehash merge into a single account row in accountDB + require.Len(t, nodes, 1) - require.Contains(t, kindMap, evm.EVMKeyNonce) - require.Equal(t, nonceVal, kindMap[evm.EVMKeyNonce].Value) + n := nodes[0] + kind, _, err := ktype.StripEVMPhysicalKey(n.Key) + require.NoError(t, err) + require.Equal(t, ktype.EVMKeyAccount, kind) - require.Contains(t, kindMap, evm.EVMKeyCodeHash) - require.Equal(t, codeHashVal, kindMap[evm.EVMKeyCodeHash].Value) + acct, err := vtype.DeserializeAccountData(n.Value) + require.NoError(t, err) + require.Equal(t, uint64(42), acct.GetNonce()) + require.Equal(t, byte(0xDE), acct.GetCodeHash()[0]) } func TestExporterCodeKeys(t *testing.T) { @@ -127,7 +127,7 @@ func TestExporterCodeKeys(t *testing.T) { defer s.Close() addr := ktype.Address{0xCC} - codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) + codeKey := keys.BuildEVMKey(keys.EVMKeyCode, addr[:]) codeVal := []byte{0x60, 0x80, 0x60, 0x40} require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -142,15 +142,14 @@ func TestExporterCodeKeys(t *testing.T) { nodes := drainExporter(t, exp) require.NoError(t, exp.Close()) - var codeNodes []*types.SnapshotNode - for _, n := range nodes { - kind, _ := evm.ParseEVMKey(n.Key) - if kind == evm.EVMKeyCode { - codeNodes = append(codeNodes, n) - } - } - require.Len(t, codeNodes, 1) - require.Equal(t, codeVal, codeNodes[0].Value) + require.Len(t, nodes, 1) + kind, _, err := ktype.StripEVMPhysicalKey(nodes[0].Key) + require.NoError(t, err) + 
require.Equal(t, keys.EVMKeyCode, kind) + + code, err := vtype.DeserializeCodeData(nodes[0].Value) + require.NoError(t, err) + require.Equal(t, codeVal, code.GetBytecode()) } func TestExporterRoundTrip(t *testing.T) { @@ -160,13 +159,13 @@ func TestExporterRoundTrip(t *testing.T) { addr := ktype.Address{0xDD} slot := ktype.Slot{0xEE} - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) storageVal := padLeft32(0xFF) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) nonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 7} - codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) + codeKey := keys.BuildEVMKey(keys.EVMKeyCode, addr[:]) codeVal := []byte{0x60, 0x80} - codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + codeHashKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) codeHashVal := make([]byte, vtype.CodeHashLen) codeHashVal[31] = 0xAB @@ -194,7 +193,7 @@ func TestExporterRoundTrip(t *testing.T) { imp, err := s2.Importer(1) require.NoError(t, err) - require.NoError(t, imp.AddModule(evm.EVMFlatKVStoreKey)) + require.NoError(t, imp.AddModule("flatkv")) for _, n := range nodes { imp.AddNode(n) } @@ -203,23 +202,23 @@ func TestExporterRoundTrip(t *testing.T) { // --- Verify round-trip --- require.Equal(t, int64(1), s2.Version()) - got, found := s2.Get(evm.EVMStoreKey, storageKey) + got, found := s2.Get(keys.EVMStoreKey, storageKey) require.True(t, found, "storage key should exist after import") require.Equal(t, storageVal, got) - got, found = s2.Get(evm.EVMStoreKey, nonceKey) + got, found = s2.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "nonce key should exist after import") require.Equal(t, nonceVal, got) - got, found = s2.Get(evm.EVMStoreKey, codeKey) + got, found = s2.Get(keys.EVMStoreKey, codeKey) require.True(t, found, "code key should exist after 
import") require.Equal(t, codeVal, got) - got, found = s2.Get(evm.EVMStoreKey, codeHashKey) + got, found = s2.Get(keys.EVMStoreKey, codeHashKey) require.True(t, found, "codehash key should exist after import") require.Equal(t, codeHashVal, got) - // LtHash should match source since import recomputes it via ApplyChangeSets + // LtHash should match because import recomputes it from the same physical key/value pairs require.Equal(t, srcHash, s2.RootHash()) require.NoError(t, s2.Close()) @@ -244,7 +243,7 @@ func TestExporterEOAAccountOmitsCodeHash(t *testing.T) { defer s.Close() addr := ktype.Address{0xAA} - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) nonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 1} // EOA: only nonce, no codehash @@ -260,11 +259,17 @@ func TestExporterEOAAccountOmitsCodeHash(t *testing.T) { nodes := drainExporter(t, exp) require.NoError(t, exp.Close()) - // EOA should only produce a nonce node (no codehash) + // EOA produces a single account node with zero codehash (compact form) require.Len(t, nodes, 1) - kind, _ := evm.ParseEVMKey(nodes[0].Key) - require.Equal(t, evm.EVMKeyNonce, kind) - require.Equal(t, nonceVal, nodes[0].Value) + kind, _, err := ktype.StripEVMPhysicalKey(nodes[0].Key) + require.NoError(t, err) + require.Equal(t, ktype.EVMKeyAccount, kind) + + acct, err := vtype.DeserializeAccountData(nodes[0].Value) + require.NoError(t, err) + require.Equal(t, uint64(1), acct.GetNonce()) + var zeroHash vtype.CodeHash + require.Equal(t, &zeroHash, acct.GetCodeHash()) } func TestImportSurvivesReopen(t *testing.T) { @@ -274,9 +279,9 @@ func TestImportSurvivesReopen(t *testing.T) { addr := ktype.Address{0xDD} slot := ktype.Slot{0xEE} - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) storageVal := padLeft32(0xFF) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, 
addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) nonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 7} require.NoError(t, src.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -297,7 +302,7 @@ func TestImportSurvivesReopen(t *testing.T) { dir := t.TempDir() dbPath := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbPath s1, err := NewCommitStore(t.Context(), cfg) @@ -307,7 +312,7 @@ func TestImportSurvivesReopen(t *testing.T) { imp, err := s1.Importer(1) require.NoError(t, err) - require.NoError(t, imp.AddModule(evm.EVMFlatKVStoreKey)) + require.NoError(t, imp.AddModule("flatkv")) for _, n := range nodes { imp.AddNode(n) } @@ -315,7 +320,7 @@ func TestImportSurvivesReopen(t *testing.T) { require.NoError(t, s1.Close()) // Reopen from the same directory — data must survive. - cfg2 := DefaultTestConfig(t) + cfg2 := config.DefaultTestConfig(t) cfg2.DataDir = dbPath s2, err := NewCommitStore(t.Context(), cfg2) @@ -326,11 +331,11 @@ func TestImportSurvivesReopen(t *testing.T) { require.Equal(t, int64(1), s2.Version()) - got, found := s2.Get(evm.EVMStoreKey, storageKey) + got, found := s2.Get(keys.EVMStoreKey, storageKey) require.True(t, found, "storage key must survive reopen") require.Equal(t, storageVal, got) - got, found = s2.Get(evm.EVMStoreKey, nonceKey) + got, found = s2.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "nonce key must survive reopen") require.Equal(t, nonceVal, got) @@ -346,7 +351,7 @@ func TestImportPurgesStaleData(t *testing.T) { dir := t.TempDir() dbPath := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbPath s, err := NewCommitStore(t.Context(), cfg) @@ -361,16 +366,16 @@ func TestImportPurgesStaleData(t *testing.T) { slotStale := ktype.Slot{0x03} // Storage keys - storageA := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrA, slotA)) - storageStale := 
evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrStale, slotStale)) + storageA := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrA, slotA)) + storageStale := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrStale, slotStale)) // Account keys (nonce + codehash) - nonceA := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addrA[:]) - nonceStale := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addrStale[:]) - codeHashB := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addrB[:]) - codeHashStale := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addrStale[:]) + nonceA := keys.BuildEVMKey(keys.EVMKeyNonce, addrA[:]) + nonceStale := keys.BuildEVMKey(keys.EVMKeyNonce, addrStale[:]) + codeHashB := keys.BuildEVMKey(keys.EVMKeyCodeHash, addrB[:]) + codeHashStale := keys.BuildEVMKey(keys.EVMKeyCodeHash, addrStale[:]) // Code key - codeB := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addrB[:]) - codeStale := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addrStale[:]) + codeB := keys.BuildEVMKey(keys.EVMKeyCode, addrB[:]) + codeStale := keys.BuildEVMKey(keys.EVMKeyCode, addrStale[:]) nonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 1} codeHashVal := make([]byte, vtype.CodeHashLen) @@ -395,7 +400,7 @@ func TestImportPurgesStaleData(t *testing.T) { var found bool for _, k := range staleKeys { - _, found = s.Get(evm.EVMStoreKey, k) + _, found = s.Get(keys.EVMStoreKey, k) require.True(t, found, "pre-import: key should exist") } @@ -435,7 +440,7 @@ func TestImportPurgesStaleData(t *testing.T) { imp, err := s.Importer(1) require.NoError(t, err) - require.NoError(t, imp.AddModule(evm.EVMFlatKVStoreKey)) + require.NoError(t, imp.AddModule("flatkv")) for _, n := range nodes { imp.AddNode(n) } @@ -443,24 +448,24 @@ func TestImportPurgesStaleData(t *testing.T) { // --- Phase 4: verify stale keys are gone across all DB types --- var got []byte - got, found = s.Get(evm.EVMStoreKey, storageA) + got, found = s.Get(keys.EVMStoreKey, storageA) require.True(t, found, "storage key A should exist") 
require.Equal(t, newStorageVal, got) - got, found = s.Get(evm.EVMStoreKey, nonceA) + got, found = s.Get(keys.EVMStoreKey, nonceA) require.True(t, found, "nonce key A should exist") require.Equal(t, newNonceVal, got) - got, found = s.Get(evm.EVMStoreKey, codeB) + got, found = s.Get(keys.EVMStoreKey, codeB) require.True(t, found, "code key B should exist") require.Equal(t, newCodeVal, got) - got, found = s.Get(evm.EVMStoreKey, codeHashB) + got, found = s.Get(keys.EVMStoreKey, codeHashB) require.True(t, found, "codehash key B should exist") require.Equal(t, newCodeHashVal, got) for _, k := range staleKeys { - _, found = s.Get(evm.EVMStoreKey, k) + _, found = s.Get(keys.EVMStoreKey, k) require.False(t, found, "stale key should NOT exist after import") } @@ -476,7 +481,7 @@ func TestImportPurgesStaleData(t *testing.T) { require.Equal(t, int64(1), s.Version()) for _, k := range staleKeys { - _, found = s.Get(evm.EVMStoreKey, k) + _, found = s.Get(keys.EVMStoreKey, k) require.False(t, found, "stale key must remain absent after reopen") } require.Equal(t, srcHash, s.RootHash()) @@ -486,7 +491,7 @@ func TestImporterFailsWhenResetCannotRemoveCurrentLink(t *testing.T) { dir := t.TempDir() dbPath := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbPath s, err := NewCommitStore(t.Context(), cfg) @@ -518,7 +523,7 @@ func TestImporterOnReadOnlyStore(t *testing.T) { s := setupTestStore(t) cs := makeChangeSet( - evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), + keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), padLeft32(0x11), false, ) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -536,7 +541,7 @@ func TestImporterOnReadOnlyStore(t *testing.T) { func TestImporterHeightNonZeroSkipped(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, 
flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) @@ -549,7 +554,7 @@ func TestImporterHeightNonZeroSkipped(t *testing.T) { // Non-leaf nodes (Height != 0) are silently skipped. imp.AddNode(&types.SnapshotNode{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), + Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), Value: padLeft32(0x11), Height: 1, // non-leaf }) @@ -557,15 +562,15 @@ func TestImporterHeightNonZeroSkipped(t *testing.T) { require.NoError(t, imp.Close()) // Data should NOT have been imported. - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))) - _, found := s.Get(evm.EVMStoreKey, key) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))) + _, found := s.Get(keys.EVMStoreKey, key) require.False(t, found, "height != 0 node should be skipped") require.NoError(t, s.Close()) } func TestImporterNilKeySkipped(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) @@ -590,7 +595,7 @@ func TestImporterNilKeySkipped(t *testing.T) { func TestImporterEmptyStore(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) @@ -610,7 +615,7 @@ func TestImporterEmptyStore(t *testing.T) { func TestImporterCorruptKeyDataPropagatesError(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) @@ -621,28 +626,21 @@ func TestImporterCorruptKeyDataPropagatesError(t *testing.T) { imp, err := s.Importer(1) require.NoError(t, err) - // Add a valid storage node first. 
- imp.AddNode(&types.SnapshotNode{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), - Value: padLeft32(0x11), - }) - - // Add a node with a nonce key but invalid nonce value length. - // This should cause ApplyChangeSets to error during flush/close. - addr2 := addrN(0x02) + // A key without module prefix ("/" separator) should be rejected by + // routePhysicalKey during flush. imp.AddNode(&types.SnapshotNode{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr2[:]), - Value: []byte{0x01, 0x02}, // wrong length for nonce (needs 8 bytes) + Key: []byte{0xDE, 0xAD}, + Value: []byte{0x01, 0x02}, }) err = imp.Close() - require.Error(t, err, "import with invalid nonce length should fail") - // Don't close s here -- it may be in a partial state; just let test cleanup handle it. + require.Error(t, err, "import with invalid physical key should fail") + require.Contains(t, err.Error(), "route key") } func TestImporterDoubleImport(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) @@ -650,50 +648,57 @@ func TestImporterDoubleImport(t *testing.T) { _, err = s.LoadVersion(0, false) require.NoError(t, err) - // First import. + storageVal1 := padLeft32(0x11) + sv1 := &[32]byte{} + copy(sv1[:], storageVal1) + storageVal2 := padLeft32(0x22) + sv2 := &[32]byte{} + copy(sv2[:], storageVal2) + + // First import — uses physical keys and serialized VType values. 
imp1, err := s.Importer(1) require.NoError(t, err) imp1.AddNode(&types.SnapshotNode{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), - Value: padLeft32(0x11), + Key: storagePhysKey(addrN(0x01), slotN(0x01)), + Value: vtype.NewStorageData().SetBlockHeight(1).SetValue(sv1).Serialize(), }) require.NoError(t, imp1.Close()) - key1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))) - val, found := s.Get(evm.EVMStoreKey, key1) + key1 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))) + val, found := s.Get(keys.EVMStoreKey, key1) require.True(t, found) - require.Equal(t, padLeft32(0x11), val) + require.Equal(t, storageVal1, val) // Second import: should wipe prior state (resetForImport). imp2, err := s.Importer(2) require.NoError(t, err) imp2.AddNode(&types.SnapshotNode{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x02), slotN(0x02))), - Value: padLeft32(0x22), + Key: storagePhysKey(addrN(0x02), slotN(0x02)), + Value: vtype.NewStorageData().SetBlockHeight(2).SetValue(sv2).Serialize(), }) require.NoError(t, imp2.Close()) require.Equal(t, int64(2), s.Version()) // Data from first import should be gone. 
- _, found = s.Get(evm.EVMStoreKey, key1) + _, found = s.Get(keys.EVMStoreKey, key1) require.False(t, found, "first import data should be wiped by second import") - key2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x02), slotN(0x02))) - val, found = s.Get(evm.EVMStoreKey, key2) + key2 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x02), slotN(0x02))) + val, found = s.Get(keys.EVMStoreKey, key2) require.True(t, found) - require.Equal(t, padLeft32(0x22), val) + require.Equal(t, storageVal2, val) require.NoError(t, s.Close()) } func TestExporterAtHistoricalVersion(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 1 s := setupTestStoreWithConfig(t, cfg) defer s.Close() addr := addrN(0x10) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) // v1: write 0x11 cs := makeChangeSet(key, padLeft32(0x11), false) @@ -722,19 +727,22 @@ func TestExporterAtHistoricalVersion(t *testing.T) { break } node := item.(*types.SnapshotNode) - kind, _ := evm.ParseEVMKey(node.Key) - if kind == evm.EVMKeyStorage { + kind, _, parseErr := ktype.StripEVMPhysicalKey(node.Key) + require.NoError(t, parseErr) + if kind == keys.EVMKeyStorage { storageNodes = append(storageNodes, node) } } require.NoError(t, exp.Close()) require.Len(t, storageNodes, 1) - require.Equal(t, padLeft32(0x11), storageNodes[0].Value, "historical export should have v1 value") + sd, err := vtype.DeserializeStorageData(storageNodes[0].Value) + require.NoError(t, err) + require.Equal(t, padLeft32(0x11), sd.GetValue()[:], "historical export should have v1 value") } func TestExportImportLargerDataset(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s := setupTestStoreWithConfig(t, cfg) defer s.Close() @@ -748,7 +756,7 @@ func TestExportImportLargerDataset(t *testing.T) { allPairs = 
append(allPairs, noncePair(addr, uint64(i)), &proto.KVPair{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(i))), + Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(i))), Value: padLeft32(i, i, i), }, ) @@ -776,7 +784,7 @@ func TestExportImportLargerDataset(t *testing.T) { // Import into a fresh store. dir2 := t.TempDir() - cfg2 := DefaultTestConfig(t) + cfg2 := config.DefaultTestConfig(t) cfg2.DataDir = filepath.Join(dir2, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg2) require.NoError(t, err) @@ -815,26 +823,24 @@ func TestExporterCorruptAccountValueInDB(t *testing.T) { require.NoError(t, batch.Commit(dbtypes.WriteOptions{Sync: true})) _ = batch.Close() - // Construct an exporter directly on this store to exercise the - // corrupt-account path without the read-only checkpoint (which - // replays the WAL and restores the clean value). + // Raw exporter does not parse values — corrupt data is exported as-is. exp := NewKVExporter(s, s.Version()) - var hitError bool + var nodes []*types.SnapshotNode for { - _, err := exp.Next() + item, err := exp.Next() if err != nil { - if errors.Is(err, errorutils.ErrorExportDone) { - break - } - require.Contains(t, err.Error(), "corrupt account entry") - hitError = true + require.True(t, errors.Is(err, errorutils.ErrorExportDone)) break } + node, ok := item.(*types.SnapshotNode) + require.True(t, ok) + nodes = append(nodes, node) } - require.True(t, hitError, "exporter should return error on corrupt AccountValue") - // Only close the iterator, not the underlying store (we own s via defer). 
- if exp.currentIter != nil { - _ = exp.currentIter.Close() + require.Len(t, nodes, 1, "corrupt value should still be exported as raw bytes") + require.Equal(t, []byte{0xDE, 0xAD}, nodes[0].Value) + if exp.iter != nil { + _ = exp.iter.Close() + exp.iter = nil } } diff --git a/sei-db/state_db/sc/flatkv/importer.go b/sei-db/state_db/sc/flatkv/importer.go index a2a1f5f06e..a26543980a 100644 --- a/sei-db/state_db/sc/flatkv/importer.go +++ b/sei-db/state_db/sc/flatkv/importer.go @@ -2,89 +2,230 @@ package flatkv import ( "fmt" + "sync" + "sync/atomic" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" - "github.com/sei-protocol/sei-chain/sei-db/proto" + seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" ) -const importBatchSize = 20000 +const ( + importBatchSize = 20000 + ingestChanSize = 1 << 16 // 64K buffered main channel + workerChanSize = 1024 // per-DB worker channel +) var _ types.Importer = (*KVImporter)(nil) +// dbWorker owns a single PebbleDB and its LtHash accumulation. It reads +// key/value pairs from its channel, buffers them into a PebbleDB batch, +// and flushes (commit + LtHash update) when the buffer is full or the +// channel is closed. +type dbWorker struct { + dir string + db seidbtypes.KeyValueDB + ch chan rawKVPair + batch seidbtypes.Batch + ltPairs []lthash.KVPairWithLastValue + ltHash *lthash.LtHash +} + +func newDBWorker(dir string, db seidbtypes.KeyValueDB, ltHash *lthash.LtHash) *dbWorker { + return &dbWorker{ + dir: dir, + db: db, + ch: make(chan rawKVPair, workerChanSize), + batch: db.NewBatch(), + ltPairs: make([]lthash.KVPairWithLastValue, 0, importBatchSize), + ltHash: ltHash, + } +} + +// run drains the worker channel until closed, flushing whenever the +// buffer reaches importBatchSize. If done fires, the worker abandons +// remaining work and exits immediately. 
+func (w *dbWorker) run(done <-chan struct{}) error { + for { + select { + case kv, ok := <-w.ch: + if !ok { + return w.flush() + } + if err := w.batch.Set(kv.Key, kv.Value); err != nil { + return fmt.Errorf("%s set: %w", w.dir, err) + } + w.ltPairs = append(w.ltPairs, lthash.KVPairWithLastValue{ + Key: kv.Key, + Value: kv.Value, + }) + if len(w.ltPairs) >= importBatchSize { + if err := w.flush(); err != nil { + return err + } + } + case <-done: + return nil + } + } +} + +// flush commits the current PebbleDB batch and updates the running LtHash. +func (w *dbWorker) flush() error { + if len(w.ltPairs) == 0 { + return nil + } + + newHash, _ := lthash.ComputeLtHash(w.ltHash, w.ltPairs) + w.ltHash = newHash + + syncOpt := seidbtypes.WriteOptions{Sync: false} + if err := w.batch.Commit(syncOpt); err != nil { + return fmt.Errorf("%s commit: %w", w.dir, err) + } + + w.batch = w.db.NewBatch() + w.ltPairs = w.ltPairs[:0] + return nil +} + +// KVImporter implements types.Importer using a channel-based pipeline with +// per-DB worker goroutines. AddNode sends pairs into a buffered channel; a +// dispatcher goroutine routes each pair to the correct DB worker; each worker +// independently batches writes and computes LtHash. type KVImporter struct { store *CommitStore version int64 - batch []*proto.KVPair - err error + + ingestCh chan rawKVPair + workers map[seidbtypes.KeyValueDB]*dbWorker + wg sync.WaitGroup + + // done is closed on the first pipeline error so that AddNode, + // the dispatcher, and all workers bail immediately. 
+ done chan struct{} + closeOnce sync.Once + firstErr atomic.Pointer[error] } func NewKVImporter(store *CommitStore, version int64) types.Importer { - return &KVImporter{ - store: store, - version: version, - batch: make([]*proto.KVPair, 0, importBatchSize), + imp := &KVImporter{ + store: store, + version: version, + ingestCh: make(chan rawKVPair, ingestChanSize), + workers: make(map[seidbtypes.KeyValueDB]*dbWorker, 4), + done: make(chan struct{}), } -} -func (imp *KVImporter) AddModule(_ string) error { - return nil + for _, ndb := range store.namedDataDBs() { + w := newDBWorker( + ndb.dir, + ndb.db, + store.perDBWorkingLtHash[ndb.dir], + ) + imp.workers[ndb.db] = w + } + + for _, w := range imp.workers { + imp.wg.Add(1) + go func(w *dbWorker) { + defer imp.wg.Done() + if err := w.run(imp.done); err != nil { + imp.setErr(err) + } + }(w) + } + + imp.wg.Add(1) + go func() { + defer imp.wg.Done() + imp.dispatch() + }() + + return imp } -func (imp *KVImporter) AddNode(node *types.SnapshotNode) { - if imp.err != nil || node.Height != 0 || node.Key == nil { - return +// dispatch reads from the main ingest channel, routes each pair, and sends +// it to the appropriate worker channel. It exits when ingestCh is closed +// (normal shutdown) or done fires (error fast-path). 
+func (imp *KVImporter) dispatch() { + defer func() { + for _, w := range imp.workers { + close(w.ch) + } + }() + + for { + select { + case kv, ok := <-imp.ingestCh: + if !ok { + return + } + db, err := imp.store.routePhysicalKey(kv.Key) + if err != nil { + imp.setErr(fmt.Errorf("route key: %w", err)) + return + } + select { + case imp.workers[db].ch <- kv: + case <-imp.done: + return + } + case <-imp.done: + return + } } +} - imp.batch = append(imp.batch, &proto.KVPair{Key: node.Key, Value: node.Value}) - if len(imp.batch) >= importBatchSize { - imp.flush() +func (imp *KVImporter) setErr(err error) { + if imp.firstErr.CompareAndSwap(nil, &err) { + imp.closeOnce.Do(func() { close(imp.done) }) } } -func (imp *KVImporter) flush() { - if len(imp.batch) == 0 { - return +func (imp *KVImporter) getErr() error { + p := imp.firstErr.Load() + if p == nil { + return nil } + return *p +} + +func (imp *KVImporter) AddModule(_ string) error { + return nil +} - cs := []*proto.NamedChangeSet{{ - Name: evm.EVMStoreKey, - Changeset: proto.ChangeSet{Pairs: imp.batch}, - }} - if err := imp.store.ApplyChangeSets(cs); err != nil { - imp.err = fmt.Errorf("import apply changesets: %w", err) - logger.Error("import flush failed when apply changesets", "err", err) +func (imp *KVImporter) AddNode(node *types.SnapshotNode) { + if node.Height != 0 || node.Key == nil || node.Version != imp.version { return } - if err := imp.store.commitBatches(imp.version); err != nil { - imp.err = fmt.Errorf("import commit batches: %w", err) - logger.Error("import flush failed when commit batches", "err", err) - return + select { + case imp.ingestCh <- rawKVPair{Key: node.Key, Value: node.Value}: + case <-imp.done: } - imp.store.clearPendingWrites() - imp.batch = make([]*proto.KVPair, 0, importBatchSize) } func (imp *KVImporter) Close() error { - if imp.err != nil { - return imp.err + close(imp.ingestCh) + imp.wg.Wait() + + if err := imp.getErr(); err != nil { + return err } - imp.flush() - if imp.err != nil { 
- return imp.err + + for _, w := range imp.workers { + imp.store.perDBWorkingLtHash[w.dir] = w.ltHash } - imp.store.committedVersion = imp.version - imp.store.committedLtHash = imp.store.workingLtHash.Clone() - if err := imp.store.commitGlobalMetadata(imp.version, imp.store.committedLtHash); err != nil { - return fmt.Errorf("import global metadata: %w", err) + if err := imp.store.FinalizeImport(imp.version); err != nil { + return fmt.Errorf("failed to finalize import: %w", err) } // Write a snapshot so the imported data survives store reopen / restart. // Import bypasses the WAL, so without a snapshot the next LoadVersion // would clone from the pre-import snapshot and lose all imported data. if err := imp.store.WriteSnapshot(""); err != nil { - return fmt.Errorf("import snapshot: %w", err) + return fmt.Errorf("failed to import when writing snapshot: %w", err) } return nil diff --git a/sei-db/state_db/sc/flatkv/iterator.go b/sei-db/state_db/sc/flatkv/iterator.go deleted file mode 100644 index e9a4ea1fbe..0000000000 --- a/sei-db/state_db/sc/flatkv/iterator.go +++ /dev/null @@ -1,307 +0,0 @@ -package flatkv - -import ( - "fmt" - - "github.com/sei-protocol/sei-chain/sei-db/common/evm" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" -) - -// dbIterator is a generic iterator that wraps a PebbleDB iterator -// and converts keys between internal and external (memiavl) formats. -// -// EXPERIMENTAL: not used in production; only storage keys supported. -// Interface may change when Exporter/state-sync is implemented. 
-type dbIterator struct { - iter types.KeyValueDBIterator - kind evm.EVMKeyKind // key type for conversion - start []byte // external format start key - end []byte // external format end key - err error - closed bool -} - -// Compile-time interface checks -var ( - _ Iterator = (*dbIterator)(nil) - _ Iterator = (*emptyIterator)(nil) -) - -// newDBIterator creates a new dbIterator for the given key kind. -// start/end are external memiavl keys; they are converted to physical keys -// for the underlying DB iterator. -func newDBIterator(db types.KeyValueDB, kind evm.EVMKeyKind, start, end []byte) Iterator { - var physStart, physEnd []byte - startMatches := start == nil - endMatches := end == nil - - if start != nil { - parsedKind, keyBytes := evm.ParseEVMKey(start) - if parsedKind == kind { - physStart = ktype.EVMPhysicalKey(kind, keyBytes) - startMatches = true - } - } - if end != nil { - parsedKind, keyBytes := evm.ParseEVMKey(end) - if parsedKind == kind { - physEnd = ktype.EVMPhysicalKey(kind, keyBytes) - endMatches = true - } - } - - if !startMatches || !endMatches { - return &emptyIterator{} - } - - iter, err := db.NewIter(&types.IterOptions{ - LowerBound: physStart, - UpperBound: physEnd, - }) - if err != nil { - return &emptyIterator{err: err} - } - - return &dbIterator{ - iter: iter, - kind: kind, - start: start, - end: end, - } -} - -// newDBPrefixIterator creates a new dbIterator for prefix scanning. -// strippedPrefix is the stripped key prefix (e.g. addr for storage); -// it is converted to a physical key prefix for the DB. 
-func newDBPrefixIterator(db types.KeyValueDB, kind evm.EVMKeyKind, strippedPrefix []byte, externalPrefix []byte) Iterator { - physPrefix := ktype.EVMPhysicalKey(kind, strippedPrefix) - physEnd := ktype.PrefixEnd(physPrefix) - - iter, err := db.NewIter(&types.IterOptions{ - LowerBound: physPrefix, - UpperBound: physEnd, - }) - if err != nil { - return &emptyIterator{err: err} - } - - externalEnd := ktype.PrefixEnd(externalPrefix) - - return &dbIterator{ - iter: iter, - kind: kind, - start: externalPrefix, - end: externalEnd, - } -} - -func (it *dbIterator) Domain() ([]byte, []byte) { - return it.start, it.end -} - -func (it *dbIterator) Valid() bool { - if it.closed || it.err != nil { - return false - } - return it.iter.Valid() -} - -func (it *dbIterator) Error() error { - if it.err != nil { - return it.err - } - return it.iter.Error() -} - -func (it *dbIterator) Close() error { - if it.closed { - return nil - } - it.closed = true - return it.iter.Close() -} - -func (it *dbIterator) First() bool { - if it.closed { - return false - } - if !it.iter.First() { - return false - } - it.skipMetaForward() - return it.iter.Valid() -} - -func (it *dbIterator) Last() bool { - if it.closed { - return false - } - if !it.iter.Last() { - return false - } - it.skipMetaBackward() - return it.iter.Valid() -} - -func (it *dbIterator) SeekGE(key []byte) bool { - if it.closed { - return false - } - - physKey, err := it.resolvePhysicalKey(key) - if err != nil { - it.err = err - return false - } - - if !it.iter.SeekGE(physKey) { - return false - } - it.skipMetaForward() - return it.iter.Valid() -} - -func (it *dbIterator) SeekLT(key []byte) bool { - if it.closed { - return false - } - - physKey, err := it.resolvePhysicalKey(key) - if err != nil { - it.err = err - return false - } - - if !it.iter.SeekLT(physKey) { - return false - } - it.skipMetaBackward() - return it.iter.Valid() -} - -// resolvePhysicalKey converts a seek key to physical format for the underlying -// DB iterator. 
Accepts both formats so that keys returned by Key() can be -// passed directly back to SeekGE/SeekLT: -// - Physical keys ("evm/" + prefix_byte + stripped_key) are validated and -// passed through. -// - Memiavl keys (prefix_byte + stripped_key) are converted via EVMPhysicalKey. -// -// Memiavl EVM prefix bytes (0x03..0x0a) are all below 0x20, while physical -// keys start with an ASCII module name (>= 0x20), so the formats are -// unambiguous. -func (it *dbIterator) resolvePhysicalKey(key []byte) ([]byte, error) { - if len(key) == 0 { - return nil, fmt.Errorf("empty seek key") - } - if key[0] >= 0x20 { // physical key: starts with ASCII module name; memiavl keys start with 0x03..0x0a - kind, _, err := ktype.StripEVMPhysicalKey(key) - if err != nil { - return nil, fmt.Errorf("invalid physical seek key: %w", err) - } - if kind != it.kind { - return nil, fmt.Errorf("physical key type mismatch: expected %d, got %d", it.kind, kind) - } - return key, nil - } - kind, strippedKey := evm.ParseEVMKey(key) - if kind != it.kind { - return nil, fmt.Errorf("key type mismatch: expected %d, got %d", it.kind, kind) - } - return ktype.EVMPhysicalKey(kind, strippedKey), nil -} - -func (it *dbIterator) Next() bool { - if it.closed { - return false - } - if !it.iter.Next() { - return false - } - it.skipMetaForward() - return it.iter.Valid() -} - -func (it *dbIterator) Prev() bool { - if it.closed { - return false - } - if !it.iter.Prev() { - return false - } - it.skipMetaBackward() - return it.iter.Valid() -} - -// skipMetaForward advances past any _meta/ keys. -// On I/O error Valid() becomes false and the loop exits; -// the caller surfaces the error via Error(). -func (it *dbIterator) skipMetaForward() { - for it.iter.Valid() && isMetaKey(it.iter.Key()) { - it.iter.Next() - } -} - -// skipMetaBackward retreats past any _meta/ keys. -// Error handling mirrors skipMetaForward. 
-func (it *dbIterator) skipMetaBackward() { - for it.iter.Valid() && isMetaKey(it.iter.Key()) { - it.iter.Prev() - } -} - -func (it *dbIterator) Key() []byte { - if !it.Valid() { - return nil - } - // Returns raw physical key ("evm/" + type_prefix + stripped_key). - return it.iter.Key() -} - -func (it *dbIterator) Value() []byte { - if !it.Valid() { - return nil - } - raw := it.iter.Value() - switch it.kind { - case evm.EVMKeyStorage: - sd, err := vtype.DeserializeStorageData(raw) - if err != nil { - it.err = fmt.Errorf("deserialize storage value: %w", err) - return nil - } - return sd.GetValue()[:] - default: - return raw - } -} - -// CommitStore factory methods for creating iterators - -func (s *CommitStore) newStorageIterator(start, end []byte) Iterator { - return newDBIterator(s.storageDB, evm.EVMKeyStorage, start, end) -} - -func (s *CommitStore) newStoragePrefixIterator(internalPrefix []byte, memiavlPrefix []byte) Iterator { - return newDBPrefixIterator(s.storageDB, evm.EVMKeyStorage, internalPrefix, memiavlPrefix) -} - -// emptyIterator is used when no data matches the query. -// If err is set, it indicates a creation failure (e.g. PebbleDB error). 
-type emptyIterator struct { - err error -} - -func (it *emptyIterator) Domain() ([]byte, []byte) { return nil, nil } -func (it *emptyIterator) Valid() bool { return false } -func (it *emptyIterator) Error() error { return it.err } -func (it *emptyIterator) Close() error { return nil } -func (it *emptyIterator) First() bool { return false } -func (it *emptyIterator) Last() bool { return false } -func (it *emptyIterator) SeekGE(key []byte) bool { return false } -func (it *emptyIterator) SeekLT(key []byte) bool { return false } -func (it *emptyIterator) Next() bool { return false } -func (it *emptyIterator) Prev() bool { return false } -func (it *emptyIterator) Key() []byte { return nil } -func (it *emptyIterator) Value() []byte { return nil } diff --git a/sei-db/state_db/sc/flatkv/keys_test.go b/sei-db/state_db/sc/flatkv/keys_test.go deleted file mode 100644 index dcdc642ac5..0000000000 --- a/sei-db/state_db/sc/flatkv/keys_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package flatkv - -import ( - "testing" - - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" - "github.com/stretchr/testify/require" -) - -func TestIsMetaKey(t *testing.T) { - require.True(t, isMetaKey(metaVersionKey)) - require.True(t, isMetaKey(metaLtHashKey)) - require.True(t, isMetaKey([]byte("_meta/future"))) - require.False(t, isMetaKey([]byte{0x00})) - addr := ktype.Address{0x01} - require.False(t, isMetaKey(addr[:])) - require.False(t, isMetaKey(ktype.StorageKey(ktype.Address{0x01}, ktype.Slot{0x02}))) -} diff --git a/sei-db/state_db/sc/flatkv/ktype/ktype.go b/sei-db/state_db/sc/flatkv/ktype/ktype.go index 7fcfb9f810..9904cf1992 100644 --- a/sei-db/state_db/sc/flatkv/ktype/ktype.go +++ b/sei-db/state_db/sc/flatkv/ktype/ktype.go @@ -9,7 +9,7 @@ import ( "bytes" "fmt" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" ) // --------------------------------------------------------------------------- @@ -53,7 +53,7 @@ func 
StorageKey(addr Address, slot Slot) []byte { // EVMKeyAccount 0x0a accountDB "evm/" + 0x0a + addr (merges nonce, codehash, balance) // EVMKeyCode 0x07 codeDB "evm/" + 0x07 + addr // EVMKeyLegacy (orig) legacyDB "evm/" + original_key OR "module/" + cosmos_key -const EVMKeyAccount = evm.EVMKeyNonce +const EVMKeyAccount = keys.EVMKeyNonce // ModulePhysicalKey returns "moduleName/" + key. // All four data DBs (account, code, storage, legacy) use this format so keys @@ -81,15 +81,15 @@ func StripModulePrefix(physicalKey []byte) (moduleName string, originalKey []byt // Format: "evm/" + type_prefix_byte + stripped_key. // For account keys (nonce, codehash), canonicalizes to EVMKeyAccount (0x0a) // because these fields are merged into one physical row. -func EVMPhysicalKey(kind evm.EVMKeyKind, strippedKey []byte) []byte { - if kind == evm.EVMKeyCodeHash { +func EVMPhysicalKey(kind keys.EVMKeyKind, strippedKey []byte) []byte { + if kind == keys.EVMKeyCodeHash { kind = EVMKeyAccount } - prefixByte, ok := evm.EVMKeyPrefixByte(kind) + prefixByte, ok := keys.EVMKeyPrefixByte(kind) if !ok { return nil } - mod := evm.EVMStoreKey + mod := keys.EVMStoreKey result := make([]byte, len(mod)+2+len(strippedKey)) copy(result, mod) result[len(mod)] = '/' @@ -100,15 +100,15 @@ func EVMPhysicalKey(kind evm.EVMKeyKind, strippedKey []byte) []byte { // StripEVMPhysicalKey extracts the EVM key kind and stripped key from a // physical DB key. This is the inverse of EVMPhysicalKey for export paths. -// For account keys the returned kind is EVMKeyAccount (evm.EVMKeyNonce). -func StripEVMPhysicalKey(physicalKey []byte) (kind evm.EVMKeyKind, strippedKey []byte, err error) { - _, memiavlKey, err := StripModulePrefix(physicalKey) +// For account keys the returned kind is EVMKeyAccount (keys.EVMKeyNonce). 
+func StripEVMPhysicalKey(physicalKey []byte) (kind keys.EVMKeyKind, strippedKey []byte, err error) { + _, innerKey, err := StripModulePrefix(physicalKey) if err != nil { - return evm.EVMKeyEmpty, nil, fmt.Errorf("strip EVM physical key: %w", err) + return keys.EVMKeyEmpty, nil, fmt.Errorf("strip EVM physical key: %w", err) } - kind, strippedKey = evm.ParseEVMKey(memiavlKey) - if kind == evm.EVMKeyEmpty { - return evm.EVMKeyEmpty, nil, fmt.Errorf("unrecognised EVM key kind in physical key: %x", physicalKey) + kind, strippedKey = keys.ParseEVMKey(innerKey) + if kind == keys.EVMKeyEmpty { + return keys.EVMKeyEmpty, nil, fmt.Errorf("unrecognised EVM key kind in physical key: %x", physicalKey) } return kind, strippedKey, nil } diff --git a/sei-db/state_db/sc/flatkv/keys.go b/sei-db/state_db/sc/flatkv/ktype/meta.go similarity index 74% rename from sei-db/state_db/sc/flatkv/keys.go rename to sei-db/state_db/sc/flatkv/ktype/meta.go index c6a3d04b46..974c61e2a3 100644 --- a/sei-db/state_db/sc/flatkv/keys.go +++ b/sei-db/state_db/sc/flatkv/ktype/meta.go @@ -1,4 +1,4 @@ -package flatkv +package ktype import ( "bytes" @@ -14,20 +14,20 @@ const ( ) var ( - metaKeyPrefixBytes = []byte(metaKeyPrefix) - metaVersionKey = []byte(metaVersion) - metaLtHashKey = []byte(metaLtHash) + MetaKeyPrefixBytes = []byte(metaKeyPrefix) + MetaVersionKey = []byte(metaVersion) + MetaLtHashKey = []byte(metaLtHash) ) -// isMetaKey reports whether key is a per-DB internal metadata key (not user data). +// IsMetaKey reports whether key is a per-DB internal metadata key (not user data). // // Safety: _meta/ keys are 10–13 bytes; the shortest user key is 20 bytes // (an EVM address). Prefix collision would require an address starting with // 0x5F6D657461 ("_meta") — probability ~2^-48 for random addresses and // negligible even under CREATE2 brute-force. Legacy DB keys must not use // the _meta/ prefix. 
-func isMetaKey(key []byte) bool { - return bytes.HasPrefix(key, metaKeyPrefixBytes) +func IsMetaKey(key []byte) bool { + return bytes.HasPrefix(key, MetaKeyPrefixBytes) } // LocalMeta stores per-DB version tracking metadata. diff --git a/sei-db/state_db/sc/flatkv/ktype/metakey_test.go b/sei-db/state_db/sc/flatkv/ktype/metakey_test.go new file mode 100644 index 0000000000..dbba88b32f --- /dev/null +++ b/sei-db/state_db/sc/flatkv/ktype/metakey_test.go @@ -0,0 +1,17 @@ +package ktype + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIsMetaKey(t *testing.T) { + require.True(t, IsMetaKey(MetaVersionKey)) + require.True(t, IsMetaKey(MetaLtHashKey)) + require.True(t, IsMetaKey([]byte("_meta/future"))) + require.False(t, IsMetaKey([]byte{0x00})) + addr := Address{0x01} + require.False(t, IsMetaKey(addr[:])) + require.False(t, IsMetaKey(StorageKey(Address{0x01}, Slot{0x02}))) +} diff --git a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go index 992e0f56a9..4926000e0b 100644 --- a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go +++ b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go @@ -5,18 +5,19 @@ import ( "encoding/binary" "errors" "fmt" - "path/filepath" - "testing" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" scTypes "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" "github.com/stretchr/testify/require" + "path/filepath" + "testing" ) // 
fullScanLtHash computes an LtHash from scratch by iterating every KV pair @@ -31,7 +32,7 @@ func fullScanLtHash(t *testing.T, s *CommitStore) *lthash.LtHash { require.NoError(t, err) defer iter.Close() for iter.First(); iter.Valid(); iter.Next() { - if isMetaKey(iter.Key()) { + if ktype.IsMetaKey(iter.Key()) { continue } key := bytes.Clone(iter.Key()) @@ -44,106 +45,14 @@ func fullScanLtHash(t *testing.T, s *CommitStore) *lthash.LtHash { require.NoError(t, iter.Error()) } - scanDB(s.accountDB) - scanDB(s.codeDB) - scanDB(s.storageDB) - scanDB(s.legacyDB) + for _, db := range s.dataDBs() { + scanDB(db) + } result, _ := lthash.ComputeLtHash(nil, pairs) return result } -// ---------- helpers to build memiavl-format changeset pairs ---------- - -func nonceBytes(n uint64) []byte { - b := make([]byte, vtype.NonceLen) - binary.BigEndian.PutUint64(b, n) - return b -} - -func addrN(n byte) ktype.Address { - var a ktype.Address - a[19] = n - return a -} - -func slotN(n byte) ktype.Slot { - var s ktype.Slot - s[31] = n - return s -} - -func codeHashN(n byte) vtype.CodeHash { - var h vtype.CodeHash - for i := range h { - h[i] = n - } - return h -} - -func noncePair(addr ktype.Address, nonce uint64) *proto.KVPair { - return &proto.KVPair{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), - Value: nonceBytes(nonce), - } -} - -func codeHashPair(addr ktype.Address, ch vtype.CodeHash) *proto.KVPair { - return &proto.KVPair{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), - Value: ch[:], - } -} - -func codePair(addr ktype.Address, bytecode []byte) *proto.KVPair { - return &proto.KVPair{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), - Value: bytecode, - } -} - -func codeDeletePair(addr ktype.Address) *proto.KVPair { - return &proto.KVPair{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), - Delete: true, - } -} - -func storagePair(addr ktype.Address, slot ktype.Slot, val []byte) *proto.KVPair { - return &proto.KVPair{ - Key: 
evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)), - Value: padLeft32(val...), - } -} - -func storageDeletePair(addr ktype.Address, slot ktype.Slot) *proto.KVPair { - return &proto.KVPair{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)), - Delete: true, - } -} - -func nonceDeletePair(addr ktype.Address) *proto.KVPair { - return &proto.KVPair{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), - Delete: true, - } -} - -func codeHashDeletePair(addr ktype.Address) *proto.KVPair { - return &proto.KVPair{ - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), - Delete: true, - } -} - -func namedCS(pairs ...*proto.KVPair) *proto.NamedChangeSet { - return &proto.NamedChangeSet{ - Name: "evm", - Changeset: proto.ChangeSet{Pairs: pairs}, - } -} - // ---------- The main 100-block test ---------- // TestLtHashIncrementalEqualsFullScan runs 100 blocks that exercise every @@ -647,7 +556,7 @@ func TestLtHashPersistenceAfterReopen(t *testing.T) { dir := t.TempDir() // Phase 1: create state and close - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dir s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -668,7 +577,7 @@ func TestLtHashPersistenceAfterReopen(t *testing.T) { require.NoError(t, s1.Close()) // Phase 2: reopen and verify - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = dir s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -735,8 +644,8 @@ func TestLtHashCrossApplyAccountSameFieldOverwrite(t *testing.T) { verifyLtHashAtHeight(t, s, 2) // Verify final value - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - val, found := s.Get(evm.EVMStoreKey, key) + key := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + val, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, uint64(20), binary.BigEndian.Uint64(val)) } @@ -769,8 +678,8 @@ func TestLtHashCrossApplyStorageOverwrite(t 
*testing.T) { verifyLtHashAtHeight(t, s, 2) // Verify final value - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) - val, found := s.Get(evm.EVMStoreKey, key) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) + val, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0x33), val) } @@ -806,8 +715,8 @@ func TestLtHashCrossApplyCodeOverwrite(t *testing.T) { verifyLtHashAtHeight(t, s, 2) // Verify final value - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - val, found := s.Get(evm.EVMStoreKey, key) + key := keys.BuildEVMKey(keys.EVMKeyCode, addr[:]) + val, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, []byte{0x60, 0x40, 0x02, 0x03}, val) } @@ -839,7 +748,7 @@ func TestLtHashCrossApplyLegacyOverwrite(t *testing.T) { verifyLtHashAtHeight(t, s, 2) // Verify final value - val, found := s.Get(evm.EVMStoreKey, legacyKey) + val, found := s.Get(keys.EVMStoreKey, legacyKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x30}, val) } @@ -898,28 +807,28 @@ func TestLtHashCrossApplyMixedOverwrite(t *testing.T) { verifyLtHashAtHeight(t, s, 2) // Verify all final values - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + nonceVal, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found) require.Equal(t, uint64(100), binary.BigEndian.Uint64(nonceVal)) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - chVal, found := s.Get(evm.EVMStoreKey, chKey) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) + chVal, found := s.Get(keys.EVMStoreKey, chKey) require.True(t, found) expected := codeHashN(0x30) require.Equal(t, expected[:], chVal) - codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - codeVal, found := s.Get(evm.EVMStoreKey, codeKey) + codeKey := keys.BuildEVMKey(keys.EVMKeyCode, 
addr[:]) + codeVal, found := s.Get(keys.EVMStoreKey, codeKey) require.True(t, found) require.Equal(t, []byte{0x60, 0x60, 0x01}, codeVal) - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) - storageVal, found := s.Get(evm.EVMStoreKey, storageKey) + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) + storageVal, found := s.Get(keys.EVMStoreKey, storageKey) require.True(t, found) require.Equal(t, padLeft32(0x33), storageVal) - legacyVal, found := s.Get(evm.EVMStoreKey, legacyKey) + legacyVal, found := s.Get(keys.EVMStoreKey, legacyKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x03}, legacyVal) } @@ -980,13 +889,13 @@ func TestLtHashAccountDeleteThenRecreate(t *testing.T) { commitAndCheck(t, s) verifyLtHashAtHeight(t, s, 2) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + nonceVal, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found) require.Equal(t, nonceBytes(99), nonceVal) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - _, found = s.Get(evm.EVMStoreKey, chKey) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) + _, found = s.Get(keys.EVMStoreKey, chKey) require.False(t, found, "codehash should be zero (EOA)") raw, err := s.accountDB.Get(accountPhysKey(addr)) @@ -1032,8 +941,8 @@ func TestAccountPendingReadPartialDelete(t *testing.T) { defer s.Close() addr := addrN(0xD4) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) // Apply 1: write nonce + codehash (not committed yet) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -1046,11 +955,11 @@ func TestAccountPendingReadPartialDelete(t *testing.T) { })) // Pending reads 
before commit - nonceVal, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceVal, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "nonce should be readable from pending writes") require.Equal(t, nonceBytes(42), nonceVal) - chVal, found := s.Get(evm.EVMStoreKey, chKey) + chVal, found := s.Get(keys.EVMStoreKey, chKey) require.False(t, found, "codehash should be not-found after pending delete") require.Nil(t, chVal) @@ -1067,8 +976,8 @@ func TestAccountRowDeleteGetBeforeCommit(t *testing.T) { defer s.Close() addr := addrN(0xD5) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) // Write nonce + codehash (not committed yet) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -1076,11 +985,11 @@ func TestAccountRowDeleteGetBeforeCommit(t *testing.T) { })) // Verify both fields are readable before commit - nonceVal, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceVal, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "nonce should be readable from pending writes") require.Equal(t, nonceBytes(10), nonceVal) - chVal, found := s.Get(evm.EVMStoreKey, chKey) + chVal, found := s.Get(keys.EVMStoreKey, chKey) require.True(t, found, "codehash should be readable from pending writes") expected := codeHashN(0xEE) require.Equal(t, expected[:], chVal) @@ -1091,17 +1000,17 @@ func TestAccountRowDeleteGetBeforeCommit(t *testing.T) { })) // Verify both fields return not-found BEFORE commit (the core semantic change) - nonceVal, found = s.Get(evm.EVMStoreKey, nonceKey) + nonceVal, found = s.Get(keys.EVMStoreKey, nonceKey) require.False(t, found, "nonce should not be found after pending full-delete") require.Nil(t, nonceVal) - chVal, found = s.Get(evm.EVMStoreKey, chKey) + chVal, found = s.Get(keys.EVMStoreKey, chKey) require.False(t, found, "codehash 
should not be found after pending full-delete") require.Nil(t, chVal) - hasNonce := s.Has(evm.EVMStoreKey, nonceKey) + hasNonce := s.Has(keys.EVMStoreKey, nonceKey) require.False(t, hasNonce, "Has(nonce) should be false after pending full-delete") - hasCodeHash := s.Has(evm.EVMStoreKey, chKey) + hasCodeHash := s.Has(keys.EVMStoreKey, chKey) require.False(t, hasCodeHash, "Has(codehash) should be false after pending full-delete") // Verify isDelete is set @@ -1234,7 +1143,7 @@ func TestLtHashCommittedVsWorkingDiverge(t *testing.T) { // LoadVersion has a RootHash that matches the parent's CommittedRootHash and // a full scan of the read-only store's DBs. func TestLtHashReadOnlyMatchesParent(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 1 cfg.SnapshotKeepRecent = 5 @@ -1333,7 +1242,7 @@ func TestLtHashExportImportRoundTrip(t *testing.T) { s2 := setupTestStore(t) imp, err := s2.Importer(1) require.NoError(t, err) - require.NoError(t, imp.AddModule(evm.EVMFlatKVStoreKey)) + require.NoError(t, imp.AddModule("flatkv")) for _, n := range nodes { imp.AddNode(n) } @@ -1357,7 +1266,7 @@ func TestLtHashSnapshotCatchupFullScan(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s1, err := NewCommitStore(t.Context(), cfg) @@ -1380,7 +1289,7 @@ func TestLtHashSnapshotCatchupFullScan(t *testing.T) { require.NoError(t, s1.Close()) // Reopen — snapshot is at v3, WAL catchup replays v4-v7 - cfg2 := DefaultTestConfig(t) + cfg2 := config.DefaultTestConfig(t) cfg2.DataDir = dbDir s2, err := NewCommitStore(t.Context(), cfg2) require.NoError(t, err) @@ -1405,7 +1314,7 @@ func TestLtHashRollbackFullScan(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) @@ -1484,7 
+1393,7 @@ func TestLtHashMultipleRollbacks(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) @@ -1580,3 +1489,12 @@ func TestLtHashLargeBatch(t *testing.T) { commitAndCheck(t, s) verifyLtHashAtHeight(t, s, 3) } + +// verifyLtHashConsistency checks that the in-memory workingLtHash matches a +// fresh full-scan of all data DBs. Used after any recovery path. +func verifyLtHashConsistency(t *testing.T, s *CommitStore) { + t.Helper() + expected := fullScanLtHash(t, s) + require.Equal(t, expected.Checksum(), s.workingLtHash.Checksum(), + "workingLtHash should match fullScanLtHash after recovery") +} diff --git a/sei-db/state_db/sc/flatkv/perdb_lthash_test.go b/sei-db/state_db/sc/flatkv/perdb_lthash_test.go index 3472872b17..48892085f2 100644 --- a/sei-db/state_db/sc/flatkv/perdb_lthash_test.go +++ b/sei-db/state_db/sc/flatkv/perdb_lthash_test.go @@ -5,14 +5,17 @@ import ( "path/filepath" "testing" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" scTypes "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" - "github.com/stretchr/testify/require" ) // testFullScanDBLtHash computes the LtHash of a single data DB by iterating @@ -25,7 +28,7 @@ func testFullScanDBLtHash(t *testing.T, db types.KeyValueDB) *lthash.LtHash { var pairs []lthash.KVPairWithLastValue for 
iter.First(); iter.Valid(); iter.Next() { - if isMetaKey(iter.Key()) { + if ktype.IsMetaKey(iter.Key()) { continue } pairs = append(pairs, lthash.KVPairWithLastValue{ @@ -45,13 +48,8 @@ func testFullScanDBLtHash(t *testing.T, db types.KeyValueDB) *lthash.LtHash { func fullScanPerDBLtHash(t *testing.T, s *CommitStore) map[string]*lthash.LtHash { t.Helper() result := make(map[string]*lthash.LtHash, 4) - for dbDir, db := range map[string]types.KeyValueDB{ - accountDBDir: s.accountDB, - codeDBDir: s.codeDB, - storageDBDir: s.storageDB, - legacyDBDir: s.legacyDB, - } { - result[dbDir] = testFullScanDBLtHash(t, db) + for _, ndb := range s.namedDataDBs() { + result[ndb.dir] = testFullScanDBLtHash(t, ndb.db) } return result } @@ -96,7 +94,7 @@ func TestPerDBLtHashSkewRecovery(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s1, err := NewCommitStore(t.Context(), cfg) @@ -120,11 +118,11 @@ func TestPerDBLtHashSkewRecovery(t *testing.T) { metaCfg.EnableMetrics = false db, err := pebbledb.Open(t.Context(), &metaCfg) require.NoError(t, err) - require.NoError(t, db.Set(metaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) + require.NoError(t, db.Set(ktype.MetaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) // Reopen -- catchup should replay version 2 from WAL - cfg2 := DefaultTestConfig(t) + cfg2 := config.DefaultTestConfig(t) cfg2.DataDir = dbDir s2, err := NewCommitStore(t.Context(), cfg2) @@ -143,7 +141,7 @@ func TestPerDBLtHashPersistenceAfterReopen(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s1, err := NewCommitStore(t.Context(), cfg) @@ -158,7 +156,7 @@ func TestPerDBLtHashPersistenceAfterReopen(t *testing.T) { require.NoError(t, s1.Close()) // Reopen and verify - cfg2 := 
DefaultTestConfig(t) + cfg2 := config.DefaultTestConfig(t) cfg2.DataDir = dbDir s2, err := NewCommitStore(t.Context(), cfg2) @@ -256,7 +254,7 @@ func TestPerDBLtHashCatchupReplay(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s1, err := NewCommitStore(t.Context(), cfg) @@ -279,7 +277,7 @@ func TestPerDBLtHashCatchupReplay(t *testing.T) { } require.NoError(t, s1.Close()) - cfg2 := DefaultTestConfig(t) + cfg2 := config.DefaultTestConfig(t) cfg2.DataDir = dbDir s2, err := NewCommitStore(t.Context(), cfg2) @@ -325,7 +323,7 @@ func TestPerDBLtHashAfterImport(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) @@ -339,10 +337,10 @@ func TestPerDBLtHashAfterImport(t *testing.T) { for i := byte(1); i <= 5; i++ { addr := addrN(i) slot := slotN(i) - sp := storagePair(addr, slot, []byte{i, 0xAA}) - np := noncePair(addr, uint64(i)) - imp.AddNode(&scTypes.SnapshotNode{Key: sp.Key, Value: sp.Value}) - imp.AddNode(&scTypes.SnapshotNode{Key: np.Key, Value: np.Value}) + storVal := vtype.NewStorageData().SetBlockHeight(1).SetValue(&[32]byte{i, 0xAA}).Serialize() + acctVal := vtype.NewAccountData().SetBlockHeight(1).SetNonce(uint64(i)).Serialize() + imp.AddNode(&scTypes.SnapshotNode{Key: storagePhysKey(addr, slot), Value: storVal}) + imp.AddNode(&scTypes.SnapshotNode{Key: accountPhysKey(addr), Value: acctVal}) } require.NoError(t, imp.Close()) @@ -365,7 +363,7 @@ func TestPerDBLtHashRollback(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) @@ -394,7 +392,7 @@ func TestPerDBLtHashPersistedInLocalMeta(t *testing.T) { dir := t.TempDir() dbDir := 
filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) @@ -428,7 +426,7 @@ func TestPerDBLtHashAfterDirectImport(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) @@ -463,7 +461,7 @@ func TestPerDBLtHashPartialKeyTypeOperations(t *testing.T) { defer s.Close() addr := addrN(0x01) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) // Write only storage keys: other DBs' per-DB LtHash should remain zero. cs := makeChangeSet(key, padLeft32(0x11), false) @@ -486,7 +484,7 @@ func TestPerDBLtHashDeleteLastKeyZerosHash(t *testing.T) { defer s.Close() addr := addrN(0x02) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(key, padLeft32(0x22), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -527,7 +525,7 @@ func TestPerDBLtHashSumInvariantAcrossAllOperations(t *testing.T) { addr := addrN(0x03) // Operation 1: Add storage key. 
- storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(storageKey, padLeft32(0x33), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) @@ -572,8 +570,8 @@ func TestPerDBLtHashSumInvariantAcrossAllOperations(t *testing.T) { cs6 := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Delete: true}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), Delete: true}, + {Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Delete: true}, + {Key: keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]), Delete: true}, }}, } require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs6})) @@ -584,7 +582,7 @@ func TestPerDBLtHashSumInvariantAcrossAllOperations(t *testing.T) { cs7 := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), Delete: true}, + {Key: keys.BuildEVMKey(keys.EVMKeyCode, addr[:]), Delete: true}, }}, } require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs7})) diff --git a/sei-db/state_db/sc/flatkv/snapshot.go b/sei-db/state_db/sc/flatkv/snapshot.go index 2110875e43..b3ee5cf3a0 100644 --- a/sei-db/state_db/sc/flatkv/snapshot.go +++ b/sei-db/state_db/sc/flatkv/snapshot.go @@ -15,6 +15,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" ) // On-disk layout under /flatkv/: @@ -389,7 +390,7 @@ func (s *CommitStore) migrateFlatLayout(flatkvDir string) (string, error) { metaCfg.DataDir = filepath.Join(flatkvDir, metadataDir) tmpMeta, err := pebbledb.Open(s.ctx, &metaCfg) if err == nil 
{ - verData, verErr := tmpMeta.Get(metaVersionKey) + verData, verErr := tmpMeta.Get(ktype.MetaVersionKey) _ = tmpMeta.Close() if verErr == nil && len(verData) == 8 { version = int64(binary.BigEndian.Uint64(verData)) //nolint:gosec // block height, always < MaxInt64 diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index 9557fd9a2f..d1fb309b61 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -7,20 +7,22 @@ import ( "strings" "testing" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" - "github.com/stretchr/testify/require" ) func commitStorageEntry(t *testing.T, s *CommitStore, addr ktype.Address, slot ktype.Slot, value []byte) int64 { t.Helper() padded := make([]byte, 32) copy(padded[32-len(value):], value) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) cs := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{ @@ -35,7 +37,7 @@ func commitStorageEntry(t *testing.T, s *CommitStore, addr ktype.Address, slot k func TestSnapshotCreatesDir(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -65,7 +67,7 @@ func TestSnapshotCreatesDir(t *testing.T) { func TestSnapshotIdempotent(t *testing.T) { dir := 
t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -88,7 +90,7 @@ func TestOpenFromSnapshot(t *testing.T) { dir := t.TempDir() // Phase 1: create store, commit v1 and v2, snapshot at v2, commit v3 - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -108,7 +110,7 @@ func TestOpenFromSnapshot(t *testing.T) { require.NoError(t, s1.Close()) // Phase 2: reopen - should catchup from v2 snapshot + WAL entry for v3 - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -120,12 +122,12 @@ func TestOpenFromSnapshot(t *testing.T) { require.Equal(t, hashAtV3, s2.RootHash()) // Verify data from all 3 versions is present - key1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(ktype.Address{0x10}, ktype.Slot{0x01})) - key3 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(ktype.Address{0x10}, ktype.Slot{0x03})) - v, ok := s2.Get(evm.EVMStoreKey, key1) + key1 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(ktype.Address{0x10}, ktype.Slot{0x01})) + key3 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(ktype.Address{0x10}, ktype.Slot{0x03})) + v, ok := s2.Get(keys.EVMStoreKey, key1) require.True(t, ok) require.Equal(t, padLeft32(0x01), v) - v, ok = s2.Get(evm.EVMStoreKey, key3) + v, ok = s2.Get(keys.EVMStoreKey, key3) require.True(t, ok) require.Equal(t, padLeft32(0x03), v) } @@ -133,7 +135,7 @@ func TestOpenFromSnapshot(t *testing.T) { func TestCatchupUpdatesLtHash(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s1, err := 
NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -154,7 +156,7 @@ func TestCatchupUpdatesLtHash(t *testing.T) { require.NoError(t, s1.Close()) // Reopen: catchup from v2 snapshot through v3,v4,v5 via WAL - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -169,7 +171,7 @@ func TestCatchupUpdatesLtHash(t *testing.T) { } func TestRollbackRewindsState(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -192,13 +194,13 @@ func TestRollbackRewindsState(t *testing.T) { require.Equal(t, hashAtV4, s.RootHash()) // v5's data should not exist (WAL truncated, snapshot pruned) - key5 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(ktype.Address{0x30}, ktype.Slot{0x05})) - _, ok := s.Get(evm.EVMStoreKey, key5) + key5 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(ktype.Address{0x30}, ktype.Slot{0x05})) + _, ok := s.Get(keys.EVMStoreKey, key5) require.False(t, ok, "v5 data should be gone after rollback to v4") // v4's data should still exist - key4 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(ktype.Address{0x30}, ktype.Slot{0x04})) - v, ok := s.Get(evm.EVMStoreKey, key4) + key4 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(ktype.Address{0x30}, ktype.Slot{0x04})) + v, ok := s.Get(keys.EVMStoreKey, key4) require.True(t, ok) require.Equal(t, padLeft32(0x04), v) @@ -206,7 +208,7 @@ func TestRollbackRewindsState(t *testing.T) { } func TestRollbackToSnapshotExact(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -229,7 +231,7 @@ func TestRollbackToSnapshotExact(t *testing.T) { func TestPartialSnapshotCleanup(t *testing.T) { dir 
:= t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -291,7 +293,7 @@ func TestMigrationFromFlatLayout(t *testing.T) { require.True(t, os.IsNotExist(err)) // Open the store - should trigger migration - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -325,7 +327,7 @@ func TestOpenVersionValidation(t *testing.T) { dir := t.TempDir() // Phase 1: create store, commit some data - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -349,11 +351,11 @@ func TestOpenVersionValidation(t *testing.T) { acctCfg.EnableMetrics = false db, err := pebbledb.Open(t.Context(), &acctCfg) require.NoError(t, err) - require.NoError(t, db.Set(metaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) + require.NoError(t, db.Set(ktype.MetaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) // Phase 3: reopen - should detect skew and catchup - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -425,7 +427,7 @@ func TestSeekSnapshot(t *testing.T) { func TestLoadVersionWithTarget(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -441,7 +443,7 @@ func TestLoadVersionWithTarget(t *testing.T) { require.NoError(t, s1.Close()) // Reopen at specific version 3 - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = 
filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -460,10 +462,10 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { addr := ktype.Address{0x7A} slot := ktype.Slot{0x7B} - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) // Phase 1: build baseline at v2 and snapshot it. - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -475,7 +477,7 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { require.NoError(t, s1.WriteSnapshot("")) // Record baseline value at v2 for the same key. - vAtV2, ok := s1.Get(evm.EVMStoreKey, key) + vAtV2, ok := s1.Get(keys.EVMStoreKey, key) require.True(t, ok) require.Equal(t, padLeft32(0x01), vAtV2) @@ -487,19 +489,19 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { // Phase 3: reopen exactly at v2. If later commits had mutated the snapshot // baseline in place, we'd incorrectly read 0x04 here. - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s2.LoadVersion(2, false) require.NoError(t, err) - gotV2, ok := s2.Get(evm.EVMStoreKey, key) + gotV2, ok := s2.Get(keys.EVMStoreKey, key) require.True(t, ok) require.Equal(t, padLeft32(0x01), gotV2, "snapshot baseline should remain stable") require.NoError(t, s2.Close()) // Phase 4: reopen latest again to ensure catchup/replay still reaches v4. 
- cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s3, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -508,7 +510,7 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { defer s3.Close() require.Equal(t, int64(4), s3.Version()) - gotLatest, ok := s3.Get(evm.EVMStoreKey, key) + gotLatest, ok := s3.Get(keys.EVMStoreKey, key) require.True(t, ok) require.Equal(t, padLeft32(0x04), gotLatest) } @@ -520,9 +522,9 @@ func TestLoadVersionMixedSequence(t *testing.T) { addr := ktype.Address{0x80} slot := ktype.Slot{0x81} - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -540,7 +542,7 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, s.Close()) // Round 1: load exactly v2 - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -548,13 +550,13 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), s1.Version()) require.Equal(t, hashAtV2, s1.RootHash()) - v, ok := s1.Get(evm.EVMStoreKey, key) + v, ok := s1.Get(keys.EVMStoreKey, key) require.True(t, ok) require.Equal(t, padLeft32(0x02), v) require.NoError(t, s1.Close()) // Round 2: load latest (catches up through v3, v4) - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -562,13 +564,13 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(4), s2.Version()) require.Equal(t, 
hashAtV4, s2.RootHash()) - v, ok = s2.Get(evm.EVMStoreKey, key) + v, ok = s2.Get(keys.EVMStoreKey, key) require.True(t, ok) require.Equal(t, padLeft32(0x04), v) require.NoError(t, s2.Close()) // Round 3: load v2 AGAIN — snapshot must still be clean. - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s3, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -576,7 +578,7 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, err, "LoadVersion(2) must succeed after LoadVersion(0) dirtied working dir") require.Equal(t, int64(2), s3.Version()) require.Equal(t, hashAtV2, s3.RootHash()) - v, ok = s3.Get(evm.EVMStoreKey, key) + v, ok = s3.Get(keys.EVMStoreKey, key) require.True(t, ok) require.Equal(t, padLeft32(0x02), v) require.NoError(t, s3.Close()) @@ -587,7 +589,7 @@ func TestLoadVersionMixedSequence(t *testing.T) { func TestRollbackTargetBeforeWALStart(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -624,7 +626,7 @@ func TestRollbackTargetBeforeWALStart(t *testing.T) { // Simulate restart: should stay at v2. 
require.NoError(t, s.Close()) - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -824,7 +826,7 @@ func TestCreateWorkingDirReclones(t *testing.T) { // ============================================================================= func TestPruneSnapshotsKeepsRecent(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(t.TempDir(), flatkvRootDir) cfg.SnapshotKeepRecent = 1 s, err := NewCommitStore(t.Context(), cfg) @@ -850,7 +852,7 @@ func TestPruneSnapshotsKeepsRecent(t *testing.T) { } func TestPruneSnapshotsKeepAll(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotKeepRecent = 100 s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -888,7 +890,7 @@ func TestOrphanSnapshotRecovery(t *testing.T) { _, err := os.Lstat(currentPath(flatkvDir)) require.True(t, os.IsNotExist(err), "no current symlink should exist") - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -952,7 +954,7 @@ func TestTraverseSnapshotsEarlyStop(t *testing.T) { // ============================================================================= func TestVerifyWALTailSuccess(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -967,7 +969,7 @@ func TestVerifyWALTailSuccess(t *testing.T) { } func TestVerifyWALTailMismatch(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -991,7 +993,7 @@ func TestTryTruncateWAL(t *testing.T) { // SnapshotKeepRecent=0 so 
pruneSnapshots removes snapshot-0 once // the manual snapshot at v5 is created; this makes v5 the earliest // snapshot and gives tryTruncateWAL a positive truncation offset. - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) cfg.SnapshotKeepRecent = 0 s, err := NewCommitStore(t.Context(), cfg) @@ -1019,7 +1021,7 @@ func TestTryTruncateWAL(t *testing.T) { } func TestTryTruncateWALNoSnapshot(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -1042,7 +1044,7 @@ func TestTryTruncateWALNoSnapshot(t *testing.T) { func TestRollbackRemovesPostTargetSnapshots(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -1143,7 +1145,7 @@ func TestSeekSnapshotExact(t *testing.T) { func TestMultipleSnapshotsAndReopen(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) cfg.SnapshotKeepRecent = 10 s, err := NewCommitStore(t.Context(), cfg) @@ -1161,7 +1163,7 @@ func TestMultipleSnapshotsAndReopen(t *testing.T) { for i, expectedHash := range hashes { ver := int64(i + 1) - cfg2 := DefaultTestConfig(t) + cfg2 := config.DefaultTestConfig(t) cfg2.DataDir = filepath.Join(dir, flatkvRootDir) cfg2.SnapshotKeepRecent = 10 s2, err := NewCommitStore(t.Context(), cfg2) @@ -1180,7 +1182,7 @@ func TestMultipleSnapshotsAndReopen(t *testing.T) { func TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(context.Background(), cfg) require.NoError(t, err) @@ -1209,7 +1211,7 @@ func 
TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { // Reopen: working dir should be reused (SNAPSHOT_BASE matches current), // so committedVersion should be 5 (from working dir metadata), not 2 // (from the snapshot). Catchup should replay 0 entries. - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(context.Background(), cfg) require.NoError(t, err) @@ -1223,7 +1225,7 @@ func TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { func TestSnapshotPreservesAllKeyTypes(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -1234,9 +1236,9 @@ func TestSnapshotPreservesAllKeyTypes(t *testing.T) { slot := ktype.Slot{0xCD} pairs := []*proto.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)), Value: padLeft32(0x11)}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 7}}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), Value: []byte{0x60, 0x80}}, + {Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)), Value: padLeft32(0x11)}, + {Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 7}}, + {Key: keys.BuildEVMKey(keys.EVMKeyCode, addr[:]), Value: []byte{0x60, 0x80}}, } cs := &proto.NamedChangeSet{Name: "evm", Changeset: proto.ChangeSet{Pairs: pairs}} require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -1247,7 +1249,7 @@ func TestSnapshotPreservesAllKeyTypes(t *testing.T) { require.NoError(t, s.WriteSnapshot("")) require.NoError(t, s.Close()) - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -1258,18 +1260,18 @@ func 
TestSnapshotPreservesAllKeyTypes(t *testing.T) { require.Equal(t, int64(1), s2.Version()) require.Equal(t, hash, s2.RootHash()) - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) - v, ok := s2.Get(evm.EVMStoreKey, storageKey) + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) + v, ok := s2.Get(keys.EVMStoreKey, storageKey) require.True(t, ok) require.Equal(t, padLeft32(0x11), v) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - v, ok = s2.Get(evm.EVMStoreKey, nonceKey) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + v, ok = s2.Get(keys.EVMStoreKey, nonceKey) require.True(t, ok) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 7}, v) - codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - v, ok = s2.Get(evm.EVMStoreKey, codeKey) + codeKey := keys.BuildEVMKey(keys.EVMKeyCode, addr[:]) + v, ok = s2.Get(keys.EVMStoreKey, codeKey) require.True(t, ok) require.Equal(t, []byte{0x60, 0x80}, v) } @@ -1282,7 +1284,7 @@ func TestReopenAfterEmptyCommits(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultConfig() + cfg := config.DefaultConfig() cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -1299,7 +1301,7 @@ func TestReopenAfterEmptyCommits(t *testing.T) { hashBefore := s.RootHash() require.NoError(t, s.Close()) - cfg2 := DefaultConfig() + cfg2 := config.DefaultConfig() cfg2.DataDir = dbDir s2, err := NewCommitStore(context.Background(), cfg2) require.NoError(t, err) @@ -1319,7 +1321,7 @@ func TestReopenAfterDeletes(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultConfig() + cfg := config.DefaultConfig() cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -1333,10 +1335,10 @@ func TestReopenAfterDeletes(t *testing.T) { cs := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - 
{Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)), Value: padLeft32(0x11)}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 42}}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), Value: ch[:]}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), Value: []byte{0x60, 0x80}}, + {Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)), Value: padLeft32(0x11)}, + {Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 42}}, + {Key: keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]), Value: ch[:]}, + {Key: keys.BuildEVMKey(keys.EVMKeyCode, addr[:]), Value: []byte{0x60, 0x80}}, }}, } require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -1346,10 +1348,10 @@ func TestReopenAfterDeletes(t *testing.T) { delCS := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)), Delete: true}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Delete: true}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), Delete: true}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), Delete: true}, + {Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)), Delete: true}, + {Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Delete: true}, + {Key: keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]), Delete: true}, + {Key: keys.BuildEVMKey(keys.EVMKeyCode, addr[:]), Delete: true}, }}, } require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{delCS})) @@ -1359,7 +1361,7 @@ func TestReopenAfterDeletes(t *testing.T) { hashBefore := s.RootHash() require.NoError(t, s.Close()) - cfg2 := DefaultConfig() + cfg2 := config.DefaultConfig() cfg2.DataDir = dbDir s2, err := NewCommitStore(context.Background(), cfg2) require.NoError(t, err) @@ -1369,22 +1371,22 @@ func TestReopenAfterDeletes(t *testing.T) { 
require.Equal(t, hashBefore, s2.RootHash()) - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) - _, found := s2.Get(evm.EVMStoreKey, storageKey) + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) + _, found := s2.Get(keys.EVMStoreKey, storageKey) require.False(t, found, "storage should stay deleted after reopen") - codeKey2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - _, found = s2.Get(evm.EVMStoreKey, codeKey2) + codeKey2 := keys.BuildEVMKey(keys.EVMKeyCode, addr[:]) + _, found = s2.Get(keys.EVMStoreKey, codeKey2) require.False(t, found, "code should stay deleted after reopen") // With Account Row GC, all-zero account row is physically deleted. - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found := s2.Get(evm.EVMStoreKey, nonceKey) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + nonceVal, found := s2.Get(keys.EVMStoreKey, nonceKey) require.False(t, found, "nonce should not be found after reopen (row deleted)") require.Nil(t, nonceVal) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - chVal, found := s2.Get(evm.EVMStoreKey, chKey) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) + chVal, found := s2.Get(keys.EVMStoreKey, chKey) require.False(t, found, "codehash should not be found after reopen (row deleted)") require.Nil(t, chVal) } @@ -1394,7 +1396,7 @@ func TestReopenAfterDeletes(t *testing.T) { // ============================================================================= func TestWALTruncationThenRollback(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 5 cfg.SnapshotKeepRecent = 1 s, err := NewCommitStore(t.Context(), cfg) @@ -1412,18 +1414,18 @@ func TestWALTruncationThenRollback(t *testing.T) { require.Equal(t, int64(5), s.Version()) for i := 1; i <= 5; i++ { - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(byte(i)), slotN(byte(i)))) + 
key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(byte(i)), slotN(byte(i)))) var val []byte var found bool - val, found = s.Get(evm.EVMStoreKey, key) + val, found = s.Get(keys.EVMStoreKey, key) require.True(t, found, "key at block %d should exist after rollback to v5", i) require.Equal(t, padLeft32(byte(i)), val) } for i := 6; i <= 10; i++ { - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(byte(i)), slotN(byte(i)))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(byte(i)), slotN(byte(i)))) var found bool - _, found = s.Get(evm.EVMStoreKey, key) + _, found = s.Get(keys.EVMStoreKey, key) require.False(t, found, "key at block %d should NOT exist after rollback to v5", i) } @@ -1435,7 +1437,7 @@ func TestWALTruncationThenRollback(t *testing.T) { // ============================================================================= func TestReopenAfterSnapshotAndTruncation(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 5 cfg.SnapshotKeepRecent = 1 @@ -1462,10 +1464,10 @@ func TestReopenAfterSnapshotAndTruncation(t *testing.T) { require.Equal(t, hashBefore, s2.RootHash()) for i := 1; i <= 10; i++ { - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(byte(i)), slotN(byte(i)))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(byte(i)), slotN(byte(i)))) var val []byte var found bool - val, found = s2.Get(evm.EVMStoreKey, key) + val, found = s2.Get(keys.EVMStoreKey, key) require.True(t, found, "key at block %d should exist after reopen", i) require.Equal(t, padLeft32(byte(i)), val) } @@ -1479,7 +1481,7 @@ func TestSingleDBOpenFailure(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultConfig() + cfg := config.DefaultConfig() cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -1501,7 +1503,7 @@ func TestSingleDBOpenFailure(t *testing.T) { } 
_ = os.Remove(filepath.Join(dbDir, "working", snapshotBaseFile)) - cfg2 := DefaultConfig() + cfg2 := config.DefaultConfig() cfg2.DataDir = dbDir s2, err := NewCommitStore(context.Background(), cfg2) require.NoError(t, err) @@ -1517,7 +1519,7 @@ func TestGlobalMetadataCorruption(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultConfig() + cfg := config.DefaultConfig() cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -1533,7 +1535,7 @@ func TestGlobalMetadataCorruption(t *testing.T) { metaCfg.EnableMetrics = false db, err := pebbledb.Open(context.Background(), &metaCfg) require.NoError(t, err) - require.NoError(t, db.Set(metaVersionKey, []byte{0xFF, 0xFF, 0xFF}, types.WriteOptions{Sync: true})) + require.NoError(t, db.Set(ktype.MetaVersionKey, []byte{0xFF, 0xFF, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) snapMeta := filepath.Join(dbDir, snapshotName(1), metadataDir) @@ -1542,11 +1544,11 @@ func TestGlobalMetadataCorruption(t *testing.T) { metaCfg2.EnableMetrics = false db2, err := pebbledb.Open(context.Background(), &metaCfg2) require.NoError(t, err) - require.NoError(t, db2.Set(metaVersionKey, []byte{0xFF, 0xFF, 0xFF}, types.WriteOptions{Sync: true})) + require.NoError(t, db2.Set(ktype.MetaVersionKey, []byte{0xFF, 0xFF, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db2.Close()) _ = os.Remove(filepath.Join(dbDir, "working", snapshotBaseFile)) - cfg2 := DefaultConfig() + cfg2 := config.DefaultConfig() cfg2.DataDir = dbDir s2, err := NewCommitStore(context.Background(), cfg2) require.NoError(t, err) @@ -1562,7 +1564,7 @@ func TestWALDirectoryDeleted(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultConfig() + cfg := config.DefaultConfig() cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -1577,7 +1579,7 @@ func TestWALDirectoryDeleted(t *testing.T) { walDir := 
filepath.Join(dbDir, changelogDir) require.NoError(t, os.RemoveAll(walDir)) - cfg2 := DefaultConfig() + cfg2 := config.DefaultConfig() cfg2.DataDir = dbDir s2, err := NewCommitStore(context.Background(), cfg2) require.NoError(t, err) @@ -1590,8 +1592,8 @@ func TestWALDirectoryDeleted(t *testing.T) { commitStorageEntry(t, s2, ktype.Address{0x03}, ktype.Slot{0x03}, []byte{0xCC}) require.Equal(t, int64(3), s2.Version()) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(ktype.Address{0x03}, ktype.Slot{0x03})) - val, found := s2.Get(evm.EVMStoreKey, key) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(ktype.Address{0x03}, ktype.Slot{0x03})) + val, found := s2.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0xCC), val) } @@ -1600,7 +1602,7 @@ func TestLocalMetaCorruption(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultConfig() + cfg := config.DefaultConfig() cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -1617,7 +1619,7 @@ func TestLocalMetaCorruption(t *testing.T) { acctCfg.EnableMetrics = false db, err := pebbledb.Open(context.Background(), &acctCfg) require.NoError(t, err) - require.NoError(t, db.Set(metaVersionKey, []byte{0xDE, 0xAD, 0xFF}, types.WriteOptions{Sync: true})) + require.NoError(t, db.Set(ktype.MetaVersionKey, []byte{0xDE, 0xAD, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) // Same corruption in the snapshot dir. 
@@ -1627,13 +1629,13 @@ func TestLocalMetaCorruption(t *testing.T) { acctCfg2.EnableMetrics = false db2, err := pebbledb.Open(context.Background(), &acctCfg2) require.NoError(t, err) - require.NoError(t, db2.Set(metaVersionKey, []byte{0xDE, 0xAD, 0xFF}, types.WriteOptions{Sync: true})) + require.NoError(t, db2.Set(ktype.MetaVersionKey, []byte{0xDE, 0xAD, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db2.Close()) // Remove SNAPSHOT_BASE to force re-clone from corrupted snapshot. _ = os.Remove(filepath.Join(dbDir, "working", snapshotBaseFile)) - cfg2 := DefaultConfig() + cfg2 := config.DefaultConfig() cfg2.DataDir = dbDir s2, err := NewCommitStore(context.Background(), cfg2) require.NoError(t, err) @@ -1650,7 +1652,7 @@ func TestWALSegmentCorruption(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultConfig() + cfg := config.DefaultConfig() cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -1669,7 +1671,7 @@ func TestWALSegmentCorruption(t *testing.T) { metaCfg.EnableMetrics = false mdb, err := pebbledb.Open(context.Background(), &metaCfg) require.NoError(t, err) - require.NoError(t, mdb.Set(metaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) + require.NoError(t, mdb.Set(ktype.MetaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) require.NoError(t, mdb.Close()) // Corrupt WAL segments: tidwall/wal will auto-truncate, losing all entries. @@ -1692,7 +1694,7 @@ func TestWALSegmentCorruption(t *testing.T) { require.Greater(t, corrupted, 0, "should have found at least one WAL segment to corrupt") // Request version 2: global says v1, WAL auto-truncated (empty), can't catchup to v2. 
- cfg2 := DefaultConfig() + cfg2 := config.DefaultConfig() cfg2.DataDir = dbDir s2, err := NewCommitStore(context.Background(), cfg2) require.NoError(t, err) @@ -1708,7 +1710,7 @@ func TestAccountRowDeletePersistsAfterReopen(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s, err := NewCommitStore(context.Background(), cfg) @@ -1717,13 +1719,13 @@ func TestAccountRowDeletePersistsAfterReopen(t *testing.T) { require.NoError(t, err) addr := ktype.Address{0xE1} - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) cs1 := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 5}}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), Value: make([]byte, vtype.CodeHashLength)}, + {Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 5}}, + {Key: keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]), Value: make([]byte, vtype.CodeHashLength)}, }}, } ch := vtype.CodeHash{0xAA} @@ -1735,8 +1737,8 @@ func TestAccountRowDeletePersistsAfterReopen(t *testing.T) { cs2 := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Delete: true}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), Delete: true}, + {Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Delete: true}, + {Key: keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]), Delete: true}, }}, } require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) @@ -1754,7 +1756,7 @@ func TestAccountRowDeletePersistsAfterReopen(t *testing.T) { require.Equal(t, hashBefore, s2.RootHash(), "LtHash should match after reopen") - nonceVal, found := s2.Get(evm.EVMStoreKey, nonceKey) + nonceVal, found 
:= s2.Get(keys.EVMStoreKey, nonceKey) require.False(t, found, "nonce should not be found after reopen (row deleted)") require.Nil(t, nonceVal) } @@ -1763,7 +1765,7 @@ func TestAccountRowDeleteSurvivesWALReplay(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dbDir s, err := NewCommitStore(context.Background(), cfg) @@ -1776,7 +1778,7 @@ func TestAccountRowDeleteSurvivesWALReplay(t *testing.T) { cs1 := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 7}}, + {Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 7}}, }}, } require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) @@ -1786,7 +1788,7 @@ func TestAccountRowDeleteSurvivesWALReplay(t *testing.T) { cs2 := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Delete: true}, + {Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Delete: true}, }}, } require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) @@ -1803,7 +1805,7 @@ func TestAccountRowDeleteSurvivesWALReplay(t *testing.T) { require.NoError(t, err) versionBuf := make([]byte, 8) versionBuf[7] = 1 // version = 1 - require.NoError(t, mdb.Set(metaVersionKey, versionBuf, types.WriteOptions{Sync: true})) + require.NoError(t, mdb.Set(ktype.MetaVersionKey, versionBuf, types.WriteOptions{Sync: true})) require.NoError(t, mdb.Close()) s2, err := NewCommitStore(context.Background(), cfg) @@ -1815,14 +1817,14 @@ func TestAccountRowDeleteSurvivesWALReplay(t *testing.T) { require.Equal(t, int64(2), s2.Version()) require.Equal(t, hashAtV2, s2.RootHash(), "LtHash should match after WAL replay") - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - _, found := 
s2.Get(evm.EVMStoreKey, nonceKey) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + _, found := s2.Get(keys.EVMStoreKey, nonceKey) require.False(t, found, "nonce should not be found after WAL replay (row deleted)") } func TestAccountRowDeleteAfterSnapshotRollback(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) cfg.SnapshotInterval = 1 cfg.SnapshotKeepRecent = 2 @@ -1833,40 +1835,40 @@ func TestAccountRowDeleteAfterSnapshotRollback(t *testing.T) { require.NoError(t, err) addr := ktype.Address{0xE3} - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) cs1 := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 3}}, + {Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 3}}, }}, } require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) _, err = s.Commit() // v1 (snapshot taken) require.NoError(t, err) - nonceVal, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceVal, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 3}, nonceVal) cs2 := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Delete: true}, + {Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Delete: true}, }}, } require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) _, err = s.Commit() // v2 (row deleted, snapshot taken) require.NoError(t, err) - _, found = s.Get(evm.EVMStoreKey, nonceKey) + _, found = s.Get(keys.EVMStoreKey, nonceKey) require.False(t, found, "nonce should be gone at v2") // Rollback to v1: row should be restored require.NoError(t, s.Rollback(1)) require.Equal(t, int64(1), 
s.Version()) - nonceVal, found = s.Get(evm.EVMStoreKey, nonceKey) + nonceVal, found = s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "nonce should be restored after rollback to v1") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 3}, nonceVal) @@ -1877,7 +1879,7 @@ func TestRollbackOnReadOnlyStore(t *testing.T) { s := setupTestStore(t) cs := makeChangeSet( - evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), + keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), padLeft32(0x11), false, ) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -1894,13 +1896,13 @@ func TestRollbackOnReadOnlyStore(t *testing.T) { } func TestRollbackToCurrentVersion(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 1 s := setupTestStoreWithConfig(t, cfg) defer s.Close() addr := addrN(0x02) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(key, padLeft32(0x22), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) // v1 + snapshot @@ -1912,19 +1914,19 @@ func TestRollbackToCurrentVersion(t *testing.T) { require.Equal(t, int64(1), s.Version()) require.Equal(t, hashV1, s.RootHash()) - val, found := s.Get(evm.EVMStoreKey, key) + val, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0x22), val) } func TestRollbackToFutureVersionFails(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 1 s := setupTestStoreWithConfig(t, cfg) defer s.Close() cs := makeChangeSet( - evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x03), slotN(0x01))), + keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x03), slotN(0x01))), padLeft32(0x33), false, ) require.NoError(t, 
s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -1935,41 +1937,41 @@ func TestRollbackToFutureVersionFails(t *testing.T) { } func TestRollbackDiscardsUncommittedPendingWrites(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 1 s := setupTestStoreWithConfig(t, cfg) defer s.Close() addr := addrN(0x04) - key1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key1 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs1 := makeChangeSet(key1, padLeft32(0x44), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) commitAndCheck(t, s) // v1 // Apply but do NOT commit. - key2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x02))) + key2 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x02))) cs2 := makeChangeSet(key2, padLeft32(0x55), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) require.NoError(t, s.Rollback(1)) require.Equal(t, int64(1), s.Version()) - val, found := s.Get(evm.EVMStoreKey, key1) + val, found := s.Get(keys.EVMStoreKey, key1) require.True(t, found) require.Equal(t, padLeft32(0x44), val) - _, found = s.Get(evm.EVMStoreKey, key2) + _, found = s.Get(keys.EVMStoreKey, key2) require.False(t, found, "uncommitted pending write should be discarded after rollback") } func TestRollbackThenNewTimeline(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 1 s := setupTestStoreWithConfig(t, cfg) defer s.Close() addr := addrN(0x05) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs1 := makeChangeSet(key, padLeft32(0x11), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) @@ -1989,14 +1991,14 @@ func TestRollbackThenNewTimeline(t *testing.T) { require.NoError(t, 
err) require.Equal(t, int64(2), v) // Version 2 in the new timeline. - val, found := s.Get(evm.EVMStoreKey, key) + val, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0xFF), val) } func TestRollbackPreservesWALContinuity(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) cfg.SnapshotInterval = 2 @@ -2007,7 +2009,7 @@ func TestRollbackPreservesWALContinuity(t *testing.T) { addr := addrN(0x06) for i := 1; i <= 4; i++ { - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) cs := makeChangeSet(key, padLeft32(byte(i)), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) _, err := s.Commit() @@ -2018,7 +2020,7 @@ func TestRollbackPreservesWALContinuity(t *testing.T) { // Continue committing. for i := 5; i <= 6; i++ { - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) cs := makeChangeSet(key, padLeft32(byte(i)), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) _, err := s.Commit() @@ -2042,7 +2044,7 @@ func TestWriteSnapshotOnReadOnlyStore(t *testing.T) { s := setupTestStore(t) cs := makeChangeSet( - evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), + keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), padLeft32(0x11), false, ) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -2071,7 +2073,7 @@ func TestWriteSnapshotWhileReadOnlyCloneActive(t *testing.T) { s := setupTestStore(t) cs := makeChangeSet( - evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x07), slotN(0x01))), + keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x07), 
slotN(0x01))), padLeft32(0x77), false, ) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -2085,7 +2087,7 @@ func TestWriteSnapshotWhileReadOnlyCloneActive(t *testing.T) { require.NoError(t, s.WriteSnapshot("")) // RO clone should still work. - val, found := ro.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x07), slotN(0x01)))) + val, found := ro.Get(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x07), slotN(0x01)))) require.True(t, found) require.Equal(t, padLeft32(0x77), val) require.NoError(t, s.Close()) @@ -2096,7 +2098,7 @@ func TestWriteSnapshotDirParameterIgnored(t *testing.T) { defer s.Close() cs := makeChangeSet( - evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x08), slotN(0x01))), + keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x08), slotN(0x01))), padLeft32(0x88), false, ) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -2106,7 +2108,7 @@ func TestWriteSnapshotDirParameterIgnored(t *testing.T) { require.NoError(t, s.WriteSnapshot("/tmp/this-should-be-ignored")) // Verify snapshot was created in the correct location (not the passed dir). 
- val, found := s.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0x08), slotN(0x01)))) + val, found := s.Get(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x08), slotN(0x01)))) require.True(t, found) require.Equal(t, padLeft32(0x88), val) } diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 86086a022c..993b859a24 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -10,20 +10,24 @@ import ( "runtime" "time" + "github.com/zbiljic/go-filelock" + "go.opentelemetry.io/otel" + commonerrors "github.com/sei-protocol/sei-chain/sei-db/common/errors" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" "github.com/sei-protocol/sei-chain/sei-db/wal" "github.com/sei-protocol/seilog" - "github.com/zbiljic/go-filelock" - "go.opentelemetry.io/otel" ) var logger = seilog.NewLogger("db", "state-db", "sc", "flatkv") @@ -53,12 +57,34 @@ const ( // dataDBDirs lists all data DB directory names (used for per-DB LtHash iteration). var dataDBDirs = []string{accountDBDir, codeDBDir, storageDBDir, legacyDBDir} +// InitializeDataDirectories sets the DataDir for each nested PebbleDB config +// that does not already have one, using DataDir as the base path. 
The DBs live +// under the working directory: /working/. +func InitializeDataDirectories(c *config.Config) { + workDir := filepath.Join(c.DataDir, workingDirName) + if c.AccountDBConfig.DataDir == "" { + c.AccountDBConfig.DataDir = filepath.Join(workDir, accountDBDir) + } + if c.CodeDBConfig.DataDir == "" { + c.CodeDBConfig.DataDir = filepath.Join(workDir, codeDBDir) + } + if c.StorageDBConfig.DataDir == "" { + c.StorageDBConfig.DataDir = filepath.Join(workDir, storageDBDir) + } + if c.LegacyDBConfig.DataDir == "" { + c.LegacyDBConfig.DataDir = filepath.Join(workDir, legacyDBDir) + } + if c.MetadataDBConfig.DataDir == "" { + c.MetadataDBConfig.DataDir = filepath.Join(workDir, metadataDir) + } +} + // CommitStore implements flatkv.Store for EVM state storage. // NOT thread-safe; callers must serialize all operations. type CommitStore struct { ctx context.Context cancel context.CancelFunc - config Config + config config.Config dbDir string // Five separate PebbleDB instances. @@ -70,7 +96,7 @@ type CommitStore struct { legacyDB seidbtypes.KeyValueDB // "module/"+key → vtype.LegacyData // Per-DB committed version, keyed by DB dir name (e.g. accountDBDir). - localMeta map[string]*LocalMeta + localMeta map[string]*ktype.LocalMeta // LtHash state for integrity checking committedVersion int64 @@ -118,14 +144,58 @@ type CommitStore struct { var _ Store = (*CommitStore)(nil) +// dataDBs returns the four data PebbleDB instances in fixed iteration order: +// accountDB, codeDB, storageDB, legacyDB. metadataDB is excluded. +func (s *CommitStore) dataDBs() []seidbtypes.KeyValueDB { + return []seidbtypes.KeyValueDB{s.accountDB, s.codeDB, s.storageDB, s.legacyDB} +} + +type namedDB struct { + dir string + db seidbtypes.KeyValueDB +} + +// namedDataDBs returns the four data DBs paired with their directory names. 
+func (s *CommitStore) namedDataDBs() []namedDB { + return []namedDB{ + {accountDBDir, s.accountDB}, + {codeDBDir, s.codeDB}, + {storageDBDir, s.storageDB}, + {legacyDBDir, s.legacyDB}, + } +} + +// routePhysicalKey maps a physical DB key to its target database. +// Non-EVM modules are routed to legacyDB; EVM keys are routed by kind. +func (s *CommitStore) routePhysicalKey(physicalKey []byte) (seidbtypes.KeyValueDB, error) { + moduleName, innerKey, err := ktype.StripModulePrefix(physicalKey) + if err != nil { + return nil, err + } + if moduleName != keys.EVMStoreKey { + return s.legacyDB, nil + } + kind, _ := keys.ParseEVMKey(innerKey) + switch kind { + case ktype.EVMKeyAccount, keys.EVMKeyCodeHash: + return s.accountDB, nil + case keys.EVMKeyCode: + return s.codeDB, nil + case keys.EVMKeyStorage: + return s.storageDB, nil + default: + return s.legacyDB, nil + } +} + // NewCommitStore creates a new (unopened) FlatKV commit store. // Call LoadVersion to open and initialize. func NewCommitStore( ctx context.Context, - cfg *Config, + cfg *config.Config, ) (*CommitStore, error) { - cfg.InitializeDataDirectories() + InitializeDataDirectories(cfg) if err := cfg.Validate(); err != nil { return nil, fmt.Errorf("failed to validate config: %w", err) @@ -147,7 +217,7 @@ func NewCommitStore( ctx: ctx, cancel: cancel, config: *cfg, - localMeta: make(map[string]*LocalMeta), + localMeta: make(map[string]*ktype.LocalMeta), accountWrites: make(map[string]*vtype.AccountData), codeWrites: make(map[string]*vtype.CodeData), storageWrites: make(map[string]*vtype.StorageData), @@ -481,7 +551,7 @@ func (s *CommitStore) openDBs(dbDir, changelogRoot string) (retErr error) { s.storageDB = nil s.legacyDB = nil s.changelog = nil - s.localMeta = make(map[string]*LocalMeta) + s.localMeta = make(map[string]*ktype.LocalMeta) } }() @@ -529,18 +599,12 @@ func (s *CommitStore) openDBs(dbDir, changelogRoot string) (retErr error) { toClose = append(toClose, s.changelog) } - dataDBs := 
map[string]seidbtypes.KeyValueDB{ - accountDBDir: s.accountDB, - codeDBDir: s.codeDB, - storageDBDir: s.storageDB, - legacyDBDir: s.legacyDB, - } - for name, db := range dataDBs { - meta, err := loadLocalMeta(db) + for _, ndb := range s.namedDataDBs() { + meta, err := loadLocalMeta(ndb.db) if err != nil { - return fmt.Errorf("failed to load %s local meta: %w", name, err) + return fmt.Errorf("failed to load %s local meta: %w", ndb.dir, err) } - s.localMeta[name] = meta + s.localMeta[ndb.dir] = meta } return nil diff --git a/sei-db/state_db/sc/flatkv/store_apply.go b/sei-db/state_db/sc/flatkv/store_apply.go index 103cbaa19a..6b52e584e2 100644 --- a/sei-db/state_db/sc/flatkv/store_apply.go +++ b/sei-db/state_db/sc/flatkv/store_apply.go @@ -4,7 +4,7 @@ import ( "fmt" "maps" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" @@ -47,8 +47,8 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error // Gather account pairs accountWrites, err := mergeAccountUpdates( - changesByType[evm.EVMKeyNonce], - changesByType[evm.EVMKeyCodeHash], + changesByType[keys.EVMKeyNonce], + changesByType[keys.EVMKeyCodeHash], nil, // TODO: update this when we add a balance key! 
) if err != nil { @@ -58,21 +58,21 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error accountPairs := gatherLTHashPairs(newAccountValues, accountOld) maps.Copy(s.accountWrites, newAccountValues) - storageChanges, err := processStorageChanges(changesByType[evm.EVMKeyStorage], blockHeight) + storageChanges, err := processStorageChanges(changesByType[keys.EVMKeyStorage], blockHeight) if err != nil { return fmt.Errorf("failed to parse storage changes: %w", err) } storagePairs := gatherLTHashPairs(storageChanges, storageOld) maps.Copy(s.storageWrites, storageChanges) - codeChanges, err := processCodeChanges(changesByType[evm.EVMKeyCode], blockHeight) + codeChanges, err := processCodeChanges(changesByType[keys.EVMKeyCode], blockHeight) if err != nil { return fmt.Errorf("failed to parse code changes: %w", err) } codePairs := gatherLTHashPairs(codeChanges, codeOld) maps.Copy(s.codeWrites, codeChanges) - legacyChanges, err := processLegacyChanges(changesByType[evm.EVMKeyLegacy]) + legacyChanges, err := processLegacyChanges(changesByType[keys.EVMKeyLegacy]) if err != nil { return fmt.Errorf("failed to parse legacy changes: %w", err) } @@ -121,15 +121,15 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error } // classifyAndPrefix splits changeSets into per-EVMKeyKind maps whose keys are -// already in physical format ("module/" + memiavl_key). Non-EVM modules are +// already in physical format ("module/" + prefix_encoded_key). Non-EVM modules are // merged into the EVMKeyLegacy bucket with a "/" prefix. // // This replaces the former sortChangeSets + prefixModuleKeys two-pass approach, // avoiding an extra map allocation and repeated string concatenation per key. 
-func classifyAndPrefix(changeSets []*proto.NamedChangeSet) (map[evm.EVMKeyKind]map[string][]byte, error) { - result := make(map[evm.EVMKeyKind]map[string][]byte, 5) + result := make(map[keys.EVMKeyKind]map[string][]byte, 5) - getOrCreate := func(kind evm.EVMKeyKind, sizeHint int) map[string][]byte { + getOrCreate := func(kind keys.EVMKeyKind, sizeHint int) map[string][]byte { m, ok := result[kind] if !ok { m = make(map[string][]byte, sizeHint) @@ -139,20 +139,20 @@ func classifyAndPrefix(changeSets []*proto.NamedChangeSet) (map[evm.EVMKeyKind]m } for _, cs := range changeSets { - if cs.Changeset.Pairs == nil { + if len(cs.Changeset.Pairs) == 0 { continue } - if cs.Name == evm.EVMStoreKey { + if cs.Name == keys.EVMStoreKey { for _, pair := range cs.Changeset.Pairs { - kind, keyBytes := evm.ParseEVMKey(pair.Key) - if kind == evm.EVMKeyEmpty { + kind, keyBytes := keys.ParseEVMKey(pair.Key) + if kind == keys.EVMKeyEmpty { return nil, fmt.Errorf("flatkv: empty key in changeset") } var physKey string - if kind == evm.EVMKeyLegacy { - physKey = string(ktype.ModulePhysicalKey(evm.EVMStoreKey, pair.Key)) + if kind == keys.EVMKeyLegacy { + physKey = string(ktype.ModulePhysicalKey(keys.EVMStoreKey, pair.Key)) } else { physKey = string(ktype.EVMPhysicalKey(kind, keyBytes)) } @@ -165,7 +165,7 @@ func classifyAndPrefix(changeSets []*proto.NamedChangeSet) (map[evm.EVMKeyKind]m } } } else { - legacyMap := getOrCreate(evm.EVMKeyLegacy, len(cs.Changeset.Pairs)) + legacyMap := getOrCreate(keys.EVMKeyLegacy, len(cs.Changeset.Pairs)) for _, pair := range cs.Changeset.Pairs { physKey := string(ktype.ModulePhysicalKey(cs.Name, pair.Key)) if pair.Delete { diff --git a/sei-db/state_db/sc/flatkv/store_iterator.go b/sei-db/state_db/sc/flatkv/store_iterator.go new file mode 100644 index 0000000000..c7ecd9141c --- /dev/null +++ 
b/sei-db/state_db/sc/flatkv/store_iterator.go @@ -0,0 +1,144 @@ +package flatkv + +import ( + seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" +) + +// RawGlobalIterator returns an iterator that walks each data DB sequentially +// in fixed order (account → code → storage → legacy). Within each DB the +// keys are returned in PebbleDB's natural order. Per-DB _meta/* keys are +// skipped. Pending writes are not visible. metadataDB is not included. +func (s *CommitStore) RawGlobalIterator() Iterator { + return &sequentialIterator{dbs: s.dataDBs()} +} + +// sequentialIterator iterates through a slice of DBs one at a time. +// It fully drains the current DB before moving to the next. +type sequentialIterator struct { + dbs []seidbtypes.KeyValueDB + dbIdx int // index into dbs for the current DB + iter seidbtypes.KeyValueDBIterator + err error +} + +// openCurrent opens an iterator on dbs[dbIdx]. Returns false if no more DBs. +func (s *sequentialIterator) openCurrent() bool { + for s.dbIdx < len(s.dbs) { + it, err := s.dbs[s.dbIdx].NewIter(nil) + if err != nil { + s.err = err + return false + } + s.iter = it + return true + } + return false +} + +// advanceDB closes the current iterator and moves to the next DB, +// positioning at the first non-meta key. Returns true if positioned. +// If the current iterator has an error, it is captured and iteration stops. 
+func (s *sequentialIterator) advanceDB() bool { + for { + if s.iter != nil { + if err := s.iter.Error(); err != nil { + s.err = err + _ = s.iter.Close() + s.iter = nil + return false + } + _ = s.iter.Close() + s.iter = nil + } + s.dbIdx++ + if !s.openCurrent() { + return false + } + s.iter.First() + skipMeta(s.iter) + if s.iter.Valid() { + return true + } + } +} + +func skipMeta(it seidbtypes.KeyValueDBIterator) { + for it.Valid() && ktype.IsMetaKey(it.Key()) { + it.Next() + } +} + +func (s *sequentialIterator) Domain() ([]byte, []byte) { return nil, nil } + +func (s *sequentialIterator) Valid() bool { + return s.iter != nil && s.iter.Valid() +} + +func (s *sequentialIterator) Error() error { + if s.err != nil { + return s.err + } + if s.iter != nil { + return s.iter.Error() + } + return nil +} + +func (s *sequentialIterator) Close() error { + if s.iter != nil { + _ = s.iter.Close() + s.iter = nil + } + return nil +} + +func (s *sequentialIterator) First() bool { + if s.iter != nil { + _ = s.iter.Close() + s.iter = nil + } + s.dbIdx = 0 + if !s.openCurrent() { + return false + } + s.iter.First() + skipMeta(s.iter) + if s.iter.Valid() { + return true + } + return s.advanceDB() +} + +func (s *sequentialIterator) Next() bool { + if !s.Valid() { + return false + } + s.iter.Next() + skipMeta(s.iter) + if s.iter.Valid() { + return true + } + return s.advanceDB() +} + +func (s *sequentialIterator) Key() []byte { + if !s.Valid() { + return nil + } + return s.iter.Key() +} + +func (s *sequentialIterator) Value() []byte { + if !s.Valid() { + return nil + } + return s.iter.Value() +} + +// Unsupported positioning methods — not needed for forward-only scanning. 
+ +func (s *sequentialIterator) Last() bool { return false } +func (s *sequentialIterator) SeekGE([]byte) bool { return false } +func (s *sequentialIterator) SeekLT([]byte) bool { return false } +func (s *sequentialIterator) Prev() bool { return false } diff --git a/sei-db/state_db/sc/flatkv/store_lifecycle.go b/sei-db/state_db/sc/flatkv/store_lifecycle.go index 56f0a085c8..e08e48d2d1 100644 --- a/sei-db/state_db/sc/flatkv/store_lifecycle.go +++ b/sei-db/state_db/sc/flatkv/store_lifecycle.go @@ -7,6 +7,7 @@ import ( "path/filepath" "strings" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" ) @@ -61,7 +62,7 @@ func (s *CommitStore) closeDBsOnly() error { s.legacyDB = nil } - s.localMeta = make(map[string]*LocalMeta) + s.localMeta = make(map[string]*ktype.LocalMeta) if len(errs) > 0 { return errors.Join(errs...) diff --git a/sei-db/state_db/sc/flatkv/store_meta.go b/sei-db/state_db/sc/flatkv/store_meta.go index dcd6966119..dfc14e4d32 100644 --- a/sei-db/state_db/sc/flatkv/store_meta.go +++ b/sei-db/state_db/sc/flatkv/store_meta.go @@ -7,6 +7,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" ) @@ -23,13 +24,13 @@ func versionToBytes(v int64) []byte { } // loadLocalMeta loads per-DB metadata by reading separate keys. 
-func loadLocalMeta(db types.KeyValueDB) (*LocalMeta, error) { - meta := &LocalMeta{} +func loadLocalMeta(db types.KeyValueDB) (*ktype.LocalMeta, error) { + meta := &ktype.LocalMeta{} - versionData, err := db.Get(metaVersionKey) + versionData, err := db.Get(ktype.MetaVersionKey) if err != nil { if errorutils.IsNotFound(err) { - return &LocalMeta{CommittedVersion: 0}, nil + return &ktype.LocalMeta{CommittedVersion: 0}, nil } return nil, fmt.Errorf("could not read meta version: %w", err) } @@ -38,7 +39,7 @@ func loadLocalMeta(db types.KeyValueDB) (*LocalMeta, error) { } meta.CommittedVersion = int64(binary.BigEndian.Uint64(versionData)) //nolint:gosec // version won't exceed int64 max - hashData, err := db.Get(metaLtHashKey) + hashData, err := db.Get(ktype.MetaLtHashKey) if err != nil && !errorutils.IsNotFound(err) { return nil, fmt.Errorf("could not read meta hash: %w", err) } @@ -55,11 +56,11 @@ func loadLocalMeta(db types.KeyValueDB) (*LocalMeta, error) { // writeLocalMetaToBatch writes per-DB metadata (version + LtHash) as separate keys. func writeLocalMetaToBatch(batch types.Batch, version int64, ltHash *lthash.LtHash) error { - if err := batch.Set(metaVersionKey, versionToBytes(version)); err != nil { + if err := batch.Set(ktype.MetaVersionKey, versionToBytes(version)); err != nil { return fmt.Errorf("set meta version: %w", err) } if ltHash != nil { - if err := batch.Set(metaLtHashKey, ltHash.Marshal()); err != nil { + if err := batch.Set(ktype.MetaLtHashKey, ltHash.Marshal()); err != nil { return fmt.Errorf("set meta hash: %w", err) } } @@ -69,7 +70,7 @@ func writeLocalMetaToBatch(batch types.Batch, version int64, ltHash *lthash.LtHa // loadGlobalVersion reads the global committed version from metadata DB. // Returns 0 if not found (fresh start). 
func (s *CommitStore) loadGlobalVersion() (int64, error) { - data, err := s.metadataDB.Get(metaVersionKey) + data, err := s.metadataDB.Get(ktype.MetaVersionKey) if errorutils.IsNotFound(err) { return 0, nil } @@ -89,7 +90,7 @@ func (s *CommitStore) loadGlobalVersion() (int64, error) { // loadGlobalLtHash reads the global committed LtHash from metadata DB. // Returns nil if not found (fresh start). func (s *CommitStore) loadGlobalLtHash() (*lthash.LtHash, error) { - data, err := s.metadataDB.Get(metaLtHashKey) + data, err := s.metadataDB.Get(ktype.MetaLtHashKey) if errorutils.IsNotFound(err) { return nil, nil } @@ -106,10 +107,10 @@ func (s *CommitStore) commitGlobalMetadata(version int64, hash *lthash.LtHash) e batch := s.metadataDB.NewBatch() defer func() { _ = batch.Close() }() - if err := batch.Set(metaVersionKey, versionToBytes(version)); err != nil { + if err := batch.Set(ktype.MetaVersionKey, versionToBytes(version)); err != nil { return fmt.Errorf("failed to set global version: %w", err) } - if err := batch.Set(metaLtHashKey, hash.Marshal()); err != nil { + if err := batch.Set(ktype.MetaLtHashKey, hash.Marshal()); err != nil { return fmt.Errorf("failed to set global lthash: %w", err) } diff --git a/sei-db/state_db/sc/flatkv/store_meta_test.go b/sei-db/state_db/sc/flatkv/store_meta_test.go index 6802feaa82..f11bf368b7 100644 --- a/sei-db/state_db/sc/flatkv/store_meta_test.go +++ b/sei-db/state_db/sc/flatkv/store_meta_test.go @@ -10,6 +10,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" ) @@ -33,7 +34,7 @@ func TestLoadLocalMeta(t *testing.T) { db := setupTestDB(t) defer db.Close() - require.NoError(t, db.Set(metaVersionKey, versionToBytes(42), types.WriteOptions{})) + 
require.NoError(t, db.Set(ktype.MetaVersionKey, versionToBytes(42), types.WriteOptions{})) // Load it back loaded, err := loadLocalMeta(db) @@ -46,7 +47,7 @@ func TestLoadLocalMeta(t *testing.T) { db := setupTestDB(t) defer db.Close() - require.NoError(t, db.Set(metaVersionKey, []byte{0x01, 0x02}, types.WriteOptions{})) + require.NoError(t, db.Set(ktype.MetaVersionKey, []byte{0x01, 0x02}, types.WriteOptions{})) _, err := loadLocalMeta(db) require.Error(t, err) @@ -60,7 +61,7 @@ func TestStoreCommitBatchesUpdatesLocalMeta(t *testing.T) { addr := ktype.Address{0x12} slot := ktype.Slot{0x34} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) cs := makeChangeSet(key, padLeft32(0x56), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -71,7 +72,7 @@ func TestStoreCommitBatchesUpdatesLocalMeta(t *testing.T) { require.Equal(t, int64(1), s.localMeta[storageDBDir].CommittedVersion) // Verify it's persisted in DB - data, err := s.storageDB.Get(metaVersionKey) + data, err := s.storageDB.Get(ktype.MetaVersionKey) require.NoError(t, err) require.Equal(t, int64(1), int64(binary.BigEndian.Uint64(data))) } @@ -139,7 +140,7 @@ func TestStoreMetadataOperations(t *testing.T) { defer s.Close() // Write invalid data (wrong size) - err := s.metadataDB.Set(metaVersionKey, []byte{0x01}, types.WriteOptions{}) + err := s.metadataDB.Set(ktype.MetaVersionKey, []byte{0x01}, types.WriteOptions{}) require.NoError(t, err) // Should return error @@ -157,7 +158,7 @@ func TestGlobalMetadataPersistence(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - cfg := DefaultConfig() + cfg := config.DefaultConfig() cfg.DataDir = dbDir s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -178,7 +179,7 @@ func TestGlobalMetadataPersistence(t *testing.T) { expectedHash := s.committedLtHash.Checksum() require.NoError(t, s.Close()) - cfg2 := DefaultConfig() + cfg2 := config.DefaultConfig() cfg2.DataDir = dbDir s2, err := 
NewCommitStore(context.Background(), cfg2) require.NoError(t, err) diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index b0c531f5d9..630955ad3f 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -1,25 +1,24 @@ package flatkv import ( - "bytes" "encoding/binary" "fmt" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" ) // Get returns the value for the given key within the specified module. -// For EVM keys (moduleName == evm.EVMStoreKey), the key is a memiavl EVM key -// routed internally to account/storage/code/legacy DBs. +// For EVM keys (moduleName == keys.EVMStoreKey), the key is a prefix-encoded +// EVM key routed internally to account/storage/code/legacy DBs. // For non-EVM modules, the key is read from legacy storage with the module prefix. // Returns (value, true) if found, (nil, false) if not found. // Panics on I/O errors or unsupported key types. 
func (s *CommitStore) Get(moduleName string, key []byte) ([]byte, bool) { - if moduleName != evm.EVMStoreKey { + if moduleName != keys.EVMStoreKey { value, err := s.getLegacyValue(moduleName, key) if err != nil { panic(fmt.Sprintf("flatkv: Get module=%s key %x: %v", moduleName, key, err)) @@ -27,19 +26,19 @@ func (s *CommitStore) Get(moduleName string, key []byte) ([]byte, bool) { return value, value != nil } - kind, keyBytes := evm.ParseEVMKey(key) + kind, keyBytes := keys.ParseEVMKey(key) switch kind { - case evm.EVMKeyEmpty: + case keys.EVMKeyEmpty: return nil, false - case evm.EVMKeyStorage: + case keys.EVMKeyStorage: value, err := s.getStorageValue(keyBytes) if err != nil { panic(fmt.Sprintf("flatkv: Get storage key %x: %v", key, err)) } return value, value != nil - case evm.EVMKeyNonce, evm.EVMKeyCodeHash: + case keys.EVMKeyNonce, keys.EVMKeyCodeHash: accountData, err := s.getAccountData(keyBytes) if err != nil { panic(fmt.Sprintf("flatkv: Get account key %x: %v", key, err)) @@ -48,7 +47,7 @@ func (s *CommitStore) Get(moduleName string, key []byte) ([]byte, bool) { return nil, false } - if kind == evm.EVMKeyNonce { + if kind == keys.EVMKeyNonce { nonceBytes := make([]byte, vtype.NonceLen) binary.BigEndian.PutUint64(nonceBytes, accountData.GetNonce()) return nonceBytes, true @@ -61,15 +60,15 @@ func (s *CommitStore) Get(moduleName string, key []byte) ([]byte, bool) { } return codeHash[:], true - case evm.EVMKeyCode: + case keys.EVMKeyCode: value, err := s.getCodeValue(keyBytes) if err != nil { panic(fmt.Sprintf("flatkv: Get code key %x: %v", key, err)) } return value, value != nil - case evm.EVMKeyLegacy: - value, err := s.getLegacyValue(evm.EVMStoreKey, keyBytes) + case keys.EVMKeyLegacy: + value, err := s.getLegacyValue(keys.EVMStoreKey, keyBytes) if err != nil { panic(fmt.Sprintf("flatkv: Get legacy key %x: %v", key, err)) } @@ -84,14 +83,14 @@ func (s *CommitStore) Get(moduleName string, key []byte) ([]byte, bool) { // Only supported for EVM keys; non-EVM 
legacy data does not track block height. // If not found, returns (-1, false, nil). func (s *CommitStore) GetBlockHeightModified(moduleName string, key []byte) (int64, bool, error) { - if moduleName != evm.EVMStoreKey { + if moduleName != keys.EVMStoreKey { return -1, false, fmt.Errorf("block height modified not tracked for module %q", moduleName) } - kind, keyBytes := evm.ParseEVMKey(key) + kind, keyBytes := keys.ParseEVMKey(key) switch kind { - case evm.EVMKeyStorage: + case keys.EVMKeyStorage: sd, err := s.getStorageData(keyBytes) if err != nil { return -1, false, err @@ -101,7 +100,7 @@ func (s *CommitStore) GetBlockHeightModified(moduleName string, key []byte) (int } return sd.GetBlockHeight(), true, nil - case evm.EVMKeyNonce, evm.EVMKeyCodeHash: + case keys.EVMKeyNonce, keys.EVMKeyCodeHash: accountData, err := s.getAccountData(keyBytes) if err != nil { return -1, false, err @@ -111,7 +110,7 @@ func (s *CommitStore) GetBlockHeightModified(moduleName string, key []byte) (int } return accountData.GetBlockHeight(), true, nil - case evm.EVMKeyCode: + case keys.EVMKeyCode: cd, err := s.getCodeData(keyBytes) if err != nil { return -1, false, err @@ -132,78 +131,6 @@ func (s *CommitStore) Has(moduleName string, key []byte) bool { return found } -// Iterator returns an iterator over [start, end) in memiavl key order. -// -// IMPORTANT: Iterator only reads COMMITTED state from the underlying DBs. -// Pending writes from ApplyChangeSets are NOT visible until after Commit(). -// -// EXPERIMENTAL: not used in production; only storage keys (0x03) supported. -// Interface may change when Exporter/state-sync is implemented. 
-func (s *CommitStore) Iterator(start, end []byte) Iterator { - // Validate bounds: start must be < end - if start != nil && end != nil && bytes.Compare(start, end) >= 0 { - return &emptyIterator{} // Invalid range [start, end) - } - - // Check if start/end are storage keys before iterating storage - if start != nil { - kind, _ := evm.ParseEVMKey(start) - if kind != evm.EVMKeyUnknown && kind != evm.EVMKeyStorage { - return &emptyIterator{} - } - } - if end != nil { - kind, _ := evm.ParseEVMKey(end) - if kind != evm.EVMKeyUnknown && kind != evm.EVMKeyStorage { - return &emptyIterator{} - } - } - - return s.newStorageIterator(start, end) -} - -// IteratorByPrefix returns an iterator for keys with the given prefix. -// More efficient than Iterator for single-address queries. -// -// IMPORTANT: Like Iterator(), this only reads COMMITTED state. -// Pending writes are not visible until Commit(). -// -// EXPERIMENTAL: not used in production; only storage keys supported. -// Interface may change when Exporter/state-sync is implemented. -func (s *CommitStore) IteratorByPrefix(prefix []byte) Iterator { - if len(prefix) == 0 { - return s.Iterator(nil, nil) - } - - // Handle storage address prefix specially. - // ParseEVMKey requires full key length (prefix + addr + slot = 53 bytes), - // but a storage prefix is only (prefix + addr = 21 bytes). 
- // Detect storage prefix: 0x03 || addr(20) = 21 bytes - statePrefix := evm.StateKeyPrefix() - if len(prefix) == len(statePrefix)+ktype.AddressLen && - bytes.HasPrefix(prefix, statePrefix) { - // Storage address prefix: iterate all slots for this address - // Internal key format: addr(20) || slot(32) - // For prefix scan: use addr(20) as prefix - addrBytes := prefix[len(statePrefix):] - return s.newStoragePrefixIterator(addrBytes, prefix) - } - - // Try parsing as full key - kind, keyBytes := evm.ParseEVMKey(prefix) - if kind == evm.EVMKeyUnknown { - // Invalid prefix, return empty iterator - return &emptyIterator{} - } - - switch kind { - case evm.EVMKeyStorage: - return s.newStoragePrefixIterator(keyBytes, prefix) - default: - return &emptyIterator{} - } -} - // ============================================================================= // Internal Getters (used by ApplyChangeSets for LtHash computation) // ============================================================================= @@ -242,7 +169,7 @@ func (s *CommitStore) getStorageData(keyBytes []byte) (*vtype.StorageData, error if len(keyBytes) != ktype.AddressLen+ktype.SlotLen { return nil, fmt.Errorf("storageDB: expected key length %d, got %d", ktype.AddressLen+ktype.SlotLen, len(keyBytes)) } - return readFromDB(ktype.EVMPhysicalKey(evm.EVMKeyStorage, keyBytes), s.storageWrites, s.storageDB, vtype.DeserializeStorageData, "storageDB") + return readFromDB(ktype.EVMPhysicalKey(keys.EVMKeyStorage, keyBytes), s.storageWrites, s.storageDB, vtype.DeserializeStorageData, "storageDB") } func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { @@ -260,7 +187,7 @@ func (s *CommitStore) getCodeData(keyBytes []byte) (*vtype.CodeData, error) { if len(keyBytes) != ktype.AddressLen { return nil, fmt.Errorf("codeDB: expected key length %d, got %d", ktype.AddressLen, len(keyBytes)) } - return readFromDB(ktype.EVMPhysicalKey(evm.EVMKeyCode, keyBytes), s.codeWrites, s.codeDB, vtype.DeserializeCodeData, "codeDB") 
+ return readFromDB(ktype.EVMPhysicalKey(keys.EVMKeyCode, keyBytes), s.codeWrites, s.codeDB, vtype.DeserializeCodeData, "codeDB") } func (s *CommitStore) getCodeValue(key []byte) ([]byte, error) { diff --git a/sei-db/state_db/sc/flatkv/store_read_test.go b/sei-db/state_db/sc/flatkv/store_read_test.go index 4fb345566f..47bfb4faaf 100644 --- a/sei-db/state_db/sc/flatkv/store_read_test.go +++ b/sei-db/state_db/sc/flatkv/store_read_test.go @@ -2,15 +2,15 @@ package flatkv import ( "encoding/binary" - "path/filepath" "testing" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" - "github.com/stretchr/testify/require" ) // ============================================================================= @@ -24,10 +24,10 @@ func TestStoreGetPendingWrites(t *testing.T) { addr := ktype.Address{0x11} slot := ktype.Slot{0x22} value := padLeft32(0x33) - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) // No data initially - _, found := s.Get(evm.EVMStoreKey, key) + _, found := s.Get(keys.EVMStoreKey, key) require.False(t, found) // Apply changeset (adds to pending writes) @@ -35,7 +35,7 @@ func TestStoreGetPendingWrites(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) // Should be readable from pending writes - got, found := s.Get(evm.EVMStoreKey, key) + got, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, value, got) @@ -43,7 +43,7 @@ func TestStoreGetPendingWrites(t *testing.T) { commitAndCheck(t, s) // Should still be readable after commit - got, found = s.Get(evm.EVMStoreKey, 
key) + got, found = s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, value, got) } @@ -54,7 +54,7 @@ func TestStoreGetPendingDelete(t *testing.T) { addr := ktype.Address{0x44} slot := ktype.Slot{0x55} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) // Write and commit cs1 := makeChangeSet(key, padLeft32(0x66), false) @@ -62,7 +62,7 @@ func TestStoreGetPendingDelete(t *testing.T) { commitAndCheck(t, s) // Verify exists - _, found := s.Get(evm.EVMStoreKey, key) + _, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) // Apply delete (pending) @@ -70,14 +70,14 @@ func TestStoreGetPendingDelete(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) // Should not be found (pending delete) - _, found = s.Get(evm.EVMStoreKey, key) + _, found = s.Get(keys.EVMStoreKey, key) require.False(t, found) // Commit delete commitAndCheck(t, s) // Still should not be found - _, found = s.Get(evm.EVMStoreKey, key) + _, found = s.Get(keys.EVMStoreKey, key) require.False(t, found) } @@ -89,14 +89,14 @@ func TestStoreGetNonStorageKeys(t *testing.T) { // Non-storage keys should return not found (before write) nonStorageKeys := [][]byte{ - evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), - evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), - evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), + keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), + keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]), + keys.BuildEVMKey(keys.EVMKeyCode, addr[:]), } var found bool for _, key := range nonStorageKeys { - _, found = s.Get(evm.EVMStoreKey, key) + _, found = s.Get(keys.EVMStoreKey, key) require.False(t, found, "non-storage keys should not be found before write") } } @@ -107,10 +107,10 @@ func TestStoreHas(t *testing.T) { addr := ktype.Address{0x88} slot := ktype.Slot{0x99} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) // Initially not found - found := s.Has(evm.EVMStoreKey, key) + found := 
s.Has(keys.EVMStoreKey, key) require.False(t, found) // Write and commit @@ -119,7 +119,7 @@ func TestStoreHas(t *testing.T) { commitAndCheck(t, s) // Now should exist - found = s.Has(evm.EVMStoreKey, key) + found = s.Has(keys.EVMStoreKey, key) require.True(t, found) } @@ -135,7 +135,7 @@ func TestStoreGetLegacyPendingWrites(t *testing.T) { legacyKey := append([]byte{0x09}, addr[:]...) // Not found initially - _, found := s.Get(evm.EVMStoreKey, legacyKey) + _, found := s.Get(keys.EVMStoreKey, legacyKey) require.False(t, found) // Apply changeset @@ -143,13 +143,13 @@ func TestStoreGetLegacyPendingWrites(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) // Should be readable from pending writes - got, found := s.Get(evm.EVMStoreKey, legacyKey) + got, found := s.Get(keys.EVMStoreKey, legacyKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x40}, got) // Commit and still readable commitAndCheck(t, s) - got, found = s.Get(evm.EVMStoreKey, legacyKey) + got, found = s.Get(keys.EVMStoreKey, legacyKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x40}, got) } @@ -166,7 +166,7 @@ func TestStoreGetLegacyPendingDelete(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) commitAndCheck(t, s) - _, found := s.Get(evm.EVMStoreKey, legacyKey) + _, found := s.Get(keys.EVMStoreKey, legacyKey) require.True(t, found) // Apply delete (pending) @@ -174,12 +174,12 @@ func TestStoreGetLegacyPendingDelete(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) // Should not be found (pending delete) - _, found = s.Get(evm.EVMStoreKey, legacyKey) + _, found = s.Get(keys.EVMStoreKey, legacyKey) require.False(t, found) // Commit delete commitAndCheck(t, s) - _, found = s.Get(evm.EVMStoreKey, legacyKey) + _, found = s.Get(keys.EVMStoreKey, legacyKey) require.False(t, found) } @@ -193,7 +193,7 @@ func TestStoreDelete(t *testing.T) { addr := ktype.Address{0x55} slot := ktype.Slot{0x66} - 
key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) // Write cs1 := makeChangeSet(key, padLeft32(0x77), false) @@ -201,7 +201,7 @@ func TestStoreDelete(t *testing.T) { commitAndCheck(t, s) // Verify exists - got, found := s.Get(evm.EVMStoreKey, key) + got, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0x77), got) @@ -211,193 +211,10 @@ func TestStoreDelete(t *testing.T) { commitAndCheck(t, s) // Should not exist - _, found = s.Get(evm.EVMStoreKey, key) + _, found = s.Get(keys.EVMStoreKey, key) require.False(t, found) } -// ============================================================================= -// Iterator -// ============================================================================= - -func TestStoreIteratorEmpty(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - // Empty store - iter := s.Iterator(nil, nil) - defer iter.Close() - - require.False(t, iter.Valid(), "empty store should have invalid iterator") -} - -func TestStoreIteratorSingleKey(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := ktype.Address{0xAA} - slot := ktype.Slot{0xBB} - value := padLeft32(0xCC) - memiavlKey := memiavlStorageKey(addr, slot) - physKey := storagePhysKey(addr, slot) - - cs := makeChangeSet(memiavlKey, value, false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - commitAndCheck(t, s) - - // Iterate all - iter := s.Iterator(nil, nil) - defer iter.Close() - - require.True(t, iter.First()) - require.True(t, iter.Valid()) - require.Equal(t, physKey, iter.Key()) // physical key format - require.Equal(t, value, iter.Value()) - - // Only one key - iter.Next() - require.False(t, iter.Valid()) -} - -func TestStoreIteratorMultipleKeys(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := ktype.Address{0xDD} - - // Write multiple slots - entries := []struct { - slot ktype.Slot - value byte - }{ - {ktype.Slot{0x01}, 0xAA}, - {ktype.Slot{0x02}, 0xBB}, - 
{ktype.Slot{0x03}, 0xCC}, - } - - pairs := make([]*proto.KVPair, len(entries)) - for i, e := range entries { - key := memiavlStorageKey(addr, e.slot) - pairs[i] = &proto.KVPair{Key: key, Value: padLeft32(e.value)} - } - - cs := &proto.NamedChangeSet{ - Name: "evm", - Changeset: proto.ChangeSet{ - Pairs: pairs, - }, - } - - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - commitAndCheck(t, s) - - // Iterate all - iter := s.Iterator(nil, nil) - defer iter.Close() - - count := 0 - for iter.First(); iter.Valid(); iter.Next() { - count++ - require.NotNil(t, iter.Key()) - require.NotNil(t, iter.Value()) - } - require.Equal(t, len(entries), count) -} - -func TestStoreIteratorNonStorageKeys(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - // Iterating non-storage keys should return empty iterator (Phase 1) - addr := ktype.Address{0xCC} - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - - iter := s.Iterator(nonceKey, ktype.PrefixEnd(nonceKey)) - defer iter.Close() - - require.False(t, iter.Valid(), "non-storage key iteration should be empty in Phase 1") -} - -// ============================================================================= -// Prefix Iterator -// ============================================================================= - -func TestStoreStoragePrefixIteration(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := ktype.Address{0xAB} - - // Write multiple slots - for i := byte(1); i <= 3; i++ { - slot := ktype.Slot{i} - key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, padLeft32(i*10), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - } - commitAndCheck(t, s) - - // Iterate by address prefix - prefix := append(evm.StateKeyPrefix(), addr[:]...) 
- iter := s.IteratorByPrefix(prefix) - defer iter.Close() - - count := 0 - for iter.First(); iter.Valid(); iter.Next() { - count++ - require.NotNil(t, iter.Key()) - require.NotNil(t, iter.Value()) - } - require.Equal(t, 3, count) -} - -func TestStoreIteratorByPrefixAddress(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr1 := ktype.Address{0xAA} - addr2 := ktype.Address{0xBB} - - // Write slots for addr1 - for i := byte(1); i <= 3; i++ { - slot := ktype.Slot{i} - key := memiavlStorageKey(addr1, slot) - cs := makeChangeSet(key, padLeft32(i*10), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - } - - // Write slots for addr2 - for i := byte(1); i <= 2; i++ { - slot := ktype.Slot{i} - key := memiavlStorageKey(addr2, slot) - cs := makeChangeSet(key, padLeft32(i*20), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - } - - commitAndCheck(t, s) - - // Iterate by addr1 prefix - prefix1 := append(evm.StateKeyPrefix(), addr1[:]...) - iter1 := s.IteratorByPrefix(prefix1) - defer iter1.Close() - - count1 := 0 - for iter1.First(); iter1.Valid(); iter1.Next() { - count1++ - } - require.Equal(t, 3, count1, "should find 3 slots for addr1") - - // Iterate by addr2 prefix - prefix2 := append(evm.StateKeyPrefix(), addr2[:]...) 
- iter2 := s.IteratorByPrefix(prefix2) - defer iter2.Close() - - count2 := 0 - for iter2.First(); iter2.Valid(); iter2.Next() { - count2++ - } - require.Equal(t, 2, count2, "should find 2 slots for addr2") -} - // ============================================================================= // R-1 ~ R-5: Get/Has for All Key Types from Committed DB // ============================================================================= @@ -426,40 +243,40 @@ func TestGetAllKeyTypesFromCommittedDB(t *testing.T) { commitAndCheck(t, s) // Storage - got, found := s.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot))) + got, found := s.Get(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot))) require.True(t, found, "storage should be found") require.Equal(t, padLeft32(0x42), got) // Nonce - got, found = s.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) + got, found = s.Get(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyNonce, addr[:])) require.True(t, found, "nonce should be found") require.Equal(t, uint64(7), binary.BigEndian.Uint64(got)) // CodeHash - got, found = s.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) + got, found = s.Get(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:])) require.True(t, found, "codehash should be found") require.Equal(t, ch[:], got) // Code - got, found = s.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) + got, found = s.Get(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyCode, addr[:])) require.True(t, found, "code should be found") require.Equal(t, bytecode, got) // Legacy - got, found = s.Get(evm.EVMStoreKey, legacyKey) + got, found = s.Get(keys.EVMStoreKey, legacyKey) require.True(t, found, "legacy should be found") require.Equal(t, legacyVal, got) // Has should match - found = s.Has(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot))) + found = 
s.Has(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot))) require.True(t, found) - found = s.Has(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) + found = s.Has(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyNonce, addr[:])) require.True(t, found) - found = s.Has(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) + found = s.Has(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:])) require.True(t, found) - found = s.Has(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) + found = s.Has(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyCode, addr[:])) require.True(t, found) - found = s.Has(evm.EVMStoreKey, legacyKey) + found = s.Has(keys.EVMStoreKey, legacyKey) require.True(t, found) } @@ -468,24 +285,24 @@ func TestGetNonceFromCommittedEOA(t *testing.T) { defer s.Close() addr := addrN(0xA2) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ namedCS(noncePair(addr, 42)), })) commitAndCheck(t, s) - got, found := s.Get(evm.EVMStoreKey, nonceKey) + got, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "nonce should be found for EOA") require.Equal(t, uint64(42), binary.BigEndian.Uint64(got)) - _, found = s.Get(evm.EVMStoreKey, chKey) + _, found = s.Get(keys.EVMStoreKey, chKey) require.False(t, found, "codehash should NOT be found for EOA") - found = s.Has(evm.EVMStoreKey, nonceKey) + found = s.Has(keys.EVMStoreKey, nonceKey) require.True(t, found) - found = s.Has(evm.EVMStoreKey, chKey) + found = s.Has(keys.EVMStoreKey, chKey) require.False(t, found) } @@ -495,25 +312,25 @@ func TestGetCodeHashFromCommittedContract(t *testing.T) { addr := addrN(0xA3) ch := codeHashN(0xCC) - nonceKey := 
evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ namedCS(noncePair(addr, 1), codeHashPair(addr, ch)), })) commitAndCheck(t, s) - got, found := s.Get(evm.EVMStoreKey, chKey) + got, found := s.Get(keys.EVMStoreKey, chKey) require.True(t, found, "codehash should be found for contract") require.Equal(t, ch[:], got) - got, found = s.Get(evm.EVMStoreKey, nonceKey) + got, found = s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found) require.Equal(t, uint64(1), binary.BigEndian.Uint64(got)) - found = s.Has(evm.EVMStoreKey, chKey) + found = s.Has(keys.EVMStoreKey, chKey) require.True(t, found) - found = s.Has(evm.EVMStoreKey, nonceKey) + found = s.Has(keys.EVMStoreKey, nonceKey) require.True(t, found) } @@ -523,20 +340,20 @@ func TestGetCodeFromCommittedDB(t *testing.T) { addr := addrN(0xA4) bytecode := []byte{0x60, 0x80, 0x52} - codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) + codeKey := keys.BuildEVMKey(keys.EVMKeyCode, addr[:]) // Pending code write is visible before commit require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ namedCS(codePair(addr, bytecode)), })) - got, found := s.Get(evm.EVMStoreKey, codeKey) + got, found := s.Get(keys.EVMStoreKey, codeKey) require.True(t, found, "pending code write should be visible") require.Equal(t, bytecode, got) commitAndCheck(t, s) // Still visible after commit - got, found = s.Get(evm.EVMStoreKey, codeKey) + got, found = s.Get(keys.EVMStoreKey, codeKey) require.True(t, found) require.Equal(t, bytecode, got) @@ -544,11 +361,11 @@ func TestGetCodeFromCommittedDB(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ namedCS(codeDeletePair(addr)), })) - _, found = s.Get(evm.EVMStoreKey, codeKey) + _, found = s.Get(keys.EVMStoreKey, codeKey) 
require.False(t, found, "pending code delete should hide the entry") commitAndCheck(t, s) - _, found = s.Get(evm.EVMStoreKey, codeKey) + _, found = s.Get(keys.EVMStoreKey, codeKey) require.False(t, found, "code should be gone after commit") } @@ -566,10 +383,10 @@ func TestGetUnknownKeyTypes(t *testing.T) { {"empty key", []byte{}}, } { t.Run(tc.name, func(t *testing.T) { - val, found := s.Get(evm.EVMStoreKey, tc.key) + val, found := s.Get(keys.EVMStoreKey, tc.key) require.False(t, found) require.Nil(t, val) - found = s.Has(evm.EVMStoreKey, tc.key) + found = s.Has(keys.EVMStoreKey, tc.key) require.False(t, found) }) } @@ -585,10 +402,10 @@ func TestGetUnknownKeyTypes(t *testing.T) { {"short nonce-like (2 bytes)", []byte{0x04, 0x01}}, } { t.Run(tc.name, func(t *testing.T) { - val, found := s.Get(evm.EVMStoreKey, tc.key) + val, found := s.Get(keys.EVMStoreKey, tc.key) require.False(t, found) require.Nil(t, val) - found = s.Has(evm.EVMStoreKey, tc.key) + found = s.Has(keys.EVMStoreKey, tc.key) require.False(t, found) }) } @@ -606,8 +423,8 @@ func TestGetAccountAfterFullDeletePending(t *testing.T) { defer s.Close() addr := addrN(0xB1) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ namedCS(noncePair(addr, 10), codeHashPair(addr, codeHashN(0xDD))), @@ -618,15 +435,15 @@ func TestGetAccountAfterFullDeletePending(t *testing.T) { namedCS(nonceDeletePair(addr), codeHashDeletePair(addr)), })) - _, nonceFound := s.Get(evm.EVMStoreKey, nonceKey) + _, nonceFound := s.Get(keys.EVMStoreKey, nonceKey) require.False(t, nonceFound, "nonce should not be found after full delete (isDelete=true)") - _, chFound := s.Get(evm.EVMStoreKey, chKey) + _, chFound := s.Get(keys.EVMStoreKey, chKey) require.False(t, chFound, "codehash should not 
be found after full delete (isDelete=true)") - found := s.Has(evm.EVMStoreKey, nonceKey) + found := s.Has(keys.EVMStoreKey, nonceKey) require.False(t, found) - found = s.Has(evm.EVMStoreKey, chKey) + found = s.Has(keys.EVMStoreKey, chKey) require.False(t, found) } @@ -635,8 +452,8 @@ func TestGetAccountAfterFullDeleteCommitted(t *testing.T) { defer s.Close() addr := addrN(0xB2) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ namedCS(noncePair(addr, 5), codeHashPair(addr, codeHashN(0xEE))), @@ -650,15 +467,15 @@ func TestGetAccountAfterFullDeleteCommitted(t *testing.T) { // After full delete + commit, the account row is physically deleted from // accountDB (batch.Delete in commitBatches). Both fields return not-found. - _, nonceFound := s.Get(evm.EVMStoreKey, nonceKey) + _, nonceFound := s.Get(keys.EVMStoreKey, nonceKey) require.False(t, nonceFound, "nonce should not be found after full delete + commit") - _, chFound := s.Get(evm.EVMStoreKey, chKey) + _, chFound := s.Get(keys.EVMStoreKey, chKey) require.False(t, chFound, "codehash should not be found after full delete + commit") - found := s.Has(evm.EVMStoreKey, nonceKey) + found := s.Has(keys.EVMStoreKey, nonceKey) require.False(t, found) - found = s.Has(evm.EVMStoreKey, chKey) + found = s.Has(keys.EVMStoreKey, chKey) require.False(t, found) } @@ -667,8 +484,8 @@ func TestGetAccountAfterPartialDelete(t *testing.T) { defer s.Close() addr := addrN(0xB3) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ 
namedCS(noncePair(addr, 99), codeHashPair(addr, codeHashN(0xFF))), @@ -681,11 +498,11 @@ func TestGetAccountAfterPartialDelete(t *testing.T) { })) commitAndCheck(t, s) - got, found := s.Get(evm.EVMStoreKey, nonceKey) + got, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "nonce should survive partial delete") require.Equal(t, uint64(99), binary.BigEndian.Uint64(got)) - _, found = s.Get(evm.EVMStoreKey, chKey) + _, found = s.Get(keys.EVMStoreKey, chKey) require.False(t, found, "codehash should be gone after delete") // Account row should still exist (EOA encoding) @@ -705,14 +522,14 @@ func TestGetAfterOverwrite(t *testing.T) { addr := addrN(0xC1) slot := slotN(0x01) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ namedCS(storagePair(addr, slot, []byte{0x11})), })) commitAndCheck(t, s) - got, found := s.Get(evm.EVMStoreKey, key) + got, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0x11), got) @@ -721,7 +538,7 @@ func TestGetAfterOverwrite(t *testing.T) { })) commitAndCheck(t, s) - got, found = s.Get(evm.EVMStoreKey, key) + got, found = s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0x22, 0x33), got, "should return v2 value after overwrite") } @@ -732,7 +549,7 @@ func TestGetAfterDeleteAndRecreate(t *testing.T) { addr := addrN(0xC2) slot := slotN(0x01) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) // v1: create require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -746,7 +563,7 @@ func TestGetAfterDeleteAndRecreate(t *testing.T) { })) commitAndCheck(t, s) - _, found := s.Get(evm.EVMStoreKey, key) + _, found := s.Get(keys.EVMStoreKey, key) require.False(t, found, "should not be found after 
delete") // v3: re-create with different value @@ -755,7 +572,7 @@ func TestGetAfterDeleteAndRecreate(t *testing.T) { })) commitAndCheck(t, s) - got, found := s.Get(evm.EVMStoreKey, key) + got, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0xBB, 0xCC), got, "should return v3 value after re-create") } @@ -770,7 +587,7 @@ func TestGetAfterReopenAllKeyTypes(t *testing.T) { legacyKey := append([]byte{0x09}, addr[:]...) // Phase 1: write everything and close - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = dir s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -792,7 +609,7 @@ func TestGetAfterReopenAllKeyTypes(t *testing.T) { require.NoError(t, s1.Close()) // Phase 2: reopen and verify all reads - cfg2 := DefaultTestConfig(t) + cfg2 := config.DefaultTestConfig(t) cfg2.DataDir = dir s2, err := NewCommitStore(t.Context(), cfg2) require.NoError(t, err) @@ -800,23 +617,23 @@ func TestGetAfterReopenAllKeyTypes(t *testing.T) { require.NoError(t, err) defer s2.Close() - got, found := s2.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot))) + got, found := s2.Get(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot))) require.True(t, found, "storage should survive reopen") require.Equal(t, padLeft32(0x42), got) - got, found = s2.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) + got, found = s2.Get(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyNonce, addr[:])) require.True(t, found, "nonce should survive reopen") require.Equal(t, uint64(100), binary.BigEndian.Uint64(got)) - got, found = s2.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) + got, found = s2.Get(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:])) require.True(t, found, "codehash should survive reopen") require.Equal(t, ch[:], got) - got, found = s2.Get(evm.EVMStoreKey, 
evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) + got, found = s2.Get(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyCode, addr[:])) require.True(t, found, "code should survive reopen") require.Equal(t, bytecode, got) - got, found = s2.Get(evm.EVMStoreKey, legacyKey) + got, found = s2.Get(keys.EVMStoreKey, legacyKey) require.True(t, found, "legacy should survive reopen") require.Equal(t, []byte{0x77}, got) } @@ -837,14 +654,14 @@ func TestIteratorDoesNotSeePendingWrites(t *testing.T) { })) // Before commit: iterator should not see the pending write - iter := s.Iterator(nil, nil) + iter := s.RawGlobalIterator() require.False(t, iter.First(), "iterator should not see pending writes") require.NoError(t, iter.Close()) commitAndCheck(t, s) // After commit: iterator should see it - iter = s.Iterator(nil, nil) + iter = s.RawGlobalIterator() defer iter.Close() require.True(t, iter.First(), "iterator should see committed entry") require.True(t, iter.Valid()) @@ -873,510 +690,16 @@ func TestIteratorDoesNotSeePendingDeletes(t *testing.T) { })) // Iterator should still see all 3 (pending delete not visible) - count := iterCount(t, s.Iterator(nil, nil)) + count := iterCount(t, s.RawGlobalIterator()) require.Equal(t, 3, count, "pending delete should not affect iterator") commitAndCheck(t, s) // After commit: only 2 remain - count = iterCount(t, s.Iterator(nil, nil)) + count = iterCount(t, s.RawGlobalIterator()) require.Equal(t, 2, count, "committed delete should remove entry from iterator") } -// ============================================================================= -// R-14 ~ R-18: Iterator Navigation -// ============================================================================= - -func TestIteratorLast(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xD3) - slots := []ktype.Slot{slotN(0x10), slotN(0x20), slotN(0x30)} - - var pairs []*proto.KVPair - for _, sl := range slots { - pairs = append(pairs, storagePair(addr, sl, []byte{0xAA})) - } 
- require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{namedCS(pairs...)})) - commitAndCheck(t, s) - - iter := s.Iterator(nil, nil) - defer iter.Close() - - require.True(t, iter.Last(), "Last() should succeed") - require.True(t, iter.Valid()) - require.Equal(t, storagePhysKey(addr, slotN(0x30)), iter.Key()) -} - -func TestIteratorSeekGE(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xD4) - slots := []byte{0x10, 0x20, 0x30, 0x40, 0x50} - var pairs []*proto.KVPair - for _, sl := range slots { - pairs = append(pairs, storagePair(addr, slotN(sl), []byte{sl})) - } - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{namedCS(pairs...)})) - commitAndCheck(t, s) - - iter := s.Iterator(nil, nil) - defer iter.Close() - - // SeekGE to a key between 0x20 and 0x30 → should land on 0x30 - seekKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x25))) - require.True(t, iter.SeekGE(seekKey)) - require.Equal(t, storagePhysKey(addr, slotN(0x30)), iter.Key()) - - // SeekGE to exact key 0x30 → should land on 0x30 - seekKey = evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x30))) - require.True(t, iter.SeekGE(seekKey)) - require.Equal(t, storagePhysKey(addr, slotN(0x30)), iter.Key()) - - // SeekGE past all keys → invalid - seekKey = evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0xFF))) - require.False(t, iter.SeekGE(seekKey)) -} - -func TestIteratorSeekLT(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xD5) - slots := []byte{0x10, 0x20, 0x30, 0x40, 0x50} - var pairs []*proto.KVPair - for _, sl := range slots { - pairs = append(pairs, storagePair(addr, slotN(sl), []byte{sl})) - } - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{namedCS(pairs...)})) - commitAndCheck(t, s) - - iter := s.Iterator(nil, nil) - defer iter.Close() - - // SeekLT(0x30) → should land on 0x20 - seekKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, 
ktype.StorageKey(addr, slotN(0x30))) - require.True(t, iter.SeekLT(seekKey)) - require.Equal(t, storagePhysKey(addr, slotN(0x20)), iter.Key()) - - // SeekLT before first key → invalid - seekKey = evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x10))) - require.False(t, iter.SeekLT(seekKey)) -} - -func TestIteratorPrev(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xD6) - slots := []ktype.Slot{slotN(0x10), slotN(0x20), slotN(0x30)} - var pairs []*proto.KVPair - for _, sl := range slots { - pairs = append(pairs, storagePair(addr, sl, []byte{0xAA})) - } - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{namedCS(pairs...)})) - commitAndCheck(t, s) - - iter := s.Iterator(nil, nil) - defer iter.Close() - - require.True(t, iter.Last()) - require.Equal(t, storagePhysKey(addr, slotN(0x30)), iter.Key()) - - require.True(t, iter.Prev()) - require.Equal(t, storagePhysKey(addr, slotN(0x20)), iter.Key()) - - require.True(t, iter.Prev()) - require.Equal(t, storagePhysKey(addr, slotN(0x10)), iter.Key()) - - require.False(t, iter.Prev(), "Prev past first should be invalid") -} - -func TestIteratorSeekGEKeyTypeMismatch(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xD7) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS(storagePair(addr, slotN(0x01), []byte{0xAA})), - })) - commitAndCheck(t, s) - - iter := s.Iterator(nil, nil) - defer iter.Close() - - // SeekGE with a nonce key on a storage iterator → mismatch - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - require.False(t, iter.SeekGE(nonceKey)) - require.Error(t, iter.Error(), "key type mismatch should set an error") -} - -func TestIteratorSeekGEPhysicalKeyTypeMismatch(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xD7) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS(storagePair(addr, slotN(0x01), []byte{0xAA})), - })) - commitAndCheck(t, 
s) - - iter := s.Iterator(nil, nil) - defer iter.Close() - - // SeekGE with an account physical key on a storage iterator → mismatch - accountKey := ktype.EVMPhysicalKey(ktype.EVMKeyAccount, addr[:]) - require.False(t, iter.SeekGE(accountKey)) - require.Error(t, iter.Error(), "physical key type mismatch should set an error") - require.Contains(t, iter.Error().Error(), "mismatch") -} - -func TestIteratorSeekGEInvalidPhysicalKey(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xD7) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS(storagePair(addr, slotN(0x01), []byte{0xAA})), - })) - commitAndCheck(t, s) - - iter := s.Iterator(nil, nil) - defer iter.Close() - - // SeekGE with garbage ASCII key → rejected - require.False(t, iter.SeekGE([]byte("not-a-valid-key"))) - require.Error(t, iter.Error(), "invalid physical key should set an error") -} - -func TestIteratorSeekGEPhysicalKeyRoundTrip(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xD7) - slot := slotN(0x01) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS(storagePair(addr, slot, []byte{0xAA})), - })) - commitAndCheck(t, s) - - iter := s.Iterator(nil, nil) - defer iter.Close() - require.True(t, iter.First()) - - // Key() returns a physical key; feeding it back to SeekGE must work. - physKey := append([]byte(nil), iter.Key()...) 
- require.True(t, iter.SeekGE(physKey)) - require.Equal(t, physKey, iter.Key()) -} - -// ============================================================================= -// R-19: Iterator Skips Meta Keys -// ============================================================================= - -func TestIteratorSkipsMetaKeys(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xD8) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS( - storagePair(addr, slotN(0x01), []byte{0x11}), - storagePair(addr, slotN(0x02), []byte{0x22}), - ), - })) - commitAndCheck(t, s) - - // Verify _meta/ keys exist in raw storageDB - rawIter, err := s.storageDB.NewIter(&types.IterOptions{}) - require.NoError(t, err) - rawCount := 0 - metaCount := 0 - for rawIter.First(); rawIter.Valid(); rawIter.Next() { - rawCount++ - if isMetaKey(rawIter.Key()) { - metaCount++ - } - } - require.NoError(t, rawIter.Error()) - require.NoError(t, rawIter.Close()) - require.Greater(t, metaCount, 0, "storageDB should contain _meta/ keys") - - // FlatKV iterator should skip meta keys - count := iterCount(t, s.Iterator(nil, nil)) - require.Equal(t, 2, count, "iterator should only see live data entries, not _meta/") - require.Equal(t, rawCount-metaCount, count) -} - -// ============================================================================= -// R-20 ~ R-23: Iterator Range Bounds -// ============================================================================= - -func TestIteratorRangeBounds(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xD9) - slots := []byte{0x10, 0x20, 0x30, 0x40, 0x50} - var pairs []*proto.KVPair - for _, sl := range slots { - pairs = append(pairs, storagePair(addr, slotN(sl), []byte{sl})) - } - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{namedCS(pairs...)})) - commitAndCheck(t, s) - - startKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x20))) - endKey := 
evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x40))) - - iter := s.Iterator(startKey, endKey) - defer iter.Close() - - var keys [][]byte - for iter.First(); iter.Valid(); iter.Next() { - keys = append(keys, append([]byte(nil), iter.Key()...)) - } - - require.Len(t, keys, 2, "range [0x20, 0x40) should see 0x20 and 0x30") - require.Equal(t, storagePhysKey(addr, slotN(0x20)), keys[0]) - require.Equal(t, storagePhysKey(addr, slotN(0x30)), keys[1]) -} - -func TestIteratorHalfOpenStart(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xDA) - slots := []byte{0x10, 0x20, 0x30, 0x40, 0x50} - var pairs []*proto.KVPair - for _, sl := range slots { - pairs = append(pairs, storagePair(addr, slotN(sl), []byte{sl})) - } - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{namedCS(pairs...)})) - commitAndCheck(t, s) - - endKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x30))) - count := iterCount(t, s.Iterator(nil, endKey)) - require.Equal(t, 2, count, "[nil, 0x30) should see 0x10, 0x20") -} - -func TestIteratorHalfOpenEnd(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xDB) - slots := []byte{0x10, 0x20, 0x30, 0x40, 0x50} - var pairs []*proto.KVPair - for _, sl := range slots { - pairs = append(pairs, storagePair(addr, slotN(sl), []byte{sl})) - } - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{namedCS(pairs...)})) - commitAndCheck(t, s) - - startKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x30))) - count := iterCount(t, s.Iterator(startKey, nil)) - require.Equal(t, 3, count, "[0x30, nil) should see 0x30, 0x40, 0x50") -} - -func TestIteratorInvalidRange(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xDC) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS(storagePair(addr, slotN(0x01), []byte{0xAA})), - })) - commitAndCheck(t, s) - - startKey := 
evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x30))) - endKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x10))) - - iter := s.Iterator(startKey, endKey) - defer iter.Close() - require.False(t, iter.Valid(), "start >= end should yield empty iterator") -} - -// ============================================================================= -// R-24 ~ R-27: Iterator Domain and Edge Cases -// ============================================================================= - -func TestIteratorDomain(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xDD) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS(storagePair(addr, slotN(0x01), []byte{0xAA})), - })) - commitAndCheck(t, s) - - startKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x00))) - endKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0xFF))) - iter := s.Iterator(startKey, endKey) - defer iter.Close() - - domainStart, domainEnd := iter.Domain() - require.Equal(t, startKey, domainStart) - require.Equal(t, endKey, domainEnd) -} - -func TestIteratorByPrefixEmpty(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xDE) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS( - storagePair(addr, slotN(0x01), []byte{0x11}), - storagePair(addr, slotN(0x02), []byte{0x22}), - ), - })) - commitAndCheck(t, s) - - // Empty prefix falls back to Iterator(nil, nil) → sees all storage - count := iterCount(t, s.IteratorByPrefix([]byte{})) - require.Equal(t, 2, count, "empty prefix should iterate all storage") -} - -func TestIteratorByPrefixNonStorage(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xDF) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS(noncePair(addr, 1), storagePair(addr, slotN(0x01), []byte{0x11})), - })) - commitAndCheck(t, s) - - // Nonce prefix → 
empty iterator (only storage iteration is supported) - noncePrefix := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - iter := s.IteratorByPrefix(noncePrefix) - defer iter.Close() - require.False(t, iter.Valid(), "non-storage prefix should return empty iterator") -} - -func TestIteratorAfterClose(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0xE0) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS(storagePair(addr, slotN(0x01), []byte{0xAA})), - })) - commitAndCheck(t, s) - - iter := s.Iterator(nil, nil) - require.True(t, iter.First()) - require.NoError(t, iter.Close()) - - // After close: all navigation returns false, no panic - require.False(t, iter.First()) - require.False(t, iter.Last()) - require.False(t, iter.Next()) - require.False(t, iter.Prev()) - require.False(t, iter.Valid()) - require.Nil(t, iter.Key()) - require.Nil(t, iter.Value()) -} - -// ============================================================================= -// R-28 ~ R-29: Read-Only Store -// ============================================================================= - -func TestReadOnlyGetAllKeyTypes(t *testing.T) { - dir := t.TempDir() - - addr := addrN(0xF1) - slot := slotN(0x01) - ch := codeHashN(0xAA) - bytecode := []byte{0x60, 0x80} - legacyKey := append([]byte{0x09}, addr[:]...) 
- - cfg := DefaultTestConfig(t) - cfg.SnapshotInterval = 1 - cfg.SnapshotKeepRecent = 5 - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - defer s.Close() - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS( - noncePair(addr, 50), - codeHashPair(addr, ch), - codePair(addr, bytecode), - storagePair(addr, slot, []byte{0x42}), - ), - makeChangeSet(legacyKey, []byte{0x77}, false), - })) - _, err = s.Commit() - require.NoError(t, err) - - ro, err := s.LoadVersion(1, true) - require.NoError(t, err) - defer ro.Close() - - got, found := ro.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot))) - require.True(t, found) - require.Equal(t, padLeft32(0x42), got) - - got, found = ro.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) - require.True(t, found) - require.Equal(t, uint64(50), binary.BigEndian.Uint64(got)) - - got, found = ro.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) - require.True(t, found) - require.Equal(t, ch[:], got) - - got, found = ro.Get(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) - require.True(t, found) - require.Equal(t, bytecode, got) - - got, found = ro.Get(evm.EVMStoreKey, legacyKey) - require.True(t, found) - require.Equal(t, []byte{0x77}, got) -} - -func TestReadOnlyIterator(t *testing.T) { - dir := t.TempDir() - - addr := addrN(0xF2) - - cfg := DefaultTestConfig(t) - cfg.SnapshotInterval = 1 - cfg.SnapshotKeepRecent = 5 - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - defer s.Close() - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS( - storagePair(addr, slotN(0x10), []byte{0x11}), - storagePair(addr, slotN(0x20), []byte{0x22}), 
- storagePair(addr, slotN(0x30), []byte{0x33}), - ), - })) - _, err = s.Commit() - require.NoError(t, err) - - ro, err := s.LoadVersion(1, true) - require.NoError(t, err) - defer ro.Close() - - count := iterCount(t, ro.Iterator(nil, nil)) - require.Equal(t, 3, count, "read-only iterator should see all committed entries") - - prefix := append(evm.StateKeyPrefix(), addr[:]...) - count = iterCount(t, ro.IteratorByPrefix(prefix)) - require.Equal(t, 3, count, "read-only prefix iterator should see all slots for addr") -} - // ============================================================================= // Helpers // ============================================================================= @@ -1396,7 +719,7 @@ func TestGetNilKey(t *testing.T) { s := setupTestStore(t) defer s.Close() - val, found := s.Get(evm.EVMStoreKey, nil) + val, found := s.Get(keys.EVMStoreKey, nil) require.False(t, found) require.Nil(t, val) } @@ -1405,7 +728,7 @@ func TestGetEmptyKey(t *testing.T) { s := setupTestStore(t) defer s.Close() - val, found := s.Get(evm.EVMStoreKey, []byte{}) + val, found := s.Get(keys.EVMStoreKey, []byte{}) require.False(t, found) require.Nil(t, val) } @@ -1414,7 +737,7 @@ func TestHasNilKey(t *testing.T) { s := setupTestStore(t) defer s.Close() - found := s.Has(evm.EVMStoreKey, nil) + found := s.Has(keys.EVMStoreKey, nil) require.False(t, found) } @@ -1422,7 +745,7 @@ func TestHasEmptyKey(t *testing.T) { s := setupTestStore(t) defer s.Close() - found := s.Has(evm.EVMStoreKey, []byte{}) + found := s.Has(keys.EVMStoreKey, []byte{}) require.False(t, found) } @@ -1435,7 +758,7 @@ func TestHasForAllKeyTypes(t *testing.T) { ch := codeHashN(0xAB) pairs := []*proto.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)), Value: padLeft32(0x11)}, + {Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)), Value: padLeft32(0x11)}, noncePair(addr, 42), codeHashPair(addr, ch), codePair(addr, []byte{0x60, 0x60}), @@ -1447,13 +770,13 
@@ func TestHasForAllKeyTypes(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) - found := s.Has(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot))) + found := s.Has(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot))) require.True(t, found) - found = s.Has(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) + found = s.Has(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyNonce, addr[:])) require.True(t, found) - found = s.Has(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) + found = s.Has(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:])) require.True(t, found) - found = s.Has(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) + found = s.Has(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyCode, addr[:])) require.True(t, found) } @@ -1463,17 +786,17 @@ func TestHasOnPendingDeletes(t *testing.T) { addr := addrN(0x11) slot := slotN(0x01) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) cs := makeChangeSet(key, padLeft32(0xAA), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) - found := s.Has(evm.EVMStoreKey, key) + found := s.Has(keys.EVMStoreKey, key) require.True(t, found) delCS := makeChangeSet(key, nil, true) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{delCS})) - found = s.Has(evm.EVMStoreKey, key) + found = s.Has(keys.EVMStoreKey, key) require.False(t, found, "Has should return false for pending-deleted key") } @@ -1482,7 +805,7 @@ func TestHasOnReadOnlyStore(t *testing.T) { addr := addrN(0x12) slot := slotN(0x01) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) cs := makeChangeSet(key, padLeft32(0xBB), 
false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -1492,36 +815,23 @@ func TestHasOnReadOnlyStore(t *testing.T) { require.NoError(t, err) defer ro.Close() - found := ro.Has(evm.EVMStoreKey, key) + found := ro.Has(keys.EVMStoreKey, key) require.True(t, found) - found = ro.Has(evm.EVMStoreKey, evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addrN(0xFF), slotN(0xFF)))) + found = ro.Has(keys.EVMStoreKey, keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0xFF), slotN(0xFF)))) require.False(t, found) require.NoError(t, s.Close()) } func TestGetAfterRollback(t *testing.T) { - s := setupTestStoreWithConfig(t, &Config{ - SnapshotInterval: 2, - SnapshotKeepRecent: 5, - AccountDBConfig: smallTestPebbleConfig(), - AccountCacheConfig: smallTestCacheConfig(), - CodeDBConfig: smallTestPebbleConfig(), - CodeCacheConfig: smallTestCacheConfig(), - StorageDBConfig: smallTestPebbleConfig(), - StorageCacheConfig: smallTestCacheConfig(), - LegacyDBConfig: smallTestPebbleConfig(), - LegacyCacheConfig: smallTestCacheConfig(), - MetadataDBConfig: smallTestPebbleConfig(), - MetadataCacheConfig: smallTestCacheConfig(), - ReaderThreadsPerCore: 2.0, - ReaderPoolQueueSize: 1024, - MiscPoolThreadsPerCore: 4.0, - }) + cfg := config.DefaultTestConfig(t) + cfg.SnapshotInterval = 2 + cfg.SnapshotKeepRecent = 5 + s := setupTestStoreWithConfig(t, cfg) defer s.Close() addr := addrN(0x13) slot := slotN(0x01) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) cs1 := makeChangeSet(key, padLeft32(0x11), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) @@ -1535,14 +845,14 @@ func TestGetAfterRollback(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs3})) commitAndCheck(t, s) // v3 - val, found := s.Get(evm.EVMStoreKey, key) + val, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) 
require.Equal(t, padLeft32(0x33), val) require.NoError(t, s.Rollback(2)) require.Equal(t, int64(2), s.Version()) - _, found = s.Get(evm.EVMStoreKey, key) + _, found = s.Get(keys.EVMStoreKey, key) require.False(t, found, "key should be deleted at v2") } @@ -1551,192 +861,9 @@ func TestGetWithTruncatedEVMKey(t *testing.T) { defer s.Close() // A key with a valid storage prefix but too short to be parsed. - statePrefix := evm.StateKeyPrefix() + statePrefix := keys.StateKeyPrefix() truncatedKey := append(statePrefix, 0x01, 0x02) - val, found := s.Get(evm.EVMStoreKey, truncatedKey) + val, found := s.Get(keys.EVMStoreKey, truncatedKey) require.False(t, found) require.Nil(t, val) } - -func TestIteratorStartEqualsEnd(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0x20) - key := memiavlStorageKey(addr, slotN(0x01)) - cs := makeChangeSet(key, padLeft32(0x11), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - commitAndCheck(t, s) - - // start == end produces an empty iterator. - iter := s.Iterator(key, key) - require.False(t, iter.Valid()) - require.False(t, iter.First()) - require.NoError(t, iter.Close()) -} - -func TestIteratorInterleavedNextPrev(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0x21) - for i := byte(1); i <= 5; i++ { - key := memiavlStorageKey(addr, slotN(i)) - cs := makeChangeSet(key, padLeft32(i), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - } - commitAndCheck(t, s) - - iter := s.Iterator(nil, nil) - defer iter.Close() - - require.True(t, iter.First()) - val1 := append([]byte(nil), iter.Value()...) - - require.True(t, iter.Next()) - val2 := append([]byte(nil), iter.Value()...) - require.NotEqual(t, val1, val2) - - // Prev should go back to the first key. 
- require.True(t, iter.Prev()) - require.Equal(t, val1, iter.Value()) -} - -func TestIteratorMultipleFirstLastCalls(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0x22) - for i := byte(1); i <= 3; i++ { - key := memiavlStorageKey(addr, slotN(i)) - cs := makeChangeSet(key, padLeft32(i), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - } - commitAndCheck(t, s) - - iter := s.Iterator(nil, nil) - defer iter.Close() - - require.True(t, iter.First()) - firstKey := append([]byte(nil), iter.Key()...) - - require.True(t, iter.Last()) - lastKey := append([]byte(nil), iter.Key()...) - - // Calling First again should return to the first key. - require.True(t, iter.First()) - require.Equal(t, firstKey, iter.Key()) - - // Calling Last again should return to the last key. - require.True(t, iter.Last()) - require.Equal(t, lastKey, iter.Key()) -} - -func TestIteratorByPrefixAfterDeletions(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0x23) - for i := byte(1); i <= 3; i++ { - key := memiavlStorageKey(addr, slotN(i)) - cs := makeChangeSet(key, padLeft32(i*10), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - } - commitAndCheck(t, s) - - // Delete slot 2. - delKey := memiavlStorageKey(addr, slotN(2)) - delCS := makeChangeSet(delKey, nil, true) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{delCS})) - commitAndCheck(t, s) - - // Iterator should see only 2 entries. - prefix := append(evm.StateKeyPrefix(), addr[:]...) 
- iter := s.IteratorByPrefix(prefix) - defer iter.Close() - - count := 0 - for ok := iter.First(); ok; ok = iter.Next() { - count++ - } - require.Equal(t, 2, count, "deleted key should not appear in iterator") -} - -func TestIteratorByPrefixOnReadOnlyStore(t *testing.T) { - s := setupTestStore(t) - - addr := addrN(0x24) - for i := byte(1); i <= 3; i++ { - key := memiavlStorageKey(addr, slotN(i)) - cs := makeChangeSet(key, padLeft32(i), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - } - commitAndCheck(t, s) - - ro, err := s.LoadVersion(0, true) - require.NoError(t, err) - defer ro.Close() - - prefix := append(evm.StateKeyPrefix(), addr[:]...) - iter := ro.IteratorByPrefix(prefix) - defer iter.Close() - - count := 0 - for ok := iter.First(); ok; ok = iter.Next() { - count++ - } - require.Equal(t, 3, count) - require.NoError(t, s.Close()) -} - -func TestIteratorByPrefixNilPrefix(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := addrN(0x25) - key := memiavlStorageKey(addr, slotN(0x01)) - cs := makeChangeSet(key, padLeft32(0x11), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - commitAndCheck(t, s) - - // nil prefix goes through Iterator(nil, nil) path = full scan. - iter := s.IteratorByPrefix(nil) - defer iter.Close() - - count := 0 - for ok := iter.First(); ok; ok = iter.Next() { - count++ - } - require.Equal(t, 1, count, "nil prefix should scan all storage keys") -} - -func TestIteratorOnClosedStore(t *testing.T) { - s := setupTestStore(t) - - addr := addrN(0x26) - key := memiavlStorageKey(addr, slotN(0x01)) - cs := makeChangeSet(key, padLeft32(0x11), false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - commitAndCheck(t, s) - - iter := s.Iterator(nil, nil) - require.True(t, iter.First()) - require.NoError(t, iter.Close()) - - // Close the store, then try a new iterator -- should not panic. 
- require.NoError(t, s.Close()) - - // Note: after Close(), the DB handles are nil. Depending on implementation - // this may panic or return an empty/erroring iterator. We just verify no panic. - func() { - defer func() { - if r := recover(); r != nil { - t.Logf("Iterator on closed store panicked (expected): %v", r) - } - }() - iter2 := s.Iterator(nil, nil) - if iter2 != nil { - _ = iter2.Close() - } - }() -} diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index bd4868a2ab..edcccb86ef 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -8,11 +8,10 @@ import ( "github.com/stretchr/testify/require" commonerrors "github.com/sei-protocol/sei-chain/sei-db/common/errors" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" - "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" ) @@ -34,92 +33,12 @@ func TestCommitStoreImplementsStore(t *testing.T) { require.Len(t, s.RootHash(), 32) } -// ============================================================================= -// Test Helpers -// ============================================================================= - -// memiavlStorageKey builds a memiavl-format storage key for testing external API. -func memiavlStorageKey(addr ktype.Address, slot ktype.Slot) []byte { - internal := ktype.StorageKey(addr, slot) - return evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, internal) -} - -// accountPhysKey returns the physical DB key for an account address. 
-func accountPhysKey(addr ktype.Address) []byte { - return ktype.EVMPhysicalKey(ktype.EVMKeyAccount, addr[:]) -} - -// storagePhysKey returns the physical DB key for a storage slot. -func storagePhysKey(addr ktype.Address, slot ktype.Slot) []byte { - return ktype.EVMPhysicalKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) -} - -// padLeft32 returns a 32-byte big-endian value with the given bytes right-aligned. -func padLeft32(val ...byte) []byte { - var b [32]byte - copy(b[32-len(val):], val) - return b[:] -} - -// makeChangeSet creates a changeset -func makeChangeSet(key, value []byte, delete bool) *proto.NamedChangeSet { - return &proto.NamedChangeSet{ - Name: "evm", - Changeset: proto.ChangeSet{ - Pairs: []*proto.KVPair{ - {Key: key, Value: value, Delete: delete}, - }, - }, - } -} - -// setupTestDB creates a temporary PebbleDB for testing -func setupTestDB(t *testing.T) types.KeyValueDB { - t.Helper() - cfg := pebbledb.DefaultTestConfig(t) - cacheCfg := pebbledb.DefaultTestCacheConfig() - db, err := pebbledb.OpenWithCache(t.Context(), &cfg, &cacheCfg, - threading.NewAdHocPool(), threading.NewAdHocPool()) - require.NoError(t, err) - return db -} - -// setupTestStore creates a minimal test store -func setupTestStore(t *testing.T) *CommitStore { - t.Helper() - s, err := NewCommitStore(t.Context(), DefaultTestConfig(t)) - require.NoError(t, err) - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - return s -} - -// setupTestStoreWithConfig creates a test store with custom config -func setupTestStoreWithConfig(t *testing.T, cfg *Config) *CommitStore { - t.Helper() - dir := t.TempDir() - cfg.DataDir = filepath.Join(dir, flatkvRootDir) - s, err := NewCommitStore(t.Context(), cfg) - require.NoError(t, err) - _, err = s.LoadVersion(0, false) - require.NoError(t, err) - return s -} - -// commitAndCheck commits and asserts no error, returns the version -func commitAndCheck(t *testing.T, s *CommitStore) int64 { - t.Helper() - v, err := s.Commit() - 
require.NoError(t, err) - return v -} - // ============================================================================= // Basic Store Operations // ============================================================================= func TestStoreOpenClose(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -129,7 +48,7 @@ func TestStoreOpenClose(t *testing.T) { } func TestStoreClose(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -152,7 +71,7 @@ func TestStoreCommitVersionAutoIncrement(t *testing.T) { addr := ktype.Address{0xAA} slot := ktype.Slot{0xBB} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) cs := makeChangeSet(key, padLeft32(0xCC), false) @@ -188,13 +107,13 @@ func TestStoreApplyAndCommit(t *testing.T) { addr := ktype.Address{0x11} slot := ktype.Slot{0x22} value := padLeft32(0x33) - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) cs := makeChangeSet(key, value, false) // Apply but not commit - should be readable from pending writes require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - got, found := s.Get(evm.EVMStoreKey, key) + got, found := s.Get(keys.EVMStoreKey, key) require.True(t, found, "should be readable from pending writes") require.Equal(t, value, got) @@ -202,7 +121,7 @@ func TestStoreApplyAndCommit(t *testing.T) { commitAndCheck(t, s) // Still should be readable after commit - got, found = s.Get(evm.EVMStoreKey, key) + got, found = s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, value, got) } @@ -224,7 +143,7 @@ func TestStoreMultipleWrites(t *testing.T) { // Create multiple pairs in one changeset pairs := make([]*proto.KVPair, len(entries)) for i, e := range entries { - key := 
memiavlStorageKey(addr, e.slot) + key := evmStorageKey(addr, e.slot) pairs[i] = &proto.KVPair{Key: key, Value: padLeft32(e.value)} } @@ -240,8 +159,8 @@ func TestStoreMultipleWrites(t *testing.T) { // Verify all entries for _, e := range entries { - key := memiavlStorageKey(addr, e.slot) - got, found := s.Get(evm.EVMStoreKey, key) + key := evmStorageKey(addr, e.slot) + got, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(e.value), got) } @@ -269,7 +188,7 @@ func TestStoreClearsPendingAfterCommit(t *testing.T) { addr := ktype.Address{0xAA} slot := ktype.Slot{0xBB} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) cs := makeChangeSet(key, padLeft32(0xCC), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -295,7 +214,7 @@ func TestStoreVersioning(t *testing.T) { addr := ktype.Address{0x88} slot := ktype.Slot{0x99} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) // Version 1 cs1 := makeChangeSet(key, padLeft32(0x01), false) @@ -312,7 +231,7 @@ func TestStoreVersioning(t *testing.T) { require.Equal(t, int64(2), s.Version()) // Latest value should be from version 2 - got, found := s.Get(evm.EVMStoreKey, key) + got, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0x02), got) } @@ -323,10 +242,10 @@ func TestStorePersistence(t *testing.T) { addr := ktype.Address{0xDD} slot := ktype.Slot{0xEE} value := padLeft32(0xFF) - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) // Write and close - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -339,7 +258,7 @@ func TestStorePersistence(t *testing.T) { require.NoError(t, s1.Close()) // Reopen and verify - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err 
:= NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -347,7 +266,7 @@ func TestStorePersistence(t *testing.T) { require.NoError(t, err) defer s2.Close() - got, found := s2.Get(evm.EVMStoreKey, key) + got, found := s2.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, value, got) @@ -370,7 +289,7 @@ func TestStoreRootHashChanges(t *testing.T) { // Apply changeset addr := ktype.Address{0xAB} slot := ktype.Slot{0xCD} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) cs := makeChangeSet(key, padLeft32(0xEF), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -398,7 +317,7 @@ func TestStoreRootHashChangesOnApply(t *testing.T) { // Apply changeset addr := ktype.Address{0xEE} slot := ktype.Slot{0xFF} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) cs := makeChangeSet(key, padLeft32(0x11), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -414,7 +333,7 @@ func TestStoreRootHashStableAfterCommit(t *testing.T) { addr := ktype.Address{0x12} slot := ktype.Slot{0x34} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) cs := makeChangeSet(key, padLeft32(0x56), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -459,14 +378,14 @@ func TestStoreRollbackNoSnapshot(t *testing.T) { func TestFileLockPreventsDoubleOpen(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s1.LoadVersion(0, false) require.NoError(t, err) - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -486,7 +405,7 @@ func TestFileLockPreventsDoubleOpen(t *testing.T) { // 
============================================================================= func TestClearChangelog(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -512,7 +431,7 @@ func TestClearChangelog(t *testing.T) { // ============================================================================= func TestCloseDBsOnlyIdempotent(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -532,7 +451,7 @@ func TestCloseDBsOnlyIdempotent(t *testing.T) { func TestLoadVersionTargetBeyondWALFails(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -544,7 +463,7 @@ func TestLoadVersionTargetBeyondWALFails(t *testing.T) { require.NoError(t, s1.WriteSnapshot("")) require.NoError(t, s1.Close()) - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -559,7 +478,7 @@ func TestLoadVersionTargetBeyondWALFails(t *testing.T) { func TestReopenReusesWorkingDir(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -575,7 +494,7 @@ func TestReopenReusesWorkingDir(t *testing.T) { _, err = os.Stat(basePath) require.NoError(t, err, "SNAPSHOT_BASE should exist after close") - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -591,7 +510,7 @@ func 
TestReopenReusesWorkingDir(t *testing.T) { // ============================================================================= func TestWalOffsetForVersionFastPath(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -614,7 +533,7 @@ func TestWalOffsetForVersionFastPath(t *testing.T) { } func TestWalOffsetForVersionBeforeWAL(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -631,7 +550,7 @@ func TestWalOffsetForVersionBeforeWAL(t *testing.T) { } func TestWalOffsetForVersionNotFound(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -651,7 +570,7 @@ func TestWalOffsetForVersionNotFound(t *testing.T) { func TestCatchupFromSpecificVersion(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -666,7 +585,7 @@ func TestCatchupFromSpecificVersion(t *testing.T) { require.NoError(t, s1.WriteSnapshot("")) require.NoError(t, s1.Close()) - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -703,7 +622,7 @@ func TestGetMissingKeyReturnsNil(t *testing.T) { s := setupTestStore(t) defer s.Close() - v, ok := s.Get(evm.EVMStoreKey, []byte{0xFF, 0xFF, 0xFF}) + v, ok := s.Get(keys.EVMStoreKey, []byte{0xFF, 0xFF, 0xFF}) require.False(t, ok) require.Nil(t, v) } @@ -712,17 +631,17 @@ func TestGetUnsupportedKeyType_Strict(t *testing.T) { s := setupTestStore(t) defer s.Close() - val, found := 
s.Get(evm.EVMStoreKey, []byte{}) + val, found := s.Get(keys.EVMStoreKey, []byte{}) require.False(t, found) require.Nil(t, val) } func TestGetUnsupportedKeyType_NonStrict(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s := setupTestStoreWithConfig(t, cfg) defer s.Close() - val, found := s.Get(evm.EVMStoreKey, []byte{}) + val, found := s.Get(keys.EVMStoreKey, []byte{}) require.False(t, found) require.Nil(t, val) } @@ -737,16 +656,16 @@ func TestPersistenceAllKeyTypes(t *testing.T) { addr := ktype.Address{0xAA} slot := ktype.Slot{0xBB} - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s1.LoadVersion(0, false) require.NoError(t, err) - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + codeKey := keys.BuildEVMKey(keys.EVMKeyCode, addr[:]) cs := makeChangeSet(storageKey, padLeft32(0x11), false) require.NoError(t, s1.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -759,7 +678,7 @@ func TestPersistenceAllKeyTypes(t *testing.T) { hash := s1.RootHash() require.NoError(t, s1.Close()) - cfg = DefaultTestConfig(t) + cfg = config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s2, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -770,15 +689,15 @@ func TestPersistenceAllKeyTypes(t *testing.T) { require.Equal(t, int64(1), s2.Version()) require.Equal(t, hash, s2.RootHash()) - v, ok := s2.Get(evm.EVMStoreKey, storageKey) + v, ok := s2.Get(keys.EVMStoreKey, storageKey) require.True(t, ok) require.Equal(t, padLeft32(0x11), v) - v, ok = s2.Get(evm.EVMStoreKey, nonceKey) + 
v, ok = s2.Get(keys.EVMStoreKey, nonceKey) require.True(t, ok) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 5}, v) - v, ok = s2.Get(evm.EVMStoreKey, codeKey) + v, ok = s2.Get(keys.EVMStoreKey, codeKey) require.True(t, ok) require.Equal(t, []byte{0x60, 0x80}, v) } @@ -788,14 +707,14 @@ func TestPersistenceAllKeyTypes(t *testing.T) { // ============================================================================= func TestReadOnlyBasicLoadAndRead(t *testing.T) { - s, err := NewCommitStore(t.Context(), DefaultTestConfig(t)) + s, err := NewCommitStore(t.Context(), config.DefaultTestConfig(t)) require.NoError(t, err) _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := ktype.Address{0xAA} slot := ktype.Slot{0xBB} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) value := padLeft32(0xCC) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, value, false)})) @@ -806,7 +725,7 @@ func TestReadOnlyBasicLoadAndRead(t *testing.T) { defer ro.Close() require.Equal(t, int64(1), ro.Version()) - got, found := ro.Get(evm.EVMStoreKey, key) + got, found := ro.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, value, got) require.NotNil(t, ro.RootHash()) @@ -814,7 +733,7 @@ func TestReadOnlyBasicLoadAndRead(t *testing.T) { } func TestReadOnlyLoadFromUnopenedStore(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) writer, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = writer.LoadVersion(0, false) @@ -822,7 +741,7 @@ func TestReadOnlyLoadFromUnopenedStore(t *testing.T) { addr := ktype.Address{0xCC} slot := ktype.Slot{0xDD} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) value := padLeft32(0xEE) require.NoError(t, writer.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, value, false)})) @@ -836,13 +755,13 @@ func TestReadOnlyLoadFromUnopenedStore(t *testing.T) { defer ro.Close() require.Equal(t, int64(1), ro.Version()) - 
got, found := ro.Get(evm.EVMStoreKey, key) + got, found := ro.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, value, got) } func TestReadOnlyAtSpecificVersion(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -850,7 +769,7 @@ func TestReadOnlyAtSpecificVersion(t *testing.T) { addr := ktype.Address{0x11} slot := ktype.Slot{0x22} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) for i := byte(1); i <= 5; i++ { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -864,13 +783,13 @@ func TestReadOnlyAtSpecificVersion(t *testing.T) { defer ro.Close() require.Equal(t, int64(3), ro.Version()) - got, found := ro.Get(evm.EVMStoreKey, key) + got, found := ro.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(3), got) } func TestReadOnlyWriteGuards(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -878,7 +797,7 @@ func TestReadOnlyWriteGuards(t *testing.T) { addr := ktype.Address{0xAA} slot := ktype.Slot{0xBB} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, padLeft32(1), false)})) commitAndCheck(t, s) @@ -899,7 +818,7 @@ func TestReadOnlyWriteGuards(t *testing.T) { } func TestReadOnlyParentWritesDuringReadOnly(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -907,7 +826,7 @@ func TestReadOnlyParentWritesDuringReadOnly(t *testing.T) { addr := ktype.Address{0xAA} slot := ktype.Slot{0xBB} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) require.NoError(t, 
s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, padLeft32(1), false)})) commitAndCheck(t, s) @@ -923,13 +842,13 @@ func TestReadOnlyParentWritesDuringReadOnly(t *testing.T) { require.Equal(t, int64(3), s.Version()) require.Equal(t, int64(1), ro.Version()) - got, found := ro.Get(evm.EVMStoreKey, key) + got, found := ro.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(1), got) } func TestReadOnlyConcurrentInstances(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 2 cfg.SnapshotKeepRecent = 10 s, err := NewCommitStore(t.Context(), cfg) @@ -939,7 +858,7 @@ func TestReadOnlyConcurrentInstances(t *testing.T) { addr := ktype.Address{0x11} slot := ktype.Slot{0x22} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) for i := byte(1); i <= 4; i++ { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -959,8 +878,8 @@ func TestReadOnlyConcurrentInstances(t *testing.T) { require.Equal(t, int64(4), ro1.Version()) require.Equal(t, int64(4), ro2.Version()) - g1, ok1 := ro1.Get(evm.EVMStoreKey, key) - g2, ok2 := ro2.Get(evm.EVMStoreKey, key) + g1, ok1 := ro1.Get(keys.EVMStoreKey, key) + g2, ok2 := ro2.Get(keys.EVMStoreKey, key) require.True(t, ok1) require.True(t, ok2) require.Equal(t, padLeft32(4), g1) @@ -968,7 +887,7 @@ func TestReadOnlyConcurrentInstances(t *testing.T) { } func TestReadOnlyFailureDoesNotAffectParent(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -976,7 +895,7 @@ func TestReadOnlyFailureDoesNotAffectParent(t *testing.T) { addr := ktype.Address{0xAA} slot := ktype.Slot{0xBB} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, padLeft32(1), false)})) commitAndCheck(t, s) @@ -988,13 +907,13 @@ func 
TestReadOnlyFailureDoesNotAffectParent(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), v) - got, found := s.Get(evm.EVMStoreKey, key) + got, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(2), got) } func TestReadOnlyCloseRemovesTempDir(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -1002,7 +921,7 @@ func TestReadOnlyCloseRemovesTempDir(t *testing.T) { addr := ktype.Address{0xAA} slot := ktype.Slot{0xBB} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, padLeft32(1), false)})) commitAndCheck(t, s) @@ -1018,7 +937,7 @@ func TestReadOnlyCloseRemovesTempDir(t *testing.T) { } func TestCleanupOrphanedReadOnlyDirs(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) defer func() { require.NoError(t, s.Close()) }() @@ -1042,7 +961,7 @@ func TestCleanupOrphanedReadOnlyDirs(t *testing.T) { } func TestCleanupOrphanedReadOnlyDirsHoldsWriterLock(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s1, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) defer func() { require.NoError(t, s1.Close()) }() @@ -1060,7 +979,7 @@ func TestCleanupOrphanedReadOnlyDirsHoldsWriterLock(t *testing.T) { func TestLoadVersionReload(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) @@ -1069,7 +988,7 @@ func TestLoadVersionReload(t *testing.T) { require.NoError(t, err) addr := addrN(0x01) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, 
ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(key, padLeft32(0x11), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) @@ -1083,7 +1002,7 @@ func TestLoadVersionReload(t *testing.T) { require.Equal(t, int64(1), s.Version()) require.Equal(t, expectedHash, s.RootHash()) - val, found := s.Get(evm.EVMStoreKey, key) + val, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0x11), val) require.NoError(t, s.Close()) @@ -1093,7 +1012,7 @@ func TestLoadVersionReadOnlyVersion0(t *testing.T) { s := setupTestStore(t) addr := addrN(0x02) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(key, padLeft32(0x22), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) @@ -1104,7 +1023,7 @@ func TestLoadVersionReadOnlyVersion0(t *testing.T) { defer ro.Close() require.Equal(t, int64(1), ro.Version()) - val, found := ro.Get(evm.EVMStoreKey, key) + val, found := ro.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0x22), val) require.NoError(t, s.Close()) @@ -1114,13 +1033,13 @@ func TestLoadVersionReadOnlyDoesNotSeePending(t *testing.T) { s := setupTestStore(t) addr := addrN(0x03) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(key, padLeft32(0x33), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) // Apply a new changeset without committing. 
- key2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x02))) + key2 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x02))) cs2 := makeChangeSet(key2, padLeft32(0x44), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) @@ -1129,11 +1048,11 @@ func TestLoadVersionReadOnlyDoesNotSeePending(t *testing.T) { require.NoError(t, err) defer ro.Close() - _, found := ro.Get(evm.EVMStoreKey, key2) + _, found := ro.Get(keys.EVMStoreKey, key2) require.False(t, found, "read-only store should not see uncommitted data") // But committed data should be visible. - val, found := ro.Get(evm.EVMStoreKey, key) + val, found := ro.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0x33), val) require.NoError(t, s.Close()) @@ -1141,7 +1060,7 @@ func TestLoadVersionReadOnlyDoesNotSeePending(t *testing.T) { func TestLoadVersionEmptyWAL(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) @@ -1158,7 +1077,7 @@ func TestLoadVersionEmptyWAL(t *testing.T) { func TestCloseWithPendingUncommittedWrites(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) s, err := NewCommitStore(t.Context(), cfg) @@ -1167,13 +1086,13 @@ func TestCloseWithPendingUncommittedWrites(t *testing.T) { require.NoError(t, err) addr := addrN(0x10) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(key, padLeft32(0x11), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) // Apply but do NOT commit. 
- key2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x02))) + key2 := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x02))) cs2 := makeChangeSet(key2, padLeft32(0x22), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) @@ -1189,11 +1108,11 @@ func TestCloseWithPendingUncommittedWrites(t *testing.T) { require.Equal(t, int64(1), s2.Version()) - val, found := s2.Get(evm.EVMStoreKey, key) + val, found := s2.Get(keys.EVMStoreKey, key) require.True(t, found, "committed data should persist") require.Equal(t, padLeft32(0x11), val) - _, found = s2.Get(evm.EVMStoreKey, key2) + _, found = s2.Get(keys.EVMStoreKey, key2) require.False(t, found, "uncommitted data should be lost") } @@ -1201,7 +1120,7 @@ func TestCloseDuringConcurrentReadOnlyClone(t *testing.T) { s := setupTestStore(t) addr := addrN(0x11) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(key, padLeft32(0xAA), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) @@ -1213,7 +1132,7 @@ func TestCloseDuringConcurrentReadOnlyClone(t *testing.T) { require.NoError(t, s.Close()) // RO should still function. 
- val, found := ro.Get(evm.EVMStoreKey, key) + val, found := ro.Get(keys.EVMStoreKey, key) require.True(t, found, "RO clone should remain functional after parent close") require.Equal(t, padLeft32(0xAA), val) @@ -1234,7 +1153,7 @@ func TestRootHashAndVersionAfterClose(t *testing.T) { s := setupTestStore(t) addr := addrN(0x12) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(key, padLeft32(0xBB), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) @@ -1258,7 +1177,7 @@ func TestCatchupWithEmptyWAL(t *testing.T) { func TestCatchupSkipsAlreadyCommittedEntries(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) cfg.SnapshotInterval = 2 @@ -1269,7 +1188,7 @@ func TestCatchupSkipsAlreadyCommittedEntries(t *testing.T) { addr := addrN(0x20) for i := 1; i <= 5; i++ { - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) cs := makeChangeSet(key, padLeft32(byte(i)), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) _, err := s.Commit() @@ -1292,7 +1211,7 @@ func TestCatchupSkipsAlreadyCommittedEntries(t *testing.T) { func TestCatchupTargetVersionMiddleOfWAL(t *testing.T) { dir := t.TempDir() - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.DataDir = filepath.Join(dir, flatkvRootDir) cfg.SnapshotInterval = 2 @@ -1304,7 +1223,7 @@ func TestCatchupTargetVersionMiddleOfWAL(t *testing.T) { addr := addrN(0x21) var hashes [6][]byte for i := 1; i <= 5; i++ { - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) cs := 
makeChangeSet(key, padLeft32(byte(i)), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) _, err := s.Commit() @@ -1336,3 +1255,489 @@ func TestWalOffsetForVersionNilChangelog(t *testing.T) { s.changelog = savedChangelog require.NoError(t, s.Close()) } + +func TestCrashRecoverySkewedPerDBVersions(t *testing.T) { + dir := t.TempDir() + cfg := config.DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + cfg.SnapshotInterval = 3 + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) + require.NoError(t, err) + + addr := addrN(0x01) + for i := 1; i <= 6; i++ { + cs := &proto.NamedChangeSet{ + Name: "evm", + Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ + noncePair(addr, uint64(i*10)), + }}, + } + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + _, err := s.Commit() + require.NoError(t, err) + } + require.Equal(t, int64(6), s.Version()) + + // Save the correct per-DB LtHash for accountDB before skewing version. + savedAccountLtHash := s.perDBWorkingLtHash[accountDBDir].Clone() + + // Skew accountDB's local meta version to 4 while keeping the correct + // LtHash. This simulates a crash where the version watermark wasn't + // persisted but the actual data and hash are intact. + batch := s.accountDB.NewBatch() + require.NoError(t, writeLocalMetaToBatch(batch, 4, savedAccountLtHash)) + require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) + _ = batch.Close() + + require.NoError(t, s.Close()) + + // Reopen: loadGlobalMetadata detects version skew and catchup replays. + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s2.LoadVersion(0, false) + require.NoError(t, err) + defer s2.Close() + + require.Equal(t, int64(6), s2.Version()) + verifyLtHashConsistency(t, s2) + + // Data should be correct and store should accept new writes. 
+ cs := &proto.NamedChangeSet{ + Name: "evm", + Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ + noncePair(addr, 999), + }}, + } + require.NoError(t, s2.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + v, err := s2.Commit() + require.NoError(t, err) + require.Equal(t, int64(7), v) +} + +func TestCrashRecoveryGlobalMetadataAheadOfDataDBs(t *testing.T) { + dir := t.TempDir() + cfg := config.DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + cfg.SnapshotInterval = 3 + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) + require.NoError(t, err) + + addr := addrN(0x02) + for i := 1; i <= 5; i++ { + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) + cs := makeChangeSet(key, padLeft32(byte(i*11)), false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + _, err := s.Commit() + require.NoError(t, err) + } + + // Save the correct storageDB per-DB LtHash before skewing. + savedStorageLtHash := s.perDBWorkingLtHash[storageDBDir].Clone() + + // Simulate crash: storageDB only flushed v3 (version watermark behind). 
+ batch := s.storageDB.NewBatch() + require.NoError(t, writeLocalMetaToBatch(batch, 3, savedStorageLtHash)) + require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) + _ = batch.Close() + + require.NoError(t, s.Close()) + + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s2.LoadVersion(0, false) + require.NoError(t, err) + defer s2.Close() + + require.Equal(t, int64(5), s2.Version()) + verifyLtHashConsistency(t, s2) + + for i := 1; i <= 5; i++ { + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) + val, found := s2.Get(keys.EVMStoreKey, key) + require.True(t, found, "slot %d should exist after recovery", i) + require.Equal(t, padLeft32(byte(i*11)), val) + } +} + +func TestCrashRecoveryWALReplayLargeGap(t *testing.T) { + dir := t.TempDir() + cfg := config.DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + cfg.SnapshotInterval = 5 + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) + require.NoError(t, err) + + addr := addrN(0x03) + for i := 1; i <= 20; i++ { + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) + cs := makeChangeSet(key, padLeft32(byte(i)), false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + _, err := s.Commit() + require.NoError(t, err) + } + expectedHash := s.RootHash() + require.NoError(t, s.Close()) + + // Reopen normally -- large WAL gap between snapshot and HEAD. + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s2.LoadVersion(0, false) + require.NoError(t, err) + defer s2.Close() + + require.Equal(t, int64(20), s2.Version()) + require.Equal(t, expectedHash, s2.RootHash()) + verifyLtHashConsistency(t, s2) + + // All 20 storage slots should be readable. 
+ for i := 1; i <= 20; i++ { + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))) + val, found := s2.Get(keys.EVMStoreKey, key) + require.True(t, found, "slot %d should exist", i) + require.Equal(t, padLeft32(byte(i)), val) + } +} + +func TestCrashRecoveryEmptyWALAfterSnapshot(t *testing.T) { + dir := t.TempDir() + cfg := config.DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) + require.NoError(t, err) + + addr := addrN(0x04) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + cs := makeChangeSet(key, padLeft32(0xAA), false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + _, err = s.Commit() + require.NoError(t, err) + + require.NoError(t, s.WriteSnapshot("")) + expectedHash := s.RootHash() + expectedVersion := s.Version() + + // Clear the WAL entirely (simulate WAL lost after snapshot). + require.NoError(t, s.clearChangelog()) + require.NoError(t, s.Close()) + + // Reopen: should work from snapshot alone. + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s2.LoadVersion(0, false) + require.NoError(t, err) + defer s2.Close() + + require.Equal(t, expectedVersion, s2.Version()) + require.Equal(t, expectedHash, s2.RootHash()) + + val, found := s2.Get(keys.EVMStoreKey, key) + require.True(t, found) + require.Equal(t, padLeft32(0xAA), val) + + // Can continue committing after recovery from snapshot-only state. 
+ cs2 := makeChangeSet(key, padLeft32(0xBB), false) + require.NoError(t, s2.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) + v, err := s2.Commit() + require.NoError(t, err) + require.Equal(t, expectedVersion+1, v) +} + +func TestCrashRecoveryCorruptedAccountValueInDB(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := addrN(0x05) + cs := &proto.NamedChangeSet{ + Name: "evm", + Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ + noncePair(addr, 42), + }}, + } + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + _, err := s.Commit() + require.NoError(t, err) + + // Corrupt the account value in the DB with invalid-length data. + batch := s.accountDB.NewBatch() + require.NoError(t, batch.Set(accountPhysKey(addr), []byte{0xDE, 0xAD})) + require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) + _ = batch.Close() + + // Next ApplyChangeSets touching this account should detect the corruption + // during batchReadOldValues. + cs2 := &proto.NamedChangeSet{ + Name: "evm", + Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{noncePair(addr, 99)}}, + } + err = s.ApplyChangeSets([]*proto.NamedChangeSet{cs2}) + require.Error(t, err, "should fail on corrupted AccountValue") + require.Contains(t, err.Error(), "unsupported serialization version") +} + +func TestCrashRecoveryCrashAfterWALBeforeDBCommit(t *testing.T) { + dir := t.TempDir() + cfg := config.DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + cfg.SnapshotInterval = 1 + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) + require.NoError(t, err) + + addr := addrN(0x06) + slot := slotN(0x01) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) + cs := makeChangeSet(key, padLeft32(0x11), false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + _, err = s.Commit() + require.NoError(t, err) + hashAfterV1 := s.RootHash() + + // Now simulate writing v2 to WAL but 
"crashing" before DB commit. + cs2 := makeChangeSet(key, padLeft32(0x22), false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) + + // Write v2 to WAL manually (like Commit step 1). + changelogEntry := proto.ChangelogEntry{ + Version: 2, + Changesets: s.pendingChangeSets, + } + require.NoError(t, s.changelog.Write(changelogEntry)) + + // Do NOT call commitBatches or update global metadata. + // Reset in-memory state to v1 to simulate crash. + s.clearPendingWrites() + s.committedVersion = 1 + require.NoError(t, s.Close()) + + // Reopen: catchup should replay v2 from WAL. + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s2.LoadVersion(0, false) + require.NoError(t, err) + defer s2.Close() + + require.Equal(t, int64(2), s2.Version()) + require.NotEqual(t, hashAfterV1, s2.RootHash(), "hash should differ after v2 replay") + + val, found := s2.Get(keys.EVMStoreKey, key) + require.True(t, found) + require.Equal(t, padLeft32(0x22), val, "v2 value should be present after catchup") + verifyLtHashConsistency(t, s2) +} + +func TestCrashRecoveryLtHashConsistencyAfterAllPaths(t *testing.T) { + dir := t.TempDir() + cfg := config.DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + cfg.SnapshotInterval = 3 + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) + require.NoError(t, err) + + addr := addrN(0x07) + for i := 1; i <= 10; i++ { + pairs := []*proto.KVPair{ + noncePair(addr, uint64(i)), + { + Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(byte(i)))), + Value: padLeft32(byte(i)), + }, + } + cs := &proto.NamedChangeSet{ + Name: "evm", + Changeset: proto.ChangeSet{Pairs: pairs}, + } + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + _, err := s.Commit() + require.NoError(t, err) + } + verifyLtHashConsistency(t, s) + require.NoError(t, s.Close()) + + // Path 1: Normal reopen + s2, err := 
NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s2.LoadVersion(0, false) + require.NoError(t, err) + verifyLtHashConsistency(t, s2) + + // Path 2: Rollback to v6 + require.NoError(t, s2.Rollback(6)) + require.Equal(t, int64(6), s2.Version()) + verifyLtHashConsistency(t, s2) + + // Path 3: Continue writing after rollback + cs := &proto.NamedChangeSet{ + Name: "evm", + Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ + noncePair(addr, 999), + }}, + } + require.NoError(t, s2.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + _, err = s2.Commit() + require.NoError(t, err) + verifyLtHashConsistency(t, s2) + require.NoError(t, s2.Close()) + + // Path 4: Reopen after rollback + new commit + s3, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s3.LoadVersion(0, false) + require.NoError(t, err) + defer s3.Close() + verifyLtHashConsistency(t, s3) +} + +func TestCrashRecoveryCorruptLtHashBlobInMetadata(t *testing.T) { + dir := t.TempDir() + cfg := config.DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) + require.NoError(t, err) + + cs := makeChangeSet( + keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x01), slotN(0x01))), + padLeft32(0x11), false, + ) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + _, err = s.Commit() + require.NoError(t, err) + + // Write garbage to the global _meta/hash key in metadataDB. + batch := s.metadataDB.NewBatch() + require.NoError(t, batch.Set(ktype.MetaLtHashKey, []byte{0xDE, 0xAD, 0xBE, 0xEF})) + require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) + _ = batch.Close() + + require.NoError(t, s.Close()) + + // Reopen should fail with an LtHash unmarshal error. 
+ s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + defer s2.Close() + _, err = s2.LoadVersion(0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid LtHash size") +} + +func TestCrashRecoveryCorruptLtHashBlobInPerDBMeta(t *testing.T) { + dir := t.TempDir() + cfg := config.DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) + require.NoError(t, err) + + cs := makeChangeSet( + keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x02), slotN(0x01))), + padLeft32(0x22), false, + ) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + _, err = s.Commit() + require.NoError(t, err) + + // Write garbage to accountDB's _meta/hash key. + batch := s.accountDB.NewBatch() + require.NoError(t, batch.Set(ktype.MetaLtHashKey, []byte{0x01, 0x02, 0x03})) + require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) + _ = batch.Close() + + require.NoError(t, s.Close()) + + // Reopen should fail with an LtHash unmarshal error from per-DB meta. + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + defer s2.Close() + _, err = s2.LoadVersion(0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid LtHash size") +} + +func TestCrashRecoveryGlobalVersionOverflow(t *testing.T) { + dir := t.TempDir() + cfg := config.DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) + require.NoError(t, err) + + cs := makeChangeSet( + keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addrN(0x03), slotN(0x01))), + padLeft32(0x33), false, + ) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + _, err = s.Commit() + require.NoError(t, err) + + // Write a version value that exceeds math.MaxInt64 to the global metadata. 
+ overflowBytes := make([]byte, 8) + overflowBytes[0] = 0xFF // 0xFF00000000000000 > MaxInt64 + batch := s.metadataDB.NewBatch() + require.NoError(t, batch.Set(ktype.MetaVersionKey, overflowBytes)) + require.NoError(t, batch.Commit(types.WriteOptions{Sync: true})) + _ = batch.Close() + + require.NoError(t, s.Close()) + + // Reopen should fail with an overflow error. + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + defer s2.Close() + _, err = s2.LoadVersion(0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "global version overflow") +} + +func TestInitializeDataDirectories(t *testing.T) { + cfg := config.DefaultConfig() + cfg.DataDir = "/base/flatkv" + cfg.AccountDBConfig.DataDir = "" + cfg.CodeDBConfig.DataDir = "" + cfg.StorageDBConfig.DataDir = "" + cfg.LegacyDBConfig.DataDir = "" + cfg.MetadataDBConfig.DataDir = "" + + InitializeDataDirectories(cfg) + + require.Equal(t, "/base/flatkv/working/account", cfg.AccountDBConfig.DataDir) + require.Equal(t, "/base/flatkv/working/code", cfg.CodeDBConfig.DataDir) + require.Equal(t, "/base/flatkv/working/storage", cfg.StorageDBConfig.DataDir) + require.Equal(t, "/base/flatkv/working/legacy", cfg.LegacyDBConfig.DataDir) + require.Equal(t, "/base/flatkv/working/metadata", cfg.MetadataDBConfig.DataDir) +} + +func TestInitializeDataDirectoriesPreservesExisting(t *testing.T) { + cfg := config.DefaultConfig() + cfg.DataDir = "/base/flatkv" + cfg.AccountDBConfig.DataDir = "/custom/account" + + InitializeDataDirectories(cfg) + + require.Equal(t, "/custom/account", cfg.AccountDBConfig.DataDir, + "existing DataDir should not be overwritten") + require.Equal(t, "/base/flatkv/working/code", cfg.CodeDBConfig.DataDir, + "empty DataDir should be populated") +} diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 7bc5c6ddbe..0240913fc6 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -5,9 
+5,10 @@ import ( "fmt" "sync" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" ) @@ -82,7 +83,7 @@ func (s *CommitStore) flushAllDBs() error { errs := make([]error, 4) var wg sync.WaitGroup wg.Add(4) - for i, db := range []types.KeyValueDB{s.accountDB, s.codeDB, s.storageDB, s.legacyDB} { + for i, db := range s.dataDBs() { s.miscPool.Submit(func() { defer wg.Done() errs[i] = db.Flush() @@ -180,7 +181,7 @@ func (s *CommitStore) commitBatches(version int64) error { // Update in-memory local meta after all commits succeed. for _, p := range pending { - s.localMeta[p.dbDir] = &LocalMeta{ + s.localMeta[p.dbDir] = &ktype.LocalMeta{ CommittedVersion: version, LtHash: s.perDBWorkingLtHash[p.dbDir].Clone(), } @@ -192,7 +193,7 @@ func prepareBatch[T vtype.VType]( db types.KeyValueDB, writes map[string]T, version int64, - localMeta *LocalMeta, + localMeta *ktype.LocalMeta, ltHash *lthash.LtHash, dbName string, ) (types.Batch, error) { @@ -270,26 +271,67 @@ func deserializeBatchResults[T vtype.VType]( return nil } +// rawKVPair is a raw physical key/value pair as stored on disk. +type rawKVPair struct { + Key []byte + Value []byte +} + +// FinalizeImport persists per-DB metadata (version + LtHash) and global +// metadata after all import data has been written. This must be called +// exactly once at the end of an import to make the data durable across restarts. 
+func (s *CommitStore) FinalizeImport(version int64) error { + syncOpt := types.WriteOptions{Sync: true} + for _, ndb := range s.namedDataDBs() { + batch := ndb.db.NewBatch() + if err := writeLocalMetaToBatch(batch, version, s.perDBWorkingLtHash[ndb.dir]); err != nil { + _ = batch.Close() + return fmt.Errorf("%s local meta: %w", ndb.dir, err) + } + if err := batch.Commit(syncOpt); err != nil { + _ = batch.Close() + return fmt.Errorf("%s commit: %w", ndb.dir, err) + } + _ = batch.Close() + s.localMeta[ndb.dir] = &ktype.LocalMeta{ + CommittedVersion: version, + LtHash: s.perDBWorkingLtHash[ndb.dir].Clone(), + } + } + + globalHash := lthash.New() + for _, dir := range dataDBDirs { + globalHash.MixIn(s.perDBWorkingLtHash[dir]) + } + s.workingLtHash = globalHash + s.committedVersion = version + s.committedLtHash = s.workingLtHash.Clone() + if err := s.commitGlobalMetadata(version, s.committedLtHash); err != nil { + return fmt.Errorf("import global metadata: %w", err) + } + return nil +} + // batchReadOldValues returns the prior value for every key in changesByType. // Pending writes are resolved from memory; the rest are batch-read from disk // in parallel. 
-func (s *CommitStore) batchReadOldValues(changesByType map[evm.EVMKeyKind]map[string][]byte) ( +func (s *CommitStore) batchReadOldValues(changesByType map[keys.EVMKeyKind]map[string][]byte) ( storageOld map[string]*vtype.StorageData, accountOld map[string]*vtype.AccountData, codeOld map[string]*vtype.CodeData, legacyOld map[string]*vtype.LegacyData, err error, ) { - storageOld = make(map[string]*vtype.StorageData, len(changesByType[evm.EVMKeyStorage])) - accountOld = make(map[string]*vtype.AccountData, len(changesByType[evm.EVMKeyNonce])+len(changesByType[evm.EVMKeyCodeHash])) - codeOld = make(map[string]*vtype.CodeData, len(changesByType[evm.EVMKeyCode])) - legacyOld = make(map[string]*vtype.LegacyData, len(changesByType[evm.EVMKeyLegacy])) + storageOld = make(map[string]*vtype.StorageData, len(changesByType[keys.EVMKeyStorage])) + accountOld = make(map[string]*vtype.AccountData, len(changesByType[keys.EVMKeyNonce])+len(changesByType[keys.EVMKeyCodeHash])) + codeOld = make(map[string]*vtype.CodeData, len(changesByType[keys.EVMKeyCode])) + legacyOld = make(map[string]*vtype.LegacyData, len(changesByType[keys.EVMKeyLegacy])) - storageBatch := collectPendingReads(s.storageWrites, storageOld, changesByType[evm.EVMKeyStorage]) + storageBatch := collectPendingReads(s.storageWrites, storageOld, changesByType[keys.EVMKeyStorage]) // TODO: add balance changeMap when balance key is supported. 
- accountBatch := collectPendingReads(s.accountWrites, accountOld, changesByType[evm.EVMKeyNonce], changesByType[evm.EVMKeyCodeHash]) - codeBatch := collectPendingReads(s.codeWrites, codeOld, changesByType[evm.EVMKeyCode]) - legacyBatch := collectPendingReads(s.legacyWrites, legacyOld, changesByType[evm.EVMKeyLegacy]) + accountBatch := collectPendingReads(s.accountWrites, accountOld, changesByType[keys.EVMKeyNonce], changesByType[keys.EVMKeyCodeHash]) + codeBatch := collectPendingReads(s.codeWrites, codeOld, changesByType[keys.EVMKeyCode]) + legacyBatch := collectPendingReads(s.legacyWrites, legacyOld, changesByType[keys.EVMKeyLegacy]) type readJob struct { batch map[string]types.BatchGetResult diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 6817428f99..4e164f5796 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -5,12 +5,14 @@ import ( "testing" "time" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" - "github.com/stretchr/testify/require" ) // ============================================================================= @@ -28,8 +30,8 @@ func TestStoreNonStorageKeys(t *testing.T) { 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00} // Write non-storage keys (now supported with AccountValue) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + codeHashKey := 
keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) // Write nonce (8 bytes) cs1 := makeChangeSet(nonceKey, []byte{0, 0, 0, 0, 0, 0, 0, 17}, false) @@ -42,12 +44,12 @@ func TestStoreNonStorageKeys(t *testing.T) { commitAndCheck(t, s) // Nonce should be found - nonceValue, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceValue, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "nonce should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 17}, nonceValue) // CodeHash should be found - codeHashValue, found := s.Get(evm.EVMStoreKey, codeHashKey) + codeHashValue, found := s.Get(keys.EVMStoreKey, codeHashKey) require.True(t, found, "codehash should be found") require.Equal(t, codeHash[:], codeHashValue) } @@ -64,17 +66,17 @@ func TestStoreWriteAllDBs(t *testing.T) { pairs := []*proto.KVPair{ // Storage key { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)), + Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)), Value: padLeft32(0x11, 0x22), }, // Account nonce key { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 42}, // nonce = 42 }, // Code key - keyed by address, not codeHash { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyCode, addr[:]), Value: []byte{0x60, 0x60, 0x60}, // some bytecode }, // Legacy key (codeSize: 0x09 || addr) @@ -95,36 +97,31 @@ func TestStoreWriteAllDBs(t *testing.T) { commitAndCheck(t, s) // Verify all 4 DBs have their LocalMeta updated to version 1 (persisted) - for name, db := range map[string]types.KeyValueDB{ - "storageDB": s.storageDB, - "accountDB": s.accountDB, - "codeDB": s.codeDB, - "legacyDB": s.legacyDB, - } { - raw, err := db.Get(metaVersionKey) - require.NoError(t, err, "%s meta version read", name) - require.Equal(t, int64(1), int64(binary.BigEndian.Uint64(raw)), "%s persisted version", name) + for _, ndb := range 
s.namedDataDBs() { + raw, err := ndb.db.Get(ktype.MetaVersionKey) + require.NoError(t, err, "%s meta version read", ndb.dir) + require.Equal(t, int64(1), int64(binary.BigEndian.Uint64(raw)), "%s persisted version", ndb.dir) } // Verify storage data was written (via Store.Get which deserializes) - storageMemiavlKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) - storageValue, found := s.Get(evm.EVMStoreKey, storageMemiavlKey) + storageMemiavlKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) + storageValue, found := s.Get(keys.EVMStoreKey, storageMemiavlKey) require.True(t, found, "Storage should be found") require.Equal(t, padLeft32(0x11, 0x22), storageValue) // Verify account and code data was written - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceValue, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + nonceValue, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "Nonce should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 42}, nonceValue) - codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - codeValue, found := s.Get(evm.EVMStoreKey, codeKey) + codeKey := keys.BuildEVMKey(keys.EVMKeyCode, addr[:]) + codeValue, found := s.Get(keys.EVMStoreKey, codeKey) require.True(t, found, "Code should be found") require.Equal(t, []byte{0x60, 0x60, 0x60}, codeValue) // Verify legacy data persisted (via Store.Get which deserializes) - legacyVal, found := s.Get(evm.EVMStoreKey, legacyKey) + legacyVal, found := s.Get(keys.EVMStoreKey, legacyKey) require.True(t, found, "Legacy should be found") require.Equal(t, []byte{0x00, 0x03}, legacyVal) } @@ -146,7 +143,7 @@ func TestStoreWriteEmptyCommit(t *testing.T) { // Commit version 2 with storage write only addr := ktype.Address{0x99} slot := ktype.Slot{0x88} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) cs := makeChangeSet(key, padLeft32(0x77), 
false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) @@ -165,19 +162,19 @@ func TestStoreWriteAccountAndCode(t *testing.T) { // Note: Code is keyed by address (not codeHash) per x/evm/types/keys.go pairs := []*proto.KVPair{ { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr1[:]), + Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr1[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 1}, // nonce = 1 }, { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr2[:]), + Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr2[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 2}, // nonce = 2 }, { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr1[:]), + Key: keys.BuildEVMKey(keys.EVMKeyCode, addr1[:]), Value: []byte{0x60, 0x80}, }, { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr2[:]), + Key: keys.BuildEVMKey(keys.EVMKeyCode, addr2[:]), Value: []byte{0x60, 0xA0}, }, } @@ -195,24 +192,24 @@ func TestStoreWriteAccountAndCode(t *testing.T) { requireAllLocalMetaAt(t, s, 1) // Verify account data was written - nonceKey1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr1[:]) - nonce1, found := s.Get(evm.EVMStoreKey, nonceKey1) + nonceKey1 := keys.BuildEVMKey(keys.EVMKeyNonce, addr1[:]) + nonce1, found := s.Get(keys.EVMStoreKey, nonceKey1) require.True(t, found, "Nonce1 should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 1}, nonce1) - nonceKey2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr2[:]) - nonce2, found := s.Get(evm.EVMStoreKey, nonceKey2) + nonceKey2 := keys.BuildEVMKey(keys.EVMKeyNonce, addr2[:]) + nonce2, found := s.Get(keys.EVMStoreKey, nonceKey2) require.True(t, found, "Nonce2 should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 2}, nonce2) // Verify code data was written - codeKey1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr1[:]) - code1, found := s.Get(evm.EVMStoreKey, codeKey1) + codeKey1 := keys.BuildEVMKey(keys.EVMKeyCode, addr1[:]) + code1, found := s.Get(keys.EVMStoreKey, codeKey1) require.True(t, found, "Code1 should be 
found") require.Equal(t, []byte{0x60, 0x80}, code1) - codeKey2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr2[:]) - code2, found := s.Get(evm.EVMStoreKey, codeKey2) + codeKey2 := keys.BuildEVMKey(keys.EVMKeyCode, addr2[:]) + code2, found := s.Get(keys.EVMStoreKey, codeKey2) require.True(t, found, "Code2 should be found") require.Equal(t, []byte{0x60, 0xA0}, code2) @@ -233,15 +230,15 @@ func TestStoreWriteDelete(t *testing.T) { // Note: Code is keyed by address per x/evm/types/keys.go pairs := []*proto.KVPair{ { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)), + Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)), Value: padLeft32(0x11), }, { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 1}, }, { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyCode, addr[:]), Value: []byte{0x60}, }, } @@ -257,15 +254,15 @@ func TestStoreWriteDelete(t *testing.T) { // For account, "delete" means setting fields to zero in AccountValue deletePairs := []*proto.KVPair{ { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)), + Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)), Delete: true, }, { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Delete: true, // Sets nonce to 0 in AccountValue }, { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyCode, addr[:]), Delete: true, }, } @@ -283,14 +280,14 @@ func TestStoreWriteDelete(t *testing.T) { // Nonce was the only account field written (no codehash). After delete, // all fields are zero so the accountDB row is physically deleted. 
- nonceKeyDel := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceValue, found := s.Get(evm.EVMStoreKey, nonceKeyDel) + nonceKeyDel := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + nonceValue, found := s.Get(keys.EVMStoreKey, nonceKeyDel) require.False(t, found, "nonce should not be found after account row deletion") require.Nil(t, nonceValue) // Verify code is deleted - codeKeyDel := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - _, found = s.Get(evm.EVMStoreKey, codeKeyDel) + codeKeyDel := keys.BuildEVMKey(keys.EVMKeyCode, addr[:]) + _, found = s.Get(keys.EVMStoreKey, codeKeyDel) require.False(t, found, "code should be deleted") requireAllLocalMetaAt(t, s, 2) @@ -307,11 +304,11 @@ func TestAccountValueStorage(t *testing.T) { // AccountValue stores: balance(32) || nonce(8) || codehash(32) pairs := []*proto.KVPair{ { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 42}, // nonce = 42 }, { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]), Value: expectedCodeHash[:], // 32-byte codehash }, } @@ -343,13 +340,13 @@ func TestAccountValueStorage(t *testing.T) { require.Equal(t, &zeroBalance, ad.GetBalance(), "Balance should be zero") // Get method should return individual fields - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceValue, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + nonceValue, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "Nonce should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 42}, nonceValue, "Nonce should be 42") - codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - codeHashValue, found := s.Get(evm.EVMStoreKey, codeHashKey) + codeHashKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) + codeHashValue, found := s.Get(keys.EVMStoreKey, codeHashKey) 
require.True(t, found, "CodeHash should be found") require.Equal(t, expectedCodeHash[:], codeHashValue, "CodeHash should match") } @@ -380,7 +377,7 @@ func TestStoreWriteLegacyKeys(t *testing.T) { require.Equal(t, int64(1), s.localMeta[legacyDBDir].CommittedVersion) // Verify data persisted (via Store.Get which deserializes) - got, found := s.Get(evm.EVMStoreKey, codeSizeKey) + got, found := s.Get(keys.EVMStoreKey, codeSizeKey) require.True(t, found) require.Equal(t, codeSizeValue, got) } @@ -395,17 +392,17 @@ func TestStoreWriteLegacyAndOptimizedKeys(t *testing.T) { pairs := []*proto.KVPair{ // Storage (optimized) { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)), + Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)), Value: padLeft32(0x11, 0x22), }, // Nonce (optimized) { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 42}, }, // Code (optimized) { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyCode, addr[:]), Value: []byte{0x60, 0x60, 0x60}, }, // CodeSize → legacy (0x09 || addr) @@ -427,7 +424,7 @@ func TestStoreWriteLegacyAndOptimizedKeys(t *testing.T) { // Verify legacy data persisted (via Store.Get which deserializes) codeSizeKey := append([]byte{0x09}, addr[:]...) 
- got, found := s.Get(evm.EVMStoreKey, codeSizeKey) + got, found := s.Get(keys.EVMStoreKey, codeSizeKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x03}, got) } @@ -445,7 +442,7 @@ func TestStoreWriteDeleteLegacyKey(t *testing.T) { commitAndCheck(t, s) // Verify exists - got, found := s.Get(evm.EVMStoreKey, legacyKey) + got, found := s.Get(keys.EVMStoreKey, legacyKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x10}, got) @@ -455,7 +452,7 @@ func TestStoreWriteDeleteLegacyKey(t *testing.T) { commitAndCheck(t, s) // Should not be found - _, found = s.Get(evm.EVMStoreKey, legacyKey) + _, found = s.Get(keys.EVMStoreKey, legacyKey) require.False(t, found) } @@ -504,7 +501,7 @@ func TestStoreLegacyEmptyCommitLocalMeta(t *testing.T) { func TestStoreFsyncConfig(t *testing.T) { t.Run("DefaultConfig", func(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) store, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = store.LoadVersion(0, false) @@ -517,7 +514,7 @@ func TestStoreFsyncConfig(t *testing.T) { }) t.Run("FsyncDisabled", func(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.Fsync = false store, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -527,7 +524,7 @@ func TestStoreFsyncConfig(t *testing.T) { addr := ktype.Address{0xAA} slot := ktype.Slot{0xBB} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) // Write and commit with fsync disabled cs := makeChangeSet(key, padLeft32(0xCC), false) @@ -535,7 +532,7 @@ func TestStoreFsyncConfig(t *testing.T) { commitAndCheck(t, store) // Data should be readable - got, found := store.Get(evm.EVMStoreKey, key) + got, found := store.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, padLeft32(0xCC), got) @@ -549,7 +546,7 @@ func TestStoreFsyncConfig(t *testing.T) { // ============================================================================= func 
TestAutoSnapshotTriggeredByInterval(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 5 cfg.SnapshotKeepRecent = 2 s, err := NewCommitStore(t.Context(), cfg) @@ -572,7 +569,7 @@ func TestAutoSnapshotTriggeredByInterval(t *testing.T) { } func TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 10 cfg.SnapshotKeepRecent = 2 s, err := NewCommitStore(t.Context(), cfg) @@ -601,7 +598,7 @@ func TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { } func TestAutoSnapshotDisabledWhenIntervalZero(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.SnapshotInterval = 0 s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -640,8 +637,8 @@ func TestMultipleApplyChangeSetsBeforeCommit(t *testing.T) { slot1 := ktype.Slot{0x01} slot2 := ktype.Slot{0x02} - key1 := memiavlStorageKey(addr, slot1) - key2 := memiavlStorageKey(addr, slot2) + key1 := evmStorageKey(addr, slot1) + key2 := evmStorageKey(addr, slot2) cs1 := makeChangeSet(key1, padLeft32(0x11), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) @@ -651,11 +648,11 @@ func TestMultipleApplyChangeSetsBeforeCommit(t *testing.T) { commitAndCheck(t, s) - v1, ok := s.Get(evm.EVMStoreKey, key1) + v1, ok := s.Get(keys.EVMStoreKey, key1) require.True(t, ok) require.Equal(t, padLeft32(0x11), v1) - v2, ok := s.Get(evm.EVMStoreKey, key2) + v2, ok := s.Get(keys.EVMStoreKey, key2) require.True(t, ok) require.Equal(t, padLeft32(0x22), v2) } @@ -665,8 +662,8 @@ func TestMultipleApplyAccountFieldsPreservesOther(t *testing.T) { defer s.Close() addr := ktype.Address{0xBB} - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + codeHashKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, 
addr[:]) codeHash := vtype.CodeHash{0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -680,11 +677,11 @@ func TestMultipleApplyAccountFieldsPreservesOther(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) commitAndCheck(t, s) - nonceVal, ok := s.Get(evm.EVMStoreKey, nonceKey) + nonceVal, ok := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, ok) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 42}, nonceVal, "nonce should be preserved after codehash update") - chVal, ok := s.Get(evm.EVMStoreKey, codeHashKey) + chVal, ok := s.Get(keys.EVMStoreKey, codeHashKey) require.True(t, ok) require.Equal(t, codeHash[:], chVal) } @@ -695,7 +692,7 @@ func TestMultipleApplyAccountFieldsPreservesOther(t *testing.T) { func TestLtHashDeterministicAcrossReopen(t *testing.T) { writeAndGetHash := func() []byte { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -721,7 +718,7 @@ func TestLtHashUpdatedByDelete(t *testing.T) { addr := ktype.Address{0xDD} slot := ktype.Slot{0xEE} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) cs1 := makeChangeSet(key, padLeft32(0xFF), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) @@ -741,8 +738,8 @@ func TestLtHashAccountFieldMerge(t *testing.T) { defer s.Close() addr := ktype.Address{0xCC} - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + codeHashKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) codeHash := vtype.CodeHash{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, @@ -777,7 +774,7 @@ func 
TestOverwriteSameKeyInSingleBlock(t *testing.T) { addr := ktype.Address{0xEE} slot := ktype.Slot{0xFF} - key := memiavlStorageKey(addr, slot) + key := evmStorageKey(addr, slot) pairs := []*proto.KVPair{ {Key: key, Value: padLeft32(0x01)}, @@ -790,7 +787,7 @@ func TestOverwriteSameKeyInSingleBlock(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) - v, ok := s.Get(evm.EVMStoreKey, key) + v, ok := s.Get(keys.EVMStoreKey, key) require.True(t, ok) require.Equal(t, padLeft32(0x02), v, "last write should win") } @@ -819,7 +816,7 @@ func TestEmptyCommitAdvancesVersion(t *testing.T) { // ============================================================================= func TestStoreFsyncEnabled(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) cfg.Fsync = true s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) @@ -832,7 +829,7 @@ func TestStoreFsyncEnabled(t *testing.T) { commitStorageEntry(t, s, ktype.Address{0x01}, ktype.Slot{0x01}, []byte{0x01}) require.Equal(t, int64(1), s.Version()) - v, ok := s.Get(evm.EVMStoreKey, memiavlStorageKey(ktype.Address{0x01}, ktype.Slot{0x01})) + v, ok := s.Get(keys.EVMStoreKey, evmStorageKey(ktype.Address{0x01}, ktype.Slot{0x01})) require.True(t, ok) require.Equal(t, padLeft32(0x01), v) } @@ -842,7 +839,7 @@ func TestStoreFsyncEnabled(t *testing.T) { // ============================================================================= func TestLastSnapshotTimeUpdated(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -863,7 +860,7 @@ func TestLastSnapshotTimeUpdated(t *testing.T) { // ============================================================================= func TestWALRecordsChangesets(t *testing.T) { - cfg := DefaultTestConfig(t) + cfg := config.DefaultTestConfig(t) s, err := NewCommitStore(t.Context(), cfg) 
require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -916,23 +913,23 @@ func TestDeleteSemanticsCodehashAsymmetry(t *testing.T) { commitAndCheck(t, s) // After deleting all account fields, the row is physically deleted (Account Row GC). - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + nonceVal, found := s.Get(keys.EVMStoreKey, nonceKey) require.False(t, found, "nonce should not be found after all-zero account row deletion") require.Nil(t, nonceVal) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - chVal, found := s.Get(evm.EVMStoreKey, chKey) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) + chVal, found := s.Get(keys.EVMStoreKey, chKey) require.False(t, found, "codehash should not be found after row deletion") require.Nil(t, chVal) - hasCodeHash := s.Has(evm.EVMStoreKey, chKey) + hasCodeHash := s.Has(keys.EVMStoreKey, chKey) require.False(t, hasCodeHash, "Has(codehash) should be false after delete") - hasNonce := s.Has(evm.EVMStoreKey, nonceKey) + hasNonce := s.Has(keys.EVMStoreKey, nonceKey) require.False(t, hasNonce, "Has(nonce) should be false after row deletion") - codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - _, found = s.Get(evm.EVMStoreKey, codeKey) + codeKey := keys.BuildEVMKey(keys.EVMKeyCode, addr[:]) + _, found = s.Get(keys.EVMStoreKey, codeKey) require.False(t, found, "code should be physically deleted") _, err := s.accountDB.Get(accountPhysKey(addr)) @@ -959,8 +956,8 @@ func TestCrossApplyChangeSetsOrdering(t *testing.T) { commitAndCheck(t, s) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) - _, found := s.Get(evm.EVMStoreKey, key) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) + _, found := s.Get(keys.EVMStoreKey, key) require.False(t, found, "write-then-delete: key should be gone") }) @@ -983,8 +980,8 @@ func 
TestCrossApplyChangeSetsOrdering(t *testing.T) { commitAndCheck(t, s) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) - val, found := s.Get(evm.EVMStoreKey, key) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) + val, found := s.Get(keys.EVMStoreKey, key) require.True(t, found, "delete-then-write: key should exist") require.Equal(t, padLeft32(0xBB), val) }) @@ -1092,7 +1089,7 @@ func TestApplyChangeSetsInvalidNonceLength(t *testing.T) { Changeset: proto.ChangeSet{ Pairs: []*proto.KVPair{ { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Value: []byte{0x01, 0x02, 0x03}, // 3 bytes, expected 8 }, }, @@ -1113,7 +1110,7 @@ func TestApplyChangeSetsInvalidCodehashLength(t *testing.T) { Changeset: proto.ChangeSet{ Pairs: []*proto.KVPair{ { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), + Key: keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]), Value: []byte{0x01, 0x02}, // 2 bytes, expected 32 }, }, @@ -1143,8 +1140,8 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { commitAndCheck(t, s) // With Account Row GC, nonce-only account becomes all-zero → row deleted - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - _, found := s.Get(evm.EVMStoreKey, key) + key := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + _, found := s.Get(keys.EVMStoreKey, key) require.False(t, found, "nonce-only account should be deleted after nonce delete") }) @@ -1165,8 +1162,8 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { commitAndCheck(t, s) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - val, found := s.Get(evm.EVMStoreKey, key) + key := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + val, found := s.Get(keys.EVMStoreKey, key) require.True(t, found) require.Equal(t, uint64(99), bytesToNonce(val)) }) @@ -1184,8 +1181,8 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { commitAndCheck(t, s) - key := 
evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - _, found := s.Get(evm.EVMStoreKey, key) + key := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) + _, found := s.Get(keys.EVMStoreKey, key) require.False(t, found, "codehash-only account: delete → all-zero → row deleted") }) @@ -1206,8 +1203,8 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { commitAndCheck(t, s) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - val, found := s.Get(evm.EVMStoreKey, key) + key := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) + val, found := s.Get(keys.EVMStoreKey, key) require.True(t, found, "codehash should be restored after delete-then-write") expected := codeHashN(0xBB) require.Equal(t, expected[:], val) @@ -1279,8 +1276,8 @@ func TestAccountRowDeletedWhenAllFieldsZero(t *testing.T) { defer s.Close() addr := addrN(0xA1) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + chKey := keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]) ch := codeHashN(0xBB) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -1296,11 +1293,11 @@ func TestAccountRowDeletedWhenAllFieldsZero(t *testing.T) { _, err := s.accountDB.Get(accountPhysKey(addr)) require.Error(t, err, "accountDB row should be physically deleted") - nonceVal, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceVal, found := s.Get(keys.EVMStoreKey, nonceKey) require.False(t, found, "nonce should not be found after row deletion") require.Nil(t, nonceVal) - chVal, found := s.Get(evm.EVMStoreKey, chKey) + chVal, found := s.Get(keys.EVMStoreKey, chKey) require.False(t, found, "codehash should not be found after row deletion") require.Nil(t, chVal) } @@ -1310,7 +1307,7 @@ func TestAccountRowPersistsWhenPartiallyZero(t *testing.T) { defer s.Close() addr := addrN(0xA2) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceKey := 
keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) ch := codeHashN(0xCC) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -1327,7 +1324,7 @@ func TestAccountRowPersistsWhenPartiallyZero(t *testing.T) { require.NoError(t, err, "accountDB row should still exist after partial delete") require.NotNil(t, raw) - nonceVal, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceVal, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found, "nonce should still be readable") require.Equal(t, nonceBytes(7), nonceVal) } @@ -1337,7 +1334,7 @@ func TestAccountRowDeleteThenRecreate(t *testing.T) { defer s.Close() addr := addrN(0xA3) - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ namedCS(noncePair(addr, 10)), @@ -1361,7 +1358,7 @@ func TestAccountRowDeleteThenRecreate(t *testing.T) { require.NoError(t, err, "row should be recreated") require.NotNil(t, raw) - nonceVal, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceVal, found := s.Get(keys.EVMStoreKey, nonceKey) require.True(t, found) require.Equal(t, nonceBytes(99), nonceVal) } @@ -1395,8 +1392,8 @@ func TestAccountRowGCOnWriteZero(t *testing.T) { _, err := s.accountDB.Get(accountPhysKey(addr)) require.Error(t, err, "accountDB row should be GC'd when write-zero makes account empty") - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - _, found := s.Get(evm.EVMStoreKey, nonceKey) + nonceKey := keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]) + _, found := s.Get(keys.EVMStoreKey, nonceKey) require.False(t, found, "nonce should not be found after write-zero GC") } @@ -1478,7 +1475,7 @@ func countLiveEntries(t *testing.T, db types.KeyValueDB) int { count := 0 for iter.First(); iter.Valid(); iter.Next() { - if isMetaKey(iter.Key()) { + if ktype.IsMetaKey(iter.Key()) { continue } count++ @@ -1549,7 +1546,7 @@ func TestApplyChangeSetsMixedEVMAndNonEVM(t *testing.T) { addr := 
addrN(0xAA) slot := slotN(0x01) - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) evmCS := &proto.NamedChangeSet{ Name: "evm", @@ -1570,7 +1567,7 @@ func TestApplyChangeSetsMixedEVMAndNonEVM(t *testing.T) { require.Len(t, s.storageWrites, 1) // The EVM value should be readable via pending writes. - val, found := s.Get(evm.EVMStoreKey, storageKey) + val, found := s.Get(keys.EVMStoreKey, storageKey) require.True(t, found) require.Equal(t, padLeft32(0x42), val) @@ -1606,7 +1603,7 @@ func TestApplyChangeSetsOnReadOnlyStore(t *testing.T) { s := setupTestStore(t) addr := addrN(0x01) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(key, padLeft32(0x11), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) @@ -1657,7 +1654,7 @@ func TestApplyChangeSetsErrorRecoveryPartialState(t *testing.T) { addr := addrN(0xBB) slot := slotN(0x01) - storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slot)) + storageKey := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) // First pair: valid storage write // Second pair: invalid nonce length (triggers error) @@ -1665,7 +1662,7 @@ func TestApplyChangeSetsErrorRecoveryPartialState(t *testing.T) { Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ {Key: storageKey, Value: padLeft32(0xAA)}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Value: []byte{0x01, 0x02}}, // wrong length + {Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), Value: []byte{0x01, 0x02}}, // wrong length }}, } @@ -1727,7 +1724,7 @@ func TestDoubleCommitNoApplyBetween(t *testing.T) { defer s.Close() addr := addrN(0x01) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) 
+ key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(key, padLeft32(0x11), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -1747,7 +1744,7 @@ func TestCommitOnReadOnlyStore(t *testing.T) { s := setupTestStore(t) addr := addrN(0x01) - key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) + key := keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slotN(0x01))) cs := makeChangeSet(key, padLeft32(0x11), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) diff --git a/sei-db/state_db/sc/flatkv/test_helper.go b/sei-db/state_db/sc/flatkv/test_helper.go new file mode 100644 index 0000000000..a3640cae14 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/test_helper.go @@ -0,0 +1,186 @@ +package flatkv + +import ( + "encoding/binary" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" + "github.com/stretchr/testify/require" + "path/filepath" + "testing" +) + +// ============================================================================= +// Test Helpers +// ============================================================================= + +// evmStorageKey builds a prefix-encoded storage key for the external Get/Has API. +func evmStorageKey(addr ktype.Address, slot ktype.Slot) []byte { + internal := ktype.StorageKey(addr, slot) + return keys.BuildEVMKey(keys.EVMKeyStorage, internal) +} + +// accountPhysKey returns the physical DB key for an account address. 
+func accountPhysKey(addr ktype.Address) []byte { + return ktype.EVMPhysicalKey(ktype.EVMKeyAccount, addr[:]) +} + +// storagePhysKey returns the physical DB key for a storage slot. +func storagePhysKey(addr ktype.Address, slot ktype.Slot) []byte { + return ktype.EVMPhysicalKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)) +} + +// padLeft32 returns a 32-byte big-endian value with the given bytes right-aligned. +func padLeft32(val ...byte) []byte { + var b [32]byte + copy(b[32-len(val):], val) + return b[:] +} + +// makeChangeSet creates a changeset +func makeChangeSet(key, value []byte, delete bool) *proto.NamedChangeSet { + return &proto.NamedChangeSet{ + Name: "evm", + Changeset: proto.ChangeSet{ + Pairs: []*proto.KVPair{ + {Key: key, Value: value, Delete: delete}, + }, + }, + } +} + +// setupTestDB creates a temporary PebbleDB for testing +func setupTestDB(t *testing.T) types.KeyValueDB { + t.Helper() + cfg := pebbledb.DefaultTestConfig(t) + cacheCfg := pebbledb.DefaultTestCacheConfig() + db, err := pebbledb.OpenWithCache(t.Context(), &cfg, &cacheCfg, + threading.NewAdHocPool(), threading.NewAdHocPool()) + require.NoError(t, err) + return db +} + +// setupTestStore creates a minimal test store +func setupTestStore(t *testing.T) *CommitStore { + t.Helper() + s, err := NewCommitStore(t.Context(), config.DefaultTestConfig(t)) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) + require.NoError(t, err) + return s +} + +// setupTestStoreWithConfig creates a test store with custom config +func setupTestStoreWithConfig(t *testing.T, cfg *config.Config) *CommitStore { + t.Helper() + dir := t.TempDir() + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) + require.NoError(t, err) + return s +} + +// commitAndCheck commits and asserts no error, returns the version +func commitAndCheck(t *testing.T, s *CommitStore) int64 { + t.Helper() + v, err := 
s.Commit() + require.NoError(t, err) + return v +} + +// ---------- helpers to build prefix-encoded changeset pairs ---------- +func nonceBytes(n uint64) []byte { + b := make([]byte, vtype.NonceLen) + binary.BigEndian.PutUint64(b, n) + return b +} + +func addrN(n byte) ktype.Address { + var a ktype.Address + a[19] = n + return a +} + +func slotN(n byte) ktype.Slot { + var s ktype.Slot + s[31] = n + return s +} + +func codeHashN(n byte) vtype.CodeHash { + var h vtype.CodeHash + for i := range h { + h[i] = n + } + return h +} + +func noncePair(addr ktype.Address, nonce uint64) *proto.KVPair { + return &proto.KVPair{ + Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), + Value: nonceBytes(nonce), + } +} + +func codeHashPair(addr ktype.Address, ch vtype.CodeHash) *proto.KVPair { + return &proto.KVPair{ + Key: keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]), + Value: ch[:], + } +} + +func codePair(addr ktype.Address, bytecode []byte) *proto.KVPair { + return &proto.KVPair{ + Key: keys.BuildEVMKey(keys.EVMKeyCode, addr[:]), + Value: bytecode, + } +} + +func codeDeletePair(addr ktype.Address) *proto.KVPair { + return &proto.KVPair{ + Key: keys.BuildEVMKey(keys.EVMKeyCode, addr[:]), + Delete: true, + } +} + +func storagePair(addr ktype.Address, slot ktype.Slot, val []byte) *proto.KVPair { + return &proto.KVPair{ + Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)), + Value: padLeft32(val...), + } +} + +func storageDeletePair(addr ktype.Address, slot ktype.Slot) *proto.KVPair { + return &proto.KVPair{ + Key: keys.BuildEVMKey(keys.EVMKeyStorage, ktype.StorageKey(addr, slot)), + Delete: true, + } +} + +func nonceDeletePair(addr ktype.Address) *proto.KVPair { + return &proto.KVPair{ + Key: keys.BuildEVMKey(keys.EVMKeyNonce, addr[:]), + Delete: true, + } +} + +func codeHashDeletePair(addr ktype.Address) *proto.KVPair { + return &proto.KVPair{ + Key: keys.BuildEVMKey(keys.EVMKeyCodeHash, addr[:]), + Delete: true, + } +} + +func namedCS(pairs ...*proto.KVPair) 
*proto.NamedChangeSet { + return &proto.NamedChangeSet{ + Name: "evm", + Changeset: proto.ChangeSet{Pairs: pairs}, + } +} diff --git a/sei-db/state_db/sc/memiavl/multitree.go b/sei-db/state_db/sc/memiavl/multitree.go index 0c266ca3e4..6e9101e339 100644 --- a/sei-db/state_db/sc/memiavl/multitree.go +++ b/sei-db/state_db/sc/memiavl/multitree.go @@ -16,7 +16,7 @@ import ( "golang.org/x/time/rate" "github.com/sei-protocol/sei-chain/sei-db/common/errors" - commonevm "github.com/sei-protocol/sei-chain/sei-db/common/evm" + commonevm "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/wal" diff --git a/sei-db/state_db/ss/composite/store.go b/sei-db/state_db/ss/composite/store.go index cbdcbad8a9..8598f3191a 100644 --- a/sei-db/state_db/ss/composite/store.go +++ b/sei-db/state_db/ss/composite/store.go @@ -1,15 +1,18 @@ package composite import ( + "encoding/binary" "fmt" "path/filepath" "sync" - commonevm "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" "github.com/sei-protocol/sei-chain/sei-db/state_db/ss/backend" "github.com/sei-protocol/sei-chain/sei-db/state_db/ss/cosmos" "github.com/sei-protocol/sei-chain/sei-db/state_db/ss/evm" @@ -258,30 +261,105 @@ func stripEVMFromChangesets(changesets []*proto.NamedChangeSet) []*proto.NamedCh return stripped } -func normalizeSnapshotNode(node types.SnapshotNode) types.SnapshotNode { - if node.StoreKey == commonevm.EVMFlatKVStoreKey { - node.StoreKey = evm.EVMStoreKey +// 
convertFlatKVNodes transforms a single FlatKV physical-key snapshot node +// into one or more EVM SS nodes by stripping the module prefix from the key, +// deserializing the vtype metadata from the value, and (for merged account +// rows) splitting into separate nonce and codeHash nodes. +func convertFlatKVNodes(node types.SnapshotNode) ([]types.SnapshotNode, error) { + _, innerKey, err := ktype.StripModulePrefix(node.Key) + if err != nil { + return nil, fmt.Errorf("convertFlatKVNodes: %w", err) + } + + kind, strippedKey := keys.ParseEVMKey(innerKey) + + switch kind { + case keys.EVMKeyNonce: + acct, err := vtype.DeserializeAccountData(node.Value) + if err != nil { + return nil, fmt.Errorf("convertFlatKVNodes account: %w", err) + } + var nodes []types.SnapshotNode + if nonce := acct.GetNonce(); nonce != 0 { + nonceBuf := make([]byte, 8) + binary.BigEndian.PutUint64(nonceBuf, nonce) + nodes = append(nodes, types.SnapshotNode{ + StoreKey: evm.EVMStoreKey, + Key: keys.BuildEVMKey(keys.EVMKeyNonce, strippedKey), + Value: nonceBuf, + }) + } + if codeHash := acct.GetCodeHash(); *codeHash != (vtype.CodeHash{}) { + nodes = append(nodes, types.SnapshotNode{ + StoreKey: evm.EVMStoreKey, + Key: keys.BuildEVMKey(keys.EVMKeyCodeHash, strippedKey), + Value: append([]byte(nil), codeHash[:]...), + }) + } + return nodes, nil + + case keys.EVMKeyStorage: + sd, err := vtype.DeserializeStorageData(node.Value) + if err != nil { + return nil, fmt.Errorf("convertFlatKVNodes storage: %w", err) + } + return []types.SnapshotNode{ + {StoreKey: evm.EVMStoreKey, Key: innerKey, Value: sd.GetValue()[:]}, + }, nil + + case keys.EVMKeyCode: + cd, err := vtype.DeserializeCodeData(node.Value) + if err != nil { + return nil, fmt.Errorf("convertFlatKVNodes code: %w", err) + } + return []types.SnapshotNode{ + {StoreKey: evm.EVMStoreKey, Key: innerKey, Value: cd.GetBytecode()}, + }, nil + + case keys.EVMKeyLegacy: + ld, err := vtype.DeserializeLegacyData(node.Value) + if err != nil { + return nil, 
fmt.Errorf("convertFlatKVNodes legacy: %w", err) + } + return []types.SnapshotNode{ + {StoreKey: evm.EVMStoreKey, Key: innerKey, Value: ld.GetValue()}, + }, nil + + default: + return nil, nil } - return node } func (s *CompositeStateStore) Import(version int64, ch <-chan types.SnapshotNode) error { if s.evmStore == nil || s.config.WriteMode == config.CosmosOnlyWrite { - // Normalize evm_flatkv → evm so downstream routing and storage work - // correctly regardless of whether the snapshot was exported with the - // FlatKV module or only the legacy evm module. - normalized := make(chan types.SnapshotNode, cap(ch)) + // FlatKV nodes carry physical keys and vtype-serialized values that + // Cosmos SS cannot interpret directly. Convert them into standard + // EVM-format nodes before forwarding. + converted := make(chan types.SnapshotNode, cap(ch)) + var convertErr error go func() { - defer close(normalized) + defer close(converted) for node := range ch { - normalized <- normalizeSnapshotNode(node) + if node.StoreKey == keys.FlatKVStoreKey { + nodes, err := convertFlatKVNodes(node) + if err != nil { + convertErr = fmt.Errorf("import: %w", err) + return + } + for _, n := range nodes { + converted <- n + } + continue + } + converted <- node } }() - return s.cosmosStore.Import(version, normalized) + if err := s.cosmosStore.Import(version, converted); err != nil { + return err + } + return convertErr } - splitWrite := s.config.WriteMode == config.SplitWrite - cosmosCh := make(chan types.SnapshotNode, 100) evmCh := make(chan types.SnapshotNode, 100) importErrCh := make(chan error, 2) @@ -345,19 +423,32 @@ func (s *CompositeStateStore) Import(version int64, ch <-chan types.SnapshotNode } for node := range ch { - node = normalizeSnapshotNode(node) drainImportErr() if importErr != nil { continue } + if node.StoreKey == keys.FlatKVStoreKey { + converted, err := convertFlatKVNodes(node) + if err != nil { + importErr = fmt.Errorf("import: %w", err) + closeImportChans() + continue + 
} + for _, n := range converted { + if err := sendNode(evmCh, n); err != nil { + break + } + } + continue + } + isEVM := node.StoreKey == evm.EVMStoreKey - if !isEVM || !splitWrite { + if !isEVM { if err := sendNode(cosmosCh, node); err != nil { continue } - } - if isEVM { + } else { if err := sendNode(evmCh, node); err != nil { continue } diff --git a/sei-db/state_db/ss/composite/store_test.go b/sei-db/state_db/ss/composite/store_test.go index b8831ce27a..f2bcbf3037 100644 --- a/sei-db/state_db/ss/composite/store_test.go +++ b/sei-db/state_db/ss/composite/store_test.go @@ -1,6 +1,7 @@ package composite import ( + "encoding/binary" "errors" "fmt" "os" @@ -8,10 +9,12 @@ import ( "testing" "time" - commonevm "github.com/sei-protocol/sei-chain/sei-db/common/evm" + commonevm "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" "github.com/sei-protocol/sei-chain/sei-db/state_db/ss/evm" "github.com/stretchr/testify/require" ) @@ -1514,16 +1517,8 @@ func TestImport_OnlyEvmModule(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte("1000"), bankVal) - cosmosEVM1, err := store.cosmosStore.Get(evm.EVMStoreKey, 1, []byte("evm_key_1")) - require.NoError(t, err) - - if mode == config.SplitWrite { - require.Nil(t, cosmosEVM1, "SplitWrite should not store evm data in cosmos") - } else { - require.Equal(t, []byte("val_1"), cosmosEVM1) - } - if store.evmStore != nil && mode != config.CosmosOnlyWrite { + // EVM keys go exclusively to EVM store evmVal, err := store.evmStore.Get(evm.EVMStoreKey, 1, []byte("evm_key_1")) require.NoError(t, err) require.Equal(t, []byte("val_1"), evmVal) @@ -1531,12 +1526,43 @@ func TestImport_OnlyEvmModule(t *testing.T) { evmVal2, err := 
store.evmStore.Get(evm.EVMStoreKey, 1, []byte("evm_key_2")) require.NoError(t, err) require.Equal(t, []byte("val_2"), evmVal2) + + // EVM keys should not be in cosmos store + cosmosEVM1, err := store.cosmosStore.Get(evm.EVMStoreKey, 1, []byte("evm_key_1")) + require.NoError(t, err) + require.Nil(t, cosmosEVM1, "EVM data should not be in cosmos store") + } else { + // No EVM store: EVM keys fall through to cosmos + cosmosEVM1, err := store.cosmosStore.Get(evm.EVMStoreKey, 1, []byte("evm_key_1")) + require.NoError(t, err) + require.Equal(t, []byte("val_1"), cosmosEVM1) } }) } } func TestImport_OnlyEvmFlatkvModule(t *testing.T) { + addr1 := make([]byte, 20) + addr1[19] = 0x01 + addr2 := make([]byte, 20) + addr2[19] = 0x02 + slot := make([]byte, 32) + slot[31] = 0xAA + + storageVal := [32]byte{0: 0xBB} + acctVal := vtype.NewAccountData().SetNonce(42).SetCodeHash(&vtype.CodeHash{0: 0xCC}).Serialize() + storVal := vtype.NewStorageData().SetValue(&storageVal).Serialize() + + physAcct := ktype.EVMPhysicalKey(commonevm.EVMKeyNonce, addr1) + physStor := ktype.EVMPhysicalKey(commonevm.EVMKeyStorage, append(addr2, slot...)) + + nonceKey := commonevm.BuildEVMKey(commonevm.EVMKeyNonce, addr1) + codeHashKey := commonevm.BuildEVMKey(commonevm.EVMKeyCodeHash, addr1) + storageKey := commonevm.BuildEVMKey(commonevm.EVMKeyStorage, append(addr2, slot...)) + + nonceBuf := make([]byte, 8) + binary.BigEndian.PutUint64(nonceBuf, 42) + for _, mode := range []config.WriteMode{config.DualWrite, config.SplitWrite, config.CosmosOnlyWrite} { t.Run("WriteMode="+string(mode), func(t *testing.T) { store, cleanup := setupImportTestStore(t, mode) @@ -1545,8 +1571,8 @@ func TestImport_OnlyEvmFlatkvModule(t *testing.T) { ch := make(chan types.SnapshotNode, 10) nodes := []types.SnapshotNode{ {StoreKey: "bank", Key: []byte("supply"), Value: []byte("2000")}, - {StoreKey: commonevm.EVMFlatKVStoreKey, Key: []byte("flatkv_key_1"), Value: []byte("fv_1")}, - {StoreKey: commonevm.EVMFlatKVStoreKey, Key: 
[]byte("flatkv_key_2"), Value: []byte("fv_2")}, + {StoreKey: commonevm.FlatKVStoreKey, Key: physAcct, Value: acctVal}, + {StoreKey: commonevm.FlatKVStoreKey, Key: physStor, Value: storVal}, } go feedNodes(ch, nodes) @@ -1557,29 +1583,38 @@ func TestImport_OnlyEvmFlatkvModule(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte("2000"), bankVal) - cosmosEVM1, err := store.cosmosStore.Get(evm.EVMStoreKey, 1, []byte("flatkv_key_1")) - require.NoError(t, err) - - if mode == config.SplitWrite { - require.Nil(t, cosmosEVM1, "SplitWrite should not store evm data in cosmos") - } else { - require.Equal(t, []byte("fv_1"), cosmosEVM1, "evm_flatkv should be normalized to evm") - } - if store.evmStore != nil && mode != config.CosmosOnlyWrite { - evmVal, err := store.evmStore.Get(evm.EVMStoreKey, 1, []byte("flatkv_key_1")) + evmNonce, err := store.evmStore.Get(evm.EVMStoreKey, 1, nonceKey) require.NoError(t, err) - require.Equal(t, []byte("fv_1"), evmVal) + require.Equal(t, nonceBuf, evmNonce) - evmVal2, err := store.evmStore.Get(evm.EVMStoreKey, 1, []byte("flatkv_key_2")) + evmCodeHash, err := store.evmStore.Get(evm.EVMStoreKey, 1, codeHashKey) + require.NoError(t, err) + require.Equal(t, vtype.CodeHash{0: 0xCC}, vtype.CodeHash(evmCodeHash)) + + evmStor, err := store.evmStore.Get(evm.EVMStoreKey, 1, storageKey) + require.NoError(t, err) + require.Equal(t, storageVal[:], evmStor) + } else { + cosmosNonce, err := store.cosmosStore.Get(evm.EVMStoreKey, 1, nonceKey) require.NoError(t, err) - require.Equal(t, []byte("fv_2"), evmVal2) + require.Equal(t, nonceBuf, cosmosNonce, "converted flatkv data should land in cosmos when no evm store") } }) } } func TestImport_BothEvmAndEvmFlatkv(t *testing.T) { + addr := make([]byte, 20) + addr[19] = 0x03 + slot := make([]byte, 32) + slot[31] = 0x01 + storageVal := [32]byte{0: 0xDD} + + physStor := ktype.EVMPhysicalKey(commonevm.EVMKeyStorage, append(addr, slot...)) + storVal := vtype.NewStorageData().SetValue(&storageVal).Serialize() 
+ storageKey := commonevm.BuildEVMKey(commonevm.EVMKeyStorage, append(addr, slot...)) + for _, mode := range []config.WriteMode{config.DualWrite, config.SplitWrite} { t.Run("WriteMode="+string(mode), func(t *testing.T) { store, cleanup := setupImportTestStore(t, mode) @@ -1588,54 +1623,48 @@ func TestImport_BothEvmAndEvmFlatkv(t *testing.T) { ch := make(chan types.SnapshotNode, 20) nodes := []types.SnapshotNode{ {StoreKey: "bank", Key: []byte("supply"), Value: []byte("3000")}, - // Legacy evm module data - {StoreKey: commonevm.EVMStoreKey, Key: []byte("shared_key"), Value: []byte("from_evm")}, {StoreKey: commonevm.EVMStoreKey, Key: []byte("evm_only_key"), Value: []byte("evm_only")}, - // evm_flatkv data arriving later — should override shared_key and add new keys - {StoreKey: commonevm.EVMFlatKVStoreKey, Key: []byte("shared_key"), Value: []byte("from_flatkv")}, - {StoreKey: commonevm.EVMFlatKVStoreKey, Key: []byte("flatkv_only_key"), Value: []byte("flatkv_only")}, + {StoreKey: commonevm.FlatKVStoreKey, Key: physStor, Value: storVal}, } go feedNodes(ch, nodes) err := store.Import(1, ch) require.NoError(t, err) - // bank data should be in cosmos bankVal, err := store.cosmosStore.Get("bank", 1, []byte("supply")) require.NoError(t, err) require.Equal(t, []byte("3000"), bankVal) - // EVM store should have all keys: evm_only, shared (overridden by flatkv), flatkv_only require.NotNil(t, store.evmStore) evmOnlyVal, err := store.evmStore.Get(evm.EVMStoreKey, 1, []byte("evm_only_key")) require.NoError(t, err) require.Equal(t, []byte("evm_only"), evmOnlyVal) - sharedVal, err := store.evmStore.Get(evm.EVMStoreKey, 1, []byte("shared_key")) + evmStor, err := store.evmStore.Get(evm.EVMStoreKey, 1, storageKey) require.NoError(t, err) - require.Equal(t, []byte("from_flatkv"), sharedVal, "flatkv value should override evm value for shared key") - - flatkvOnlyVal, err := store.evmStore.Get(evm.EVMStoreKey, 1, []byte("flatkv_only_key")) - require.NoError(t, err) - require.Equal(t, 
[]byte("flatkv_only"), flatkvOnlyVal) - - if mode == config.DualWrite { - cosmosShared, err := store.cosmosStore.Get(evm.EVMStoreKey, 1, []byte("shared_key")) - require.NoError(t, err) - require.Equal(t, []byte("from_flatkv"), cosmosShared, "cosmos should also see the flatkv override in DualWrite") - } + require.Equal(t, storageVal[:], evmStor, "flatkv storage data should be in evm store") }) } } -func TestImport_CosmosOnlyWrite_NormalizesEvmFlatkv(t *testing.T) { +func TestImport_CosmosOnlyWrite_ConvertsFlatkvToCosmos(t *testing.T) { + addr := make([]byte, 20) + addr[19] = 0x05 + + physAcct := ktype.EVMPhysicalKey(commonevm.EVMKeyNonce, addr) + acctVal := vtype.NewAccountData().SetNonce(7).SetCodeHash(&vtype.CodeHash{}).Serialize() + + nonceKey := commonevm.BuildEVMKey(commonevm.EVMKeyNonce, addr) + nonceBuf := make([]byte, 8) + binary.BigEndian.PutUint64(nonceBuf, 7) + store, cleanup := setupImportTestStore(t, config.CosmosOnlyWrite) defer cleanup() ch := make(chan types.SnapshotNode, 10) nodes := []types.SnapshotNode{ {StoreKey: "bank", Key: []byte("supply"), Value: []byte("5000")}, - {StoreKey: commonevm.EVMFlatKVStoreKey, Key: []byte("fk_1"), Value: []byte("fv_1")}, + {StoreKey: commonevm.FlatKVStoreKey, Key: physAcct, Value: acctVal}, {StoreKey: commonevm.EVMStoreKey, Key: []byte("ek_1"), Value: []byte("ev_1")}, } go feedNodes(ch, nodes) @@ -1647,10 +1676,9 @@ func TestImport_CosmosOnlyWrite_NormalizesEvmFlatkv(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte("5000"), bankVal) - // evm_flatkv normalized to evm — both should land in cosmos store - fv, err := store.cosmosStore.Get(evm.EVMStoreKey, 1, []byte("fk_1")) + cosmosNonce, err := store.cosmosStore.Get(evm.EVMStoreKey, 1, nonceKey) require.NoError(t, err) - require.Equal(t, []byte("fv_1"), fv) + require.Equal(t, nonceBuf, cosmosNonce, "converted flatkv nonce should land in cosmos store") ev, err := store.cosmosStore.Get(evm.EVMStoreKey, 1, []byte("ek_1")) require.NoError(t, err) diff --git 
a/sei-db/state_db/ss/evm/db_test.go b/sei-db/state_db/ss/evm/db_test.go index 0be0254eb0..7e569f7f9d 100644 --- a/sei-db/state_db/ss/evm/db_test.go +++ b/sei-db/state_db/ss/evm/db_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" - commonevm "github.com/sei-protocol/sei-chain/sei-db/common/evm" + commonevm "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" diff --git a/sei-db/state_db/ss/evm/store.go b/sei-db/state_db/ss/evm/store.go index 4220bd1e32..8f1f7964a9 100644 --- a/sei-db/state_db/ss/evm/store.go +++ b/sei-db/state_db/ss/evm/store.go @@ -5,7 +5,7 @@ import ( "path/filepath" "sync" - commonevm "github.com/sei-protocol/sei-chain/sei-db/common/evm" + commonevm "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" diff --git a/sei-db/state_db/ss/evm/types.go b/sei-db/state_db/ss/evm/types.go index 8c2413ea0c..bac3552a0d 100644 --- a/sei-db/state_db/ss/evm/types.go +++ b/sei-db/state_db/ss/evm/types.go @@ -1,7 +1,7 @@ package evm import ( - commonevm "github.com/sei-protocol/sei-chain/sei-db/common/evm" + commonevm "github.com/sei-protocol/sei-chain/sei-db/common/keys" ) // EVMStoreKey is the cosmos store key for EVM module. 
diff --git a/sei-db/tools/cmd/seidb/operations/state_size.go b/sei-db/tools/cmd/seidb/operations/state_size.go index 9bd2cdbb62..88e30b4b94 100644 --- a/sei-db/tools/cmd/seidb/operations/state_size.go +++ b/sei-db/tools/cmd/seidb/operations/state_size.go @@ -6,7 +6,7 @@ import ( "sort" "strings" - commonevm "github.com/sei-protocol/sei-chain/sei-db/common/evm" + commonevm "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/memiavl" "github.com/sei-protocol/sei-chain/sei-db/tools/utils" "github.com/spf13/cobra" From 819b6f139d2237144eae9ab92393c7358210f788 Mon Sep 17 00:00:00 2001 From: yzang2019 Date: Wed, 15 Apr 2026 05:05:13 -0700 Subject: [PATCH 2/7] Rename keys --- sei-db/common/keys/{keys.go => evm.go} | 0 sei-db/common/keys/{keys_test.go => evm_test.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename sei-db/common/keys/{keys.go => evm.go} (100%) rename sei-db/common/keys/{keys_test.go => evm_test.go} (100%) diff --git a/sei-db/common/keys/keys.go b/sei-db/common/keys/evm.go similarity index 100% rename from sei-db/common/keys/keys.go rename to sei-db/common/keys/evm.go diff --git a/sei-db/common/keys/keys_test.go b/sei-db/common/keys/evm_test.go similarity index 100% rename from sei-db/common/keys/keys_test.go rename to sei-db/common/keys/evm_test.go From 2c632adda83c649c20f32cfa0105ffc647b994ad Mon Sep 17 00:00:00 2001 From: yzang2019 Date: Wed, 15 Apr 2026 05:31:27 -0700 Subject: [PATCH 3/7] Fix ss import --- sei-db/state_db/ss/composite/store.go | 162 ++++++++------------- sei-db/state_db/ss/composite/store_test.go | 44 ++++++ 2 files changed, 103 insertions(+), 103 deletions(-) diff --git a/sei-db/state_db/ss/composite/store.go b/sei-db/state_db/ss/composite/store.go index 8598f3191a..c69461317a 100644 --- a/sei-db/state_db/ss/composite/store.go +++ b/sei-db/state_db/ss/composite/store.go @@ -262,11 +262,15 @@ func stripEVMFromChangesets(changesets []*proto.NamedChangeSet) 
[]*proto.NamedCh } // convertFlatKVNodes transforms a single FlatKV physical-key snapshot node -// into one or more EVM SS nodes by stripping the module prefix from the key, +// into one or more SS nodes by stripping the module prefix from the key, // deserializing the vtype metadata from the value, and (for merged account // rows) splitting into separate nonce and codeHash nodes. +// +// For EVM-specific keys (account, storage, code) the output StoreKey is "evm". +// For legacy keys the original module name is preserved so they route back to +// the correct Cosmos SS module. func convertFlatKVNodes(node types.SnapshotNode) ([]types.SnapshotNode, error) { - _, innerKey, err := ktype.StripModulePrefix(node.Key) + moduleName, innerKey, err := ktype.StripModulePrefix(node.Key) if err != nil { return nil, fmt.Errorf("convertFlatKVNodes: %w", err) } @@ -322,7 +326,7 @@ func convertFlatKVNodes(node types.SnapshotNode) ([]types.SnapshotNode, error) { return nil, fmt.Errorf("convertFlatKVNodes legacy: %w", err) } return []types.SnapshotNode{ - {StoreKey: evm.EVMStoreKey, Key: innerKey, Value: ld.GetValue()}, + {StoreKey: moduleName, Key: innerKey, Value: ld.GetValue()}, }, nil default: @@ -331,142 +335,94 @@ func convertFlatKVNodes(node types.SnapshotNode) ([]types.SnapshotNode, error) { } func (s *CompositeStateStore) Import(version int64, ch <-chan types.SnapshotNode) error { - if s.evmStore == nil || s.config.WriteMode == config.CosmosOnlyWrite { - // FlatKV nodes carry physical keys and vtype-serialized values that - // Cosmos SS cannot interpret directly. Convert them into standard - // EVM-format nodes before forwarding. 
- converted := make(chan types.SnapshotNode, cap(ch)) - var convertErr error - go func() { - defer close(converted) - for node := range ch { - if node.StoreKey == keys.FlatKVStoreKey { - nodes, err := convertFlatKVNodes(node) - if err != nil { - convertErr = fmt.Errorf("import: %w", err) - return - } - for _, n := range nodes { - converted <- n - } - continue - } - converted <- node - } - }() - if err := s.cosmosStore.Import(version, converted); err != nil { - return err - } - return convertErr - } + importToEVM := s.evmStore != nil && s.config.WriteMode != config.CosmosOnlyWrite cosmosCh := make(chan types.SnapshotNode, 100) - evmCh := make(chan types.SnapshotNode, 100) - importErrCh := make(chan error, 2) + var evmCh chan types.SnapshotNode + if importToEVM { + evmCh = make(chan types.SnapshotNode, 100) + } + done := make(chan struct{}) + var doneOnce sync.Once + errs := make(chan error, 2) var wg sync.WaitGroup - var closeOnce sync.Once - closeImportChans := func() { - closeOnce.Do(func() { - close(cosmosCh) - close(evmCh) - }) + fail := func(err error) { + errs <- err + doneOnce.Do(func() { close(done) }) } wg.Add(1) go func() { defer wg.Done() if err := s.cosmosStore.Import(version, cosmosCh); err != nil { - importErrCh <- err + fail(err) } }() - - wg.Add(1) - go func() { - defer wg.Done() - if err := s.evmStore.Import(version, evmCh); err != nil { - importErrCh <- err - } - }() - - var importErr error - drainImportErr := func() { - for { - select { - case err := <-importErrCh: - if err != nil && importErr == nil { - importErr = err - closeImportChans() - } - default: - return + if importToEVM { + wg.Add(1) + go func() { + defer wg.Done() + if err := s.evmStore.Import(version, evmCh); err != nil { + fail(err) } - } + }() } - sendNode := func(dst chan types.SnapshotNode, node types.SnapshotNode) error { - for { - drainImportErr() - if importErr != nil { - return importErr - } - select { - case dst <- node: - return nil - case err := <-importErrCh: - if err != 
nil && importErr == nil { - importErr = err - closeImportChans() - } - } + + send := func(dst chan<- types.SnapshotNode, n types.SnapshotNode) bool { + select { + case dst <- n: + return true + case <-done: + return false } } + var routeErr error for node := range ch { - drainImportErr() - if importErr != nil { + if routeErr != nil { continue } + var nodes []types.SnapshotNode if node.StoreKey == keys.FlatKVStoreKey { converted, err := convertFlatKVNodes(node) if err != nil { - importErr = fmt.Errorf("import: %w", err) - closeImportChans() + routeErr = fmt.Errorf("import: %w", err) continue } - for _, n := range converted { - if err := sendNode(evmCh, n); err != nil { - break - } - } - continue + nodes = converted + } else { + nodes = append(nodes, node) } - isEVM := node.StoreKey == evm.EVMStoreKey - if !isEVM { - if err := sendNode(cosmosCh, node); err != nil { - continue - } - } else { - if err := sendNode(evmCh, node); err != nil { - continue + for _, n := range nodes { + if n.StoreKey == evm.EVMStoreKey && importToEVM { + if !send(evmCh, n) { + break + } + } else { + if !send(cosmosCh, n) { + break + } } } } - closeImportChans() + close(cosmosCh) + if evmCh != nil { + close(evmCh) + } wg.Wait() - close(importErrCh) - if importErr == nil { - for err := range importErrCh { - if err != nil { - importErr = err - break - } + close(errs) + + for err := range errs { + if err != nil { + return err } } - return importErr + return routeErr } func (s *CompositeStateStore) Prune(version int64) error { diff --git a/sei-db/state_db/ss/composite/store_test.go b/sei-db/state_db/ss/composite/store_test.go index f2bcbf3037..2cbb10efe5 100644 --- a/sei-db/state_db/ss/composite/store_test.go +++ b/sei-db/state_db/ss/composite/store_test.go @@ -1685,6 +1685,50 @@ func TestImport_CosmosOnlyWrite_ConvertsFlatkvToCosmos(t *testing.T) { require.Equal(t, []byte("ev_1"), ev) } +func TestImport_FlatKVLegacyKeysPreserveModule(t *testing.T) { + addr := make([]byte, 20) + addr[0] = 0xAA + + 
evmLegacyInnerKey := append([]byte{0x01}, addr...) + evmLegacyPhysKey := ktype.ModulePhysicalKey("evm", evmLegacyInnerKey) + evmLegacyVal := vtype.NewLegacyData().SetValue([]byte("sei1abc")).Serialize() + + bankInnerKey := []byte("balances/addr1") + bankPhysKey := ktype.ModulePhysicalKey("bank", bankInnerKey) + bankLegacyVal := vtype.NewLegacyData().SetValue([]byte("1000usei")).Serialize() + + for _, mode := range []config.WriteMode{config.DualWrite, config.SplitWrite, config.CosmosOnlyWrite} { + t.Run("WriteMode="+string(mode), func(t *testing.T) { + store, cleanup := setupImportTestStore(t, mode) + defer cleanup() + + ch := make(chan types.SnapshotNode, 10) + nodes := []types.SnapshotNode{ + {StoreKey: commonevm.FlatKVStoreKey, Key: evmLegacyPhysKey, Value: evmLegacyVal}, + {StoreKey: commonevm.FlatKVStoreKey, Key: bankPhysKey, Value: bankLegacyVal}, + } + go feedNodes(ch, nodes) + + err := store.Import(1, ch) + require.NoError(t, err) + + if store.evmStore != nil && mode != config.CosmosOnlyWrite { + evmVal, err := store.evmStore.Get(evm.EVMStoreKey, 1, evmLegacyInnerKey) + require.NoError(t, err) + require.Equal(t, []byte("sei1abc"), evmVal, "evm legacy key should land in EVM store") + } + + bankVal, err := store.cosmosStore.Get("bank", 1, bankInnerKey) + require.NoError(t, err) + require.Equal(t, []byte("1000usei"), bankVal, "bank legacy key should land in cosmos under 'bank' module") + + wrongModule, err := store.cosmosStore.Get(evm.EVMStoreKey, 1, bankInnerKey) + require.NoError(t, err) + require.Nil(t, wrongModule, "bank legacy key should NOT land under evm store key") + }) + } +} + func TestImport_NonEvmModulesUnaffected(t *testing.T) { store, cleanup := setupImportTestStore(t, config.DualWrite) defer cleanup() From 6b627454d24479f73c6f8769644fb34d039cfe17 Mon Sep 17 00:00:00 2001 From: yzang2019 Date: Wed, 15 Apr 2026 05:42:12 -0700 Subject: [PATCH 4/7] Fix unit test --- sei-db/state_db/sc/flatkv/import_export_test.go | 15 +++++++++------ 1 file changed, 
9 insertions(+), 6 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/import_export_test.go b/sei-db/state_db/sc/flatkv/import_export_test.go index 6473dd0be6..10ab2c980d 100644 --- a/sei-db/state_db/sc/flatkv/import_export_test.go +++ b/sei-db/state_db/sc/flatkv/import_export_test.go @@ -629,8 +629,9 @@ func TestImporterCorruptKeyDataPropagatesError(t *testing.T) { // A key without module prefix ("/" separator) should be rejected by // routePhysicalKey during flush. imp.AddNode(&types.SnapshotNode{ - Key: []byte{0xDE, 0xAD}, - Value: []byte{0x01, 0x02}, + Key: []byte{0xDE, 0xAD}, + Value: []byte{0x01, 0x02}, + Version: 1, }) err = imp.Close() @@ -659,8 +660,9 @@ func TestImporterDoubleImport(t *testing.T) { imp1, err := s.Importer(1) require.NoError(t, err) imp1.AddNode(&types.SnapshotNode{ - Key: storagePhysKey(addrN(0x01), slotN(0x01)), - Value: vtype.NewStorageData().SetBlockHeight(1).SetValue(sv1).Serialize(), + Key: storagePhysKey(addrN(0x01), slotN(0x01)), + Value: vtype.NewStorageData().SetBlockHeight(1).SetValue(sv1).Serialize(), + Version: 1, }) require.NoError(t, imp1.Close()) @@ -673,8 +675,9 @@ func TestImporterDoubleImport(t *testing.T) { imp2, err := s.Importer(2) require.NoError(t, err) imp2.AddNode(&types.SnapshotNode{ - Key: storagePhysKey(addrN(0x02), slotN(0x02)), - Value: vtype.NewStorageData().SetBlockHeight(2).SetValue(sv2).Serialize(), + Key: storagePhysKey(addrN(0x02), slotN(0x02)), + Value: vtype.NewStorageData().SetBlockHeight(2).SetValue(sv2).Serialize(), + Version: 2, }) require.NoError(t, imp2.Close()) From a736bfe9a818e43e5243da65bc2caafcbbf24f42 Mon Sep 17 00:00:00 2001 From: yzang2019 Date: Wed, 15 Apr 2026 05:42:49 -0700 Subject: [PATCH 5/7] Fix unit test --- sei-db/state_db/sc/flatkv/perdb_lthash_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/perdb_lthash_test.go b/sei-db/state_db/sc/flatkv/perdb_lthash_test.go index 48892085f2..79c557e2b8 100644 --- 
a/sei-db/state_db/sc/flatkv/perdb_lthash_test.go +++ b/sei-db/state_db/sc/flatkv/perdb_lthash_test.go @@ -339,8 +339,8 @@ func TestPerDBLtHashAfterImport(t *testing.T) { slot := slotN(i) storVal := vtype.NewStorageData().SetBlockHeight(1).SetValue(&[32]byte{i, 0xAA}).Serialize() acctVal := vtype.NewAccountData().SetBlockHeight(1).SetNonce(uint64(i)).Serialize() - imp.AddNode(&scTypes.SnapshotNode{Key: storagePhysKey(addr, slot), Value: storVal}) - imp.AddNode(&scTypes.SnapshotNode{Key: accountPhysKey(addr), Value: acctVal}) + imp.AddNode(&scTypes.SnapshotNode{Key: storagePhysKey(addr, slot), Value: storVal, Version: 1}) + imp.AddNode(&scTypes.SnapshotNode{Key: accountPhysKey(addr), Value: acctVal, Version: 1}) } require.NoError(t, imp.Close()) From ee22cb52d1704e9d184b6205443d6391883b5254 Mon Sep 17 00:00:00 2001 From: yzang2019 Date: Wed, 15 Apr 2026 05:56:44 -0700 Subject: [PATCH 6/7] Fix lint --- sei-db/config/sc_config.go | 2 +- .../state_db/sc/flatkv/import_export_test.go | 2 +- .../sc/flatkv/lthash_correctness_test.go | 2 +- sei-db/state_db/sc/flatkv/store_apply.go | 2 +- sei-db/state_db/sc/flatkv/store_iterator.go | 18 +++++++++--------- sei-db/state_db/sc/flatkv/test_helper.go | 5 +++-- 6 files changed, 16 insertions(+), 15 deletions(-) diff --git a/sei-db/config/sc_config.go b/sei-db/config/sc_config.go index 49f9f6d221..d02ae257f8 100644 --- a/sei-db/config/sc_config.go +++ b/sei-db/config/sc_config.go @@ -2,8 +2,8 @@ package config import ( "fmt" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/config" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/memiavl" ) diff --git a/sei-db/state_db/sc/flatkv/import_export_test.go b/sei-db/state_db/sc/flatkv/import_export_test.go index 10ab2c980d..ae0abb2b2b 100644 --- a/sei-db/state_db/sc/flatkv/import_export_test.go +++ b/sei-db/state_db/sc/flatkv/import_export_test.go @@ -779,7 +779,7 @@ func 
TestExportImportLargerDataset(t *testing.T) { originalHash := s.RootHash() // Export. - exp, err := s.Exporter(0) + exp, err := s.Exporter(1) require.NoError(t, err) nodes := drainExporter(t, exp) require.NoError(t, exp.Close()) diff --git a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go index 4926000e0b..eb02758ece 100644 --- a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go +++ b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go @@ -1222,7 +1222,7 @@ func TestLtHashExportImportRoundTrip(t *testing.T) { srcHash := s.RootHash() // Export - exp, err := s.Exporter(0) + exp, err := s.Exporter(1) require.NoError(t, err) var nodes []*scTypes.SnapshotNode for { diff --git a/sei-db/state_db/sc/flatkv/store_apply.go b/sei-db/state_db/sc/flatkv/store_apply.go index 6b52e584e2..88b155bf41 100644 --- a/sei-db/state_db/sc/flatkv/store_apply.go +++ b/sei-db/state_db/sc/flatkv/store_apply.go @@ -139,7 +139,7 @@ func classifyAndPrefix(changeSets []*proto.NamedChangeSet) (map[keys.EVMKeyKind] } for _, cs := range changeSets { - if cs.Changeset.Pairs == nil || len(cs.Changeset.Pairs) == 0 { + if len(cs.Changeset.Pairs) == 0 { continue } diff --git a/sei-db/state_db/sc/flatkv/store_iterator.go b/sei-db/state_db/sc/flatkv/store_iterator.go index c7ecd9141c..023dcce9d3 100644 --- a/sei-db/state_db/sc/flatkv/store_iterator.go +++ b/sei-db/state_db/sc/flatkv/store_iterator.go @@ -24,16 +24,16 @@ type sequentialIterator struct { // openCurrent opens an iterator on dbs[dbIdx]. Returns false if no more DBs. 
func (s *sequentialIterator) openCurrent() bool { - for s.dbIdx < len(s.dbs) { - it, err := s.dbs[s.dbIdx].NewIter(nil) - if err != nil { - s.err = err - return false - } - s.iter = it - return true + if s.dbIdx >= len(s.dbs) { + return false + } + it, err := s.dbs[s.dbIdx].NewIter(nil) + if err != nil { + s.err = err + return false } - return false + s.iter = it + return true } // advanceDB closes the current iterator and moves to the next DB, diff --git a/sei-db/state_db/sc/flatkv/test_helper.go b/sei-db/state_db/sc/flatkv/test_helper.go index a3640cae14..065eb3bed2 100644 --- a/sei-db/state_db/sc/flatkv/test_helper.go +++ b/sei-db/state_db/sc/flatkv/test_helper.go @@ -2,6 +2,9 @@ package flatkv import ( "encoding/binary" + "path/filepath" + "testing" + "github.com/sei-protocol/sei-chain/sei-db/common/keys" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" @@ -11,8 +14,6 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" "github.com/stretchr/testify/require" - "path/filepath" - "testing" ) // ============================================================================= From e46cd96534101f50faaebececede40a3b1b9af23 Mon Sep 17 00:00:00 2001 From: yzang2019 Date: Wed, 15 Apr 2026 22:36:03 -0700 Subject: [PATCH 7/7] Address comments --- sei-db/state_db/sc/flatkv/importer.go | 6 ++++++ sei-db/state_db/sc/flatkv/store_iterator.go | 16 +++++++++------- sei-db/state_db/ss/composite/store.go | 16 ++++++++-------- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/importer.go b/sei-db/state_db/sc/flatkv/importer.go index a26543980a..85287ddb35 100644 --- a/sei-db/state_db/sc/flatkv/importer.go +++ b/sei-db/state_db/sc/flatkv/importer.go @@ -46,6 +46,11 @@ func newDBWorker(dir string, db seidbtypes.KeyValueDB, ltHash *lthash.LtHash) *d // buffer reaches importBatchSize. 
If done fires, the worker abandons // remaining work and exits immediately. func (w *dbWorker) run(done <-chan struct{}) error { + defer func() { + if w.batch != nil { + _ = w.batch.Close() + } + }() for { select { case kv, ok := <-w.ch: @@ -76,6 +81,7 @@ func (w *dbWorker) flush() error { return nil } + // TODO:In theory, we could offload lattice hash calculation to a work pool and get parallelism between DB operations and hash calculations. Cryptosim performance makes me think we could probably get a 2-3x speedup from this, assuming receiving data from the network isn't the bottleneck. newHash, _ := lthash.ComputeLtHash(w.ltHash, w.ltPairs) w.ltHash = newHash diff --git a/sei-db/state_db/sc/flatkv/store_iterator.go b/sei-db/state_db/sc/flatkv/store_iterator.go index 023dcce9d3..bbf3e8e996 100644 --- a/sei-db/state_db/sc/flatkv/store_iterator.go +++ b/sei-db/state_db/sc/flatkv/store_iterator.go @@ -5,13 +5,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/ktype" ) -// RawGlobalIterator returns an iterator that walks each data DB sequentially -// in fixed order (account → code → storage → legacy). Within each DB the -// keys are returned in PebbleDB's natural order. Per-DB _meta/* keys are -// skipped. Pending writes are not visible. metadataDB is not included. -func (s *CommitStore) RawGlobalIterator() Iterator { - return &sequentialIterator{dbs: s.dataDBs()} -} +var _ Iterator = (*sequentialIterator)(nil) // sequentialIterator iterates through a slice of DBs one at a time. // It fully drains the current DB before moving to the next. @@ -142,3 +136,11 @@ func (s *sequentialIterator) Last() bool { return false } func (s *sequentialIterator) SeekGE([]byte) bool { return false } func (s *sequentialIterator) SeekLT([]byte) bool { return false } func (s *sequentialIterator) Prev() bool { return false } + +// RawGlobalIterator returns an iterator that walks each data DB sequentially +// in fixed order (account → code → storage → legacy). 
Within each DB the +// keys are returned in PebbleDB's natural order. Per-DB _meta/* keys are +// skipped. Pending writes are not visible. metadataDB is not included. +func (s *CommitStore) RawGlobalIterator() Iterator { + return &sequentialIterator{dbs: s.dataDBs()} +} diff --git a/sei-db/state_db/ss/composite/store.go b/sei-db/state_db/ss/composite/store.go index c69461317a..b096e094c4 100644 --- a/sei-db/state_db/ss/composite/store.go +++ b/sei-db/state_db/ss/composite/store.go @@ -272,7 +272,7 @@ func stripEVMFromChangesets(changesets []*proto.NamedChangeSet) []*proto.NamedCh func convertFlatKVNodes(node types.SnapshotNode) ([]types.SnapshotNode, error) { moduleName, innerKey, err := ktype.StripModulePrefix(node.Key) if err != nil { - return nil, fmt.Errorf("convertFlatKVNodes: %w", err) + return nil, fmt.Errorf("convertFlatKVNodes failed: %w", err) } kind, strippedKey := keys.ParseEVMKey(innerKey) @@ -281,10 +281,10 @@ func convertFlatKVNodes(node types.SnapshotNode) ([]types.SnapshotNode, error) { case keys.EVMKeyNonce: acct, err := vtype.DeserializeAccountData(node.Value) if err != nil { - return nil, fmt.Errorf("convertFlatKVNodes account: %w", err) + return nil, fmt.Errorf("failed to DeserializeAccountData: %w", err) } var nodes []types.SnapshotNode - if nonce := acct.GetNonce(); nonce != 0 { + if nonce := acct.GetNonce(); !acct.IsDelete() { nonceBuf := make([]byte, 8) binary.BigEndian.PutUint64(nonceBuf, nonce) nodes = append(nodes, types.SnapshotNode{ @@ -305,7 +305,7 @@ func convertFlatKVNodes(node types.SnapshotNode) ([]types.SnapshotNode, error) { case keys.EVMKeyStorage: sd, err := vtype.DeserializeStorageData(node.Value) if err != nil { - return nil, fmt.Errorf("convertFlatKVNodes storage: %w", err) + return nil, fmt.Errorf("failed to DeserializeStorageData: %w", err) } return []types.SnapshotNode{ {StoreKey: evm.EVMStoreKey, Key: innerKey, Value: sd.GetValue()[:]}, @@ -314,7 +314,7 @@ func convertFlatKVNodes(node types.SnapshotNode) 
([]types.SnapshotNode, error) { case keys.EVMKeyCode: cd, err := vtype.DeserializeCodeData(node.Value) if err != nil { - return nil, fmt.Errorf("convertFlatKVNodes code: %w", err) + return nil, fmt.Errorf("failed to DeserializeCodeData: %w", err) } return []types.SnapshotNode{ {StoreKey: evm.EVMStoreKey, Key: innerKey, Value: cd.GetBytecode()}, @@ -323,14 +323,14 @@ func convertFlatKVNodes(node types.SnapshotNode) ([]types.SnapshotNode, error) { case keys.EVMKeyLegacy: ld, err := vtype.DeserializeLegacyData(node.Value) if err != nil { - return nil, fmt.Errorf("convertFlatKVNodes legacy: %w", err) + return nil, fmt.Errorf("failed to DeserializeLegacyData legacy: %w", err) } return []types.SnapshotNode{ {StoreKey: moduleName, Key: innerKey, Value: ld.GetValue()}, }, nil default: - return nil, nil + return nil, fmt.Errorf("got unexpected type of keys when convertFlatKVNodes") } } @@ -389,7 +389,7 @@ func (s *CompositeStateStore) Import(version int64, ch <-chan types.SnapshotNode if node.StoreKey == keys.FlatKVStoreKey { converted, err := convertFlatKVNodes(node) if err != nil { - routeErr = fmt.Errorf("import: %w", err) + routeErr = fmt.Errorf("SS import failure: %w", err) continue } nodes = converted