diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
deleted file mode 100644
index 21eb1ff843..0000000000
--- a/core/rawdb/accessors_chain.go
+++ /dev/null
@@ -1,606 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "math/big"
-
- "github.com/ava-labs/coreth/consensus/misc/eip4844"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/params"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/rlp"
-)
-
-// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
-func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
- data, _ := db.Get(headerHashKey(number))
- if len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// WriteCanonicalHash stores the hash assigned to a canonical block number.
-func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
- log.Crit("Failed to store number to hash mapping", "err", err)
- }
-}
-
-// DeleteCanonicalHash removes the number to hash canonical mapping.
-func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
- if err := db.Delete(headerHashKey(number)); err != nil {
- log.Crit("Failed to delete number to hash mapping", "err", err)
- }
-}
-
-// ReadAllHashes retrieves all the hashes assigned to blocks at a certain heights,
-// both canonical and reorged forks included.
-func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
- prefix := headerKeyPrefix(number)
-
- hashes := make([]common.Hash, 0, 1)
- it := db.NewIterator(prefix, nil)
- defer it.Release()
-
- for it.Next() {
- if key := it.Key(); len(key) == len(prefix)+32 {
- hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
- }
- }
- return hashes
-}
-
-type NumberHash struct {
- Number uint64
- Hash common.Hash
-}
-
-// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
-// heights, both canonical and reorged forks included.
-// This method considers both limits to be _inclusive_.
-func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
- var (
- start = encodeBlockNumber(first)
- keyLength = len(headerPrefix) + 8 + 32
- hashes = make([]*NumberHash, 0, 1+last-first)
- it = db.NewIterator(headerPrefix, start)
- )
- defer it.Release()
- for it.Next() {
- key := it.Key()
- if len(key) != keyLength {
- continue
- }
- num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8])
- if num > last {
- break
- }
- hash := common.BytesToHash(key[len(key)-32:])
- hashes = append(hashes, &NumberHash{num, hash})
- }
- return hashes
-}
-
-// ReadAllCanonicalHashes retrieves all canonical number and hash mappings at the
-// certain chain range. If the accumulated entries reaches the given threshold,
-// abort the iteration and return the semi-finish result.
-func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
- // Short circuit if the limit is 0.
- if limit == 0 {
- return nil, nil
- }
- var (
- numbers []uint64
- hashes []common.Hash
- )
- // Construct the key prefix of start point.
- start, end := headerHashKey(from), headerHashKey(to)
- it := db.NewIterator(nil, start)
- defer it.Release()
-
- for it.Next() {
- if bytes.Compare(it.Key(), end) >= 0 {
- break
- }
- if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
- numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
- hashes = append(hashes, common.BytesToHash(it.Value()))
- // If the accumulated entries reaches the limit threshold, return.
- if len(numbers) >= limit {
- break
- }
- }
- }
- return numbers, hashes
-}
-
-// ReadHeaderNumber returns the header number assigned to a hash.
-func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
- data, _ := db.Get(headerNumberKey(hash))
- if len(data) != 8 {
- return nil
- }
- number := binary.BigEndian.Uint64(data)
- return &number
-}
-
-// WriteHeaderNumber stores the hash->number mapping.
-func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- key := headerNumberKey(hash)
- enc := encodeBlockNumber(number)
- if err := db.Put(key, enc); err != nil {
- log.Crit("Failed to store hash to number mapping", "err", err)
- }
-}
-
-// DeleteHeaderNumber removes hash->number mapping.
-func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(headerNumberKey(hash)); err != nil {
- log.Crit("Failed to delete hash to number mapping", "err", err)
- }
-}
-
-// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
-func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
- data, _ := db.Get(headHeaderKey)
- if len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// WriteHeadHeaderHash stores the hash of the current canonical head header.
-func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
- log.Crit("Failed to store last header's hash", "err", err)
- }
-}
-
-// ReadHeadBlockHash retrieves the hash of the current canonical head block.
-func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
- data, _ := db.Get(headBlockKey)
- if len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// WriteHeadBlockHash stores the head block's hash.
-func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
- log.Crit("Failed to store last block's hash", "err", err)
- }
-}
-
-// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
-func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
- data, _ := db.Get(headerKey(number, hash))
- if len(data) > 0 {
- return data
- }
- return nil
-}
-
-// HasHeader verifies the existence of a block header corresponding to the hash.
-func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
- if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
- return false
- }
- return true
-}
-
-// ReadHeader retrieves the block header corresponding to the hash.
-func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
- data := ReadHeaderRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- header := new(types.Header)
- if err := rlp.DecodeBytes(data, header); err != nil {
- log.Error("Invalid block header RLP", "hash", hash, "err", err)
- return nil
- }
- return header
-}
-
-// WriteHeader stores a block header into the database and also stores the hash-
-// to-number mapping.
-func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
- var (
- hash = header.Hash()
- number = header.Number.Uint64()
- )
- // Write the hash -> number mapping
- WriteHeaderNumber(db, hash, number)
-
- // Write the encoded header
- data, err := rlp.EncodeToBytes(header)
- if err != nil {
- log.Crit("Failed to RLP encode header", "err", err)
- }
- key := headerKey(number, hash)
- if err := db.Put(key, data); err != nil {
- log.Crit("Failed to store header", "err", err)
- }
-}
-
-// DeleteHeader removes all block header data associated with a hash.
-func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- deleteHeaderWithoutNumber(db, hash, number)
- if err := db.Delete(headerNumberKey(hash)); err != nil {
- log.Crit("Failed to delete hash to number mapping", "err", err)
- }
-}
-
-// deleteHeaderWithoutNumber removes only the block header but does not remove
-// the hash to number mapping.
-func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- if err := db.Delete(headerKey(number, hash)); err != nil {
- log.Crit("Failed to delete header", "err", err)
- }
-}
-
-// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
-func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
- data, _ := db.Get(blockBodyKey(number, hash))
- if len(data) > 0 {
- return data
- }
- return nil
-}
-
-// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
-// block at number, in RLP encoding.
-func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
- // Need to get the hash
- data, _ := db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
- if len(data) > 0 {
- return data
- }
- return nil
-}
-
-// WriteBodyRLP stores an RLP encoded block body into the database.
-func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
- if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
- log.Crit("Failed to store block body", "err", err)
- }
-}
-
-// HasBody verifies the existence of a block body corresponding to the hash.
-func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
- if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
- return false
- }
- return true
-}
-
-// ReadBody retrieves the block body corresponding to the hash.
-func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
- data := ReadBodyRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- body := new(types.Body)
- if err := rlp.DecodeBytes(data, body); err != nil {
- log.Error("Invalid block body RLP", "hash", hash, "err", err)
- return nil
- }
- return body
-}
-
-// WriteBody stores a block body into the database.
-func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
- data, err := rlp.EncodeToBytes(body)
- if err != nil {
- log.Crit("Failed to RLP encode body", "err", err)
- }
- WriteBodyRLP(db, hash, number, data)
-}
-
-// DeleteBody removes all block body data associated with a hash.
-func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- if err := db.Delete(blockBodyKey(number, hash)); err != nil {
- log.Crit("Failed to delete block body", "err", err)
- }
-}
-
-// HasReceipts verifies the existence of all the transaction receipts belonging
-// to a block.
-func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
- if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
- return false
- }
- return true
-}
-
-// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
-func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
- data, _ := db.Get(blockReceiptsKey(number, hash))
- if len(data) > 0 {
- return data
- }
- return nil
-}
-
-// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
-// The receipt metadata fields are not guaranteed to be populated, so they
-// should not be used. Use ReadReceipts instead if the metadata is needed.
-func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
- // Retrieve the flattened receipt slice
- data := ReadReceiptsRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- // Convert the receipts from their storage form to their internal representation
- storageReceipts := []*types.ReceiptForStorage{}
- if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
- log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
- return nil
- }
- receipts := make(types.Receipts, len(storageReceipts))
- for i, storageReceipt := range storageReceipts {
- receipts[i] = (*types.Receipt)(storageReceipt)
- }
- return receipts
-}
-
-// ReadReceipts retrieves all the transaction receipts belonging to a block, including
-// its corresponding metadata fields. If it is unable to populate these metadata
-// fields then nil is returned.
-//
-// The current implementation populates these metadata fields by reading the receipts'
-// corresponding block body, so if the block body is not found it will return nil even
-// if the receipt itself is stored.
-func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, time uint64, config *params.ChainConfig) types.Receipts {
- // We're deriving many fields from the block body, retrieve beside the receipt
- receipts := ReadRawReceipts(db, hash, number)
- if receipts == nil {
- return nil
- }
- body := ReadBody(db, hash, number)
- if body == nil {
- log.Error("Missing body but have receipt", "hash", hash, "number", number)
- return nil
- }
- header := ReadHeader(db, hash, number)
-
- var baseFee *big.Int
- if header == nil {
- baseFee = big.NewInt(0)
- } else {
- baseFee = header.BaseFee
- }
- // Compute effective blob gas price.
- var blobGasPrice *big.Int
- if header != nil && header.ExcessBlobGas != nil {
- blobGasPrice = eip4844.CalcBlobFee(*header.ExcessBlobGas)
- }
- if err := receipts.DeriveFields(config, hash, number, time, baseFee, blobGasPrice, body.Transactions); err != nil {
- log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
- return nil
- }
- return receipts
-}
-
-// WriteReceipts stores all the transaction receipts belonging to a block.
-func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
- // Convert the receipts into their storage form and serialize them
- storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
- for i, receipt := range receipts {
- storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
- }
- bytes, err := rlp.EncodeToBytes(storageReceipts)
- if err != nil {
- log.Crit("Failed to encode block receipts", "err", err)
- }
- // Store the flattened receipt slice
- if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
- log.Crit("Failed to store block receipts", "err", err)
- }
-}
-
-// DeleteReceipts removes all receipt data associated with a block hash.
-func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
- log.Crit("Failed to delete block receipts", "err", err)
- }
-}
-
-// storedReceiptRLP is the storage encoding of a receipt.
-// Re-definition in core/types/receipt.go.
-// TODO: Re-use the existing definition.
-type storedReceiptRLP struct {
- PostStateOrStatus []byte
- CumulativeGasUsed uint64
- Logs []*types.Log
-}
-
-// ReceiptLogs is a barebone version of ReceiptForStorage which only keeps
-// the list of logs. When decoding a stored receipt into this object we
-// avoid creating the bloom filter.
-type receiptLogs struct {
- Logs []*types.Log
-}
-
-// DecodeRLP implements rlp.Decoder.
-func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
- var stored storedReceiptRLP
- if err := s.Decode(&stored); err != nil {
- return err
- }
- r.Logs = stored.Logs
- return nil
-}
-
-// DeriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
-func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
- logIndex := uint(0)
- if len(txs) != len(receipts) {
- return errors.New("transaction and receipt count mismatch")
- }
- for i := 0; i < len(receipts); i++ {
- txHash := txs[i].Hash()
- // The derived log fields can simply be set from the block and transaction
- for j := 0; j < len(receipts[i].Logs); j++ {
- receipts[i].Logs[j].BlockNumber = number
- receipts[i].Logs[j].BlockHash = hash
- receipts[i].Logs[j].TxHash = txHash
- receipts[i].Logs[j].TxIndex = uint(i)
- receipts[i].Logs[j].Index = logIndex
- logIndex++
- }
- }
- return nil
-}
-
-// ReadLogs retrieves the logs for all transactions in a block. In case
-// receipts is not found, a nil is returned.
-// Note: ReadLogs does not derive unstored log fields.
-func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
- // Retrieve the flattened receipt slice
- data := ReadReceiptsRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- receipts := []*receiptLogs{}
- if err := rlp.DecodeBytes(data, &receipts); err != nil {
- log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
- return nil
- }
-
- logs := make([][]*types.Log, len(receipts))
- for i, receipt := range receipts {
- logs[i] = receipt.Logs
- }
- return logs
-}
-
-// ReadBlock retrieves an entire block corresponding to the hash, assembling it
-// back from the stored header and body. If either the header or body could not
-// be retrieved nil is returned.
-//
-// Note, due to concurrent download of header and block body the header and thus
-// canonical hash can be stored in the database but the body data not (yet).
-func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
- header := ReadHeader(db, hash, number)
- if header == nil {
- return nil
- }
- body := ReadBody(db, hash, number)
- if body == nil {
- return nil
- }
- return types.NewBlockWithHeader(header).WithBody(*body)
-}
-
-// WriteBlock serializes a block into the database, header and body separately.
-func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
- WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
- WriteHeader(db, block.Header())
-}
-
-// DeleteBlock removes all block data associated with a hash.
-func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- DeleteReceipts(db, hash, number)
- DeleteHeader(db, hash, number)
- DeleteBody(db, hash, number)
-}
-
-// DeleteBlockWithoutNumber removes all block data associated with a hash, except
-// the hash to number mapping.
-func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- DeleteReceipts(db, hash, number)
- deleteHeaderWithoutNumber(db, hash, number)
- DeleteBody(db, hash, number)
-}
-
-// FindCommonAncestor returns the last common ancestor of two block headers
-func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
- for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
- a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
- if a == nil {
- return nil
- }
- }
- for an := a.Number.Uint64(); an < b.Number.Uint64(); {
- b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
- if b == nil {
- return nil
- }
- }
- for a.Hash() != b.Hash() {
- a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
- if a == nil {
- return nil
- }
- b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
- if b == nil {
- return nil
- }
- }
- return a
-}
-
-// ReadHeadBlock returns the current canonical head block.
-func ReadHeadBlock(db ethdb.Reader) *types.Block {
- headBlockHash := ReadHeadBlockHash(db)
- if headBlockHash == (common.Hash{}) {
- return nil
- }
- headBlockNumber := ReadHeaderNumber(db, headBlockHash)
- if headBlockNumber == nil {
- return nil
- }
- return ReadBlock(db, headBlockHash, *headBlockNumber)
-}
-
-// ReadTxIndexTail retrieves the number of oldest indexed block
-// whose transaction indices has been indexed.
-func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
- data, _ := db.Get(txIndexTailKey)
- if len(data) != 8 {
- return nil
- }
- number := binary.BigEndian.Uint64(data)
- return &number
-}
-
-// WriteTxIndexTail stores the number of oldest indexed block
-// into database.
-func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
- if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
- log.Crit("Failed to store the transaction index tail", "err", err)
- }
-}
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
deleted file mode 100644
index 3ec409066e..0000000000
--- a/core/rawdb/accessors_chain_test.go
+++ /dev/null
@@ -1,622 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "math/big"
- "os"
- "reflect"
- "testing"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/params"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/rlp"
- "golang.org/x/crypto/sha3"
-)
-
-// Tests block header storage and retrieval operations.
-func TestHeaderStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a test header to move around the database and make sure it's really new
- header := &types.Header{Number: big.NewInt(42), Extra: []byte("test header")}
- if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
- t.Fatalf("Non existent header returned: %v", entry)
- }
- // Write and verify the header in the database
- WriteHeader(db, header)
- if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry == nil {
- t.Fatalf("Stored header not found")
- } else if entry.Hash() != header.Hash() {
- t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
- }
- if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
- t.Fatalf("Stored header RLP not found")
- } else {
- hasher := sha3.NewLegacyKeccak256()
- hasher.Write(entry)
-
- if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
- t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
- }
- }
- // Delete the header and verify the execution
- DeleteHeader(db, header.Hash(), header.Number.Uint64())
- if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
- t.Fatalf("Deleted header returned: %v", entry)
- }
-}
-
-// Tests block body storage and retrieval operations.
-func TestBodyStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a test body to move around the database and make sure it's really new
- body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
-
- hasher := sha3.NewLegacyKeccak256()
- rlp.Encode(hasher, body)
- hash := common.BytesToHash(hasher.Sum(nil))
-
- if entry := ReadBody(db, hash, 0); entry != nil {
- t.Fatalf("Non existent body returned: %v", entry)
- }
- // Write and verify the body in the database
- WriteBody(db, hash, 0, body)
- if entry := ReadBody(db, hash, 0); entry == nil {
- t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
- t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
- }
- if entry := ReadBodyRLP(db, hash, 0); entry == nil {
- t.Fatalf("Stored body RLP not found")
- } else {
- hasher := sha3.NewLegacyKeccak256()
- hasher.Write(entry)
-
- if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
- t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
- }
- }
- // Delete the body and verify the execution
- DeleteBody(db, hash, 0)
- if entry := ReadBody(db, hash, 0); entry != nil {
- t.Fatalf("Deleted body returned: %v", entry)
- }
-}
-
-// Tests block storage and retrieval operations.
-func TestBlockStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a test block to move around the database and make sure it's really new
- block := types.NewBlockWithHeader(&types.Header{
- Extra: []byte("test block"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- })
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent block returned: %v", entry)
- }
- if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent header returned: %v", entry)
- }
- if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent body returned: %v", entry)
- }
- // Write and verify the block in the database
- WriteBlock(db, block)
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored block not found")
- } else if entry.Hash() != block.Hash() {
- t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
- }
- if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored header not found")
- } else if entry.Hash() != block.Header().Hash() {
- t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
- }
- if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(block.Transactions(), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
- t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
- }
- // Delete the block and verify the execution
- DeleteBlock(db, block.Hash(), block.NumberU64())
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Deleted block returned: %v", entry)
- }
- if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Deleted header returned: %v", entry)
- }
- if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Deleted body returned: %v", entry)
- }
-}
-
-// Tests that partial block contents don't get reassembled into full blocks.
-func TestPartialBlockStorage(t *testing.T) {
- db := NewMemoryDatabase()
- block := types.NewBlockWithHeader(&types.Header{
- Extra: []byte("test block"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- })
- // Store a header and check that it's not recognized as a block
- WriteHeader(db, block.Header())
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent block returned: %v", entry)
- }
- DeleteHeader(db, block.Hash(), block.NumberU64())
-
- // Store a body and check that it's not recognized as a block
- WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent block returned: %v", entry)
- }
- DeleteBody(db, block.Hash(), block.NumberU64())
-
- // Store a header and a body separately and check reassembly
- WriteHeader(db, block.Header())
- WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
-
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored block not found")
- } else if entry.Hash() != block.Hash() {
- t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
- }
-}
-
-// Tests that canonical numbers can be mapped to hashes and retrieved.
-func TestCanonicalMappingStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a test canonical number and assigned hash to move around
- hash, number := common.Hash{0: 0xff}, uint64(314)
- if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
- t.Fatalf("Non existent canonical mapping returned: %v", entry)
- }
- // Write and verify the TD in the database
- WriteCanonicalHash(db, hash, number)
- if entry := ReadCanonicalHash(db, number); entry == (common.Hash{}) {
- t.Fatalf("Stored canonical mapping not found")
- } else if entry != hash {
- t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
- }
- // Delete the TD and verify the execution
- DeleteCanonicalHash(db, number)
- if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
- t.Fatalf("Deleted canonical mapping returned: %v", entry)
- }
-}
-
-// Tests that head headers and head blocks can be assigned, individually.
-func TestHeadStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
- blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
-
- // Check that no head entries are in a pristine database
- if entry := ReadHeadHeaderHash(db); entry != (common.Hash{}) {
- t.Fatalf("Non head header entry returned: %v", entry)
- }
- if entry := ReadHeadBlockHash(db); entry != (common.Hash{}) {
- t.Fatalf("Non head block entry returned: %v", entry)
- }
- // Assign separate entries for the head header and block
- WriteHeadHeaderHash(db, blockHead.Hash())
- WriteHeadBlockHash(db, blockFull.Hash())
-
- // Check that both heads are present, and different (i.e. two heads maintained)
- if entry := ReadHeadHeaderHash(db); entry != blockHead.Hash() {
- t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
- }
- if entry := ReadHeadBlockHash(db); entry != blockFull.Hash() {
- t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
- }
-}
-
-// Tests that receipts associated with a single block can be stored and retrieved.
-func TestBlockReceiptStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a live block since we need metadata to reconstruct the receipt
- tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
- tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)
-
- body := &types.Body{Transactions: types.Transactions{tx1, tx2}}
-
- // Create the two receipts to manage afterwards
- receipt1 := &types.Receipt{
- Status: types.ReceiptStatusFailed,
- CumulativeGasUsed: 1,
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x11})},
- {Address: common.BytesToAddress([]byte{0x01, 0x11})},
- },
- TxHash: tx1.Hash(),
- ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
- GasUsed: 111111,
- }
- receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})
-
- receipt2 := &types.Receipt{
- PostState: common.Hash{2}.Bytes(),
- CumulativeGasUsed: 2,
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x22})},
- {Address: common.BytesToAddress([]byte{0x02, 0x22})},
- },
- TxHash: tx2.Hash(),
- ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
- GasUsed: 222222,
- }
- receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
- receipts := []*types.Receipt{receipt1, receipt2}
-
- // Check that no receipt entries are in a pristine database
- header := &types.Header{Number: big.NewInt(0), Extra: []byte("test header")}
- hash := header.Hash()
- if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 {
- t.Fatalf("non existent receipts returned: %v", rs)
- }
- // Insert the body that corresponds to the receipts
- WriteHeader(db, header)
- WriteBody(db, hash, 0, body)
- if header := ReadHeader(db, hash, 0); header == nil {
- t.Fatal("header is nil")
- }
-
- // Insert the receipt slice into the database and check presence
- WriteReceipts(db, hash, 0, receipts)
- if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) == 0 {
- t.Fatal("no receipts returned")
- } else {
- if err := checkReceiptsRLP(rs, receipts); err != nil {
- t.Fatal(err)
- }
- }
- // Delete the body and ensure that the receipts are no longer returned (metadata can't be recomputed)
- DeleteHeader(db, hash, 0)
- DeleteBody(db, hash, 0)
- if header := ReadHeader(db, hash, 0); header != nil {
- t.Fatal("header is not nil")
- }
- if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); rs != nil {
- t.Fatalf("receipts returned when body was deleted: %v", rs)
- }
- // Ensure that receipts without metadata can be returned without the block body too
- if err := checkReceiptsRLP(ReadRawReceipts(db, hash, 0), receipts); err != nil {
- t.Fatal(err)
- }
- // Sanity check that body and header alone without the receipt is a full purge
- WriteHeader(db, header)
- WriteBody(db, hash, 0, body)
-
- DeleteReceipts(db, hash, 0)
- if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 {
- t.Fatalf("deleted receipts returned: %v", rs)
- }
-}
-
-func checkReceiptsRLP(have, want types.Receipts) error {
- if len(have) != len(want) {
- return fmt.Errorf("receipts sizes mismatch: have %d, want %d", len(have), len(want))
- }
- for i := 0; i < len(want); i++ {
- rlpHave, err := rlp.EncodeToBytes(have[i])
- if err != nil {
- return err
- }
- rlpWant, err := rlp.EncodeToBytes(want[i])
- if err != nil {
- return err
- }
- if !bytes.Equal(rlpHave, rlpWant) {
- return fmt.Errorf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
- }
- }
- return nil
-}
-
-func TestCanonicalHashIteration(t *testing.T) {
- var cases = []struct {
- from, to uint64
- limit int
- expect []uint64
- }{
- {1, 8, 0, nil},
- {1, 8, 1, []uint64{1}},
- {1, 8, 10, []uint64{1, 2, 3, 4, 5, 6, 7}},
- {1, 9, 10, []uint64{1, 2, 3, 4, 5, 6, 7, 8}},
- {2, 9, 10, []uint64{2, 3, 4, 5, 6, 7, 8}},
- {9, 10, 10, nil},
- }
- // Test empty db iteration
- db := NewMemoryDatabase()
- numbers, _ := ReadAllCanonicalHashes(db, 0, 10, 10)
- if len(numbers) != 0 {
- t.Fatalf("No entry should be returned to iterate an empty db")
- }
- // Fill database with testing data.
- for i := uint64(1); i <= 8; i++ {
- WriteCanonicalHash(db, common.Hash{}, i)
- }
- for i, c := range cases {
- numbers, _ := ReadAllCanonicalHashes(db, c.from, c.to, c.limit)
- if !reflect.DeepEqual(numbers, c.expect) {
- t.Fatalf("Case %d failed, want %v, got %v", i, c.expect, numbers)
- }
- }
-}
-
-func TestHashesInRange(t *testing.T) {
- mkHeader := func(number, seq int) *types.Header {
- h := types.Header{
- Difficulty: new(big.Int),
- Number: big.NewInt(int64(number)),
- GasLimit: uint64(seq),
- }
- return &h
- }
- db := NewMemoryDatabase()
- // For each number, write N versions of that particular number
- total := 0
- for i := 0; i < 15; i++ {
- for ii := 0; ii < i; ii++ {
- WriteHeader(db, mkHeader(i, ii))
- total++
- }
- }
- if have, want := len(ReadAllHashesInRange(db, 10, 10)), 10; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashesInRange(db, 10, 9)), 0; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashesInRange(db, 0, 100)), total; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashesInRange(db, 9, 10)), 9+10; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashes(db, 10)), 10; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashes(db, 16)), 0; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashes(db, 1)), 1; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
-}
-
-type fullLogRLP struct {
- Address common.Address
- Topics []common.Hash
- Data []byte
- BlockNumber uint64
- TxHash common.Hash
- TxIndex uint
- BlockHash common.Hash
- Index uint
-}
-
-func newFullLogRLP(l *types.Log) *fullLogRLP {
- return &fullLogRLP{
- Address: l.Address,
- Topics: l.Topics,
- Data: l.Data,
- BlockNumber: l.BlockNumber,
- TxHash: l.TxHash,
- TxIndex: l.TxIndex,
- BlockHash: l.BlockHash,
- Index: l.Index,
- }
-}
-
-// Tests that logs associated with a single block can be retrieved.
-func TestReadLogs(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a live block since we need metadata to reconstruct the receipt
- tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
- tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)
-
- body := &types.Body{Transactions: types.Transactions{tx1, tx2}}
-
- // Create the two receipts to manage afterwards
- receipt1 := &types.Receipt{
- Status: types.ReceiptStatusFailed,
- CumulativeGasUsed: 1,
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x11})},
- {Address: common.BytesToAddress([]byte{0x01, 0x11})},
- },
- TxHash: tx1.Hash(),
- ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
- GasUsed: 111111,
- }
- receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})
-
- receipt2 := &types.Receipt{
- PostState: common.Hash{2}.Bytes(),
- CumulativeGasUsed: 2,
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x22})},
- {Address: common.BytesToAddress([]byte{0x02, 0x22})},
- },
- TxHash: tx2.Hash(),
- ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
- GasUsed: 222222,
- }
- receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
- receipts := []*types.Receipt{receipt1, receipt2}
-
- hash := common.BytesToHash([]byte{0x03, 0x14})
- // Check that no receipt entries are in a pristine database
- if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 {
- t.Fatalf("non existent receipts returned: %v", rs)
- }
- // Insert the body that corresponds to the receipts
- WriteBody(db, hash, 0, body)
-
- // Insert the receipt slice into the database and check presence
- WriteReceipts(db, hash, 0, receipts)
-
- logs := ReadLogs(db, hash, 0)
- if len(logs) == 0 {
- t.Fatalf("no logs returned")
- }
- if have, want := len(logs), 2; have != want {
- t.Fatalf("unexpected number of logs returned, have %d want %d", have, want)
- }
- if have, want := len(logs[0]), 2; have != want {
- t.Fatalf("unexpected number of logs[0] returned, have %d want %d", have, want)
- }
- if have, want := len(logs[1]), 2; have != want {
- t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want)
- }
-
- for i, pr := range receipts {
- for j, pl := range pr.Logs {
- rlpHave, err := rlp.EncodeToBytes(newFullLogRLP(logs[i][j]))
- if err != nil {
- t.Fatal(err)
- }
- rlpWant, err := rlp.EncodeToBytes(newFullLogRLP(pl))
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(rlpHave, rlpWant) {
- t.Fatalf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
- }
- }
- }
-}
-
-func TestDeriveLogFields(t *testing.T) {
- // Create a few transactions to have receipts for
- to2 := common.HexToAddress("0x2")
- to3 := common.HexToAddress("0x3")
- txs := types.Transactions{
- types.NewTx(&types.LegacyTx{
- Nonce: 1,
- Value: big.NewInt(1),
- Gas: 1,
- GasPrice: big.NewInt(1),
- }),
- types.NewTx(&types.LegacyTx{
- To: &to2,
- Nonce: 2,
- Value: big.NewInt(2),
- Gas: 2,
- GasPrice: big.NewInt(2),
- }),
- types.NewTx(&types.AccessListTx{
- To: &to3,
- Nonce: 3,
- Value: big.NewInt(3),
- Gas: 3,
- GasPrice: big.NewInt(3),
- }),
- }
- // Create the corresponding receipts
- receipts := []*receiptLogs{
- {
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x11})},
- {Address: common.BytesToAddress([]byte{0x01, 0x11})},
- },
- },
- {
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x22})},
- {Address: common.BytesToAddress([]byte{0x02, 0x22})},
- },
- },
- {
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x33})},
- {Address: common.BytesToAddress([]byte{0x03, 0x33})},
- },
- },
- }
-
- // Derive log metadata fields
- number := big.NewInt(1)
- hash := common.BytesToHash([]byte{0x03, 0x14})
- if err := deriveLogFields(receipts, hash, number.Uint64(), txs); err != nil {
- t.Fatal(err)
- }
-
- // Iterate over all the computed fields and check that they're correct
- logIndex := uint(0)
- for i := range receipts {
- for j := range receipts[i].Logs {
- if receipts[i].Logs[j].BlockNumber != number.Uint64() {
- t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64())
- }
- if receipts[i].Logs[j].BlockHash != hash {
- t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String())
- }
- if receipts[i].Logs[j].TxHash != txs[i].Hash() {
- t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
- }
- if receipts[i].Logs[j].TxIndex != uint(i) {
- t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
- }
- if receipts[i].Logs[j].Index != logIndex {
- t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex)
- }
- logIndex++
- }
- }
-}
-
-func BenchmarkDecodeRLPLogs(b *testing.B) {
- // Encoded receipts from block 0x14ee094309fbe8f70b65f45ebcc08fb33f126942d97464aad5eb91cfd1e2d269
- buf, err := os.ReadFile("testdata/stored_receipts.bin")
- if err != nil {
- b.Fatal(err)
- }
- b.Run("ReceiptForStorage", func(b *testing.B) {
- b.ReportAllocs()
- var r []*types.ReceiptForStorage
- for i := 0; i < b.N; i++ {
- if err := rlp.DecodeBytes(buf, &r); err != nil {
- b.Fatal(err)
- }
- }
- })
- b.Run("rlpLogs", func(b *testing.B) {
- b.ReportAllocs()
- var r []*receiptLogs
- for i := 0; i < b.N; i++ {
- if err := rlp.DecodeBytes(buf, &r); err != nil {
- b.Fatal(err)
- }
- }
- })
-}
diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go
deleted file mode 100644
index afb50354c9..0000000000
--- a/core/rawdb/accessors_indexes.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "bytes"
- "math/big"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/params"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/rlp"
-)
-
-// ReadTxLookupEntry retrieves the positional metadata associated with a transaction
-// hash to allow retrieving the transaction or receipt by hash.
-func ReadTxLookupEntry(db ethdb.Reader, hash common.Hash) *uint64 {
- data, _ := db.Get(txLookupKey(hash))
- if len(data) == 0 {
- return nil
- }
- // Database v6 tx lookup just stores the block number
- if len(data) < common.HashLength {
- number := new(big.Int).SetBytes(data).Uint64()
- return &number
- }
- // Database v4-v5 tx lookup format just stores the hash
- if len(data) == common.HashLength {
- return ReadHeaderNumber(db, common.BytesToHash(data))
- }
- // Finally try database v3 tx lookup format
- var entry LegacyTxLookupEntry
- if err := rlp.DecodeBytes(data, &entry); err != nil {
- log.Error("Invalid transaction lookup entry RLP", "hash", hash, "blob", data, "err", err)
- return nil
- }
- return &entry.BlockIndex
-}
-
-// writeTxLookupEntry stores a positional metadata for a transaction,
-// enabling hash based transaction and receipt lookups.
-func writeTxLookupEntry(db ethdb.KeyValueWriter, hash common.Hash, numberBytes []byte) {
- if err := db.Put(txLookupKey(hash), numberBytes); err != nil {
- log.Crit("Failed to store transaction lookup entry", "err", err)
- }
-}
-
-// WriteTxLookupEntries is identical to WriteTxLookupEntry, but it works on
-// a list of hashes
-func WriteTxLookupEntries(db ethdb.KeyValueWriter, number uint64, hashes []common.Hash) {
- numberBytes := new(big.Int).SetUint64(number).Bytes()
- for _, hash := range hashes {
- writeTxLookupEntry(db, hash, numberBytes)
- }
-}
-
-// WriteTxLookupEntriesByBlock stores a positional metadata for every transaction from
-// a block, enabling hash based transaction and receipt lookups.
-func WriteTxLookupEntriesByBlock(db ethdb.KeyValueWriter, block *types.Block) {
- numberBytes := block.Number().Bytes()
- for _, tx := range block.Transactions() {
- writeTxLookupEntry(db, tx.Hash(), numberBytes)
- }
-}
-
-// DeleteTxLookupEntry removes all transaction data associated with a hash.
-func DeleteTxLookupEntry(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(txLookupKey(hash)); err != nil {
- log.Crit("Failed to delete transaction lookup entry", "err", err)
- }
-}
-
-// DeleteTxLookupEntries removes all transaction lookups for a given block.
-func DeleteTxLookupEntries(db ethdb.KeyValueWriter, hashes []common.Hash) {
- for _, hash := range hashes {
- DeleteTxLookupEntry(db, hash)
- }
-}
-
-// ReadTransaction retrieves a specific transaction from the database, along with
-// its added positional metadata.
-func ReadTransaction(db ethdb.Reader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
- blockNumber := ReadTxLookupEntry(db, hash)
- if blockNumber == nil {
- return nil, common.Hash{}, 0, 0
- }
- blockHash := ReadCanonicalHash(db, *blockNumber)
- if blockHash == (common.Hash{}) {
- return nil, common.Hash{}, 0, 0
- }
- body := ReadBody(db, blockHash, *blockNumber)
- if body == nil {
- log.Error("Transaction referenced missing", "number", *blockNumber, "hash", blockHash)
- return nil, common.Hash{}, 0, 0
- }
- for txIndex, tx := range body.Transactions {
- if tx.Hash() == hash {
- return tx, blockHash, *blockNumber, uint64(txIndex)
- }
- }
- log.Error("Transaction not found", "number", *blockNumber, "hash", blockHash, "txhash", hash)
- return nil, common.Hash{}, 0, 0
-}
-
-// ReadReceipt retrieves a specific transaction receipt from the database, along with
-// its added positional metadata.
-func ReadReceipt(db ethdb.Reader, hash common.Hash, config *params.ChainConfig) (*types.Receipt, common.Hash, uint64, uint64) {
- // Retrieve the context of the receipt based on the transaction hash
- blockNumber := ReadTxLookupEntry(db, hash)
- if blockNumber == nil {
- return nil, common.Hash{}, 0, 0
- }
- blockHash := ReadCanonicalHash(db, *blockNumber)
- if blockHash == (common.Hash{}) {
- return nil, common.Hash{}, 0, 0
- }
- blockHeader := ReadHeader(db, blockHash, *blockNumber)
- if blockHeader == nil {
- return nil, common.Hash{}, 0, 0
- }
- // Read all the receipts from the block and return the one with the matching hash
- receipts := ReadReceipts(db, blockHash, *blockNumber, blockHeader.Time, config)
- for receiptIndex, receipt := range receipts {
- if receipt.TxHash == hash {
- return receipt, blockHash, *blockNumber, uint64(receiptIndex)
- }
- }
- log.Error("Receipt not found", "number", *blockNumber, "hash", blockHash, "txhash", hash)
- return nil, common.Hash{}, 0, 0
-}
-
-// ReadBloomBits retrieves the compressed bloom bit vector belonging to the given
-// section and bit index from the.
-func ReadBloomBits(db ethdb.KeyValueReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
- return db.Get(bloomBitsKey(bit, section, head))
-}
-
-// WriteBloomBits stores the compressed bloom bits vector belonging to the given
-// section and bit index.
-func WriteBloomBits(db ethdb.KeyValueWriter, bit uint, section uint64, head common.Hash, bits []byte) {
- if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil {
- log.Crit("Failed to store bloom bits", "err", err)
- }
-}
-
-// DeleteBloombits removes all compressed bloom bits vector belonging to the
-// given section range and bit index.
-func DeleteBloombits(db ethdb.Database, bit uint, from uint64, to uint64) {
- start, end := bloomBitsKey(bit, from, common.Hash{}), bloomBitsKey(bit, to, common.Hash{})
- it := db.NewIterator(nil, start)
- defer it.Release()
-
- for it.Next() {
- if bytes.Compare(it.Key(), end) >= 0 {
- break
- }
- if len(it.Key()) != len(bloomBitsPrefix)+2+8+32 {
- continue
- }
- db.Delete(it.Key())
- }
- if it.Error() != nil {
- log.Crit("Failed to delete bloom bits", "err", it.Error())
- }
-}
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
deleted file mode 100644
index e4fac23e75..0000000000
--- a/core/rawdb/accessors_indexes_test.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "bytes"
- "math/big"
- "testing"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/internal/blocktest"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/rlp"
-)
-
-var newTestHasher = blocktest.NewHasher
-
-// Tests that positional lookup metadata can be stored and retrieved.
-func TestLookupStorage(t *testing.T) {
- tests := []struct {
- name string
- writeTxLookupEntriesByBlock func(ethdb.Writer, *types.Block)
- }{
- {
- "DatabaseV6",
- func(db ethdb.Writer, block *types.Block) {
- WriteTxLookupEntriesByBlock(db, block)
- },
- },
- {
- "DatabaseV4-V5",
- func(db ethdb.Writer, block *types.Block) {
- for _, tx := range block.Transactions() {
- db.Put(txLookupKey(tx.Hash()), block.Hash().Bytes())
- }
- },
- },
- {
- "DatabaseV3",
- func(db ethdb.Writer, block *types.Block) {
- for index, tx := range block.Transactions() {
- entry := LegacyTxLookupEntry{
- BlockHash: block.Hash(),
- BlockIndex: block.NumberU64(),
- Index: uint64(index),
- }
- data, _ := rlp.EncodeToBytes(entry)
- db.Put(txLookupKey(tx.Hash()), data)
- }
- },
- },
- }
-
- for _, tc := range tests {
- t.Run(tc.name, func(t *testing.T) {
- db := NewMemoryDatabase()
-
- tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
- tx2 := types.NewTransaction(2, common.BytesToAddress([]byte{0x22}), big.NewInt(222), 2222, big.NewInt(22222), []byte{0x22, 0x22, 0x22})
- tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
- txs := []*types.Transaction{tx1, tx2, tx3}
-
- block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher())
-
- // Check that no transactions entries are in a pristine database
- for i, tx := range txs {
- if txn, _, _, _ := ReadTransaction(db, tx.Hash()); txn != nil {
- t.Fatalf("tx #%d [%x]: non existent transaction returned: %v", i, tx.Hash(), txn)
- }
- }
- // Insert all the transactions into the database, and verify contents
- WriteCanonicalHash(db, block.Hash(), block.NumberU64())
- WriteBlock(db, block)
- tc.writeTxLookupEntriesByBlock(db, block)
-
- for i, tx := range txs {
- if txn, hash, number, index := ReadTransaction(db, tx.Hash()); txn == nil {
- t.Fatalf("tx #%d [%x]: transaction not found", i, tx.Hash())
- } else {
- if hash != block.Hash() || number != block.NumberU64() || index != uint64(i) {
- t.Fatalf("tx #%d [%x]: positional metadata mismatch: have %x/%d/%d, want %x/%v/%v", i, tx.Hash(), hash, number, index, block.Hash(), block.NumberU64(), i)
- }
- if tx.Hash() != txn.Hash() {
- t.Fatalf("tx #%d [%x]: transaction mismatch: have %v, want %v", i, tx.Hash(), txn, tx)
- }
- }
- }
- // Delete the transactions and check purge
- for i, tx := range txs {
- DeleteTxLookupEntry(db, tx.Hash())
- if txn, _, _, _ := ReadTransaction(db, tx.Hash()); txn != nil {
- t.Fatalf("tx #%d [%x]: deleted transaction returned: %v", i, tx.Hash(), txn)
- }
- }
- })
- }
-}
-
-func TestDeleteBloomBits(t *testing.T) {
- // Prepare testing data
- db := NewMemoryDatabase()
-
- genesisHash0 := common.BytesToHash([]byte{1, 2, 3, 4, 5})
- genesisHash1 := common.BytesToHash([]byte{5, 4, 3, 2, 1})
- for i := uint(0); i < 2; i++ {
- for s := uint64(0); s < 2; s++ {
- WriteBloomBits(db, i, s, genesisHash0, []byte{0x01, 0x02})
- WriteBloomBits(db, i, s, genesisHash1, []byte{0x01, 0x02})
- }
- }
- check := func(bit uint, section uint64, head common.Hash, exist bool) {
- bits, _ := ReadBloomBits(db, bit, section, head)
- if exist && !bytes.Equal(bits, []byte{0x01, 0x02}) {
- t.Fatalf("Bloombits mismatch")
- }
- if !exist && len(bits) > 0 {
- t.Fatalf("Bloombits should be removed")
- }
- }
- // Check the existence of written data.
- check(0, 0, genesisHash0, true)
- check(0, 0, genesisHash1, true)
-
- // Check the existence of deleted data.
- DeleteBloombits(db, 0, 0, 1)
- check(0, 0, genesisHash0, false)
- check(0, 0, genesisHash1, false)
- check(0, 1, genesisHash0, true)
- check(0, 1, genesisHash1, true)
-
- // Check the existence of deleted data.
- DeleteBloombits(db, 0, 0, 2)
- check(0, 0, genesisHash0, false)
- check(0, 0, genesisHash1, false)
- check(0, 1, genesisHash0, false)
- check(0, 1, genesisHash1, false)
-
- // Bit1 shouldn't be affect.
- check(1, 0, genesisHash0, true)
- check(1, 0, genesisHash1, true)
- check(1, 1, genesisHash0, true)
- check(1, 1, genesisHash1, true)
-}
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
deleted file mode 100644
index 3bfe634a12..0000000000
--- a/core/rawdb/accessors_metadata.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "encoding/json"
- "time"
-
- "github.com/ava-labs/coreth/params"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/rlp"
-)
-
-// ReadDatabaseVersion retrieves the version number of the database.
-func ReadDatabaseVersion(db ethdb.KeyValueReader) *uint64 {
- var version uint64
-
- enc, _ := db.Get(databaseVersionKey)
- if len(enc) == 0 {
- return nil
- }
- if err := rlp.DecodeBytes(enc, &version); err != nil {
- return nil
- }
-
- return &version
-}
-
-// WriteDatabaseVersion stores the version number of the database
-func WriteDatabaseVersion(db ethdb.KeyValueWriter, version uint64) {
- enc, err := rlp.EncodeToBytes(version)
- if err != nil {
- log.Crit("Failed to encode database version", "err", err)
- }
- if err = db.Put(databaseVersionKey, enc); err != nil {
- log.Crit("Failed to store the database version", "err", err)
- }
-}
-
-// ReadChainConfig retrieves the consensus settings based on the given genesis hash.
-func ReadChainConfig(db ethdb.KeyValueReader, hash common.Hash) *params.ChainConfig {
- data, _ := db.Get(configKey(hash))
- if len(data) == 0 {
- return nil
- }
- var config params.ChainConfig
- if err := json.Unmarshal(data, &config); err != nil {
- log.Error("Invalid chain config JSON", "hash", hash, "err", err)
- return nil
- }
- return &config
-}
-
-// WriteChainConfig writes the chain config settings to the database.
-func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.ChainConfig) {
- if cfg == nil {
- return
- }
- data, err := json.Marshal(cfg)
- if err != nil {
- log.Crit("Failed to JSON encode chain config", "err", err)
- }
- if err := db.Put(configKey(hash), data); err != nil {
- log.Crit("Failed to store chain config", "err", err)
- }
-}
-
-// crashList is a list of unclean-shutdown-markers, for rlp-encoding to the
-// database
-type crashList struct {
- Discarded uint64 // how many ucs have we deleted
- Recent []uint64 // unix timestamps of 10 latest unclean shutdowns
-}
-
-const crashesToKeep = 10
-
-// PushUncleanShutdownMarker appends a new unclean shutdown marker and returns
-// the previous data
-// - a list of timestamps
-// - a count of how many old unclean-shutdowns have been discarded
-func PushUncleanShutdownMarker(db ethdb.KeyValueStore) ([]uint64, uint64, error) {
- var uncleanShutdowns crashList
- // Read old data
- if data, err := db.Get(uncleanShutdownKey); err == nil {
- if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
- return nil, 0, err
- }
- }
- var discarded = uncleanShutdowns.Discarded
- var previous = make([]uint64, len(uncleanShutdowns.Recent))
- copy(previous, uncleanShutdowns.Recent)
- // Add a new (but cap it)
- uncleanShutdowns.Recent = append(uncleanShutdowns.Recent, uint64(time.Now().Unix()))
- if count := len(uncleanShutdowns.Recent); count > crashesToKeep+1 {
- numDel := count - (crashesToKeep + 1)
- uncleanShutdowns.Recent = uncleanShutdowns.Recent[numDel:]
- uncleanShutdowns.Discarded += uint64(numDel)
- }
- // And save it again
- data, _ := rlp.EncodeToBytes(uncleanShutdowns)
- if err := db.Put(uncleanShutdownKey, data); err != nil {
- log.Warn("Failed to write unclean-shutdown marker", "err", err)
- return nil, 0, err
- }
- return previous, discarded, nil
-}
-
-// PopUncleanShutdownMarker removes the last unclean shutdown marker
-func PopUncleanShutdownMarker(db ethdb.KeyValueStore) {
- var uncleanShutdowns crashList
- // Read old data
- if data, err := db.Get(uncleanShutdownKey); err != nil {
- log.Warn("Error reading unclean shutdown markers", "error", err)
- } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
- log.Error("Error decoding unclean shutdown markers", "error", err) // Should mos def _not_ happen
- }
- if l := len(uncleanShutdowns.Recent); l > 0 {
- uncleanShutdowns.Recent = uncleanShutdowns.Recent[:l-1]
- }
- data, _ := rlp.EncodeToBytes(uncleanShutdowns)
- if err := db.Put(uncleanShutdownKey, data); err != nil {
- log.Warn("Failed to clear unclean-shutdown marker", "err", err)
- }
-}
-
-// UpdateUncleanShutdownMarker updates the last marker's timestamp to now.
-func UpdateUncleanShutdownMarker(db ethdb.KeyValueStore) {
- var uncleanShutdowns crashList
- // Read old data
- if data, err := db.Get(uncleanShutdownKey); err != nil {
- log.Warn("Error reading unclean shutdown markers", "error", err)
- } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
- log.Warn("Error decoding unclean shutdown markers", "error", err)
- }
- // This shouldn't happen because we push a marker on Backend instantiation
- count := len(uncleanShutdowns.Recent)
- if count == 0 {
- log.Warn("No unclean shutdown marker to update")
- return
- }
- uncleanShutdowns.Recent[count-1] = uint64(time.Now().Unix())
- data, _ := rlp.EncodeToBytes(uncleanShutdowns)
- if err := db.Put(uncleanShutdownKey, data); err != nil {
- log.Warn("Failed to write unclean-shutdown marker", "err", err)
- }
-}
-
-// WriteTimeMarker writes a marker of the current time in the db at [key]
-func WriteTimeMarker(db ethdb.KeyValueStore, key []byte) error {
- data, err := rlp.EncodeToBytes(uint64(time.Now().Unix()))
- if err != nil {
- return err
- }
- return db.Put(key, data)
-}
-
-// ReadTimeMarker reads the timestamp stored at [key]
-func ReadTimeMarker(db ethdb.KeyValueStore, key []byte) (time.Time, error) {
- data, err := db.Get(key)
- if err != nil {
- return time.Time{}, err
- }
-
- var lastRun uint64
- if err := rlp.DecodeBytes(data, &lastRun); err != nil {
- return time.Time{}, err
- }
-
- return time.Unix(int64(lastRun), 0), nil
-}
-
-// DeleteTimeMarker deletes any value stored at [key]
-func DeleteTimeMarker(db ethdb.KeyValueStore, key []byte) error {
- return db.Delete(key)
-}
-
-// WriteOfflinePruning writes a marker of the last attempt to run offline pruning
-// The marker is written when offline pruning completes and is deleted when the node
-// is started successfully with offline pruning disabled. This ensures users must
-// disable offline pruning and start their node successfully between runs of offline
-// pruning.
-func WriteOfflinePruning(db ethdb.KeyValueStore) error {
- return WriteTimeMarker(db, offlinePruningKey)
-}
-
-// ReadOfflinePruning reads the most recent timestamp of an attempt to run offline
-// pruning if present.
-func ReadOfflinePruning(db ethdb.KeyValueStore) (time.Time, error) {
- return ReadTimeMarker(db, offlinePruningKey)
-}
-
-// DeleteOfflinePruning deletes any marker of the last attempt to run offline pruning.
-func DeleteOfflinePruning(db ethdb.KeyValueStore) error {
- return DeleteTimeMarker(db, offlinePruningKey)
-}
-
-// WritePopulateMissingTries writes a marker for the current attempt to populate
-// missing tries.
-func WritePopulateMissingTries(db ethdb.KeyValueStore) error {
- return WriteTimeMarker(db, populateMissingTriesKey)
-}
-
-// ReadPopulateMissingTries reads the most recent timestamp of an attempt to
-// re-populate missing trie nodes.
-func ReadPopulateMissingTries(db ethdb.KeyValueStore) (time.Time, error) {
- return ReadTimeMarker(db, populateMissingTriesKey)
-}
-
-// DeletePopulateMissingTries deletes any marker of the last attempt to
-// re-populate missing trie nodes.
-func DeletePopulateMissingTries(db ethdb.KeyValueStore) error {
- return DeleteTimeMarker(db, populateMissingTriesKey)
-}
-
-// WritePruningDisabled writes a marker to track whether the node has ever run
-// with pruning disabled.
-func WritePruningDisabled(db ethdb.KeyValueStore) error {
- return db.Put(pruningDisabledKey, nil)
-}
-
-// HasPruningDisabled returns true if there is a marker present indicating that
-// the node has run with pruning disabled at some pooint.
-func HasPruningDisabled(db ethdb.KeyValueStore) (bool, error) {
- return db.Has(pruningDisabledKey)
-}
-
-// DeletePruningDisabled deletes the marker indicating that the node has
-// run with pruning disabled.
-func DeletePruningDisabled(db ethdb.KeyValueStore) error {
- return db.Delete(pruningDisabledKey)
-}
-
-// WriteAcceptorTip writes [hash] as the last accepted block that has been fully processed.
-func WriteAcceptorTip(db ethdb.KeyValueWriter, hash common.Hash) error {
- return db.Put(acceptorTipKey, hash[:])
-}
-
-// ReadAcceptorTip reads the hash of the last accepted block that was fully processed.
-// If there is no value present (the index is being initialized for the first time), then the
-// empty hash is returned.
-func ReadAcceptorTip(db ethdb.KeyValueReader) (common.Hash, error) {
- has, err := db.Has(acceptorTipKey)
- // If the index is not present on disk, the [acceptorTipKey] index has not been initialized yet.
- if !has || err != nil {
- return common.Hash{}, err
- }
- h, err := db.Get(acceptorTipKey)
- if err != nil {
- return common.Hash{}, err
- }
- return common.BytesToHash(h), nil
-}
diff --git a/core/rawdb/accessors_metadata_ext.go b/core/rawdb/accessors_metadata_ext.go
new file mode 100644
index 0000000000..54438d288b
--- /dev/null
+++ b/core/rawdb/accessors_metadata_ext.go
@@ -0,0 +1,114 @@
+// (c) 2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package rawdb
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/rlp"
+)
+
+// writeCurrentTimeMarker writes a marker of the current time in the db at `key`.
+func writeCurrentTimeMarker(db ethdb.KeyValueStore, key []byte) error {
+ data, err := rlp.EncodeToBytes(uint64(time.Now().Unix()))
+ if err != nil {
+ return err
+ }
+ return db.Put(key, data)
+}
+
+// readTimeMarker reads the timestamp stored at `key`
+func readTimeMarker(db ethdb.KeyValueStore, key []byte) (time.Time, error) {
+ data, err := db.Get(key)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ var unix uint64
+ if err := rlp.DecodeBytes(data, &unix); err != nil {
+ return time.Time{}, err
+ }
+
+ return time.Unix(int64(unix), 0), nil
+}
+
+// WriteOfflinePruning writes a time marker of the last attempt to run offline pruning.
+// The marker is written when offline pruning completes and is deleted when the node
+// is started successfully with offline pruning disabled. This ensures users must
+// disable offline pruning and start their node successfully between runs of offline
+// pruning.
+func WriteOfflinePruning(db ethdb.KeyValueStore) error {
+ return writeCurrentTimeMarker(db, offlinePruningKey)
+}
+
+// ReadOfflinePruning reads the most recent timestamp of an attempt to run offline
+// pruning if present.
+func ReadOfflinePruning(db ethdb.KeyValueStore) (time.Time, error) {
+ return readTimeMarker(db, offlinePruningKey)
+}
+
+// DeleteOfflinePruning deletes any marker of the last attempt to run offline pruning.
+func DeleteOfflinePruning(db ethdb.KeyValueStore) error {
+ return db.Delete(offlinePruningKey)
+}
+
+// WritePopulateMissingTries writes a marker for the current attempt to populate
+// missing tries.
+func WritePopulateMissingTries(db ethdb.KeyValueStore) error {
+ return writeCurrentTimeMarker(db, populateMissingTriesKey)
+}
+
+// ReadPopulateMissingTries reads the most recent timestamp of an attempt to
+// re-populate missing trie nodes.
+func ReadPopulateMissingTries(db ethdb.KeyValueStore) (time.Time, error) {
+ return readTimeMarker(db, populateMissingTriesKey)
+}
+
+// DeletePopulateMissingTries deletes any marker of the last attempt to
+// re-populate missing trie nodes.
+func DeletePopulateMissingTries(db ethdb.KeyValueStore) error {
+ return db.Delete(populateMissingTriesKey)
+}
+
+// WritePruningDisabled writes a marker to track whether the node has ever run
+// with pruning disabled.
+func WritePruningDisabled(db ethdb.KeyValueStore) error {
+ return db.Put(pruningDisabledKey, nil)
+}
+
+// HasPruningDisabled returns true if there is a marker present indicating that
+// the node has run with pruning disabled at some point.
+func HasPruningDisabled(db ethdb.KeyValueStore) (bool, error) {
+ return db.Has(pruningDisabledKey)
+}
+
+// WriteAcceptorTip writes `hash` as the last accepted block that has been fully processed.
+func WriteAcceptorTip(db ethdb.KeyValueWriter, hash common.Hash) error {
+ return db.Put(acceptorTipKey, hash[:])
+}
+
+// ReadAcceptorTip reads the hash of the last accepted block that was fully processed.
+// If there is no value present (the index is being initialized for the first time), then the
+// empty hash is returned.
+func ReadAcceptorTip(db ethdb.KeyValueReader) (common.Hash, error) {
+ has, err := db.Has(acceptorTipKey)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ if !has {
+ // If the index is not present on disk, the [acceptorTipKey] index has not been initialized yet.
+ return common.Hash{}, nil
+ }
+ h, err := db.Get(acceptorTipKey)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ if len(h) != common.HashLength {
+ return common.Hash{}, fmt.Errorf("value has incorrect length %d", len(h))
+ }
+ return common.BytesToHash(h), nil
+}
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
deleted file mode 100644
index f091b63831..0000000000
--- a/core/rawdb/accessors_snapshot.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
-)
-
-// ReadSnapshotRoot retrieves the root of the block whose state is contained in
-// the persisted snapshot.
-func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
- data, _ := db.Get(snapshotRootKey)
- if len(data) != common.HashLength {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// WriteSnapshotRoot stores the root of the block whose state is contained in
-// the persisted snapshot.
-func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
- if err := db.Put(snapshotRootKey, root[:]); err != nil {
- log.Crit("Failed to store snapshot root", "err", err)
- }
-}
-
-// DeleteSnapshotRoot deletes the root of the block whose state is contained in
-// the persisted snapshot. Since snapshots are not immutable, this method can
-// be used during updates, so a crash or failure will mark the entire snapshot
-// invalid.
-func DeleteSnapshotRoot(db ethdb.KeyValueWriter) {
- if err := db.Delete(snapshotRootKey); err != nil {
- log.Crit("Failed to remove snapshot root", "err", err)
- }
-}
-
-// ReadSnapshotBlockHash retrieves the hash of the block whose state is contained in
-// the persisted snapshot.
-func ReadSnapshotBlockHash(db ethdb.KeyValueReader) common.Hash {
- data, _ := db.Get(snapshotBlockHashKey)
- if len(data) != common.HashLength {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// WriteSnapshotBlockHash stores the root of the block whose state is contained in
-// the persisted snapshot.
-func WriteSnapshotBlockHash(db ethdb.KeyValueWriter, blockHash common.Hash) {
- if err := db.Put(snapshotBlockHashKey, blockHash[:]); err != nil {
- log.Crit("Failed to store snapshot block hash", "err", err)
- }
-}
-
-// DeleteSnapshotBlockHash deletes the hash of the block whose state is contained in
-// the persisted snapshot. Since snapshots are not immutable, this method can
-// be used during updates, so a crash or failure will mark the entire snapshot
-// invalid.
-func DeleteSnapshotBlockHash(db ethdb.KeyValueWriter) {
- if err := db.Delete(snapshotBlockHashKey); err != nil {
- log.Crit("Failed to remove snapshot block hash", "err", err)
- }
-}
-
-// ReadAccountSnapshot retrieves the snapshot entry of an account trie leaf.
-func ReadAccountSnapshot(db ethdb.KeyValueReader, hash common.Hash) []byte {
- data, _ := db.Get(accountSnapshotKey(hash))
- return data
-}
-
-// WriteAccountSnapshot stores the snapshot entry of an account trie leaf.
-func WriteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash, entry []byte) {
- if err := db.Put(accountSnapshotKey(hash), entry); err != nil {
- log.Crit("Failed to store account snapshot", "err", err)
- }
-}
-
-// DeleteAccountSnapshot removes the snapshot entry of an account trie leaf.
-func DeleteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(accountSnapshotKey(hash)); err != nil {
- log.Crit("Failed to delete account snapshot", "err", err)
- }
-}
-
-// ReadStorageSnapshot retrieves the snapshot entry of an storage trie leaf.
-func ReadStorageSnapshot(db ethdb.KeyValueReader, accountHash, storageHash common.Hash) []byte {
- data, _ := db.Get(storageSnapshotKey(accountHash, storageHash))
- return data
-}
-
-// WriteStorageSnapshot stores the snapshot entry of an storage trie leaf.
-func WriteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash, entry []byte) {
- if err := db.Put(storageSnapshotKey(accountHash, storageHash), entry); err != nil {
- log.Crit("Failed to store storage snapshot", "err", err)
- }
-}
-
-// DeleteStorageSnapshot removes the snapshot entry of an storage trie leaf.
-func DeleteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash) {
- if err := db.Delete(storageSnapshotKey(accountHash, storageHash)); err != nil {
- log.Crit("Failed to delete storage snapshot", "err", err)
- }
-}
-
-// IterateStorageSnapshots returns an iterator for walking the entire storage
-// space of a specific account.
-func IterateStorageSnapshots(db ethdb.Iteratee, accountHash common.Hash) ethdb.Iterator {
- return NewKeyLengthIterator(db.NewIterator(storageSnapshotsKey(accountHash), nil), len(SnapshotStoragePrefix)+2*common.HashLength)
-}
-
-// IterateAccountSnapshots returns an iterator for walking all of the accounts in the snapshot
-func IterateAccountSnapshots(db ethdb.Iteratee) ethdb.Iterator {
- return NewKeyLengthIterator(db.NewIterator(SnapshotAccountPrefix, nil), len(SnapshotAccountPrefix)+common.HashLength)
-}
-
-// ReadSnapshotGenerator retrieves the serialized snapshot generator saved at
-// the last shutdown.
-func ReadSnapshotGenerator(db ethdb.KeyValueReader) []byte {
- data, _ := db.Get(snapshotGeneratorKey)
- return data
-}
-
-// WriteSnapshotGenerator stores the serialized snapshot generator to save at
-// shutdown.
-func WriteSnapshotGenerator(db ethdb.KeyValueWriter, generator []byte) {
- if err := db.Put(snapshotGeneratorKey, generator); err != nil {
- log.Crit("Failed to store snapshot generator", "err", err)
- }
-}
diff --git a/core/rawdb/accessors_snapshot_ext.go b/core/rawdb/accessors_snapshot_ext.go
new file mode 100644
index 0000000000..c842056f3a
--- /dev/null
+++ b/core/rawdb/accessors_snapshot_ext.go
@@ -0,0 +1,45 @@
+// (c) 2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package rawdb
+
+import (
+ "github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/log"
+)
+
+// ReadSnapshotBlockHash retrieves the hash of the block whose state is contained in
+// the persisted snapshot.
+func ReadSnapshotBlockHash(db ethdb.KeyValueReader) common.Hash {
+ data, _ := db.Get(snapshotBlockHashKey)
+ if len(data) != common.HashLength {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// WriteSnapshotBlockHash stores the hash of the block whose state is contained in
+// the persisted snapshot.
+func WriteSnapshotBlockHash(db ethdb.KeyValueWriter, blockHash common.Hash) {
+ if err := db.Put(snapshotBlockHashKey, blockHash[:]); err != nil {
+ log.Crit("Failed to store snapshot block hash", "err", err)
+ }
+}
+
+// DeleteSnapshotBlockHash deletes the hash of the block whose state is contained in
+// the persisted snapshot. Since snapshots are not immutable, this method can
+// be used during updates, so a crash or failure will mark the entire snapshot
+// invalid.
+func DeleteSnapshotBlockHash(db ethdb.KeyValueWriter) {
+ if err := db.Delete(snapshotBlockHashKey); err != nil {
+ log.Crit("Failed to remove snapshot block hash", "err", err)
+ }
+}
+
+// IterateAccountSnapshots returns an iterator for walking all of the accounts in the snapshot
+func IterateAccountSnapshots(db ethdb.Iteratee) ethdb.Iterator {
+ it := db.NewIterator(SnapshotAccountPrefix, nil)
+ keyLen := len(SnapshotAccountPrefix) + common.HashLength
+ return NewKeyLengthIterator(it, keyLen)
+}
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
deleted file mode 100644
index 509dfba818..0000000000
--- a/core/rawdb/accessors_state.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "encoding/binary"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
-)
-
-// ReadPreimage retrieves a single preimage of the provided hash.
-func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
- data, _ := db.Get(preimageKey(hash))
- return data
-}
-
-// WritePreimages writes the provided set of preimages to the database.
-func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
- for hash, preimage := range preimages {
- if err := db.Put(preimageKey(hash), preimage); err != nil {
- log.Crit("Failed to store trie preimage", "err", err)
- }
- }
- preimageCounter.Inc(int64(len(preimages)))
- preimageHitCounter.Inc(int64(len(preimages)))
-}
-
-// ReadCode retrieves the contract code of the provided code hash.
-func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
- // Try with the prefixed code scheme first and only. The legacy scheme was never used in coreth.
- data, _ := db.Get(codeKey(hash))
- return data
-}
-
-// HasCode checks if the contract code corresponding to the
-// provided code hash is present in the db.
-func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool {
- // Try with the prefixed code scheme first and only. The legacy scheme was never used in coreth.
- ok, _ := db.Has(codeKey(hash))
- return ok
-}
-
-// WriteCode writes the provided contract code database.
-func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
- if err := db.Put(codeKey(hash), code); err != nil {
- log.Crit("Failed to store contract code", "err", err)
- }
-}
-
-// DeleteCode deletes the specified contract code from the database.
-func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(codeKey(hash)); err != nil {
- log.Crit("Failed to delete contract code", "err", err)
- }
-}
-
-// ReadStateID retrieves the state id with the provided state root.
-func ReadStateID(db ethdb.KeyValueReader, root common.Hash) *uint64 {
- data, err := db.Get(stateIDKey(root))
- if err != nil || len(data) == 0 {
- return nil
- }
- number := binary.BigEndian.Uint64(data)
- return &number
-}
-
-// WriteStateID writes the provided state lookup to database.
-func WriteStateID(db ethdb.KeyValueWriter, root common.Hash, id uint64) {
- var buff [8]byte
- binary.BigEndian.PutUint64(buff[:], id)
- if err := db.Put(stateIDKey(root), buff[:]); err != nil {
- log.Crit("Failed to store state ID", "err", err)
- }
-}
-
-// DeleteStateID deletes the specified state lookup from the database.
-func DeleteStateID(db ethdb.KeyValueWriter, root common.Hash) {
- if err := db.Delete(stateIDKey(root)); err != nil {
- log.Crit("Failed to delete state ID", "err", err)
- }
-}
-
-// ReadPersistentStateID retrieves the id of the persistent state from the database.
-func ReadPersistentStateID(db ethdb.KeyValueReader) uint64 {
- data, _ := db.Get(persistentStateIDKey)
- if len(data) != 8 {
- return 0
- }
- return binary.BigEndian.Uint64(data)
-}
-
-// WritePersistentStateID stores the id of the persistent state into database.
-func WritePersistentStateID(db ethdb.KeyValueWriter, number uint64) {
- if err := db.Put(persistentStateIDKey, encodeBlockNumber(number)); err != nil {
- log.Crit("Failed to store the persistent state ID", "err", err)
- }
-}
-
-// ReadTrieJournal retrieves the serialized in-memory trie nodes of layers saved at
-// the last shutdown.
-func ReadTrieJournal(db ethdb.KeyValueReader) []byte {
- data, _ := db.Get(trieJournalKey)
- return data
-}
-
-// WriteTrieJournal stores the serialized in-memory trie nodes of layers to save at
-// shutdown.
-func WriteTrieJournal(db ethdb.KeyValueWriter, journal []byte) {
- if err := db.Put(trieJournalKey, journal); err != nil {
- log.Crit("Failed to store tries journal", "err", err)
- }
-}
-
-// DeleteTrieJournal deletes the serialized in-memory trie nodes of layers saved at
-// the last shutdown.
-func DeleteTrieJournal(db ethdb.KeyValueWriter) {
- if err := db.Delete(trieJournalKey); err != nil {
- log.Crit("Failed to remove tries journal", "err", err)
- }
-}
diff --git a/core/rawdb/accessors_state_sync.go b/core/rawdb/accessors_state_sync.go
index 538f9768cb..1b4b1ad1ae 100644
--- a/core/rawdb/accessors_state_sync.go
+++ b/core/rawdb/accessors_state_sync.go
@@ -31,14 +31,14 @@ func WriteSyncRoot(db ethdb.KeyValueWriter, root common.Hash) error {
return db.Put(syncRootKey, root[:])
}
-// AddCodeToFetch adds a marker that we need to fetch the code for [hash].
+// AddCodeToFetch adds a marker that we need to fetch the code for `hash`.
func AddCodeToFetch(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Put(codeToFetchKey(hash), nil); err != nil {
log.Crit("Failed to put code to fetch", "codeHash", hash, "err", err)
}
}
-// DeleteCodeToFetch removes the marker that the code corresponding to [hash] needs to be fetched.
+// DeleteCodeToFetch removes the marker that the code corresponding to `hash` needs to be fetched.
func DeleteCodeToFetch(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(codeToFetchKey(hash)); err != nil {
log.Crit("Failed to delete code to fetch", "codeHash", hash, "err", err)
@@ -86,12 +86,12 @@ func ClearSyncSegments(db ethdb.KeyValueStore, root common.Hash) error {
segmentsPrefix := make([]byte, len(syncSegmentsPrefix)+common.HashLength)
copy(segmentsPrefix, syncSegmentsPrefix)
copy(segmentsPrefix[len(syncSegmentsPrefix):], root[:])
- return ClearPrefix(db, segmentsPrefix, syncSegmentsKeyLength)
+ return clearPrefix(db, segmentsPrefix, syncSegmentsKeyLength)
}
// ClearAllSyncSegments removes all segment markers from db
func ClearAllSyncSegments(db ethdb.KeyValueStore) error {
- return ClearPrefix(db, syncSegmentsPrefix, syncSegmentsKeyLength)
+ return clearPrefix(db, syncSegmentsPrefix, syncSegmentsKeyLength)
}
// UnpackSyncSegmentKey returns the root and start position for a trie segment
@@ -130,12 +130,12 @@ func ClearSyncStorageTrie(db ethdb.KeyValueStore, root common.Hash) error {
accountsPrefix := make([]byte, len(syncStorageTriesPrefix)+common.HashLength)
copy(accountsPrefix, syncStorageTriesPrefix)
copy(accountsPrefix[len(syncStorageTriesPrefix):], root[:])
- return ClearPrefix(db, accountsPrefix, syncStorageTriesKeyLength)
+ return clearPrefix(db, accountsPrefix, syncStorageTriesKeyLength)
}
// ClearAllSyncStorageTries removes all storage tries added for syncing from db
func ClearAllSyncStorageTries(db ethdb.KeyValueStore) error {
- return ClearPrefix(db, syncStorageTriesPrefix, syncStorageTriesKeyLength)
+ return clearPrefix(db, syncStorageTriesPrefix, syncStorageTriesKeyLength)
}
// UnpackSyncStorageTrieKey returns the root and account for a storage trie
@@ -156,7 +156,7 @@ func packSyncStorageTrieKey(root common.Hash, account common.Hash) []byte {
return bytes
}
-// WriteSyncPerformed logs an entry in [db] indicating the VM state synced to [blockNumber].
+// WriteSyncPerformed logs an entry in `db` indicating the VM state synced to `blockNumber`.
func WriteSyncPerformed(db ethdb.KeyValueWriter, blockNumber uint64) error {
syncPerformedPrefixLen := len(syncPerformedPrefix)
bytes := make([]byte, syncPerformedPrefixLen+wrappers.LongLen)
@@ -191,3 +191,31 @@ func GetLatestSyncPerformed(db ethdb.Iteratee) uint64 {
}
return latestSyncPerformed
}
+
+// clearPrefix removes all keys in db that begin with prefix and match an
+// expected key length. `keyLen` must include the length of the prefix.
+func clearPrefix(db ethdb.KeyValueStore, prefix []byte, keyLen int) error {
+ it := db.NewIterator(prefix, nil)
+ defer it.Release()
+
+ batch := db.NewBatch()
+ for it.Next() {
+ key := common.CopyBytes(it.Key())
+ if len(key) != keyLen {
+ continue
+ }
+ if err := batch.Delete(key); err != nil {
+ return err
+ }
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ return err
+ }
+ batch.Reset()
+ }
+ }
+ if err := it.Error(); err != nil {
+ return err
+ }
+ return batch.Write()
+}
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
deleted file mode 100644
index 742a462c7c..0000000000
--- a/core/rawdb/accessors_trie.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// (c) 2023, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see
-
-package rawdb
-
-import (
- "fmt"
- "sync"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "golang.org/x/crypto/sha3"
-)
-
-// HashScheme is the legacy hash-based state scheme with which trie nodes are
-// stored in the disk with node hash as the database key. The advantage of this
-// scheme is that different versions of trie nodes can be stored in disk, which
-// is very beneficial for constructing archive nodes. The drawback is it will
-// store different trie nodes on the same path to different locations on the disk
-// with no data locality, and it's unfriendly for designing state pruning.
-//
-// Now this scheme is still kept for backward compatibility, and it will be used
-// for archive node and some other tries(e.g. light trie).
-const HashScheme = "hash"
-
-// PathScheme is the new path-based state scheme with which trie nodes are stored
-// in the disk with node path as the database key. This scheme will only store one
-// version of state data in the disk, which means that the state pruning operation
-// is native. At the same time, this scheme will put adjacent trie nodes in the same
-// area of the disk with good data locality property. But this scheme needs to rely
-// on extra state diffs to survive deep reorg.
-const PathScheme = "path"
-
-// hasher is used to compute the sha256 hash of the provided data.
-type hasher struct{ sha crypto.KeccakState }
-
-var hasherPool = sync.Pool{
- New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
-}
-
-func newHasher() *hasher {
- return hasherPool.Get().(*hasher)
-}
-
-func (h *hasher) hash(data []byte) common.Hash {
- return crypto.HashData(h.sha, data)
-}
-
-func (h *hasher) release() {
- hasherPool.Put(h)
-}
-
-// ReadAccountTrieNode retrieves the account trie node and the associated node
-// hash with the specified node path.
-func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) {
- data, err := db.Get(accountTrieNodeKey(path))
- if err != nil {
- return nil, common.Hash{}
- }
- h := newHasher()
- defer h.release()
- return data, h.hash(data)
-}
-
-// HasAccountTrieNode checks the account trie node presence with the specified
-// node path and the associated node hash.
-func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) bool {
- data, err := db.Get(accountTrieNodeKey(path))
- if err != nil {
- return false
- }
- h := newHasher()
- defer h.release()
- return h.hash(data) == hash
-}
-
-// ExistsAccountTrieNode checks the presence of the account trie node with the
-// specified node path, regardless of the node hash.
-func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool {
- has, err := db.Has(accountTrieNodeKey(path))
- if err != nil {
- return false
- }
- return has
-}
-
-// WriteAccountTrieNode writes the provided account trie node into database.
-func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) {
- if err := db.Put(accountTrieNodeKey(path), node); err != nil {
- log.Crit("Failed to store account trie node", "err", err)
- }
-}
-
-// DeleteAccountTrieNode deletes the specified account trie node from the database.
-func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) {
- if err := db.Delete(accountTrieNodeKey(path)); err != nil {
- log.Crit("Failed to delete account trie node", "err", err)
- }
-}
-
-// ReadStorageTrieNode retrieves the storage trie node and the associated node
-// hash with the specified node path.
-func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) {
- data, err := db.Get(storageTrieNodeKey(accountHash, path))
- if err != nil {
- return nil, common.Hash{}
- }
- h := newHasher()
- defer h.release()
- return data, h.hash(data)
-}
-
-// HasStorageTrieNode checks the storage trie node presence with the provided
-// node path and the associated node hash.
-func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash) bool {
- data, err := db.Get(storageTrieNodeKey(accountHash, path))
- if err != nil {
- return false
- }
- h := newHasher()
- defer h.release()
- return h.hash(data) == hash
-}
-
-// ExistsStorageTrieNode checks the presence of the storage trie node with the
-// specified account hash and node path, regardless of the node hash.
-func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool {
- has, err := db.Has(storageTrieNodeKey(accountHash, path))
- if err != nil {
- return false
- }
- return has
-}
-
-// WriteStorageTrieNode writes the provided storage trie node into database.
-func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) {
- if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil {
- log.Crit("Failed to store storage trie node", "err", err)
- }
-}
-
-// DeleteStorageTrieNode deletes the specified storage trie node from the database.
-func DeleteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte) {
- if err := db.Delete(storageTrieNodeKey(accountHash, path)); err != nil {
- log.Crit("Failed to delete storage trie node", "err", err)
- }
-}
-
-// ReadLegacyTrieNode retrieves the legacy trie node with the given
-// associated node hash.
-func ReadLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
- data, err := db.Get(hash.Bytes())
- if err != nil {
- return nil
- }
- return data
-}
-
-// HasLegacyTrieNode checks if the trie node with the provided hash is present in db.
-func HasLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
- ok, _ := db.Has(hash.Bytes())
- return ok
-}
-
-// WriteLegacyTrieNode writes the provided legacy trie node to database.
-func WriteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
- if err := db.Put(hash.Bytes(), node); err != nil {
- log.Crit("Failed to store legacy trie node", "err", err)
- }
-}
-
-// DeleteLegacyTrieNode deletes the specified legacy trie node from database.
-func DeleteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(hash.Bytes()); err != nil {
- log.Crit("Failed to delete legacy trie node", "err", err)
- }
-}
-
-// HasTrieNode checks the trie node presence with the provided node info and
-// the associated node hash.
-func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) bool {
- switch scheme {
- case HashScheme:
- return HasLegacyTrieNode(db, hash)
- case PathScheme:
- if owner == (common.Hash{}) {
- return HasAccountTrieNode(db, path, hash)
- }
- return HasStorageTrieNode(db, owner, path, hash)
- default:
- panic(fmt.Sprintf("Unknown scheme %v", scheme))
- }
-}
-
-// ReadTrieNode retrieves the trie node from database with the provided node info
-// and associated node hash.
-// hashScheme-based lookup requires the following:
-// - hash
-//
-// pathScheme-based lookup requires the following:
-// - owner
-// - path
-func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte {
- switch scheme {
- case HashScheme:
- return ReadLegacyTrieNode(db, hash)
- case PathScheme:
- var (
- blob []byte
- nHash common.Hash
- )
- if owner == (common.Hash{}) {
- blob, nHash = ReadAccountTrieNode(db, path)
- } else {
- blob, nHash = ReadStorageTrieNode(db, owner, path)
- }
- if nHash != hash {
- return nil
- }
- return blob
- default:
- panic(fmt.Sprintf("Unknown scheme %v", scheme))
- }
-}
-
-// WriteTrieNode writes the trie node into database with the provided node info
-// and associated node hash.
-// hashScheme-based lookup requires the following:
-// - hash
-//
-// pathScheme-based lookup requires the following:
-// - owner
-// - path
-func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte, scheme string) {
- switch scheme {
- case HashScheme:
- WriteLegacyTrieNode(db, hash, node)
- case PathScheme:
- if owner == (common.Hash{}) {
- WriteAccountTrieNode(db, path, node)
- } else {
- WriteStorageTrieNode(db, owner, path, node)
- }
- default:
- panic(fmt.Sprintf("Unknown scheme %v", scheme))
- }
-}
-
-// DeleteTrieNode deletes the trie node from database with the provided node info
-// and associated node hash.
-// hashScheme-based lookup requires the following:
-// - hash
-//
-// pathScheme-based lookup requires the following:
-// - owner
-// - path
-func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, scheme string) {
- switch scheme {
- case HashScheme:
- DeleteLegacyTrieNode(db, hash)
- case PathScheme:
- if owner == (common.Hash{}) {
- DeleteAccountTrieNode(db, path)
- } else {
- DeleteStorageTrieNode(db, owner, path)
- }
- default:
- panic(fmt.Sprintf("Unknown scheme %v", scheme))
- }
-}
-
-// ReadStateScheme reads the state scheme of persistent state, or none
-// if the state is not present in database.
-func ReadStateScheme(db ethdb.Reader) string {
- // Check if state in path-based scheme is present
- blob, _ := ReadAccountTrieNode(db, nil)
- if len(blob) != 0 {
- return PathScheme
- }
- // The root node might be deleted during the initial snap sync, check
- // the persistent state id then.
- if id := ReadPersistentStateID(db); id != 0 {
- return PathScheme
- }
- // In a hash-based scheme, the genesis state is consistently stored
- // on the disk. To assess the scheme of the persistent state, it
- // suffices to inspect the scheme of the genesis state.
- header := ReadHeader(db, ReadCanonicalHash(db, 0), 0)
- if header == nil {
- return "" // empty datadir
- }
- blob = ReadLegacyTrieNode(db, header.Root)
- if len(blob) == 0 {
- return "" // no state in disk
- }
- return HashScheme
-}
-
-// ParseStateScheme checks if the specified state scheme is compatible with
-// the stored state.
-//
-// - If the provided scheme is none, use the scheme consistent with persistent
-// state, or fallback to hash-based scheme if state is empty.
-//
-// - If the provided scheme is hash, use hash-based scheme or error out if not
-// compatible with persistent state scheme.
-//
-// - If the provided scheme is path: use path-based scheme or error out if not
-// compatible with persistent state scheme.
-func ParseStateScheme(provided string, disk ethdb.Database) (string, error) {
- // If state scheme is not specified, use the scheme consistent
- // with persistent state, or fallback to hash mode if database
- // is empty.
- stored := ReadStateScheme(disk)
- if provided == "" {
- if stored == "" {
- // use default scheme for empty database, flip it when
- // path mode is chosen as default
- log.Info("State schema set to default", "scheme", "hash")
- return HashScheme, nil
- }
- log.Info("State scheme set to already existing", "scheme", stored)
- return stored, nil // reuse scheme of persistent scheme
- }
- // If state scheme is specified, ensure it's compatible with
- // persistent state.
- if stored == "" || provided == stored {
- log.Info("State scheme set by user", "scheme", provided)
- return provided, nil
- }
- return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, provided)
-}
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
deleted file mode 100644
index f4c79fd0b4..0000000000
--- a/core/rawdb/chain_iterator.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// (c) 2019-2022, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "runtime"
- "sync/atomic"
- "time"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/common/prque"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/rlp"
-)
-
-type blockTxHashes struct {
- number uint64
- hashes []common.Hash
-}
-
-// iterateTransactions iterates over all transactions in the (canon) block
-// number(s) given, and yields the hashes on a channel. If there is a signal
-// received from interrupt channel, the iteration will be aborted and result
-// channel will be closed.
-// Iterates blocks in the range [from, to)
-func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool, interrupt chan struct{}) chan *blockTxHashes {
- // One thread sequentially reads data from db
- type numberRlp struct {
- number uint64
- rlp rlp.RawValue
- }
- if to == from {
- return nil
- }
- threads := to - from
- if cpus := runtime.NumCPU(); threads > uint64(cpus) {
- threads = uint64(cpus)
- }
- var (
- rlpCh = make(chan *numberRlp, threads*2) // we send raw rlp over this channel
- hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh
- )
- // lookup runs in one instance
- lookup := func() {
- n, end := from, to
- if reverse {
- n, end = to-1, from-1
- }
- defer close(rlpCh)
- for n != end {
- data := ReadCanonicalBodyRLP(db, n)
- // Feed the block to the aggregator, or abort on interrupt
- select {
- case rlpCh <- &numberRlp{n, data}:
- case <-interrupt:
- return
- }
- if reverse {
- n--
- } else {
- n++
- }
- }
- }
- // process runs in parallel
- var nThreadsAlive atomic.Int32
- nThreadsAlive.Store(int32(threads))
- process := func() {
- defer func() {
- // Last processor closes the result channel
- if nThreadsAlive.Add(-1) == 0 {
- close(hashesCh)
- }
- }()
- for data := range rlpCh {
- var body types.Body
- if err := rlp.DecodeBytes(data.rlp, &body); err != nil {
- log.Warn("Failed to decode block body", "block", data.number, "error", err)
- return
- }
- var hashes []common.Hash
- for _, tx := range body.Transactions {
- hashes = append(hashes, tx.Hash())
- }
- result := &blockTxHashes{
- hashes: hashes,
- number: data.number,
- }
- // Feed the block to the aggregator, or abort on interrupt
- select {
- case hashesCh <- result:
- case <-interrupt:
- return
- }
- }
- }
- go lookup() // start the sequential db accessor
- for i := 0; i < int(threads); i++ {
- go process()
- }
- return hashesCh
-}
-
-// indexTransactions creates txlookup indices of the specified block range.
-//
-// This function iterates canonical chain in reverse order, it has one main advantage:
-// We can write tx index tail flag periodically even without the whole indexing
-// procedure is finished. So that we can resume indexing procedure next time quickly.
-//
-// There is a passed channel, the whole procedure will be interrupted if any
-// signal received.
-func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool, report bool) {
- // short circuit for invalid range
- if from >= to {
- return
- }
- var (
- hashesCh = iterateTransactions(db, from, to, true, interrupt)
- batch = db.NewBatch()
- start = time.Now()
- logged = start.Add(-7 * time.Second)
-
- // Since we iterate in reverse, we expect the first number to come
- // in to be [to-1]. Therefore, setting lastNum to means that the
- // queue gap-evaluation will work correctly
- lastNum = to
- queue = prque.New[int64, *blockTxHashes](nil)
- blocks, txs = 0, 0 // for stats reporting
- )
- for chanDelivery := range hashesCh {
- // Push the delivery into the queue and process contiguous ranges.
- // Since we iterate in reverse, so lower numbers have lower prio, and
- // we can use the number directly as prio marker
- queue.Push(chanDelivery, int64(chanDelivery.number))
- for !queue.Empty() {
- // If the next available item is gapped, return
- if _, priority := queue.Peek(); priority != int64(lastNum-1) {
- break
- }
- // For testing
- if hook != nil && !hook(lastNum-1) {
- break
- }
- // Next block available, pop it off and index it
- delivery := queue.PopItem()
- lastNum = delivery.number
- WriteTxLookupEntries(batch, delivery.number, delivery.hashes)
- blocks++
- txs += len(delivery.hashes)
- // If enough data was accumulated in memory or we're at the last block, dump to disk
- if batch.ValueSize() > ethdb.IdealBatchSize {
- WriteTxIndexTail(batch, lastNum) // Also write the tail here
- if err := batch.Write(); err != nil {
- log.Crit("Failed writing batch to db", "error", err)
- return
- }
- batch.Reset()
- }
- // If we've spent too much time already, notify the user of what we're doing
- if time.Since(logged) > 8*time.Second {
- log.Info("Indexing transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
- logged = time.Now()
- }
- }
- }
- // Flush the new indexing tail and the last committed data. It can also happen
- // that the last batch is empty because nothing to index, but the tail has to
- // be flushed anyway.
- WriteTxIndexTail(batch, lastNum)
- if err := batch.Write(); err != nil {
- log.Crit("Failed writing batch to db", "error", err)
- return
- }
- logger := log.Debug
- if report {
- logger = log.Info
- }
- select {
- case <-interrupt:
- logger("Transaction indexing interrupted", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
- default:
- logger("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
- }
-}
-
-// IndexTransactions creates txlookup indices of the specified block range. The from
-// is included while to is excluded.
-//
-// This function iterates canonical chain in reverse order, it has one main advantage:
-// We can write tx index tail flag periodically even without the whole indexing
-// procedure is finished. So that we can resume indexing procedure next time quickly.
-//
-// There is a passed channel, the whole procedure will be interrupted if any
-// signal received.
-func IndexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, report bool) {
- indexTransactions(db, from, to, interrupt, nil, report)
-}
-
-// indexTransactionsForTesting is the internal debug version with an additional hook.
-func indexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
- indexTransactions(db, from, to, interrupt, hook, false)
-}
-
-// unindexTransactions removes txlookup indices of the specified block range.
-//
-// There is a passed channel, the whole procedure will be interrupted if any
-// signal received.
-func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool, report bool) {
- // short circuit for invalid range
- if from >= to {
- return
- }
- var (
- hashesCh = iterateTransactions(db, from, to, false, interrupt)
- batch = db.NewBatch()
- start = time.Now()
- logged = start.Add(-7 * time.Second)
-
- // we expect the first number to come in to be [from]. Therefore, setting
- // nextNum to from means that the queue gap-evaluation will work correctly
- nextNum = from
- queue = prque.New[int64, *blockTxHashes](nil)
- blocks, txs = 0, 0 // for stats reporting
- )
- // Otherwise spin up the concurrent iterator and unindexer
- for delivery := range hashesCh {
- // Push the delivery into the queue and process contiguous ranges.
- queue.Push(delivery, -int64(delivery.number))
- for !queue.Empty() {
- // If the next available item is gapped, return
- if _, priority := queue.Peek(); -priority != int64(nextNum) {
- break
- }
- // For testing
- if hook != nil && !hook(nextNum) {
- break
- }
- delivery := queue.PopItem()
- nextNum = delivery.number + 1
- DeleteTxLookupEntries(batch, delivery.hashes)
- txs += len(delivery.hashes)
- blocks++
-
- // If enough data was accumulated in memory or we're at the last block, dump to disk
- // A batch counts the size of deletion as '1', so we need to flush more
- // often than that.
- if blocks%1000 == 0 {
- WriteTxIndexTail(batch, nextNum)
- if err := batch.Write(); err != nil {
- log.Crit("Failed writing batch to db", "error", err)
- return
- }
- batch.Reset()
- }
- // If we've spent too much time already, notify the user of what we're doing
- if time.Since(logged) > 8*time.Second {
- log.Info("Unindexing transactions", "blocks", blocks, "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
- logged = time.Now()
- }
- }
- }
- // Flush the new indexing tail and the last committed data. It can also happen
- // that the last batch is empty because nothing to unindex, but the tail has to
- // be flushed anyway.
- WriteTxIndexTail(batch, nextNum)
- if err := batch.Write(); err != nil {
- log.Crit("Failed writing batch to db", "error", err)
- return
- }
- logger := log.Debug
- if report {
- logger = log.Info
- }
- select {
- case <-interrupt:
- logger("Transaction unindexing interrupted", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
- default:
- logger("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
- }
-}
-
-// UnindexTransactions removes txlookup indices of the specified block range.
-// The from is included while to is excluded.
-//
-// There is a passed channel, the whole procedure will be interrupted if any
-// signal received.
-func UnindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, report bool) {
- unindexTransactions(db, from, to, interrupt, nil, report)
-}
-
-// unindexTransactionsForTesting is the internal debug version with an additional hook.
-func unindexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
- unindexTransactions(db, from, to, interrupt, hook, false)
-}
diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go
deleted file mode 100644
index 2086fa72b5..0000000000
--- a/core/rawdb/chain_iterator_test.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// (c) 2019-2022, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "math/big"
- "reflect"
- "sort"
- "sync"
- "testing"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/libevm/common"
-)
-
-func TestChainIterator(t *testing.T) {
- // Construct test chain db
- chainDb := NewMemoryDatabase()
-
- var block *types.Block
- var txs []*types.Transaction
- to := common.BytesToAddress([]byte{0x11})
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) // Empty genesis block
- WriteBlock(chainDb, block)
- WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
- for i := uint64(1); i <= 10; i++ {
- var tx *types.Transaction
- if i%2 == 0 {
- tx = types.NewTx(&types.LegacyTx{
- Nonce: i,
- GasPrice: big.NewInt(11111),
- Gas: 1111,
- To: &to,
- Value: big.NewInt(111),
- Data: []byte{0x11, 0x11, 0x11},
- })
- } else {
- tx = types.NewTx(&types.AccessListTx{
- ChainID: big.NewInt(1337),
- Nonce: i,
- GasPrice: big.NewInt(11111),
- Gas: 1111,
- To: &to,
- Value: big.NewInt(111),
- Data: []byte{0x11, 0x11, 0x11},
- })
- }
- txs = append(txs, tx)
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher())
- WriteBlock(chainDb, block)
- WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
- }
-
- var cases = []struct {
- from, to uint64
- reverse bool
- expect []int
- }{
- {0, 11, true, []int{10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}},
- {0, 0, true, nil},
- {0, 5, true, []int{4, 3, 2, 1, 0}},
- {10, 11, true, []int{10}},
- {0, 11, false, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
- {0, 0, false, nil},
- {10, 11, false, []int{10}},
- }
- for i, c := range cases {
- var numbers []int
- hashCh := iterateTransactions(chainDb, c.from, c.to, c.reverse, nil)
- if hashCh != nil {
- for h := range hashCh {
- numbers = append(numbers, int(h.number))
- if len(h.hashes) > 0 {
- if got, exp := h.hashes[0], txs[h.number-1].Hash(); got != exp {
- t.Fatalf("block %d: hash wrong, got %x exp %x", h.number, got, exp)
- }
- }
- }
- }
- if !c.reverse {
- sort.Ints(numbers)
- } else {
- sort.Sort(sort.Reverse(sort.IntSlice(numbers)))
- }
- if !reflect.DeepEqual(numbers, c.expect) {
- t.Fatalf("Case %d failed, visit element mismatch, want %v, got %v", i, c.expect, numbers)
- }
- }
-}
-
-func TestIndexTransactions(t *testing.T) {
- // Construct test chain db
- chainDb := NewMemoryDatabase()
-
- var block *types.Block
- var txs []*types.Transaction
- to := common.BytesToAddress([]byte{0x11})
-
- // Write empty genesis block
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher())
- WriteBlock(chainDb, block)
- WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
-
- for i := uint64(1); i <= 10; i++ {
- var tx *types.Transaction
- if i%2 == 0 {
- tx = types.NewTx(&types.LegacyTx{
- Nonce: i,
- GasPrice: big.NewInt(11111),
- Gas: 1111,
- To: &to,
- Value: big.NewInt(111),
- Data: []byte{0x11, 0x11, 0x11},
- })
- } else {
- tx = types.NewTx(&types.AccessListTx{
- ChainID: big.NewInt(1337),
- Nonce: i,
- GasPrice: big.NewInt(11111),
- Gas: 1111,
- To: &to,
- Value: big.NewInt(111),
- Data: []byte{0x11, 0x11, 0x11},
- })
- }
- txs = append(txs, tx)
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher())
- WriteBlock(chainDb, block)
- WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
- }
- // verify checks whether the tx indices in the range [from, to)
- // is expected.
- verify := func(from, to int, exist bool, tail uint64) {
- for i := from; i < to; i++ {
- if i == 0 {
- continue
- }
- number := ReadTxLookupEntry(chainDb, txs[i-1].Hash())
- if exist && number == nil {
- t.Fatalf("Transaction index %d missing", i)
- }
- if !exist && number != nil {
- t.Fatalf("Transaction index %d is not deleted", i)
- }
- }
- number := ReadTxIndexTail(chainDb)
- if number == nil || *number != tail {
- t.Fatalf("Transaction tail mismatch")
- }
- }
- IndexTransactions(chainDb, 5, 11, nil, false)
- verify(5, 11, true, 5)
- verify(0, 5, false, 5)
-
- IndexTransactions(chainDb, 0, 5, nil, false)
- verify(0, 11, true, 0)
-
- UnindexTransactions(chainDb, 0, 5, nil, false)
- verify(5, 11, true, 5)
- verify(0, 5, false, 5)
-
- UnindexTransactions(chainDb, 5, 11, nil, false)
- verify(0, 11, false, 11)
-
- // Testing corner cases
- signal := make(chan struct{})
- var once sync.Once
- indexTransactionsForTesting(chainDb, 5, 11, signal, func(n uint64) bool {
- if n <= 8 {
- once.Do(func() {
- close(signal)
- })
- return false
- }
- return true
- })
- verify(9, 11, true, 9)
- verify(0, 9, false, 9)
- IndexTransactions(chainDb, 0, 9, nil, false)
-
- signal = make(chan struct{})
- var once2 sync.Once
- unindexTransactionsForTesting(chainDb, 0, 11, signal, func(n uint64) bool {
- if n >= 8 {
- once2.Do(func() {
- close(signal)
- })
- return false
- }
- return true
- })
- verify(8, 11, true, 8)
- verify(0, 8, false, 8)
-}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
deleted file mode 100644
index a22b81834d..0000000000
--- a/core/rawdb/database.go
+++ /dev/null
@@ -1,334 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "bytes"
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/ava-labs/libevm/common"
- ethrawdb "github.com/ava-labs/libevm/core/rawdb"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/ethdb/leveldb"
- "github.com/ava-labs/libevm/ethdb/memorydb"
- "github.com/ava-labs/libevm/ethdb/pebble"
- "github.com/ava-labs/libevm/log"
-)
-
-// nofreezedb is a database wrapper that disables freezer data retrievals.
-type nofreezedb struct {
- ethdb.KeyValueStore
-}
-
-// HasAncient returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
- return false, errNotSupported
-}
-
-// Ancient returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
- return nil, errNotSupported
-}
-
-// AncientRange returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
- return nil, errNotSupported
-}
-
-// Ancients returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) Ancients() (uint64, error) {
- return 0, errNotSupported
-}
-
-// Tail returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) Tail() (uint64, error) {
- return 0, errNotSupported
-}
-
-// AncientSize returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
- return 0, errNotSupported
-}
-
-// ModifyAncients is not supported.
-func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
- return 0, errNotSupported
-}
-
-// TruncateHead returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) {
- return 0, errNotSupported
-}
-
-// TruncateTail returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateTail(items uint64) (uint64, error) {
- return 0, errNotSupported
-}
-
-// Sync returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) Sync() error {
- return errNotSupported
-}
-
-func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
- // Unlike other ancient-related methods, this method does not return
- // errNotSupported when invoked.
- // The reason for this is that the caller might want to do several things:
- // 1. Check if something is in the freezer,
- // 2. If not, check leveldb.
- //
- // This will work, since the ancient-checks inside 'fn' will return errors,
- // and the leveldb work will continue.
- //
- // If we instead were to return errNotSupported here, then the caller would
- // have to explicitly check for that, having an extra clause to do the
- // non-ancient operations.
- return fn(db)
-}
-
-// MigrateTable processes the entries in a given table in sequence
-// converting them to a new format if they're of an old format.
-func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
- return errNotSupported
-}
-
-// AncientDatadir returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) AncientDatadir() (string, error) {
- return "", errNotSupported
-}
-
-// NewDatabase creates a high level database on top of a given key-value data
-// store without a freezer moving immutable chain segments into cold storage.
-func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
- return &nofreezedb{KeyValueStore: db}
-}
-
-// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
-// freezer moving immutable chain segments into cold storage.
-func NewMemoryDatabase() ethdb.Database {
- return NewDatabase(memorydb.New())
-}
-
-// NewMemoryDatabaseWithCap creates an ephemeral in-memory key-value database
-// with an initial starting capacity, but without a freezer moving immutable
-// chain segments into cold storage.
-func NewMemoryDatabaseWithCap(size int) ethdb.Database {
- return NewDatabase(memorydb.NewWithCap(size))
-}
-
-// NewLevelDBDatabase creates a persistent key-value database without a freezer
-// moving immutable chain segments into cold storage.
-func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
- db, err := leveldb.New(file, cache, handles, namespace, readonly)
- if err != nil {
- return nil, err
- }
- log.Info("Using LevelDB as the backing database")
- return NewDatabase(db), nil
-}
-
-// NewPebbleDBDatabase creates a persistent key-value database without a freezer
-// moving immutable chain segments into cold storage.
-func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
- db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral)
- if err != nil {
- return nil, err
- }
- return NewDatabase(db), nil
-}
-
-const (
- dbPebble = "pebble"
- dbLeveldb = "leveldb"
-)
-
-// PreexistingDatabase checks the given data directory whether a database is already
-// instantiated at that location, and if so, returns the type of database (or the
-// empty string).
-func PreexistingDatabase(path string) string {
- if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
- return "" // No pre-existing db
- }
- if matches, err := filepath.Glob(filepath.Join(path, "OPTIONS*")); len(matches) > 0 || err != nil {
- if err != nil {
- panic(err) // only possible if the pattern is malformed
- }
- return dbPebble
- }
- return dbLeveldb
-}
-
-// OpenOptions contains the options to apply when opening a database.
-// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used.
-type OpenOptions struct {
- Type string // "leveldb" | "pebble"
- Directory string // the datadir
- Namespace string // the namespace for database relevant metrics
- Cache int // the capacity(in megabytes) of the data caching
- Handles int // number of files to be open simultaneously
- ReadOnly bool
- // Ephemeral means that filesystem sync operations should be avoided: data integrity in the face of
- // a crash is not important. This option should typically be used in tests.
- Ephemeral bool
-}
-
-// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
-//
-// type == null type != null
-// +----------------------------------------
-// db is non-existent | pebble default | specified type
-// db is existent | from db | specified type (if compatible)
-func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
- // Reject any unsupported database type
- if len(o.Type) != 0 && o.Type != dbLeveldb && o.Type != dbPebble {
- return nil, fmt.Errorf("unknown db.engine %v", o.Type)
- }
- // Retrieve any pre-existing database's type and use that or the requested one
- // as long as there's no conflict between the two types
- existingDb := PreexistingDatabase(o.Directory)
- if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb {
- return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb)
- }
- if o.Type == dbPebble || existingDb == dbPebble {
- log.Info("Using pebble as the backing database")
- return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
- }
- if o.Type == dbLeveldb || existingDb == dbLeveldb {
- log.Info("Using leveldb as the backing database")
- return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
- }
- // No pre-existing database, no user-requested one either. Default to Pebble.
- log.Info("Defaulting to pebble as the backing database")
- return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
-}
-
-// Open opens both a disk-based key-value database such as leveldb or pebble, but also
-// integrates it with a freezer database -- if the AncientDir option has been
-// set on the provided OpenOptions.
-// The passed o.AncientDir indicates the path of root ancient directory where
-// the chain freezer can be opened.
-func Open(o OpenOptions) (ethdb.Database, error) {
- kvdb, err := openKeyValueDatabase(o)
- if err != nil {
- return nil, err
- }
- return kvdb, nil
-}
-
-// InspectDatabase traverses the entire database and checks the size
-// of all different categories of data.
-func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
- var (
- codeToFetch ethrawdb.DatabaseStat
- syncPerformed ethrawdb.DatabaseStat
- syncProgress ethrawdb.DatabaseStat
- syncSegments ethrawdb.DatabaseStat
- )
-
- options := []ethrawdb.InspectDatabaseOption{
- ethrawdb.WithDatabaseMetadataKeys(func(key []byte) bool {
- return bytes.Equal(key, snapshotBlockHashKey) ||
- bytes.Equal(key, syncRootKey)
- }),
- ethrawdb.WithDatabaseStatRecorder(func(key []byte, size common.StorageSize) bool {
- switch {
- case bytes.HasPrefix(key, syncSegmentsPrefix) && len(key) == syncSegmentsKeyLength:
- syncSegments.Add(size)
- return true
- case bytes.HasPrefix(key, syncStorageTriesPrefix) && len(key) == syncStorageTriesKeyLength:
- syncProgress.Add(size)
- return true
- case bytes.HasPrefix(key, CodeToFetchPrefix) && len(key) == codeToFetchKeyLength:
- codeToFetch.Add(size)
- return true
- case bytes.HasPrefix(key, syncPerformedPrefix) && len(key) == syncPerformedKeyLength:
- syncPerformed.Add(size)
- return true
- default:
- return false
- }
- }),
- ethrawdb.WithDatabaseStatsTransformer(func(rows [][]string) [][]string {
- newRows := make([][]string, 0, len(rows))
- for _, row := range rows {
- database := row[0]
- category := row[1]
- switch {
- case database == "Key-Value store" && category == "Difficulties",
- database == "Key-Value store" && category == "Beacon sync headers",
- database == "Ancient store (Chain)":
- // Discard rows specific to libevm (geth) but irrelevant to coreth.
- continue
- }
- newRows = append(newRows, row)
- }
-
- return append(
- newRows,
- []string{"State sync", "Trie segments", syncSegments.Size(), syncSegments.Count()},
- []string{"State sync", "Storage tries to fetch", syncProgress.Size(), syncProgress.Count()},
- []string{"State sync", "Code to fetch", codeToFetch.Size(), codeToFetch.Count()},
- []string{"State sync", "Block numbers synced to", syncPerformed.Size(), syncPerformed.Count()},
- )
- }),
- }
-
- return ethrawdb.InspectDatabase(db, keyPrefix, keyStart, options...)
-}
-
-// ClearPrefix removes all keys in db that begin with prefix and match an
-// expected key length. [keyLen] should include the length of the prefix.
-func ClearPrefix(db ethdb.KeyValueStore, prefix []byte, keyLen int) error {
- it := db.NewIterator(prefix, nil)
- defer it.Release()
-
- batch := db.NewBatch()
- for it.Next() {
- key := common.CopyBytes(it.Key())
- if len(key) != keyLen {
- // avoid deleting keys that do not match the expected length
- continue
- }
- if err := batch.Delete(key); err != nil {
- return err
- }
- if batch.ValueSize() > ethdb.IdealBatchSize {
- if err := batch.Write(); err != nil {
- return err
- }
- batch.Reset()
- }
- }
- if err := it.Error(); err != nil {
- return err
- }
- return batch.Write()
-}
-
-/// TODO: Consider adding ReadChainMetadata
diff --git a/core/rawdb/database_ext.go b/core/rawdb/database_ext.go
new file mode 100644
index 0000000000..f2a350b618
--- /dev/null
+++ b/core/rawdb/database_ext.go
@@ -0,0 +1,73 @@
+// (c) 2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package rawdb
+
+import (
+ "bytes"
+
+ "github.com/ava-labs/libevm/common"
+ ethrawdb "github.com/ava-labs/libevm/core/rawdb"
+ "github.com/ava-labs/libevm/ethdb"
+)
+
+// InspectDatabase traverses the entire database and checks the size
+// of all different categories of data.
+func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
+ var (
+ codeToFetch ethrawdb.DatabaseStat
+ syncPerformed ethrawdb.DatabaseStat
+ syncProgress ethrawdb.DatabaseStat
+ syncSegments ethrawdb.DatabaseStat
+ )
+
+ options := []ethrawdb.InspectDatabaseOption{
+ ethrawdb.WithDatabaseMetadataKeys(func(key []byte) bool {
+ return bytes.Equal(key, snapshotBlockHashKey) ||
+ bytes.Equal(key, syncRootKey)
+ }),
+ ethrawdb.WithDatabaseStatRecorder(func(key []byte, size common.StorageSize) bool {
+ switch {
+ case bytes.HasPrefix(key, syncSegmentsPrefix) && len(key) == syncSegmentsKeyLength:
+ syncSegments.Add(size)
+ return true
+ case bytes.HasPrefix(key, syncStorageTriesPrefix) && len(key) == syncStorageTriesKeyLength:
+ syncProgress.Add(size)
+ return true
+ case bytes.HasPrefix(key, CodeToFetchPrefix) && len(key) == codeToFetchKeyLength:
+ codeToFetch.Add(size)
+ return true
+ case bytes.HasPrefix(key, syncPerformedPrefix) && len(key) == syncPerformedKeyLength:
+ syncPerformed.Add(size)
+ return true
+ default:
+ return false
+ }
+ }),
+ ethrawdb.WithDatabaseStatsTransformer(func(rows [][]string) [][]string {
+ newRows := make([][]string, 0, len(rows))
+ for _, row := range rows {
+ database := row[0]
+ category := row[1]
+ switch {
+ case database == "Key-Value store" && category == "Difficulties",
+ database == "Key-Value store" && category == "Beacon sync headers",
+ database == "Ancient store (Chain)":
+ // Discard rows specific to libevm (geth) but irrelevant to coreth.
+ continue
+ }
+ newRows = append(newRows, row)
+ }
+
+ return append(
+ newRows,
+ []string{"State sync", "Trie segments", syncSegments.Size(), syncSegments.Count()},
+ []string{"State sync", "Storage tries to fetch", syncProgress.Size(), syncProgress.Count()},
+ []string{"State sync", "Code to fetch", codeToFetch.Size(), codeToFetch.Count()},
+ []string{"State sync", "Block numbers synced to", syncPerformed.Size(), syncPerformed.Count()},
+ )
+ }),
+ }
+
+ return ethrawdb.InspectDatabase(db, keyPrefix, keyStart, options...)
+}
diff --git a/core/rawdb/database_ext_test.go b/core/rawdb/database_ext_test.go
index 91e62f3274..f9519c0e9e 100644
--- a/core/rawdb/database_ext_test.go
+++ b/core/rawdb/database_ext_test.go
@@ -1,3 +1,6 @@
+// (c) 2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
package rawdb
import (
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
deleted file mode 100644
index 622cbb3ff9..0000000000
--- a/core/rawdb/freezer.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-// convertLegacyFn takes a raw freezer entry in an older format and
-// returns it in the new format.
-type convertLegacyFn = func([]byte) ([]byte, error)
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
deleted file mode 100644
index bc999be25b..0000000000
--- a/core/rawdb/freezer_table.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import "errors"
-
-var (
- // errNotSupported is returned if the database doesn't support the required operation.
- errNotSupported = errors.New("this operation is not supported")
-)
diff --git a/core/rawdb/imports.go b/core/rawdb/imports.go
new file mode 100644
index 0000000000..4702fb915e
--- /dev/null
+++ b/core/rawdb/imports.go
@@ -0,0 +1,127 @@
+// (c) 2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package rawdb
+
+import (
+ ethrawdb "github.com/ava-labs/libevm/core/rawdb"
+)
+
+// Types used directly as their upstream definition.
+type (
+ LegacyTxLookupEntry = ethrawdb.LegacyTxLookupEntry
+ OpenOptions = ethrawdb.OpenOptions
+)
+
+// Constants used directly as their upstream definition.
+const (
+ PathScheme = ethrawdb.PathScheme
+)
+
+// Variables used directly as their upstream definition.
+var (
+ BloomBitsIndexPrefix = ethrawdb.BloomBitsIndexPrefix
+ CodePrefix = ethrawdb.CodePrefix
+)
+
+// Functions used directly as their upstream definition.
+var (
+ DeleteAccountSnapshot = ethrawdb.DeleteAccountSnapshot
+ DeleteAccountTrieNode = ethrawdb.DeleteAccountTrieNode
+ DeleteBlock = ethrawdb.DeleteBlock
+ DeleteCanonicalHash = ethrawdb.DeleteCanonicalHash
+ DeleteSnapshotRoot = ethrawdb.DeleteSnapshotRoot
+ DeleteStorageSnapshot = ethrawdb.DeleteStorageSnapshot
+ DeleteStorageTrieNode = ethrawdb.DeleteStorageTrieNode
+ DeleteTrieJournal = ethrawdb.DeleteTrieJournal
+ DeleteTrieNode = ethrawdb.DeleteTrieNode
+ ExistsAccountTrieNode = ethrawdb.ExistsAccountTrieNode
+ FindCommonAncestor = ethrawdb.FindCommonAncestor
+ HasBody = ethrawdb.HasBody
+ HasCode = ethrawdb.HasCode
+ HasHeader = ethrawdb.HasHeader
+ HashScheme = ethrawdb.HashScheme
+ HasLegacyTrieNode = ethrawdb.HasLegacyTrieNode
+ HasReceipts = ethrawdb.HasReceipts
+ IsCodeKey = ethrawdb.IsCodeKey
+ IterateStorageSnapshots = ethrawdb.IterateStorageSnapshots
+ NewDatabase = ethrawdb.NewDatabase
+ NewDatabaseWithFreezer = ethrawdb.NewDatabaseWithFreezer
+ NewKeyLengthIterator = ethrawdb.NewKeyLengthIterator
+ NewLevelDBDatabase = ethrawdb.NewLevelDBDatabase
+ NewMemoryDatabase = ethrawdb.NewMemoryDatabase
+ NewStateFreezer = ethrawdb.NewStateFreezer
+ NewTable = ethrawdb.NewTable
+ Open = ethrawdb.Open
+ ParseStateScheme = ethrawdb.ParseStateScheme
+ PopUncleanShutdownMarker = ethrawdb.PopUncleanShutdownMarker
+ PushUncleanShutdownMarker = ethrawdb.PushUncleanShutdownMarker
+ ReadAccountSnapshot = ethrawdb.ReadAccountSnapshot
+ ReadAccountTrieNode = ethrawdb.ReadAccountTrieNode
+ ReadAllHashes = ethrawdb.ReadAllHashes
+ ReadBlock = ethrawdb.ReadBlock
+ ReadBloomBits = ethrawdb.ReadBloomBits
+ ReadBody = ethrawdb.ReadBody
+ ReadCanonicalHash = ethrawdb.ReadCanonicalHash
+ ReadChainConfig = ethrawdb.ReadChainConfig
+ ReadCode = ethrawdb.ReadCode
+ ReadDatabaseVersion = ethrawdb.ReadDatabaseVersion
+ ReadHeadBlock = ethrawdb.ReadHeadBlock
+ ReadHeadBlockHash = ethrawdb.ReadHeadBlockHash
+ ReadHeader = ethrawdb.ReadHeader
+ ReadHeaderNumber = ethrawdb.ReadHeaderNumber
+ ReadHeadFastBlockHash = ethrawdb.ReadHeadFastBlockHash
+ ReadHeadHeaderHash = ethrawdb.ReadHeadHeaderHash
+ ReadLastPivotNumber = ethrawdb.ReadLastPivotNumber
+ ReadLegacyTrieNode = ethrawdb.ReadLegacyTrieNode
+ ReadLogs = ethrawdb.ReadLogs
+ ReadPersistentStateID = ethrawdb.ReadPersistentStateID
+ ReadPreimage = ethrawdb.ReadPreimage
+ ReadRawReceipts = ethrawdb.ReadRawReceipts
+ ReadReceipts = ethrawdb.ReadReceipts
+ ReadSkeletonSyncStatus = ethrawdb.ReadSkeletonSyncStatus
+ ReadSnapshotDisabled = ethrawdb.ReadSnapshotDisabled
+ ReadSnapshotGenerator = ethrawdb.ReadSnapshotGenerator
+ ReadSnapshotJournal = ethrawdb.ReadSnapshotJournal
+ ReadSnapshotRecoveryNumber = ethrawdb.ReadSnapshotRecoveryNumber
+ ReadSnapshotRoot = ethrawdb.ReadSnapshotRoot
+ ReadSnapshotSyncStatus = ethrawdb.ReadSnapshotSyncStatus
+ ReadSnapSyncStatusFlag = ethrawdb.ReadSnapSyncStatusFlag
+ ReadStateID = ethrawdb.ReadStateID
+ ReadStorageSnapshot = ethrawdb.ReadStorageSnapshot
+ ReadStorageTrieNode = ethrawdb.ReadStorageTrieNode
+ ReadTransaction = ethrawdb.ReadTransaction
+ ReadTrieJournal = ethrawdb.ReadTrieJournal
+ ReadTxIndexTail = ethrawdb.ReadTxIndexTail
+ ReadTxLookupEntry = ethrawdb.ReadTxLookupEntry
+ SnapshotAccountPrefix = ethrawdb.SnapshotAccountPrefix
+ SnapshotStoragePrefix = ethrawdb.SnapshotStoragePrefix
+ UnindexTransactions = ethrawdb.UnindexTransactions
+ UpdateUncleanShutdownMarker = ethrawdb.UpdateUncleanShutdownMarker
+ WriteAccountSnapshot = ethrawdb.WriteAccountSnapshot
+ WriteAccountTrieNode = ethrawdb.WriteAccountTrieNode
+ WriteBlock = ethrawdb.WriteBlock
+ WriteBloomBits = ethrawdb.WriteBloomBits
+ WriteBody = ethrawdb.WriteBody
+ WriteCanonicalHash = ethrawdb.WriteCanonicalHash
+ WriteChainConfig = ethrawdb.WriteChainConfig
+ WriteCode = ethrawdb.WriteCode
+ WriteDatabaseVersion = ethrawdb.WriteDatabaseVersion
+ WriteHeadBlockHash = ethrawdb.WriteHeadBlockHash
+ WriteHeader = ethrawdb.WriteHeader
+ WriteHeadHeaderHash = ethrawdb.WriteHeadHeaderHash
+ WriteLegacyTrieNode = ethrawdb.WriteLegacyTrieNode
+ WritePersistentStateID = ethrawdb.WritePersistentStateID
+ WritePreimages = ethrawdb.WritePreimages
+ WriteReceipts = ethrawdb.WriteReceipts
+ WriteSnapshotGenerator = ethrawdb.WriteSnapshotGenerator
+ WriteSnapshotRoot = ethrawdb.WriteSnapshotRoot
+ WriteSnapSyncStatusFlag = ethrawdb.WriteSnapSyncStatusFlag
+ WriteStateID = ethrawdb.WriteStateID
+ WriteStorageSnapshot = ethrawdb.WriteStorageSnapshot
+ WriteStorageTrieNode = ethrawdb.WriteStorageTrieNode
+ WriteTrieJournal = ethrawdb.WriteTrieJournal
+ WriteTrieNode = ethrawdb.WriteTrieNode
+ WriteTxIndexTail = ethrawdb.WriteTxIndexTail
+ WriteTxLookupEntriesByBlock = ethrawdb.WriteTxLookupEntriesByBlock
+)
diff --git a/core/rawdb/key_length_iterator.go b/core/rawdb/key_length_iterator.go
deleted file mode 100644
index 8d1a7d2f54..0000000000
--- a/core/rawdb/key_length_iterator.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// (c) 2022, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import "github.com/ava-labs/libevm/ethdb"
-
-// KeyLengthIterator is a wrapper for a database iterator that ensures only key-value pairs
-// with a specific key length will be returned.
-type KeyLengthIterator struct {
- requiredKeyLength int
- ethdb.Iterator
-}
-
-// NewKeyLengthIterator returns a wrapped version of the iterator that will only return key-value
-// pairs where keys with a specific key length will be returned.
-func NewKeyLengthIterator(it ethdb.Iterator, keyLen int) ethdb.Iterator {
- return &KeyLengthIterator{
- Iterator: it,
- requiredKeyLength: keyLen,
- }
-}
-
-func (it *KeyLengthIterator) Next() bool {
- // Return true as soon as a key with the required key length is discovered
- for it.Iterator.Next() {
- if len(it.Iterator.Key()) == it.requiredKeyLength {
- return true
- }
- }
-
- // Return false when we exhaust the keys in the underlying iterator.
- return false
-}
diff --git a/core/rawdb/key_length_iterator_test.go b/core/rawdb/key_length_iterator_test.go
deleted file mode 100644
index 654efc5b55..0000000000
--- a/core/rawdb/key_length_iterator_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "encoding/binary"
- "testing"
-)
-
-func TestKeyLengthIterator(t *testing.T) {
- db := NewMemoryDatabase()
-
- keyLen := 8
- expectedKeys := make(map[string]struct{})
- for i := 0; i < 100; i++ {
- key := make([]byte, keyLen)
- binary.BigEndian.PutUint64(key, uint64(i))
- if err := db.Put(key, []byte{0x1}); err != nil {
- t.Fatal(err)
- }
- expectedKeys[string(key)] = struct{}{}
-
- longerKey := make([]byte, keyLen*2)
- binary.BigEndian.PutUint64(longerKey, uint64(i))
- if err := db.Put(longerKey, []byte{0x1}); err != nil {
- t.Fatal(err)
- }
- }
-
- it := NewKeyLengthIterator(db.NewIterator(nil, nil), keyLen)
- for it.Next() {
- key := it.Key()
- _, exists := expectedKeys[string(key)]
- if !exists {
- t.Fatalf("Found unexpected key %d", binary.BigEndian.Uint64(key))
- }
- delete(expectedKeys, string(key))
- if len(key) != keyLen {
- t.Fatalf("Found unexpected key in key length iterator with length %d", len(key))
- }
- }
-
- if len(expectedKeys) != 0 {
- t.Fatalf("Expected all keys of length %d to be removed from expected keys during iteration", keyLen)
- }
-}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
deleted file mode 100644
index 6fa4741a2e..0000000000
--- a/core/rawdb/schema.go
+++ /dev/null
@@ -1,310 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// Package rawdb contains a collection of low level database accessors.
-package rawdb
-
-import (
- "bytes"
- "encoding/binary"
-
- "github.com/ava-labs/avalanchego/utils/wrappers"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/metrics"
-)
-
-// The fields below define the low level database schema prefixing.
-var (
- // databaseVersionKey tracks the current database version.
- databaseVersionKey = []byte("DatabaseVersion")
-
- // headHeaderKey tracks the latest known header's hash.
- headHeaderKey = []byte("LastHeader")
-
- // headBlockKey tracks the latest known full block's hash.
- headBlockKey = []byte("LastBlock")
-
- // persistentStateIDKey tracks the id of latest stored state(for path-based only).
- persistentStateIDKey = []byte("LastStateID")
-
- // snapshotRootKey tracks the hash of the last snapshot.
- snapshotRootKey = []byte("SnapshotRoot")
-
- // snapshotBlockHashKey tracks the block hash of the last snapshot.
- snapshotBlockHashKey = []byte("SnapshotBlockHash")
-
- // snapshotGeneratorKey tracks the snapshot generation marker across restarts.
- snapshotGeneratorKey = []byte("SnapshotGenerator")
-
- // trieJournalKey tracks the in-memory trie node layers across restarts.
- trieJournalKey = []byte("TrieJournal")
-
- // txIndexTailKey tracks the oldest block whose transactions have been indexed.
- txIndexTailKey = []byte("TransactionIndexTail")
-
- // uncleanShutdownKey tracks the list of local crashes
- uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db
-
- // offlinePruningKey tracks runs of offline pruning
- offlinePruningKey = []byte("OfflinePruning")
-
- // populateMissingTriesKey tracks runs of trie backfills
- populateMissingTriesKey = []byte("PopulateMissingTries")
-
- // pruningDisabledKey tracks whether the node has ever run in archival mode
- // to ensure that a user does not accidentally corrupt an archival node.
- pruningDisabledKey = []byte("PruningDisabled")
-
- // acceptorTipKey tracks the tip of the last accepted block that has been fully processed.
- acceptorTipKey = []byte("AcceptorTipKey")
-
- // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
- headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
- headerHashSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + headerHashSuffix -> hash
- headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian)
-
- blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body
- blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
-
- txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
- bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
- SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value
- SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
- CodePrefix = []byte("c") // CodePrefix + code hash -> account code
-
- // Path-based storage scheme of merkle patricia trie.
- trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node
- trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node
- stateIDPrefix = []byte("L") // stateIDPrefix + state root -> state id
-
- PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
- configPrefix = []byte("ethereum-config-") // config prefix for the db
-
- // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
- BloomBitsIndexPrefix = []byte("iB")
-
- preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
- preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
-
- // State sync progress keys and prefixes
- syncRootKey = []byte("sync_root") // indicates the root of the main account trie currently being synced
- syncStorageTriesPrefix = []byte("sync_storage") // syncStorageTriesPrefix + trie root + account hash: indicates a storage trie must be fetched for the account
- syncSegmentsPrefix = []byte("sync_segments") // syncSegmentsPrefix + trie root + 32-byte start key: indicates the trie at root has a segment starting at the specified key
- CodeToFetchPrefix = []byte("CP") // CodeToFetchPrefix + code hash -> empty value tracks the outstanding code hashes we need to fetch.
-
- // State sync progress key lengths
- syncStorageTriesKeyLength = len(syncStorageTriesPrefix) + 2*common.HashLength
- syncSegmentsKeyLength = len(syncSegmentsPrefix) + 2*common.HashLength
- codeToFetchKeyLength = len(CodeToFetchPrefix) + common.HashLength
-
- // State sync metadata
- syncPerformedPrefix = []byte("sync_performed")
- syncPerformedKeyLength = len(syncPerformedPrefix) + wrappers.LongLen // prefix + block number as uint64
-)
-
-// LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary
-// fields.
-type LegacyTxLookupEntry struct {
- BlockHash common.Hash
- BlockIndex uint64
- Index uint64
-}
-
-// encodeBlockNumber encodes a block number as big endian uint64
-func encodeBlockNumber(number uint64) []byte {
- enc := make([]byte, 8)
- binary.BigEndian.PutUint64(enc, number)
- return enc
-}
-
-// headerKeyPrefix = headerPrefix + num (uint64 big endian)
-func headerKeyPrefix(number uint64) []byte {
- return append(headerPrefix, encodeBlockNumber(number)...)
-}
-
-// headerKey = headerPrefix + num (uint64 big endian) + hash
-func headerKey(number uint64, hash common.Hash) []byte {
- return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-}
-
-// headerHashKey = headerPrefix + num (uint64 big endian) + headerHashSuffix
-func headerHashKey(number uint64) []byte {
- return append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)
-}
-
-// headerNumberKey = headerNumberPrefix + hash
-func headerNumberKey(hash common.Hash) []byte {
- return append(headerNumberPrefix, hash.Bytes()...)
-}
-
-// blockBodyKey = blockBodyPrefix + num (uint64 big endian) + hash
-func blockBodyKey(number uint64, hash common.Hash) []byte {
- return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-}
-
-// blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash
-func blockReceiptsKey(number uint64, hash common.Hash) []byte {
- return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-}
-
-// txLookupKey = txLookupPrefix + hash
-func txLookupKey(hash common.Hash) []byte {
- return append(txLookupPrefix, hash.Bytes()...)
-}
-
-// accountSnapshotKey = SnapshotAccountPrefix + hash
-func accountSnapshotKey(hash common.Hash) []byte {
- return append(SnapshotAccountPrefix, hash.Bytes()...)
-}
-
-// storageSnapshotKey = SnapshotStoragePrefix + account hash + storage hash
-func storageSnapshotKey(accountHash, storageHash common.Hash) []byte {
- buf := make([]byte, len(SnapshotStoragePrefix)+common.HashLength+common.HashLength)
- n := copy(buf, SnapshotStoragePrefix)
- n += copy(buf[n:], accountHash.Bytes())
- copy(buf[n:], storageHash.Bytes())
- return buf
-}
-
-// storageSnapshotsKey = SnapshotStoragePrefix + account hash + storage hash
-func storageSnapshotsKey(accountHash common.Hash) []byte {
- return append(SnapshotStoragePrefix, accountHash.Bytes()...)
-}
-
-// bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash
-func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
- key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...)
-
- binary.BigEndian.PutUint16(key[1:], uint16(bit))
- binary.BigEndian.PutUint64(key[3:], section)
-
- return key
-}
-
-// preimageKey = preimagePrefix + hash
-func preimageKey(hash common.Hash) []byte {
- return append(PreimagePrefix, hash.Bytes()...)
-}
-
-// codeKey = CodePrefix + hash
-func codeKey(hash common.Hash) []byte {
- return append(CodePrefix, hash.Bytes()...)
-}
-
-// IsCodeKey reports whether the given byte slice is the key of contract code,
-// if so return the raw code hash as well.
-func IsCodeKey(key []byte) (bool, []byte) {
- if bytes.HasPrefix(key, CodePrefix) && len(key) == common.HashLength+len(CodePrefix) {
- return true, key[len(CodePrefix):]
- }
- return false, nil
-}
-
-// configKey = configPrefix + hash
-func configKey(hash common.Hash) []byte {
- return append(configPrefix, hash.Bytes()...)
-}
-
-// stateIDKey = stateIDPrefix + root (32 bytes)
-func stateIDKey(root common.Hash) []byte {
- return append(stateIDPrefix, root.Bytes()...)
-}
-
-// accountTrieNodeKey = trieNodeAccountPrefix + nodePath.
-func accountTrieNodeKey(path []byte) []byte {
- return append(trieNodeAccountPrefix, path...)
-}
-
-// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath.
-func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
- buf := make([]byte, len(trieNodeStoragePrefix)+common.HashLength+len(path))
- n := copy(buf, trieNodeStoragePrefix)
- n += copy(buf[n:], accountHash.Bytes())
- copy(buf[n:], path)
- return buf
-}
-
-// IsLegacyTrieNode reports whether a provided database entry is a legacy trie
-// node. The characteristics of legacy trie node are:
-// - the key length is 32 bytes
-// - the key is the hash of val
-func IsLegacyTrieNode(key []byte, val []byte) bool {
- if len(key) != common.HashLength {
- return false
- }
- return bytes.Equal(key, crypto.Keccak256(val))
-}
-
-// ResolveAccountTrieNodeKey reports whether a provided database entry is an
-// account trie node in path-based state scheme, and returns the resolved
-// node path if so.
-func ResolveAccountTrieNodeKey(key []byte) (bool, []byte) {
- if !bytes.HasPrefix(key, trieNodeAccountPrefix) {
- return false, nil
- }
- // The remaining key should only consist a hex node path
- // whose length is in the range 0 to 64 (64 is excluded
- // since leaves are always wrapped with shortNode).
- if len(key) >= len(trieNodeAccountPrefix)+common.HashLength*2 {
- return false, nil
- }
- return true, key[len(trieNodeAccountPrefix):]
-}
-
-// IsAccountTrieNode reports whether a provided database entry is an account
-// trie node in path-based state scheme.
-func IsAccountTrieNode(key []byte) bool {
- ok, _ := ResolveAccountTrieNodeKey(key)
- return ok
-}
-
-// ResolveStorageTrieNode reports whether a provided database entry is a storage
-// trie node in path-based state scheme, and returns the resolved account hash
-// and node path if so.
-func ResolveStorageTrieNode(key []byte) (bool, common.Hash, []byte) {
- if !bytes.HasPrefix(key, trieNodeStoragePrefix) {
- return false, common.Hash{}, nil
- }
- // The remaining key consists of 2 parts:
- // - 32 bytes account hash
- // - hex node path whose length is in the range 0 to 64
- if len(key) < len(trieNodeStoragePrefix)+common.HashLength {
- return false, common.Hash{}, nil
- }
- if len(key) >= len(trieNodeStoragePrefix)+common.HashLength+common.HashLength*2 {
- return false, common.Hash{}, nil
- }
- accountHash := common.BytesToHash(key[len(trieNodeStoragePrefix) : len(trieNodeStoragePrefix)+common.HashLength])
- return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:]
-}
-
-// IsStorageTrieNode reports whether a provided database entry is a storage
-// trie node in path-based state scheme.
-func IsStorageTrieNode(key []byte) bool {
- ok, _, _ := ResolveStorageTrieNode(key)
- return ok
-}
diff --git a/core/rawdb/schema_ext.go b/core/rawdb/schema_ext.go
new file mode 100644
index 0000000000..2bff67560d
--- /dev/null
+++ b/core/rawdb/schema_ext.go
@@ -0,0 +1,53 @@
+// (c) 2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package rawdb
+
+import (
+	"github.com/ava-labs/avalanchego/utils/wrappers"
+	"github.com/ava-labs/libevm/common"
+)
+
+var (
+	// snapshotBlockHashKey tracks the block hash of the last snapshot.
+	snapshotBlockHashKey = []byte("SnapshotBlockHash")
+	// offlinePruningKey tracks runs of offline pruning.
+	offlinePruningKey = []byte("OfflinePruning")
+	// populateMissingTriesKey tracks runs of trie backfills.
+	populateMissingTriesKey = []byte("PopulateMissingTries")
+	// pruningDisabledKey tracks whether the node has ever run in archival mode
+	// to ensure that a user does not accidentally corrupt an archival node.
+	pruningDisabledKey = []byte("PruningDisabled")
+	// acceptorTipKey tracks the tip of the last accepted block that has been fully processed.
+	acceptorTipKey = []byte("AcceptorTipKey")
+)
+
+// State sync progress keys and prefixes.
+var (
+	// syncRootKey tracks the root of the main account trie currently being synced.
+	syncRootKey = []byte("sync_root")
+	// syncStorageTriesPrefix is the prefix for storage tries pending fetch.
+	// syncStorageTriesPrefix + trie root + account hash: marks a storage trie that must be fetched for the account.
+	syncStorageTriesPrefix = []byte("sync_storage")
+	// syncSegmentsPrefix is the prefix for in-progress trie segments.
+	// syncSegmentsPrefix + trie root + 32-byte start key: marks that the trie at root has a segment starting at that key.
+	syncSegmentsPrefix = []byte("sync_segments")
+	// CodeToFetchPrefix is the prefix for outstanding code hashes.
+	// CodeToFetchPrefix + code hash -> empty value: tracks code we still need to fetch.
+	CodeToFetchPrefix = []byte("CP")
+)
+
+// State sync progress key lengths, derived from the prefixes above.
+var (
+	syncStorageTriesKeyLength = len(syncStorageTriesPrefix) + 2*common.HashLength
+	syncSegmentsKeyLength = len(syncSegmentsPrefix) + 2*common.HashLength
+	codeToFetchKeyLength = len(CodeToFetchPrefix) + common.HashLength
+)
+
+// State sync metadata.
+var (
+	syncPerformedPrefix = []byte("sync_performed")
+	// syncPerformedKeyLength is the length of a sync-performed metadata key:
+	// syncPerformedPrefix followed by a block number as uint64 (wrappers.LongLen bytes).
+	syncPerformedKeyLength = len(syncPerformedPrefix) + wrappers.LongLen
+)
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
deleted file mode 100644
index cb9156173a..0000000000
--- a/core/rawdb/table.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "github.com/ava-labs/libevm/ethdb"
-)
-
-// table is a wrapper around a database that prefixes each key access with a pre-
-// configured string.
-type table struct {
- db ethdb.Database
- prefix string
-}
-
-// NewTable returns a database object that prefixes all keys with a given string.
-func NewTable(db ethdb.Database, prefix string) ethdb.Database {
- return &table{
- db: db,
- prefix: prefix,
- }
-}
-
-// Close is a noop to implement the Database interface.
-func (t *table) Close() error {
- return nil
-}
-
-// Has retrieves if a prefixed version of a key is present in the database.
-func (t *table) Has(key []byte) (bool, error) {
- return t.db.Has(append([]byte(t.prefix), key...))
-}
-
-// Get retrieves the given prefixed key if it's present in the database.
-func (t *table) Get(key []byte) ([]byte, error) {
- return t.db.Get(append([]byte(t.prefix), key...))
-}
-
-// HasAncient is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) HasAncient(kind string, number uint64) (bool, error) {
- return t.db.HasAncient(kind, number)
-}
-
-// Ancient is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) Ancient(kind string, number uint64) ([]byte, error) {
- return t.db.Ancient(kind, number)
-}
-
-// AncientRange is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
- return t.db.AncientRange(kind, start, count, maxBytes)
-}
-
-// Ancients is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) Ancients() (uint64, error) {
- return t.db.Ancients()
-}
-
-// Tail is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) Tail() (uint64, error) {
- return t.db.Tail()
-}
-
-// AncientSize is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) AncientSize(kind string) (uint64, error) {
- return t.db.AncientSize(kind)
-}
-
-// ModifyAncients runs an ancient write operation on the underlying database.
-func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) {
- return t.db.ModifyAncients(fn)
-}
-
-func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
- return t.db.ReadAncients(fn)
-}
-
-// TruncateHead is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) TruncateHead(items uint64) (uint64, error) {
- return t.db.TruncateHead(items)
-}
-
-// TruncateTail is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) TruncateTail(items uint64) (uint64, error) {
- return t.db.TruncateTail(items)
-}
-
-// Sync is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) Sync() error {
- return t.db.Sync()
-}
-
-// MigrateTable processes the entries in a given table in sequence
-// converting them to a new format if they're of an old format.
-func (t *table) MigrateTable(kind string, convert convertLegacyFn) error {
- return t.db.MigrateTable(kind, convert)
-}
-
-// AncientDatadir returns the ancient datadir of the underlying database.
-func (t *table) AncientDatadir() (string, error) {
- return t.db.AncientDatadir()
-}
-
-// Put inserts the given value into the database at a prefixed version of the
-// provided key.
-func (t *table) Put(key []byte, value []byte) error {
- return t.db.Put(append([]byte(t.prefix), key...), value)
-}
-
-// Delete removes the given prefixed key from the database.
-func (t *table) Delete(key []byte) error {
- return t.db.Delete(append([]byte(t.prefix), key...))
-}
-
-// NewIterator creates a binary-alphabetical iterator over a subset
-// of database content with a particular key prefix, starting at a particular
-// initial key (or after, if it does not exist).
-func (t *table) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
- innerPrefix := append([]byte(t.prefix), prefix...)
- iter := t.db.NewIterator(innerPrefix, start)
- return &tableIterator{
- iter: iter,
- prefix: t.prefix,
- }
-}
-
-// Stat returns a particular internal stat of the database.
-func (t *table) Stat(property string) (string, error) {
- return t.db.Stat(property)
-}
-
-// Compact flattens the underlying data store for the given key range. In essence,
-// deleted and overwritten versions are discarded, and the data is rearranged to
-// reduce the cost of operations needed to access them.
-//
-// A nil start is treated as a key before all keys in the data store; a nil limit
-// is treated as a key after all keys in the data store. If both is nil then it
-// will compact entire data store.
-func (t *table) Compact(start []byte, limit []byte) error {
- // If no start was specified, use the table prefix as the first value
- if start == nil {
- start = []byte(t.prefix)
- } else {
- start = append([]byte(t.prefix), start...)
- }
- // If no limit was specified, use the first element not matching the prefix
- // as the limit
- if limit == nil {
- limit = []byte(t.prefix)
- for i := len(limit) - 1; i >= 0; i-- {
- // Bump the current character, stopping if it doesn't overflow
- limit[i]++
- if limit[i] > 0 {
- break
- }
- // Character overflown, proceed to the next or nil if the last
- if i == 0 {
- limit = nil
- }
- }
- } else {
- limit = append([]byte(t.prefix), limit...)
- }
- // Range correctly calculated based on table prefix, delegate down
- return t.db.Compact(start, limit)
-}
-
-// NewBatch creates a write-only database that buffers changes to its host db
-// until a final write is called, each operation prefixing all keys with the
-// pre-configured string.
-func (t *table) NewBatch() ethdb.Batch {
- return &tableBatch{t.db.NewBatch(), t.prefix}
-}
-
-// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
-func (t *table) NewBatchWithSize(size int) ethdb.Batch {
- return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
-}
-
-// NewSnapshot creates a database snapshot based on the current state.
-// The created snapshot will not be affected by all following mutations
-// happened on the database.
-func (t *table) NewSnapshot() (ethdb.Snapshot, error) {
- return t.db.NewSnapshot()
-}
-
-// tableBatch is a wrapper around a database batch that prefixes each key access
-// with a pre-configured string.
-type tableBatch struct {
- batch ethdb.Batch
- prefix string
-}
-
-// Put inserts the given value into the batch for later committing.
-func (b *tableBatch) Put(key, value []byte) error {
- return b.batch.Put(append([]byte(b.prefix), key...), value)
-}
-
-// Delete inserts a key removal into the batch for later committing.
-func (b *tableBatch) Delete(key []byte) error {
- return b.batch.Delete(append([]byte(b.prefix), key...))
-}
-
-// ValueSize retrieves the amount of data queued up for writing.
-func (b *tableBatch) ValueSize() int {
- return b.batch.ValueSize()
-}
-
-// Write flushes any accumulated data to disk.
-func (b *tableBatch) Write() error {
- return b.batch.Write()
-}
-
-// Reset resets the batch for reuse.
-func (b *tableBatch) Reset() {
- b.batch.Reset()
-}
-
-// tableReplayer is a wrapper around a batch replayer which truncates
-// the added prefix.
-type tableReplayer struct {
- w ethdb.KeyValueWriter
- prefix string
-}
-
-// Put implements the interface KeyValueWriter.
-func (r *tableReplayer) Put(key []byte, value []byte) error {
- trimmed := key[len(r.prefix):]
- return r.w.Put(trimmed, value)
-}
-
-// Delete implements the interface KeyValueWriter.
-func (r *tableReplayer) Delete(key []byte) error {
- trimmed := key[len(r.prefix):]
- return r.w.Delete(trimmed)
-}
-
-// Replay replays the batch contents.
-func (b *tableBatch) Replay(w ethdb.KeyValueWriter) error {
- return b.batch.Replay(&tableReplayer{w: w, prefix: b.prefix})
-}
-
-// tableIterator is a wrapper around a database iterator that prefixes each key access
-// with a pre-configured string.
-type tableIterator struct {
- iter ethdb.Iterator
- prefix string
-}
-
-// Next moves the iterator to the next key/value pair. It returns whether the
-// iterator is exhausted.
-func (iter *tableIterator) Next() bool {
- return iter.iter.Next()
-}
-
-// Error returns any accumulated error. Exhausting all the key/value pairs
-// is not considered to be an error.
-func (iter *tableIterator) Error() error {
- return iter.iter.Error()
-}
-
-// Key returns the key of the current key/value pair, or nil if done. The caller
-// should not modify the contents of the returned slice, and its contents may
-// change on the next call to Next.
-func (iter *tableIterator) Key() []byte {
- key := iter.iter.Key()
- if key == nil {
- return nil
- }
- return key[len(iter.prefix):]
-}
-
-// Value returns the value of the current key/value pair, or nil if done. The
-// caller should not modify the contents of the returned slice, and its contents
-// may change on the next call to Next.
-func (iter *tableIterator) Value() []byte {
- return iter.iter.Value()
-}
-
-// Release releases associated resources. Release should always succeed and can
-// be called multiple times without causing error.
-func (iter *tableIterator) Release() {
- iter.iter.Release()
-}
diff --git a/core/rawdb/table_test.go b/core/rawdb/table_test.go
deleted file mode 100644
index a6f4b454f6..0000000000
--- a/core/rawdb/table_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "bytes"
- "testing"
-
- "github.com/ava-labs/libevm/ethdb"
-)
-
-func TestTableDatabase(t *testing.T) { testTableDatabase(t, "prefix") }
-func TestEmptyPrefixTableDatabase(t *testing.T) { testTableDatabase(t, "") }
-
-type testReplayer struct {
- puts [][]byte
- dels [][]byte
-}
-
-func (r *testReplayer) Put(key []byte, value []byte) error {
- r.puts = append(r.puts, key)
- return nil
-}
-
-func (r *testReplayer) Delete(key []byte) error {
- r.dels = append(r.dels, key)
- return nil
-}
-
-func testTableDatabase(t *testing.T, prefix string) {
- db := NewTable(NewMemoryDatabase(), prefix)
-
- var entries = []struct {
- key []byte
- value []byte
- }{
- {[]byte{0x01, 0x02}, []byte{0x0a, 0x0b}},
- {[]byte{0x03, 0x04}, []byte{0x0c, 0x0d}},
- {[]byte{0x05, 0x06}, []byte{0x0e, 0x0f}},
-
- {[]byte{0xff, 0xff, 0x01}, []byte{0x1a, 0x1b}},
- {[]byte{0xff, 0xff, 0x02}, []byte{0x1c, 0x1d}},
- {[]byte{0xff, 0xff, 0x03}, []byte{0x1e, 0x1f}},
- }
-
- // Test Put/Get operation
- for _, entry := range entries {
- db.Put(entry.key, entry.value)
- }
- for _, entry := range entries {
- got, err := db.Get(entry.key)
- if err != nil {
- t.Fatalf("Failed to get value: %v", err)
- }
- if !bytes.Equal(got, entry.value) {
- t.Fatalf("Value mismatch: want=%v, got=%v", entry.value, got)
- }
- }
-
- // Test batch operation
- db = NewTable(NewMemoryDatabase(), prefix)
- batch := db.NewBatch()
- for _, entry := range entries {
- batch.Put(entry.key, entry.value)
- }
- batch.Write()
- for _, entry := range entries {
- got, err := db.Get(entry.key)
- if err != nil {
- t.Fatalf("Failed to get value: %v", err)
- }
- if !bytes.Equal(got, entry.value) {
- t.Fatalf("Value mismatch: want=%v, got=%v", entry.value, got)
- }
- }
-
- // Test batch replayer
- r := &testReplayer{}
- batch.Replay(r)
- for index, entry := range entries {
- got := r.puts[index]
- if !bytes.Equal(got, entry.key) {
- t.Fatalf("Key mismatch: want=%v, got=%v", entry.key, got)
- }
- }
-
- check := func(iter ethdb.Iterator, expCount, index int) {
- count := 0
- for iter.Next() {
- key, value := iter.Key(), iter.Value()
- if !bytes.Equal(key, entries[index].key) {
- t.Fatalf("Key mismatch: want=%v, got=%v", entries[index].key, key)
- }
- if !bytes.Equal(value, entries[index].value) {
- t.Fatalf("Value mismatch: want=%v, got=%v", entries[index].value, value)
- }
- index += 1
- count++
- }
- if count != expCount {
- t.Fatalf("Wrong number of elems, exp %d got %d", expCount, count)
- }
- iter.Release()
- }
- // Test iterators
- check(db.NewIterator(nil, nil), 6, 0)
- // Test iterators with prefix
- check(db.NewIterator([]byte{0xff, 0xff}, nil), 3, 3)
- // Test iterators with start point
- check(db.NewIterator(nil, []byte{0xff, 0xff, 0x02}), 2, 4)
- // Test iterators with prefix and start point
- check(db.NewIterator([]byte{0xee}, nil), 0, 0)
- check(db.NewIterator(nil, []byte{0x00}), 6, 0)
-}