Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add skip index flag #399

Merged
merged 11 commits into from
Nov 21, 2023
13 changes: 9 additions & 4 deletions core/blockchain.go
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,7 @@ type CacheConfig struct {
Preimages bool // Whether to store preimage of trie key to the disk
AcceptedCacheSize int // Depth of accepted headers cache and accepted logs cache at the accepted tip
TxLookupLimit uint64 // Number of recent blocks for which to maintain transaction lookup indices
SkipTxIndexing bool // Whether to skip transaction indexing

SnapshotNoBuild bool // Whether the background generation is allowed
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
Expand Down Expand Up @@ -403,6 +404,8 @@ func NewBlockChain(
}

// Start tx indexer/unindexer if required.
// TODO: with skip tx indexing, we should not start the tx indexer if there is no tx to remove.
// or we can stop it once there is no lookback tx index to remove.
if bc.cacheConfig.TxLookupLimit != 0 {
bc.wg.Add(1)
go bc.dispatchTxUnindexer()
Expand Down Expand Up @@ -481,9 +484,11 @@ func (bc *BlockChain) dispatchTxUnindexer() {
// This includes the following:
// - transaction lookup indices
// - updating the acceptor tip index
func (bc *BlockChain) writeBlockAcceptedIndices(b *types.Block) error {
func (bc *BlockChain) writeBlockAcceptedIndices(b *types.Block, skipTxIndices bool) error {
patrick-ogrady marked this conversation as resolved.
Show resolved Hide resolved
batch := bc.db.NewBatch()
rawdb.WriteTxLookupEntriesByBlock(batch, b)
if !skipTxIndices {
rawdb.WriteTxLookupEntriesByBlock(batch, b)
}
if err := rawdb.WriteAcceptorTip(batch, b.Hash()); err != nil {
return fmt.Errorf("%w: failed to write acceptor tip key", err)
}
Expand Down Expand Up @@ -574,7 +579,7 @@ func (bc *BlockChain) startAcceptor() {
}

// Update last processed and transaction lookup index
if err := bc.writeBlockAcceptedIndices(next); err != nil {
if err := bc.writeBlockAcceptedIndices(next, bc.cacheConfig.SkipTxIndexing); err != nil {
log.Crit("failed to write accepted block effects", "err", err)
}

Expand Down Expand Up @@ -1863,7 +1868,7 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error

// Write any unsaved indices to disk
if writeIndices {
if err := bc.writeBlockAcceptedIndices(current); err != nil {
if err := bc.writeBlockAcceptedIndices(current, bc.cacheConfig.SkipTxIndexing); err != nil {
return fmt.Errorf("%w: failed to process accepted block indices", err)
}
}
Expand Down
161 changes: 154 additions & 7 deletions core/blockchain_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -652,11 +652,14 @@ func TestTransactionIndices(t *testing.T) {

check := func(tail *uint64, chain *BlockChain) {
stored := rawdb.ReadTxIndexTail(chain.db)
require.EqualValues(tail, stored)

if tail == nil {
return
require.Nil(stored)
tail = new(uint64)
} else {
require.EqualValues(tail, stored, "expected tail %d, got %d", *tail, *stored)
}

for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ {
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
if block.Transactions().Len() == 0 {
Expand Down Expand Up @@ -737,10 +740,128 @@ func TestTransactionIndices(t *testing.T) {
}
}

// TestTransactionIndicesSkipIndexing tests that enabling the SkipTxIndexing
// cache-config flag prevents transactions in newly accepted blocks from being
// indexed, while the background unindexer still prunes previously written
// lookup entries down to the configured TxLookupLimit.
func TestTransactionIndicesSkipIndexing(t *testing.T) {
	// Configure and generate a sample block chain
	require := require.New(t)
	var (
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
		funds   = big.NewInt(10000000000000)
		gspec   = &Genesis{
			Config: &params.ChainConfig{HomesteadBlock: new(big.Int)},
			Alloc:  GenesisAlloc{addr1: {Balance: funds}},
		}
		signer = types.LatestSigner(gspec.Config)
	)
	// 128 blocks, each carrying one simple value transfer from addr1 to addr2.
	genDb, blocks, _, err := GenerateChainWithGenesis(gspec, dummy.NewDummyEngine(&TestCallbacks), 128, 10, func(i int, block *BlockGen) {
		tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1)
		require.NoError(err)
		block.AddTx(tx)
	})
	require.NoError(err)

	// Extra blocks appended one at a time in the loop below, after
	// SkipTxIndexing is turned on.
	blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewDummyEngine(&TestCallbacks), genDb, 10, 10, nil)
	require.NoError(err)

	// check verifies the stored tx-index tail and that lookup entries exist
	// for every transaction in blocks [tail, to) while entries for blocks
	// [to, head] are absent (either unindexed by the flag or pruned).
	check := func(tail *uint64, to uint64, chain *BlockChain) {
		stored := rawdb.ReadTxIndexTail(chain.db)
		if tail == nil {
			require.Nil(stored)
			// Treat a missing tail as 0 for the range scans below.
			tail = new(uint64)
		} else {
			require.EqualValues(tail, stored, "expected tail %d, got %d", *tail, *stored)
		}

		for i := *tail; i < to; i++ {
			block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
			if block.Transactions().Len() == 0 {
				continue
			}
			for _, tx := range block.Transactions() {
				index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash())
				require.NotNilf(index, "Miss transaction indices, number %d hash %s", i, tx.Hash().Hex())
			}
		}

		for i := to; i <= chain.CurrentBlock().Number.Uint64(); i++ {
			block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
			if block.Transactions().Len() == 0 {
				continue
			}
			for _, tx := range block.Transactions() {
				index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash())
				require.Nilf(index, "Transaction indices should be deleted, number %d hash %s", i, tx.Hash().Hex())
			}
		}
	}

	conf := &CacheConfig{
		TrieCleanLimit:        256,
		TrieDirtyLimit:        256,
		TrieDirtyCommitTarget: 20,
		Pruning:               true,
		CommitInterval:        4096,
		SnapshotLimit:         256,
		SnapshotNoBuild:       true, // Ensure the test errors if snapshot initialization fails
		AcceptorQueueLimit:    64,
	}

	// Init block chain and check all needed indices have been indexed.
	chainDB := rawdb.NewMemoryDatabase()
	chain, err := createBlockChain(chainDB, conf, gspec, common.Hash{})
	require.NoError(err)

	_, err = chain.InsertChain(blocks)
	require.NoError(err)

	for _, block := range blocks {
		err := chain.Accept(block)
		require.NoError(err)
	}
	chain.DrainAcceptorQueue()

	chain.Stop()
	check(nil, chain.CurrentHeader().Number.Uint64(), chain) // check all indices have been indexed until that point

	lastAcceptedHash := chain.CurrentHeader().Hash()

	// Reconstruct a block chain which only reserves limited tx indices.
	// 128 blocks were previously created with indexing. Now we add a new block
	// at each test step with SkipTxIndexing enabled, so the new block's
	// transactions are never indexed and old entries beyond the limit are pruned.
	limit := []uint64{130 /* 129 + 1 reserve all */, 64 /* drop stale */, 32 /* shorten history */}
	to := []uint64{129 /* reserve all */, 130, 131}
	tails := []uint64{0 /* reserve all */, 67 /* 130 - 64 + 1 */, 100 /* 131 - 32 + 1 */}
	conf.SkipTxIndexing = true
	for i, l := range limit {
		conf.TxLookupLimit = l

		chain, err := createBlockChain(chainDB, conf, gspec, lastAcceptedHash)
		require.NoError(err)

		newBlks := blocks2[i : i+1]
		_, err = chain.InsertChain(newBlks) // Feed chain a higher block to trigger indices updater.
		require.NoError(err)

		err = chain.Accept(newBlks[0]) // Accept the block to trigger indices updater.
		require.NoError(err)

		chain.DrainAcceptorQueue()
		time.Sleep(50 * time.Millisecond) // Wait for indices initialisation

		chain.Stop()
		check(&tails[i], to[i], chain)

		lastAcceptedHash = chain.CurrentHeader().Hash()
	}
}

// TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted
// correctly in case reorg is called.
func TestCanonicalHashMarker(t *testing.T) {
var cases = []struct {
cases := []struct {
forkA int
forkB int
}{
Expand Down Expand Up @@ -871,6 +992,29 @@ func TestTxLookupBlockChain(t *testing.T) {
}
}

// TestTxLookupSkipIndexingBlockChain runs the shared blockchain test suite
// against a chain configured with both a tx-lookup limit and SkipTxIndexing
// enabled, exercising the skip-indexing code path end to end.
func TestTxLookupSkipIndexingBlockChain(t *testing.T) {
	config := &CacheConfig{
		TrieCleanLimit:        256,
		TrieDirtyLimit:        256,
		TrieDirtyCommitTarget: 20,
		Pruning:               true,
		CommitInterval:        4096,
		SnapshotLimit:         256,
		SnapshotNoBuild:       true, // Ensure the test errors if snapshot initialization fails
		AcceptorQueueLimit:    64,   // ensure channel doesn't block
		TxLookupLimit:         5,
		SkipTxIndexing:        true,
	}
	// Factory handed to each suite entry; binds the skip-indexing config.
	newChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) {
		return createBlockChain(db, config, gspec, lastAcceptedHash)
	}
	for _, tt := range tests {
		t.Run(tt.Name, func(t *testing.T) {
			tt.testFunc(t, newChain)
		})
	}
}

func TestCreateThenDeletePreByzantium(t *testing.T) {
// We want to use pre-byzantium rules where we have intermediate state roots
// between transactions.
Expand All @@ -882,6 +1026,7 @@ func TestCreateThenDeletePreByzantium(t *testing.T) {
config.MuirGlacierBlock = nil
testCreateThenDelete(t, &config)
}

func TestCreateThenDeletePostByzantium(t *testing.T) {
testCreateThenDelete(t, params.TestChainConfig)
}
Expand All @@ -906,7 +1051,8 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) {
byte(vm.PUSH1), 0x1,
byte(vm.SSTORE),
// Get the runtime-code on the stack
byte(vm.PUSH32)}
byte(vm.PUSH32),
}
initCode = append(initCode, code...)
initCode = append(initCode, []byte{
byte(vm.PUSH1), 0x0, // offset
Expand Down Expand Up @@ -948,8 +1094,8 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) {
})
// Import the canonical chain
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{
//Debug: true,
//Tracer: logger.NewJSONLogger(nil, os.Stdout),
// Debug: true,
// Tracer: logger.NewJSONLogger(nil, os.Stdout),
}, common.Hash{}, false)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
Expand Down Expand Up @@ -994,7 +1140,8 @@ func TestTransientStorageReset(t *testing.T) {
byte(vm.TSTORE),

// Get the runtime-code on the stack
byte(vm.PUSH32)}
byte(vm.PUSH32),
}
initCode = append(initCode, code...)
initCode = append(initCode, []byte{
byte(vm.PUSH1), 0x0, // offset
Expand Down
1 change: 1 addition & 0 deletions eth/backend.go
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,7 @@ func New(
Preimages: config.Preimages,
AcceptedCacheSize: config.AcceptedCacheSize,
TxLookupLimit: config.TxLookupLimit,
SkipTxIndexing: config.SkipTxIndexing,
}
)

Expand Down
5 changes: 5 additions & 0 deletions eth/ethconfig/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -156,4 +156,9 @@ type Config struct {
// * 0: means no limit
// * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes
TxLookupLimit uint64

// SkipTxIndexing skips indexing transactions.
// This is useful for light clients that don't need to index transactions.
// This cannot be enabled at the same time with TxLookupLimit = 0 (meaning no limit for tx indexing).
SkipTxIndexing bool
}
10 changes: 10 additions & 0 deletions plugin/evm/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,8 @@ var (
defaultAllowUnprotectedTxHashes = []common.Hash{
common.HexToHash("0xfefb2da535e927b85fe68eb81cb2e4a5827c905f78381a01ef2322aa9b0aee8e"), // EIP-1820: https://eips.ethereum.org/EIPS/eip-1820
}

errSkipIndexingWithNoLimit = fmt.Errorf("cannot skip tx indexing and have no limit for tx lookup at the same time")
)

type Duration struct {
Expand Down Expand Up @@ -196,6 +198,11 @@ type Config struct {
// * 0: means no limit
// * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes
TxLookupLimit uint64 `json:"tx-lookup-limit"`

// SkipTxIndexing skips indexing transactions.
// This is useful for light clients that don't need to index transactions.
patrick-ogrady marked this conversation as resolved.
Show resolved Hide resolved
// This cannot be enabled at the same time with TxLookupLimit = 0 (meaning no limit for tx indexing).
patrick-ogrady marked this conversation as resolved.
Show resolved Hide resolved
SkipTxIndexing bool `json:"skip-tx-indexing"`
}

// EthAPIs returns an array of strings representing the Eth APIs that should be enabled
Expand Down Expand Up @@ -288,6 +295,9 @@ func (c *Config) Validate() error {
return fmt.Errorf("cannot use commit interval of 0 with pruning enabled")
}

if c.SkipTxIndexing && c.TxLookupLimit == 0 {
return errSkipIndexingWithNoLimit
}
return nil
}

Expand Down