core: persist bad blocks (ethereum#21827)
* core: persist bad blocks

* core, eth, internal: address comments

* core/rawdb: add badblocks to inspector

* core, eth: update

* internal: revert

* core, eth: only save 10 bad blocks

* core/rawdb: address comments

* core/rawdb: fix

* core: address comments
rjl493456442 committed Jan 10, 2021
1 parent 89030ec commit 5a1b384
Showing 7 changed files with 196 additions and 41 deletions.
23 changes: 1 addition & 22 deletions core/blockchain.go
@@ -89,7 +89,6 @@ const (
txLookupCacheLimit = 1024
maxFutureBlocks = 256
maxTimeFutureBlocks = 30
badBlockLimit = 10
TriesInMemory = 128

// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
@@ -208,7 +207,6 @@ type BlockChain struct {
processor Processor // Block transaction processor interface
vmConfig vm.Config

badBlocks *lru.Cache // Bad block cache
shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.
terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
writeLegacyJournal bool // Testing flag used to flush the snapshot journal in legacy format.
@@ -227,7 +225,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
blockCache, _ := lru.New(blockCacheLimit)
txLookupCache, _ := lru.New(txLookupCacheLimit)
futureBlocks, _ := lru.New(maxFutureBlocks)
badBlocks, _ := lru.New(badBlockLimit)

bc := &BlockChain{
chainConfig: chainConfig,
@@ -249,7 +246,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
futureBlocks: futureBlocks,
engine: engine,
vmConfig: vmConfig,
badBlocks: badBlocks,
}
bc.validator = NewBlockValidator(chainConfig, bc, engine)
bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
@@ -2374,26 +2370,9 @@ func (bc *BlockChain) maintainTxIndex(ancients uint64) {
}
}

// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
func (bc *BlockChain) BadBlocks() []*types.Block {
blocks := make([]*types.Block, 0, bc.badBlocks.Len())
for _, hash := range bc.badBlocks.Keys() {
if blk, exist := bc.badBlocks.Peek(hash); exist {
block := blk.(*types.Block)
blocks = append(blocks, block)
}
}
return blocks
}

// addBadBlock adds a bad block to the bad-block LRU cache
func (bc *BlockChain) addBadBlock(block *types.Block) {
bc.badBlocks.Add(block.Hash(), block)
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
bc.addBadBlock(block)
rawdb.WriteBadBlock(bc.db, block)

var receiptString string
for i, receipt := range receipts {
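
With this hunk the in-memory bad-block LRU (`BadBlocks`/`addBadBlock`) is gone and `reportBlock` persists the offending block through the new `rawdb` helper introduced below. A minimal hedged sketch of the write side, using an in-memory database as a stand-in for the node's chain database (names and values here are illustrative only):

```go
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// Stand-in for the chain database held by BlockChain (bc.db).
	db := rawdb.NewMemoryDatabase()

	// What reportBlock now does when a block fails validation: persist it so
	// that it survives a restart (the store is deduplicated and capped).
	bad := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(42)})
	rawdb.WriteBadBlock(db, bad)
}
```
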
97 changes: 97 additions & 0 deletions core/rawdb/accessors_chain.go
@@ -20,6 +20,7 @@ import (
"bytes"
"encoding/binary"
"math/big"
"sort"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@@ -702,6 +703,102 @@ func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
DeleteTd(db, hash, number)
}

const badBlockToKeep = 10

type badBlock struct {
Header *types.Header
Body *types.Body
}

// badBlockList implements the sort interface to allow sorting a list of
// bad blocks by their number in reverse order.
type badBlockList []*badBlock

func (s badBlockList) Len() int { return len(s) }
func (s badBlockList) Less(i, j int) bool {
return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
}
func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// ReadBadBlock retrieves the bad block with the corresponding block hash.
func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
blob, err := db.Get(badBlockKey)
if err != nil {
return nil
}
var badBlocks badBlockList
if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
return nil
}
for _, bad := range badBlocks {
if bad.Header.Hash() == hash {
return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles)
}
}
return nil
}

// ReadAllBadBlocks retrieves all the bad blocks in the database.
// All returned blocks are sorted in reverse order by number.
func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
blob, err := db.Get(badBlockKey)
if err != nil {
return nil
}
var badBlocks badBlockList
if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
return nil
}
var blocks []*types.Block
for _, bad := range badBlocks {
blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles))
}
return blocks
}

// WriteBadBlock serializes the bad block into the database. If the number of
// accumulated bad blocks exceeds the limit, the oldest ones are dropped.
func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
blob, err := db.Get(badBlockKey)
if err != nil {
log.Warn("Failed to load old bad blocks", "error", err)
}
var badBlocks badBlockList
if len(blob) > 0 {
if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
log.Crit("Failed to decode old bad blocks", "error", err)
}
}
for _, b := range badBlocks {
if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
return
}
}
badBlocks = append(badBlocks, &badBlock{
Header: block.Header(),
Body: block.Body(),
})
sort.Sort(sort.Reverse(badBlocks))
if len(badBlocks) > badBlockToKeep {
badBlocks = badBlocks[:badBlockToKeep]
}
data, err := rlp.EncodeToBytes(badBlocks)
if err != nil {
log.Crit("Failed to encode bad blocks", "err", err)
}
if err := db.Put(badBlockKey, data); err != nil {
log.Crit("Failed to write bad blocks", "err", err)
}
}

// DeleteBadBlocks deletes all the bad blocks from the database.
func DeleteBadBlocks(db ethdb.KeyValueWriter) {
if err := db.Delete(badBlockKey); err != nil {
log.Crit("Failed to delete bad blocks", "err", err)
}
}

// FindCommonAncestor returns the last common ancestor of two block headers
func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
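
For context, a hedged usage sketch of the accessors above, run against an in-memory database (assumes go-ethereum at this commit): writes are deduplicated by hash and capped at badBlockToKeep entries, and reads come back sorted by descending block number.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// Persist a few bad blocks; only the highest-numbered ten are kept.
	for i := int64(1); i <= 3; i++ {
		rawdb.WriteBadBlock(db, types.NewBlockWithHeader(&types.Header{Number: big.NewInt(i)}))
	}

	// The full list comes back sorted by descending number...
	all := rawdb.ReadAllBadBlocks(db)
	fmt.Println("persisted bad blocks:", len(all)) // 3

	// ...and individual blocks can be looked up by hash.
	if b := rawdb.ReadBadBlock(db, all[0].Hash()); b != nil {
		fmt.Println("highest bad block number:", b.NumberU64()) // 3
	}

	// Wipe the whole list.
	rawdb.DeleteBadBlocks(db)
}
```
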
70 changes: 70 additions & 0 deletions core/rawdb/accessors_chain_test.go
@@ -22,6 +22,7 @@ import (
"fmt"
"io/ioutil"
"math/big"
"math/rand"
"os"
"reflect"
"testing"
@@ -188,6 +189,75 @@ func TestPartialBlockStorage(t *testing.T) {
}
}

// Tests bad block storage and retrieval operations.
func TestBadBlockStorage(t *testing.T) {
db := NewMemoryDatabase()

// Create a test block to move around the database and make sure it's really new
block := types.NewBlockWithHeader(&types.Header{
Number: big.NewInt(1),
Extra: []byte("bad block"),
UncleHash: types.EmptyUncleHash,
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
})
if entry := ReadBadBlock(db, block.Hash()); entry != nil {
t.Fatalf("Non existent block returned: %v", entry)
}
// Write and verify the block in the database
WriteBadBlock(db, block)
if entry := ReadBadBlock(db, block.Hash()); entry == nil {
t.Fatalf("Stored block not found")
} else if entry.Hash() != block.Hash() {
t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
}
// Write one more bad block
blockTwo := types.NewBlockWithHeader(&types.Header{
Number: big.NewInt(2),
Extra: []byte("bad block two"),
UncleHash: types.EmptyUncleHash,
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
})
WriteBadBlock(db, blockTwo)

// Write block one again; it should be filtered out as a duplicate.
WriteBadBlock(db, block)
badBlocks := ReadAllBadBlocks(db)
if len(badBlocks) != 2 {
t.Fatalf("Failed to load all bad blocks")
}

// Write a bunch of bad blocks; all the persisted blocks should be sorted
// in reverse order and any blocks beyond the limit truncated.
for _, n := range rand.Perm(100) {
block := types.NewBlockWithHeader(&types.Header{
Number: big.NewInt(int64(n)),
Extra: []byte("bad block"),
UncleHash: types.EmptyUncleHash,
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
})
WriteBadBlock(db, block)
}
badBlocks = ReadAllBadBlocks(db)
if len(badBlocks) != badBlockToKeep {
t.Fatalf("The number of persised bad blocks in incorrect %d", len(badBlocks))
}
for i := 0; i < len(badBlocks)-1; i++ {
if badBlocks[i].NumberU64() < badBlocks[i+1].NumberU64() {
t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, i+1, badBlocks[i].NumberU64(), badBlocks[i+1].NumberU64())
}
}

// Delete all bad blocks
DeleteBadBlocks(db)
badBlocks = ReadAllBadBlocks(db)
if len(badBlocks) != 0 {
t.Fatalf("Failed to delete bad blocks")
}
}

// Tests block total difficulty storage and retrieval operations.
func TestTdStorage(t *testing.T) {
db := NewMemoryDatabase()
2 changes: 1 addition & 1 deletion core/rawdb/database.go
@@ -355,7 +355,7 @@ func InspectDatabase(db ethdb.Database) error {
bloomTrieNodes.Add(size)
default:
var accounted bool
for _, meta := range [][]byte{databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, fastTrieProgressKey, uncleanShutdownKey} {
for _, meta := range [][]byte{databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, fastTrieProgressKey, uncleanShutdownKey, badBlockKey} {
if bytes.Equal(key, meta) {
metadata.Add(size)
accounted = true
8 changes: 6 additions & 2 deletions core/rawdb/schema.go
@@ -66,6 +66,12 @@ var (
// fastTxLookupLimitKey tracks the transaction lookup limit during fast sync.
fastTxLookupLimitKey = []byte("FastTransactionLookupLimit")

// badBlockKey tracks the list of bad blocks seen by the local node.
badBlockKey = []byte("InvalidBlock")

// uncleanShutdownKey tracks the list of local crashes
uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db

// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
@@ -84,8 +90,6 @@ var (
preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db

uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db

// Chain index prefixes (use `i` + single byte to avoid mixing data types).
BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress

31 changes: 19 additions & 12 deletions eth/api.go
@@ -331,22 +331,29 @@ type BadBlockArgs struct {
// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the
// network, encoded as a JSON list of block hashes.
func (api *PrivateDebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) {
blocks := api.eth.BlockChain().BadBlocks()
results := make([]*BadBlockArgs, len(blocks))

var err error
for i, block := range blocks {
results[i] = &BadBlockArgs{
Hash: block.Hash(),
}
var (
err error
blocks = rawdb.ReadAllBadBlocks(api.eth.chainDb)
results = make([]*BadBlockArgs, 0, len(blocks))
)
for _, block := range blocks {
var (
blockRlp string
blockJSON map[string]interface{}
)
if rlpBytes, err := rlp.EncodeToBytes(block); err != nil {
results[i].RLP = err.Error() // Hacky, but hey, it works
blockRlp = err.Error() // Hacky, but hey, it works
} else {
results[i].RLP = fmt.Sprintf("0x%x", rlpBytes)
blockRlp = fmt.Sprintf("0x%x", rlpBytes)
}
if results[i].Block, err = ethapi.RPCMarshalBlock(block, true, true); err != nil {
results[i].Block = map[string]interface{}{"error": err.Error()}
if blockJSON, err = ethapi.RPCMarshalBlock(block, true, true); err != nil {
blockJSON = map[string]interface{}{"error": err.Error()}
}
results = append(results, &BadBlockArgs{
Hash: block.Hash(),
RLP: blockRlp,
Block: blockJSON,
})
}
return results, nil
}
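
Since debug_getBadBlocks now serves data straight from the chain database, the persisted bad blocks remain visible over JSON-RPC across restarts. A hedged sketch of querying it with go-ethereum's rpc client; the endpoint URL is an assumption and the node must expose the debug namespace:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Hypothetical endpoint; adjust to wherever the node exposes HTTP-RPC
	// with the debug API enabled.
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Each entry mirrors BadBlockArgs: "hash", "rlp" and a "block" JSON form.
	var badBlocks []map[string]interface{}
	if err := client.Call(&badBlocks, "debug_getBadBlocks"); err != nil {
		log.Fatal(err)
	}
	for _, b := range badBlocks {
		fmt.Println("bad block:", b["hash"])
	}
}
```
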
6 changes: 2 additions & 4 deletions eth/api_tracer.go
@@ -404,8 +404,7 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(ctx context.Context, file string,
// EVM against a block pulled from the pool of bad ones and returns them as a JSON
// object.
func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
blocks := api.eth.blockchain.BadBlocks()
for _, block := range blocks {
for _, block := range rawdb.ReadAllBadBlocks(api.eth.chainDb) {
if block.Hash() == hash {
return api.traceBlock(ctx, block, config)
}
@@ -428,8 +427,7 @@ func (api *PrivateDebugAPI) StandardTraceBlockToFile(ctx context.Context, hash c
// execution of EVM against a block pulled from the pool of bad ones to the
// local file system and returns a list of files to the caller.
func (api *PrivateDebugAPI) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
blocks := api.eth.blockchain.BadBlocks()
for _, block := range blocks {
for _, block := range rawdb.ReadAllBadBlocks(api.eth.chainDb) {
if block.Hash() == hash {
return api.standardTraceBlockToFile(ctx, block, config)
}
