core/rawdb: refactor db inspector for extending multiple ancient store #25896

Merged
merged 8 commits on Oct 28, 2022
8 changes: 5 additions & 3 deletions core/blockchain.go
@@ -64,6 +64,7 @@ var (
storageReadTimer = metrics.NewRegisteredTimer("chain/storage/reads", nil)
storageHashTimer = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
storageDeleteTimer = metrics.NewRegisteredTimer("chain/storage/deletes", nil)
storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)

snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
@@ -124,7 +125,7 @@ const (
BlockChainVersion uint64 = 8
)

// CacheConfig contains the configuration values for the trie caching/pruning
// CacheConfig contains the configuration values for the trie database
// that's resident in a blockchain.
type CacheConfig struct {
TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
@@ -1409,7 +1410,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
// In theory we should fire a ChainHeadEvent when we inject
// In theory, we should fire a ChainHeadEvent when we inject
// a canonical block, but sometimes we can insert a batch of
// canonical blocks. Avoid firing too many ChainHeadEvents,
// we will fire an accumulated ChainHeadEvent and disable fire
@@ -1716,11 +1717,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them
accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them
storageDeleteTimer.Update(statedb.StorageDeletes) // Storage deletes are complete, we can mark them
snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them
triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates + statedb.StorageDeletes

blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)

33 changes: 0 additions & 33 deletions core/rawdb/ancient_scheme.go
@@ -16,8 +16,6 @@

package rawdb

import "fmt"

// The list of table names of chain freezer.
const (
// chainFreezerHeaderTable indicates the name of the freezer header table.
@@ -53,34 +51,3 @@ var (

// freezers the collections of all builtin freezers.
var freezers = []string{chainFreezerName}

// InspectFreezerTable dumps out the index of a specific freezer table. The passed
// ancient indicates the path of root ancient directory where the chain freezer can
// be opened. Start and end specify the range for dumping out indexes.
// Note this function can only be used for debugging purposes.
func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
var (
path string
tables map[string]bool
)
switch freezerName {
case chainFreezerName:
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
default:
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}
noSnappy, exist := tables[tableName]
if !exist {
var names []string
for name := range tables {
names = append(names, name)
}
return fmt.Errorf("unknown table, supported ones: %v", names)
}
table, err := newFreezerTable(path, tableName, noSnappy, true)
if err != nil {
return err
}
table.dumpIndexStdout(start, end)
return nil
}
134 changes: 134 additions & 0 deletions core/rawdb/ancient_utils.go
@@ -0,0 +1,134 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
"fmt"
"strings"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
)

// freezerInfo contains the basic information of the freezer.
type freezerInfo struct {
name string // The identifier of freezer
head uint64 // The number of last stored item in the freezer
tail uint64 // The number of first stored item in the freezer
sizes map[string]common.StorageSize // The storage size per table
}

// count returns the number of stored items in the freezer.
func (info freezerInfo) count() uint64 {
return info.head - info.tail + 1
}
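
As a quick illustration of the inclusive bounds (the literal below uses hypothetical values): a freezer whose first stored item is number 5 and whose last is number 104 holds 100 items.

    info := freezerInfo{name: "chain", head: 104, tail: 5} // hypothetical values
    items := info.count() // 100: items 5 through 104, inclusive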

// totalSize returns the storage size of entire freezer.
func (info freezerInfo) totalSize() common.StorageSize {
var total common.StorageSize
for _, size := range info.sizes {
total += size
}
return total
}

// summary returns a string-representation of the freezerInfo.
func (info freezerInfo) summary() [][]string {
var ret [][]string
for table, size := range info.sizes {
ret = append(ret, []string{
fmt.Sprintf("Ancient store (%s)", strings.Title(info.name)),
strings.Title(table),
size.String(),
fmt.Sprintf("%d", info.count()),
})
}
return ret
}

// inspectFreezers inspects all freezers registered in the system.
func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
var infos []freezerInfo
for _, freezer := range freezers {
switch freezer {
case chainFreezerName:
// Chain ancient store is a bit special. It's always opened along
// with the key-value store, inspect the chain store directly.
info := freezerInfo{
name: freezer,
sizes: make(map[string]common.StorageSize),
}
// Retrieve storage size of every contained table.
for table := range chainFreezerNoSnappy {
size, err := db.AncientSize(table)
if err != nil {
return nil, err
}
info.sizes[table] = common.StorageSize(size)
}
// Retrieve the number of last stored item
ancients, err := db.Ancients()
if err != nil {
return nil, err
}
info.head = ancients - 1

// Retrieve the number of first stored item
tail, err := db.Tail()
if err != nil {
return nil, err
}
info.tail = tail
infos = append(infos, info)

default:
return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}
}
return infos, nil
}
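
Routing inspection through the freezers list is what makes the inspector extensible to multiple ancient stores: a new freezer only needs a registration entry and one more case in the switch above. A hypothetical sketch (stateFreezerName and its table registry are invented for illustration; the constructor wiring is elided):

    // Hypothetical: register a second freezer next to the chain freezer.
    var freezers = []string{chainFreezerName, stateFreezerName}

    // Hypothetical extra case inside inspectFreezers' switch:
    case stateFreezerName:
        // A standalone freezer would be opened directly here (constructor
        // elided); the chain freezer is special only in that it is already
        // reachable through the key-value database handle.
        info := freezerInfo{
            name:  freezer,
            sizes: make(map[string]common.StorageSize),
        }
        // ...populate info.sizes, info.head and info.tail from the freezer...
        infos = append(infos, info)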

// InspectFreezerTable dumps out the index of a specific freezer table. The passed
// ancient indicates the path of root ancient directory where the chain freezer can
// be opened. Start and end specify the range for dumping out indexes.
// Note this function can only be used for debugging purposes.
func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
var (
path string
tables map[string]bool
)
switch freezerName {
case chainFreezerName:
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
default:
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}
noSnappy, exist := tables[tableName]
if !exist {
var names []string
for name := range tables {
names = append(names, name)
}
return fmt.Errorf("unknown table, supported ones: %v", names)
}
table, err := newFreezerTable(path, tableName, noSnappy, true)
if err != nil {
return err
}
table.dumpIndexStdout(start, end)
return nil
}
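
For reference, a minimal sketch of invoking the dumper from a standalone debugging program; the ancient-directory path is an assumed example, while "chain" and "headers" are the freezer and table names defined above:

    package main

    import (
        "log"

        "github.com/ethereum/go-ethereum/core/rawdb"
    )

    func main() {
        // Dump index entries 0..100 of the chain freezer's "headers" table.
        // The ancient path below is an assumed example location.
        if err := rawdb.InspectFreezerTable("/data/geth/chaindata/ancient", "chain", "headers", 0, 100); err != nil {
            log.Fatal(err)
        }
    }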
37 changes: 10 additions & 27 deletions core/rawdb/database.go
@@ -379,13 +379,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
beaconHeaders stat
cliqueSnaps stat

// Ancient store statistics
ancientHeadersSize common.StorageSize
ancientBodiesSize common.StorageSize
ancientReceiptsSize common.StorageSize
ancientTdsSize common.StorageSize
ancientHashesSize common.StorageSize

// Les statistic
chtTrieNodes stat
bloomTrieNodes stat
@@ -473,20 +466,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
logged = time.Now()
}
}
// Inspect append-only file store then.
ancientSizes := []*common.StorageSize{&ancientHeadersSize, &ancientBodiesSize, &ancientReceiptsSize, &ancientHashesSize, &ancientTdsSize}
for i, category := range []string{chainFreezerHeaderTable, chainFreezerBodiesTable, chainFreezerReceiptTable, chainFreezerHashTable, chainFreezerDifficultyTable} {
if size, err := db.AncientSize(category); err == nil {
*ancientSizes[i] += common.StorageSize(size)
total += common.StorageSize(size)
}
}
// Get number of ancient rows inside the freezer
ancients := counter(0)
if count, err := db.Ancients(); err == nil {
ancients = counter(count)
}
// Display the database statistic.
// Display the database statistic of key-value store.
stats := [][]string{
{"Key-Value store", "Headers", headers.Size(), headers.Count()},
{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
@@ -504,14 +484,18 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
{"Ancient store", "Bodies", ancientBodiesSize.String(), ancients.String()},
{"Ancient store", "Receipt lists", ancientReceiptsSize.String(), ancients.String()},
{"Ancient store", "Difficulties", ancientTdsSize.String(), ancients.String()},
{"Ancient store", "Block number->hash", ancientHashesSize.String(), ancients.String()},
{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
}
// Inspect all registered append-only file store then.
ancients, err := inspectFreezers(db)
if err != nil {
return err
}
for _, ancient := range ancients {
stats = append(stats, ancient.summary()...)
Member: Moving parts of the formatting code into the freezerInfo feels wrong. It is very, very specific about how this table is formatted here. I think it's a very low-level implementation detail leaking into an API method (even if private). Please inline the formatting and use getters on ancient to get the infos needed for the table.

Member Author: fixed
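
A sketch of the inlining being asked for, assuming the rows are built directly in InspectDatabase using only plain getters (count, totalSize) and field access on freezerInfo; the shape mirrors the diff above, but the final inlined form is not shown in this excerpt:

    // Illustrative only: build the ancient-store rows inline instead of
    // delegating the formatting to freezerInfo.summary().
    for _, ancient := range ancients {
        for table, size := range ancient.sizes {
            stats = append(stats, []string{
                fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
                strings.Title(table),
                size.String(),
                fmt.Sprintf("%d", ancient.count()),
            })
        }
        total += ancient.totalSize()
    }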

total += ancient.totalSize()
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Database", "Category", "Size", "Items"})
table.SetFooter([]string{"", "Total", total.String(), " "})
@@ -521,6 +505,5 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
if unaccounted.size > 0 {
log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
}

return nil
}
13 changes: 7 additions & 6 deletions core/state/snapshot/snapshot.go
@@ -187,8 +187,9 @@ type Tree struct {
// If the memory layers in the journal do not match the disk layer (e.g. there is
// a gap) or the journal is missing, there are two repair cases:
//
// - if the 'recovery' parameter is true, all memory diff-layers will be discarded.
// This case happens when the snapshot is 'ahead' of the state trie.
// - if the 'recovery' parameter is true, memory diff-layers and the disk-layer
// will all be kept. This case happens when the snapshot is 'ahead' of the
// state trie.
// - otherwise, the entire snapshot is considered invalid and will be recreated on
// a background thread.
func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash) (*Tree, error) {
@@ -199,16 +200,16 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root
triedb: triedb,
layers: make(map[common.Hash]snapshot),
}
// Create the building waiter iff the background generation is allowed
if !config.NoBuild && !config.AsyncBuild {
defer snap.waitBuild()
}
// Attempt to load a previously persisted snapshot and rebuild one if failed
head, disabled, err := loadSnapshot(diskdb, triedb, root, config.CacheSize, config.Recovery, config.NoBuild)
if disabled {
log.Warn("Snapshot maintenance disabled (syncing)")
return snap, nil
}
// Create the building waiter iff the background generation is allowed
if !config.NoBuild && !config.AsyncBuild {
defer snap.waitBuild()
}
Comment on lines +209 to +212

Contributor: This is an interesting change, and it's not immediately obvious why it was needed (or correct). It would make a difference in case we're syncing: in the old code we would create a goroutine to waitBuild, and in the new code we would not. Any comments about this?

Member Author: Previously, we would always create a waiter if "sync-style" snapshot construction was required, although in production sync-style generation is always disabled, which means the waiter was never created. But while we are syncing, snapshot construction is disabled; if we still waited on the construction via the waiter, we would deadlock. This change ensures we only create the waiter when the construction is really initialized.
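
A minimal, self-contained sketch of the hazard described above (the types are invented stand-ins for snapshot.Tree, not the real implementation):

    package main

    import "fmt"

    // tree stands in for snapshot.Tree; built signals that background
    // generation has finished.
    type tree struct{ built chan struct{} }

    func (t *tree) waitBuild() { <-t.built }
    func (t *tree) build()     { close(t.built) } // the real code does work first

    // newTree mirrors the fixed ordering in snapshot.New: the waiter is only
    // installed once we know generation will actually run.
    func newTree(disabled bool) *tree {
        t := &tree{built: make(chan struct{})}
        // Old order (buggy): a `defer t.waitBuild()` placed here would also
        // fire on the early return below and block forever, since build()
        // never runs while syncing.
        if disabled {
            return t // snapshot generation disabled: nothing to wait for
        }
        defer t.waitBuild() // new order: waiter created only when building
        go t.build()
        return t
    }

    func main() {
        _ = newTree(true) // returns immediately instead of deadlocking
        fmt.Println("ok")
    }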

if err != nil {
log.Warn("Failed to load snapshot", "err", err)
if !config.NoBuild {
37 changes: 37 additions & 0 deletions core/state/state_object.go
@@ -395,6 +395,43 @@ func (s *stateObject) CommitTrie(db Database) (*trie.NodeSet, error) {
return nodes, err
}

// DeleteTrie the storage trie of the object from db.
func (s *stateObject) DeleteTrie(db Database) (*trie.NodeSet, error) {
// Track the amount of time wasted on iterating and deleting the storage trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageDeletes += time.Since(start) }(time.Now())
}
stTrie, err := db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root)
if err != nil {
return nil, err
}
// It can be an attack vector when iterating a huge contract. Stop collecting
// in case the accumulated nodes reach the threshold. It's fine to not clean
// up the dangling trie nodes since they are non-accessible dangling nodes
// anyway.
var (
paths [][]byte
blobs [][]byte
size common.StorageSize
iter = stTrie.NodeIterator(nil)
)
for iter.Next(true) {
if iter.Hash() == (common.Hash{}) {
continue
}
path, blob := common.CopyBytes(iter.Path()), common.CopyBytes(iter.NodeBlob())
paths = append(paths, path)
blobs = append(blobs, blob)

// Pretty arbitrary number, approximately 1GB as the threshold
size += common.StorageSize(len(path) + len(blob))
if size > 1073741824 {
return nil, nil
}
}
return trie.NewNodeSetWithDeletion(s.addrHash, paths, blobs), nil
}
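
Downstream, the returned node set is folded into the commit's aggregate set; a condensed view of that consumption, taken from the statedb.go hunk later in this diff:

    // Condensed from statedb.go below: delete a destructed account's storage
    // and merge the deletion markers into the commit's node set.
    set, err := obj.DeleteTrie(s.db)
    if err != nil {
        return common.Hash{}, err
    }
    if set != nil {
        if err := nodes.Merge(set); err != nil {
            return common.Hash{}, err
        }
        _, deleted := set.Size()
        storageTrieNodesDeleted += deleted
    }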

// AddBalance adds amount to s's balance.
// It is used to add funds to the destination account of a transfer.
func (s *stateObject) AddBalance(amount *big.Int) {
14 changes: 14 additions & 0 deletions core/state/statedb.go
@@ -117,6 +117,7 @@ type StateDB struct {
StorageHashes time.Duration
StorageUpdates time.Duration
StorageCommits time.Duration
StorageDeletes time.Duration
SnapshotAccountReads time.Duration
SnapshotStorageReads time.Duration
SnapshotCommits time.Duration
@@ -933,6 +934,19 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
storageTrieNodesUpdated += updates
storageTrieNodesDeleted += deleted
}
} else {
// Account is deleted, nuke out the storage data as well.
set, err := obj.DeleteTrie(s.db)
Contributor: If we merge this PR, the production code will start calling obj.DeleteTrie. It will soup up all the destructed storage slots into set, which gets merged into nodes. I think it will eventually get ignored in trie.Database.Update. Is that correct?

Member Author: Yes. Perhaps we can run a full sync before merging?

Contributor: Good idea. I'll start one.

Contributor: This is now running on 03 against master on 04.

Member Author:
INFO [10-19|07:25:02.027] Imported new chain segment               blocks=46   txs=6636    mgas=572.624  elapsed=8.215s       mgasps=69.700  number=10,582,621 hash=2c6a5f..1cd05a age=2y2mo3w   dirty=1022.53MiB
WARN [10-19|07:25:05.512] Full stats report failed                 err="use of closed network connection"
INFO [10-19|07:25:10.056] Imported new chain segment               blocks=54   txs=8816    mgas=633.603  elapsed=8.028s       mgasps=78.920  number=10,582,675 hash=222132..b6c4c3 age=2y2mo3w   dirty=1023.59MiB
INFO [10-19|07:25:11.136] Forkchoice requested sync to new head    number=15,780,646 hash=838d5d..d2a0dc
INFO [10-19|07:25:18.204] Imported new chain segment               blocks=38   txs=6031    mgas=474.161  elapsed=8.148s       mgasps=58.191  number=10,582,713 hash=5e092c..6122bc age=2y2mo3w   dirty=1022.13MiB
INFO [10-19|07:25:26.255] Imported new chain segment               blocks=38   txs=6154    mgas=449.242  elapsed=8.051s       mgasps=55.799  number=10,582,751 hash=81bdcc..5b863f age=2y2mo3w   dirty=1022.24MiB
INFO [10-19|07:25:34.351] Imported new chain segment               blocks=49   txs=7418    mgas=560.476  elapsed=8.096s       mgasps=69.227  number=10,582,800 hash=6d1fac..ab53b9 age=2y2mo3w   dirty=1022.47MiB
INFO [10-19|07:25:42.389] Imported new chain segment               blocks=37   txs=5029    mgas=449.420  elapsed=8.037s       mgasps=55.912  number=10,582,837 hash=e49835..28dbce age=2y2mo3w   dirty=1020.94MiB
INFO [10-19|07:25:50.502] Imported new chain segment               blocks=43   txs=5340    mgas=520.497  elapsed=8.112s       mgasps=64.162  number=10,582,880 hash=da883b..febd87 age=2y2mo3w   dirty=1019.56MiB
INFO [10-19|07:25:58.552] Imported new chain segment               blocks=40   txs=5124    mgas=483.557  elapsed=8.050s       mgasps=60.066  number=10,582,920 hash=868f6b..d04aea age=2y2mo3w   dirty=1023.95MiB
INFO [10-19|07:26:06.656] Imported new chain segment               blocks=39   txs=5016    mgas=484.501  elapsed=8.104s       mgasps=59.782  number=10,582,959 hash=04c5ce..ecf819 age=2y2mo3w   dirty=1023.16MiB
INFO [10-19|07:26:14.831] Imported new chain segment               blocks=40   txs=4348    mgas=472.555  elapsed=8.174s       mgasps=57.806  number=10,582,999 hash=2e0993..e5e54e age=2y2mo3w   dirty=1022.21MiB
INFO [10-19|07:26:22.876] Imported new chain segment               blocks=55   txs=5907    mgas=657.391  elapsed=8.044s       mgasps=81.715  number=10,583,054 hash=850a06..fe8467 age=2y2mo3w   dirty=1020.27MiB
INFO [10-19|07:26:31.040] Imported new chain segment               blocks=37   txs=4889    mgas=460.583  elapsed=8.164s       mgasps=56.414  number=10,583,091 hash=14c569..1203d0 age=2y2mo3w   dirty=1022.72MiB
INFO [10-19|07:26:39.110] Imported new chain segment               blocks=37   txs=4654    mgas=459.029  elapsed=8.069s       mgasps=56.887  number=10,583,128 hash=df3e70..8ca1d7 age=2y2mo3w   dirty=1023.03MiB
INFO [10-19|07:26:47.147] Imported new chain segment               blocks=39   txs=4002    mgas=472.347  elapsed=8.037s       mgasps=58.766  number=10,583,167 hash=97cd6f..31b5e0 age=2y2mo3w   dirty=1023.31MiB
ubuntu@bench03:~$

Should be ok after running for 6 days.

Contributor: Actually, I was just looking into this, and I don't think I ran the correct code.

Contributor: restarted. sorry :(

Member Author: No worries, let's wait a few more days then.

if err != nil {
return common.Hash{}, err
}
if set != nil {
if err := nodes.Merge(set); err != nil {
return common.Hash{}, err
}
_, deleted := set.Size()
storageTrieNodesDeleted += deleted
}
}
}
if len(s.stateObjectsDirty) > 0 {