// (c) 2019-2020, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package snapshot
import (
"bytes"
"encoding/binary"
"fmt"
"math/big"
"time"
"github.com/ava-labs/subnet-evm/core/rawdb"
"github.com/ava-labs/subnet-evm/ethdb"
"github.com/ava-labs/subnet-evm/trie"
"github.com/ava-labs/subnet-evm/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
const (
snapshotCacheNamespace = "state/snapshot/clean/fastcache" // prefix for detailed stats from the snapshot fastcache
snapshotCacheStatsUpdateFrequency = 1000 // update stats from the snapshot fastcache once per 1000 ops
)
var (
// emptyRoot is the known root hash of an empty trie.
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
// emptyCode is the known hash of the empty EVM bytecode.
emptyCode = crypto.Keccak256Hash(nil)
)
// generatorStats is a collection of statistics gathered by the snapshot generator
// for logging purposes.
type generatorStats struct {
wiping chan struct{} // Notification channel if wiping is in progress
origin uint64 // Origin prefix where generation started
start time.Time // Timestamp when generation started
accounts uint64 // Number of accounts indexed (generated or recovered)
slots uint64 // Number of storage slots indexed (generated or recovered)
storage common.StorageSize // Total account and storage slot size (generation or recovery)
}
// Info creates a contextual info-level log with the given message and the context pulled
// from the internally maintained statistics.
func (gs *generatorStats) Info(msg string, root common.Hash, marker []byte) {
gs.log(log.LvlInfo, msg, root, marker)
}
// Debug creates a contextual debug-level log with the given message and the context pulled
// from the internally maintained statistics.
func (gs *generatorStats) Debug(msg string, root common.Hash, marker []byte) {
gs.log(log.LvlDebug, msg, root, marker)
}
// log creates a contextual log with the given message and the context pulled
// from the internally maintained statistics.
func (gs *generatorStats) log(level log.Lvl, msg string, root common.Hash, marker []byte) {
var ctx []interface{}
if root != (common.Hash{}) {
ctx = append(ctx, []interface{}{"root", root}...)
}
// Figure out whether we're after or within an account
switch len(marker) {
case common.HashLength:
ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...)
case 2 * common.HashLength:
ctx = append(ctx, []interface{}{
"in", common.BytesToHash(marker[:common.HashLength]),
"at", common.BytesToHash(marker[common.HashLength:]),
}...)
}
// Add the usual measurements
ctx = append(ctx, []interface{}{
"accounts", gs.accounts,
"slots", gs.slots,
"storage", gs.storage,
"elapsed", common.PrettyDuration(time.Since(gs.start)),
}...)
// Calculate the estimated indexing time based on current stats
if len(marker) > 0 {
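// Progress is measured as the distance covered through the 64-bit prefix
// of the account keyspace since the origin; the ETA extrapolates the
// remaining distance at the observed speed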
if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 {
left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8])
speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1 to avoid division by zero
ctx = append(ctx, []interface{}{
"eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond),
}...)
}
}
switch level {
case log.LvlTrace:
log.Trace(msg, ctx...)
case log.LvlDebug:
log.Debug(msg, ctx...)
case log.LvlInfo:
log.Info(msg, ctx...)
case log.LvlWarn:
log.Warn(msg, ctx...)
case log.LvlError:
log.Error(msg, ctx...)
case log.LvlCrit:
log.Crit(msg, ctx...)
default:
log.Error(fmt.Sprintf("log with invalid log level %s: %s", level, msg), ctx...)
}
}
// generateSnapshot regenerates a brand new snapshot based on an existing state
// database and head block asynchronously. The snapshot is returned immediately
// and generation is continued in the background until done.
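//
// A minimal usage sketch (hypothetical; in practice the snapshot Tree drives
// this and callers do not touch genAbort directly):
//
//	base := generateSnapshot(diskdb, triedb, 256, blockHash, root, nil)
//	// ... later, to interrupt generation:
//	abort := make(chan struct{})
//	base.genAbort <- abort // request an interruption
//	<-abort                // closed by the generator once progress is persisted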
func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, blockHash, root common.Hash, wiper chan struct{}) *diskLayer {
// Wipe any previously existing snapshot from the database if no wiper is
// currently in progress.
if wiper == nil {
wiper = WipeSnapshot(diskdb, true)
}
// Create a new disk layer with an initialized state marker at zero
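// The marker encodes progress through the keyspace: nil means generation is
// complete, an empty non-nil slice means it starts from scratch, a 32-byte
// value is the last indexed account hash, and a 64-byte value appends the
// last indexed storage slot hash.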
var (
stats = &generatorStats{wiping: wiper, start: time.Now()}
batch = diskdb.NewBatch()
genMarker = []byte{} // Initialized but empty!
)
rawdb.WriteSnapshotBlockHash(batch, blockHash)
rawdb.WriteSnapshotRoot(batch, root)
journalProgress(batch, genMarker, stats)
if err := batch.Write(); err != nil {
log.Crit("Failed to write initialized state marker", "err", err)
}
base := &diskLayer{
diskdb: diskdb,
triedb: triedb,
blockHash: blockHash,
root: root,
cache: newMeteredSnapshotCache(cache * 1024 * 1024),
genMarker: genMarker,
genPending: make(chan struct{}),
genAbort: make(chan chan struct{}),
created: time.Now(),
}
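// Generation runs in the background; to stop it, send a fresh channel down
// genAbort and wait for the generator to close it.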
go base.generate(stats)
log.Debug("Start snapshot generation", "root", root)
return base
}
// journalProgress persists the generator stats into the database to resume later.
func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorStats) {
// Write out the generator marker. Note the generator entry is stored as a
// standalone value, not mixed into the journal, so it's fine for the
// generator to be persisted while the journal is not.
entry := journalGenerator{
Done: marker == nil,
Marker: marker,
}
if stats != nil {
entry.Wiping = (stats.wiping != nil)
entry.Accounts = stats.accounts
entry.Slots = stats.slots
entry.Storage = uint64(stats.storage)
}
blob, err := rlp.EncodeToBytes(entry)
if err != nil {
panic(err) // Cannot happen, here to catch dev errors
}
var logstr string
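// Render the marker as a human-readable progress string for the log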
switch {
case marker == nil:
logstr = "done"
case bytes.Equal(marker, []byte{}):
logstr = "empty"
case len(marker) == common.HashLength:
logstr = fmt.Sprintf("%#x", marker)
default:
logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
}
log.Debug("Journalled generator progress", "progress", logstr)
rawdb.WriteSnapshotGenerator(db, blob)
}
// checkAndFlush checks to see if snapshot generation has been aborted or if
// the current batch size is greater than ethdb.IdealBatchSize. If so, it saves
// the current progress to disk and returns true. Otherwise, it may log the
// current progress and returns false.
func (dl *diskLayer) checkAndFlush(batch ethdb.Batch, stats *generatorStats, currentLocation []byte) bool {
// If we've exceeded our batch allowance or termination was requested, flush to disk
var abort chan struct{}
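// Poll for an abort request without blocking, so generation keeps moving
// when none is pending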
select {
case abort = <-dl.genAbort:
default:
}
if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
if bytes.Compare(currentLocation, dl.genMarker) < 0 {
log.Error("Snapshot generator went backwards",
"currentLocation", fmt.Sprintf("%x", currentLocation),
"genMarker", fmt.Sprintf("%x", dl.genMarker))
}
// Flush out the batch regardless of whether it's empty. It's possible
// that all the states were recovered from existing data, in which case
// the batch is empty but generation has still made progress.
journalProgress(batch, currentLocation, stats)
if err := batch.Write(); err != nil {
log.Error("Failed to flush batch", "err", err)
if abort == nil {
abort = <-dl.genAbort
}
dl.genStats = stats
close(abort)
return true
}
batch.Reset()
dl.lock.Lock()
dl.genMarker = currentLocation
dl.lock.Unlock()
if abort != nil {
stats.Debug("Aborting state snapshot generation", dl.root, currentLocation)
dl.genStats = stats
close(abort)
return true
}
}
if time.Since(dl.logged) > 8*time.Second {
stats.Info("Generating state snapshot", dl.root, currentLocation)
dl.logged = time.Now()
}
return false
}
// generate is a background thread that iterates over the state and storage tries,
// constructing the state snapshot. All the arguments are purely for statistics
// gathering and logging, since the method surfs the blocks as they arrive, often
// being restarted.
func (dl *diskLayer) generate(stats *generatorStats) {
// If a database wipe is in operation, wait until it's done
if stats.wiping != nil {
stats.Info("Wiper running, state snapshotting paused", common.Hash{}, dl.genMarker)
select {
// If wiper is done, resume normal mode of operation
case <-stats.wiping:
stats.wiping = nil
stats.start = time.Now()
// If generator was aborted during wipe, return
case abort := <-dl.genAbort:
stats.Debug("Aborting state snapshot generation", dl.root, dl.genMarker)
dl.genStats = stats
close(abort)
return
}
}
// Create an account and state iterator pointing to the current generator marker
accTrie, err := trie.NewStateTrie(common.Hash{}, dl.root, dl.triedb)
if err != nil {
// The account trie is missing (GC), surf the chain until one becomes available
stats.Info("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
abort := <-dl.genAbort
dl.genStats = stats
close(abort)
return
}
stats.Debug("Resuming state snapshot generation", dl.root, dl.genMarker)
var accMarker []byte
if len(dl.genMarker) > 0 { // an empty marker means start from scratch, which maps to a nil account marker
accMarker = dl.genMarker[:common.HashLength]
}
accIt := trie.NewIterator(accTrie.NodeIterator(accMarker))
batch := dl.diskdb.NewBatch()
// Iterate from the previous marker and continue generating the state snapshot
dl.logged = time.Now()
for accIt.Next() {
// Retrieve the current account and flatten it into the internal format
accountHash := common.BytesToHash(accIt.Key)
var acc struct {
Nonce uint64
Balance *big.Int
Root common.Hash
CodeHash []byte
}
if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
log.Crit("Invalid account encountered during snapshot creation", "err", err)
}
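// Flatten the account into the slim snapshot format, which omits the
// empty storage root and empty code hash to save space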
data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
// If the account is not yet in-progress, write it out
if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
rawdb.WriteAccountSnapshot(batch, accountHash, data)
stats.storage += common.StorageSize(1 + common.HashLength + len(data))
stats.accounts++
}
marker := accountHash[:]
// If snapshot generation resumed here after an interruption, the marker may
// go backwards: the previous genMarker contained both an account hash and a
// storage hash, so re-flushing only the account hash would rewind progress
if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
marker = dl.genMarker[:]
}
if dl.checkAndFlush(batch, stats, marker) {
// checkAndFlush handles abort
return
}
// If the iterated account is a contract, iterate through corresponding contract
// storage to generate snapshot entries.
if acc.Root != emptyRoot {
storeTrie, err := trie.NewStateTrie(accountHash, acc.Root, dl.triedb)
if err != nil {
log.Error("Generator failed to access storage trie", "root", dl.root, "account", accountHash, "stroot", acc.Root, "err", err)
abort := <-dl.genAbort
dl.genStats = stats
close(abort)
return
}
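// If generation was interrupted inside this account's storage, resume
// the storage iteration from the recorded slot instead of the beginning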
var storeMarker []byte
if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength {
storeMarker = dl.genMarker[common.HashLength:]
}
storeIt := trie.NewIterator(storeTrie.NodeIterator(storeMarker))
for storeIt.Next() {
rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(storeIt.Key), storeIt.Value)
stats.storage += common.StorageSize(1 + 2*common.HashLength + len(storeIt.Value))
stats.slots++
if dl.checkAndFlush(batch, stats, append(accountHash[:], storeIt.Key...)) {
// checkAndFlush handles abort
return
}
}
if err := storeIt.Err; err != nil {
log.Error("Generator failed to iterate storage trie", "accroot", dl.root, "acchash", common.BytesToHash(accIt.Key), "stroot", acc.Root, "err", err)
abort := <-dl.genAbort
dl.genStats = stats
close(abort)
return
}
}
if time.Since(dl.logged) > 8*time.Second {
stats.Info("Generating state snapshot", dl.root, accIt.Key)
dl.logged = time.Now()
}
// Some account has been processed; unset the account marker
accMarker = nil
}
if err := accIt.Err; err != nil {
log.Error("Generator failed to iterate account trie", "root", dl.root, "err", err)
abort := <-dl.genAbort
dl.genStats = stats
close(abort)
return
}
// Snapshot fully generated, set the marker to nil.
// Note that even if there is nothing to commit, the generator is persisted
// anyway to mark the snapshot as complete.
journalProgress(batch, nil, stats)
if err := batch.Write(); err != nil {
log.Error("Failed to flush batch", "err", err)
abort := <-dl.genAbort
dl.genStats = stats
close(abort)
return
}
log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
"storage", stats.storage, "elapsed", common.PrettyDuration(time.Since(stats.start)))
dl.lock.Lock()
dl.genMarker = nil
dl.genStats = stats
close(dl.genPending)
dl.lock.Unlock()
// Someone will be looking for us, wait it out
abort := <-dl.genAbort
close(abort)
}
func newMeteredSnapshotCache(size int) *utils.MeteredCache {
return utils.NewMeteredCache(size, "", snapshotCacheNamespace, snapshotCacheStatsUpdateFrequency)
}