// Package stategen defines functions to regenerate beacon chain states
// by replaying blocks from a stored state checkpoint, useful for
// optimization and reducing a beacon node's resource consumption.
package stategen

import (
	"context"
	"sync"

	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/params"
	"go.opencensus.io/trace"
)

// State represents a management object that handles the internal
// logic of maintaining both hot and cold states in DB.
type State struct {
	beaconDB                db.NoHeadAccessDatabase
	slotsPerArchivedPoint   uint64                   // number of slots between archived (cold) points
	epochBoundarySlotToRoot map[uint64][32]byte      // epoch boundary slot -> root, guarded by epochBoundaryLock
	epochBoundaryLock       sync.RWMutex
	hotStateCache           *cache.HotStateCache     // in-memory cache of recently used hot states
	splitInfo               *splitSlotAndRoot        // slot and root where the DB splits into cold and hot sections
	stateSummaryCache       *cache.StateSummaryCache
}

// splitSlotAndRoot tracks the split point: the slot and block root at which
// the cold and hot sections of the DB are divided.
type splitSlotAndRoot struct {
	slot uint64
	root [32]byte
}

// New returns a new state management object.
func New(db db.NoHeadAccessDatabase, stateSummaryCache *cache.StateSummaryCache) *State {
	return &State{
		beaconDB:                db,
		epochBoundarySlotToRoot: make(map[uint64][32]byte),
		hotStateCache:           cache.NewHotStateCache(),
		splitInfo:               &splitSlotAndRoot{slot: 0, root: params.BeaconConfig().ZeroHash},
		slotsPerArchivedPoint:   params.BeaconConfig().SlotsPerArchivedPoint,
		stateSummaryCache:       stateSummaryCache,
	}
}
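
// Usage sketch (illustrative only, not part of this service): a beacon node
// would typically construct the service with its database and a shared state
// summary cache, then call Resume on startup to recover the last saved state.
// The variable names beaconDB and ctx, and the cache.NewStateSummaryCache
// constructor, are assumptions made for this example.
//
//	sg := stategen.New(beaconDB, cache.NewStateSummaryCache())
//	resumedState, err := sg.Resume(ctx)
//	if err != nil {
//		return err
//	}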

// Resume resumes the state management object from the last archived state in DB,
// or from the previously saved finalized checkpoint when regenerating historical
// states is skipped.
func (s *State) Resume(ctx context.Context) (*state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.Resume")
	defer span.End()

	lastArchivedRoot := s.beaconDB.LastArchivedIndexRoot(ctx)
	lastArchivedState, err := s.beaconDB.State(ctx, lastArchivedRoot)
	if err != nil {
		return nil, err
	}

	if featureconfig.Get().SkipRegenHistoricalStates {
		// If a node doesn't want to regen historical states, it starts
		// from the last finalized checkpoint instead.
		cp, err := s.beaconDB.FinalizedCheckpoint(ctx)
		if err != nil {
			return nil, err
		}
		lastArchivedState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(cp.Root))
		if err != nil {
			return nil, err
		}
		lastArchivedRoot = bytesutil.ToBytes32(cp.Root)
	}

	// Resume as genesis state if there's no last archived state.
	if lastArchivedState == nil {
		return s.beaconDB.GenesisState(ctx)
	}

	s.splitInfo = &splitSlotAndRoot{slot: lastArchivedState.Slot(), root: lastArchivedRoot}
	return lastArchivedState, nil
}

// This verifies the archive point frequency is valid. It checks the interval
// is a nonzero multiple of the slots per epoch. This ensures we have at least
// one archive point within range of our state root history when iterating
// backwards. It also ensures the archive points align with hot state summaries,
// which makes it quicker to migrate hot states to cold.
func verifySlotsPerArchivePoint(slotsPerArchivePoint uint64) bool {
	return slotsPerArchivePoint > 0 &&
		slotsPerArchivePoint%params.BeaconConfig().SlotsPerEpoch == 0
}