-
Notifications
You must be signed in to change notification settings - Fork 181
/
indexer.go
255 lines (222 loc) · 6.82 KB
/
indexer.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
package types
import (
"encoding/binary"
"github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types"
sdk "github.com/okex/exchain/libs/cosmos-sdk/types"
tmtypes "github.com/okex/exchain/libs/tendermint/types"
dbm "github.com/okex/exchain/libs/tm-db"
"github.com/spf13/viper"
"path/filepath"
"sync"
"sync/atomic"
)
var (
	// indexer is the package-level singleton, populated by InitIndexer
	// when the bloom filter is enabled.
	indexer *Indexer
	// enableBloomFilter caches the FlagEnableBloomFilter viper setting;
	// it is read exactly once in GetEnableBloomFilter.
	enableBloomFilter bool
	// once guards the one-time read of the flag above.
	once sync.Once
)
// Keeper abstracts the keeper queries the indexer needs: the aggregated
// bloom filter and the block hash stored for a given height.
type Keeper interface {
	GetBlockBloom(ctx sdk.Context, height int64) ethtypes.Bloom
	GetHeightHash(ctx sdk.Context, height uint64) common.Hash
}
// CloseIndexer closes the bloom indexer's backing database, if one was
// ever opened. Safe to call when the indexer was never initialized.
func CloseIndexer() {
	if indexer == nil {
		return
	}
	if db := indexer.backend.db; db != nil {
		db.Close()
	}
}
// GetEnableBloomFilter lazily reads FlagEnableBloomFilter from viper on the
// first call and returns the cached value on every subsequent call.
func GetEnableBloomFilter() bool {
	once.Do(func() { enableBloomFilter = viper.GetBool(FlagEnableBloomFilter) })
	return enableBloomFilter
}
// Indexer does a post-processing job for equally sized sections of the
// canonical chain (like BloomBits and CHT structures). An Indexer is
// connected to the blockchain through the event system by starting a
// ChainHeadEventLoop in a goroutine.
//
// Further child ChainIndexers can be added which use the output of the parent
// section indexer. These child indexers receive new head notifications only
// after an entire section has been finished or in case of rollbacks that might
// affect already finished sections.
type Indexer struct {
	backend        bloomIndexer     // Background processor generating the index data content
	update         chan sdk.Context // Notification channel that headers should be processed
	quit           chan struct{}    // Quit channel to tear down running goroutines
	storedSections uint64           // Number of sections successfully indexed into the database
	processing     uint32           // Atomic flag whether indexer is processing or not
}
// InitIndexer builds the package-level Indexer on top of db and primes its
// cached section count from the database. It is a no-op when the bloom
// filter feature is disabled.
func InitIndexer(db dbm.DB) {
	if !enableBloomFilter {
		return
	}
	idx := &Indexer{
		backend: initBloomIndexer(db),
		update:  make(chan sdk.Context),
		quit:    make(chan struct{}),
	}
	idx.setValidSections(idx.GetValidSections())
	indexer = idx
}
// BloomDb opens (or creates) the bloom-filter LevelDB under <home>/data.
// It panics when the database cannot be opened, since the indexer cannot
// operate without its backing store.
func BloomDb() dbm.DB {
	dataDir := filepath.Join(viper.GetString("home"), "data")
	// The original declared `var err error` before this line; `:=` already
	// introduces err here, so the extra declaration was redundant noise.
	db, err := sdk.NewLevelDB(bloomDir, dataDir)
	if err != nil {
		panic(err)
	}
	return db
}
// GetIndexer returns the package-level Indexer instance; it is nil when the
// bloom filter is disabled or InitIndexer has not run yet.
func GetIndexer() *Indexer {
	return indexer
}
// StoredSection reports how many sections have been fully indexed into the
// database. It tolerates a nil receiver, returning 0 in that case.
func (i *Indexer) StoredSection() uint64 {
	if i == nil {
		return 0
	}
	return i.storedSections
}
// IsProcessing reports whether a ProcessSection run is currently in flight.
// Like the sibling accessors StoredSection and GetDB, it now tolerates a nil
// receiver (GetIndexer can legitimately return nil), reporting false instead
// of panicking.
func (i *Indexer) IsProcessing() bool {
	if i == nil {
		return false
	}
	return atomic.LoadUint32(&i.processing) == 1
}
// ProcessSection indexes every finished chain section up to
// interval/BloomBitsBlocks, committing bloom bits per section and recording
// each section's head hash and the running section count. The generated KV
// pairs for the last committed section are written into bloomData. An atomic
// flag ensures only one invocation runs at a time; a concurrent call logs an
// error and returns immediately.
func (i *Indexer) ProcessSection(ctx sdk.Context, k Keeper, interval uint64, bloomData *[]*KV) {
	if atomic.SwapUint32(&i.processing, 1) == 1 {
		ctx.Logger().Error("matcher is already running")
		return
	}
	defer func() {
		// Recover so a bad section cannot crash the node; the processing
		// flag is still cleared because defers run LIFO.
		if r := recover(); r != nil {
			// Fixed: the structured logger takes msg + key/value pairs; the
			// original passed ctx.BlockHeight() in a key position.
			ctx.Logger().Error("ProcessSection panic", "height", ctx.BlockHeight(), "err", r)
		}
	}()
	defer atomic.StoreUint32(&i.processing, 0)
	knownSection := interval / BloomBitsBlocks
	for i.storedSections < knownSection {
		section := i.storedSections
		var lastHead common.Hash
		if section > 0 {
			lastHead = i.sectionHead(section - 1)
		}
		ctx.Logger().Debug("Processing new chain section", "section", section)
		// Reset and partial processing
		if err := i.backend.Reset(section); err != nil {
			i.setValidSections(0)
			ctx.Logger().Error(err.Error())
			return
		}
		begin := section*BloomBitsBlocks + uint64(tmtypes.GetStartBlockHeight())
		end := (section+1)*BloomBitsBlocks + uint64(tmtypes.GetStartBlockHeight())
		for number := begin; number < end; number++ {
			var (
				bloom ethtypes.Bloom
				hash  common.Hash
			)
			ctx = i.updateCtx(ctx)
			// the initial height is 1 but it on ethereum is 0. so set the bloom and hash of the block 0 to empty.
			if number == uint64(tmtypes.GetStartBlockHeight()) {
				bloom = ethtypes.Bloom{}
				hash = common.Hash{}
			} else {
				hash = k.GetHeightHash(ctx, number)
				if hash == (common.Hash{}) {
					// Fixed: this logger is structured (msg + keyvals), so the
					// printf verb "%d" in the original was never expanded.
					ctx.Logger().Error("canonical block unknown", "number", number)
					return
				}
				bloom = k.GetBlockBloom(ctx, int64(number))
			}
			if err := i.backend.Process(hash, number, bloom); err != nil {
				ctx.Logger().Error(err.Error())
				return
			}
			lastHead = hash
		}
		bd, err := i.backend.Commit()
		if err != nil {
			ctx.Logger().Error(err.Error())
			return
		}
		i.setSectionHead(section, lastHead)
		i.setValidSections(section + 1)
		i.setBloomData(&bd, section, lastHead)
		*bloomData = bd
	}
}
// GetDB returns the bloom indexer's backing database, or nil when the
// indexer itself is nil.
func (i *Indexer) GetDB() dbm.DB {
	if i == nil {
		return nil
	}
	return i.backend.db
}
// setValidSections persists the number of valid sections to the index
// database and prunes any section heads beyond the new count (rollback),
// keeping the in-memory storedSections cache in sync.
func (i *Indexer) setValidSections(sections uint64) {
	// Persist the new count first.
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], sections)
	i.backend.db.Set([]byte("count"), buf[:])
	// Walk the cached count down, removing reorged section heads on the way.
	for i.storedSections > sections {
		i.storedSections--
		i.removeSectionHead(i.storedSections)
	}
	i.storedSections = sections // needed if new > old
}
// setBloomData appends the section-head and section-count KV pairs (same
// keys as setSectionHead/setValidSections) into watcher-bound bloomData.
// NOTE(review): the "count" value written here is `section`, while
// setValidSections persists `section+1` — confirm the consumer accounts for
// that off-by-one.
func (i *Indexer) setBloomData(bloomData *[]*KV, section uint64, hash common.Hash) {
	var enc [8]byte
	binary.BigEndian.PutUint64(enc[:], section)
	headKey := append([]byte("shead"), enc[:]...)
	*bloomData = append(*bloomData,
		&KV{Key: headKey, Value: hash.Bytes()},
		&KV{Key: []byte("count"), Value: enc[:]})
}
// GetValidSections reads the persisted section count from the index
// database, returning 0 when the entry is missing or malformed.
func (i *Indexer) GetValidSections() uint64 {
	raw, _ := i.backend.db.Get([]byte("count"))
	if len(raw) != 8 {
		return 0
	}
	return binary.BigEndian.Uint64(raw)
}
// sectionHead retrieves the last block hash of a processed section from the
// index database; the zero hash is returned when no full-length entry exists.
func (i *Indexer) sectionHead(section uint64) common.Hash {
	var enc [8]byte
	binary.BigEndian.PutUint64(enc[:], section)
	raw, _ := i.backend.db.Get(append([]byte("shead"), enc[:]...))
	if len(raw) != len(common.Hash{}) {
		return common.Hash{}
	}
	return common.BytesToHash(raw)
}
// setSectionHead writes the last block hash of a processed section to the
// index database under the "shead"+section key.
func (i *Indexer) setSectionHead(section uint64, hash common.Hash) {
	var enc [8]byte
	binary.BigEndian.PutUint64(enc[:], section)
	key := append([]byte("shead"), enc[:]...)
	i.backend.db.Set(key, hash.Bytes())
}
// removeSectionHead deletes a processed section's head entry from the index
// database (used when rolling back reorged sections).
func (i *Indexer) removeSectionHead(section uint64) {
	var enc [8]byte
	binary.BigEndian.PutUint64(enc[:], section)
	key := append([]byte("shead"), enc[:]...)
	i.backend.db.Delete(key)
}
// NotifyNewHeight hands a fresh context to the indexer; updateCtx drains it
// on the processing side. The send blocks until the indexer receives it.
func (i *Indexer) NotifyNewHeight(ctx sdk.Context) {
	i.update <- ctx
}
// updateCtx non-blockingly drains every context queued on the update channel
// and returns the most recent one, or oldCtx when nothing was queued.
func (i *Indexer) updateCtx(oldCtx sdk.Context) sdk.Context {
	latest := oldCtx
	for {
		select {
		case latest = <-i.update:
			// keep draining; a newer context may be queued behind this one
		default:
			return latest
		}
	}
}