-
Notifications
You must be signed in to change notification settings - Fork 0
/
store.go
936 lines (787 loc) · 27.8 KB
/
store.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
package headerfs
import (
"bytes"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/fichain/filechain/blockchain"
"github.com/fichain/filechain/btcutil/gcs/builder"
"github.com/fichain/filechain/btcwallet/walletdb"
"github.com/fichain/filechain/chaincfg"
"github.com/fichain/filechain/chaincfg/chainhash"
"github.com/fichain/filechain/wire"
)
// BlockStamp represents a block, identified by its height and time stamp in
// the chain. We also lift the timestamp from the block header itself into this
// struct as well.
type BlockStamp struct {
	// Height is the height of the target block.
	Height int32

	// Hash is the hash that uniquely identifies this block.
	Hash chainhash.Hash

	// Timestamp is the timestamp of the block in the chain.
	Timestamp time.Time
}
// BlockHeaderStore is an interface that provides an abstraction for a generic
// store for block headers.
type BlockHeaderStore interface {
	// ChainTip returns the best known block header and height for the
	// BlockHeaderStore.
	ChainTip() (*wire.BlockHeader, uint32, error)

	// LatestBlockLocator returns the latest block locator object based on
	// the tip of the current main chain from the PoV of the
	// BlockHeaderStore.
	LatestBlockLocator() (blockchain.BlockLocator, error)

	// FetchHeaderByHeight attempts to retrieve a target block header based
	// on a block height.
	FetchHeaderByHeight(height uint32) (*wire.BlockHeader, error)

	// FetchHeaderAncestors fetches the numHeaders block headers that are
	// the ancestors of the target stop hash. A total of numHeaders+1
	// headers will be returned, as we'll walk back numHeaders distance to
	// collect each header, then return the final header specified by the
	// stop hash. We'll also return the starting height of the header range
	// as well so callers can compute the height of each header without
	// knowing the height of the stop hash.
	FetchHeaderAncestors(uint32, *chainhash.Hash) ([]wire.BlockHeader,
		uint32, error)

	// HeightFromHash returns the height of a particular block header given
	// its hash.
	HeightFromHash(*chainhash.Hash) (uint32, error)

	// FetchHeader attempts to retrieve a block header determined by the
	// passed block hash, returning the header along with its height.
	FetchHeader(*chainhash.Hash) (*wire.BlockHeader, uint32, error)

	// WriteHeaders adds a set of headers to the BlockHeaderStore in a
	// single atomic transaction.
	WriteHeaders(...BlockHeader) error

	// RollbackLastBlock rolls back the BlockHeaderStore by a _single_
	// header. This method is meant to be used in the case of re-org which
	// disconnects the latest block header from the end of the main chain.
	// The information about the new header tip after truncation is
	// returned.
	RollbackLastBlock() (*BlockStamp, error)
}
// headerBufPool is a pool of bytes.Buffer that will be re-used by the various
// headerStore implementations to batch their header writes to disk. By
// utilizing this variable we can minimize the total number of allocations when
// writing headers to disk.
var headerBufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}
// headerStore combines an on-disk set of headers within a flat file in
// addition to a database which indexes that flat file. Together, these two
// abstractions can be used in order to build an indexed header store for any
// type of "header" as it deals only with raw bytes, and leaves it to a higher
// layer to interpret those raw bytes accordingly.
//
// TODO(roasbeef): quickcheck coverage
type headerStore struct {
	// mtx guards access to the flat file and the embedded index.
	mtx sync.RWMutex

	// fileName is the full path of the flat file backing this store.
	fileName string

	// file is the open handle to the flat file of raw headers.
	file *os.File

	// headerIndex provides the hash-to-height index into the flat file.
	*headerIndex
}
// newHeaderStore creates a new headerStore given an already open database, a
// target file path for the flat-file and a particular header type. The target
// file will be created as necessary.
func newHeaderStore(db walletdb.DB, filePath string,
	hType HeaderType) (*headerStore, error) {

	var flatFileName string
	switch hType {
	case Block:
		flatFileName = "block_headers.bin"
	case RegularFilter:
		flatFileName = "reg_filter_headers.bin"
	default:
		return nil, fmt.Errorf("unrecognized filter type: %v", hType)
	}

	flatFileName = filepath.Join(filePath, flatFileName)

	// We'll open the file, creating it if necessary and ensuring that all
	// writes are actually appends to the end of the file.
	fileFlags := os.O_RDWR | os.O_APPEND | os.O_CREATE
	headerFile, err := os.OpenFile(flatFileName, fileFlags, 0644)
	if err != nil {
		return nil, err
	}

	// With the file open, we'll then create the header index so we can
	// have random access into the flat files. If index creation fails, we
	// must close the file we just opened so its handle isn't leaked.
	index, err := newHeaderIndex(db, hType)
	if err != nil {
		_ = headerFile.Close()
		return nil, err
	}

	return &headerStore{
		fileName:    flatFileName,
		file:        headerFile,
		headerIndex: index,
	}, nil
}
// blockHeaderStore is an implementation of the BlockHeaderStore interface, a
// fully fledged database for Bitcoin block headers. The blockHeaderStore
// combines a flat file to store the block headers with a database instance for
// managing the index into the set of flat files.
type blockHeaderStore struct {
	*headerStore
}

// A compile-time check to ensure the blockHeaderStore adheres to the
// BlockHeaderStore interface.
var _ BlockHeaderStore = (*blockHeaderStore)(nil)
// NewBlockHeaderStore creates a new instance of the blockHeaderStore based on
// a target file path, an open database instance, and finally a set of
// parameters for the target chain. These parameters are required as if this is
// the initial start up of the blockHeaderStore, then the initial genesis
// header will need to be inserted.
func NewBlockHeaderStore(filePath string, db walletdb.DB,
	netParams *chaincfg.Params) (BlockHeaderStore, error) {

	hStore, err := newHeaderStore(db, filePath, Block)
	if err != nil {
		return nil, err
	}

	// With the header store created, we'll fetch the file size to see if
	// we need to initialize it with the first header or not.
	fileInfo, err := hStore.file.Stat()
	if err != nil {
		return nil, err
	}

	bhs := &blockHeaderStore{
		headerStore: hStore,
	}

	// If the size of the file is zero, then this means that we haven't yet
	// written the initial genesis header to disk, so we'll do so now.
	if fileInfo.Size() == 0 {
		genesisHeader := BlockHeader{
			BlockHeader: &netParams.GenesisBlock.Header,
			Height:      0,
		}
		if err := bhs.WriteHeaders(genesisHeader); err != nil {
			return nil, err
		}

		return bhs, nil
	}

	// As a final initialization step (if this isn't the first time), we'll
	// ensure that the header tip within the flat files is in sync with our
	// database index.
	tipHash, tipHeight, err := bhs.chainTip()
	if err != nil {
		return nil, err
	}

	// First, we'll compute the size of the current file so we can
	// calculate the latest header written to disk. The file stores one
	// 80-byte serialized header per height, starting at genesis.
	fileHeight := uint32(fileInfo.Size()/80) - 1

	// Using the file's current height, fetch the latest on-disk header.
	latestFileHeader, err := bhs.readHeader(fileHeight)
	if err != nil {
		return nil, err
	}

	// If the index's tip hash, and the file on-disk match, then we're
	// done here.
	latestBlockHash := latestFileHeader.BlockHash()
	if tipHash.IsEqual(&latestBlockHash) {
		return bhs, nil
	}

	// TODO(roasbeef): below assumes index can never get ahead?
	//  * we always update files _then_ indexes
	//  * need to dual pointer walk back for max safety

	// Otherwise, we'll need to truncate the file until it matches the
	// current index tip.
	for fileHeight > tipHeight {
		if err := bhs.singleTruncate(); err != nil {
			return nil, err
		}

		fileHeight--
	}

	return bhs, nil
}
// FetchHeader attempts to retrieve a block header determined by the passed
// block hash, returning the header along with its height.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) FetchHeader(hash *chainhash.Hash) (*wire.BlockHeader, uint32, error) {
	// Lock store for read.
	h.mtx.RLock()
	defer h.mtx.RUnlock()

	// Map the hash to its height via the index, then use that height to
	// seek directly into the flat file for the header itself.
	height, err := h.heightFromHash(hash)
	if err != nil {
		return nil, 0, err
	}

	hdr, err := h.readHeader(height)
	if err != nil {
		return nil, 0, err
	}

	return &hdr, height, nil
}
// FetchHeaderByHeight attempts to retrieve a target block header based on a
// block height.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) FetchHeaderByHeight(height uint32) (*wire.BlockHeader, error) {
	// Lock store for read.
	h.mtx.RLock()
	defer h.mtx.RUnlock()

	// No index consultation is needed here: the flat file is addressed
	// directly by height, so we can seek straight to the target header.
	hdr, err := h.readHeader(height)
	if err != nil {
		return nil, err
	}

	return &hdr, nil
}
// FetchHeaderAncestors fetches the numHeaders block headers that are the
// ancestors of the target stop hash. A total of numHeaders+1 headers will be
// returned, as we'll walk back numHeaders distance to collect each header,
// then return the final header specified by the stop hash. We'll also return
// the starting height of the header range as well so callers can compute the
// height of each header without knowing the height of the stop hash.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) FetchHeaderAncestors(numHeaders uint32,
	stopHash *chainhash.Hash) ([]wire.BlockHeader, uint32, error) {

	// Lock store for read, matching the locking discipline of the other
	// read accessors on this store.
	h.mtx.RLock()
	defer h.mtx.RUnlock()

	// First, we'll find the final header in the range, this will be the
	// ending height of our scan.
	endHeight, err := h.heightFromHash(stopHash)
	if err != nil {
		return nil, 0, err
	}

	// Guard against unsigned underflow: we can't walk back further than
	// the genesis header.
	if numHeaders > endHeight {
		return nil, 0, fmt.Errorf("ancestor range underflow: %d "+
			"headers requested from height %d", numHeaders,
			endHeight)
	}

	startHeight := endHeight - numHeaders
	headers, err := h.readHeaderRange(startHeight, endHeight)
	if err != nil {
		return nil, 0, err
	}

	return headers, startHeight, nil
}
// HeightFromHash returns the height of a particular block header given its
// hash.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) HeightFromHash(hash *chainhash.Hash) (uint32, error) {
	// Lock store for read to mirror the other public accessors, which all
	// take the read lock around the unexported heightFromHash lookup.
	h.mtx.RLock()
	defer h.mtx.RUnlock()

	return h.heightFromHash(hash)
}
// RollbackLastBlock rollsback both the index, and on-disk header file by a
// _single_ header. This method is meant to be used in the case of re-org which
// disconnects the latest block header from the end of the main chain. The
// information about the new header tip after truncation is returned.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) RollbackLastBlock() (*BlockStamp, error) {
	// Lock store for write.
	h.mtx.Lock()
	defer h.mtx.Unlock()

	// First, we'll obtain the latest height that the index knows of.
	_, chainTipHeight, err := h.chainTip()
	if err != nil {
		return nil, err
	}

	// With this height obtained, we'll use it to read the latest header
	// from disk, so we can populate our return value which requires the
	// prev header hash.
	bestHeader, err := h.readHeader(chainTipHeight)
	if err != nil {
		return nil, err
	}
	prevHeaderHash := bestHeader.PrevBlock

	// Now that we have the information we need to return from this
	// function, we can now truncate the header file, and then use the hash
	// of the prevHeader to set the proper index chain tip. The file is
	// truncated before the index, consistent with the file-then-index
	// update order used elsewhere in this store.
	if err := h.singleTruncate(); err != nil {
		return nil, err
	}
	if err := h.truncateIndex(&prevHeaderHash, true); err != nil {
		return nil, err
	}

	// NOTE(review): if the tip is the genesis header, chainTipHeight is 0
	// and Height below becomes -1 — confirm callers never roll back past
	// genesis.
	return &BlockStamp{
		Height: int32(chainTipHeight) - 1,
		Hash:   prevHeaderHash,
	}, nil
}
// BlockHeader is a Bitcoin block header that also has its height included.
type BlockHeader struct {
	*wire.BlockHeader

	// Height is the height of this block header within the current main
	// chain.
	Height uint32
}
// toIndexEntry converts the BlockHeader into a matching headerEntry. This
// method is used when a header is to be written to disk.
func (b *BlockHeader) toIndexEntry() headerEntry {
	entry := headerEntry{
		hash:   b.BlockHash(),
		height: b.Height,
	}

	return entry
}
// WriteHeaders writes a set of headers to disk and updates the index in a
// single atomic transaction.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) WriteHeaders(hdrs ...BlockHeader) error {
	// Lock store for write.
	h.mtx.Lock()
	defer h.mtx.Unlock()

	// Borrow a scratch buffer from the shared pool so the whole batch can
	// be serialized with a minimal number of allocations.
	buf := headerBufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer headerBufPool.Put(buf)

	// Serialize each passed header into the scratch buffer, one after
	// another.
	for _, hdr := range hdrs {
		if err := hdr.Serialize(buf); err != nil {
			return err
		}
	}

	// Flush the serialized batch to the flat file in a single write call.
	if err := h.appendRaw(buf.Bytes()); err != nil {
		return err
	}

	// Finally, collate the batch into headerEntry instances and commit
	// them to the index in a single atomic update.
	entries := make([]headerEntry, 0, len(hdrs))
	for _, hdr := range hdrs {
		entries = append(entries, hdr.toIndexEntry())
	}

	return h.addHeaders(entries)
}
// blockLocatorFromHash takes a given block hash and then creates a block
// locator using it as the root of the locator. We'll start by taking a single
// step backwards, then keep doubling the distance until genesis after we get
// 10 locators.
//
// NOTE(review): this method calls FetchHeaderByHeight, which takes the read
// lock, while the exported callers in this file already hold it. A Go RWMutex
// read lock is not safely re-entrant when a writer is queued — confirm this
// cannot deadlock under write contention.
//
// TODO(roasbeef): make into single transaction.
func (h *blockHeaderStore) blockLocatorFromHash(hash *chainhash.Hash) (
	blockchain.BlockLocator, error) {

	var locator blockchain.BlockLocator

	// Append the initial hash
	locator = append(locator, hash)

	// If hash isn't found in DB or this is the genesis block, return the
	// locator as is. Note that the lookup error is deliberately swallowed
	// here: the single-entry locator is still usable.
	height, err := h.heightFromHash(hash)
	if height == 0 || err != nil {
		return locator, nil
	}

	decrement := uint32(1)
	for height > 0 && len(locator) < wire.MaxBlockLocatorsPerMsg {
		// Decrement by 1 for the first 10 blocks, then double the jump
		// until we get to the genesis hash
		if len(locator) > 10 {
			decrement *= 2
		}

		// Clamp at genesis rather than letting the unsigned height
		// wrap around.
		if decrement > height {
			height = 0
		} else {
			height -= decrement
		}

		blockHeader, err := h.FetchHeaderByHeight(height)
		if err != nil {
			return locator, err
		}

		headerHash := blockHeader.BlockHash()
		locator = append(locator, &headerHash)
	}

	return locator, nil
}
// LatestBlockLocator returns the latest block locator object based on the tip
// of the current main chain from the PoV of the database and flat files.
//
// NOTE: Part of the BlockHeaderStore interface.
//
// NOTE(review): blockLocatorFromHash calls FetchHeaderByHeight, which
// re-acquires the read lock held here — confirm this cannot deadlock when a
// writer is waiting.
func (h *blockHeaderStore) LatestBlockLocator() (blockchain.BlockLocator, error) {
	// Lock store for read.
	h.mtx.RLock()
	defer h.mtx.RUnlock()

	var locator blockchain.BlockLocator

	// The locator is rooted at the current chain tip as recorded by the
	// index.
	chainTipHash, _, err := h.chainTip()
	if err != nil {
		return locator, err
	}

	return h.blockLocatorFromHash(chainTipHash)
}
// BlockLocatorFromHash computes a block locator given a particular hash. The
// standard Bitcoin algorithm to compute block locators are employed.
//
// NOTE(review): blockLocatorFromHash calls FetchHeaderByHeight, which
// re-acquires the read lock held here — confirm this cannot deadlock when a
// writer is waiting.
func (h *blockHeaderStore) BlockLocatorFromHash(hash *chainhash.Hash) (
	blockchain.BlockLocator, error) {

	// Lock store for read.
	h.mtx.RLock()
	defer h.mtx.RUnlock()

	return h.blockLocatorFromHash(hash)
}
// CheckConnectivity cycles through all of the block headers on disk, from last
// to first, and makes sure they all connect to each other. Additionally, at
// each block header, we also ensure that the index entry for that height and
// hash also match up properly.
func (h *blockHeaderStore) CheckConnectivity() error {
	// Lock store for read.
	h.mtx.RLock()
	defer h.mtx.RUnlock()

	return walletdb.View(h.db, func(tx walletdb.ReadTx) error {
		// First, we'll fetch the chain tip so we can start our
		// backwards scan.
		_, tipHeight, err := h.chainTipWithTx(tx)
		if err != nil {
			return err
		}

		// If the tip is the genesis header, there's nothing to
		// connect, and the tipHeight-1 below would underflow the
		// unsigned height, so we can exit early.
		if tipHeight == 0 {
			return nil
		}

		// With the height extracted, we'll now read the _last_ block
		// header within the file before we kick off our connectivity
		// loop.
		header, err := h.readHeader(tipHeight)
		if err != nil {
			return err
		}

		// We'll now cycle backwards, seeking backwards along the
		// header file to ensure each header connects properly and the
		// index entries are also accurate. To do this, we start from a
		// height of one before our current tip.
		var newHeader wire.BlockHeader
		for height := tipHeight - 1; height > 0; height-- {
			// First, read the block header for this block height,
			// and also compute the block hash for it.
			newHeader, err = h.readHeader(height)
			if err != nil {
				return fmt.Errorf("couldn't retrieve header "+
					"%s: %s", header.PrevBlock, err)
			}
			newHeaderHash := newHeader.BlockHash()

			// With the header retrieved, we'll now fetch the
			// height for this current header hash to ensure the
			// on-disk state and the index matches up properly.
			indexHeight, err := h.heightFromHashWithTx(
				tx, &newHeaderHash,
			)
			if err != nil {
				return fmt.Errorf("index and on-disk file "+
					"out of sync at height: %v", height)
			}

			// With the index entry retrieved, we'll now assert
			// that the height matches up with our current height
			// in this backwards walk.
			if indexHeight != height {
				return fmt.Errorf("index height isn't " +
					"monotonically increasing")
			}

			// Finally, we'll assert that this new header is
			// actually the prev header of the target header from
			// the last loop. This ensures connectivity.
			if newHeader.BlockHash() != header.PrevBlock {
				return fmt.Errorf("block %s doesn't match "+
					"block %s's PrevBlock (%s)",
					newHeader.BlockHash(),
					header.BlockHash(), header.PrevBlock)
			}

			// As all the checks have passed, we'll now reset our
			// header pointer to this current location, and
			// continue our backwards walk.
			header = newHeader
		}

		return nil
	})
}
// ChainTip returns the best known block header and height for the
// blockHeaderStore.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) ChainTip() (*wire.BlockHeader, uint32, error) {
	// Lock store for read.
	h.mtx.RLock()
	defer h.mtx.RUnlock()

	// Query the index for the tip height, then read the corresponding
	// header back from the flat file.
	_, tipHeight, err := h.chainTip()
	if err != nil {
		return nil, 0, err
	}

	tipHeader, err := h.readHeader(tipHeight)
	if err != nil {
		return nil, 0, err
	}

	return &tipHeader, tipHeight, nil
}
// FilterHeaderStore is an implementation of a fully fledged database for any
// variant of filter headers. The FilterHeaderStore combines a flat file to
// store the block headers with a database instance for managing the index into
// the set of flat files.
type FilterHeaderStore struct {
	*headerStore
}
// NewFilterHeaderStore returns a new instance of the FilterHeaderStore based
// on a target file path, filter type, and target net parameters. These
// parameters are required as if this is the initial start up of the
// FilterHeaderStore, then the initial genesis filter header will need to be
// inserted.
func NewFilterHeaderStore(filePath string, db walletdb.DB,
	filterType HeaderType, netParams *chaincfg.Params,
	headerStateAssertion *FilterHeader) (*FilterHeaderStore, error) {

	fStore, err := newHeaderStore(db, filePath, filterType)
	if err != nil {
		return nil, err
	}

	// With the header store created, we'll fetch the file size to see if
	// we need to initialize it with the first header or not.
	fileInfo, err := fStore.file.Stat()
	if err != nil {
		return nil, err
	}

	fhs := &FilterHeaderStore{
		fStore,
	}

	// TODO(roasbeef): also reconcile with block header state due to way
	// roll back works atm

	// If the size of the file is zero, then this means that we haven't yet
	// written the initial genesis header to disk, so we'll do so now.
	if fileInfo.Size() == 0 {
		var genesisFilterHash chainhash.Hash
		switch filterType {
		case RegularFilter:
			basicFilter, err := builder.BuildBasicFilter(
				netParams.GenesisBlock, nil,
			)
			if err != nil {
				return nil, err
			}

			genesisFilterHash, err = builder.MakeHeaderForFilter(
				basicFilter,
				netParams.GenesisBlock.Header.PrevBlock,
			)
			if err != nil {
				return nil, err
			}

		default:
			return nil, fmt.Errorf("unknown filter type: %v", filterType)
		}

		genesisHeader := FilterHeader{
			HeaderHash: *netParams.GenesisHash,
			FilterHash: genesisFilterHash,
			Height:     0,
		}
		if err := fhs.WriteHeaders(genesisHeader); err != nil {
			return nil, err
		}

		return fhs, nil
	}

	// If we have a state assertion then we'll check it now to see if we
	// need to modify our filter header files before we proceed.
	if headerStateAssertion != nil {
		reset, err := fhs.maybeResetHeaderState(
			headerStateAssertion,
		)
		if err != nil {
			return nil, err
		}

		// If the filter header store was reset, we'll re-initialize it
		// to recreate our on-disk state.
		if reset {
			return NewFilterHeaderStore(
				filePath, db, filterType, netParams, nil,
			)
		}
	}

	// As a final initialization step, we'll ensure that the header tip
	// within the flat files is in sync with our database index.
	tipHash, tipHeight, err := fhs.chainTip()
	if err != nil {
		return nil, err
	}

	// First, we'll compute the size of the current file so we can
	// calculate the latest header written to disk. The file stores one
	// 32-byte filter header per height, starting at genesis.
	fileHeight := uint32(fileInfo.Size()/32) - 1

	// Using the file's current height, fetch the latest on-disk header.
	latestFileHeader, err := fhs.readHeader(fileHeight)
	if err != nil {
		return nil, err
	}

	// If the index's tip hash, and the file on-disk match, then we're
	// done here.
	if tipHash.IsEqual(latestFileHeader) {
		return fhs, nil
	}

	// Otherwise, we'll need to truncate the file until it matches the
	// current index tip.
	for fileHeight > tipHeight {
		if err := fhs.singleTruncate(); err != nil {
			return nil, err
		}

		fileHeight--
	}

	// TODO(roasbeef): make above into func
	return fhs, nil
}
// maybeResetHeaderState will reset the header state if the header assertion
// fails, but only if the target height is found. The boolean returned indicates
// that header state was reset.
func (f *FilterHeaderStore) maybeResetHeaderState(
	headerStateAssertion *FilterHeader) (bool, error) {

	// First, we'll attempt to locate the header at this height. If no such
	// header is found, then we'll exit early.
	assertedHeader, err := f.FetchHeaderByHeight(
		headerStateAssertion.Height,
	)
	if _, ok := err.(*ErrHeaderNotFound); ok {
		return false, nil
	}
	if err != nil {
		return false, err
	}

	// If our on disk state and the provided header assertion don't match,
	// then we'll purge this state so we can sync it anew once we fully
	// start up.
	if *assertedHeader != headerStateAssertion.FilterHash {
		// Close the file before removing it. This is required by some
		// OS, e.g., Windows.
		if err := f.file.Close(); err != nil {
			return true, err
		}

		if err := os.Remove(f.fileName); err != nil {
			return true, err
		}

		return true, nil
	}

	return false, nil
}
// FetchHeader returns the filter header that corresponds to the passed block
// hash.
func (f *FilterHeaderStore) FetchHeader(hash *chainhash.Hash) (*chainhash.Hash, error) {
	// Lock store for read.
	f.mtx.RLock()
	defer f.mtx.RUnlock()

	// Resolve the block hash to a height via the index, then read the
	// filter header at that height from the flat file.
	height, err := f.heightFromHash(hash)
	if err != nil {
		return nil, err
	}

	return f.readHeader(height)
}
// FetchHeaderByHeight returns the filter header for a particular block height.
func (f *FilterHeaderStore) FetchHeaderByHeight(height uint32) (*chainhash.Hash, error) {
	// Lock store for read.
	f.mtx.RLock()
	defer f.mtx.RUnlock()

	// The flat file is addressed directly by height, so no index lookup
	// is needed here.
	return f.readHeader(height)
}
// FetchHeaderAncestors fetches the numHeaders filter headers that are the
// ancestors of the target stop block hash. A total of numHeaders+1 headers will be
// returned, as we'll walk back numHeaders distance to collect each header,
// then return the final header specified by the stop hash. We'll also return
// the starting height of the header range as well so callers can compute the
// height of each header without knowing the height of the stop hash.
func (f *FilterHeaderStore) FetchHeaderAncestors(numHeaders uint32,
	stopHash *chainhash.Hash) ([]chainhash.Hash, uint32, error) {

	// Lock store for read, matching the locking discipline of the other
	// read accessors on this store.
	f.mtx.RLock()
	defer f.mtx.RUnlock()

	// First, we'll find the final header in the range, this will be the
	// ending height of our scan.
	endHeight, err := f.heightFromHash(stopHash)
	if err != nil {
		return nil, 0, err
	}

	// Guard against unsigned underflow: we can't walk back further than
	// the genesis header.
	if numHeaders > endHeight {
		return nil, 0, fmt.Errorf("ancestor range underflow: %d "+
			"headers requested from height %d", numHeaders,
			endHeight)
	}

	startHeight := endHeight - numHeaders
	headers, err := f.readHeaderRange(startHeight, endHeight)
	if err != nil {
		return nil, 0, err
	}

	return headers, startHeight, nil
}
// FilterHeader represents a filter header (basic or extended). The filter
// header itself is coupled with the block height and hash of the filter's
// block.
type FilterHeader struct {
	// HeaderHash is the hash of the block header that this filter header
	// corresponds to.
	HeaderHash chainhash.Hash

	// FilterHash is the filter header itself.
	FilterHash chainhash.Hash

	// Height is the block height of the filter header in the main chain.
	Height uint32
}
// toIndexEntry converts the filter header into a index entry to be stored
// within the database.
func (f *FilterHeader) toIndexEntry() headerEntry {
	entry := headerEntry{
		hash:   f.HeaderHash,
		height: f.Height,
	}

	return entry
}
// WriteHeaders writes a batch of filter headers to persistent storage. The
// headers themselves are appended to the flat file, and then the index updated
// to reflect the new entires.
func (f *FilterHeaderStore) WriteHeaders(hdrs ...FilterHeader) error {
	// Lock store for write.
	f.mtx.Lock()
	defer f.mtx.Unlock()

	// Writing nothing is a no-op; bail out before the tip computation
	// below would index hdrs[-1] and panic.
	if len(hdrs) == 0 {
		return nil
	}

	// Borrow a scratch buffer from the shared pool so the whole batch can
	// be serialized with a minimal number of allocations.
	buf := headerBufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer headerBufPool.Put(buf)

	// Concatenate each filter hash into the buffer, in order.
	for _, hdr := range hdrs {
		if _, err := buf.Write(hdr.FilterHash[:]); err != nil {
			return err
		}
	}

	// Flush the entire batch to the flat file in a single write call.
	if err := f.appendRaw(buf.Bytes()); err != nil {
		return err
	}

	// As the block headers should already be written, we only need to
	// advance the tip pointer for this header type to the last header
	// written.
	newTip := hdrs[len(hdrs)-1].toIndexEntry().hash
	return f.truncateIndex(&newTip, false)
}
// ChainTip returns the latest filter header and height known to the
// FilterHeaderStore.
func (f *FilterHeaderStore) ChainTip() (*chainhash.Hash, uint32, error) {
	// Lock store for read.
	f.mtx.RLock()
	defer f.mtx.RUnlock()

	// Query the index for the tip height, then read the corresponding
	// filter header back from the flat file.
	_, tipHeight, err := f.chainTip()
	if err != nil {
		return nil, 0, fmt.Errorf("unable to fetch chain tip: %v", err)
	}

	tipHeader, err := f.readHeader(tipHeight)
	if err != nil {
		return nil, 0, fmt.Errorf("unable to read header: %v", err)
	}

	return tipHeader, tipHeight, nil
}
// RollbackLastBlock rollsback both the index, and on-disk header file by a
// _single_ filter header. This method is meant to be used in the case of
// re-org which disconnects the latest filter header from the end of the main
// chain. The information about the latest header tip after truncation is
// returned.
func (f *FilterHeaderStore) RollbackLastBlock(newTip *chainhash.Hash) (*BlockStamp, error) {
	// Lock store for write.
	f.mtx.Lock()
	defer f.mtx.Unlock()

	// First, we'll obtain the latest height that the index knows of.
	_, chainTipHeight, err := f.chainTip()
	if err != nil {
		return nil, err
	}

	// With this height obtained, we'll use it to read what will be the new
	// chain tip from disk.
	//
	// NOTE(review): if the tip is the genesis header, chainTipHeight is 0
	// and this subtraction wraps the uint32 around — confirm callers never
	// roll back past genesis.
	newHeightTip := chainTipHeight - 1
	newHeaderTip, err := f.readHeader(newHeightTip)
	if err != nil {
		return nil, err
	}

	// Now that we have the information we need to return from this
	// function, we can now truncate both the header file and the index.
	// The file is truncated before the index, consistent with the
	// file-then-index update order used elsewhere in this store.
	if err := f.singleTruncate(); err != nil {
		return nil, err
	}
	if err := f.truncateIndex(newTip, false); err != nil {
		return nil, err
	}

	// TODO(roasbeef): return chain hash also?
	return &BlockStamp{
		Height: int32(newHeightTip),
		Hash:   *newHeaderTip,
	}, nil
}