// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2023 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockchain

import (
	"bytes"
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"math"
	"os"
	"sort"
	"time"

	"github.com/decred/dcrd/blockchain/stake/v5"
	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/chaincfg/v3"
	"github.com/decred/dcrd/crypto/blake256"
	"github.com/decred/dcrd/database/v3"
	"github.com/decred/dcrd/dcrec/secp256k1/v4"
	"github.com/decred/dcrd/gcs/v4"
	"github.com/decred/dcrd/gcs/v4/blockcf2"
	"github.com/decred/dcrd/txscript/v4"
	"github.com/decred/dcrd/wire"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

// errInterruptRequested indicates that an operation was cancelled due
// to a user-requested interrupt.
var errInterruptRequested = errors.New("interrupt requested")

// errBatchFinished indicates that a foreach database loop was exited due to
// reaching the maximum batch size.
var errBatchFinished = errors.New("batch finished")

// interruptRequested returns true when the provided context has been
// canceled. This simplifies early shutdown slightly since the caller can just
// use an if statement instead of a select.
func interruptRequested(ctx context.Context) bool {
	return ctx.Err() != nil
}

// deserializeDatabaseInfoV2 deserializes a database information struct from the
// passed serialized byte slice according to the legacy version 2 format.
//
// The legacy format is as follows:
//
//	Field    Type    Size     Description
//	version  uint32  4 bytes  The version of the database
//	compVer  uint32  4 bytes  The script compression version of the database
//	created  uint32  4 bytes  The date of the creation of the database
//
// The high bit (0x80000000) is used on version to indicate that an upgrade
// is in progress and used to confirm the database fidelity on start up.
func deserializeDatabaseInfoV2(dbInfoBytes []byte) (*databaseInfo, error) {
	// upgradeStartedBit is the bit flag for whether or not a database
	// upgrade is in progress. It is used to determine if the database
	// is in an inconsistent state from the update.
	const upgradeStartedBit = 0x80000000

	byteOrder := binary.LittleEndian

	rawVersion := byteOrder.Uint32(dbInfoBytes[0:4])
	upgradeStarted := (upgradeStartedBit & rawVersion) > 0
	version := rawVersion &^ upgradeStartedBit
	compVer := byteOrder.Uint32(dbInfoBytes[4:8])
	ts := byteOrder.Uint32(dbInfoBytes[8:12])

	if upgradeStarted {
		return nil, AssertError("database is in the upgrade started " +
			"state before resumable upgrades were supported - " +
			"delete the database and resync the blockchain")
	}

	return &databaseInfo{
		version: version,
		compVer: compVer,
		created: time.Unix(int64(ts), 0),
	}, nil
}

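// serializeDatabaseInfoV2Sketch is an illustrative sketch, not part of the
// original upgrade code: it writes a databaseInfo back out in the legacy v2
// layout documented above to make the fixed-size, little-endian layout
// concrete. It assumes only the version, compVer, and created fields used by
// the deserialization function and ignores the upgrade-started high bit.
func serializeDatabaseInfoV2Sketch(info *databaseInfo) []byte {
	buf := make([]byte, 12)
	byteOrder := binary.LittleEndian
	byteOrder.PutUint32(buf[0:4], info.version)
	byteOrder.PutUint32(buf[4:8], info.compVer)
	byteOrder.PutUint32(buf[8:12], uint32(info.created.Unix()))
	return buf
}
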
// -----------------------------------------------------------------------------
// The legacy version 2 block index consists of an entry for every known block,
// which includes information such as the block header and hashes of tickets
// voted and revoked.
//
// The serialized key format is:
//
//	<block height><block hash>
//
//	Field         Type            Size
//	block height  uint32          4 bytes
//	block hash    chainhash.Hash  chainhash.HashSize
//
// The serialized value format is:
//
//	<block header><status><num votes><votes info><num revoked><revoked tickets>
//
//	Field              Type              Size
//	block header       wire.BlockHeader  180 bytes
//	status             byte              1 byte
//	num votes          VLQ               variable
//	vote info
//	  ticket hash      chainhash.Hash    chainhash.HashSize
//	  vote version     VLQ               variable
//	  vote bits        VLQ               variable
//	num revoked        VLQ               variable
//	revoked tickets
//	  ticket hash      chainhash.Hash    chainhash.HashSize
//
// The version 2 block status flags format is:
//
//	bit  0   - block payload is stored on disk
//	bit  1   - block and all of its ancestors have been fully validated
//	bit  2   - block failed validation
//	bit  3   - an ancestor of the block failed validation
//	bits 4-7 - unused
// -----------------------------------------------------------------------------

// blockIndexVoteVersionTuple houses the extracted vote bits and version from
// votes for use in block index database entries.
type blockIndexVoteVersionTuple struct {
	version uint32
	bits    uint16
}

// blockIndexEntryV2 represents a legacy version 2 block index database entry.
type blockIndexEntryV2 struct {
	header         wire.BlockHeader
	status         byte
	voteInfo       []blockIndexVoteVersionTuple
	ticketsVoted   []chainhash.Hash
	ticketsRevoked []chainhash.Hash
}

// blockIndexEntrySerializeSizeV2 returns the number of bytes it would take to
// serialize the passed block index entry according to the legacy version 2
// format described above.
func blockIndexEntrySerializeSizeV2(entry *blockIndexEntryV2) int {
	voteInfoSize := 0
	for i := range entry.voteInfo {
		voteInfoSize += chainhash.HashSize +
			serializeSizeVLQ(uint64(entry.voteInfo[i].version)) +
			serializeSizeVLQ(uint64(entry.voteInfo[i].bits))
	}

	return blockHdrSize + 1 + serializeSizeVLQ(uint64(len(entry.voteInfo))) +
		voteInfoSize + serializeSizeVLQ(uint64(len(entry.ticketsRevoked))) +
		chainhash.HashSize*len(entry.ticketsRevoked)
}

// putBlockIndexEntryV2 serializes the passed block index entry according to the
// legacy version 2 format described above directly into the passed target byte
// slice. The target byte slice must be at least large enough to handle the
// number of bytes returned by the blockIndexEntrySerializeSizeV2 function or it
// will panic.
func putBlockIndexEntryV2(target []byte, entry *blockIndexEntryV2) (int, error) {
	if len(entry.voteInfo) != len(entry.ticketsVoted) {
		return 0, AssertError("putBlockIndexEntry called with " +
			"mismatched number of tickets voted and vote info")
	}

	// Serialize the entire block header.
	w := bytes.NewBuffer(target[0:0])
	if err := entry.header.Serialize(w); err != nil {
		return 0, err
	}

	// Serialize the status.
	offset := blockHdrSize
	target[offset] = entry.status
	offset++

	// Serialize the number of votes and associated vote information.
	offset += putVLQ(target[offset:], uint64(len(entry.voteInfo)))
	for i := range entry.voteInfo {
		offset += copy(target[offset:], entry.ticketsVoted[i][:])
		offset += putVLQ(target[offset:], uint64(entry.voteInfo[i].version))
		offset += putVLQ(target[offset:], uint64(entry.voteInfo[i].bits))
	}

	// Serialize the number of revocations and associated revocation
	// information.
	offset += putVLQ(target[offset:], uint64(len(entry.ticketsRevoked)))
	for i := range entry.ticketsRevoked {
		offset += copy(target[offset:], entry.ticketsRevoked[i][:])
	}

	return offset, nil
}

// decodeBlockIndexEntryV2 decodes the passed serialized block index entry into
// the passed struct according to the legacy version 2 format described above.
// It returns the number of bytes read.
func decodeBlockIndexEntryV2(serialized []byte, entry *blockIndexEntryV2) (int, error) {
	// Hardcoded value so updates do not affect old upgrades.
	const blockHdrSize = 180

	// Ensure there are enough bytes to decode header.
	if len(serialized) < blockHdrSize {
		return 0, errDeserialize("unexpected end of data while reading block " +
			"header")
	}
	hB := serialized[0:blockHdrSize]

	// Deserialize the header.
	var header wire.BlockHeader
	if err := header.Deserialize(bytes.NewReader(hB)); err != nil {
		return 0, err
	}
	offset := blockHdrSize

	// Deserialize the status.
	if offset+1 > len(serialized) {
		return offset, errDeserialize("unexpected end of data while reading " +
			"status")
	}
	status := serialized[offset]
	offset++

	// Deserialize the number of tickets spent.
	var ticketsVoted []chainhash.Hash
	var votes []blockIndexVoteVersionTuple
	numVotes, bytesRead := deserializeVLQ(serialized[offset:])
	if bytesRead == 0 {
		return offset, errDeserialize("unexpected end of data while reading " +
			"num votes")
	}
	offset += bytesRead
	if numVotes > 0 {
		ticketsVoted = make([]chainhash.Hash, numVotes)
		votes = make([]blockIndexVoteVersionTuple, numVotes)
		for i := uint64(0); i < numVotes; i++ {
			// Deserialize the ticket hash associated with the vote.
			if offset+chainhash.HashSize > len(serialized) {
				return offset, errDeserialize(fmt.Sprintf("unexpected end of "+
					"data while reading vote #%d hash", i))
			}
			copy(ticketsVoted[i][:], serialized[offset:])
			offset += chainhash.HashSize

			// Deserialize the vote version.
			version, bytesRead := deserializeVLQ(serialized[offset:])
			if bytesRead == 0 {
				return offset, errDeserialize(fmt.Sprintf("unexpected end of "+
					"data while reading vote #%d version", i))
			}
			offset += bytesRead

			// Deserialize the vote bits.
			voteBits, bytesRead := deserializeVLQ(serialized[offset:])
			if bytesRead == 0 {
				return offset, errDeserialize(fmt.Sprintf("unexpected end of "+
					"data while reading vote #%d bits", i))
			}
			offset += bytesRead

			votes[i].version = uint32(version)
			votes[i].bits = uint16(voteBits)
		}
	}

	// Deserialize the number of tickets revoked.
	var ticketsRevoked []chainhash.Hash
	numTicketsRevoked, bytesRead := deserializeVLQ(serialized[offset:])
	if bytesRead == 0 {
		return offset, errDeserialize("unexpected end of data while reading " +
			"num tickets revoked")
	}
	offset += bytesRead
	if numTicketsRevoked > 0 {
		ticketsRevoked = make([]chainhash.Hash, numTicketsRevoked)
		for i := uint64(0); i < numTicketsRevoked; i++ {
			// Deserialize the ticket hash associated with the
			// revocation.
			if offset+chainhash.HashSize > len(serialized) {
				return offset, errDeserialize(fmt.Sprintf("unexpected end of "+
					"data while reading revocation #%d", i))
			}
			copy(ticketsRevoked[i][:], serialized[offset:])
			offset += chainhash.HashSize
		}
	}

	entry.header = header
	entry.status = status
	entry.voteInfo = votes
	entry.ticketsVoted = ticketsVoted
	entry.ticketsRevoked = ticketsRevoked
	return offset, nil
}

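// roundTripBlockIndexEntryV2Sketch is an illustrative helper, not part of the
// original upgrade code: it shows how the three v2 block index functions above
// compose by decoding an entry and re-serializing it, which should reproduce
// the input bytes. The in-place status rewrite performed by
// clearFailedBlockFlagsV2 below relies on exactly this decode/size/put cycle.
func roundTripBlockIndexEntryV2Sketch(serialized []byte) ([]byte, error) {
	var entry blockIndexEntryV2
	if _, err := decodeBlockIndexEntryV2(serialized, &entry); err != nil {
		return nil, err
	}
	reserialized := make([]byte, blockIndexEntrySerializeSizeV2(&entry))
	if _, err := putBlockIndexEntryV2(reserialized, &entry); err != nil {
		return nil, err
	}
	return reserialized, nil
}
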
// incrementalFlatDrop uses multiple database updates to remove key/value pairs
// saved to a flat bucket.
func incrementalFlatDrop(ctx context.Context, db database.DB, bucketKey []byte, humanName string) error {
	const maxDeletions = 2000000
	var totalDeleted uint64
	for numDeleted := maxDeletions; numDeleted == maxDeletions; {
		numDeleted = 0
		err := db.Update(func(dbTx database.Tx) error {
			bucket := dbTx.Metadata().Bucket(bucketKey)
			cursor := bucket.Cursor()
			for ok := cursor.First(); ok; ok = cursor.Next() &&
				numDeleted < maxDeletions {

				if err := cursor.Delete(); err != nil {
					return err
				}
				numDeleted++
			}
			return nil
		})
		if err != nil {
			return err
		}

		if numDeleted > 0 {
			totalDeleted += uint64(numDeleted)
			log.Infof("Deleted %d keys (%d total) from %s", numDeleted,
				totalDeleted, humanName)
		}

		if interruptRequested(ctx) {
			return errInterruptRequested
		}
	}
	return nil
}

// runUpgradeStageOnce ensures the provided function is only run one time by
// checking if the provided key already exists in the database and writing it to
// the database upon successful completion of the provided function when it is
// not.
//
// This is useful to ensure upgrades that consist of multiple stages can be
// interrupted without redoing all of the work associated with stages that were
// previously completed successfully.
func runUpgradeStageOnce(ctx context.Context, db database.DB, doneKeyName []byte, fn func() error) error {
	// Don't run again if the provided key already exists.
	var alreadyDone bool
	err := db.View(func(dbTx database.Tx) error {
		alreadyDone = dbTx.Metadata().Get(doneKeyName) != nil
		return nil
	})
	if err != nil || alreadyDone {
		return err
	}

	if err := fn(); err != nil {
		return err
	}

	if interruptRequested(ctx) {
		return errInterruptRequested
	}

	// Save the key to mark the update fully complete in case of interruption.
	return db.Update(func(dbTx database.Tx) error {
		return dbTx.Metadata().Put(doneKeyName, nil)
	})
}

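// exampleStagedUpgrade is a hypothetical usage sketch (the done key names and
// stage bodies are illustrative only): wrapping each stage in
// runUpgradeStageOnce means an interrupted upgrade resumes at the first
// incomplete stage instead of redoing work that already committed.
func exampleStagedUpgrade(ctx context.Context, db database.DB) error {
	// Hypothetical per-stage done keys.
	stage1DoneKey := []byte("examplestage1done")
	stage2DoneKey := []byte("examplestage2done")

	err := runUpgradeStageOnce(ctx, db, stage1DoneKey, func() error {
		// ... perform the work for stage 1 ...
		return nil
	})
	if err != nil {
		return err
	}

	return runUpgradeStageOnce(ctx, db, stage2DoneKey, func() error {
		// ... perform the work for stage 2 ...
		return nil
	})
}
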
// batchFn represents the batch function used by the batched update function.
type batchFn func(dbTx database.Tx) (bool, error)

// batchedUpdate calls the provided batch function repeatedly until it either
// returns an error other than the special ones described in this comment or
// its return indicates no more calls are necessary.
//
// In order to ensure the database is updated with the results of the batch that
// have already been successfully completed, it is allowed to return
// errBatchFinished and errInterruptRequested. In the case of the former, the
// error will be ignored. In the case of the latter, the database will be
// updated and the error will be returned accordingly. The database will NOT
// be updated if any other errors are returned.
func batchedUpdate(ctx context.Context, db database.DB, doBatch batchFn) error {
	var isFullyDone bool
	for !isFullyDone {
		err := db.Update(func(dbTx database.Tx) error {
			var err error
			isFullyDone, err = doBatch(dbTx)
			if errors.Is(err, errInterruptRequested) ||
				errors.Is(err, errBatchFinished) {

				// No error here so the database transaction is not cancelled
				// and therefore outstanding work is written to disk. The outer
				// function will exit with an interrupted error below due to
				// another interrupted check.
				err = nil
			}
			return err
		})
		if err != nil {
			return err
		}

		if interruptRequested(ctx) {
			return errInterruptRequested
		}
	}

	return nil
}

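// exampleDoBatch is an illustrative batchFn sketch (the bucket name and batch
// limit are hypothetical): it rewrites up to maxEntries values per database
// transaction and returns errBatchFinished so batchedUpdate commits the
// partial batch and calls it again, or reports completion once iteration
// finishes. A production implementation also tracks a resume offset across
// calls so committed entries are skipped, as clearFailedBlockFlagsV2 below
// does.
func exampleDoBatch(dbTx database.Tx) (bool, error) {
	const maxEntries = 1000                                   // hypothetical per-transaction limit
	bucket := dbTx.Metadata().Bucket([]byte("examplebucket")) // hypothetical
	var numUpdated uint32
	err := bucket.ForEach(func(key, value []byte) error {
		if numUpdated >= maxEntries {
			return errBatchFinished
		}
		// ... transform value as required by the upgrade ...
		if err := bucket.Put(key, value); err != nil {
			return err
		}
		numUpdated++
		return nil
	})
	isFullyDone := err == nil
	return isFullyDone, err
}
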
// utxoBackendBatchFn represents the batch function used by the UTXO backend
// batched update function.
type utxoBackendBatchFn func(tx UtxoBackendTx) (bool, error)

// utxoBackendBatchedUpdate calls the provided batch function repeatedly until
// it either returns an error other than the special ones described in this
// comment or its return indicates no more calls are necessary.
//
// In order to ensure the backend is updated with the results of the batch that
// have already been successfully completed, it is allowed to return
// errBatchFinished and errInterruptRequested. In the case of the former, the
// error will be ignored. In the case of the latter, the backend will be
// updated and the error will be returned accordingly. The backend will NOT
// be updated if any other errors are returned.
func utxoBackendBatchedUpdate(ctx context.Context,
	utxoBackend UtxoBackend, doBatch utxoBackendBatchFn) error {

	var isFullyDone bool
	for !isFullyDone {
		err := utxoBackend.Update(func(tx UtxoBackendTx) error {
			var err error
			isFullyDone, err = doBatch(tx)
			if errors.Is(err, errInterruptRequested) ||
				errors.Is(err, errBatchFinished) {

				// No error here so the database transaction is not cancelled
				// and therefore outstanding work is written to disk. The outer
				// function will exit with an interrupted error below due to
				// another interrupted check.
				return nil
			}
			return err
		})
		if err != nil {
			return err
		}

		if interruptRequested(ctx) {
			return errInterruptRequested
		}
	}

	return nil
}

// clearFailedBlockFlagsV2 unmarks all blocks in a version 2 block index
// previously marked failed so they are eligible for validation again under new
// consensus rules. This ensures clients that did not update prior to new rules
// activating are able to automatically recover under the new rules without
// having to download the entire chain again.
func clearFailedBlockFlagsV2(ctx context.Context, db database.DB) error {
	// Hardcoded bucket name so updates do not affect old upgrades.
	v2BucketName := []byte("blockidx")

	log.Info("Reindexing block information in the database. This may take a " +
		"while...")
	start := time.Now()

	// doBatch contains the primary logic for updating the block index in
	// batches. This is done because attempting to migrate in a single database
	// transaction could result in massive memory usage and could potentially
	// crash on many systems due to ulimits.
	//
	// It returns whether or not all entries have been updated.
	const maxEntries = 20000
	var resumeOffset uint32
	var totalUpdated uint64
	doBatch := func(dbTx database.Tx) (bool, error) {
		meta := dbTx.Metadata()
		v2BlockIdxBucket := meta.Bucket(v2BucketName)
		if v2BlockIdxBucket == nil {
			return false, fmt.Errorf("bucket %s does not exist", v2BucketName)
		}

		// Update block index entries so long as the max number of entries for
		// this batch has not been exceeded.
		var logProgress bool
		var numUpdated, numIterated uint32
		err := v2BlockIdxBucket.ForEach(func(key, oldSerialized []byte) error {
			if interruptRequested(ctx) {
				logProgress = true
				return errInterruptRequested
			}
			if numUpdated >= maxEntries {
				logProgress = true
				return errBatchFinished
			}

			// Skip entries that have already been migrated in previous batches.
			numIterated++
			if numIterated-1 < resumeOffset {
				return nil
			}
			resumeOffset++

			// Decode the old block index entry.
			var entry blockIndexEntryV2
			_, err := decodeBlockIndexEntryV2(oldSerialized, &entry)
			if err != nil {
				return err
			}

			// Mark the block index entry as eligible for validation again.
			const (
				v2StatusValidateFailed  = 1 << 2
				v2StatusInvalidAncestor = 1 << 3
			)
			origStatus := entry.status
			entry.status &^= v2StatusValidateFailed | v2StatusInvalidAncestor
			if entry.status != origStatus {
				targetSize := blockIndexEntrySerializeSizeV2(&entry)
				serialized := make([]byte, targetSize)
				_, err = putBlockIndexEntryV2(serialized, &entry)
				if err != nil {
					return err
				}
				err = v2BlockIdxBucket.Put(key, serialized)
				if err != nil {
					return err
				}
			}

			numUpdated++
			return nil
		})
		isFullyDone := err == nil
		if (isFullyDone || logProgress) && numUpdated > 0 {
			totalUpdated += uint64(numUpdated)
			log.Infof("Updated %d entries (%d total)", numUpdated, totalUpdated)
		}
		return isFullyDone, err
	}

	// Update all entries in batches for the reasons mentioned above.
	if err := batchedUpdate(ctx, db, doBatch); err != nil {
		return err
	}

	elapsed := time.Since(start).Round(time.Millisecond)
	log.Infof("Done updating block index. Total entries: %d in %v",
		totalUpdated, elapsed)
	return nil
}

// scriptSourceEntry houses a script and its associated version.
type scriptSourceEntry struct {
	version uint16
	script  []byte
}

// scriptSource provides a source of transaction output scripts and their
// associated script version for given outpoints and implements the PrevScripter
// interface so it may be used in cases that require access to said scripts.
type scriptSource map[wire.OutPoint]scriptSourceEntry

// PrevScript returns the script and script version associated with the provided
// previous outpoint along with a bool that indicates whether or not the
// requested entry exists. This ensures the caller is able to distinguish
// between missing entries and empty v0 scripts.
func (s scriptSource) PrevScript(prevOut *wire.OutPoint) (uint16, []byte, bool) {
	entry, ok := s[*prevOut]
	if !ok {
		return 0, nil, false
	}
	return entry.version, entry.script, true
}

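// examplePrevScriptLookup is a small sketch (the outpoint is hypothetical)
// showing why PrevScript returns an existence flag: without it, a missing
// entry and an empty version 0 script would be indistinguishable to callers.
func examplePrevScriptLookup(source scriptSource, prevOut wire.OutPoint) {
	version, script, ok := source.PrevScript(&prevOut)
	if !ok {
		log.Warnf("no script entry for outpoint %v", prevOut)
		return
	}
	log.Debugf("outpoint %v spends a version %d script of %d bytes", prevOut,
		version, len(script))
}
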
// determineMinimalOutputsSizeV1 determines and returns the size of the stored
// set of minimal outputs in a version 1 spend journal entry.
func determineMinimalOutputsSizeV1(serialized []byte) (int, error) {
	numOutputs, offset := deserializeVLQ(serialized)
	if offset == 0 {
		return offset, errDeserialize("unexpected end of data during " +
			"decoding (num outputs)")
	}
	for i := 0; i < int(numOutputs); i++ {
		// Amount.
		_, bytesRead := deserializeVLQ(serialized[offset:])
		if bytesRead == 0 {
			return offset, errDeserialize("unexpected end of data during " +
				"decoding (output amount)")
		}
		offset += bytesRead

		// Script version.
		_, bytesRead = deserializeVLQ(serialized[offset:])
		if bytesRead == 0 {
			return offset, errDeserialize("unexpected end of data during " +
				"decoding (output script version)")
		}
		offset += bytesRead

		// Script.
		var scriptSize uint64
		scriptSize, bytesRead = deserializeVLQ(serialized[offset:])
		if bytesRead == 0 {
			return offset, errDeserialize("unexpected end of data during " +
				"decoding (output script size)")
		}
		offset += bytesRead

		if uint64(len(serialized[offset:])) < scriptSize {
			return offset, errDeserialize("unexpected end of data during " +
				"decoding (output script)")
		}
		offset += int(scriptSize)
	}

	return offset, nil
}

// decodeCompressedScriptSizeV1 treats the passed serialized bytes as a v1
// compressed script, possibly followed by other data, and returns the number of
// bytes it occupies taking into account the special encoding of the script size
// by the domain specific compression algorithm described above.
func decodeCompressedScriptSizeV1(serialized []byte) int {
	const (
		// Hardcoded constants so updates do not affect old upgrades.
		cstPayToPubKeyHash       = 0
		cstPayToScriptHash       = 1
		cstPayToPubKeyCompEven   = 2
		cstPayToPubKeyCompOdd    = 3
		cstPayToPubKeyUncompEven = 4
		cstPayToPubKeyUncompOdd  = 5
		numSpecialScripts        = 64
	)

	scriptSize, bytesRead := deserializeVLQ(serialized)
	if bytesRead == 0 {
		return 0
	}

	switch scriptSize {
	case cstPayToPubKeyHash:
		return 21

	case cstPayToScriptHash:
		return 21

	case cstPayToPubKeyCompEven, cstPayToPubKeyCompOdd,
		cstPayToPubKeyUncompEven, cstPayToPubKeyUncompOdd:
		return 33
	}

	scriptSize -= numSpecialScripts
	scriptSize += uint64(bytesRead)
	return int(scriptSize)
}

// decompressScriptV1 returns the original script obtained by decompressing the
// passed v1 compressed script according to the domain specific compression
// algorithm described above.
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScriptV1(compressedPkScript []byte) []byte {
	const (
		// Hardcoded constants so updates do not affect old upgrades.
		cstPayToPubKeyHash       = 0
		cstPayToScriptHash       = 1
		cstPayToPubKeyCompEven   = 2
		cstPayToPubKeyCompOdd    = 3
		cstPayToPubKeyUncompEven = 4
		cstPayToPubKeyUncompOdd  = 5
		numSpecialScripts        = 64
	)

	// Empty scripts, specified by 0x00, are considered nil.
	if len(compressedPkScript) == 0 {
		return nil
	}

	// Decode the script size and examine it for the special cases.
	encodedScriptSize, bytesRead := deserializeVLQ(compressedPkScript)
	switch encodedScriptSize {
	// Pay-to-pubkey-hash script. The resulting script is:
	// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
	case cstPayToPubKeyHash:
		pkScript := make([]byte, 25)
		pkScript[0] = txscript.OP_DUP
		pkScript[1] = txscript.OP_HASH160
		pkScript[2] = txscript.OP_DATA_20
		copy(pkScript[3:], compressedPkScript[bytesRead:bytesRead+20])
		pkScript[23] = txscript.OP_EQUALVERIFY
		pkScript[24] = txscript.OP_CHECKSIG
		return pkScript

	// Pay-to-script-hash script. The resulting script is:
	// <OP_HASH160><20 byte script hash><OP_EQUAL>
	case cstPayToScriptHash:
		pkScript := make([]byte, 23)
		pkScript[0] = txscript.OP_HASH160
		pkScript[1] = txscript.OP_DATA_20
		copy(pkScript[2:], compressedPkScript[bytesRead:bytesRead+20])
		pkScript[22] = txscript.OP_EQUAL
		return pkScript

	// Pay-to-compressed-pubkey script. The resulting script is:
	// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
	case cstPayToPubKeyCompEven, cstPayToPubKeyCompOdd:
		pkScript := make([]byte, 35)
		pkScript[0] = txscript.OP_DATA_33
		oddness := byte(0x02)
		if encodedScriptSize == cstPayToPubKeyCompOdd {
			oddness = 0x03
		}
		pkScript[1] = oddness
		copy(pkScript[2:], compressedPkScript[bytesRead:bytesRead+32])
		pkScript[34] = txscript.OP_CHECKSIG
		return pkScript

	// Pay-to-uncompressed-pubkey script. The resulting script is:
	// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
	case cstPayToPubKeyUncompEven, cstPayToPubKeyUncompOdd:
		// Change the leading byte to the appropriate compressed pubkey
		// identifier (0x02 or 0x03) so it can be decoded as a
		// compressed pubkey. This really should never fail since the
		// encoding ensures it is valid before compressing to this type.
		compressedKey := make([]byte, 33)
		oddness := byte(0x02)
		if encodedScriptSize == cstPayToPubKeyUncompOdd {
			oddness = 0x03
		}
		compressedKey[0] = oddness
		copy(compressedKey[1:], compressedPkScript[1:])
		key, err := secp256k1.ParsePubKey(compressedKey)
		if err != nil {
			return nil
		}

		pkScript := make([]byte, 67)
		pkScript[0] = txscript.OP_DATA_65
		copy(pkScript[1:], key.SerializeUncompressed())
		pkScript[66] = txscript.OP_CHECKSIG
		return pkScript
	}

	// When none of the special cases apply, the script was encoded using
	// the general format, so reduce the script size by the number of
	// special cases and return the unmodified script.
	scriptSize := int(encodedScriptSize - numSpecialScripts)
	pkScript := make([]byte, scriptSize)
	copy(pkScript, compressedPkScript[bytesRead:bytesRead+scriptSize])
	return pkScript
}

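// exampleDecompressP2PKH is a worked example with illustrative bytes (not
// part of the original upgrade code): a v1-compressed pay-to-pubkey-hash
// script is the special-case byte 0x00 followed by the 20-byte hash160, and
// decompressing it yields the canonical 25-byte
// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG> script.
func exampleDecompressP2PKH() []byte {
	compressed := make([]byte, 21)
	compressed[0] = 0x00 // cstPayToPubKeyHash special case
	// Bytes 1-20 would hold the pubkey hash; left zero-valued here.
	return decompressScriptV1(compressed) // 25-byte P2PKH script
}
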
// scriptSourceFromSpendJournalV1 uses the legacy v1 spend journal along with
// the provided block to create a source of previous transaction scripts and
// versions spent by the block.
func scriptSourceFromSpendJournalV1(dbTx database.Tx, block *wire.MsgBlock) (scriptSource, error) {
	// Load the serialized spend journal entry from the database, construct the
	// full list of transactions that spend outputs (notice the coinbase
	// transaction is excluded since it can't spend anything), and perform an
	// initial sanity check to ensure there is serialized data for the block
	// when there are transactions that spend outputs.
	blockHash := block.BlockHash()
	v1SpendJournalBucketName := []byte("spendjournal")
	spendBucket := dbTx.Metadata().Bucket(v1SpendJournalBucketName)
	serialized := spendBucket.Get(blockHash[:])
	txns := make([]*wire.MsgTx, 0, len(block.STransactions)+
		len(block.Transactions[1:]))
	txns = append(txns, block.STransactions...)
	txns = append(txns, block.Transactions[1:]...)
	if len(txns) > 0 && len(serialized) == 0 {
		str := fmt.Sprintf("missing spend journal data for %s", blockHash)
		return nil, errDeserialize(str)
	}

	// The legacy version 1 transaction spend journal consists of an entry for
	// each block connected to the main chain which contains the transaction
	// outputs the block spends serialized such that the order is the reverse of
	// the order they were spent.
	//
	// The legacy format for this entry is roughly:
	//
	//	[<flags><script version><compressed pkscript><optional data>],...
	//
	// The legacy optional data is only present if the flags indicate the
	// transaction is fully spent (bit 4 in legacy format) and its format is
	// roughly:
	//
	//	<tx version><optional stake data>
	//
	// The legacy optional stake data is only present if the flags indicate the
	// transaction type is a ticket and its format is roughly:
	//
	//	<num outputs>[<amount><script version><script len><script>],...
	//
	//	Field                  Type    Size
	//	flags                  VLQ     variable (always 1 byte)
	//	script version         VLQ     variable
	//	compressed pkscript    []byte  variable
	//	optional data (only present if flags indicates fully spent)
	//	  transaction version  VLQ     variable
	//	  stake data (only present if flags indicates tx type ticket)
	//	    num outputs        VLQ     variable
	//	    output info
	//	      amount           VLQ     variable
	//	      script version   VLQ     variable
	//	      script len       VLQ     variable
	//	      script           []byte  variable
	//
	// The legacy serialized flags code format is:
	//
	//	bit  0   - containing transaction is a coinbase
	//	bit  1   - containing transaction has an expiry
	//	bits 2-3 - transaction type
	//	bit  4   - is fully spent
	//	bits 5-7 - unused
	//
	// Given the only information needed is the script version and associated
	// pkscript, the following specifically finds the relevant information while
	// skipping everything else.
	const (
		v1FullySpentFlag = 1 << 4
		v1TxTypeMask     = 0x0c
		v1TxTypeShift    = 2
		v1TxTypeTicket   = 1
	)

	// Loop backwards through all transactions so everything is read in reverse
	// order to match the serialization order.
	source := make(scriptSource)
	var offset int
	for txIdx := len(txns) - 1; txIdx > -1; txIdx-- {
		tx := txns[txIdx]
		isVote := stake.IsSSGen(tx)

		// Loop backwards through all of the transaction inputs and read the
		// associated stxo.
		for txInIdx := len(tx.TxIn) - 1; txInIdx > -1; txInIdx-- {
			// Skip stakebase since it has no input.
			if txInIdx == 0 && isVote {
				continue
			}
			txIn := tx.TxIn[txInIdx]

			// Deserialize the flags.
			if offset >= len(serialized) {
				str := "unexpected end of spend journal entry"
				return nil, errDeserialize(str)
			}
			flags64, bytesRead := deserializeVLQ(serialized[offset:])
			offset += bytesRead
			if bytesRead != 1 {
				str := fmt.Sprintf("unexpected flags size -- got %d, want 1",
					bytesRead)
				return nil, errDeserialize(str)
			}
			flags := byte(flags64)
			fullySpent := flags&v1FullySpentFlag != 0
			txType := (flags & v1TxTypeMask) >> v1TxTypeShift

			// Deserialize the script version.
			if offset >= len(serialized) {
				str := "unexpected end of data after flags"
				return nil, errDeserialize(str)
			}
			scriptVersion, bytesRead := deserializeVLQ(serialized[offset:])
			offset += bytesRead

			// Decode the compressed script size and ensure there are enough
			// bytes left in the slice for it.
			if offset >= len(serialized) {
				str := "unexpected end of data after script version"
				return nil, errDeserialize(str)
			}
			scriptSize := decodeCompressedScriptSizeV1(serialized[offset:])
			if scriptSize < 0 {
				str := "negative script size"
				return nil, errDeserialize(str)
			}
			if offset+scriptSize > len(serialized) {
				str := "unexpected end of data after script size"
				return nil, errDeserialize(str)
			}
			pkScript := serialized[offset : offset+scriptSize]
			offset += scriptSize

			// Create an output in the script source for the referenced script
			// and version using the data from the spend journal.
			prevOut := &txIn.PreviousOutPoint
			source[*prevOut] = scriptSourceEntry{
				version: uint16(scriptVersion),
				script:  decompressScriptV1(pkScript),
			}

			// Deserialize the tx version and minimal outputs for tickets as
			// needed to locate the offset of the next entry.
			if fullySpent {
				if offset >= len(serialized) {
					str := "unexpected end of data after script"
					return nil, errDeserialize(str)
				}
				_, bytesRead := deserializeVLQ(serialized[offset:])
				offset += bytesRead
				if txType == v1TxTypeTicket {
					if offset >= len(serialized) {
						str := "unexpected end of data after tx version"
						return nil, errDeserialize(str)
					}
					sz, err := determineMinimalOutputsSizeV1(serialized[offset:])
					if err != nil {
						return nil, err
					}
					offset += sz
				}
			}
		}
	}

	return source, nil
}

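// exampleBuildFilterV2 sketches how the resulting script source is consumed
// (this mirrors the filter initialization below, though the helper itself is
// illustrative): since scriptSource implements the PrevScripter interface
// expected by blockcf2, it can be handed directly to blockcf2.Regular to
// build the version 2 GCS filter for a block.
func exampleBuildFilterV2(dbTx database.Tx, block *wire.MsgBlock) (*gcs.FilterV2, error) {
	prevScripts, err := scriptSourceFromSpendJournalV1(dbTx, block)
	if err != nil {
		return nil, err
	}
	return blockcf2.Regular(block, prevScripts)
}
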
// initializeGCSFilters creates and stores version 2 GCS filters for all blocks
// in the main chain. This ensures they are immediately available to clients
// and simplifies the rest of the related code since it can rely on the filters
// being available once the upgrade completes.
//
// The database is guaranteed to have a filter entry for every block in the
// main chain if this returns without failure.
func initializeGCSFilters(ctx context.Context, db database.DB, genesisHash *chainhash.Hash) error {
	log.Info("Creating and storing GCS filters. This will take a while...")
	start := time.Now()

	// Determine the blocks in the main chain using the version 2 block index
	// and version 1 chain state.
	var mainChainBlocks []chainhash.Hash
	err := db.View(func(dbTx database.Tx) error {
		// Hardcoded bucket names and keys so updates do not affect old
		// upgrades.
		v2BucketName := []byte("blockidx")
		v1ChainStateKeyName := []byte("chainstate")

		// Load the current best chain tip hash and height from the v1 chain
		// state.
		//
		// The serialized format of the v1 chain state is roughly:
		//
		//	<block hash><rest of data>
		//
		//	Field       Type            Size
		//	block hash  chainhash.Hash  chainhash.HashSize
		//	rest of data...
		meta := dbTx.Metadata()
		serializedChainState := meta.Get(v1ChainStateKeyName)
		if serializedChainState == nil {
			str := fmt.Sprintf("chain state with key %s does not exist",
				v1ChainStateKeyName)
			return errDeserialize(str)
		}
		if len(serializedChainState) < chainhash.HashSize {
			str := "version 1 chain state is malformed"
			return errDeserialize(str)
		}
		var tipHash chainhash.Hash
		copy(tipHash[:], serializedChainState[0:chainhash.HashSize])

		// blockTreeEntry represents a version 2 block index entry with just
		// enough information to be able to determine which blocks comprise the
		// main chain.
		type blockTreeEntry struct {
			parent *blockTreeEntry
			hash   chainhash.Hash
			height uint32
		}

		// Construct a full block tree from the version 2 block index by mapping
		// each block to its parent block.
		var lastEntry, parent *blockTreeEntry
		blockTree := make(map[chainhash.Hash]*blockTreeEntry)
		v2BlockIdxBucket := meta.Bucket(v2BucketName)
		if v2BlockIdxBucket == nil {
			return fmt.Errorf("bucket %s does not exist", v2BucketName)
		}
		err := v2BlockIdxBucket.ForEach(func(_, serialized []byte) error {
			// Decode the block index entry.
			var entry blockIndexEntryV2
			_, err := decodeBlockIndexEntryV2(serialized, &entry)
			if err != nil {
				return err
			}
			header := &entry.header