-
Notifications
You must be signed in to change notification settings - Fork 100
/
assets_store.go
1667 lines (1434 loc) · 48.9 KB
/
assets_store.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
package tarodb
import (
"bytes"
"context"
"database/sql"
"errors"
"fmt"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/schnorr"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/taro/asset"
"github.com/lightninglabs/taro/commitment"
"github.com/lightninglabs/taro/mssmt"
"github.com/lightninglabs/taro/proof"
"github.com/lightninglabs/taro/tarodb/sqlite"
"github.com/lightninglabs/taro/tarofreighter"
"github.com/lightningnetwork/lnd/keychain"
"golang.org/x/exp/maps"
)
type (
	// ConfirmedAsset is an asset that has been fully confirmed on chain.
	ConfirmedAsset = sqlite.QueryAssetsRow

	// RawAssetBalance holds a balance query result for a particular asset
	// or all assets tracked by this daemon.
	RawAssetBalance = sqlite.QueryAssetBalancesByAssetRow

	// RawAssetFamilyBalance holds a balance query result for a particular
	// asset family or all asset families tracked by this daemon.
	RawAssetFamilyBalance = sqlite.QueryAssetBalancesByFamilyRow

	// AssetProof is the asset proof for a given asset, identified by its
	// script key.
	AssetProof = sqlite.FetchAssetProofsRow

	// AssetProofI is identical to AssetProof but is used for the case
	// where the proofs for a specific asset are fetched.
	AssetProofI = sqlite.FetchAssetProofRow

	// PrevInput stores the full input information including the prev out,
	// and also the witness information itself.
	PrevInput = sqlite.InsertAssetWitnessParams

	// AssetWitness is the full prev input for an asset that also couples
	// along the asset ID that the witness belongs to.
	AssetWitness = sqlite.FetchAssetWitnessesRow

	// QueryAssetFilters lets us query assets in the database based on some
	// set filters. This is useful to get the balance of a set of assets,
	// or for things like coin selection.
	QueryAssetFilters = sqlite.QueryAssetsParams

	// UtxoQuery lets us query a managed UTXO by either the transaction it
	// references, or the outpoint.
	UtxoQuery = sqlite.FetchManagedUTXOParams

	// AnchorPoint wraps a managed UTXO along with all the auxiliary
	// information it references.
	AnchorPoint = sqlite.FetchManagedUTXORow

	// AssetAnchorUpdate is used to update the managed UTXO pointer when
	// spending assets on chain.
	AssetAnchorUpdate = sqlite.ReanchorAssetsParams

	// AssetSpendDelta is used to update the script key and amount of an
	// existing asset.
	AssetSpendDelta = sqlite.ApplySpendDeltaParams

	// AnchorTxConf identifies an unconfirmed anchor tx to confirm.
	AnchorTxConf = sqlite.ConfirmChainAnchorTxParams

	// AssetDelta tracks the changes to an asset within the confines of a
	// transfer.
	AssetDelta = sqlite.FetchAssetDeltasRow

	// AssetDeltaWithProof tracks the changes to an asset within the
	// confines of a transfer, also containing the proofs for the change.
	AssetDeltaWithProof = sqlite.FetchAssetDeltasWithProofsRow

	// NewAssetDelta wraps the params needed to insert a new asset delta.
	NewAssetDelta = sqlite.InsertAssetDeltaParams

	// NewAssetTransfer wraps the params needed to insert a new asset
	// transfer.
	NewAssetTransfer = sqlite.InsertAssetTransferParams

	// AssetTransfer packages information related to an asset transfer.
	AssetTransfer = sqlite.QueryAssetTransfersRow

	// TransferQuery allows callers to filter out the set of transfers
	// based on set information.
	TransferQuery = sqlite.QueryAssetTransfersParams

	// NewSpendProof is used to insert new spend proofs for the
	// sender+receiver.
	NewSpendProof = sqlite.InsertSpendProofsParams
)
// ActiveAssetsStore is a sub-set of the main sqlite.Querier interface that
// contains methods related to querying the set of confirmed assets.
type ActiveAssetsStore interface {
	// UpsertAssetStore houses the methods related to inserting/updating
	// assets.
	UpsertAssetStore

	// QueryAssets fetches the set of fully confirmed assets.
	QueryAssets(context.Context, QueryAssetFilters) ([]ConfirmedAsset, error)

	// QueryAssetBalancesByAsset queries the balances for assets or
	// alternatively for a selected one that matches the passed asset ID
	// filter.
	QueryAssetBalancesByAsset(context.Context,
		interface{}) ([]RawAssetBalance, error)

	// QueryAssetBalancesByFamily queries the asset balances for asset
	// families or alternatively for a selected one that matches the passed
	// filter.
	QueryAssetBalancesByFamily(context.Context,
		interface{}) ([]RawAssetFamilyBalance, error)

	// FetchAssetProofs fetches all the asset proofs we have stored on
	// disk.
	FetchAssetProofs(ctx context.Context) ([]AssetProof, error)

	// FetchAssetProof fetches the asset proof for a given asset identified
	// by its script key.
	FetchAssetProof(ctx context.Context,
		scriptKey []byte) (AssetProofI, error)

	// UpsertChainTx inserts a new or updates an existing chain tx into the
	// DB.
	UpsertChainTx(ctx context.Context, arg ChainTx) (int32, error)

	// UpsertManagedUTXO inserts a new or updates an existing managed UTXO
	// to disk and returns the primary key.
	UpsertManagedUTXO(ctx context.Context, arg RawManagedUTXO) (int32, error)

	// UpsertAssetProof inserts a new or updates an existing asset proof on
	// disk.
	UpsertAssetProof(ctx context.Context,
		arg sqlite.UpsertAssetProofParams) error

	// InsertAssetWitness inserts a new prev input for an asset into the
	// database.
	InsertAssetWitness(context.Context, PrevInput) error

	// FetchAssetWitnesses attempts to fetch either all the asset witnesses
	// on disk (NULL param), or the witness for a given asset ID.
	FetchAssetWitnesses(context.Context, sql.NullInt32) ([]AssetWitness, error)

	// FetchManagedUTXO fetches a managed UTXO based on either the outpoint
	// or the transaction that anchors it.
	FetchManagedUTXO(context.Context, UtxoQuery) (AnchorPoint, error)

	// ReanchorAssets takes an old anchor point, then updates all assets
	// that reference that old anchor point to point to the new one.
	ReanchorAssets(ctx context.Context, arg AssetAnchorUpdate) error

	// ApplySpendDelta applies a spend delta (new amount and script key)
	// based on the existing script key of an asset.
	ApplySpendDelta(ctx context.Context, arg AssetSpendDelta) (int32, error)

	// DeleteManagedUTXO deletes the managed utxo identified by the passed
	// serialized outpoint.
	DeleteManagedUTXO(ctx context.Context, outpoint []byte) error

	// ConfirmChainAnchorTx marks a new anchor transaction that was
	// previously unconfirmed as confirmed.
	ConfirmChainAnchorTx(ctx context.Context, arg AnchorTxConf) error

	// FetchAssetDeltas fetches the asset deltas associated with a given
	// transfer id.
	FetchAssetDeltas(ctx context.Context,
		transferID int32) ([]AssetDelta, error)

	// FetchAssetDeltasWithProofs fetches the asset deltas including the
	// proofs associated with a given transfer id.
	FetchAssetDeltasWithProofs(ctx context.Context,
		transferID int32) ([]AssetDeltaWithProof, error)

	// InsertAssetDelta inserts a new asset delta into the DB.
	InsertAssetDelta(ctx context.Context, arg NewAssetDelta) error

	// InsertAssetTransfer inserts a new asset transfer into the DB.
	InsertAssetTransfer(ctx context.Context,
		arg NewAssetTransfer) (int32, error)

	// QueryAssetTransfers queries for a set of asset transfers in the db.
	QueryAssetTransfers(ctx context.Context,
		transferQuery TransferQuery) ([]AssetTransfer, error)

	// DeleteAssetWitnesses deletes the witnesses on disk associated with a
	// given asset ID.
	DeleteAssetWitnesses(ctx context.Context, assetID int32) error

	// InsertSpendProofs is used to insert the new spend proofs after a
	// transfer into DB.
	InsertSpendProofs(ctx context.Context, arg NewSpendProof) (int32, error)

	// DeleteSpendProofs is used to delete the set of proofs on disk after
	// we apply a transfer.
	DeleteSpendProofs(ctx context.Context, transferID int32) error

	// FetchSpendProofs looks up the spend proofs for the given transfer
	// ID.
	FetchSpendProofs(ctx context.Context,
		transferID int32) (sqlite.FetchSpendProofsRow, error)
}
// AssetBalance holds a balance query result for a particular asset or all
// assets tracked by this daemon.
type AssetBalance struct {
	// ID is the asset ID this balance belongs to.
	ID asset.ID

	// Version is the asset version.
	Version int32

	// Balance is the total balance of the asset.
	Balance uint64

	// Tag is the genesis tag of the asset.
	Tag string

	// Meta is the raw genesis metadata of the asset.
	Meta []byte

	// Type is the asset type (normal or collectible).
	Type asset.Type

	// GenesisPoint is the outpoint the asset's genesis references.
	GenesisPoint wire.OutPoint

	// OutputIndex is the genesis output index of the asset.
	OutputIndex uint32
}
// AssetFamilyBalance holds a balance query result for a particular asset
// family or all asset families tracked by this daemon.
type AssetFamilyBalance struct {
	// FamKey is the tweaked family key the balance is aggregated under.
	FamKey *btcec.PublicKey

	// Balance is the total balance across all assets of the family.
	Balance uint64
}
// BatchedAssetStore combines the AssetStore interface with the BatchedTx
// interface, allowing for multiple queries to be executed in a single SQL
// transaction.
type BatchedAssetStore interface {
	ActiveAssetsStore

	// BatchedTx lets callers run a closure of ActiveAssetsStore calls
	// atomically within one database transaction.
	BatchedTx[ActiveAssetsStore, TxOptions]
}
// AssetStore is used to query for the set of pending and confirmed assets.
type AssetStore struct {
	// db is the backing store that supports transaction-scoped batches of
	// queries.
	db BatchedAssetStore
}
// NewAssetStore creates a new AssetStore from the specified BatchedAssetStore
// interface.
func NewAssetStore(db BatchedAssetStore) *AssetStore {
	store := AssetStore{db: db}
	return &store
}
// ChainAsset is a wrapper around the base asset struct that includes
// information detailing where in the chain the asset is currently anchored.
type ChainAsset struct {
	*asset.Asset

	// AnchorTx is the transaction that anchors this chain asset.
	AnchorTx *wire.MsgTx

	// AnchorTxid is the TXID of the anchor tx.
	AnchorTxid chainhash.Hash

	// AnchorBlockHash is the blockhash that mined the anchor tx.
	AnchorBlockHash chainhash.Hash

	// AnchorOutpoint is the outpoint that commits to the asset.
	AnchorOutpoint wire.OutPoint

	// AnchorInternalKey is the raw internal key that was used to create the
	// anchor Taproot output key.
	AnchorInternalKey *btcec.PublicKey
}
// assetWitnesses maps the primary key of an asset to a slice of its previous
// input (witness) information.
type assetWitnesses map[int32][]AssetWitness
// fetchAssetWitnesses attempts to fetch all the asset witnesses that belong to
// the set of passed asset IDs.
func fetchAssetWitnesses(ctx context.Context,
	db ActiveAssetsStore, assetIDs []int32) (assetWitnesses, error) {

	witnessesByAsset := make(assetWitnesses, len(assetIDs))
	for _, id := range assetIDs {
		inputs, err := db.FetchAssetWitnesses(ctx, sqlInt32(id))
		if err != nil {
			return nil, err
		}

		// Genesis assets have no witnesses on disk, so we keep them
		// out of the map entirely, which'll give them the genesis
		// witness.
		if len(inputs) > 0 {
			witnessesByAsset[id] = inputs
		}
	}

	return witnessesByAsset, nil
}
// parseAssetWitness maps a witness stored in the database to something we can
// use directly. It decodes the previous outpoint, script key, asset ID, the
// raw transaction witness stack, and the optional split-commitment proof.
func parseAssetWitness(input AssetWitness) (asset.Witness, error) {
	var (
		op      wire.OutPoint
		witness asset.Witness
	)

	// Decode the serialized previous outpoint first.
	err := readOutPoint(
		bytes.NewReader(input.PrevOutPoint), 0, 0, &op,
	)
	if err != nil {
		return witness, fmt.Errorf("unable to "+
			"read outpoint: %w", err)
	}

	// An all-zero script key is stored for entries without a real prev
	// script key, so only parse when the bytes differ from the zero key.
	var (
		zeroKey, scriptKey asset.SerializedKey
	)
	if !bytes.Equal(zeroKey[:], input.PrevScriptKey) {
		prevKey, err := btcec.ParsePubKey(input.PrevScriptKey)
		if err != nil {
			return witness, fmt.Errorf("unable to decode key: %w",
				err)
		}
		scriptKey = asset.ToSerialized(prevKey)
	}

	var assetID asset.ID
	copy(assetID[:], input.PrevAssetID)
	witness.PrevID = &asset.PrevID{
		OutPoint:  op,
		ID:        assetID,
		ScriptKey: scriptKey,
	}

	// The witness stack and split-commitment proof are both optional, so
	// only decode them when bytes are present. The buf is scratch space
	// shared by both decoders.
	var buf [8]byte
	if len(input.WitnessStack) != 0 {
		err = asset.TxWitnessDecoder(
			bytes.NewReader(input.WitnessStack),
			&witness.TxWitness, &buf,
			uint64(len(input.WitnessStack)),
		)
		if err != nil {
			return witness, fmt.Errorf("unable to decode "+
				"witness: %w", err)
		}
	}

	if len(input.SplitCommitmentProof) != 0 {
		err := asset.SplitCommitmentDecoder(
			bytes.NewReader(input.SplitCommitmentProof),
			&witness.SplitCommitment, &buf,
			uint64(len(input.SplitCommitmentProof)),
		)
		if err != nil {
			return witness, fmt.Errorf("unable to decode split "+
				"commitment: %w", err)
		}
	}

	return witness, nil
}
// dbAssetsToChainAssets maps a set of confirmed assets in the database, and
// the witnesses of those assets to a set of normal ChainAsset structs needed
// by a higher level application. The witnesses map is keyed by the asset's
// database primary key; assets without an entry keep their default (genesis)
// witness.
func dbAssetsToChainAssets(dbAssets []ConfirmedAsset,
	witnesses assetWitnesses) ([]*ChainAsset, error) {

	chainAssets := make([]*ChainAsset, len(dbAssets))
	for i, sprout := range dbAssets {
		// First, we'll decode the script key which every asset must
		// specify, and populate the key locator information.
		rawScriptKeyPub, err := btcec.ParsePubKey(sprout.ScriptKeyRaw)
		if err != nil {
			return nil, err
		}
		rawScriptKeyDesc := keychain.KeyDescriptor{
			PubKey: rawScriptKeyPub,
			KeyLocator: keychain.KeyLocator{
				Index:  uint32(sprout.ScriptKeyIndex),
				Family: keychain.KeyFamily(sprout.ScriptKeyFam),
			},
		}

		// Not all assets have a key family, so we only need to
		// populate this information for those that signalled the
		// requirement of on going emission.
		var familyKey *asset.FamilyKey
		if sprout.TweakedFamKey != nil {
			tweakedFamKey, err := btcec.ParsePubKey(
				sprout.TweakedFamKey,
			)
			if err != nil {
				return nil, err
			}
			rawFamKey, err := btcec.ParsePubKey(sprout.FamKeyRaw)
			if err != nil {
				return nil, err
			}
			famSig, err := schnorr.ParseSignature(sprout.GenesisSig)
			if err != nil {
				return nil, err
			}

			familyKey = &asset.FamilyKey{
				RawKey: keychain.KeyDescriptor{
					PubKey: rawFamKey,
					KeyLocator: keychain.KeyLocator{
						// The key index and family are
						// nullable in the DB, so they
						// are extracted from their
						// sql.NullInt32 wrappers.
						Index: extractSqlInt32[uint32](
							sprout.FamKeyIndex,
						),
						Family: extractSqlInt32[keychain.KeyFamily](
							sprout.FamKeyFamily,
						),
					},
				},
				FamKey: *tweakedFamKey,
				Sig:    *famSig,
			}
		}

		// Next, we'll populate the asset genesis information which
		// includes the genesis prev out, and the other information
		// needed to derive an asset ID.
		var genesisPrevOut wire.OutPoint
		if err := readOutPoint(
			bytes.NewReader(sprout.GenesisPrevOut), 0, 0,
			&genesisPrevOut,
		); err != nil {
			return nil, fmt.Errorf("unable to read "+
				"outpoint: %w", err)
		}
		assetGenesis := asset.Genesis{
			FirstPrevOut: genesisPrevOut,
			Tag:          sprout.AssetTag,
			Metadata:     sprout.MetaData,
			OutputIndex:  uint32(sprout.GenesisOutputIndex),
			Type:         asset.Type(sprout.AssetType),
		}

		// With the base information extracted, we'll use that to
		// create either a normal asset or a collectible.
		lockTime := extractSqlInt32[uint64](sprout.LockTime)
		relativeLocktime := extractSqlInt32[uint64](
			sprout.RelativeLockTime,
		)
		var amount uint64
		switch asset.Type(sprout.AssetType) {
		case asset.Normal:
			amount = uint64(sprout.Amount)
		case asset.Collectible:
			// Collectibles always carry a unit amount of one.
			amount = 1
		}

		scriptKeyPub, err := btcec.ParsePubKey(sprout.TweakedScriptKey)
		if err != nil {
			return nil, err
		}
		scriptKey := asset.ScriptKey{
			PubKey: scriptKeyPub,
			TweakedScriptKey: &asset.TweakedScriptKey{
				RawKey: rawScriptKeyDesc,
				Tweak:  sprout.ScriptKeyTweak,
			},
		}

		assetSprout, err := asset.New(
			assetGenesis, amount, lockTime, relativeLocktime,
			scriptKey, familyKey,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to create new sprout: "+
				"%v", err)
		}

		// A non-empty split commitment root hash means this asset was
		// the result of a split, so we reconstruct the computed node.
		if len(sprout.SplitCommitmentRootHash) != 0 {
			var nodeHash mssmt.NodeHash
			copy(nodeHash[:], sprout.SplitCommitmentRootHash)

			assetSprout.SplitCommitmentRoot = mssmt.NewComputedNode(
				nodeHash,
				uint64(sprout.SplitCommitmentRootValue.Int64),
			)
		}

		// With the asset created, we'll now emplace the set of
		// witnesses for the asset itself. If this is a genesis asset,
		// then it won't have a set of witnesses.
		assetInputs, ok := witnesses[sprout.AssetPrimaryKey]
		if ok {
			assetSprout.PrevWitnesses = make(
				[]asset.Witness, 0, len(assetInputs),
			)
			for _, input := range assetInputs {
				witness, err := parseAssetWitness(input)
				if err != nil {
					return nil, fmt.Errorf("unable to "+
						"parse witness: %w", err)
				}

				assetSprout.PrevWitnesses = append(
					assetSprout.PrevWitnesses, witness,
				)
			}
		}

		anchorTx := wire.NewMsgTx(2)
		err = anchorTx.Deserialize(bytes.NewBuffer(sprout.AnchorTx))
		if err != nil {
			return nil, fmt.Errorf("unable to decode tx: %w", err)
		}

		// An asset will only have an anchor block hash once it has
		// confirmed, so we'll only parse this if it exists.
		var anchorBlockHash chainhash.Hash
		if sprout.AnchorBlockHash != nil {
			anchorHash, err := chainhash.NewHash(
				sprout.AnchorBlockHash,
			)
			if err != nil {
				return nil, fmt.Errorf("unable to extract block "+
					"hash: %w", err)
			}
			anchorBlockHash = *anchorHash
		}

		var anchorOutpoint wire.OutPoint
		err = readOutPoint(
			bytes.NewReader(sprout.AnchorOutpoint), 0, 0,
			&anchorOutpoint,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to decode "+
				"outpoint: %w", err)
		}

		anchorInternalKey, err := btcec.ParsePubKey(
			sprout.AnchorInternalKey,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to parse anchor "+
				"internal key: %w", err)
		}

		chainAssets[i] = &ChainAsset{
			Asset:             assetSprout,
			AnchorTx:          anchorTx,
			AnchorTxid:        anchorTx.TxHash(),
			AnchorBlockHash:   anchorBlockHash,
			AnchorOutpoint:    anchorOutpoint,
			AnchorInternalKey: anchorInternalKey,
		}
	}

	return chainAssets, nil
}
// constraintsToDbFilter maps application level constraints to the set of
// filters we use in the SQL queries. A nil query yields the zero-value filter
// which matches everything.
func constraintsToDbFilter(query *AssetQueryFilters) QueryAssetFilters {
	var assetFilter QueryAssetFilters
	if query == nil {
		return assetFilter
	}

	if query.MinAmt != 0 {
		assetFilter.MinAmt = sql.NullInt64{
			Int64: int64(query.MinAmt),
			Valid: true,
		}
	}
	if query.AssetID != nil {
		assetFilter.AssetIDFilter = query.AssetID[:]
	}
	if query.FamilyKey != nil {
		assetFilter.KeyFamFilter = query.FamilyKey.SerializeCompressed()
	}

	// TODO(roasbeef): only want to allow asset ID or other and not both?

	return assetFilter
}
// fetchAssetsWithWitness fetches the set of assets in the backing store based
// on the set asset filter. A set of witnesses for each of the assets keyed by
// the primary key of the asset is also returned.
func fetchAssetsWithWitness(ctx context.Context, q ActiveAssetsStore,
	assetFilter QueryAssetFilters) ([]ConfirmedAsset, assetWitnesses, error) {

	// First, we'll fetch all the assets we know of on disk.
	dbAssets, err := q.QueryAssets(ctx, assetFilter)
	if err != nil {
		// Wrap with %w (not %v) so callers can still unwrap the
		// underlying error, consistent with the rest of this file.
		return nil, nil, fmt.Errorf("unable to read db assets: %w", err)
	}

	assetIDs := fMap(dbAssets, func(a ConfirmedAsset) int32 {
		return a.AssetPrimaryKey
	})

	// With all the assets obtained, we'll now do a second query to
	// obtain all the witnesses we know of for each asset.
	assetWitnesses, err := fetchAssetWitnesses(ctx, q, assetIDs)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to fetch asset "+
			"witnesses: %w", err)
	}

	return dbAssets, assetWitnesses, nil
}
// AssetQueryFilters is a wrapper struct over the CommitmentConstraints struct
// which lets us filter the results of the set of assets returned.
type AssetQueryFilters struct {
	tarofreighter.CommitmentConstraints
}
// QueryBalancesByAsset queries the balances for assets or alternatively
// for a selected one that matches the passed asset ID filter. A nil assetID
// returns the balances for all assets.
func (a *AssetStore) QueryBalancesByAsset(ctx context.Context,
	assetID *asset.ID) (map[asset.ID]AssetBalance, error) {

	// A nil filter slice means "no filter" to the underlying query.
	var assetFilter []byte
	if assetID != nil {
		assetFilter = assetID[:]
	}

	balances := make(map[asset.ID]AssetBalance)

	readOpts := NewAssetStoreReadTx()
	dbErr := a.db.ExecTx(ctx, &readOpts, func(q ActiveAssetsStore) error {
		dbBalances, err := q.QueryAssetBalancesByAsset(ctx, assetFilter)
		if err != nil {
			return fmt.Errorf("unable to query asset "+
				"balances by asset: %w", err)
		}

		// Map each raw DB row into the application-level balance
		// struct, keyed by asset ID.
		for _, assetBalance := range dbBalances {
			var assetID asset.ID
			copy(assetID[:], assetBalance.AssetID[:])

			assetIDBalance := AssetBalance{
				Version:     assetBalance.Version,
				Balance:     uint64(assetBalance.Balance),
				Tag:         assetBalance.AssetTag,
				Type:        asset.Type(assetBalance.AssetType),
				OutputIndex: uint32(assetBalance.OutputIndex),
			}

			err = readOutPoint(
				bytes.NewReader(assetBalance.GenesisPoint),
				0, 0, &assetIDBalance.GenesisPoint,
			)
			if err != nil {
				return err
			}

			copy(assetIDBalance.ID[:], assetBalance.AssetID)

			// Copy the metadata so the returned balance doesn't
			// alias the DB row's byte slice.
			assetIDBalance.Meta = make(
				[]byte, len(assetBalance.MetaData),
			)
			copy(assetIDBalance.Meta, assetBalance.MetaData)

			balances[assetID] = assetIDBalance
		}

		// err is nil here: any non-nil readOutPoint error returned
		// early inside the loop.
		return err
	})
	if dbErr != nil {
		return nil, dbErr
	}

	return balances, nil
}
// QueryAssetBalancesByFamily queries the asset balances for asset families or
// alternatively for a selected one that matches the passed filter. A nil
// famKey returns the balances for all asset families.
func (a *AssetStore) QueryAssetBalancesByFamily(ctx context.Context,
	famKey *btcec.PublicKey) (map[asset.SerializedKey]AssetFamilyBalance,
	error) {

	// A nil filter slice means "no filter" to the underlying query.
	var famFilter []byte
	if famKey != nil {
		famKeySerialized := famKey.SerializeCompressed()
		famFilter = famKeySerialized[:]
	}

	balances := make(map[asset.SerializedKey]AssetFamilyBalance)

	readOpts := NewAssetStoreReadTx()
	dbErr := a.db.ExecTx(ctx, &readOpts, func(q ActiveAssetsStore) error {
		dbBalances, err := q.QueryAssetBalancesByFamily(ctx, famFilter)
		if err != nil {
			// Mention the family query here, the prior message was
			// copy-pasted from the by-asset variant.
			return fmt.Errorf("unable to query asset "+
				"balances by family: %w", err)
		}

		for _, famBalance := range dbBalances {
			// NOTE(review): if TweakedFamKey is nil in the DB,
			// famKey stays nil below and ToSerialized is handed a
			// nil key — confirm that's handled upstream.
			var famKey *btcec.PublicKey
			if famBalance.TweakedFamKey != nil {
				famKey, err = btcec.ParsePubKey(
					famBalance.TweakedFamKey,
				)
				if err != nil {
					return err
				}
			}

			serializedKey := asset.ToSerialized(famKey)
			balances[serializedKey] = AssetFamilyBalance{
				FamKey:  famKey,
				Balance: uint64(famBalance.Balance),
			}
		}

		return err
	})
	if dbErr != nil {
		return nil, dbErr
	}

	return balances, nil
}
// FetchAllAssets fetches the set of confirmed assets stored on disk. An
// optional query can be passed to restrict the returned set.
func (a *AssetStore) FetchAllAssets(ctx context.Context,
	query *AssetQueryFilters) ([]*ChainAsset, error) {

	// Translate the application level filtering into the filter format
	// the database query understands.
	assetFilter := constraintsToDbFilter(query)

	var (
		dbAssets  []ConfirmedAsset
		witnesses assetWitnesses
	)

	// With the query constructed, fetch the assets along with their
	// witness information inside a single read transaction.
	readOpts := NewAssetStoreReadTx()
	dbErr := a.db.ExecTx(ctx, &readOpts, func(q ActiveAssetsStore) error {
		var err error
		dbAssets, witnesses, err = fetchAssetsWithWitness(
			ctx, q, assetFilter,
		)
		return err
	})
	if dbErr != nil {
		return nil, dbErr
	}

	return dbAssetsToChainAssets(dbAssets, witnesses)
}
// FetchAssetProofs returns the latest proof file for either the set of target
// assets, or all assets if no script keys for an asset are passed in. The
// returned blobs are keyed by the serialized script key.
//
// TODO(roasbeef): potentially have a version that writes thru a reader
// instead?
func (a *AssetStore) FetchAssetProofs(ctx context.Context,
	targetAssets ...*btcec.PublicKey) (proof.AssetBlobs, error) {

	proofs := make(proof.AssetBlobs)

	readOpts := NewAssetStoreReadTx()
	dbErr := a.db.ExecTx(ctx, &readOpts, func(q ActiveAssetsStore) error {
		// No target asset so we can just read them all from disk.
		if len(targetAssets) == 0 {
			assetProofs, err := q.FetchAssetProofs(ctx)
			if err != nil {
				return fmt.Errorf("unable to fetch asset "+
					"proofs: %w", err)
			}

			for _, p := range assetProofs {
				scriptKey, err := btcec.ParsePubKey(p.ScriptKey)
				if err != nil {
					return err
				}

				serializedKey := asset.ToSerialized(scriptKey)
				proofs[serializedKey] = p.ProofFile
			}

			return nil
		}

		// Otherwise, we'll need to issue a series of queries to fetch
		// each of the relevant proof files.
		//
		// TODO(roasbeef): can modify the query to use IN somewhere
		// instead? then would take input params and insert into
		// virtual rows to use
		for _, scriptKey := range targetAssets {
			// Shadow the loop variable so the value is stable for
			// this iteration (pre Go 1.22 loop semantics).
			scriptKey := scriptKey

			serializedKey := asset.ToSerialized(scriptKey)

			assetProof, err := q.FetchAssetProof(
				ctx, serializedKey[:],
			)
			if err != nil {
				return fmt.Errorf("unable to fetch asset "+
					"proof: %w", err)
			}

			proofs[serializedKey] = assetProof.ProofFile
		}

		return nil
	})
	if dbErr != nil {
		return nil, dbErr
	}

	return proofs, nil
}
// FetchProof fetches a proof for an asset uniquely identified by the passed
// ProofIdentifier.
//
// NOTE: This implements the proof.ArchiveBackend interface.
func (a *AssetStore) FetchProof(ctx context.Context,
	locator proof.Locator) (proof.Blob, error) {

	// We don't need anything else but the script key since we have an
	// on-disk index for all proofs we store.
	scriptKey := locator.ScriptKey

	var diskProof proof.Blob

	readOpts := NewAssetStoreReadTx()
	dbErr := a.db.ExecTx(ctx, &readOpts, func(q ActiveAssetsStore) error {
		assetProof, err := q.FetchAssetProof(
			ctx, scriptKey.SerializeCompressed(),
		)
		if err != nil {
			return fmt.Errorf("unable to fetch asset "+
				"proof: %w", err)
		}

		diskProof = assetProof.ProofFile

		return nil
	})
	switch {
	// Map a missing row onto the interface-level sentinel so callers can
	// match with errors.Is(err, proof.ErrProofNotFound).
	case errors.Is(dbErr, sql.ErrNoRows):
		return nil, proof.ErrProofNotFound
	case dbErr != nil:
		return nil, dbErr
	}

	return diskProof, nil
}
// insertAssetWitnesses attempts to insert the set of asset witnesses in to the
// database, referencing the passed asset primary key. Each witness's prev
// outpoint, witness stack, and optional split-commitment proof are serialized
// before insertion.
func (a *AssetStore) insertAssetWitnesses(ctx context.Context,
	db ActiveAssetsStore, assetID int32, inputs []asset.Witness) error {

	// Scratch buffer shared by the encoders below.
	var buf [8]byte
	for _, input := range inputs {
		prevID := input.PrevID

		prevOutpoint, err := encodeOutpoint(prevID.OutPoint)
		if err != nil {
			return fmt.Errorf("unable to write outpoint: %w", err)
		}

		// The witness stack is optional; only serialize it when it
		// carries data.
		var witnessStack []byte
		if len(input.TxWitness) != 0 {
			var b bytes.Buffer
			err = asset.TxWitnessEncoder(&b, &input.TxWitness, &buf)
			if err != nil {
				return fmt.Errorf("unable to encode "+
					"witness: %w", err)
			}

			witnessStack = make([]byte, b.Len())
			copy(witnessStack, b.Bytes())
		}

		// Likewise, the split-commitment proof only exists for split
		// outputs.
		var splitCommitmentProof []byte
		if input.SplitCommitment != nil {
			var b bytes.Buffer
			err := asset.SplitCommitmentEncoder(
				&b, &input.SplitCommitment, &buf,
			)
			if err != nil {
				return fmt.Errorf("unable to encode split "+
					"commitment: %w", err)
			}

			splitCommitmentProof = make([]byte, b.Len())
			copy(splitCommitmentProof, b.Bytes())
		}

		err = db.InsertAssetWitness(ctx, PrevInput{
			AssetID:              assetID,
			PrevOutPoint:         prevOutpoint,
			PrevAssetID:          prevID.ID[:],
			PrevScriptKey:        prevID.ScriptKey.CopyBytes(),
			WitnessStack:         witnessStack,
			SplitCommitmentProof: splitCommitmentProof,
		})
		if err != nil {
			return fmt.Errorf("unable to insert witness: %v", err)
		}
	}

	return nil
}
// importAssetFromProof imports a new asset into the database based on the
// information associated with the annotated proofs. This will result in a new
// asset inserted on disk, with all dependencies such as the asset witnesses
// inserted along the way.
func (a *AssetStore) importAssetFromProof(ctx context.Context,
db ActiveAssetsStore, proof *proof.AnnotatedProof) error {
// TODO(roasbeef): below needs to be updated to support asset splits
// We already know where this lives on-chain, so we can go ahead and
// insert the chain information now.
//
// From the final asset snapshot, we'll obtain the final "resting
// place" of the asset and insert that into the DB.
var anchorTxBuf bytes.Buffer
if err := proof.AnchorTx.Serialize(&anchorTxBuf); err != nil {
return err
}
anchorTXID := proof.AnchorTx.TxHash()
chainTXID, err := db.UpsertChainTx(ctx, ChainTx{
Txid: anchorTXID[:],
RawTx: anchorTxBuf.Bytes(),
BlockHeight: sqlInt32(proof.AnchorBlockHeight),
BlockHash: proof.AnchorBlockHash[:],
TxIndex: sqlInt32(proof.AnchorTxIndex),
})
if err != nil {
return fmt.Errorf("unable to insert chain tx: %w", err)
}
anchorOutput := proof.AnchorTx.TxOut[proof.OutputIndex]
anchorPoint, err := encodeOutpoint(wire.OutPoint{
Hash: anchorTXID,
Index: proof.OutputIndex,
})
if err != nil {
return fmt.Errorf("unable to encode outpoint: %w", err)
}
// Before we import the managed UTXO below, we'll make sure to insert
// the internal key, though it might already exist here.
_, err = db.UpsertInternalKey(ctx, InternalKey{
RawKey: proof.InternalKey.SerializeCompressed(),
})
if err != nil {
return fmt.Errorf("unable to insert internal key: %w", err)
}
// Next, we'll insert the managed UTXO that points to the output in our
// control for the specified asset.
//
// TODO(roasbeef): also need to store sibling hash here?
tapscriptRoot := proof.ScriptRoot.TapscriptRoot(nil)
utxoID, err := db.UpsertManagedUTXO(ctx, RawManagedUTXO{
RawKey: proof.InternalKey.SerializeCompressed(),
Outpoint: anchorPoint,
AmtSats: anchorOutput.Value,
TaroRoot: tapscriptRoot[:],
TxnID: chainTXID,
})
if err != nil {
return fmt.Errorf("unable to insert managed utxo: %w", err)
}
newAsset := proof.Asset
// Insert/update the asset information in the database now.
_, assetIDs, err := upsertAssetsWithGenesis(
ctx, db, newAsset.Genesis.FirstPrevOut,
[]*asset.Asset{newAsset}, []sql.NullInt32{sqlInt32(utxoID)},
)
if err != nil {
return fmt.Errorf("error inserting asset with genesis: %w", err)
}
// Now that we have the asset inserted, we'll also insert all the
// witness data associated with the asset in a new row.
err = a.insertAssetWitnesses(