package itest

import (
"bytes"
"context"
"crypto/rand"
"fmt"
"testing"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/stretchr/testify/require"
)
type testCase struct {
name string
test func(net *lntest.NetworkHarness, t *harnessTest)
}
// harnessTest wraps a regular testing.T providing enhanced error detection
// and propagation. All errors will be augmented with a full stack trace to
// aid in debugging. Additionally, any panics caused by active test cases
// will also be handled and represented as fatals.
type harnessTest struct {
t *testing.T
// testCase is populated during test execution and represents the
// current test case.
testCase *testCase
// lndHarness is a reference to the current network harness. Will be
// nil if not yet set up.
lndHarness *lntest.NetworkHarness
}
// newHarnessTest creates a new instance of a harnessTest from a regular
// testing.T instance.
func newHarnessTest(t *testing.T, net *lntest.NetworkHarness) *harnessTest {
return &harnessTest{t, nil, net}
}
// Skipf calls the underlying testing.T's Skipf method, causing the current
// test to be skipped.
func (h *harnessTest) Skipf(format string, args ...interface{}) {
h.t.Skipf(format, args...)
}
// Fatalf causes the current active test case to fail with a fatal error. All
// integration tests should mark test failures solely with this method due to
// the error stack traces it produces.
func (h *harnessTest) Fatalf(format string, a ...interface{}) {
if h.lndHarness != nil {
h.lndHarness.SaveProfilesPages(h.t)
}
stacktrace := errors.Wrap(fmt.Sprintf(format, a...), 1).ErrorStack()
if h.testCase != nil {
h.t.Fatalf("Failed: (%v): exited with error: \n"+
"%v", h.testCase.name, stacktrace)
} else {
h.t.Fatalf("Error outside of test: %v", stacktrace)
}
}
// RunTestCase executes a harness test case. Any errors or panics will be
// represented as fatal.
func (h *harnessTest) RunTestCase(testCase *testCase) {
h.testCase = testCase
defer func() {
h.testCase = nil
}()
defer func() {
if err := recover(); err != nil {
description := errors.Wrap(err, 2).ErrorStack()
h.t.Fatalf("Failed: (%v) panicked with: \n%v",
h.testCase.name, description)
}
}()
testCase.test(h.lndHarness, h)
}
func (h *harnessTest) Logf(format string, args ...interface{}) {
h.t.Logf(format, args...)
}
func (h *harnessTest) Log(args ...interface{}) {
h.t.Log(args...)
}
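
// The helpers above are typically driven by a top-level test function that
// wraps each entry of a test list in a harnessTest and runs it via
// RunTestCase. A minimal, illustrative sketch (the allTestCases list and the
// already-constructed lndHarness variable are assumptions, not part of this
// file):
//
//	func TestLightningNetworkDaemon(t *testing.T) {
//		for _, tc := range allTestCases {
//			tc := tc
//			success := t.Run(tc.name, func(t1 *testing.T) {
//				ht := newHarnessTest(t1, lndHarness)
//				ht.RunTestCase(tc)
//			})
//			if !success {
//				// Stop at the first failure to keep the logs readable.
//				break
//			}
//		}
//	}

// assertTxInBlock asserts that the given transaction ID is found among the
// transactions of the provided block, failing the test otherwise.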
func assertTxInBlock(t *harnessTest, block *wire.MsgBlock, txid *chainhash.Hash) {
for _, tx := range block.Transactions {
sha := tx.TxHash()
if bytes.Equal(txid[:], sha[:]) {
return
}
}
t.Fatalf("tx was not included in block")
}
// mineBlocks mines 'num' blocks and checks that the blocks are present in
// the mining node's blockchain. numTxs should be set to the number of
// transactions (excluding the coinbase) we expect to be included in the first
// mined block. Between each mined block an artificial delay is introduced to
// give all network participants time to catch up.
func mineBlocks(t *harnessTest, net *lntest.NetworkHarness,
num uint32, numTxs int) []*wire.MsgBlock {
t.t.Helper()
// If we expect transactions to be included in the blocks we'll mine,
// we wait here until they are seen in the miner's mempool.
var txids []*chainhash.Hash
var err error
if numTxs > 0 {
txids, err = waitForNTxsInMempool(
net.Miner.Client, numTxs, minerMempoolTimeout,
)
require.NoError(t.t, err, "unable to find txns in mempool")
}
blocks := make([]*wire.MsgBlock, num)
blockHashes := make([]*chainhash.Hash, 0, num)
for i := uint32(0); i < num; i++ {
generatedHashes, err := net.Miner.Client.Generate(1)
require.NoError(t.t, err, "generate blocks")
blockHashes = append(blockHashes, generatedHashes...)
time.Sleep(slowMineDelay)
}
for i, blockHash := range blockHashes {
block, err := net.Miner.Client.GetBlock(blockHash)
require.NoError(t.t, err, "get blocks")
blocks[i] = block
}
// Finally, assert that all the transactions were included in the first
// block.
for _, txid := range txids {
assertTxInBlock(t, blocks[0], txid)
}
return blocks
}
// openChannelStream blocks until an OpenChannel request for a channel funded
// by alice succeeds. If it does, a stream client is returned to receive events
// about the opening channel.
func openChannelStream(t *harnessTest, net *lntest.NetworkHarness,
alice, bob *lntest.HarnessNode,
p lntest.OpenChannelParams) lnrpc.Lightning_OpenChannelClient {
t.t.Helper()
// Wait until we are able to fund a channel successfully. This wait
// prevents us from erroring out when trying to create a channel while
// the node is starting up.
var chanOpenUpdate lnrpc.Lightning_OpenChannelClient
err := wait.NoError(func() error {
var err error
chanOpenUpdate, err = net.OpenChannel(alice, bob, p)
return err
}, defaultTimeout)
require.NoError(t.t, err, "unable to open channel")
return chanOpenUpdate
}
// openChannelAndAssert attempts to open a channel with the specified
// parameters extended from Alice to Bob. Additionally, two items are asserted
// after the channel is considered open: the funding transaction should be
// found within a block, and both Alice and Bob should report the new
// channel in their channel listings.
func openChannelAndAssert(t *harnessTest, net *lntest.NetworkHarness,
alice, bob *lntest.HarnessNode,
p lntest.OpenChannelParams) *lnrpc.ChannelPoint {
t.t.Helper()
chanOpenUpdate := openChannelStream(t, net, alice, bob, p)
// Mine 6 blocks, then wait for Alice's node to notify us that the
// channel has been opened. The funding transaction should be found
// within the first newly mined block. We mine 6 blocks so that in the
// case that the channel is public, it is announced to the network.
block := mineBlocks(t, net, 6, 1)[0]
fundingChanPoint, err := net.WaitForChannelOpen(chanOpenUpdate)
require.NoError(t.t, err, "error while waiting for channel open")
fundingTxID, err := lnrpc.GetChanPointFundingTxid(fundingChanPoint)
require.NoError(t.t, err, "unable to get txid")
assertTxInBlock(t, block, fundingTxID)
// The channel should be listed in the peer information returned by
// both peers.
chanPoint := wire.OutPoint{
Hash: *fundingTxID,
Index: fundingChanPoint.OutputIndex,
}
require.NoError(
t.t, net.AssertChannelExists(alice, &chanPoint),
"unable to assert channel existence",
)
require.NoError(
t.t, net.AssertChannelExists(bob, &chanPoint),
"unable to assert channel existence",
)
return fundingChanPoint
}
// closeChannelAndAssert attempts to close a channel identified by the passed
// channel point owned by the passed Lightning node. A fully blocking channel
// closure is attempted, bounded internally by channelCloseTimeout. Once the
// channel has been detected as closed, an assertion checks that the closing
// transaction is found within a block. Finally, this assertion verifies that
// the node always sends out a disable update when closing the channel if the
// channel was previously enabled.
//
// NOTE: This method assumes that the provided funding point is confirmed
// on-chain AND that the edge exists in the node's channel graph. If the funding
// transaction was reorged out at some point, use closeReorgedChannelAndAssert.
func closeChannelAndAssert(t *harnessTest, net *lntest.NetworkHarness,
node *lntest.HarnessNode, fundingChanPoint *lnrpc.ChannelPoint,
force bool) *chainhash.Hash {
return closeChannelAndAssertType(
t, net, node, fundingChanPoint, false, force,
)
}
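
// closeChannelAndAssertType behaves like closeChannelAndAssert, but
// additionally lets the caller indicate whether the channel uses anchor
// outputs, in which case an anchor sweep transaction is expected alongside
// the closing transaction in the mined block.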
func closeChannelAndAssertType(t *harnessTest, net *lntest.NetworkHarness,
node *lntest.HarnessNode, fundingChanPoint *lnrpc.ChannelPoint,
anchors, force bool) *chainhash.Hash {
ctxb := context.Background()
ctxt, cancel := context.WithTimeout(ctxb, channelCloseTimeout)
defer cancel()
// Fetch the current channel policy. If the channel is currently
// enabled, we will register for graph notifications before closing to
// assert that the node sends out a disabling update as a result of the
// channel being closed.
curPolicy := getChannelPolicies(
t, node, node.PubKeyStr, fundingChanPoint,
)[0]
expectDisable := !curPolicy.Disabled
closeUpdates, _, err := net.CloseChannel(node, fundingChanPoint, force)
require.NoError(t.t, err, "unable to close channel")
// If the channel policy was enabled prior to the closure, wait until we
// receive the disable update.
if expectDisable {
curPolicy.Disabled = true
assertChannelPolicyUpdate(
t.t, node, node.PubKeyStr,
curPolicy, fundingChanPoint, false,
)
}
return assertChannelClosed(
ctxt, t, net, node, fundingChanPoint, anchors, closeUpdates,
)
}
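
// exampleOpenThenCloseChannel is a minimal, hypothetical sketch (it is not
// registered in any test list) showing how the open and close helpers above
// are typically combined inside a test case body. The funding amount is an
// arbitrary illustrative value.
func exampleOpenThenCloseChannel(net *lntest.NetworkHarness, t *harnessTest,
	alice, bob *lntest.HarnessNode) {

	// Open a channel from alice to bob and wait for it to confirm.
	chanPoint := openChannelAndAssert(
		t, net, alice, bob, lntest.OpenChannelParams{
			Amt: 1000000,
		},
	)

	// ... the channel would be exercised here ...

	// Cooperatively close the channel (force=false) and assert that the
	// closing transaction confirms and the channel is fully cleaned up.
	closeChannelAndAssert(t, net, alice, chanPoint, false)
}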
// assertChannelPolicyUpdate checks that the required policy update has
// happened on the given node.
func assertChannelPolicyUpdate(t *testing.T, node *lntest.HarnessNode,
advertisingNode string, policy *lnrpc.RoutingPolicy,
chanPoint *lnrpc.ChannelPoint, includeUnannounced bool) {
require.NoError(
t, node.WaitForChannelPolicyUpdate(
advertisingNode, policy,
chanPoint, includeUnannounced,
), "error while waiting for channel update",
)
}
// assertChannelClosed asserts that the channel is properly cleaned up after
// initiating a cooperative or local close.
func assertChannelClosed(ctx context.Context, t *harnessTest,
net *lntest.NetworkHarness, node *lntest.HarnessNode,
fundingChanPoint *lnrpc.ChannelPoint, anchors bool,
closeUpdates lnrpc.Lightning_CloseChannelClient) *chainhash.Hash {
txid, err := lnrpc.GetChanPointFundingTxid(fundingChanPoint)
require.NoError(t.t, err, "unable to get txid")
chanPointStr := fmt.Sprintf("%v:%v", txid, fundingChanPoint.OutputIndex)
// If the channel appears in list channels, ensure that its state
// contains ChanStatusCoopBroadcasted.
listChansRequest := &lnrpc.ListChannelsRequest{}
listChansResp, err := node.ListChannels(ctx, listChansRequest)
require.NoError(t.t, err, "unable to query for list channels")
for _, channel := range listChansResp.Channels {
// Skip other channels.
if channel.ChannelPoint != chanPointStr {
continue
}
// Assert that the channel is in the coop broadcasted state.
require.Contains(
t.t, channel.ChanStatusFlags,
channeldb.ChanStatusCoopBroadcasted.String(),
"channel not coop broadcasted",
)
}
// At this point, the channel should now be marked as being in the
// state of "waiting close".
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := node.PendingChannels(ctx, pendingChansRequest)
require.NoError(t.t, err, "unable to query for pending channels")
var found bool
for _, pendingClose := range pendingChanResp.WaitingCloseChannels {
if pendingClose.Channel.ChannelPoint == chanPointStr {
found = true
break
}
}
require.True(t.t, found, "channel not marked as waiting close")
// We'll now generate a single block, wait for the final close status
// update, then ensure that the closing transaction was included in the
// block. If there are anchors, we also expect an anchor sweep.
expectedTxes := 1
if anchors {
expectedTxes = 2
}
block := mineBlocks(t, net, 1, expectedTxes)[0]
closingTxid, err := net.WaitForChannelClose(closeUpdates)
require.NoError(t.t, err, "error while waiting for channel close")
assertTxInBlock(t, block, closingTxid)
// Finally, the channel should no longer appear in the waiting close state,
// as we've just mined a block that should include the closing
// transaction.
err = wait.Predicate(func() bool {
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := node.PendingChannels(
ctx, pendingChansRequest,
)
if err != nil {
return false
}
for _, pendingClose := range pendingChanResp.WaitingCloseChannels {
if pendingClose.Channel.ChannelPoint == chanPointStr {
return false
}
}
return true
}, defaultTimeout)
require.NoError(
t.t, err, "closing transaction not marked as fully closed",
)
return closingTxid
}
// shutdownAndAssert shuts down the given node and asserts that no errors
// occur.
func shutdownAndAssert(net *lntest.NetworkHarness, t *harnessTest,
node *lntest.HarnessNode) {
// The process may not always be in a state to shut down immediately, so
// we'll retry up to a hard limit to ensure we eventually shut down.
err := wait.NoError(func() error {
return net.ShutdownNode(node)
}, defaultTimeout)
require.NoErrorf(t.t, err, "unable to shutdown %v", node.Name())
}
// nodeArgsForCommitType returns the command line flags to supply in order to
// enable the given commitment type.
func nodeArgsForCommitType(commitType lnrpc.CommitmentType) []string {
switch commitType {
case lnrpc.CommitmentType_LEGACY:
return []string{"--protocol.legacy.committweak"}
case lnrpc.CommitmentType_STATIC_REMOTE_KEY:
return []string{}
case lnrpc.CommitmentType_ANCHORS:
return []string{"--protocol.anchors"}
case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
return []string{
"--protocol.anchors",
"--protocol.script-enforced-lease",
}
}
return nil
}
// calcStaticFee calculates appropriate fees for commitment transactions. This
// function provides a simple way to allow test balance assertions to take fee
// calculations into account.
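//
// As a rough worked example (assuming lnd's current weight constants of 724
// weight units for the base commitment and 172 per HTLC): the 50,000 sat/kvB
// rate used below converts to 12,500 sat/kw, so an empty non-anchor
// commitment costs roughly 12,500 * 724 / 1,000 = 9,050 sats, and each
// additional HTLC adds about 12,500 * 172 / 1,000 = 2,150 sats.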
func calcStaticFee(c lnrpc.CommitmentType, numHTLCs int) btcutil.Amount {
const htlcWeight = input.HTLCWeight
var (
feePerKw = chainfee.SatPerKVByte(50000).FeePerKWeight()
commitWeight = input.CommitWeight
anchors = btcutil.Amount(0)
)
// The anchor commitment type is slightly heavier, and we must also add
// the value of the two anchors to the resulting fee the initiator
// pays. In addition the fee rate is capped at 10 sat/vbyte for anchor
// channels.
if commitTypeHasAnchors(c) {
feePerKw = chainfee.SatPerKVByte(
lnwallet.DefaultAnchorsCommitMaxFeeRateSatPerVByte * 1000,
).FeePerKWeight()
commitWeight = input.AnchorCommitWeight
anchors = 2 * anchorSize
}
return feePerKw.FeeForWeight(int64(commitWeight+htlcWeight*numHTLCs)) +
anchors
}
// commitTypeHasAnchors returns whether commitType uses anchor outputs.
func commitTypeHasAnchors(commitType lnrpc.CommitmentType) bool {
switch commitType {
case lnrpc.CommitmentType_ANCHORS,
lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
return true
default:
return false
}
}
// channelCommitType retrieves the active channel commitment type for the given
// chan point.
func channelCommitType(node *lntest.HarnessNode,
chanPoint *lnrpc.ChannelPoint) (lnrpc.CommitmentType, error) {
ctxb := context.Background()
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
req := &lnrpc.ListChannelsRequest{}
channels, err := node.ListChannels(ctxt, req)
if err != nil {
return 0, fmt.Errorf("listchannels failed: %v", err)
}
for _, c := range channels.Channels {
if c.ChannelPoint == txStr(chanPoint) {
return c.CommitmentType, nil
}
}
return 0, fmt.Errorf("channel point %v not found", chanPoint)
}
// txStr returns the string representation of the channel's funding outpoint.
func txStr(chanPoint *lnrpc.ChannelPoint) string {
fundingTxID, err := lnrpc.GetChanPointFundingTxid(chanPoint)
if err != nil {
return ""
}
cp := wire.OutPoint{
Hash: *fundingTxID,
Index: chanPoint.OutputIndex,
}
return cp.String()
}
// getChannelPolicies queries the channel graph and retrieves the current edge
// policies for the provided channel points.
func getChannelPolicies(t *harnessTest, node *lntest.HarnessNode,
advertisingNode string,
chanPoints ...*lnrpc.ChannelPoint) []*lnrpc.RoutingPolicy {
ctxb := context.Background()
descReq := &lnrpc.ChannelGraphRequest{
IncludeUnannounced: true,
}
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
chanGraph, err := node.DescribeGraph(ctxt, descReq)
require.NoError(t.t, err, "unable to query for alice's graph")
var policies []*lnrpc.RoutingPolicy
err = wait.NoError(func() error {
out:
for _, chanPoint := range chanPoints {
for _, e := range chanGraph.Edges {
if e.ChanPoint != txStr(chanPoint) {
continue
}
if e.Node1Pub == advertisingNode {
policies = append(policies, e.Node1Policy)
} else {
policies = append(policies, e.Node2Policy)
}
continue out
}
// If we've iterated over all the known edges and we weren't
// able to find this specific one, then we'll fail.
return fmt.Errorf("did not find edge %v", txStr(chanPoint))
}
return nil
}, defaultTimeout)
require.NoError(t.t, err)
return policies
}
// waitForNTxsInMempool polls until finding the desired number of transactions
// in the provided miner's mempool. An error is returned if this number is not
// met after the given timeout.
func waitForNTxsInMempool(miner *rpcclient.Client, n int,
timeout time.Duration) ([]*chainhash.Hash, error) {
breakTimeout := time.After(timeout)
ticker := time.NewTicker(50 * time.Millisecond)
defer ticker.Stop()
var err error
var mempool []*chainhash.Hash
for {
select {
case <-breakTimeout:
return nil, fmt.Errorf("wanted %v, found %v txs "+
"in mempool: %v", n, len(mempool), mempool)
case <-ticker.C:
mempool, err = miner.GetRawMempool()
if err != nil {
return nil, err
}
if len(mempool) == n {
return mempool, nil
}
}
}
}
// getPaymentResult reads a final result from the stream and returns it.
func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) (*lnrpc.Payment, error) {
for {
payment, err := stream.Recv()
if err != nil {
return nil, err
}
if payment.Status != lnrpc.Payment_IN_FLIGHT {
return payment, nil
}
}
}
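
// assertNumPendingHTLCs generates a closure which checks that the given
// nodes collectively have exactly numHTLCs HTLCs pending across all of their
// channels, returning an error otherwise so the caller can retry (for
// example via wait.NoError).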
func assertNumPendingHTLCs(numHTLCs int, nodes ...*lntest.HarnessNode) func() error {
return func() error {
ctxb := context.Background()
listReq := &lnrpc.ListChannelsRequest{}
pending := 0
for _, node := range nodes {
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
nodeChans, err := node.ListChannels(ctxt, listReq)
if err != nil {
return fmt.Errorf("Unable to list channels"+
"for node %s: %v", node.Name(), err)
}
for _, channel := range nodeChans.Channels {
pending += len(channel.PendingHtlcs)
}
}
if numHTLCs != pending {
return fmt.Errorf("expected %d HTLCs, got %d", numHTLCs, pending)
}
return nil
}
}
// assertAmountSent generates a closure which queries sndr and rcvr's currently
// active channels and asserts that sndr sent amt satoshis
// and rcvr received amt satoshis.
func assertAmountSent(amt btcutil.Amount, sndr, rcvr *lntest.HarnessNode) func() error {
return func() error {
// Both channels should also have properly accounted for the
// amount that has been sent/received over the channel.
listChReq := &lnrpc.ListChannelsRequest{}
ctxb := context.Background()
sndrChannels := make(map[uint64]bool)
// List sender channels so as to tally payment amounts over them.
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
sndrListChannels, err := sndr.ListChannels(ctxt, listChReq)
if err != nil {
return fmt.Errorf("unable to query %s's channel list: %v",
sndr.Name(), err)
}
for _, channel := range sndrListChannels.Channels {
sndrChannels[channel.ChanId] = true
}
var sndrSatoshisSent int64
listPayReq := &lnrpc.ListPaymentsRequest{}
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
// TODO: Assuming all payments are returned with a single request.
sndrListPayments, err := sndr.ListPayments(ctxt, listPayReq)
if err != nil {
return fmt.Errorf("unable to query %s's payments: %v",
sndr.Name(), err)
}
// Only take into account active sender channels.
for _, payment := range sndrListPayments.Payments {
if payment.Status != lnrpc.Payment_SUCCEEDED {
continue
}
for _, htlc := range payment.Htlcs {
if htlc.Status != lnrpc.HTLCAttempt_SUCCEEDED {
continue
}
firstRouteCh := htlc.Route.Hops[0].ChanId
if _, ok := sndrChannels[firstRouteCh]; ok {
sndrSatoshisSent += payment.ValueSat
}
}
}
if sndrSatoshisSent != int64(amt) {
return fmt.Errorf("%s's satoshis sent is incorrect "+
"got %v, expected %v", sndr.Name(),
sndrSatoshisSent, amt.ToUnit(btcutil.AmountSatoshi))
}
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
rcvrListChannels, err := rcvr.ListChannels(ctxt, listChReq)
if err != nil {
return fmt.Errorf("unable to query %s's channel list: %v",
rcvr.Name(), err)
}
var rcvrSatoshisReceived int64
for _, channel := range rcvrListChannels.Channels {
rcvrSatoshisReceived += channel.TotalSatoshisReceived
}
if rcvrSatoshisReceived != int64(amt) {
return fmt.Errorf("%s's satoshis sent is incorrect "+
"got %v, expected %v", rcvr.Name(),
rcvrSatoshisReceived, amt.ToUnit(btcutil.AmountSatoshi))
}
return nil
}
}
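
// Both assertNumPendingHTLCs and assertAmountSent return closures rather
// than asserting immediately; a typical (illustrative) call site polls them
// until they settle, for example:
//
//	err := wait.NoError(assertNumPendingHTLCs(0, alice, bob), defaultTimeout)
//	require.NoError(t.t, err, "unexpected pending HTLCs")
//
//	err = wait.NoError(assertAmountSent(paymentAmt, alice, bob), defaultTimeout)
//	require.NoError(t.t, err, "amount not settled")

// findChanFundingTx queries the given node's on-chain transactions and
// returns the wallet transaction that funded the provided channel point, or
// nil if it cannot be found.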
func findChanFundingTx(ctx context.Context, t *harnessTest,
chanPoint *lnrpc.ChannelPoint, node *lntest.HarnessNode) *lnrpc.Transaction {
fundingTxID, err := lnrpc.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to convert funding txid into chainhash.Hash:"+
" %v", err)
}
target := fundingTxID.String()
txns, err := node.LightningClient.GetTransactions(
ctx, &lnrpc.GetTransactionsRequest{})
if err != nil {
t.Fatalf("could not get transactions: %v", err)
}
for _, tx := range txns.Transactions {
if tx.TxHash == target {
return tx
}
}
return nil
}
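
// createThreeHopNetwork opens channels to build the topology Alice -> Bob ->
// Carol using the given commitment type, funding each node with enough UTXOs
// for anchor sweeps. If carolHodl is set, Carol is started with
// --hodl.exit-settle so that she holds onto incoming HTLCs, forcing Bob to go
// on-chain to resolve them. It returns the channel points for the Alice->Bob
// and Bob->Carol channels along with the newly created Carol node.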
func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness,
alice, bob *lntest.HarnessNode, carolHodl bool, c lnrpc.CommitmentType) (
*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) {
net.EnsureConnected(t.t, alice, bob)
// Make sure there are enough utxos for anchoring.
for i := 0; i < 2; i++ {
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, alice)
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, bob)
}
// We'll start the test by creating a channel between Alice and Bob,
// which will act as the first leg for our multi-hop HTLC.
const chanAmt = 1000000
var aliceFundingShim *lnrpc.FundingShim
var thawHeight uint32
if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
_, minerHeight, err := net.Miner.Client.GetBestBlock()
require.NoError(t.t, err)
thawHeight = uint32(minerHeight + 144)
aliceFundingShim, _, _ = deriveFundingShim(
net, t, alice, bob, chanAmt, thawHeight, true,
)
}
aliceChanPoint := openChannelAndAssert(
t, net, alice, bob,
lntest.OpenChannelParams{
Amt: chanAmt,
CommitmentType: c,
FundingShim: aliceFundingShim,
},
)
err := alice.WaitForNetworkChannelOpen(aliceChanPoint)
if err != nil {
t.Fatalf("alice didn't report channel: %v", err)
}
err = bob.WaitForNetworkChannelOpen(aliceChanPoint)
if err != nil {
t.Fatalf("bob didn't report channel: %v", err)
}
// Next, we'll create a new node "carol" and have Bob connect to her. If
// the carolHodl flag is set, we'll make carol always hold onto the HTLC;
// this way, it'll force Bob to go to chain to resolve the HTLC.
carolFlags := nodeArgsForCommitType(c)
if carolHodl {
carolFlags = append(carolFlags, "--hodl.exit-settle")
}
carol := net.NewNode(t.t, "Carol", carolFlags)
net.ConnectNodes(t.t, bob, carol)
// Make sure Carol has enough utxos for anchoring. Because the anchor by
// itself often doesn't meet the dust limit, a utxo from the wallet
// needs to be attached as an additional input. This can still lead to a
// positively-yielding transaction.
for i := 0; i < 2; i++ {
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, carol)
}
// We'll then create a channel from Bob to Carol. After this channel is
// open, our topology looks like: A -> B -> C.
var bobFundingShim *lnrpc.FundingShim
if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
bobFundingShim, _, _ = deriveFundingShim(
net, t, bob, carol, chanAmt, thawHeight, true,
)
}
bobChanPoint := openChannelAndAssert(
t, net, bob, carol,
lntest.OpenChannelParams{
Amt: chanAmt,
CommitmentType: c,
FundingShim: bobFundingShim,
},
)
err = bob.WaitForNetworkChannelOpen(bobChanPoint)
if err != nil {
t.Fatalf("alice didn't report channel: %v", err)
}
err = carol.WaitForNetworkChannelOpen(bobChanPoint)
if err != nil {
t.Fatalf("bob didn't report channel: %v", err)
}
err = alice.WaitForNetworkChannelOpen(bobChanPoint)
if err != nil {
t.Fatalf("bob didn't report channel: %v", err)
}
return aliceChanPoint, bobChanPoint, carol
}
// deriveFundingShim creates a channel funding shim by deriving the necessary
// keys on both sides.
func deriveFundingShim(net *lntest.NetworkHarness, t *harnessTest,
carol, dave *lntest.HarnessNode, chanSize btcutil.Amount,
thawHeight uint32, publish bool) (*lnrpc.FundingShim,
*lnrpc.ChannelPoint, *chainhash.Hash) {
ctxb := context.Background()
keyLoc := &walletrpc.KeyReq{KeyFamily: 9999}
carolFundingKey, err := carol.WalletKitClient.DeriveNextKey(ctxb, keyLoc)
require.NoError(t.t, err)
daveFundingKey, err := dave.WalletKitClient.DeriveNextKey(ctxb, keyLoc)
require.NoError(t.t, err)
// Now that we have the multi-sig keys for each party, we can manually
// construct the funding transaction. We'll instruct the backend to
// immediately create and broadcast a transaction paying out an exact
// amount. Normally this would reside in the mempool, but we just
// confirm it now for simplicity.
_, fundingOutput, err := input.GenFundingPkScript(
carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes,
int64(chanSize),
)
require.NoError(t.t, err)
var txid *chainhash.Hash
targetOutputs := []*wire.TxOut{fundingOutput}
if publish {
txid, err = net.Miner.SendOutputsWithoutChange(
targetOutputs, 5,
)
require.NoError(t.t, err)
} else {
tx, err := net.Miner.CreateTransaction(targetOutputs, 5, false)
require.NoError(t.t, err)
txHash := tx.TxHash()
txid = &txHash
}
// At this point, we can begin our external channel funding workflow.
// We'll start by generating a pending channel ID externally that will
// be used to track this new funding type.
var pendingChanID [32]byte
_, err = rand.Read(pendingChanID[:])
require.NoError(t.t, err)
// Now that we have the pending channel ID, Dave (our responder) will
// register the intent to receive a new channel funding workflow using
// the pending channel ID.
chanPoint := &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
FundingTxidBytes: txid[:],
},
}
chanPointShim := &lnrpc.ChanPointShim{
Amt: int64(chanSize),
ChanPoint: chanPoint,
LocalKey: &lnrpc.KeyDescriptor{
RawKeyBytes: daveFundingKey.RawKeyBytes,
KeyLoc: &lnrpc.KeyLocator{
KeyFamily: daveFundingKey.KeyLoc.KeyFamily,
KeyIndex: daveFundingKey.KeyLoc.KeyIndex,
},
},
RemoteKey: carolFundingKey.RawKeyBytes,
PendingChanId: pendingChanID[:],
ThawHeight: thawHeight,
}
fundingShim := &lnrpc.FundingShim{
Shim: &lnrpc.FundingShim_ChanPointShim{
ChanPointShim: chanPointShim,
},
}
_, err = dave.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
ShimRegister: fundingShim,
},
})
require.NoError(t.t, err)
// If we attempt to register the same shim (which has the same pending
// chan ID), then we should get an error.
_, err = dave.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
ShimRegister: fundingShim,
},
})
if err == nil {
t.Fatalf("duplicate pending channel ID funding shim " +
"registration should trigger an error")
}
// We'll take the chan point shim we just registered for Dave (the
// responder), and swap the local/remote keys before we feed it in as
// Carol's funding shim as the initiator.
fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{
RawKeyBytes: carolFundingKey.RawKeyBytes,
KeyLoc: &lnrpc.KeyLocator{
KeyFamily: carolFundingKey.KeyLoc.KeyFamily,
KeyIndex: carolFundingKey.KeyLoc.KeyIndex,
},
}
fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes
return fundingShim, chanPoint, txid
}