-
Notifications
You must be signed in to change notification settings - Fork 898
/
testing.go
163 lines (142 loc) · 4.53 KB
/
testing.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
package availability_test
import (
"context"
"testing"
"github.com/ipfs/boxo/bitswap"
"github.com/ipfs/boxo/bitswap/network"
"github.com/ipfs/boxo/blockservice"
"github.com/ipfs/boxo/blockstore"
"github.com/ipfs/boxo/routing/offline"
ds "github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync"
record "github.com/libp2p/go-libp2p-record"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
"github.com/stretchr/testify/require"
"github.com/celestiaorg/celestia-node/share"
"github.com/celestiaorg/celestia-node/share/ipld"
"github.com/celestiaorg/celestia-node/share/sharetest"
)
// RandFillBS populates the given BlockService with n*n random shares and
// returns the resulting data availability header (root).
func RandFillBS(t *testing.T, n int, bServ blockservice.BlockService) *share.Root {
	randomShares := sharetest.RandShares(t, n*n)
	return FillBS(t, bServ, randomShares)
}
// FillBS stores the given shares in the BlockService as an extended data
// square and returns the computed data availability header (root) for them.
func FillBS(t *testing.T, bServ blockservice.BlockService, shares []share.Share) *share.Root {
	square, err := ipld.AddShares(context.TODO(), shares, bServ)
	require.NoError(t, err)

	root, err := share.NewRoot(square)
	require.NoError(t, err)
	return root
}
// TestNode is a test network node that can serve and request data, composing
// the share getter/availability interfaces with a BlockService and a libp2p
// Host via embedding.
type TestNode struct {
	// net is the parent test network this node was spawned on.
	net *TestDagNet
	share.Getter
	share.Availability
	blockservice.BlockService
	host.Host
}
// ClearStorage removes every block currently held in the node's blockstore.
func (n *TestNode) ClearStorage() {
	ch, err := n.Blockstore().AllKeysChan(n.net.ctx)
	require.NoError(n.net.T, err)
	for key := range ch {
		require.NoError(n.net.T, n.DeleteBlock(n.net.ctx, key))
	}
}
// TestDagNet is a testing swarm utility that spawns nodes on a mock network
// and lets tests control how they connect and exchange data.
type TestDagNet struct {
	// ctx bounds the lifetime of all operations on the network's nodes.
	ctx context.Context
	T   *testing.T
	// net is the underlying libp2p mock network.
	net mocknet.Mocknet
	// nodes holds every node spawned on this network, in creation order.
	nodes []*TestNode
}
// NewTestDAGNet creates a new testing swarm utility to spawn different nodes
// and test how they interact and/or exchange data.
func NewTestDAGNet(ctx context.Context, t *testing.T) *TestDagNet {
	dn := &TestDagNet{
		ctx: ctx,
		T:   t,
		net: mocknet.New(),
	}
	return dn
}
// NewTestNodeWithBlockstore creates a new plain TestNode backed by the given
// datastore and blockstore that can serve and request data.
func (dn *TestDagNet) NewTestNodeWithBlockstore(dstore ds.Datastore, bstore blockstore.Blockstore) *TestNode {
	peerHost, err := dn.net.GenPeer()
	require.NoError(dn.T, err)

	router := offline.NewOfflineRouter(dstore, record.NamespacedValidator{})
	exch := bitswap.New(
		dn.ctx,
		network.NewFromIpfsHost(peerHost, router),
		bstore,
		// disable routines for DHT content provides, as we don't use them
		bitswap.ProvideEnabled(false),
		// otherwise it spawns 128 routines which is too much for tests
		bitswap.EngineBlockstoreWorkerCount(1),
		bitswap.EngineTaskWorkerCount(2),
		bitswap.TaskWorkerCount(2),
		bitswap.SetSimulateDontHavesOnTimeout(false),
		bitswap.SetSendDontHaves(false),
	)

	node := &TestNode{
		net:          dn,
		BlockService: ipld.NewBlockservice(bstore, exch),
		Host:         peerHost,
	}
	dn.nodes = append(dn.nodes, node)
	return node
}
// NewTestNode creates a plain network node with in-memory storage that can
// serve and request data.
func (dn *TestDagNet) NewTestNode() *TestNode {
	store := dssync.MutexWrap(ds.NewMapDatastore())
	return dn.NewTestNodeWithBlockstore(store, blockstore.NewBlockstore(store))
}
// ConnectAll links and connects every peer registered on the TestDagNet with
// every other peer.
func (dn *TestDagNet) ConnectAll() {
	require.NoError(dn.T, dn.net.LinkAll())
	require.NoError(dn.T, dn.net.ConnectAllButSelf())
}
// Connect links and connects the two given peers.
func (dn *TestDagNet) Connect(peerA, peerB peer.ID) {
	var err error
	// a link must exist before a connection can be established
	_, err = dn.net.LinkPeers(peerA, peerB)
	require.NoError(dn.T, err)
	_, err = dn.net.ConnectPeers(peerA, peerB)
	require.NoError(dn.T, err)
}
// Disconnect disconnects two peers.
// It does a hard disconnect, meaning that disconnected peers won't be able to
// reconnect on their own but only with TestDagNet.Connect or
// TestDagNet.ConnectAll.
func (dn *TestDagNet) Disconnect(peerA, peerB peer.ID) {
	// unlinking first makes the disconnect permanent until re-linked
	require.NoError(dn.T, dn.net.UnlinkPeers(peerA, peerB))
	require.NoError(dn.T, dn.net.DisconnectPeers(peerA, peerB))
}
// SubNet is a subset of nodes on a TestDagNet that can be connected to each
// other independently of the rest of the network.
type SubNet struct {
	*TestDagNet
	// nodes holds only the members of this subnet; the embedded
	// TestDagNet keeps its own full node list.
	nodes []*TestNode
}
// SubNet returns a new empty SubNet scoped to this TestDagNet.
func (dn *TestDagNet) SubNet() *SubNet {
	return &SubNet{TestDagNet: dn}
}
// AddNode registers the given node as a member of the SubNet.
func (sn *SubNet) AddNode(node *TestNode) {
	sn.nodes = append(sn.nodes, node)
}
// ConnectAll links and connects every distinct pair of nodes registered on
// the SubNet.
func (sn *SubNet) ConnectAll() {
	nodes := sn.nodes
	for i, n1 := range nodes {
		// Iterate only over pairs (i, j) with j > i: linking and
		// connecting are symmetric in mocknet, so visiting both (a, b)
		// and (b, a) — as the previous full double loop did — dialed
		// every pair twice and created duplicate connections.
		for _, n2 := range nodes[i+1:] {
			_, err := sn.net.LinkPeers(n1.ID(), n2.ID())
			require.NoError(sn.T, err)
			_, err = sn.net.ConnectPeers(n1.ID(), n2.ID())
			require.NoError(sn.T, err)
		}
	}
}