Skip to content
This repository was archived by the owner on Feb 12, 2024. It is now read-only.

Commit 3792b68

Browse files
jacobheun authored and Alan Shaw committed
fix(cli): make swarm addrs more resilient (#2083)
* fix(cli): make swarm addrs more resilient. Don't fail if decapsulation of ipfs fails; Go node addresses typically won't contain their peer ID. * test: add test for addrs handler * refactor(cli): have swarm addrs return the output for printing * chore: fix linting
1 parent 84978c4 commit 3792b68

File tree

2 files changed

+112
-47
lines changed

2 files changed

+112
-47
lines changed

src/cli/commands/swarm/addrs.js

Lines changed: 18 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,5 @@
11
'use strict'
22

3-
const print = require('../../utils').print
4-
53
module.exports = {
64
command: 'addrs',
75

@@ -16,15 +14,28 @@ module.exports = {
1614
argv.resolve((async () => {
1715
const ipfs = await argv.getIpfs()
1816
const res = await ipfs.swarm.addrs()
19-
res.forEach((peer) => {
17+
18+
const output = res.map((peer) => {
2019
const count = peer.multiaddrs.size
21-
print(`${peer.id.toB58String()} (${count})`)
20+
const peerAddrs = [`${peer.id.toB58String()} (${count})`]
2221

23-
peer.multiaddrs.forEach((addr) => {
24-
const res = addr.decapsulate('ipfs').toString()
25-
print(`\t${res}`)
22+
peer.multiaddrs.toArray().map((addr) => {
23+
let res
24+
try {
25+
res = addr.decapsulate('ipfs').toString()
26+
} catch (_) {
27+
// peer addresses dont need to have /ipfs/ as we know their peerId
28+
// and can encapsulate on dial.
29+
res = addr.toString()
30+
}
31+
peerAddrs.push(`\t${res}`)
2632
})
33+
34+
return peerAddrs.join('\n')
2735
})
36+
37+
// Return the output for printing
38+
return { data: output.join('\n'), argv }
2839
})())
2940
}
3041
}

test/cli/swarm.js

Lines changed: 94 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,15 @@ const chai = require('chai')
66
const dirtyChai = require('dirty-chai')
77
const expect = chai.expect
88
chai.use(dirtyChai)
9-
const series = require('async/series')
9+
const sinon = require('sinon')
1010
const ipfsExec = require('../utils/ipfs-exec')
1111
const path = require('path')
1212
const parallel = require('async/parallel')
13+
const addrsCommand = require('../../src/cli/commands/swarm/addrs')
14+
15+
const multiaddr = require('multiaddr')
16+
const PeerInfo = require('peer-info')
17+
const PeerId = require('peer-id')
1318

1419
const DaemonFactory = require('ipfsd-ctl')
1520
const df = DaemonFactory.create({ type: 'js' })
@@ -25,50 +30,54 @@ const config = {
2530
}
2631

2732
describe('swarm', () => {
28-
let bMultiaddr
29-
let ipfsA
30-
31-
let nodes = []
32-
before(function (done) {
33-
// CI takes longer to instantiate the daemon, so we need to increase the
34-
// timeout for the before step
35-
this.timeout(80 * 1000)
36-
37-
series([
38-
(cb) => {
39-
df.spawn({
40-
exec: path.resolve(`${__dirname}/../../src/cli/bin.js`),
41-
config,
42-
initOptions: { bits: 512 }
43-
}, (err, node) => {
44-
expect(err).to.not.exist()
45-
ipfsA = ipfsExec(node.repoPath)
46-
nodes.push(node)
47-
cb()
48-
})
49-
},
50-
(cb) => {
51-
df.spawn({
52-
exec: path.resolve(`${__dirname}/../../src/cli/bin.js`),
53-
config,
54-
initOptions: { bits: 512 }
55-
}, (err, node) => {
56-
expect(err).to.not.exist()
57-
node.api.id((err, id) => {
33+
afterEach(() => {
34+
sinon.restore()
35+
})
36+
37+
describe('daemon on (through http-api)', function () {
38+
this.timeout(60 * 1000)
39+
40+
let bMultiaddr
41+
let ipfsA
42+
43+
let nodes = []
44+
before(function (done) {
45+
// CI takes longer to instantiate the daemon, so we need to increase the
46+
// timeout for the before step
47+
this.timeout(80 * 1000)
48+
49+
parallel([
50+
(cb) => {
51+
df.spawn({
52+
exec: path.resolve(`${__dirname}/../../src/cli/bin.js`),
53+
config,
54+
initOptions: { bits: 512 }
55+
}, (err, node) => {
5856
expect(err).to.not.exist()
59-
bMultiaddr = id.addresses[0]
57+
ipfsA = ipfsExec(node.repoPath)
6058
nodes.push(node)
6159
cb()
6260
})
63-
})
64-
}
65-
], done)
66-
})
67-
68-
after((done) => parallel(nodes.map((node) => (cb) => node.stop(cb)), done))
61+
},
62+
(cb) => {
63+
df.spawn({
64+
exec: path.resolve(`${__dirname}/../../src/cli/bin.js`),
65+
config,
66+
initOptions: { bits: 512 }
67+
}, (err, node) => {
68+
expect(err).to.not.exist()
69+
node.api.id((err, id) => {
70+
expect(err).to.not.exist()
71+
bMultiaddr = id.addresses[0]
72+
nodes.push(node)
73+
cb()
74+
})
75+
})
76+
}
77+
], done)
78+
})
6979

70-
describe('daemon on (through http-api)', function () {
71-
this.timeout(60 * 1000)
80+
after((done) => parallel(nodes.map((node) => (cb) => node.stop(cb)), done))
7281

7382
it('connect', () => {
7483
return ipfsA('swarm', 'connect', bMultiaddr).then((out) => {
@@ -108,4 +117,49 @@ describe('swarm', () => {
108117
})
109118
})
110119
})
120+
121+
describe('handlers', () => {
122+
let peerInfo
123+
const ipfs = {
124+
swarm: { addrs: () => {} }
125+
}
126+
const argv = {
127+
resolve: () => {},
128+
getIpfs: () => ipfs
129+
}
130+
131+
describe('addrs', () => {
132+
before((done) => {
133+
PeerId.create({ bits: 512 }, (err, peerId) => {
134+
if (err) return done(err)
135+
peerInfo = new PeerInfo(peerId)
136+
done()
137+
})
138+
})
139+
140+
it('should return addresses for all peers', (done) => {
141+
sinon.stub(argv, 'resolve').callsFake(promise => {
142+
promise.then(({ data }) => {
143+
expect(data).to.eql([
144+
`${peerInfo.id.toB58String()} (2)`,
145+
`\t/ip4/127.0.0.1/tcp/4001`,
146+
`\t/ip4/127.0.0.1/tcp/4001/ws`
147+
].join('\n'))
148+
done()
149+
})
150+
})
151+
152+
sinon.stub(peerInfo.multiaddrs, '_multiaddrs').value([
153+
multiaddr('/ip4/127.0.0.1/tcp/4001'),
154+
multiaddr(`/ip4/127.0.0.1/tcp/4001/ws/ipfs/${peerInfo.id.toB58String()}`)
155+
])
156+
157+
sinon.stub(ipfs.swarm, 'addrs').returns(
158+
Promise.resolve([peerInfo])
159+
)
160+
161+
addrsCommand.handler(argv)
162+
})
163+
})
164+
})
111165
})

0 commit comments

Comments (0)